# ---------------------------------------------------------------------------
# repo: 40223125/w17t2
# path: static/Brython3.1.1-20150328-091302/Lib/stat.py
# license: gpl-3.0
# ---------------------------------------------------------------------------
"""Constants/functions for interpreting results of os.stat() and os.lstat().
Suggested usage: from stat import *
"""
# Indices for stat struct members in the tuple returned by os.stat()
ST_MODE = 0
ST_INO = 1
ST_DEV = 2
ST_NLINK = 3
ST_UID = 4
ST_GID = 5
ST_SIZE = 6
ST_ATIME = 7
ST_MTIME = 8
ST_CTIME = 9
# Extract bits from the mode
def S_IMODE(mode):
"""Return the portion of the file's mode that can be set by
os.chmod().
"""
return mode & 0o7777
def S_IFMT(mode):
"""Return the portion of the file's mode that describes the
file type.
"""
return mode & 0o170000
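# For example (illustrative): a regular file with mode 0o100755 gives
# S_IMODE(0o100755) == 0o755 and S_IFMT(0o100755) == S_IFREG (defined below).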
# Constants used as S_IFMT() for various file types
# (not all are implemented on all systems)
S_IFDIR = 0o040000 # directory
S_IFCHR = 0o020000 # character device
S_IFBLK = 0o060000 # block device
S_IFREG = 0o100000 # regular file
S_IFIFO = 0o010000 # fifo (named pipe)
S_IFLNK = 0o120000 # symbolic link
S_IFSOCK = 0o140000 # socket file
# Functions to test for each file type
def S_ISDIR(mode):
"""Return True if mode is from a directory."""
return S_IFMT(mode) == S_IFDIR
def S_ISCHR(mode):
"""Return True if mode is from a character special device file."""
return S_IFMT(mode) == S_IFCHR
def S_ISBLK(mode):
"""Return True if mode is from a block special device file."""
return S_IFMT(mode) == S_IFBLK
def S_ISREG(mode):
"""Return True if mode is from a regular file."""
return S_IFMT(mode) == S_IFREG
def S_ISFIFO(mode):
"""Return True if mode is from a FIFO (named pipe)."""
return S_IFMT(mode) == S_IFIFO
def S_ISLNK(mode):
"""Return True if mode is from a symbolic link."""
return S_IFMT(mode) == S_IFLNK
def S_ISSOCK(mode):
"""Return True if mode is from a socket."""
return S_IFMT(mode) == S_IFSOCK
# Names for permission bits
S_ISUID = 0o4000 # set UID bit
S_ISGID = 0o2000 # set GID bit
S_ENFMT = S_ISGID # file locking enforcement
S_ISVTX = 0o1000 # sticky bit
S_IREAD = 0o0400 # Unix V7 synonym for S_IRUSR
S_IWRITE = 0o0200 # Unix V7 synonym for S_IWUSR
S_IEXEC = 0o0100 # Unix V7 synonym for S_IXUSR
S_IRWXU = 0o0700 # mask for owner permissions
S_IRUSR = 0o0400 # read by owner
S_IWUSR = 0o0200 # write by owner
S_IXUSR = 0o0100 # execute by owner
S_IRWXG = 0o0070 # mask for group permissions
S_IRGRP = 0o0040 # read by group
S_IWGRP = 0o0020 # write by group
S_IXGRP = 0o0010 # execute by group
S_IRWXO = 0o0007 # mask for others (not in group) permissions
S_IROTH = 0o0004 # read by others
S_IWOTH = 0o0002 # write by others
S_IXOTH = 0o0001 # execute by others
# Names for file flags
UF_NODUMP = 0x00000001 # do not dump file
UF_IMMUTABLE = 0x00000002 # file may not be changed
UF_APPEND = 0x00000004 # file may only be appended to
UF_OPAQUE = 0x00000008 # directory is opaque when viewed through a union stack
UF_NOUNLINK = 0x00000010 # file may not be renamed or deleted
UF_COMPRESSED = 0x00000020 # OS X: file is hfs-compressed
UF_HIDDEN = 0x00008000 # OS X: file should not be displayed
SF_ARCHIVED = 0x00010000 # file may be archived
SF_IMMUTABLE = 0x00020000 # file may not be changed
SF_APPEND = 0x00040000 # file may only be appended to
SF_NOUNLINK = 0x00100000 # file may not be renamed or deleted
SF_SNAPSHOT = 0x00200000 # file is a snapshot file
_filemode_table = (
((S_IFLNK, "l"),
(S_IFREG, "-"),
(S_IFBLK, "b"),
(S_IFDIR, "d"),
(S_IFCHR, "c"),
(S_IFIFO, "p")),
((S_IRUSR, "r"),),
((S_IWUSR, "w"),),
((S_IXUSR|S_ISUID, "s"),
(S_ISUID, "S"),
(S_IXUSR, "x")),
((S_IRGRP, "r"),),
((S_IWGRP, "w"),),
((S_IXGRP|S_ISGID, "s"),
(S_ISGID, "S"),
(S_IXGRP, "x")),
((S_IROTH, "r"),),
((S_IWOTH, "w"),),
((S_IXOTH|S_ISVTX, "t"),
(S_ISVTX, "T"),
(S_IXOTH, "x"))
)
def filemode(mode):
"""Convert a file's mode to a string of the form '-rwxrwxrwx'."""
perm = []
for table in _filemode_table:
for bit, char in table:
if mode & bit == bit:
perm.append(char)
break
else:
perm.append("-")
return "".join(perm)
# ---------------------------------------------------------------------------
# repo: Fafou/Sick-Beard
# path: lib/requests/packages/chardet2/langhungarianmodel.py
# license: gpl-3.0
# ---------------------------------------------------------------------------
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbols (punctuation) that do not belong to words
# 252: 0 - 9
# Character Mapping Table:
Latin2_HungarianCharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 71, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,
175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 75,198,199,200,201,202,203,204,205,
79,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 81,222, 78,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 69, 63,239,240,241,
82, 14, 74,242, 70, 80,243, 72,244, 15, 83, 77, 84, 30, 76, 85,
245,246,247, 25, 73, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
win1250HungarianCharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 72, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,
177,178,179,180, 78,181, 69,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 76,198,199,200,201,202,203,204,205,
81,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 83,222, 80,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 70, 63,239,240,241,
84, 14, 75,242, 71, 82,243, 73,244, 15, 85, 79, 86, 30, 77, 87,
245,246,247, 25, 74, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 94.7368%
# first 1024 sequences: 5.2623%
# rest sequences: 0.8894%
# negative sequences: 0.0009%
HungarianLangModel = ( \
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,2,3,3,1,1,2,2,2,2,2,1,2,
3,2,2,3,3,3,3,3,2,3,3,3,3,3,3,1,2,3,3,3,3,2,3,3,1,1,3,3,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
3,2,1,3,3,3,3,3,2,3,3,3,3,3,1,1,2,3,3,3,3,3,3,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,2,3,3,3,1,3,3,3,3,3,1,3,3,2,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,3,3,2,3,3,2,2,3,2,3,2,0,3,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,3,3,3,1,2,3,2,2,3,1,2,3,3,2,2,0,3,3,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,3,2,3,3,3,3,2,3,3,3,3,0,2,3,2,
0,0,0,1,1,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,3,3,2,1,3,2,2,3,2,1,3,2,2,1,0,3,3,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,2,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,3,2,2,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,1,3,3,3,3,3,2,2,1,3,3,3,0,1,1,2,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,1,3,2,2,2,3,1,1,3,3,1,1,0,3,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,2,3,3,3,3,3,1,2,3,2,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,1,3,3,2,2,1,3,3,3,1,1,3,1,2,3,2,3,2,2,2,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,2,2,3,2,1,0,3,2,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,1,0,3,3,3,3,0,2,3,0,0,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,2,3,3,0,1,2,3,2,3,2,2,3,2,1,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,1,2,3,3,3,2,1,2,3,3,2,2,2,3,2,3,3,1,3,3,1,1,0,2,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,2,2,2,2,3,3,3,1,1,1,3,3,1,1,3,1,1,3,2,1,2,3,1,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,1,2,1,1,3,3,1,1,1,1,3,3,1,1,2,2,1,2,1,1,2,2,1,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,1,2,1,1,3,3,1,0,1,1,3,3,2,0,1,1,2,3,1,0,2,2,1,0,0,1,3,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,1,3,3,3,3,3,1,2,3,2,3,3,2,1,1,3,2,3,2,1,2,2,0,1,2,1,0,0,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,2,2,2,3,1,2,2,1,1,3,3,0,3,2,1,2,3,2,1,3,3,1,1,0,2,1,3,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,2,3,3,3,2,1,1,3,3,1,1,1,2,2,3,2,3,2,2,2,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,0,3,3,3,3,3,0,0,3,3,2,3,0,0,0,2,3,3,1,0,1,2,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,2,3,3,3,3,3,1,2,3,3,2,2,1,1,0,3,3,2,2,1,2,2,1,0,2,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,2,1,3,1,2,3,3,2,2,1,1,2,2,1,1,1,1,3,2,1,1,1,1,2,1,0,1,2,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
2,3,3,1,1,1,1,1,3,3,3,0,1,1,3,3,1,1,1,1,1,2,2,0,3,1,1,2,0,2,1,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,1,0,1,2,1,2,2,0,1,2,3,1,2,0,0,0,2,1,1,1,1,1,2,0,0,1,1,0,0,0,0,
1,2,1,2,2,2,1,2,1,2,0,2,0,2,2,1,1,2,1,1,2,1,1,1,0,1,0,0,0,1,1,0,
1,1,1,2,3,2,3,3,0,1,2,2,3,1,0,1,0,2,1,2,2,0,1,1,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,3,3,2,2,1,0,0,3,2,3,2,0,0,0,1,1,3,0,0,1,1,0,0,2,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,1,0,1,3,2,3,1,1,1,0,1,1,1,1,1,3,1,0,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,2,2,2,1,0,1,2,3,3,2,0,0,0,2,1,1,1,2,1,1,1,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,2,1,1,1,1,1,1,0,1,1,1,0,0,1,1,
3,2,2,1,0,0,1,1,2,2,0,3,0,1,2,1,1,0,0,1,1,1,0,1,1,1,1,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,1,1,1,1,1,2,1,1,1,2,3,1,1,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,3,3,1,0,0,1,2,2,1,0,0,0,0,2,0,0,1,1,1,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,1,0,1,1,0,1,1,1,0,1,2,1,1,0,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,2,2,0,0,0,0,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,1,0,
2,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
3,2,2,0,1,0,1,0,2,3,2,0,0,1,2,2,1,0,0,1,1,1,0,0,2,1,0,1,2,2,1,1,
2,1,1,1,1,1,1,2,1,1,1,1,1,1,0,2,1,0,1,1,0,1,1,1,0,1,1,2,1,1,0,1,
2,2,2,0,0,1,0,0,2,2,1,1,0,0,2,1,1,0,0,0,1,2,0,0,2,1,0,0,2,1,1,1,
2,1,1,1,1,2,1,2,1,1,1,2,2,1,1,2,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,
1,2,3,0,0,0,1,0,3,2,1,0,0,1,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,2,1,
1,1,0,0,0,1,0,1,1,1,1,1,2,0,0,1,0,0,0,2,0,0,1,1,1,1,1,1,1,1,0,1,
3,0,0,2,1,2,2,1,0,0,2,1,2,2,0,0,0,2,1,1,1,0,1,1,0,0,1,1,2,0,0,0,
1,2,1,2,2,1,1,2,1,2,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,0,0,1,
1,3,2,0,0,0,1,0,2,2,2,0,0,0,2,2,1,0,0,0,0,3,1,1,1,1,0,0,2,1,1,1,
2,1,0,1,1,1,0,1,1,1,1,1,1,1,0,2,1,0,0,1,0,1,1,0,1,1,1,1,1,1,0,1,
2,3,2,0,0,0,1,0,2,2,0,0,0,0,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,1,0,
2,1,1,1,1,2,1,2,1,2,0,1,1,1,0,2,1,1,1,2,1,1,1,1,0,1,1,1,1,1,0,1,
3,1,1,2,2,2,3,2,1,1,2,2,1,1,0,1,0,2,2,1,1,1,1,1,0,0,1,1,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,0,0,0,0,0,2,2,0,0,0,0,2,2,1,0,0,0,1,1,0,0,1,2,0,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,1,1,0,1,2,1,1,1,0,1,
1,0,0,1,2,3,2,1,0,0,2,0,1,1,0,0,0,1,1,1,1,0,1,1,0,0,1,0,0,0,0,0,
1,2,1,2,1,2,1,1,1,2,0,2,1,1,1,0,1,2,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,3,2,0,0,0,0,0,1,1,2,1,0,0,1,1,1,0,0,0,0,2,0,0,1,1,0,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,0,1,1,1,1,0,2,1,1,1,1,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,1,1,1,0,2,2,2,0,0,0,3,2,1,0,0,0,1,1,0,0,1,1,0,1,1,1,0,0,
1,1,0,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,0,0,1,1,1,0,1,0,1,
2,1,0,2,1,1,2,2,1,1,2,1,1,1,0,0,0,1,1,0,1,1,1,1,0,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,1,0,
1,2,3,0,0,0,1,0,2,2,0,0,0,0,2,2,0,0,0,0,0,1,0,0,1,0,0,0,2,0,1,0,
2,1,1,1,1,1,0,2,0,0,0,1,2,1,1,1,1,0,1,2,0,1,0,1,0,1,1,1,0,1,0,1,
2,2,2,0,0,0,1,0,2,1,2,0,0,0,1,1,2,0,0,0,0,1,0,0,1,1,0,0,2,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,1,0,2,2,2,0,0,0,1,1,0,0,0,0,0,1,1,0,2,0,0,1,1,1,0,1,
1,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,0,0,1,1,0,1,0,1,1,1,1,1,0,0,0,1,
1,0,0,1,0,1,2,1,0,0,1,1,1,2,0,0,0,1,1,0,1,0,1,1,0,0,1,0,0,0,0,0,
0,2,1,2,1,1,1,1,1,2,0,2,0,1,1,0,1,2,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,0,1,2,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,2,1,0,1,
2,2,1,1,1,1,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,0,0,1,1,0,0,0,0,2,1,0,0,0,0,0,2,0,0,2,2,0,0,2,0,0,1,
2,1,1,1,1,1,1,1,0,1,1,0,1,1,0,1,0,0,0,1,1,1,1,0,0,1,1,1,1,0,0,1,
1,1,2,0,0,3,1,0,2,1,1,1,0,0,1,1,1,0,0,0,1,1,0,0,0,1,0,0,1,0,1,0,
1,2,1,0,1,1,1,2,1,1,0,1,1,1,1,1,0,0,0,1,1,1,1,1,0,1,0,0,0,1,0,0,
2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,2,0,0,0,
2,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,1,0,1,
2,1,1,1,2,1,1,1,0,1,1,2,1,0,0,0,0,1,1,1,1,0,1,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,1,1,1,1,0,0,1,1,2,1,0,0,0,1,1,0,0,0,1,1,0,0,1,0,1,0,0,0,
1,2,1,1,1,1,1,1,1,1,0,1,0,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,0,0,0,1,1,1,1,0,0,1,1,0,0,0,0,0,1,1,1,2,0,0,1,0,0,1,0,1,0,0,0,
0,1,1,1,1,1,1,1,1,2,0,1,1,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,0,0,2,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,1,0,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,1,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,0,1,0,0,1,1,0,1,0,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,0,1,0,0,1,0,1,0,1,1,1,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,0,0,0,1,1,1,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
)
Latin2HungarianModel = { \
'charToOrderMap': Latin2_HungarianCharToOrderMap,
'precedenceMatrix': HungarianLangModel,
'mTypicalPositiveRatio': 0.947368,
'keepEnglishLetter': True,
'charsetName': "ISO-8859-2"
}
Win1250HungarianModel = { \
'charToOrderMap': win1250HungarianCharToOrderMap,
'precedenceMatrix': HungarianLangModel,
'mTypicalPositiveRatio': 0.947368,
'keepEnglishLetter': True,
'charsetName': "windows-1250"
}
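
# Illustrative sketch (added for this write-up, not part of chardet2): how a
# single-byte charset prober typically consumes such a model. Each byte is
# mapped to a frequency order via charToOrderMap; consecutive order pairs are
# then scored against the 64x64 precedenceMatrix (3 = frequent pair, 0 =
# "negative" pair). The real SingleByteCharSetProber is more involved, and
# the names below are local assumptions.
def _positive_sequence_ratio(data, model, sample_size=64):
    order_map = model['charToOrderMap']
    matrix = model['precedenceMatrix']
    last_order = 255  # sentinel: no previous sampled character yet
    positive = total = 0
    for byte in bytearray(data):
        order = order_map[byte]
        if order < sample_size and last_order < sample_size:
            total += 1
            if matrix[last_order * sample_size + order] == 3:
                positive += 1
        last_order = order
    return float(positive) / total if total else 0.0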
# ---------------------------------------------------------------------------
# repo: abhishek-ch/hue
# path: desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/SelfTest/Random/test_random.py
# license: apache-2.0
# ---------------------------------------------------------------------------
# -*- coding: utf-8 -*-
#
# SelfTest/Util/test_generic.py: Self-test for the Crypto.Random.new() function
#
# Written in 2008 by Dwayne C. Litzenberger <[email protected]>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Random.new()"""
__revision__ = "$Id$"
import unittest
import sys
if sys.version_info[0] == 2 and sys.version_info[1] == 1:
from Crypto.Util.py21compat import *
from Crypto.Util.py3compat import *
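
# Illustrative note (added for this write-up): Crypto.Random.random mirrors a
# subset of the stdlib `random` API but is backed by a cryptographically
# strong generator, e.g.:
#   from Crypto.Random import random
#   token = random.getrandbits(128)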
class SimpleTest(unittest.TestCase):
def runTest(self):
"""Crypto.Random.new()"""
# Import the Random module and try to use it
from Crypto import Random
randobj = Random.new()
x = randobj.read(16)
y = randobj.read(16)
self.assertNotEqual(x, y)
z = Random.get_random_bytes(16)
self.assertNotEqual(x, z)
self.assertNotEqual(y, z)
# Test the Random.random module, which
# implements a subset of Python's random API
# Not implemented:
# seed(), getstate(), setstate(), jumpahead()
# random(), uniform(), triangular(), betavariate()
# expovariate(), gammavariate(), gauss(),
# longnormvariate(), normalvariate(),
# vonmisesvariate(), paretovariate()
# weibullvariate()
# WichmannHill(), whseed(), SystemRandom()
from Crypto.Random import random
x = random.getrandbits(16*8)
y = random.getrandbits(16*8)
self.assertNotEqual(x, y)
# Test randrange
if x>y:
start = y
stop = x
else:
start = x
stop = y
for step in range(1,10):
x = random.randrange(start,stop,step)
y = random.randrange(start,stop,step)
self.assertNotEqual(x, y)
self.assertEqual(start <= x < stop, True)
self.assertEqual(start <= y < stop, True)
self.assertEqual((x - start) % step, 0)
self.assertEqual((y - start) % step, 0)
for i in range(10):
self.assertEqual(random.randrange(1,2), 1)
self.assertRaises(ValueError, random.randrange, start, start)
self.assertRaises(ValueError, random.randrange, stop, start, step)
self.assertRaises(TypeError, random.randrange, start, stop, step, step)
self.assertRaises(TypeError, random.randrange, start, stop, "1")
self.assertRaises(TypeError, random.randrange, "1", stop, step)
self.assertRaises(TypeError, random.randrange, 1, "2", step)
self.assertRaises(ValueError, random.randrange, start, stop, 0)
# Test randint
x = random.randint(start,stop)
y = random.randint(start,stop)
self.assertNotEqual(x, y)
self.assertEqual(start <= x <= stop, True)
self.assertEqual(start <= y <= stop, True)
for i in range(10):
self.assertEqual(random.randint(1,1), 1)
self.assertRaises(ValueError, random.randint, stop, start)
self.assertRaises(TypeError, random.randint, start, stop, step)
self.assertRaises(TypeError, random.randint, "1", stop)
self.assertRaises(TypeError, random.randint, 1, "2")
# Test choice
seq = range(10000)
x = random.choice(seq)
y = random.choice(seq)
self.assertNotEqual(x, y)
self.assertEqual(x in seq, True)
self.assertEqual(y in seq, True)
for i in range(10):
self.assertEqual(random.choice((1,2,3)) in (1,2,3), True)
self.assertEqual(random.choice([1,2,3]) in [1,2,3], True)
        if sys.version_info[0] == 3:
self.assertEqual(random.choice(bytearray(b('123'))) in bytearray(b('123')), True)
self.assertEqual(1, random.choice([1]))
self.assertRaises(IndexError, random.choice, [])
self.assertRaises(TypeError, random.choice, 1)
# Test shuffle. Lacks random parameter to specify function.
# Make copies of seq
seq = range(500)
x = list(seq)
y = list(seq)
random.shuffle(x)
random.shuffle(y)
self.assertNotEqual(x, y)
self.assertEqual(len(seq), len(x))
self.assertEqual(len(seq), len(y))
for i in range(len(seq)):
self.assertEqual(x[i] in seq, True)
self.assertEqual(y[i] in seq, True)
self.assertEqual(seq[i] in x, True)
self.assertEqual(seq[i] in y, True)
z = [1]
random.shuffle(z)
self.assertEqual(z, [1])
if sys.version_info[0] == 3:
z = bytearray(b('12'))
random.shuffle(z)
self.assertEqual(b('1') in z, True)
self.assertRaises(TypeError, random.shuffle, b('12'))
self.assertRaises(TypeError, random.shuffle, 1)
self.assertRaises(TypeError, random.shuffle, "1")
self.assertRaises(TypeError, random.shuffle, (1,2))
# 2to3 wraps a list() around it, alas - but I want to shoot
# myself in the foot here! :D
# if sys.version_info[0] == 3:
# self.assertRaises(TypeError, random.shuffle, range(3))
# Test sample
x = random.sample(seq, 20)
y = random.sample(seq, 20)
self.assertNotEqual(x, y)
for i in range(20):
self.assertEqual(x[i] in seq, True)
self.assertEqual(y[i] in seq, True)
z = random.sample([1], 1)
self.assertEqual(z, [1])
z = random.sample((1,2,3), 1)
self.assertEqual(z[0] in (1,2,3), True)
z = random.sample("123", 1)
self.assertEqual(z[0] in "123", True)
z = random.sample(range(3), 1)
self.assertEqual(z[0] in range(3), True)
if sys.version_info[0] == 3:
z = random.sample(b("123"), 1)
self.assertEqual(z[0] in b("123"), True)
z = random.sample(bytearray(b("123")), 1)
self.assertEqual(z[0] in bytearray(b("123")), True)
self.assertRaises(TypeError, random.sample, 1)
def get_tests(config={}):
return [SimpleTest()]
if __name__ == '__main__':
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
# ---------------------------------------------------------------------------
# repo: mindofmatthew/three.js
# path: utils/converters/obj/convert_obj_three.py
# ---------------------------------------------------------------------------
"""Convert Wavefront OBJ / MTL files into Three.js (JSON model version, to be used with ascii / binary loader)
-------------------------
How to use this converter
-------------------------
python convert_obj_three.py -i infile.obj -o outfile.js [-m "morphfiles*.obj"] [-c "morphcolors*.obj"] [-a center|centerxz|top|bottom|none] [-s smooth|flat] [-t ascii|binary] [-d invert|normal] [-b] [-e]
Notes:
- flags
-i infile.obj input OBJ file
-o outfile.js output JS file
-m "morphfiles*.obj" morph OBJ files (can use wildcards, enclosed in quotes multiple patterns separate by space)
-c "morphcolors*.obj" morph colors OBJ files (can use wildcards, enclosed in quotes multiple patterns separate by space)
-a center|centerxz|top|bottom|none model alignment
-s smooth|flat smooth = export vertex normals, flat = no normals (face normals computed in loader)
-t ascii|binary export ascii or binary format (ascii has more features, binary just supports vertices, faces, normals, uvs and materials)
-d invert|normal invert transparency
-b bake material colors into face colors
-x 10.0 scale and truncate
-f 2 morph frame sampling step
- by default:
use smooth shading (if there were vertex normals in the original model)
will be in ASCII format
original model is assumed to use non-inverted transparency / dissolve (0.0 fully transparent, 1.0 fully opaque)
no face colors baking
no scale and truncate
morph frame step = 1 (all files will be processed)
- binary conversion will create two files:
outfile.js (materials)
outfile.bin (binary buffers)
--------------------------------------------------
How to use generated JS file in your HTML document
--------------------------------------------------
<script type="text/javascript" src="Three.js"></script>
...
<script type="text/javascript">
...
// load ascii model
var jsonLoader = new THREE.JSONLoader();
jsonLoader.load( "Model_ascii.js", createScene );
// load binary model
var binLoader = new THREE.BinaryLoader();
binLoader.load( "Model_bin.js", createScene );
function createScene( geometry, materials ) {
var mesh = new THREE.Mesh( geometry, new THREE.MeshFaceMaterial( materials ) );
}
...
</script>
-------------------------------------
Parsers based on formats descriptions
-------------------------------------
http://en.wikipedia.org/wiki/Obj
http://en.wikipedia.org/wiki/Material_Template_Library
-------------------
Current limitations
-------------------
- for the moment, only diffuse color and texture are used
(will need to extend shaders / renderers / materials in Three)
- texture coordinates can be wrong in canvas renderer
(there is crude normalization, but it doesn't
work for all cases)
- smoothing can be turned on/off only for the whole mesh
----------------------------------------------
How to get proper OBJ + MTL files with Blender
----------------------------------------------
0. Remove default cube (press DEL and ENTER)
1. Import / create model
2. Select all meshes (Select -> Select All by Type -> Mesh)
3. Export to OBJ (File -> Export -> Wavefront .obj)
- enable following options in exporter
Material Groups
Rotate X90
Apply Modifiers
High Quality Normals
Copy Images
Selection Only
Objects as OBJ Objects
UVs
Normals
Materials
- select empty folder
- give your exported file name with "obj" extension
- click on "Export OBJ" button
4. Your model is now all files in this folder (OBJ, MTL, number of images)
- this converter assumes all files staying in the same folder,
(OBJ / MTL files use relative paths)
- for WebGL, textures must be power of 2 sized
------
Author
------
AlteredQualia http://alteredqualia.com
"""
import fileinput
import operator
import random
import os.path
import getopt
import sys
import struct
import math
import glob
# #####################################################
# Configuration
# #####################################################
ALIGN = "none" # center centerxz bottom top none
SHADING = "smooth" # smooth flat
TYPE = "ascii" # ascii binary
TRANSPARENCY = "normal" # normal invert
TRUNCATE = False
SCALE = 1.0
FRAMESTEP = 1
BAKE_COLORS = False
# default colors for debugging (each material gets one distinct color):
# white, red, green, blue, yellow, cyan, magenta
COLORS = [0xeeeeee, 0xee0000, 0x00ee00, 0x0000ee, 0xeeee00, 0x00eeee, 0xee00ee]
# #####################################################
# Templates
# #####################################################
TEMPLATE_FILE_ASCII = u"""\
{
"metadata" :
{
"formatVersion" : 3.1,
"sourceFile" : "%(fname)s",
"generatedBy" : "OBJConverter",
"vertices" : %(nvertex)d,
"faces" : %(nface)d,
"normals" : %(nnormal)d,
"colors" : %(ncolor)d,
"uvs" : %(nuv)d,
"materials" : %(nmaterial)d
},
"scale" : %(scale)f,
"materials": [%(materials)s],
"vertices": [%(vertices)s],
"morphTargets": [%(morphTargets)s],
"morphColors": [%(morphColors)s],
"normals": [%(normals)s],
"colors": [%(colors)s],
"uvs": [[%(uvs)s]],
"faces": [%(faces)s]
}
"""
TEMPLATE_FILE_BIN = u"""\
{
"metadata" :
{
"formatVersion" : 3.1,
"sourceFile" : "%(fname)s",
"generatedBy" : "OBJConverter",
"vertices" : %(nvertex)d,
"faces" : %(nface)d,
"normals" : %(nnormal)d,
"uvs" : %(nuv)d,
"materials" : %(nmaterial)d
},
"materials": [%(materials)s],
"buffers": "%(buffers)s"
}
"""
TEMPLATE_VERTEX = "%f,%f,%f"
TEMPLATE_VERTEX_TRUNCATE = "%d,%d,%d"
TEMPLATE_N = "%.5g,%.5g,%.5g"
TEMPLATE_UV = "%.5g,%.5g"
TEMPLATE_COLOR = "%.3g,%.3g,%.3g"
TEMPLATE_COLOR_DEC = "%d"
TEMPLATE_MORPH_VERTICES = '\t{ "name": "%s", "vertices": [%s] }'
TEMPLATE_MORPH_COLORS = '\t{ "name": "%s", "colors": [%s] }'
# #####################################################
# Utils
# #####################################################
def file_exists(filename):
"""Return true if file exists and is accessible for reading.
Should be safer than just testing for existence due to links and
permissions magic on Unix filesystems.
@rtype: boolean
"""
try:
f = open(filename, 'r')
f.close()
return True
except IOError:
return False
def get_name(fname):
"""Create model name based of filename ("path/fname.js" -> "fname").
"""
return os.path.splitext(os.path.basename(fname))[0]
def bbox(vertices):
"""Compute bounding box of vertex array.
"""
if len(vertices)>0:
minx = maxx = vertices[0][0]
miny = maxy = vertices[0][1]
minz = maxz = vertices[0][2]
for v in vertices[1:]:
if v[0]<minx:
minx = v[0]
elif v[0]>maxx:
maxx = v[0]
if v[1]<miny:
miny = v[1]
elif v[1]>maxy:
maxy = v[1]
if v[2]<minz:
minz = v[2]
elif v[2]>maxz:
maxz = v[2]
return { 'x':[minx,maxx], 'y':[miny,maxy], 'z':[minz,maxz] }
else:
return { 'x':[0,0], 'y':[0,0], 'z':[0,0] }
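# For example (illustrative): bbox([[0, 1, 2], [3, -1, 5]]) returns
# {'x': [0, 3], 'y': [-1, 1], 'z': [2, 5]}.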
def translate(vertices, t):
"""Translate array of vertices by vector t.
"""
for i in xrange(len(vertices)):
vertices[i][0] += t[0]
vertices[i][1] += t[1]
vertices[i][2] += t[2]
def center(vertices):
"""Center model (middle of bounding box).
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][0] + (bb['y'][1] - bb['y'][0])/2.0
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def top(vertices):
"""Align top of the model with the floor (Y-axis) and center it around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][1]
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def bottom(vertices):
"""Align bottom of the model with the floor (Y-axis) and center it around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][0]
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def centerxz(vertices):
"""Center model around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = 0
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def normalize(v):
"""Normalize 3d vector"""
l = math.sqrt(v[0]*v[0] + v[1]*v[1] + v[2]*v[2])
if l:
v[0] /= l
v[1] /= l
v[2] /= l
def veckey3(v):
return round(v[0], 6), round(v[1], 6), round(v[2], 6)
# #####################################################
# MTL parser
# #####################################################
def texture_relative_path(fullpath):
texture_file = os.path.basename(fullpath.replace("\\", "/"))
return texture_file
def parse_mtl(fname):
"""Parse MTL file.
"""
materials = {}
previous_line = ""
for line in fileinput.input(fname):
line = previous_line + line
if line[-2:-1] == '\\':
previous_line = line[:-2]
continue
previous_line = ""
# Only split once initially for single-parameter tags that might have additional spaces in
# their values (i.e. "newmtl Material with spaces").
chunks = line.split(None, 1)
if len(chunks) > 0:
if len(chunks) > 1:
chunks[1] = chunks[1].strip()
# Material start
# newmtl identifier
if chunks[0] == "newmtl":
if len(chunks) > 1:
identifier = chunks[1]
else:
identifier = ""
if not identifier in materials:
materials[identifier] = {}
# Diffuse texture
# map_Kd texture_diffuse.jpg
if chunks[0] == "map_Kd" and len(chunks) == 2:
materials[identifier]["mapDiffuse"] = texture_relative_path(chunks[1])
# Ambient texture
# map_Ka texture_ambient.jpg
if chunks[0] == "map_Ka" and len(chunks) == 2:
materials[identifier]["mapAmbient"] = texture_relative_path(chunks[1])
# Specular texture
# map_Ks texture_specular.jpg
if chunks[0] == "map_Ks" and len(chunks) == 2:
materials[identifier]["mapSpecular"] = texture_relative_path(chunks[1])
# Alpha texture
# map_d texture_alpha.png
if chunks[0] == "map_d" and len(chunks) == 2:
materials[identifier]["transparent"] = True
materials[identifier]["mapAlpha"] = texture_relative_path(chunks[1])
# Bump texture
# map_bump texture_bump.jpg or bump texture_bump.jpg
if (chunks[0] == "map_bump" or chunks[0] == "bump") and len(chunks) == 2:
materials[identifier]["mapBump"] = texture_relative_path(chunks[1])
# Split the remaining parameters.
if len(chunks) > 1:
chunks = [chunks[0]] + chunks[1].split()
# Diffuse color
# Kd 1.000 1.000 1.000
if chunks[0] == "Kd" and len(chunks) == 4:
materials[identifier]["colorDiffuse"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]
# Ambient color
# Ka 1.000 1.000 1.000
if chunks[0] == "Ka" and len(chunks) == 4:
materials[identifier]["colorAmbient"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]
# Specular color
# Ks 1.000 1.000 1.000
if chunks[0] == "Ks" and len(chunks) == 4:
materials[identifier]["colorSpecular"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]
# Specular coefficient
# Ns 154.000
if chunks[0] == "Ns" and len(chunks) == 2:
materials[identifier]["specularCoef"] = float(chunks[1])
# Transparency
# Tr 0.9 or d 0.9
if (chunks[0] == "Tr" or chunks[0] == "d") and len(chunks) == 2:
materials[identifier]["transparent"] = True
if TRANSPARENCY == "invert":
materials[identifier]["transparency"] = 1.0 - float(chunks[1])
else:
materials[identifier]["transparency"] = float(chunks[1])
# Optical density
# Ni 1.0
if chunks[0] == "Ni" and len(chunks) == 2:
materials[identifier]["opticalDensity"] = float(chunks[1])
# Illumination
# illum 2
#
# 0. Color on and Ambient off
# 1. Color on and Ambient on
# 2. Highlight on
# 3. Reflection on and Ray trace on
# 4. Transparency: Glass on, Reflection: Ray trace on
# 5. Reflection: Fresnel on and Ray trace on
# 6. Transparency: Refraction on, Reflection: Fresnel off and Ray trace on
# 7. Transparency: Refraction on, Reflection: Fresnel on and Ray trace on
# 8. Reflection on and Ray trace off
# 9. Transparency: Glass on, Reflection: Ray trace off
# 10. Casts shadows onto invisible surfaces
if chunks[0] == "illum" and len(chunks) == 2:
materials[identifier]["illumination"] = int(chunks[1])
return materials
# #####################################################
# OBJ parser
# #####################################################
def parse_vertex(text):
"""Parse text chunk specifying single vertex.
Possible formats:
vertex index
vertex index / texture index
vertex index / texture index / normal index
vertex index / / normal index
"""
v = 0
t = 0
n = 0
chunks = text.split("/")
v = int(chunks[0])
if len(chunks) > 1:
if chunks[1]:
t = int(chunks[1])
if len(chunks) > 2:
if chunks[2]:
n = int(chunks[2])
return { 'v':v, 't':t, 'n':n }
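# For example (illustrative):
#   parse_vertex("5/2/3") -> {'v': 5, 't': 2, 'n': 3}
#   parse_vertex("5//3")  -> {'v': 5, 't': 0, 'n': 3}   (missing uv stays 0)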
def parse_obj(fname):
"""Parse OBJ file.
"""
vertices = []
normals = []
uvs = []
faces = []
materials = {}
material = ""
mcounter = 0
mcurrent = 0
mtllib = ""
# current face state
group = 0
object = 0
smooth = 0
previous_line = ""
for line in fileinput.input(fname):
line = previous_line + line
if line[-2:-1] == '\\':
previous_line = line[:-2]
continue
previous_line = ""
# Only split once initially for single-parameter tags that might have additional spaces in
# their values (i.e. "usemtl Material with spaces").
chunks = line.split(None, 1)
if len(chunks) > 0:
if len(chunks) > 1:
chunks[1] = chunks[1].strip()
# Group
if chunks[0] == "g" and len(chunks) == 2:
group = chunks[1]
# Object
if chunks[0] == "o" and len(chunks) == 2:
object = chunks[1]
# Materials definition
if chunks[0] == "mtllib" and len(chunks) == 2:
mtllib = chunks[1]
# Material
if chunks[0] == "usemtl":
if len(chunks) > 1:
material = chunks[1]
else:
material = ""
if not material in materials:
mcurrent = mcounter
materials[material] = mcounter
mcounter += 1
else:
mcurrent = materials[material]
# Split the remaining parameters.
if len(chunks) > 1:
chunks = [chunks[0]] + chunks[1].split()
# Vertices as (x,y,z) coordinates
# v 0.123 0.234 0.345
if chunks[0] == "v" and len(chunks) == 4:
x = float(chunks[1])
y = float(chunks[2])
z = float(chunks[3])
vertices.append([x,y,z])
# Normals in (x,y,z) form; normals might not be unit
# vn 0.707 0.000 0.707
if chunks[0] == "vn" and len(chunks) == 4:
x = float(chunks[1])
y = float(chunks[2])
z = float(chunks[3])
normals.append([x,y,z])
# Texture coordinates in (u,v[,w]) coordinates, w is optional
# vt 0.500 -1.352 [0.234]
if chunks[0] == "vt" and len(chunks) >= 3:
u = float(chunks[1])
v = float(chunks[2])
w = 0
if len(chunks)>3:
w = float(chunks[3])
uvs.append([u,v,w])
# Face
if chunks[0] == "f" and len(chunks) >= 4:
vertex_index = []
uv_index = []
normal_index = []
# Precompute vert / normal / uv lists
# for negative index lookup
vertlen = len(vertices) + 1
normlen = len(normals) + 1
uvlen = len(uvs) + 1
for v in chunks[1:]:
vertex = parse_vertex(v)
if vertex['v']:
if vertex['v'] < 0:
vertex['v'] += vertlen
vertex_index.append(vertex['v'])
if vertex['t']:
if vertex['t'] < 0:
vertex['t'] += uvlen
uv_index.append(vertex['t'])
if vertex['n']:
if vertex['n'] < 0:
vertex['n'] += normlen
normal_index.append(vertex['n'])
faces.append({
'vertex':vertex_index,
'uv':uv_index,
'normal':normal_index,
'material':mcurrent,
'group':group,
'object':object,
'smooth':smooth,
})
# Smooth shading
if chunks[0] == "s" and len(chunks) == 2:
smooth = chunks[1]
return faces, vertices, uvs, normals, materials, mtllib
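# Illustrative note: for a minimal OBJ with the lines "v 0 0 0", "v 1 0 0",
# "v 0 1 0" and "f 1 2 3", parse_obj() yields three vertices and one face dict
# whose 'vertex' list holds the 1-based indices [1, 2, 3] (uv/normal lists
# stay empty).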
# #####################################################
# Generator - faces
# #####################################################
def setBit(value, position, on):
if on:
mask = 1 << position
return (value | mask)
else:
mask = ~(1 << position)
return (value & mask)
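# For example (illustrative): a quad (bit 0) with a material index (bit 1)
# and per-vertex normals (bit 5) yields faceType 0b00100011 == 35.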
def generate_face(f, fc):
isTriangle = ( len(f['vertex']) == 3 )
if isTriangle:
nVertices = 3
else:
nVertices = 4
hasMaterial = True # for the moment OBJs without materials get default material
hasFaceUvs = False # not supported in OBJ
hasFaceVertexUvs = ( len(f['uv']) >= nVertices )
hasFaceNormals = False # don't export any face normals (as they are computed in engine)
hasFaceVertexNormals = ( len(f["normal"]) >= nVertices and SHADING == "smooth" )
hasFaceColors = BAKE_COLORS
hasFaceVertexColors = False # not supported in OBJ
faceType = 0
faceType = setBit(faceType, 0, not isTriangle)
faceType = setBit(faceType, 1, hasMaterial)
faceType = setBit(faceType, 2, hasFaceUvs)
faceType = setBit(faceType, 3, hasFaceVertexUvs)
faceType = setBit(faceType, 4, hasFaceNormals)
faceType = setBit(faceType, 5, hasFaceVertexNormals)
faceType = setBit(faceType, 6, hasFaceColors)
faceType = setBit(faceType, 7, hasFaceVertexColors)
faceData = []
# order is important, must match order in JSONLoader
# face type
# vertex indices
# material index
# face uvs index
# face vertex uvs indices
# face normal index
# face vertex normals indices
# face color index
# face vertex colors indices
faceData.append(faceType)
# must clamp in case on polygons bigger than quads
for i in xrange(nVertices):
index = f['vertex'][i] - 1
faceData.append(index)
faceData.append( f['material'] )
if hasFaceVertexUvs:
for i in xrange(nVertices):
index = f['uv'][i] - 1
faceData.append(index)
if hasFaceVertexNormals:
for i in xrange(nVertices):
index = f['normal'][i] - 1
faceData.append(index)
if hasFaceColors:
index = fc['material']
faceData.append(index)
return ",".join( map(str, faceData) )
# #####################################################
# Generator - chunks
# #####################################################
def hexcolor(c):
return ( int(c[0] * 255) << 16 ) + ( int(c[1] * 255) << 8 ) + int(c[2] * 255)
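# For example (illustrative): hexcolor([1.0, 0.5, 0.0]) == 0xff7f00.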
def generate_vertex(v, option_vertices_truncate, scale):
if not option_vertices_truncate:
return TEMPLATE_VERTEX % (v[0], v[1], v[2])
else:
return TEMPLATE_VERTEX_TRUNCATE % (scale * v[0], scale * v[1], scale * v[2])
def generate_normal(n):
return TEMPLATE_N % (n[0], n[1], n[2])
def generate_uv(uv):
return TEMPLATE_UV % (uv[0], uv[1])
def generate_color_rgb(c):
return TEMPLATE_COLOR % (c[0], c[1], c[2])
def generate_color_decimal(c):
return TEMPLATE_COLOR_DEC % hexcolor(c)
# #####################################################
# Morphs
# #####################################################
def generate_morph_vertex(name, vertices):
vertex_string = ",".join(generate_vertex(v, TRUNCATE, SCALE) for v in vertices)
return TEMPLATE_MORPH_VERTICES % (name, vertex_string)
def generate_morph_color(name, colors):
color_string = ",".join(generate_color_rgb(c) for c in colors)
return TEMPLATE_MORPH_COLORS % (name, color_string)
def extract_material_colors(materials, mtlfilename, basename):
"""Extract diffuse colors from MTL materials
"""
if not materials:
materials = { 'default': 0 }
mtl = create_materials(materials, mtlfilename, basename)
mtlColorArraySrt = []
for m in mtl:
if m in materials:
index = materials[m]
color = mtl[m].get("colorDiffuse", [1,0,0])
mtlColorArraySrt.append([index, color])
mtlColorArraySrt.sort()
mtlColorArray = [x[1] for x in mtlColorArraySrt]
return mtlColorArray
def extract_face_colors(faces, material_colors):
"""Extract colors from materials and assign them to faces
"""
faceColors = []
for face in faces:
material_index = face['material']
faceColors.append(material_colors[material_index])
return faceColors
def generate_morph_targets(morphfiles, n_vertices, infile):
skipOriginalMorph = False
norminfile = os.path.normpath(infile)
morphVertexData = []
for mfilepattern in morphfiles.split():
matches = glob.glob(mfilepattern)
matches.sort()
indices = range(0, len(matches), FRAMESTEP)
for i in indices:
path = matches[i]
normpath = os.path.normpath(path)
if normpath != norminfile or not skipOriginalMorph:
name = os.path.basename(normpath)
morphFaces, morphVertices, morphUvs, morphNormals, morphMaterials, morphMtllib = parse_obj(normpath)
n_morph_vertices = len(morphVertices)
if n_vertices != n_morph_vertices:
print "WARNING: skipping morph [%s] with different number of vertices [%d] than the original model [%d]" % (name, n_morph_vertices, n_vertices)
else:
if ALIGN == "center":
center(morphVertices)
elif ALIGN == "centerxz":
centerxz(morphVertices)
elif ALIGN == "bottom":
bottom(morphVertices)
elif ALIGN == "top":
top(morphVertices)
morphVertexData.append((get_name(name), morphVertices))
print "adding [%s] with %d vertices" % (name, n_morph_vertices)
morphTargets = ""
if len(morphVertexData):
morphTargets = "\n%s\n\t" % ",\n".join(generate_morph_vertex(name, vertices) for name, vertices in morphVertexData)
return morphTargets
def generate_morph_colors(colorfiles, n_vertices, n_faces):
morphColorData = []
colorFaces = []
materialColors = []
for mfilepattern in colorfiles.split():
matches = glob.glob(mfilepattern)
matches.sort()
for path in matches:
normpath = os.path.normpath(path)
name = os.path.basename(normpath)
morphFaces, morphVertices, morphUvs, morphNormals, morphMaterials, morphMtllib = parse_obj(normpath)
n_morph_vertices = len(morphVertices)
n_morph_faces = len(morphFaces)
if n_vertices != n_morph_vertices:
print "WARNING: skipping morph color map [%s] with different number of vertices [%d] than the original model [%d]" % (name, n_morph_vertices, n_vertices)
elif n_faces != n_morph_faces:
print "WARNING: skipping morph color map [%s] with different number of faces [%d] than the original model [%d]" % (name, n_morph_faces, n_faces)
else:
morphMaterialColors = extract_material_colors(morphMaterials, morphMtllib, normpath)
morphFaceColors = extract_face_colors(morphFaces, morphMaterialColors)
morphColorData.append((get_name(name), morphFaceColors))
# take first color map for baking into face colors
if len(colorFaces) == 0:
colorFaces = morphFaces
materialColors = morphMaterialColors
print "adding [%s] with %d face colors" % (name, len(morphFaceColors))
morphColors = ""
if len(morphColorData):
morphColors = "\n%s\n\t" % ",\n".join(generate_morph_color(name, colors) for name, colors in morphColorData)
return morphColors, colorFaces, materialColors
# #####################################################
# Materials
# #####################################################
def generate_color(i):
"""Generate hex color corresponding to integer.
Colors should have well defined ordering.
First N colors are hardcoded, then colors are random
(must seed random number generator with deterministic value
before getting colors).
"""
if i < len(COLORS):
#return "0x%06x" % COLORS[i]
return COLORS[i]
else:
#return "0x%06x" % int(0xffffff * random.random())
return int(0xffffff * random.random())
def value2string(v):
if type(v)==str and v[0:2] != "0x":
return '"%s"' % v
elif type(v) == bool:
return str(v).lower()
return str(v)
def generate_materials(mtl, materials):
"""Generate JS array of materials objects
JS material objects are basically prettified one-to-one
mappings of MTL properties in JSON format.
"""
mtl_array = []
for m in mtl:
if m in materials:
index = materials[m]
# add debug information
# materials should be sorted according to how
# they appeared in OBJ file (for the first time)
# this index is identifier used in face definitions
mtl[m]['DbgName'] = m
mtl[m]['DbgIndex'] = index
mtl[m]['DbgColor'] = generate_color(index)
if BAKE_COLORS:
mtl[m]['vertexColors'] = "face"
mtl_raw = ",\n".join(['\t"%s" : %s' % (n, value2string(v)) for n,v in sorted(mtl[m].items())])
mtl_string = "\t{\n%s\n\t}" % mtl_raw
mtl_array.append([index, mtl_string])
return ",\n\n".join([m for i,m in sorted(mtl_array)])
def generate_mtl(materials):
"""Generate dummy materials (if there is no MTL file).
"""
mtl = {}
for m in materials:
index = materials[m]
mtl[m] = {
'DbgName': m,
'DbgIndex': index,
'DbgColor': generate_color(index)
}
return mtl
def generate_materials_string(materials, mtlfilename, basename):
"""Generate final materials string.
"""
if not materials:
materials = { 'default': 0 }
mtl = create_materials(materials, mtlfilename, basename)
return generate_materials(mtl, materials)
def create_materials(materials, mtlfilename, basename):
"""Parse MTL file and create mapping between its materials and OBJ materials.
Eventual edge cases are handled here (missing materials, missing MTL file).
"""
random.seed(42) # to get well defined color order for debug colors
# default materials with debug colors for when
# there is no specified MTL / MTL loading failed,
# or if there were no materials / null materials
mtl = generate_mtl(materials)
if mtlfilename:
# create full pathname for MTL (included from OBJ)
path = os.path.dirname(basename)
fname = os.path.join(path, mtlfilename)
if file_exists(fname):
# override default materials with real ones from MTL
# (where they exist, otherwise keep defaults)
mtl.update(parse_mtl(fname))
else:
print "Couldn't find [%s]" % fname
return mtl
# #####################################################
# Faces
# #####################################################
def is_triangle_flat(f):
return len(f['vertex'])==3 and not (f["normal"] and SHADING == "smooth") and not f['uv']
def is_triangle_flat_uv(f):
return len(f['vertex'])==3 and not (f["normal"] and SHADING == "smooth") and len(f['uv'])==3
def is_triangle_smooth(f):
return len(f['vertex'])==3 and f["normal"] and SHADING == "smooth" and not f['uv']
def is_triangle_smooth_uv(f):
return len(f['vertex'])==3 and f["normal"] and SHADING == "smooth" and len(f['uv'])==3
def is_quad_flat(f):
return len(f['vertex'])==4 and not (f["normal"] and SHADING == "smooth") and not f['uv']
def is_quad_flat_uv(f):
return len(f['vertex'])==4 and not (f["normal"] and SHADING == "smooth") and len(f['uv'])==4
def is_quad_smooth(f):
return len(f['vertex'])==4 and f["normal"] and SHADING == "smooth" and not f['uv']
def is_quad_smooth_uv(f):
return len(f['vertex'])==4 and f["normal"] and SHADING == "smooth" and len(f['uv'])==4
def sort_faces(faces):
data = {
'triangles_flat': [],
'triangles_flat_uv': [],
'triangles_smooth': [],
'triangles_smooth_uv': [],
'quads_flat': [],
'quads_flat_uv': [],
'quads_smooth': [],
'quads_smooth_uv': []
}
for f in faces:
if is_triangle_flat(f):
data['triangles_flat'].append(f)
elif is_triangle_flat_uv(f):
data['triangles_flat_uv'].append(f)
elif is_triangle_smooth(f):
data['triangles_smooth'].append(f)
elif is_triangle_smooth_uv(f):
data['triangles_smooth_uv'].append(f)
elif is_quad_flat(f):
data['quads_flat'].append(f)
elif is_quad_flat_uv(f):
data['quads_flat_uv'].append(f)
elif is_quad_smooth(f):
data['quads_smooth'].append(f)
elif is_quad_smooth_uv(f):
data['quads_smooth_uv'].append(f)
return data
# #####################################################
# API - ASCII converter
# #####################################################
def convert_ascii(infile, morphfiles, colorfiles, outfile):
"""Convert infile.obj to outfile.js
Here is where everything happens. If you need to automate conversions,
just import this file as Python module and call this method.
"""
if not file_exists(infile):
print "Couldn't find [%s]" % infile
return
# parse OBJ / MTL files
faces, vertices, uvs, normals, materials, mtllib = parse_obj(infile)
n_vertices = len(vertices)
n_faces = len(faces)
# align model
if ALIGN == "center":
center(vertices)
elif ALIGN == "centerxz":
centerxz(vertices)
elif ALIGN == "bottom":
bottom(vertices)
elif ALIGN == "top":
top(vertices)
# generate normals string
nnormal = 0
normals_string = ""
if SHADING == "smooth":
normals_string = ",".join(generate_normal(n) for n in normals)
nnormal = len(normals)
# extract morph vertices
morphTargets = generate_morph_targets(morphfiles, n_vertices, infile)
# extract morph colors
morphColors, colorFaces, materialColors = generate_morph_colors(colorfiles, n_vertices, n_faces)
# generate colors string
ncolor = 0
colors_string = ""
if len(colorFaces) < len(faces):
colorFaces = faces
materialColors = extract_material_colors(materials, mtllib, infile)
if BAKE_COLORS:
colors_string = ",".join(generate_color_decimal(c) for c in materialColors)
ncolor = len(materialColors)
# generate ascii model string
text = TEMPLATE_FILE_ASCII % {
"name" : get_name(outfile),
"fname" : os.path.basename(infile),
"nvertex" : len(vertices),
"nface" : len(faces),
"nuv" : len(uvs),
"nnormal" : nnormal,
"ncolor" : ncolor,
"nmaterial" : len(materials),
"materials" : generate_materials_string(materials, mtllib, infile),
"normals" : normals_string,
"colors" : colors_string,
"uvs" : ",".join(generate_uv(uv) for uv in uvs),
"vertices" : ",".join(generate_vertex(v, TRUNCATE, SCALE) for v in vertices),
"morphTargets" : morphTargets,
"morphColors" : morphColors,
"faces" : ",".join(generate_face(f, fc) for f, fc in zip(faces, colorFaces)),
"scale" : SCALE
}
out = open(outfile, "w")
out.write(text)
out.close()
print "%d vertices, %d faces, %d materials" % (len(vertices), len(faces), len(materials))
# #############################################################################
# API - Binary converter
# #############################################################################
def dump_materials_to_buffer(faces, buffer):
for f in faces:
data = struct.pack('<H',
f['material'])
buffer.append(data)
def dump_vertices3_to_buffer(faces, buffer):
for f in faces:
vi = f['vertex']
data = struct.pack('<III',
vi[0]-1, vi[1]-1, vi[2]-1)
buffer.append(data)
def dump_vertices4_to_buffer(faces, buffer):
for f in faces:
vi = f['vertex']
data = struct.pack('<IIII',
vi[0]-1, vi[1]-1, vi[2]-1, vi[3]-1)
buffer.append(data)
def dump_normals3_to_buffer(faces, buffer):
for f in faces:
ni = f['normal']
data = struct.pack('<III',
ni[0]-1, ni[1]-1, ni[2]-1)
buffer.append(data)
def dump_normals4_to_buffer(faces, buffer):
for f in faces:
ni = f['normal']
data = struct.pack('<IIII',
ni[0]-1, ni[1]-1, ni[2]-1, ni[3]-1)
buffer.append(data)
def dump_uvs3_to_buffer(faces, buffer):
for f in faces:
ui = f['uv']
data = struct.pack('<III',
ui[0]-1, ui[1]-1, ui[2]-1)
buffer.append(data)
def dump_uvs4_to_buffer(faces, buffer):
for f in faces:
ui = f['uv']
data = struct.pack('<IIII',
ui[0]-1, ui[1]-1, ui[2]-1, ui[3]-1)
buffer.append(data)
def add_padding(buffer, n):
if n % 4:
for i in range(4 - n % 4):
data = struct.pack('<B', 0)
buffer.append(data)
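# Illustrative note: add_padding keeps the binary stream 4-byte aligned.
# After writing e.g. 10 bytes of unsigned-short material indices (n == 10),
# it appends 2 zero bytes so the next section starts on a 4-byte boundary.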
def convert_binary(infile, outfile):
"""Convert infile.obj to outfile.js + outfile.bin
"""
if not file_exists(infile):
print "Couldn't find [%s]" % infile
return
binfile = get_name(outfile) + ".bin"
faces, vertices, uvs, normals, materials, mtllib = parse_obj(infile)
if ALIGN == "center":
center(vertices)
elif ALIGN == "centerxz":
centerxz(vertices)
elif ALIGN == "bottom":
bottom(vertices)
elif ALIGN == "top":
top(vertices)
sfaces = sort_faces(faces)
if SHADING == "smooth":
nnormals = len(normals)
else:
nnormals = 0
# ###################
# generate JS file
# ###################
text = TEMPLATE_FILE_BIN % {
"name" : get_name(outfile),
"materials" : generate_materials_string(materials, mtllib, infile),
"buffers" : binfile,
"fname" : os.path.basename(infile),
"nvertex" : len(vertices),
"nface" : len(faces),
"nmaterial" : len(materials),
"nnormal" : nnormals,
"nuv" : len(uvs)
}
out = open(outfile, "w")
out.write(text)
out.close()
# ###################
# generate BIN file
# ###################
buffer = []
# header
# ------
header_bytes = struct.calcsize('<12s')
header_bytes += struct.calcsize('<BBBBBBBB')
header_bytes += struct.calcsize('<IIIIIIIIIII')
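    # Illustrative arithmetic: 12 (signature) + 8 (per-field byte sizes)
    # + 44 (11 unsigned ints) == 64 header bytes in total.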
# signature
signature = struct.pack('<12s', 'Three.js 003')
# metadata (all data is little-endian)
vertex_coordinate_bytes = 4
normal_coordinate_bytes = 1
uv_coordinate_bytes = 4
vertex_index_bytes = 4
normal_index_bytes = 4
uv_index_bytes = 4
material_index_bytes = 2
# header_bytes unsigned char 1
# vertex_coordinate_bytes unsigned char 1
# normal_coordinate_bytes unsigned char 1
# uv_coordinate_bytes unsigned char 1
# vertex_index_bytes unsigned char 1
# normal_index_bytes unsigned char 1
# uv_index_bytes unsigned char 1
# material_index_bytes unsigned char 1
bdata = struct.pack('<BBBBBBBB', header_bytes,
vertex_coordinate_bytes,
normal_coordinate_bytes,
uv_coordinate_bytes,
vertex_index_bytes,
normal_index_bytes,
uv_index_bytes,
material_index_bytes)
ntri_flat = len(sfaces['triangles_flat'])
ntri_smooth = len(sfaces['triangles_smooth'])
ntri_flat_uv = len(sfaces['triangles_flat_uv'])
ntri_smooth_uv = len(sfaces['triangles_smooth_uv'])
nquad_flat = len(sfaces['quads_flat'])
nquad_smooth = len(sfaces['quads_smooth'])
nquad_flat_uv = len(sfaces['quads_flat_uv'])
nquad_smooth_uv = len(sfaces['quads_smooth_uv'])
# nvertices unsigned int 4
# nnormals unsigned int 4
# nuvs unsigned int 4
# ntri_flat unsigned int 4
# ntri_smooth unsigned int 4
# ntri_flat_uv unsigned int 4
# ntri_smooth_uv unsigned int 4
# nquad_flat unsigned int 4
# nquad_smooth unsigned int 4
# nquad_flat_uv unsigned int 4
# nquad_smooth_uv unsigned int 4
ndata = struct.pack('<IIIIIIIIIII', len(vertices),
nnormals,
len(uvs),
ntri_flat,
ntri_smooth,
ntri_flat_uv,
ntri_smooth_uv,
nquad_flat,
nquad_smooth,
nquad_flat_uv,
nquad_smooth_uv)
buffer.append(signature)
buffer.append(bdata)
buffer.append(ndata)
# 1. vertices
# ------------
# x float 4
# y float 4
# z float 4
for v in vertices:
data = struct.pack('<fff', v[0], v[1], v[2])
buffer.append(data)
# 2. normals
# ---------------
# x signed char 1
# y signed char 1
# z signed char 1
if SHADING == "smooth":
for n in normals:
normalize(n)
data = struct.pack('<bbb', math.floor(n[0]*127+0.5),
math.floor(n[1]*127+0.5),
math.floor(n[2]*127+0.5))
buffer.append(data)
add_padding(buffer, nnormals * 3)
# 3. uvs
# -----------
# u float 4
# v float 4
for uv in uvs:
data = struct.pack('<ff', uv[0], uv[1])
buffer.append(data)
# padding
#data = struct.pack('<BB', 0, 0)
#buffer.append(data)
# 4. flat triangles (vertices + materials)
# ------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# ------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_flat'], buffer)
dump_materials_to_buffer(sfaces['triangles_flat'], buffer)
add_padding(buffer, ntri_flat * 2)
# 5. smooth triangles (vertices + materials + normals)
# -------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# -------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# -------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_smooth'], buffer)
dump_normals3_to_buffer(sfaces['triangles_smooth'], buffer)
dump_materials_to_buffer(sfaces['triangles_smooth'], buffer)
add_padding(buffer, ntri_smooth * 2)
# 6. flat triangles uv (vertices + materials + uvs)
# --------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_flat_uv'], buffer)
dump_uvs3_to_buffer(sfaces['triangles_flat_uv'], buffer)
dump_materials_to_buffer(sfaces['triangles_flat_uv'], buffer)
add_padding(buffer, ntri_flat_uv * 2)
# 7. smooth triangles uv (vertices + materials + normals + uvs)
# ----------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# --------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
dump_normals3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
dump_uvs3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
dump_materials_to_buffer(sfaces['triangles_smooth_uv'], buffer)
add_padding(buffer, ntri_smooth_uv * 2)
# 8. flat quads (vertices + materials)
# ------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_flat'], buffer)
dump_materials_to_buffer(sfaces['quads_flat'], buffer)
add_padding(buffer, nquad_flat * 2)
# 9. smooth quads (vertices + materials + normals)
# -------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# nd unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_smooth'], buffer)
dump_normals4_to_buffer(sfaces['quads_smooth'], buffer)
dump_materials_to_buffer(sfaces['quads_smooth'], buffer)
add_padding(buffer, nquad_smooth * 2)
# 10. flat quads uv (vertices + materials + uvs)
# ------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# ud unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_flat_uv'], buffer)
dump_uvs4_to_buffer(sfaces['quads_flat_uv'], buffer)
dump_materials_to_buffer(sfaces['quads_flat_uv'], buffer)
add_padding(buffer, nquad_flat_uv * 2)
# 11. smooth quads uv
# -------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# nd unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# ud unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_smooth_uv'], buffer)
dump_normals4_to_buffer(sfaces['quads_smooth_uv'], buffer)
dump_uvs4_to_buffer(sfaces['quads_smooth_uv'], buffer)
dump_materials_to_buffer(sfaces['quads_smooth_uv'], buffer)
add_padding(buffer, nquad_smooth_uv * 2)
path = os.path.dirname(outfile)
fname = os.path.join(path, binfile)
out = open(fname, "wb")
out.write("".join(buffer))
out.close()
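# --- Illustrative reader sketch (editor's addition, not part of the converter) ---
# Reads back the little-endian header written above; defined for reference
# only and never called. 'path' is a hypothetical file name.
def _read_bin_header(path):
    import struct
    with open(path, "rb") as f:
        signature = struct.unpack('<12s', f.read(struct.calcsize('<12s')))[0]
        byte_sizes = struct.unpack('<BBBBBBBB', f.read(struct.calcsize('<BBBBBBBB')))
        counts = struct.unpack('<IIIIIIIIIII', f.read(struct.calcsize('<IIIIIIIIIII')))
        # counts = (nvertices, nnormals, nuvs, ntri_flat, ntri_smooth,
        #           ntri_flat_uv, ntri_smooth_uv, nquad_flat, nquad_smooth,
        #           nquad_flat_uv, nquad_smooth_uv)
        return signature, byte_sizes, counts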
# #############################################################################
# Helpers
# #############################################################################
def usage():
print "Usage: %s -i filename.obj -o filename.js [-m morphfiles*.obj] [-c morphcolors*.obj] [-a center|top|bottom] [-s flat|smooth] [-t binary|ascii] [-d invert|normal]" % os.path.basename(sys.argv[0])
# #####################################################
# Main
# #####################################################
if __name__ == "__main__":
# get parameters from the command line
try:
        opts, args = getopt.getopt(sys.argv[1:], "hbi:m:c:o:a:s:t:d:x:f:", ["help", "bakecolors", "input=", "morphs=", "colors=", "output=", "align=", "shading=", "type=", "dissolve=", "truncatescale=", "framestep="])
except getopt.GetoptError:
usage()
sys.exit(2)
infile = outfile = ""
morphfiles = ""
colorfiles = ""
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-i", "--input"):
infile = a
elif o in ("-m", "--morphs"):
morphfiles = a
elif o in ("-c", "--colors"):
colorfiles = a
elif o in ("-o", "--output"):
outfile = a
elif o in ("-a", "--align"):
if a in ("top", "bottom", "center", "centerxz", "none"):
ALIGN = a
elif o in ("-s", "--shading"):
if a in ("flat", "smooth"):
SHADING = a
elif o in ("-t", "--type"):
if a in ("binary", "ascii"):
TYPE = a
elif o in ("-d", "--dissolve"):
if a in ("normal", "invert"):
TRANSPARENCY = a
elif o in ("-b", "--bakecolors"):
BAKE_COLORS = True
elif o in ("-x", "--truncatescale"):
TRUNCATE = True
SCALE = float(a)
elif o in ("-f", "--framestep"):
FRAMESTEP = int(a)
if infile == "" or outfile == "":
usage()
sys.exit(2)
print "Converting [%s] into [%s] ..." % (infile, outfile)
if morphfiles:
print "Morphs [%s]" % morphfiles
if colorfiles:
print "Colors [%s]" % colorfiles
if TYPE == "ascii":
convert_ascii(infile, morphfiles, colorfiles, outfile)
elif TYPE == "binary":
convert_binary(infile, outfile)
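# Example invocation (editor's note; script and file names are hypothetical):
#   python convert_obj_three.py -i mesh.obj -o mesh.js -t binary -s smooth -a center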
| mit | -9,151,687,110,624,921,000 | -1,803,946,248,818,527,700 | 29.092146 | 219 | 0.518178 | false |
InAnimaTe/CouchPotatoServer | libs/CodernityDB/indexcreator.py | 81 | 25444 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import tokenize
import token
import uuid
class IndexCreatorException(Exception):
def __init__(self, ex, line=None):
self.ex = ex
self.line = line
def __str__(self):
if self.line:
return repr(self.ex + "(in line: %d)" % self.line)
return repr(self.ex)
class IndexCreatorFunctionException(IndexCreatorException):
pass
class IndexCreatorValueException(IndexCreatorException):
pass
class Parser(object):
def __init__(self):
pass
def parse(self, data, name=None):
if not name:
self.name = "_" + uuid.uuid4().hex
else:
self.name = name
self.ind = 0
self.stage = 0
self.logic = ['and', 'or', 'in']
self.logic2 = ['&', '|']
self.allowed_props = {'TreeBasedIndex': ['type', 'name', 'key_format', 'node_capacity', 'pointer_format', 'meta_format'],
'HashIndex': ['type', 'name', 'key_format', 'hash_lim', 'entry_line_format'],
'MultiHashIndex': ['type', 'name', 'key_format', 'hash_lim', 'entry_line_format'],
'MultiTreeBasedIndex': ['type', 'name', 'key_format', 'node_capacity', 'pointer_format', 'meta_format']
}
self.funcs = {'md5': (['md5'], ['.digest()']),
'len': (['len'], []),
'str': (['str'], []),
'fix_r': (['self.fix_r'], []),
'prefix': (['self.prefix'], []),
'infix': (['self.infix'], []),
'suffix': (['self.suffix'], [])
}
self.handle_int_imports = {'infix': "from itertools import izip\n"}
self.funcs_with_body = {'fix_r':
(""" def fix_r(self,s,l):
e = len(s)
if e == l:
return s
elif e > l:
return s[:l]
else:
return s.rjust(l,'_')\n""", False),
'prefix':
(""" def prefix(self,s,m,l,f):
t = len(s)
if m < 1:
m = 1
o = set()
if t > l:
s = s[:l]
t = l
while m <= t:
o.add(s.rjust(f,'_'))
s = s[:-1]
t -= 1
return o\n""", False),
'suffix':
(""" def suffix(self,s,m,l,f):
t = len(s)
if m < 1:
m = 1
o = set()
if t > l:
s = s[t-l:]
t = len(s)
while m <= t:
o.add(s.rjust(f,'_'))
s = s[1:]
t -= 1
return o\n""", False),
'infix':
(""" def infix(self,s,m,l,f):
t = len(s)
o = set()
for x in xrange(m - 1, l):
t = (s, )
for y in xrange(0, x):
t += (s[y + 1:],)
o.update(set(''.join(x).rjust(f, '_').lower() for x in izip(*t)))
return o\n""", False)}
self.none = ['None', 'none', 'null']
self.props_assign = ['=', ':']
self.all_adj_num_comp = {token.NUMBER: (
token.NUMBER, token.NAME, '-', '('),
token.NAME: (token.NUMBER, token.NAME, '-', '('),
')': (token.NUMBER, token.NAME, '-', '(')
}
self.all_adj_num_op = {token.NUMBER: (token.NUMBER, token.NAME, '('),
token.NAME: (token.NUMBER, token.NAME, '('),
')': (token.NUMBER, token.NAME, '(')
}
self.allowed_adjacent = {
"<=": self.all_adj_num_comp,
">=": self.all_adj_num_comp,
">": self.all_adj_num_comp,
"<": self.all_adj_num_comp,
"==": {token.NUMBER: (token.NUMBER, token.NAME, '('),
token.NAME: (token.NUMBER, token.NAME, token.STRING, '('),
token.STRING: (token.NAME, token.STRING, '('),
')': (token.NUMBER, token.NAME, token.STRING, '('),
']': (token.NUMBER, token.NAME, token.STRING, '(')
},
"+": {token.NUMBER: (token.NUMBER, token.NAME, '('),
token.NAME: (token.NUMBER, token.NAME, token.STRING, '('),
token.STRING: (token.NAME, token.STRING, '('),
')': (token.NUMBER, token.NAME, token.STRING, '('),
']': (token.NUMBER, token.NAME, token.STRING, '(')
},
"-": {token.NUMBER: (token.NUMBER, token.NAME, '('),
token.NAME: (token.NUMBER, token.NAME, '('),
')': (token.NUMBER, token.NAME, '('),
'<': (token.NUMBER, token.NAME, '('),
'>': (token.NUMBER, token.NAME, '('),
'<=': (token.NUMBER, token.NAME, '('),
'>=': (token.NUMBER, token.NAME, '('),
'==': (token.NUMBER, token.NAME, '('),
']': (token.NUMBER, token.NAME, '(')
},
"*": self.all_adj_num_op,
"/": self.all_adj_num_op,
"%": self.all_adj_num_op,
",": {token.NUMBER: (token.NUMBER, token.NAME, token.STRING, '{', '[', '('),
token.NAME: (token.NUMBER, token.NAME, token.STRING, '(', '{', '['),
token.STRING: (token.NAME, token.STRING, token.NUMBER, '(', '{', '['),
')': (token.NUMBER, token.NAME, token.STRING, '(', '{', '['),
']': (token.NUMBER, token.NAME, token.STRING, '(', '{', '['),
'}': (token.NUMBER, token.NAME, token.STRING, '(', '{', '[')
}
}
def is_num(s):
m = re.search('[^0-9*()+\-\s/]+', s)
return not m
def is_string(s):
m = re.search('\s*(?P<a>[\'\"]+).*?(?P=a)\s*', s)
return m
data = re.split('make_key_value\:', data)
if len(data) < 2:
raise IndexCreatorFunctionException(
"Couldn't find a definition of make_key_value function!\n")
spl1 = re.split('make_key\:', data[0])
spl2 = re.split('make_key\:', data[1])
self.funcs_rev = False
if len(spl1) > 1:
data = [spl1[0]] + [data[1]] + [spl1[1]]
self.funcs_rev = True
elif len(spl2) > 1:
data = [data[0]] + spl2
else:
data.append("key")
if data[1] == re.search('\s*', data[1], re.S | re.M).group(0):
raise IndexCreatorFunctionException("Empty function body ",
len(re.split('\n', data[0])) + (len(re.split('\n', data[2])) if self.funcs_rev else 1) - 1)
if data[2] == re.search('\s*', data[2], re.S | re.M).group(0):
raise IndexCreatorFunctionException("Empty function body ",
len(re.split('\n', data[0])) + (1 if self.funcs_rev else len(re.split('\n', data[1]))) - 1)
if data[0] == re.search('\s*', data[0], re.S | re.M).group(0):
raise IndexCreatorValueException("You didn't set any properity or you set them not at the begining of the code\n")
data = [re.split(
'\n', data[0]), re.split('\n', data[1]), re.split('\n', data[2])]
self.cnt_lines = (len(data[0]), len(data[1]), len(data[2]))
ind = 0
self.predata = data
self.data = [[], [], []]
for i, v in enumerate(self.predata[0]):
for k, w in enumerate(self.predata[0][i]):
if self.predata[0][i][k] in self.props_assign:
if not is_num(self.predata[0][i][k + 1:]) and self.predata[0][i].strip()[:4] != 'type' and self.predata[0][i].strip()[:4] != 'name':
s = self.predata[0][i][k + 1:]
self.predata[0][i] = self.predata[0][i][:k + 1]
m = re.search('\s+', s.strip())
if not is_string(s) and not m:
s = "'" + s.strip() + "'"
self.predata[0][i] += s
break
for n, i in enumerate(self.predata):
for k in i:
k = k.strip()
if k:
self.data[ind].append(k)
self.check_enclosures(k, n)
ind += 1
return self.parse_ex()
def readline(self, stage):
def foo():
if len(self.data[stage]) <= self.ind:
self.ind = 0
return ""
else:
self.ind += 1
return self.data[stage][self.ind - 1]
return foo
def add(self, l, i):
def add_aux(*args):
# print args,self.ind
if len(l[i]) < self.ind:
l[i].append([])
l[i][self.ind - 1].append(args)
return add_aux
def parse_ex(self):
self.index_name = ""
self.index_type = ""
self.curLine = -1
self.con = -1
self.brackets = -1
self.curFunc = None
self.colons = 0
self.line_cons = ([], [], [])
self.pre_tokens = ([], [], [])
self.known_dicts_in_mkv = []
self.prop_name = True
self.prop_assign = False
self.is_one_arg_enough = False
self.funcs_stack = []
self.last_line = [-1, -1, -1]
self.props_set = []
self.custom_header = set()
self.tokens = []
self.tokens_head = ['# %s\n' % self.name, 'class %s(' % self.name, '):\n', ' def __init__(self, *args, **kwargs): ']
for i in xrange(3):
tokenize.tokenize(self.readline(i), self.add(self.pre_tokens, i))
            # tokenize treats some keywords not in the right way, that's why
            # we have to change some of them
for nk, k in enumerate(self.pre_tokens[i]):
for na, a in enumerate(k):
if a[0] == token.NAME and a[1] in self.logic:
self.pre_tokens[i][nk][
na] = (token.OP, a[1], a[2], a[3], a[4])
for i in self.pre_tokens[1]:
self.line_cons[1].append(self.check_colons(i, 1))
self.check_adjacents(i, 1)
if self.check_for_2nd_arg(i) == -1 and not self.is_one_arg_enough:
raise IndexCreatorValueException("No 2nd value to return (did u forget about ',None'?", self.cnt_line_nr(i[0][4], 1))
self.is_one_arg_enough = False
for i in self.pre_tokens[2]:
self.line_cons[2].append(self.check_colons(i, 2))
self.check_adjacents(i, 2)
for i in self.pre_tokens[0]:
self.handle_prop_line(i)
self.cur_brackets = 0
self.tokens += ['\n super(%s, self).__init__(*args, **kwargs)\n def make_key_value(self, data): ' % self.name]
for i in self.pre_tokens[1]:
for k in i:
self.handle_make_value(*k)
self.curLine = -1
self.con = -1
self.cur_brackets = 0
self.tokens += ['\n def make_key(self, key):']
for i in self.pre_tokens[2]:
for k in i:
self.handle_make_key(*k)
if self.index_type == "":
raise IndexCreatorValueException("Missing index type definition\n")
if self.index_name == "":
raise IndexCreatorValueException("Missing index name\n")
self.tokens_head[0] = "# " + self.index_name + "\n" + \
self.tokens_head[0]
for i in self.funcs_with_body:
if self.funcs_with_body[i][1]:
self.tokens_head.insert(4, self.funcs_with_body[i][0])
if None in self.custom_header:
self.custom_header.remove(None)
if self.custom_header:
s = ' custom_header = """'
for i in self.custom_header:
s += i
s += '"""\n'
self.tokens_head.insert(4, s)
if self.index_type in self.allowed_props:
for i in self.props_set:
if i not in self.allowed_props[self.index_type]:
raise IndexCreatorValueException("Properity %s is not allowed for index type: %s" % (i, self.index_type))
# print "".join(self.tokens_head)
# print "----------"
# print (" ".join(self.tokens))
return "".join(self.custom_header), "".join(self.tokens_head) + (" ".join(self.tokens))
# has to be run BEFORE tokenize
def check_enclosures(self, d, st):
encs = []
contr = {'(': ')', '{': '}', '[': ']', "'": "'", '"': '"'}
ends = [')', '}', ']', "'", '"']
for i in d:
if len(encs) > 0 and encs[-1] in ['"', "'"]:
if encs[-1] == i:
del encs[-1]
elif i in contr:
encs += [i]
elif i in ends:
if len(encs) < 1 or contr[encs[-1]] != i:
raise IndexCreatorValueException("Missing opening enclosure for \'%s\'" % i, self.cnt_line_nr(d, st))
del encs[-1]
if len(encs) > 0:
raise IndexCreatorValueException("Missing closing enclosure for \'%s\'" % encs[0], self.cnt_line_nr(d, st))
def check_adjacents(self, d, st):
def std_check(d, n):
if n == 0:
prev = -1
else:
prev = d[n - 1][1] if d[n - 1][0] == token.OP else d[n - 1][0]
cur = d[n][1] if d[n][0] == token.OP else d[n][0]
# there always is an endmarker at the end, but this is a precaution
if n + 2 > len(d):
nex = -1
else:
nex = d[n + 1][1] if d[n + 1][0] == token.OP else d[n + 1][0]
if prev not in self.allowed_adjacent[cur]:
raise IndexCreatorValueException("Wrong left value of the %s" % cur, self.cnt_line_nr(line, st))
            # there is an assumption that the whole data always ends with a 0 marker;
            # the idea probably needs a rewrite to allow more whitespace
            # between tokens, so it will be handled anyway
elif nex not in self.allowed_adjacent[cur][prev]:
raise IndexCreatorValueException("Wrong right value of the %s" % cur, self.cnt_line_nr(line, st))
for n, (t, i, _, _, line) in enumerate(d):
if t == token.NAME or t == token.STRING:
if n + 1 < len(d) and d[n + 1][0] in [token.NAME, token.STRING]:
raise IndexCreatorValueException("Did you forget about an operator in between?", self.cnt_line_nr(line, st))
elif i in self.allowed_adjacent:
std_check(d, n)
def check_colons(self, d, st):
cnt = 0
br = 0
def check_ret_args_nr(a, s):
c_b_cnt = 0
s_b_cnt = 0
n_b_cnt = 0
comas_cnt = 0
for _, i, _, _, line in a:
if c_b_cnt == n_b_cnt == s_b_cnt == 0:
if i == ',':
comas_cnt += 1
if (s == 1 and comas_cnt > 1) or (s == 2 and comas_cnt > 0):
raise IndexCreatorFunctionException("Too much arguments to return", self.cnt_line_nr(line, st))
if s == 0 and comas_cnt > 0:
raise IndexCreatorValueException("A coma here doesn't make any sense", self.cnt_line_nr(line, st))
elif i == ':':
if s == 0:
raise IndexCreatorValueException("A colon here doesn't make any sense", self.cnt_line_nr(line, st))
raise IndexCreatorFunctionException("Two colons don't make any sense", self.cnt_line_nr(line, st))
if i == '{':
c_b_cnt += 1
elif i == '}':
c_b_cnt -= 1
elif i == '(':
n_b_cnt += 1
elif i == ')':
n_b_cnt -= 1
elif i == '[':
s_b_cnt += 1
elif i == ']':
s_b_cnt -= 1
def check_if_empty(a):
for i in a:
if i not in [token.NEWLINE, token.INDENT, token.ENDMARKER]:
return False
return True
if st == 0:
check_ret_args_nr(d, st)
return
for n, i in enumerate(d):
if i[1] == ':':
if br == 0:
if len(d) < n or check_if_empty(d[n + 1:]):
raise IndexCreatorValueException(
"Empty return value", self.cnt_line_nr(i[4], st))
elif len(d) >= n:
check_ret_args_nr(d[n + 1:], st)
return cnt
else:
cnt += 1
elif i[1] == '{':
br += 1
elif i[1] == '}':
br -= 1
check_ret_args_nr(d, st)
return -1
def check_for_2nd_arg(self, d):
c_b_cnt = 0 # curly brackets counter '{}'
s_b_cnt = 0 # square brackets counter '[]'
n_b_cnt = 0 # normal brackets counter '()'
def check_2nd_arg(d, ind):
d = d[ind[0]:]
for t, i, (n, r), _, line in d:
if i == '{' or i is None:
return 0
elif t == token.NAME:
self.known_dicts_in_mkv.append((i, (n, r)))
return 0
elif t == token.STRING or t == token.NUMBER:
raise IndexCreatorValueException("Second return value of make_key_value function has to be a dictionary!", self.cnt_line_nr(line, 1))
for ind in enumerate(d):
t, i, _, _, _ = ind[1]
if s_b_cnt == n_b_cnt == c_b_cnt == 0:
if i == ',':
return check_2nd_arg(d, ind)
elif (t == token.NAME and i not in self.funcs) or i == '{':
self.is_one_arg_enough = True
if i == '{':
c_b_cnt += 1
self.is_one_arg_enough = True
elif i == '}':
c_b_cnt -= 1
elif i == '(':
n_b_cnt += 1
elif i == ')':
n_b_cnt -= 1
elif i == '[':
s_b_cnt += 1
elif i == ']':
s_b_cnt -= 1
return -1
def cnt_line_nr(self, l, stage):
nr = -1
for n, i in enumerate(self.predata[stage]):
# print i,"|||",i.strip(),"|||",l
if l == i.strip():
nr = n
if nr == -1:
return -1
if stage == 0:
return nr + 1
elif stage == 1:
return nr + self.cnt_lines[0] + (self.cnt_lines[2] - 1 if self.funcs_rev else 0)
elif stage == 2:
return nr + self.cnt_lines[0] + (self.cnt_lines[1] - 1 if not self.funcs_rev else 0)
return -1
def handle_prop_line(self, d):
d_len = len(d)
if d[d_len - 1][0] == token.ENDMARKER:
d_len -= 1
if d_len < 3:
raise IndexCreatorValueException("Can't handle properity assingment ", self.cnt_line_nr(d[0][4], 0))
if not d[1][1] in self.props_assign:
raise IndexCreatorValueException(
"Did you forget : or =?", self.cnt_line_nr(d[0][4], 0))
if d[0][0] == token.NAME or d[0][0] == token.STRING:
if d[0][1] in self.props_set:
raise IndexCreatorValueException("Properity %s is set more than once" % d[0][1], self.cnt_line_nr(d[0][4], 0))
self.props_set += [d[0][1]]
if d[0][1] == "type" or d[0][1] == "name":
t, tk, _, _, line = d[2]
if d_len > 3:
raise IndexCreatorValueException(
"Wrong value to assign", self.cnt_line_nr(line, 0))
if t == token.STRING:
m = re.search('\s*(?P<a>[\'\"]+)(.*?)(?P=a)\s*', tk)
if m:
tk = m.groups()[1]
elif t != token.NAME:
raise IndexCreatorValueException(
"Wrong value to assign", self.cnt_line_nr(line, 0))
if d[0][1] == "type":
if d[2][1] == "TreeBasedIndex":
self.custom_header.add("from CodernityDB.tree_index import TreeBasedIndex\n")
elif d[2][1] == "MultiTreeBasedIndex":
self.custom_header.add("from CodernityDB.tree_index import MultiTreeBasedIndex\n")
elif d[2][1] == "MultiHashIndex":
self.custom_header.add("from CodernityDB.hash_index import MultiHashIndex\n")
self.tokens_head.insert(2, tk)
self.index_type = tk
else:
self.index_name = tk
return
else:
self.tokens += ['\n kwargs["' + d[0][1] + '"]']
else:
raise IndexCreatorValueException("Can't handle properity assingment ", self.cnt_line_nr(d[0][4], 0))
self.tokens += ['=']
self.check_adjacents(d[2:], 0)
self.check_colons(d[2:], 0)
for i in d[2:]:
self.tokens += [i[1]]
def generate_func(self, t, tk, pos_start, pos_end, line, hdata, stage):
if self.last_line[stage] != -1 and pos_start[0] > self.last_line[stage] and line != '':
raise IndexCreatorFunctionException("This line will never be executed!", self.cnt_line_nr(line, stage))
if t == 0:
return
if pos_start[1] == 0:
if self.line_cons[stage][pos_start[0] - 1] == -1:
self.tokens += ['\n return']
self.last_line[stage] = pos_start[0]
else:
self.tokens += ['\n if']
elif tk == ':' and self.line_cons[stage][pos_start[0] - 1] > -1:
if self.line_cons[stage][pos_start[0] - 1] == 0:
self.tokens += [':\n return']
return
self.line_cons[stage][pos_start[0] - 1] -= 1
if tk in self.logic2:
# print tk
if line[pos_start[1] - 1] != tk and line[pos_start[1] + 1] != tk:
self.tokens += [tk]
if line[pos_start[1] - 1] != tk and line[pos_start[1] + 1] == tk:
if tk == '&':
self.tokens += ['and']
else:
self.tokens += ['or']
return
if self.brackets != 0:
def search_through_known_dicts(a):
for i, (n, r) in self.known_dicts_in_mkv:
if i == tk and r > pos_start[1] and n == pos_start[0] and hdata == 'data':
return True
return False
if t == token.NAME and len(self.funcs_stack) > 0 and self.funcs_stack[-1][0] == 'md5' and search_through_known_dicts(tk):
raise IndexCreatorValueException("Second value returned by make_key_value for sure isn't a dictionary ", self.cnt_line_nr(line, 1))
if tk == ')':
self.cur_brackets -= 1
if len(self.funcs_stack) > 0 and self.cur_brackets == self.funcs_stack[-1][1]:
self.tokens += [tk]
self.tokens += self.funcs[self.funcs_stack[-1][0]][1]
del self.funcs_stack[-1]
return
if tk == '(':
self.cur_brackets += 1
if tk in self.none:
self.tokens += ['None']
return
if t == token.NAME and tk not in self.logic and tk != hdata:
if tk not in self.funcs:
self.tokens += [hdata + '["' + tk + '"]']
else:
self.tokens += self.funcs[tk][0]
if tk in self.funcs_with_body:
self.funcs_with_body[tk] = (
self.funcs_with_body[tk][0], True)
self.custom_header.add(self.handle_int_imports.get(tk))
self.funcs_stack += [(tk, self.cur_brackets)]
else:
self.tokens += [tk]
def handle_make_value(self, t, tk, pos_start, pos_end, line):
self.generate_func(t, tk, pos_start, pos_end, line, 'data', 1)
def handle_make_key(self, t, tk, pos_start, pos_end, line):
self.generate_func(t, tk, pos_start, pos_end, line, 'key', 2)
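# --- Illustrative usage sketch (editor's addition) ---
# Feeds parse() a minimal definition in the DSL implied above: properties
# first, then the make_key_value / make_key bodies. The property values and
# field name 'login' are hypothetical; defined for reference, never called.
def _example_parse():
    dsl = ("name = ExampleIndex\n"
           "type = HashIndex\n"
           "key_format = 16s\n"
           "make_key_value:\n"
           "md5(login), None\n"
           "make_key:\n"
           "md5(key)\n")
    header, code = Parser().parse(dsl)
    return header, code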
| gpl-3.0 | -3,512,247,217,473,797,600 | 6,094,498,583,429,444,000 | 38.448062 | 153 | 0.449379 | false |
rahul003/mxnet | plugin/opencv/opencv.py | 61 | 6214 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=too-many-arguments,no-member,invalid-name
"""Opencv plugin for mxnet"""
import random
import ctypes
import cv2
import mxnet as mx
from mxnet.base import _LIB
from mxnet.base import mx_uint, NDArrayHandle, check_call
def imdecode(str_img, flag=1):
"""Decode image from str buffer.
Wrapper for cv2.imdecode that uses mx.nd.NDArray
Parameters
----------
str_img : str
str buffer read from image file
flag : int
same as flag for cv2.imdecode
Returns
-------
img : NDArray
decoded image in (width, height, channels)
with BGR color channel order
"""
hdl = NDArrayHandle()
check_call(_LIB.MXCVImdecode(ctypes.c_char_p(str_img),
mx_uint(len(str_img)),
flag, ctypes.byref(hdl)))
return mx.nd.NDArray(hdl)
def resize(src, size, interpolation=cv2.INTER_LINEAR):
"""Decode image from str buffer.
Wrapper for cv2.imresize that uses mx.nd.NDArray
Parameters
----------
src : NDArray
image in (width, height, channels)
size : tuple
target size in (width, height)
interpolation : int
        same as interpolation for cv2.resize
Returns
-------
img : NDArray
resized image
"""
hdl = NDArrayHandle()
check_call(_LIB.MXCVResize(src.handle, mx_uint(size[0]), mx_uint(size[1]),
interpolation, ctypes.byref(hdl)))
return mx.nd.NDArray(hdl)
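# Illustrative pipeline sketch (editor's addition): decode raw JPEG bytes and
# resize on the MXNet side. 'photo.jpg' is a hypothetical file; defined for
# reference only and never called.
def _demo_decode_resize():
    with open('photo.jpg', 'rb') as f:
        img = imdecode(f.read(), 1)    # decoded BGR NDArray
    return resize(img, (224, 224))     # bilinear interpolation by default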
def copyMakeBorder(src, top, bot, left, right, border_type=cv2.BORDER_CONSTANT, value=0):
"""Pad image border
Wrapper for cv2.copyMakeBorder that uses mx.nd.NDArray
Parameters
----------
src : NDArray
Image in (width, height, channels).
Others are the same with cv2.copyMakeBorder
Returns
-------
img : NDArray
padded image
"""
hdl = NDArrayHandle()
check_call(_LIB.MXCVcopyMakeBorder(src.handle, ctypes.c_int(top), ctypes.c_int(bot),
ctypes.c_int(left), ctypes.c_int(right),
ctypes.c_int(border_type), ctypes.c_double(value),
ctypes.byref(hdl)))
return mx.nd.NDArray(hdl)
def scale_down(src_size, size):
"""Scale down crop size if it's bigger than image size"""
w, h = size
sw, sh = src_size
if sh < h:
w, h = float(w*sh)/h, sh
if sw < w:
w, h = sw, float(h*sw)/w
return int(w), int(h)
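# Worked example (editor's note): scale_down((100, 50), (80, 80)) first clamps
# the height (50 < 80), scaling the crop to (50.0, 50), so it returns (50, 50).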
def fixed_crop(src, x0, y0, w, h, size=None, interpolation=cv2.INTER_CUBIC):
"""Crop src at fixed location, and (optionally) resize it to size"""
out = mx.nd.crop(src, begin=(y0, x0, 0), end=(y0+h, x0+w, int(src.shape[2])))
if size is not None and (w, h) != size:
out = resize(out, size, interpolation=interpolation)
return out
def random_crop(src, size):
"""Randomly crop src with size. Upsample result if src is smaller than size"""
h, w, _ = src.shape
new_w, new_h = scale_down((w, h), size)
x0 = random.randint(0, w - new_w)
y0 = random.randint(0, h - new_h)
out = fixed_crop(src, x0, y0, new_w, new_h, size)
return out, (x0, y0, new_w, new_h)
def color_normalize(src, mean, std):
"""Normalize src with mean and std"""
src -= mean
src /= std
return src
def random_size_crop(src, size, min_area=0.25, ratio=(3.0/4.0, 4.0/3.0)):
"""Randomly crop src with size. Randomize area and aspect ratio"""
h, w, _ = src.shape
area = w*h
for _ in range(10):
        new_area = random.uniform(min_area, 1.0) * area
        new_ratio = random.uniform(*ratio)
        # take the square root so that new_w * new_h ~= new_area while
        # keeping new_w / new_h ~= new_ratio
        new_w = int((new_area * new_ratio) ** 0.5)
        new_h = int((new_area / new_ratio) ** 0.5)
if random.uniform(0., 1.) < 0.5:
new_w, new_h = new_h, new_w
if new_w > w or new_h > h:
continue
x0 = random.randint(0, w - new_w)
y0 = random.randint(0, h - new_h)
out = fixed_crop(src, x0, y0, new_w, new_h, size)
return out, (x0, y0, new_w, new_h)
return random_crop(src, size)
class ImageListIter(mx.io.DataIter):
"""An example image iterator using opencv plugin"""
def __init__(self, root, flist, batch_size, size, mean=None):
mx.io.DataIter.__init__(self)
self.root = root
self.list = [line.strip() for line in open(flist).readlines()]
self.cur = 0
self.batch_size = batch_size
self.size = size
if mean is not None:
self.mean = mx.nd.array(mean)
else:
self.mean = None
def reset(self):
"""Reset iterator position to 0"""
self.cur = 0
    def next(self):
        """Return the next batch, padding the final partial batch"""
        if self.cur >= len(self.list):
            raise StopIteration
        batch = mx.nd.zeros((self.batch_size, self.size[1], self.size[0], 3))
        end = min(len(self.list), self.cur + self.batch_size)
        for i in range(self.cur, end):
            # read the image bytes in binary mode before decoding
            with open(self.root + self.list[i] + '.jpg', 'rb') as f:
                str_img = f.read()
            img = imdecode(str_img, 1)
            img, _ = random_crop(img, self.size)
            batch[i - self.cur] = img
        batch = mx.nd.transpose(batch, axes=(0, 3, 1, 2))
        ret = mx.io.DataBatch(data=[batch],
                              label=[],
                              pad=self.batch_size - (end - self.cur),
                              index=None)
        self.cur = end
        return ret
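# Illustrative usage sketch (editor's addition); the root directory and list
# file are hypothetical. Each line of 'train.lst' names one image (without
# the '.jpg' suffix) relative to the root.
#
#   it = ImageListIter('/data/images/', 'train.lst', batch_size=32,
#                      size=(224, 224), mean=[123.68, 116.78, 103.94])
#   for db in it:
#       train_on(db.data[0])   # NCHW float batch; train_on is hypothetical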
| apache-2.0 | -9,100,350,060,475,671,000 | -3,053,159,368,159,204,000 | 31.364583 | 89 | 0.588671 | false |
openmv/micropython | tests/basics/string_format_error.py | 62 | 1415 | # tests for errors in {} format string
try:
'{0:0}'.format('zzz')
except (ValueError):
print('ValueError')
try:
'{1:}'.format(1)
except IndexError:
print('IndexError')
try:
'}'.format('zzzz')
except ValueError:
print('ValueError')
# end of format parsing conversion specifier
try:
'{!'.format('a')
except ValueError:
print('ValueError')
# unknown conversion specifier
try:
'abc{!d}'.format('1')
except ValueError:
print('ValueError')
try:
'{abc'.format('zzzz')
except ValueError:
print('ValueError')
# expected ':' after specifier
try:
'{!s :}'.format(2)
except ValueError:
print('ValueError')
try:
'{}{0}'.format(1, 2)
except ValueError:
print('ValueError')
try:
'{1:}'.format(1)
except IndexError:
print('IndexError')
try:
'{ 0 :*^10}'.format(12)
except KeyError:
print('KeyError')
try:
'{0}{}'.format(1)
except ValueError:
print('ValueError')
try:
'{}{}'.format(1)
except IndexError:
print('IndexError')
try:
'{0:+s}'.format('1')
except ValueError:
print('ValueError')
try:
'{0:+c}'.format(1)
except ValueError:
print('ValueError')
try:
'{0:s}'.format(1)
except ValueError:
print('ValueError')
try:
'{:*"1"}'.format('zz')
except ValueError:
print('ValueError')
# unknown format code for str arg
try:
'{:X}'.format('zz')
except ValueError:
print('ValueError')
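# For contrast (editor's note), matching format codes succeed, e.g.
# '{:x}'.format(255) == 'ff' and '{0:s}'.format('zz') == 'zz'.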
| mit | 1,138,570,337,376,434,000 | -692,875,356,169,992,300 | 14.722222 | 44 | 0.614134 | false |
jezdez/kuma | vendor/packages/pygments/lexers/_lua_builtins.py | 43 | 6965 | # -*- coding: utf-8 -*-
"""
pygments.lexers._lua_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    This file contains the names and modules of lua functions.
It is able to re-generate itself, but for adding new functions you
probably have to add some callbacks (see function module_callbacks).
Do not edit the MODULES dict by hand.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
MODULES = {'basic': ('_G',
'_VERSION',
'assert',
'collectgarbage',
'dofile',
'error',
'getfenv',
'getmetatable',
'ipairs',
'load',
'loadfile',
'loadstring',
'next',
'pairs',
'pcall',
'print',
'rawequal',
'rawget',
'rawset',
'select',
'setfenv',
'setmetatable',
'tonumber',
'tostring',
'type',
'unpack',
'xpcall'),
'coroutine': ('coroutine.create',
'coroutine.resume',
'coroutine.running',
'coroutine.status',
'coroutine.wrap',
'coroutine.yield'),
'debug': ('debug.debug',
'debug.getfenv',
'debug.gethook',
'debug.getinfo',
'debug.getlocal',
'debug.getmetatable',
'debug.getregistry',
'debug.getupvalue',
'debug.setfenv',
'debug.sethook',
'debug.setlocal',
'debug.setmetatable',
'debug.setupvalue',
'debug.traceback'),
'io': ('io.close',
'io.flush',
'io.input',
'io.lines',
'io.open',
'io.output',
'io.popen',
'io.read',
'io.tmpfile',
'io.type',
'io.write'),
'math': ('math.abs',
'math.acos',
'math.asin',
'math.atan2',
'math.atan',
'math.ceil',
'math.cosh',
'math.cos',
'math.deg',
'math.exp',
'math.floor',
'math.fmod',
'math.frexp',
'math.huge',
'math.ldexp',
'math.log10',
'math.log',
'math.max',
'math.min',
'math.modf',
'math.pi',
'math.pow',
'math.rad',
'math.random',
'math.randomseed',
'math.sinh',
'math.sin',
'math.sqrt',
'math.tanh',
'math.tan'),
'modules': ('module',
'require',
'package.cpath',
'package.loaded',
'package.loadlib',
'package.path',
'package.preload',
'package.seeall'),
'os': ('os.clock',
'os.date',
'os.difftime',
'os.execute',
'os.exit',
'os.getenv',
'os.remove',
'os.rename',
'os.setlocale',
'os.time',
'os.tmpname'),
'string': ('string.byte',
'string.char',
'string.dump',
'string.find',
'string.format',
'string.gmatch',
'string.gsub',
'string.len',
'string.lower',
'string.match',
'string.rep',
'string.reverse',
'string.sub',
'string.upper'),
'table': ('table.concat',
'table.insert',
'table.maxn',
'table.remove',
'table.sort')}
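# Illustrative lookup sketch (editor's addition): MODULES maps a module name
# to its function names, so a reverse mapping is a simple inversion.
#   _name_to_module = {func: mod for mod, funcs in MODULES.items()
#                      for func in funcs}
#   _name_to_module['math.sin'] -> 'math'; _name_to_module['print'] -> 'basic'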
if __name__ == '__main__': # pragma: no cover
import re
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
import pprint
# you can't generally find out what module a function belongs to if you
# have only its name. Because of this, here are some callback functions
    # that recognize if a given function belongs to a specific module
def module_callbacks():
def is_in_coroutine_module(name):
return name.startswith('coroutine.')
def is_in_modules_module(name):
if name in ['require', 'module'] or name.startswith('package'):
return True
else:
return False
def is_in_string_module(name):
return name.startswith('string.')
def is_in_table_module(name):
return name.startswith('table.')
def is_in_math_module(name):
return name.startswith('math')
def is_in_io_module(name):
return name.startswith('io.')
def is_in_os_module(name):
return name.startswith('os.')
def is_in_debug_module(name):
return name.startswith('debug.')
return {'coroutine': is_in_coroutine_module,
'modules': is_in_modules_module,
'string': is_in_string_module,
'table': is_in_table_module,
'math': is_in_math_module,
'io': is_in_io_module,
'os': is_in_os_module,
'debug': is_in_debug_module}
def get_newest_version():
f = urlopen('http://www.lua.org/manual/')
r = re.compile(r'^<A HREF="(\d\.\d)/">Lua \1</A>')
for line in f:
m = r.match(line)
if m is not None:
return m.groups()[0]
def get_lua_functions(version):
f = urlopen('http://www.lua.org/manual/%s/' % version)
r = re.compile(r'^<A HREF="manual.html#pdf-(.+)">\1</A>')
functions = []
for line in f:
m = r.match(line)
if m is not None:
functions.append(m.groups()[0])
return functions
def get_function_module(name):
for mod, cb in module_callbacks().items():
if cb(name):
return mod
if '.' in name:
return name.split('.')[0]
else:
return 'basic'
def regenerate(filename, modules):
with open(filename) as fp:
content = fp.read()
header = content[:content.find('MODULES = {')]
footer = content[content.find("if __name__ == '__main__':"):]
with open(filename, 'w') as fp:
fp.write(header)
fp.write('MODULES = %s\n\n' % pprint.pformat(modules))
fp.write(footer)
def run():
version = get_newest_version()
print('> Downloading function index for Lua %s' % version)
functions = get_lua_functions(version)
print('> %d functions found:' % len(functions))
modules = {}
for full_function_name in functions:
print('>> %s' % full_function_name)
m = get_function_module(full_function_name)
modules.setdefault(m, []).append(full_function_name)
regenerate(__file__, modules)
run()
| mpl-2.0 | 1,283,139,893,290,277,400 | -8,603,112,021,419,145,000 | 26.749004 | 75 | 0.476813 | false |
DukeOfHazard/crits | crits/core/views.py | 7 | 83579 | import datetime
import json
import logging
from bson import json_util
from dateutil.parser import parse
from time import gmtime, strftime
from django.conf import settings
from django.contrib.auth.decorators import user_passes_test
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from django.template.loader import render_to_string
from crits.actors.actor import ActorThreatIdentifier
from crits.actors.forms import AddActorForm, AddActorIdentifierTypeForm
from crits.actors.forms import AddActorIdentifierForm, AttributeIdentifierForm
from crits.backdoors.forms import AddBackdoorForm
from crits.campaigns.campaign import Campaign
from crits.campaigns.forms import AddCampaignForm, CampaignForm
from crits.certificates.forms import UploadCertificateForm
from crits.comments.forms import AddCommentForm, InlineCommentForm
from crits.config.config import CRITsConfig
from crits.core.data_tools import json_handler
from crits.core.forms import SourceAccessForm, AddSourceForm, AddUserRoleForm
from crits.core.forms import SourceForm, DownloadFileForm, AddReleasabilityForm
from crits.core.forms import TicketForm
from crits.core.handlers import add_releasability, add_releasability_instance
from crits.core.handlers import remove_releasability, remove_releasability_instance
from crits.core.handlers import add_new_source, generate_counts_jtable
from crits.core.handlers import source_add_update, source_remove, source_remove_all
from crits.core.handlers import modify_bucket_list, promote_bucket_list
from crits.core.handlers import download_object_handler, unflatten
from crits.core.handlers import modify_sector_list
from crits.core.handlers import generate_bucket_jtable, generate_bucket_csv
from crits.core.handlers import generate_sector_jtable, generate_sector_csv
from crits.core.handlers import generate_dashboard, generate_global_search
from crits.core.handlers import login_user, reset_user_password
from crits.core.handlers import generate_user_profile, generate_user_preference
from crits.core.handlers import modify_source_access, get_bucket_autocomplete
from crits.core.handlers import dns_timeline, email_timeline, indicator_timeline
from crits.core.handlers import generate_users_jtable, generate_items_jtable
from crits.core.handlers import toggle_item_state, download_grid_file
from crits.core.handlers import get_data_for_item, generate_audit_jtable
from crits.core.handlers import details_from_id, status_update
from crits.core.handlers import get_favorites, favorite_update
from crits.core.handlers import generate_favorites_jtable
from crits.core.handlers import ticket_add, ticket_update, ticket_remove
from crits.core.handlers import description_update
from crits.core.source_access import SourceAccess
from crits.core.user import CRITsUser
from crits.core.user_role import UserRole
from crits.core.user_tools import user_can_view_data, is_admin, user_sources
from crits.core.user_tools import user_is_admin, get_user_list, get_nav_template
from crits.core.user_tools import get_user_role, get_user_email_notification
from crits.core.user_tools import get_user_info, get_user_organization
from crits.core.user_tools import is_user_subscribed, unsubscribe_user
from crits.core.user_tools import subscribe_user, subscribe_to_source
from crits.core.user_tools import unsubscribe_from_source, is_user_subscribed_to_source
from crits.core.user_tools import add_new_user_role, change_user_password, toggle_active
from crits.core.user_tools import save_user_secret
from crits.core.user_tools import toggle_user_preference, update_user_preference
from crits.core.user_tools import get_api_key_by_name, create_api_key_by_name
from crits.core.user_tools import revoke_api_key_by_name, make_default_api_key_by_name
from crits.core.class_mapper import class_from_id
from crits.domains.forms import TLDUpdateForm, AddDomainForm
from crits.emails.forms import EmailUploadForm, EmailEMLForm, EmailYAMLForm, EmailRawUploadForm, EmailOutlookForm
from crits.events.forms import EventForm
from crits.exploits.forms import AddExploitForm
from crits.indicators.forms import UploadIndicatorCSVForm, UploadIndicatorTextForm
from crits.indicators.forms import UploadIndicatorForm, NewIndicatorActionForm
from crits.indicators.indicator import IndicatorAction
from crits.ips.forms import AddIPForm
from crits.locations.forms import AddLocationForm
from crits.notifications.handlers import get_user_notifications
from crits.notifications.handlers import remove_user_from_notification
from crits.notifications.handlers import remove_user_notifications
from crits.objects.forms import AddObjectForm
from crits.pcaps.forms import UploadPcapForm
from crits.raw_data.forms import UploadRawDataFileForm, UploadRawDataForm
from crits.raw_data.forms import NewRawDataTypeForm
from crits.raw_data.raw_data import RawDataType
from crits.relationships.forms import ForgeRelationshipForm
from crits.samples.forms import UploadFileForm
from crits.screenshots.forms import AddScreenshotForm
from crits.targets.forms import TargetInfoForm
from crits.vocabulary.sectors import Sectors
logger = logging.getLogger(__name__)
@user_passes_test(user_can_view_data)
def update_object_description(request):
"""
    Update the description of a top-level object. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
if request.method == "POST" and request.is_ajax():
type_ = request.POST['type']
id_ = request.POST['id']
description = request.POST['description']
analyst = request.user.username
return HttpResponse(json.dumps(description_update(type_,
id_,
description,
analyst)),
mimetype="application/json")
else:
return render_to_response("error.html",
{"error" : 'Expected AJAX POST.'},
RequestContext(request))
@user_passes_test(user_can_view_data)
def toggle_favorite(request):
"""
Toggle favorite in a user profile.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
if request.method == "POST" and request.is_ajax():
type_ = request.POST['type']
id_ = request.POST['id']
analyst = request.user.username
return HttpResponse(json.dumps(favorite_update(type_,
id_,
analyst)),
mimetype="application/json")
else:
return render_to_response("error.html",
{"error" : 'Expected AJAX POST.'},
RequestContext(request))
@user_passes_test(user_can_view_data)
def favorites(request):
"""
Get favorites for a user.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
if request.method == "POST" and request.is_ajax():
analyst = request.user.username
return HttpResponse(json.dumps(get_favorites(analyst)),
mimetype="application/json")
else:
return render_to_response("error.html",
{"error" : 'Expected AJAX POST.'},
RequestContext(request))
@user_passes_test(user_can_view_data)
def favorites_list(request, ctype=None, option=None):
"""
Get favorites for a user for jtable.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
return generate_favorites_jtable(request, ctype, option)
@user_passes_test(user_can_view_data)
def get_dialog(request):
"""
Get a specific dialog for rendering in the UI.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
dialog = request.GET.get('dialog', '')
# Regex in urls.py doesn't seem to be working, should sanity check dialog
return render_to_response(dialog + ".html",
{"error" : 'Dialog not found'},
RequestContext(request))
@user_passes_test(user_can_view_data)
def update_status(request, type_, id_):
"""
Update the status of a top-level object. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:param type_: The top-level object to update.
:type type_: str
:param id_: The ObjectId to search for.
:type id_: str
:returns: :class:`django.http.HttpResponse`
"""
if request.method == "POST" and request.is_ajax():
value = request.POST['value']
analyst = request.user.username
return HttpResponse(json.dumps(status_update(type_,
id_,
value,
analyst)),
mimetype="application/json")
else:
return render_to_response("error.html",
{"error" : 'Expected AJAX POST.'},
RequestContext(request))
@user_passes_test(user_can_view_data)
def get_item_data(request):
"""
Get basic data for an item. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
item_id = request.POST.get('id','')
item_type = request.POST.get('type','')
# Right now we pass the id/type for the data we want
# If we write a function that doesn't pass these values,
# then grab them from the cookies
if not item_id:
item_id = request.COOKIES.get('crits_rel_id','')
if not item_type:
item_type = request.COOKIES.get('crits_rel_type','')
response = get_data_for_item(item_type, item_id)
return HttpResponse(json.dumps(response, default=json_handler),
content_type="application/json")
@user_passes_test(user_can_view_data)
def global_search_listing(request):
"""
Return results for a global search.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
# For object searches
if 'q' not in request.GET:
return render_to_response("error.html",
{"error" : 'No valid search criteria'},
RequestContext(request))
args = generate_global_search(request)
# If we matched a single ObjectID
if 'url' in args:
return redirect(args['url'], args['key'])
# For all other searches
if 'Result' in args and args['Result'] == "ERROR":
return render_to_response("error.html",
{"error": args['Message']},
RequestContext(request))
return render_to_response("search_listing.html",
args,
RequestContext(request))
def about(request):
"""
Return the About page.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
return render_to_response('about.html',
{},
RequestContext(request))
def help(request):
"""
Return the Help page.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
return render_to_response('help.html',
{},
RequestContext(request))
# Mongo Auth
def login(request):
"""
Authenticate a user.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
# Gather basic request information
crits_config = CRITsConfig.objects().first()
url = request.GET.get('next')
user_agent = request.META.get('HTTP_USER_AGENT', '')
remote_addr = request.META.get('REMOTE_ADDR', '')
accept_language = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
next_url = request.REQUEST.get('next', None)
# Setup defaults
username = None
login = True
show_auth = True
message = crits_config.crits_message
token_message = """
<b>If you are not using TOTP or not sure what TOTP is,<br />leave the Token field empty.</b><br />
If you are setting up TOTP for the first time, please enter a PIN above.<br />
If you are already setup with TOTP, please enter your PIN + Key above."""
response = {}
# Check for remote user being enabled and check for user
if crits_config.remote_user:
show_auth = False
username = request.META.get(settings.REMOTE_USER_META,None)
if username:
resp = login_user(username, None, next_url, user_agent,
remote_addr, accept_language, request,
totp_pass=None)
if resp['success']:
return HttpResponseRedirect(resp['message'])
else:
# Login failed, set messages/settings and continue
message = resp['message']
login = False
if resp['type'] == "totp_required":
login = True
else:
logger.warn("REMOTE_USER enabled, but no user passed.")
message = 'REMOTE_USER not provided. Please notify an admin.'
return render_to_response('login.html',
{'next': url,
'theme': 'default',
'login': False,
'show_auth': False,
'message': message,
'token_message': token_message},
RequestContext(request))
# Attempt authentication
if request.method == 'POST' and request.is_ajax():
next_url = request.POST.get('next_url', None)
# Get username from form if this is not Remote User
if not crits_config.remote_user:
username = request.POST.get('username', None)
# Even if it is remote user, try to get password.
# Remote user will not have one so we pass None.
password = request.POST.get('password', None)
# TOTP can still be required for Remote Users
totp_pass = request.POST.get('totp_pass', None)
if (not username or
(not totp_pass and crits_config.totp_web == 'Required')):
response['success'] = False
response['message'] = 'Unknown user or bad password.'
return HttpResponse(json.dumps(response),
mimetype="application/json")
        # This causes auth failures with LDAP and upper-case name parts:
#username = username.lower()
# login_user will handle the following cases:
# - User logging in with no TOTP enabled.
# - User logging in with TOTP enabled.
# - User logging in and setting up TOTP for the first time.
# It should return the string to use for setting up their
# authenticator and then prompt the user to submit pin + token.
resp = login_user(username, password, next_url, user_agent,
remote_addr, accept_language, request,
totp_pass=totp_pass)
return HttpResponse(json.dumps(resp), mimetype="application/json")
# Display template for authentication
return render_to_response('login.html',
{'next': url,
'theme': 'default',
'login': login,
'show_auth': show_auth,
'message': message,
'token_message': token_message},
RequestContext(request))
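# Illustrative client sketch (editor's note): the AJAX branch above expects a
# POST with these form fields (path and values hypothetical); the TOTP field
# may be empty unless crits_config.totp_web == 'Required'.
#
#   POST /login/  (X-Requested-With: XMLHttpRequest)
#       username=alice&password=s3cret&totp_pass=123456&next_url=/dashboard/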
def reset_password(request):
"""
Reset a user password.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST' and request.is_ajax():
action = request.POST.get('action', None)
username = request.POST.get('username', None)
email = request.POST.get('email', None)
submitted_rcode = request.POST.get('reset_code', None)
new_p = request.POST.get('new_p', None)
new_p_c = request.POST.get('new_p_c', None)
analyst = request.user.username
return reset_user_password(username=username,
action=action,
email=email,
submitted_rcode=submitted_rcode,
new_p=new_p,
new_p_c=new_p_c,
analyst=analyst)
return render_to_response('login.html',
{'reset': True},
RequestContext(request))
@user_passes_test(user_can_view_data)
def profile(request, user=None):
"""
Render the User Profile page.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:param username: The user to render the profile page for.
:type username: str
:returns: :class:`django.http.HttpResponse`
"""
if user and is_admin(request.user.username):
username = user
else:
username = request.user.username
args = generate_user_profile(username,request)
    if 'status' in args and args['status'] == "ERROR":
return render_to_response('error.html',
{'data': request,
'error': "Invalid request"},
RequestContext(request))
return render_to_response('profile.html',
args,
RequestContext(request))
@user_passes_test(user_can_view_data)
def dashboard(request):
"""
Render the Dashboard.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
return generate_dashboard(request)
@user_passes_test(user_can_view_data)
def counts_listing(request, option=None):
"""
Render the Counts jtable.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:param option: Action to take.
:type option: str of either 'jtlist', 'jtdelete', or 'inline'.
:returns: :class:`django.http.HttpResponse`
"""
return generate_counts_jtable(request, option)
@user_passes_test(user_can_view_data)
def source_releasability(request):
"""
Modify a top-level object's releasability. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST' and request.is_ajax():
type_ = request.POST.get('type', None)
id_ = request.POST.get('id', None)
name = request.POST.get('name', None)
action = request.POST.get('action', None)
date = request.POST.get('date', datetime.datetime.now())
if not isinstance(date, datetime.datetime):
date = parse(date, fuzzy=True)
user = str(request.user.username)
if not type_ or not id_ or not name or not action:
error = "Modifying releasability requires a type, id, source, and action"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
if action == "add":
result = add_releasability(type_, id_, name, user)
elif action == "add_instance":
result = add_releasability_instance(type_, id_, name, user)
elif action == "remove":
result = remove_releasability(type_, id_, name, user)
elif action == "remove_instance":
result = remove_releasability_instance(type_, id_, name, date, user)
else:
error = "Unknown releasability action: %s" % action
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
if result['success']:
subscription = {
'type': type_,
'id': id_
}
html = render_to_string('releasability_header_widget.html',
{'releasability': result['obj'],
'subscription': subscription},
RequestContext(request))
response = {'success': result['success'],
'html': html}
else:
response = {'success': result['success'],
'error': result['message']}
return HttpResponse(json.dumps(response),
mimetype="application/json")
else:
error = "Expected AJAX POST!"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
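# Illustrative request sketch (editor's note): the view above expects an AJAX
# POST naming one of the four actions; values here are hypothetical.
#
#   POST (X-Requested-With: XMLHttpRequest)
#       type=Sample&id=<ObjectId>&name=SourceA&action=add_instance
#   'action' is one of: add, add_instance, remove, remove_instance;
#   remove_instance also honors a 'date' field (fuzzy-parsed).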
def source_access(request):
"""
Modify a user's profile. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
if not is_admin(request.user.username):
error = "You do not have permission to use this feature!"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
if request.method == 'POST' and request.is_ajax():
form = SourceAccessForm(request.POST)
if form.is_valid():
data = form.cleaned_data
result = modify_source_access(request.user.username,
data)
if result['success']:
message = '<div>User modified successfully!</div>'
result['message'] = message
return HttpResponse(json.dumps(result),
mimetype="application/json")
else:
return HttpResponse(json.dumps({'form':form.as_table()}),
mimetype="application/json")
else:
error = "Expected AJAX POST!"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
@user_passes_test(user_is_admin)
def source_add(request):
"""
Add a source to CRITs. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
if request.method == "POST" and request.is_ajax():
source_form = AddSourceForm(request.POST)
analyst = request.user.username
if source_form.is_valid():
result = add_new_source(source_form.cleaned_data['source'],
analyst)
if result:
msg = ('<div>Source added successfully! Add this source to '
'users to utilize it.</div>')
message = {'message': msg,
'success': True}
else:
                message = {'message': '<div>Source addition failed!</div>',
                           'success': False}
else:
message = {'success': False,
'form': source_form.as_table()}
return HttpResponse(json.dumps(message),
mimetype="application/json")
return render_to_response("error.html",
{"error" : 'Expected AJAX POST' },
RequestContext(request))
@user_passes_test(user_is_admin)
def user_role_add(request):
"""
Add a user role to CRITs. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
if request.method == "POST" and request.is_ajax():
role_form = AddUserRoleForm(request.POST)
analyst = request.user.username
if role_form.is_valid() and is_admin(request.user.username):
result = add_new_user_role(role_form.cleaned_data['role'],
analyst)
if result:
message = {'message': '<div>User role added successfully!</div>',
'success': True}
else:
message = {'message': '<div>User role addition failed!</div>',
'success': False}
else:
message = {'success': False,
'form': role_form.as_table()}
return HttpResponse(json.dumps(message),
mimetype="application/json")
return render_to_response("error.html",
{"error" : 'Expected AJAX POST'},
RequestContext(request))
@user_passes_test(user_can_view_data)
def add_update_source(request, method, obj_type, obj_id):
"""
Add/Update a source for a top-level object. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:param method: Whether this is an "add" or "update".
:type method: str
:param obj_type: The type of top-level object.
:type obj_type: str
:param obj_id: The ObjectId to search for.
:type obj_id: str
:returns: :class:`django.http.HttpResponse`
"""
if request.method == "POST" and request.is_ajax():
form = SourceForm(request.user.username, request.POST)
if form.is_valid():
data = form.cleaned_data
analyst = request.user.username
# check to see that this user can already see the object
if (data['name'] in user_sources(analyst)):
if method == "add":
date = datetime.datetime.now()
else:
date = datetime.datetime.strptime(data['date'],
settings.PY_DATETIME_FORMAT)
result = source_add_update(obj_type,
obj_id,
method,
data['name'],
method=data['method'],
reference=data['reference'],
date=date,
analyst=analyst)
if 'object' in result:
if method == "add":
result['header'] = result['object'].name
result['data_field'] = 'name'
result['html'] = render_to_string('sources_header_widget.html',
{'source': result['object'],
'obj_type': obj_type,
'obj_id': obj_id},
RequestContext(request))
else:
result['html'] = render_to_string('sources_row_widget.html',
{'source': result['object'],
'instance': result['instance'],
'obj_type': obj_type,
'obj_id': obj_id},
RequestContext(request))
return HttpResponse(json.dumps(result,
default=json_handler),
mimetype='application/json')
else:
return HttpResponse(json.dumps({'success': False,
'form': form.as_table()}),
mimetype='application/json')
else:
return HttpResponse(json.dumps({'success': False,
'form':form.as_table()}),
mimetype='application/json')
return HttpResponse({})
@user_passes_test(user_can_view_data)
def remove_source(request, obj_type, obj_id):
"""
Remove a source from a top-level object. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:param obj_type: The type of top-level object.
:type obj_type: str
:param obj_id: The ObjectId to search for.
:type obj_id: str
:returns: :class:`django.http.HttpResponse`
"""
if request.method == "POST" and request.is_ajax():
if is_admin(request.user.username):
date = datetime.datetime.strptime(request.POST['key'],
settings.PY_DATETIME_FORMAT)
name = request.POST['name']
result = source_remove(obj_type,
obj_id,
name,
date,
'%s' % request.user.username)
return HttpResponse(json.dumps(result),
mimetype="application/json")
else:
error = "You do not have permission to remove this item"
return render_to_response("error.html",
{'error': error},
RequestContext(request))
return HttpResponse({})
@user_passes_test(user_can_view_data)
def remove_all_source(request, obj_type, obj_id):
"""
Remove all sources from a top-level object. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:param obj_type: The type of top-level object.
:type obj_type: str
:param obj_id: The ObjectId to search for.
:type obj_id: str
:returns: :class:`django.http.HttpResponse`
"""
if request.method == "POST" and request.is_ajax():
if is_admin(request.user.username):
name = request.POST['key']
result = source_remove_all(obj_type,
obj_id,
name, '%s' % request.user.username)
result['last'] = True
return HttpResponse(json.dumps(result),
mimetype="application/json")
else:
error = "You do not have permission to remove this item"
return render_to_response("error.html",
{'error': error},
RequestContext(request))
return HttpResponse({})
@user_passes_test(user_can_view_data)
def bucket_promote(request):
"""
Promote a bucket to a Campaign. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
bucket = request.GET.get("name", None)
if not bucket:
return render_to_response("error.html",
{'error': 'Need a bucket.'},
RequestContext(request))
form = CampaignForm(request.POST)
if form.is_valid():
analyst = request.user.username
confidence = form.cleaned_data['confidence']
name = form.cleaned_data['name']
related = form.cleaned_data['related']
description = form.cleaned_data['description']
result = promote_bucket_list(bucket,
confidence,
name,
related,
description,
analyst)
        return HttpResponse(json.dumps(result), mimetype="application/json")
    return HttpResponse(json.dumps({'success': False,
                                    'form': form.as_table()}),
                        mimetype="application/json")
@user_passes_test(user_can_view_data)
def bucket_modify(request):
"""
Modify a bucket list for a top-level object. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
if request.method == "POST" and request.is_ajax():
tags = request.POST['tags'].split(",")
oid = request.POST['oid']
itype = request.POST['itype']
modify_bucket_list(itype, oid, tags, request.user.username)
return HttpResponse({})
@user_passes_test(user_can_view_data)
def bucket_list(request, option=None):
"""
Generate the jtable data for rendering in the list template.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:param option: Action to take.
:type option: str of either 'jtlist', 'jtdelete', or 'inline'.
:returns: :class:`django.http.HttpResponse`
"""
if option == "csv":
return generate_bucket_csv(request)
return generate_bucket_jtable(request, option)
@user_passes_test(user_can_view_data)
def download_object(request):
"""
Download a top-level object.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
if request.method != "POST":
return render_to_response("error.html",
{"error" : "Expecting POST."},
RequestContext(request))
form = DownloadFileForm(request.POST)
if form.is_valid():
total_limit = form.cleaned_data['total_limit']
depth_limit = form.cleaned_data['depth_limit']
rel_limit = form.cleaned_data['rel_limit']
bin_fmt = form.cleaned_data['bin_fmt']
rst_fmt = form.cleaned_data['rst_fmt']
objects = form.cleaned_data['objects']
obj_type = form.cleaned_data['obj_type']
obj_id = form.cleaned_data['obj_id']
crits_config = CRITsConfig.objects().first()
total_max = getattr(crits_config, 'total_max', settings.TOTAL_MAX)
depth_max = getattr(crits_config, 'depth_max', settings.DEPTH_MAX)
rel_max = getattr(crits_config, 'rel_max', settings.REL_MAX)
try:
total_limit = int(total_limit)
depth_limit = int(depth_limit)
rel_limit = int(rel_limit)
if total_limit < 0 or depth_limit < 0 or rel_limit < 0:
                raise ValueError("Limits must be positive integers.")
        except (TypeError, ValueError):
return render_to_response("error.html",
{"error" : "Limits must be positive integers."},
RequestContext(request))
# Don't exceed the configured maximums. This is done in the view
        # so that scripts can enforce their own limits.
if total_limit > total_max:
total_limit = total_max
if depth_limit > depth_max:
depth_limit = depth_max
if rel_limit > rel_max:
rel_limit = rel_max
sources = user_sources(request.user.username)
if not sources:
return render_to_response("error.html",
{"error" : "No matching data."},
RequestContext(request))
result = download_object_handler(total_limit,
depth_limit,
rel_limit,
rst_fmt,
bin_fmt,
objects,
[(obj_type, obj_id)],
sources)
if not result['success']:
return render_to_response("error.html",
{"error" : "No matching data."},
RequestContext(request))
response = HttpResponse()
response['mimetype'] = result['mimetype']
response['Content-Disposition'] = 'attachment; filename=%s' % result['filename']
response.write(result['data'])
return response
else:
return render_to_response("error.html",
{"error" : "Invalid form."},
RequestContext(request))
@user_passes_test(user_can_view_data)
def timeline(request, data_type="dns"):
"""
Render the timeline.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:param data_type: The type of data to include in the timeline.
:type data_type: str
:returns: :class:`django.http.HttpResponse`
"""
format = request.GET.get("format", "none")
analyst = request.user.username
sources = user_sources(analyst)
query = {}
params = {}
if request.GET.get("campaign"):
query["campaign.name"] = request.GET.get("campaign")
params["campaign"] = query["campaign.name"]
if request.GET.get("backdoor"):
query["backdoor.name"] = request.GET.get("backdoor")
params["backdoor"] = query["backdoor.name"]
query["source.name"] = {"$in": sources}
page_title = data_type
if format == "json":
timeglider = []
tline = {}
tline['id'] = "tline"
tline['focus_date'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
tline['initial_zoom'] = "20"
tline['timezone'] = strftime("%z", gmtime())
events = []
# DNS data
if data_type == "dns":
tline['title'] = "DNS"
events = dns_timeline(query, analyst, sources)
# Email data
elif data_type == "email":
tline['title'] = "Emails"
events = email_timeline(query, analyst, sources)
# Indicator data
elif data_type == "indicator":
tline['title'] = "Indicators"
tline['initial_zoom'] = "14"
events = indicator_timeline(query, analyst, sources)
tline['events'] = events
timeglider.append(tline)
return HttpResponse(json.dumps(timeglider,
default=json_util.default),
mimetype="application/json")
else:
return render_to_response('timeline.html',
{'data_type': data_type,
'params': json.dumps(params),
'page_title': page_title},
RequestContext(request))
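# Illustrative shape of the JSON the timeline view above emits for
# format=json; the values are placeholders and the real events come from
# the dns/email/indicator *_timeline helpers:
#
#     [{"id": "tline",
#       "title": "DNS",
#       "focus_date": "2015-01-01 12:00:00",
#       "initial_zoom": "20",
#       "timezone": "+0000",
#       "events": [...]}]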
def base_context(request):
"""
    Set of common content to include in the template context so it is always
    available to every template on every page. This is registered in
    settings.py via TEMPLATE_CONTEXT_PROCESSORS.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: dict
"""
crits_config = CRITsConfig.objects().first()
base_context = {}
classification = getattr(crits_config,
'classification',
settings.CLASSIFICATION)
instance_name = getattr(crits_config,
'instance_name',
settings.INSTANCE_NAME)
company_name = getattr(crits_config,
'company_name',
settings.COMPANY_NAME)
crits_version = settings.CRITS_VERSION
enable_toasts = getattr(crits_config,
'enable_toasts',
settings.ENABLE_TOASTS)
git_branch = getattr(crits_config,
'git_branch',
settings.GIT_BRANCH)
git_hash = getattr(crits_config,
'git_hash',
settings.GIT_HASH)
git_hash_long = getattr(crits_config,
'git_hash_long',
settings.GIT_HASH_LONG)
git_repo_url = getattr(crits_config,
'git_repo_url',
settings.GIT_REPO_URL)
hide_git_hash = getattr(crits_config,
'hide_git_hash',
settings.HIDE_GIT_HASH)
splunk_url = getattr(crits_config,
'splunk_search_url',
settings.SPLUNK_SEARCH_URL)
secure_cookie = getattr(crits_config,
'secure_cookie',
settings.SECURE_COOKIE)
mongo_database = settings.MONGO_DATABASE
base_context['crits_config'] = crits_config
base_context['current_datetime'] = datetime.datetime.now()
base_context['classification'] = classification.upper()
base_context['instance_name'] = instance_name
base_context['company_name'] = company_name
base_context['crits_version'] = crits_version
base_context['enable_toasts'] = enable_toasts
if git_repo_url:
base_context['git_repo_link'] = "<a href='"+git_repo_url+"/commit/"+git_hash_long+"'>"+git_branch+':'+git_hash+"</a>"
else:
base_context['git_repo_link'] = "%s:%s" % (git_branch, git_hash)
base_context['hide_git_hash'] = hide_git_hash
base_context['splunk_search_url'] = splunk_url
base_context['mongo_database'] = mongo_database
base_context['secure_cookie'] = secure_cookie
base_context['service_nav_templates'] = settings.SERVICE_NAV_TEMPLATES
base_context['service_cp_templates'] = settings.SERVICE_CP_TEMPLATES
base_context['service_tab_templates'] = settings.SERVICE_TAB_TEMPLATES
if request.user.is_authenticated():
user = request.user.username
# Forms that don't require a user
base_context['add_indicator_action'] = NewIndicatorActionForm()
base_context['add_target'] = TargetInfoForm()
base_context['campaign_add'] = AddCampaignForm()
base_context['comment_add'] = AddCommentForm()
base_context['inline_comment_add'] = InlineCommentForm()
base_context['campaign_form'] = CampaignForm()
base_context['location_add'] = AddLocationForm()
base_context['add_raw_data_type'] = NewRawDataTypeForm()
base_context['relationship_form'] = ForgeRelationshipForm()
base_context['source_access'] = SourceAccessForm()
base_context['upload_tlds'] = TLDUpdateForm()
base_context['user_role_add'] = AddUserRoleForm()
base_context['new_ticket'] = TicketForm(initial={'date': datetime.datetime.now()})
base_context['add_actor_identifier_type'] = AddActorIdentifierTypeForm()
base_context['attribute_actor_identifier'] = AttributeIdentifierForm()
# Forms that require a user
try:
base_context['actor_add'] = AddActorForm(user)
except Exception, e:
logger.warning("Base Context AddActorForm Error: %s" % e)
try:
base_context['add_actor_identifier'] = AddActorIdentifierForm(user)
except Exception, e:
logger.warning("Base Context AddActorIdentifierForm Error: %s" % e)
try:
base_context['backdoor_add'] = AddBackdoorForm(user)
except Exception, e:
logger.warning("Base Context AddBackdoorForm Error: %s" % e)
try:
base_context['exploit_add'] = AddExploitForm(user)
except Exception, e:
logger.warning("Base Context AddExploitForm Error: %s" % e)
try:
base_context['add_domain'] = AddDomainForm(user)
except Exception, e:
logger.warning("Base Context AddDomainForm Error: %s" % e)
try:
base_context['ip_form'] = AddIPForm(user, None)
except Exception, e:
logger.warning("Base Context AddIPForm Error: %s" % e)
try:
base_context['source_add'] = SourceForm(user,
initial={'analyst': user})
except Exception, e:
logger.warning("Base Context SourceForm Error: %s" % e)
try:
base_context['upload_cert'] = UploadCertificateForm(user)
except Exception, e:
logger.warning("Base Context UploadCertificateForm Error: %s" % e)
try:
base_context['upload_csv'] = UploadIndicatorCSVForm(user)
except Exception, e:
logger.warning("Base Context UploadIndicatorCSVForm Error: %s" % e)
try:
base_context['upload_email_outlook'] = EmailOutlookForm(user)
except Exception, e:
logger.warning("Base Context EmailOutlookForm Error: %s" % e)
try:
base_context['upload_email_eml'] = EmailEMLForm(user)
except Exception, e:
logger.warning("Base Context EmailEMLForm Error: %s" % e)
try:
base_context['upload_email_fields'] = EmailUploadForm(user)
except Exception, e:
logger.warning("Base Context EmailUploadForm Error: %s" % e)
try:
base_context['upload_email_yaml'] = EmailYAMLForm(user)
except Exception, e:
logger.warning("Base Context EmailYAMLForm Error: %s" % e)
try:
base_context['upload_email_raw'] = EmailRawUploadForm(user)
except Exception, e:
logger.warning("Base Context EmailRawUploadForm Error: %s" % e)
try:
base_context['upload_event'] = EventForm(user)
except Exception, e:
logger.warning("Base Context EventForm Error: %s" % e)
try:
base_context['upload_ind'] = UploadIndicatorForm(user)
except Exception, e:
logger.warning("Base Context UploadIndicatorForm Error: %s" % e)
try:
base_context['upload_pcap'] = UploadPcapForm(user)
except Exception, e:
logger.warning("Base Context UploadPcapForm Error: %s" % e)
try:
base_context['upload_text'] = UploadIndicatorTextForm(user)
except Exception, e:
logger.warning("Base Context UploadIndicatorTextForm Error: %s" % e)
try:
base_context['upload_sample'] = UploadFileForm(user)
except Exception, e:
logger.warning("Base Context UploadFileForm Error: %s" % e)
try:
base_context['object_form'] = AddObjectForm(user, None)
except Exception, e:
logger.warning("Base Context AddObjectForm Error: %s" % e)
try:
base_context['releasability_form'] = AddReleasabilityForm(user)
except Exception, e:
logger.warning("Base Context AddReleasabilityForm Error: %s" % e)
try:
base_context['screenshots_form'] = AddScreenshotForm(user)
except Exception, e:
logger.warning("Base Context AddScreenshotForm Error: %s" % e)
try:
base_context['upload_raw_data'] = UploadRawDataForm(user)
except Exception, e:
logger.warning("Base Context UploadRawDataForm Error: %s" % e)
try:
base_context['upload_raw_data_file'] = UploadRawDataFileForm(user)
except Exception, e:
logger.warning("Base Context UploadRawDataFileForm Error: %s" % e)
# Other info acquired from functions
try:
base_context['user_list'] = get_user_list()
except Exception, e:
logger.warning("Base Context get_user_list Error: %s" % e)
try:
base_context['email_notifications'] = get_user_email_notification(user)
except Exception, e:
logger.warning("Base Context get_user_email_notification Error: %s" % e)
try:
base_context['user_notifications'] = get_user_notifications(user,
count=True)
except Exception, e:
logger.warning("Base Context get_user_notifications Error: %s" % e)
try:
base_context['user_organization'] = get_user_organization(user)
except Exception, e:
logger.warning("Base Context get_user_organization Error: %s" % e)
try:
base_context['user_role'] = get_user_role(user)
except Exception, e:
logger.warning("Base Context get_user_role Error: %s" % e)
try:
base_context['user_source_list'] = user_sources(user)
except Exception, e:
logger.warning("Base Context user_sources Error: %s" % e)
nav_template = get_nav_template(request.user.prefs.nav)
if nav_template != None:
base_context['nav_template'] = nav_template
base_context['newer_notifications_location'] = request.user.prefs.toast_notifications.get('newer_notifications_location', 'top')
base_context['initial_notifications_display'] = request.user.prefs.toast_notifications.get('initial_notifications_display', 'show')
base_context['max_visible_notifications'] = request.user.prefs.toast_notifications.get('max_visible_notifications', 5)
base_context['notification_anchor_location'] = request.user.prefs.toast_notifications.get('notification_anchor_location', 'bottom_right')
base_context['nav_config'] = {'text_color': request.user.prefs.nav.get('text_color'),
'background_color': request.user.prefs.nav.get('background_color'),
'hover_text_color': request.user.prefs.nav.get('hover_text_color'),
'hover_background_color': request.user.prefs.nav.get('hover_background_color')}
if is_admin(request.user.username):
try:
base_context['source_create'] = AddSourceForm()
except Exception, e:
logger.warning("Base Context AddSourceForm Error: %s" % e)
base_context['category_list'] = [
{'collection': '', 'name': ''},
{'collection': settings.COL_BACKDOORS,
'name': 'Backdoors'},
{'collection': settings.COL_CAMPAIGNS,
'name': 'Campaigns'},
{'collection': settings.COL_EVENT_TYPES,
'name': 'Event Types'},
{'collection': settings.COL_IDB_ACTIONS,
'name': 'Indicator Actions'},
{'collection': settings.COL_INTERNAL_LOCATIONS,
'name': 'Internal Locations'},
{'collection': settings.COL_OBJECT_TYPES,
'name': 'Object Types'},
{'collection': settings.COL_RAW_DATA_TYPES,
'name': 'Raw Data Types'},
{'collection': settings.COL_RELATIONSHIP_TYPES,
'name': 'Relationship Types'},
{'collection': settings.COL_SOURCE_ACCESS,
'name': 'Sources'},
{'collection': settings.COL_USER_ROLES,
'name': 'User Roles'}
]
return base_context
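# NOTE (illustrative): base_context, user_context and collections only run on
# every request once they are registered as context processors. A minimal
# settings.py entry might look like the following; the dotted paths are
# assumptions based on this module's location and may differ per install:
#
#     TEMPLATE_CONTEXT_PROCESSORS = (
#         'django.contrib.auth.context_processors.auth',
#         'crits.core.views.base_context',
#         'crits.core.views.collections',
#         'crits.core.views.user_context',
#     )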
@user_passes_test(user_can_view_data)
def user_context(request):
"""
    Set of common content about the user to include in the template context
    so it is always available to every template on every page. This is
    registered in settings.py via TEMPLATE_CONTEXT_PROCESSORS.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: dict
"""
context = {}
try:
context['admin'] = is_admin(request.user.username)
except:
context['admin'] = False
# Get user theme
user = CRITsUser.objects(username=request.user.username).first()
context['theme'] = user.get_preference('ui', 'theme', 'default')
favorite_count = 0
favorites = user.favorites.to_dict()
for favorite in favorites.values():
favorite_count += len(favorite)
context['user_favorites'] = user.favorites.to_json()
context['favorite_count'] = favorite_count
return context
@user_passes_test(user_can_view_data)
def get_user_source_list(request):
"""
Get a user's source list. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST' and request.is_ajax():
user_source_access = user_sources('%s' % request.user.username)
message = {'success': True,
'data': user_source_access}
return HttpResponse(json.dumps(message),
mimetype="application/json")
else:
error = "Expected AJAX POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
@user_passes_test(user_is_admin)
def user_source_access(request, username=None):
"""
Get a user's source access list. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:param username: The user to get the sources for.
:type username: str.
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST' and request.is_ajax():
if not username:
username = request.POST.get('username', None)
user = get_user_info(username)
if user:
user = user.to_dict()
if 'sources' not in user:
user['sources'] = ''
else:
user = {'username': '',
'sources': '',
'organization': settings.COMPANY_NAME}
form = SourceAccessForm(initial=user)
message = {'success': True,
'message': form.as_table()}
return HttpResponse(json.dumps(message),
mimetype="application/json")
else:
error = "Expected AJAX POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
@user_passes_test(user_can_view_data)
def user_preference_toggle(request, section, setting):
"""
Toggle a preference for a user. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:param section: The preferences section to toggle.
:type section: str.
:param setting: The setting to toggle.
:type setting: str
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST' and request.is_ajax():
pref = generate_user_preference(request, section, 'toggle', setting)
if not pref or 'toggle' not in pref:
error = "Unexpected Preference Toggle Received in AJAX POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
result = toggle_user_preference(request.user.username, section, setting, is_enabled=pref.get('enabled'))
if result['success']:
result["message"] = "(Saved)"
if result['state']:
result["text"] = "Enabled"
result["title"]= "Click to Disable"
else:
result["text"] = "Disabled"
result["title"]= "Click to Enable"
if 'reload' in pref:
result["reload"] = pref['reload']
return HttpResponse(json.dumps(result),
mimetype="application/json")
else:
error = "Expected AJAX POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
@user_passes_test(user_can_view_data)
def user_preference_update(request, section):
"""
Update a user preference. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:param section: The preferences section to toggle.
:type section: str.
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST' and request.is_ajax():
result = {}
pref = generate_user_preference(request,section)
if not pref or 'formclass' not in pref or not callable(pref['formclass']):
error = "Unexpected Form Received in AJAX POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
form = (pref['formclass'])(request, request.POST)
if form.is_valid():
data = form.cleaned_data
            # Incoming attributes may be flattened, e.g.
            #   option.one.sub.key = value
            # so unflatten() rebuilds them into a nested
            # {'option': {'one': ...}} dict (see the sketch after this view).
            values = unflatten(data)
result = update_user_preference(request.user.username, section, values)
result['values'] = values
if result['success']:
result["message"] = "(Saved)"
if 'reload' in pref:
result["reload"] = pref['reload']
else:
result['success'] = False
pref['form'] = form # Inject our form instance with validation results
result['html'] = render_to_string("preferences_widget.html",
{'pref': pref},
RequestContext(request))
return HttpResponse(json.dumps(result),
mimetype="application/json")
else:
error = "Expected AJAX POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
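# NOTE (illustrative): unflatten() used by user_preference_update above is
# imported elsewhere in this module. The sketch below shows the assumed
# behavior only; it is a hypothetical reimplementation, not the helper CRITs
# actually ships.
def _unflatten_sketch(flat):
    """Turn {'option.one.sub.key': v} into {'option': {'one': {'sub': {'key': v}}}}."""
    nested = {}
    for compound_key, value in flat.items():
        parts = compound_key.split('.')
        node = nested
        for part in parts[:-1]:
            node = node.setdefault(part, {})
        node[parts[-1]] = value
    return nested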
@user_passes_test(user_can_view_data)
def clear_user_notifications(request):
"""
Clear a user's notifications.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
remove_user_notifications("%s" % request.user.username)
return HttpResponseRedirect(reverse('crits.core.views.profile') + '#notifications_button')
@user_passes_test(user_can_view_data)
def delete_user_notification(request, type_, oid):
"""
Delete a user notification. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:param type_: The top-level object type.
:type type_: str
:param oid: The ObjectId.
:type oid: str
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST' and request.is_ajax():
result = remove_user_from_notification("%s" % request.user.username,
oid,
type_)
message = "<p style=\"text-align: center;\">You have no new notifications!</p>"
result['message'] = message
return HttpResponse(json.dumps(result),
mimetype="application/json")
else:
error = "Expected AJAX POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
@user_passes_test(user_can_view_data)
def change_subscription(request, stype, oid):
"""
Subscribe/unsubscribe a user from this top-level object. Should be an AJAX
POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:param stype: The CRITs type of the top-level object.
:type stype: str
:param oid: The ObjectId of the top-level object.
:type oid: str
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST' and request.is_ajax():
username = "%s" % request.user.username
message = ""
if is_user_subscribed(username, stype, oid):
unsubscribe_user(username, stype, oid)
message = ("<span class=\"ui-icon ui-icon-signal-diag subscription_link"
"_disable\" title=\"Subscribe\"></span>")
else:
subscribe_user(username, stype, oid)
message = ("<span class=\"ui-icon ui-icon-close subscription_link"
"_enable\" title=\"Unsubscribe\"></span>")
result = {'success': True,
'message': message}
return HttpResponse(json.dumps(result),
mimetype="application/json")
else:
error = "Expected AJAX POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
@user_passes_test(user_can_view_data)
def source_subscription(request):
"""
Subscribe/unsubscribe a user from this source. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST' and request.is_ajax():
username = "%s" % request.user.username
user_source_access = user_sources(username)
source = request.POST['source']
if source not in user_source_access:
error = "You do not have access to that source."
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
message = ""
if is_user_subscribed_to_source(username, source):
unsubscribe_from_source(username, source)
message = "unsubscribed"
else:
subscribe_to_source(username, source)
message = "subscribed"
result = {'success': True, 'message': message}
return HttpResponse(json.dumps(result),
mimetype="application/json")
else:
error = "Expected AJAX POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
def collections(request):
"""
    Set of common content about collections to include in the template
    context so it is always available to every template on every page. This
    is registered in settings.py via TEMPLATE_CONTEXT_PROCESSORS.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: dict
"""
colls = {}
colls['COL_CERTIFICATES'] = settings.COL_CERTIFICATES
colls['COL_EMAIL'] = settings.COL_EMAIL
colls['COL_EVENTS'] = settings.COL_EVENTS
colls['COL_DOMAINS'] = settings.COL_DOMAINS
colls['COL_INDICATORS'] = settings.COL_INDICATORS
colls['COL_IPS'] = settings.COL_IPS
colls['COL_PCAPS'] = settings.COL_PCAPS
colls['COL_RAW_DATA'] = settings.COL_RAW_DATA
colls['COL_SAMPLES'] = settings.COL_SAMPLES
colls['COL_TARGETS'] = settings.COL_TARGETS
return colls
@user_passes_test(user_can_view_data)
def change_password(request):
"""
Change a user's password. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST' and request.is_ajax():
username = request.user.username
current_p = request.POST['current_p']
new_p = request.POST['new_p']
new_p_c = request.POST['new_p_c']
result = change_user_password(username,
current_p,
new_p,
new_p_c)
return HttpResponse(json.dumps(result),
mimetype="application/json")
else:
error = "Expected AJAX POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
@user_passes_test(user_can_view_data)
def change_totp_pin(request):
"""
Change a user's TOTP pin. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST' and request.is_ajax():
username = request.user.username
new_pin = request.POST.get('new_pin', None)
if new_pin:
result = save_user_secret(username, new_pin, "crits", (200,200))
if result['success']:
result['message'] = "Secret: %s" % result['secret']
if result['qr_img']:
qr_img = result['qr_img']
result['qr_img'] = '<br /><img src="data:image/png;base64,'
result['qr_img'] += '%s" />' % qr_img
else:
result['message'] = "Secret generation failed"
else:
result = {'message': "Please provide a pin"}
return HttpResponse(json.dumps(result),
mimetype="application/json")
else:
error = "Expected AJAX POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
@user_passes_test(user_is_admin)
def control_panel(request):
"""
Render the control panel.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
return render_to_response("control_panel.html",
{},
RequestContext(request))
@user_passes_test(user_is_admin)
def users_listing(request, option=None):
"""
Generate the jtable data for rendering in the list template.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:param option: Action to take.
:type option: str of either 'jtlist', 'jtdelete', or 'inline'.
:returns: :class:`django.http.HttpResponse`
"""
return generate_users_jtable(request, option)
@user_passes_test(user_is_admin)
def toggle_user_active(request):
"""
Toggle a user active/inactive. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST' and request.is_ajax():
user = request.POST.get('username', None)
analyst = request.user.username
if not user:
result = {'success': False}
else:
toggle_active(user, analyst)
result = {'success': True}
return HttpResponse(json.dumps(result),
mimetype="application/json")
else:
error = "Expected AJAX POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
@user_passes_test(user_is_admin)
def item_editor(request):
"""
Render the item editor control panel page.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
counts = {}
obj_list = [ActorThreatIdentifier,
Campaign,
IndicatorAction,
RawDataType,
SourceAccess,
UserRole]
for col_obj in obj_list:
counts[col_obj._meta['crits_type']] = col_obj.objects().count()
return render_to_response("item_editor.html",
{'counts': counts},
RequestContext(request))
@user_passes_test(user_is_admin)
def items_listing(request, itype, option=None):
"""
Generate the jtable data for rendering in the list template.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:param itype: The item type.
:type itype: str
:param option: Action to take.
:type option: str of either 'jtlist', 'jtdelete', or 'inline'.
:returns: :class:`django.http.HttpResponse`
"""
return generate_items_jtable(request, itype, option)
@user_passes_test(user_is_admin)
def audit_listing(request, option=None):
"""
Generate the jtable data for rendering in the list template.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:param option: Action to take.
:type option: str of either 'jtlist', 'jtdelete', or 'inline'.
:returns: :class:`django.http.HttpResponse`
"""
return generate_audit_jtable(request, option)
@user_passes_test(user_can_view_data)
def toggle_item_active(request):
"""
Toggle an item active/inactive. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
if request.method == 'POST' and request.is_ajax():
type_ = request.POST.get('coll', None)
oid = request.POST.get('oid', None)
analyst = request.user.username
if not oid or not type_:
result = {'success': False}
else:
result = toggle_item_state(type_, oid, analyst)
return HttpResponse(json.dumps(result), mimetype="application/json")
else:
error = "Expected AJAX POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
@user_passes_test(user_can_view_data)
def download_file(request, sample_md5):
"""
Download a file. Used mainly for situations where you are not using the
standard download file modal form in the UI.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:param sample_md5: The MD5 of the file to download.
:type sample_md5: str
:returns: :class:`django.http.HttpResponse`
"""
dtype = request.GET.get("type", "sample")
if dtype in ('object', 'pcap', 'cert'):
return download_grid_file(request, dtype, sample_md5)
else:
return render_to_response('error.html',
{'data': request,
'error': "Unknown Type: %s" % dtype},
RequestContext(request))
@user_passes_test(user_can_view_data)
def details(request, type_=None, id_=None):
"""
    Redirect to the details page. Useful when you know the type and ID of a
    top-level object but not the values its details URL normally requires.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:param type_: The CRITs type of the top-level object.
:type type_: str
:param id_: The ObjectId to search for.
:type id_: str
:returns: :class:`django.http.HttpResponseRedirect`
"""
if not type_ or not id_:
return render_to_response('error.html',
{'error': "Need a type and id to redirect to."},
RequestContext(request))
redir = details_from_id(type_, id_)
if redir:
return HttpResponseRedirect(redir)
else:
return render_to_response('error.html',
{'error': "No details page exists for type %s" % type_},
RequestContext(request))
@user_passes_test(user_can_view_data)
def add_update_ticket(request, method, type_=None, id_=None):
"""
Add/update/remove a ticket for a top-level object.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:param method: Whether this is an "add", "update", or "remove".
:type method: str
:param type_: The CRITs type of the top-level object.
:type type_: str
:param id_: The ObjectId to search for.
:type id_: str
:returns: :class:`django.http.HttpResponseRedirect`
"""
if method == "remove" and request.method == "POST" and request.is_ajax():
analyst = request.user.username
if is_admin(analyst):
date = datetime.datetime.strptime(request.POST['key'],
settings.PY_DATETIME_FORMAT)
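            # Integer division truncates the timestamp to millisecond
            # precision before it is used as the removal key.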
date = date.replace(microsecond=date.microsecond/1000*1000)
result = ticket_remove(type_, id_, date, analyst)
return HttpResponse(json.dumps(result),
mimetype="application/json")
else:
error = "You do not have permission to remove this item."
return render_to_response("error.html",
{'error': error},
RequestContext(request))
if request.method == "POST" and request.is_ajax():
form = TicketForm(request.POST)
if form.is_valid():
data = form.cleaned_data
add = {
'ticket_number': data['ticket_number'],
'analyst': request.user.username
}
if method == "add":
add['date'] = datetime.datetime.now()
result = ticket_add(type_, id_, add)
else:
date = datetime.datetime.strptime(data['date'],
settings.PY_DATETIME_FORMAT)
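                # Integer division truncates the timestamp to millisecond precision.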
date = date.replace(microsecond=date.microsecond/1000*1000)
add['date'] = date
result = ticket_update(type_, id_, add)
crits_config = CRITsConfig.objects().first()
if 'object' in result:
                result['html'] = render_to_string('tickets_row_widget.html',
                                                  {'ticket': result['object'],
                                                   'admin': is_admin(request.user.username),
                                                   'crits_config': crits_config,
                                                   'obj_type': type_,
                                                   'obj': class_from_id(type_, id_)})
return HttpResponse(json.dumps(result,
default=json_handler),
mimetype="application/json")
else: #invalid form
return HttpResponse(json.dumps({'success':False,
'form': form.as_table()}),
mimetype="application/json")
#default. Should we do anything else here?
return HttpResponse({})
@user_passes_test(user_can_view_data)
def get_search_help(request):
"""
Render the search help box. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponseRedirect`
"""
result = {'template': render_to_string('search_help.html', {})}
return HttpResponse(json.dumps(result, default=json_handler),
mimetype="application/json")
@user_passes_test(user_can_view_data)
def get_api_key(request):
"""
Get an API key for a user. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponseRedirect`
"""
if request.method == "POST" and request.is_ajax():
username = request.user.username
name = request.POST.get('name', None)
if not name:
return HttpResponse(json.dumps({'success': False,
'message': 'Need a name.'}),
mimetype="application/json")
result = get_api_key_by_name(username, name)
if result:
return HttpResponse(json.dumps({'success': True,
'message': result}),
mimetype="application/json")
else:
return HttpResponse(json.dumps({'success': False,
'message': 'No key for that name.'}),
mimetype="application/json")
else:
error = "Expected AJAX POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
@user_passes_test(user_can_view_data)
def create_api_key(request):
"""
Create an API key for a user. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponseRedirect`
"""
if request.method == "POST" and request.is_ajax():
username = request.user.username
name = request.POST.get('name', None)
if not name:
return HttpResponse(json.dumps({'success': False,
'message': 'Need a name.'}),
mimetype="application/json")
result = create_api_key_by_name(username, name)
return HttpResponse(json.dumps(result),
mimetype="application/json")
else:
error = "Expected AJAX POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
@user_passes_test(user_can_view_data)
def make_default_api_key(request):
"""
Set an API key as default for a user. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponseRedirect`
"""
if request.method == "POST" and request.is_ajax():
username = request.user.username
name = request.POST.get('name', None)
if not name:
return HttpResponse(json.dumps({'success': False,
'message': 'Need a name.'}),
mimetype="application/json")
result = make_default_api_key_by_name(username, name)
return HttpResponse(json.dumps(result),
mimetype="application/json")
else:
error = "Expected AJAX POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
@user_passes_test(user_can_view_data)
def revoke_api_key(request):
"""
Revoke an API key for a user. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponseRedirect`
"""
if request.method == "POST" and request.is_ajax():
username = request.user.username
name = request.POST.get('name', None)
if not name:
return HttpResponse(json.dumps({'success': False,
'message': 'Need a name.'}),
mimetype="application/json")
result = revoke_api_key_by_name(username, name)
return HttpResponse(json.dumps(result),
mimetype="application/json")
else:
error = "Expected AJAX POST"
return render_to_response("error.html",
{"error" : error },
RequestContext(request))
@user_passes_test(user_can_view_data)
def sector_modify(request):
"""
Modify a sectors list for a top-level object. Should be an AJAX POST.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
if request.method == "POST" and request.is_ajax():
sectors = request.POST['sectors'].split(",")
oid = request.POST['oid']
itype = request.POST['itype']
modify_sector_list(itype, oid, sectors, request.user.username)
return HttpResponse({})
@user_passes_test(user_can_view_data)
def sector_list(request, option=None):
"""
Generate the jtable data for rendering in the list template.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:param option: Action to take.
:type option: str of either 'jtlist', 'jtdelete', or 'inline'.
:returns: :class:`django.http.HttpResponse`
"""
if option == "csv":
return generate_sector_csv(request)
return generate_sector_jtable(request, option)
@user_passes_test(user_can_view_data)
def get_available_sectors(request):
"""
Get the available sectors to use.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
if request.method == "POST" and request.is_ajax():
return HttpResponse(
json.dumps(Sectors.values(sort=True), default=json_handler),
content_type='application/json'
)
return HttpResponse({})
@user_passes_test(user_can_view_data)
def bucket_autocomplete(request):
"""
Get the list of current buckets to autocomplete.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
if request.method == "POST" and request.is_ajax():
term = request.POST.get('term', None)
if term:
return get_bucket_autocomplete(term)
return HttpResponse({})
| mit | -1,856,386,553,880,469,200 | -1,540,119,114,871,687,700 | 39.750366 | 145 | 0.563 | false |
petemounce/ansible | lib/ansible/modules/cloud/openstack/os_port.py | 11 | 12625 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_port
short_description: Add/Update/Delete ports from an OpenStack cloud.
extends_documentation_fragment: openstack
author: "Davide Agnello (@dagnello)"
version_added: "2.0"
description:
- Add, Update or Remove ports from an OpenStack cloud. A I(state) of
'present' will ensure the port is created or updated if required.
options:
network:
description:
- Network ID or name this port belongs to.
required: true
name:
description:
- Name that has to be given to the port.
required: false
default: None
fixed_ips:
description:
- Desired IP and/or subnet for this port. Subnet is referenced by
subnet_id and IP is referenced by ip_address.
required: false
default: None
admin_state_up:
description:
- Sets admin state.
required: false
default: None
mac_address:
description:
- MAC address of this port.
required: false
default: None
security_groups:
description:
- Security group(s) ID(s) or name(s) associated with the port (comma
separated string or YAML list)
required: false
default: None
no_security_groups:
description:
- Do not associate a security group with this port.
required: false
default: False
allowed_address_pairs:
description:
- "Allowed address pairs list. Allowed address pairs are supported with
dictionary structure.
e.g. allowed_address_pairs:
- ip_address: 10.1.0.12
mac_address: ab:cd:ef:12:34:56
- ip_address: ..."
required: false
default: None
extra_dhcp_opts:
description:
- "Extra dhcp options to be assigned to this port. Extra options are
supported with dictionary structure.
e.g. extra_dhcp_opts:
- opt_name: opt name1
opt_value: value1
- opt_name: ..."
required: false
default: None
device_owner:
description:
- The ID of the entity that uses this port.
required: false
default: None
device_id:
description:
- Device ID of device using this port.
required: false
default: None
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
      - Ignored. Present for backwards compatibility
required: false
'''
EXAMPLES = '''
# Create a port
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: port1
network: foo
# Create a port with a static IP
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: port1
network: foo
fixed_ips:
- ip_address: 10.1.0.21
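# Create a port on a specific subnet, letting Neutron choose the address.
# Illustrative only: the subnet UUID below is a placeholder.
- os_port:
    state: present
    auth:
      auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
      username: admin
      password: admin
      project_name: admin
    name: port1
    network: foo
    fixed_ips:
      - subnet_id: 00000000-0000-0000-0000-000000000000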
# Create a port with No security groups
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: port1
network: foo
no_security_groups: True
# Update the existing 'port1' port with multiple security groups (version 1)
- os_port:
state: present
auth:
      auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: port1
security_groups: 1496e8c7-4918-482a-9172-f4f00fc4a3a5,057d4bdf-6d4d-472...
# Update the existing 'port1' port with multiple security groups (version 2)
- os_port:
state: present
auth:
      auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: port1
security_groups:
- 1496e8c7-4918-482a-9172-f4f00fc4a3a5
- 057d4bdf-6d4d-472...
'''
RETURN = '''
id:
description: Unique UUID.
returned: success
type: string
name:
description: Name given to the port.
returned: success
type: string
network_id:
description: Network ID this port belongs in.
returned: success
type: string
security_groups:
description: Security group(s) associated with this port.
returned: success
type: list
status:
description: Port's status.
returned: success
type: string
fixed_ips:
description: Fixed ip(s) associated with this port.
returned: success
type: list
tenant_id:
description: Tenant id associated with this port.
returned: success
type: string
allowed_address_pairs:
description: Allowed address pairs with this port.
returned: success
type: list
admin_state_up:
description: Admin state up flag for this port.
returned: success
type: bool
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
def _needs_update(module, port, cloud):
"""Check for differences in the updatable values.
NOTE: We don't currently allow name updates.
"""
compare_simple = ['admin_state_up',
'mac_address',
'device_owner',
'device_id']
compare_dict = ['allowed_address_pairs',
'extra_dhcp_opts']
compare_list = ['security_groups']
for key in compare_simple:
if module.params[key] is not None and module.params[key] != port[key]:
return True
for key in compare_dict:
if module.params[key] is not None and module.params[key] != port[key]:
return True
for key in compare_list:
if module.params[key] is not None and (set(module.params[key]) !=
set(port[key])):
return True
# NOTE: if port was created or updated with 'no_security_groups=True',
# subsequent updates without 'no_security_groups' flag or
# 'no_security_groups=False' and no specified 'security_groups', will not
# result in an update to the port where the default security group is
# applied.
if module.params['no_security_groups'] and port['security_groups'] != []:
return True
if module.params['fixed_ips'] is not None:
for item in module.params['fixed_ips']:
if 'ip_address' in item:
# if ip_address in request does not match any in existing port,
# update is required.
if not any(match['ip_address'] == item['ip_address']
for match in port['fixed_ips']):
return True
if 'subnet_id' in item:
return True
for item in port['fixed_ips']:
# if ip_address in existing port does not match any in request,
# update is required.
if not any(match.get('ip_address') == item['ip_address']
for match in module.params['fixed_ips']):
return True
return False
def _system_state_change(module, port, cloud):
state = module.params['state']
if state == 'present':
if not port:
return True
return _needs_update(module, port, cloud)
if state == 'absent' and port:
return True
return False
def _compose_port_args(module, cloud):
port_kwargs = {}
optional_parameters = ['name',
'fixed_ips',
'admin_state_up',
'mac_address',
'security_groups',
'allowed_address_pairs',
'extra_dhcp_opts',
'device_owner',
'device_id']
for optional_param in optional_parameters:
if module.params[optional_param] is not None:
port_kwargs[optional_param] = module.params[optional_param]
if module.params['no_security_groups']:
port_kwargs['security_groups'] = []
return port_kwargs
def get_security_group_id(module, cloud, security_group_name_or_id):
security_group = cloud.get_security_group(security_group_name_or_id)
if not security_group:
module.fail_json(msg="Security group: %s, was not found"
% security_group_name_or_id)
return security_group['id']
def main():
argument_spec = openstack_full_argument_spec(
network=dict(required=False),
name=dict(required=False),
fixed_ips=dict(type='list', default=None),
admin_state_up=dict(type='bool', default=None),
mac_address=dict(default=None),
security_groups=dict(default=None, type='list'),
no_security_groups=dict(default=False, type='bool'),
allowed_address_pairs=dict(type='list', default=None),
extra_dhcp_opts=dict(type='list', default=None),
device_owner=dict(default=None),
device_id=dict(default=None),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[
['no_security_groups', 'security_groups'],
]
)
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
name = module.params['name']
state = module.params['state']
try:
cloud = shade.openstack_cloud(**module.params)
if module.params['security_groups']:
            # translate security_groups to UUIDs if names were provided
module.params['security_groups'] = [
get_security_group_id(module, cloud, v)
for v in module.params['security_groups']
]
port = None
network_id = None
if name:
port = cloud.get_port(name)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, port, cloud))
changed = False
if state == 'present':
if not port:
network = module.params['network']
if not network:
module.fail_json(
msg="Parameter 'network' is required in Port Create"
)
port_kwargs = _compose_port_args(module, cloud)
network_object = cloud.get_network(network)
if network_object:
network_id = network_object['id']
else:
module.fail_json(
msg="Specified network was not found."
)
port = cloud.create_port(network_id, **port_kwargs)
changed = True
else:
if _needs_update(module, port, cloud):
port_kwargs = _compose_port_args(module, cloud)
port = cloud.update_port(port['id'], **port_kwargs)
changed = True
module.exit_json(changed=changed, id=port['id'], port=port)
if state == 'absent':
if port:
cloud.delete_port(port['id'])
changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 | 8,352,212,342,189,816,000 | 7,958,565,373,606,546,000 | 30.641604 | 80 | 0.594772 | false |
nfallen/servo | tests/wpt/css-tests/css21_dev/xhtml1print/reference/support/fonts/makegsubfonts.py | 820 | 14309 |
import os
import textwrap
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.psCharStrings import T2CharString
from fontTools.ttLib.tables.otTables import GSUB,\
ScriptList, ScriptRecord, Script, DefaultLangSys,\
FeatureList, FeatureRecord, Feature,\
LookupList, Lookup, AlternateSubst, SingleSubst
# paths
directory = os.path.dirname(__file__)
shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx")
shellTempPath = os.path.join(directory, "gsubtest-shell.otf")
featureList = os.path.join(directory, "gsubtest-features.txt")
javascriptData = os.path.join(directory, "gsubtest-features.js")
outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d")
baseCodepoint = 0xe000
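# 0xE000 is the first codepoint of the Unicode Private Use Area; mapping the
# test glyphs there keeps them from colliding with real characters.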
# -------
# Features
# -------
f = open(featureList, "rb")
text = f.read()
f.close()
mapping = []
for line in text.splitlines():
line = line.strip()
if not line:
continue
if line.startswith("#"):
continue
# parse
values = line.split("\t")
tag = values.pop(0)
    mapping.append(tag)
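# Illustrative layout of gsubtest-features.txt (tab-separated; only the first
# field, the feature tag, is used above). Hypothetical rows:
#
#   aalt<TAB>Access All Alternates
#   liga<TAB>Standard Ligatures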
# --------
# Outlines
# --------
def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None):
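    """Append a T2 charstring for glyphName and register the glyph in
    topDict.charset and the CharStrings map."""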
charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs)
charStringsIndex.append(charString)
glyphID = len(topDict.charset)
charStrings.charStrings[glyphName] = glyphID
topDict.charset.append(glyphName)
def makeLookup1():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
        # Skip three codepoints so each feature occupies a block of four,
        # keeping the codepoint sequence in step with the lookup 3 font
        # (which maps one default and three alternate glyphs per feature).
cp += 3
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 1
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = SingleSubst()
subtable.Format = 2
subtable.LookupType = 1
subtable.mapping = {
"%s.pass" % tag : "%s.fail" % tag,
"%s.fail" % tag : "%s.pass" % tag,
}
lookup.SubTable.append(subtable)
path = outputPath % 1 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeLookup3():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
# tag.default
glyphName = "%s.default" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
                raise NotImplementedError("Unsupported cmap table format: %d" % table.format)
cp += 1
# tag.alt1,2,3
for i in range(1,4):
glyphName = "%s.alt%d" % (tag, i)
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
                    raise NotImplementedError("Unsupported cmap table format: %d" % table.format)
cp += 1
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 3
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = AlternateSubst()
subtable.Format = 1
subtable.LookupType = 3
subtable.alternates = {
"%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag],
"%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag]
}
lookup.SubTable.append(subtable)
path = outputPath % 3 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeJavascriptData():
features = sorted(mapping)
outStr = []
outStr.append("")
outStr.append("/* This file is autogenerated by makegsubfonts.py */")
outStr.append("")
outStr.append("/* ")
outStr.append(" Features defined in gsubtest fonts with associated base")
outStr.append(" codepoints for each feature:")
outStr.append("")
outStr.append(" cp = codepoint for feature featX")
outStr.append("")
outStr.append(" cp default PASS")
outStr.append(" cp featX=1 FAIL")
outStr.append(" cp featX=2 FAIL")
outStr.append("")
outStr.append(" cp+1 default FAIL")
outStr.append(" cp+1 featX=1 PASS")
outStr.append(" cp+1 featX=2 FAIL")
outStr.append("")
outStr.append(" cp+2 default FAIL")
outStr.append(" cp+2 featX=1 FAIL")
outStr.append(" cp+2 featX=2 PASS")
outStr.append("")
outStr.append("*/")
outStr.append("")
outStr.append("var gFeatures = {");
cp = baseCodepoint
taglist = []
for tag in features:
taglist.append("\"%s\": 0x%x" % (tag, cp))
cp += 4
outStr.append(textwrap.fill(", ".join(taglist), initial_indent=" ", subsequent_indent=" "))
outStr.append("};");
outStr.append("");
if os.path.exists(javascriptData):
os.remove(javascriptData)
f = open(javascriptData, "wb")
f.write("\n".join(outStr))
f.close()
# build fonts
print "Making lookup type 1 font..."
makeLookup1()
print "Making lookup type 3 font..."
makeLookup3()
# output javascript data
print "Making javascript data file..."
makeJavascriptData() | mpl-2.0 | -733,030,620,572,698,800 | 6,772,366,632,485,426,000 | 28.444444 | 135 | 0.641275 | false |
mnahm5/django-estore | Lib/site-packages/boto/kinesis/__init__.py | 145 | 1652 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo, get_regions
def regions():
"""
Get all available regions for the Amazon Kinesis service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
from boto.kinesis.layer1 import KinesisConnection
return get_regions('kinesis', connection_cls=KinesisConnection)
def connect_to_region(region_name, **kw_params):
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
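# Illustrative usage sketch (not part of boto): listing the region names and
# opening a connection. The region name is an assumption; credentials are
# resolved through boto's usual configuration chain.
def _example_usage():
    region_names = [region.name for region in regions()]
    connection = connect_to_region('us-east-1')
    return region_names, connection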
| mit | -3,923,211,542,746,305,000 | -5,506,751,749,026,234,000 | 39.292683 | 74 | 0.743341 | false |
yavuzovski/playground | python/django/RESTTest/.venv/lib/python3.4/site-packages/django/contrib/gis/geos/linestring.py | 136 | 6019 | from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.coordseq import GEOSCoordSeq
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry, LinearGeometryMixin
from django.contrib.gis.geos.point import Point
from django.contrib.gis.shortcuts import numpy
from django.utils.six.moves import range
class LineString(LinearGeometryMixin, GEOSGeometry):
_init_func = capi.create_linestring
_minlength = 2
has_cs = True
def __init__(self, *args, **kwargs):
"""
Initializes on the given sequence -- may take lists, tuples, NumPy arrays
of X,Y pairs, or Point objects. If Point objects are used, ownership is
_not_ transferred to the LineString object.
Examples:
ls = LineString((1, 1), (2, 2))
ls = LineString([(1, 1), (2, 2)])
ls = LineString(array([(1, 1), (2, 2)]))
ls = LineString(Point(1, 1), Point(2, 2))
"""
# If only one argument provided, set the coords array appropriately
if len(args) == 1:
coords = args[0]
else:
coords = args
if not (isinstance(coords, (tuple, list)) or numpy and isinstance(coords, numpy.ndarray)):
raise TypeError('Invalid initialization input for LineStrings.')
# If SRID was passed in with the keyword arguments
srid = kwargs.get('srid')
ncoords = len(coords)
if not ncoords:
super(LineString, self).__init__(self._init_func(None), srid=srid)
return
if ncoords < self._minlength:
raise ValueError(
'%s requires at least %d points, got %s.' % (
self.__class__.__name__,
self._minlength,
ncoords,
)
)
if isinstance(coords, (tuple, list)):
# Getting the number of coords and the number of dimensions -- which
# must stay the same, e.g., no LineString((1, 2), (1, 2, 3)).
ndim = None
# Incrementing through each of the coordinates and verifying
for coord in coords:
if not isinstance(coord, (tuple, list, Point)):
raise TypeError('Each coordinate should be a sequence (list or tuple)')
if ndim is None:
ndim = len(coord)
self._checkdim(ndim)
elif len(coord) != ndim:
raise TypeError('Dimension mismatch.')
numpy_coords = False
else:
shape = coords.shape # Using numpy's shape.
if len(shape) != 2:
raise TypeError('Too many dimensions.')
self._checkdim(shape[1])
ndim = shape[1]
numpy_coords = True
# Creating a coordinate sequence object because it is easier to
# set the points using GEOSCoordSeq.__setitem__().
cs = GEOSCoordSeq(capi.create_cs(ncoords, ndim), z=bool(ndim == 3))
for i in range(ncoords):
if numpy_coords:
cs[i] = coords[i, :]
elif isinstance(coords[i], Point):
cs[i] = coords[i].tuple
else:
cs[i] = coords[i]
# Calling the base geometry initialization with the returned pointer
# from the function.
super(LineString, self).__init__(self._init_func(cs.ptr), srid=srid)
def __iter__(self):
"Allows iteration over this LineString."
for i in range(len(self)):
yield self[i]
def __len__(self):
"Returns the number of points in this LineString."
return len(self._cs)
def _get_single_external(self, index):
return self._cs[index]
_get_single_internal = _get_single_external
def _set_list(self, length, items):
ndim = self._cs.dims
hasz = self._cs.hasz # I don't understand why these are different
# create a new coordinate sequence and populate accordingly
cs = GEOSCoordSeq(capi.create_cs(length, ndim), z=hasz)
for i, c in enumerate(items):
cs[i] = c
ptr = self._init_func(cs.ptr)
if ptr:
capi.destroy_geom(self.ptr)
self.ptr = ptr
self._post_init(self.srid)
else:
# can this happen?
raise GEOSException('Geometry resulting from slice deletion was invalid.')
def _set_single(self, index, value):
self._checkindex(index)
self._cs[index] = value
def _checkdim(self, dim):
if dim not in (2, 3):
raise TypeError('Dimension mismatch.')
# #### Sequence Properties ####
@property
def tuple(self):
"Returns a tuple version of the geometry from the coordinate sequence."
return self._cs.tuple
coords = tuple
def _listarr(self, func):
"""
Internal routine that returns a sequence (list) corresponding with
the given function. Will return a numpy array if possible.
"""
lst = [func(i) for i in range(len(self))]
if numpy:
return numpy.array(lst) # ARRRR!
else:
return lst
@property
def array(self):
"Returns a numpy array for the LineString."
return self._listarr(self._cs.__getitem__)
@property
def x(self):
"Returns a list or numpy array of the X variable."
return self._listarr(self._cs.getX)
@property
def y(self):
"Returns a list or numpy array of the Y variable."
return self._listarr(self._cs.getY)
@property
def z(self):
"Returns a list or numpy array of the Z variable."
if not self.hasz:
return None
else:
return self._listarr(self._cs.getZ)
# LinearRings are LineStrings used within Polygons.
class LinearRing(LineString):
_minlength = 4
_init_func = capi.create_linearring
| gpl-3.0 | 5,336,641,141,121,556,000 | -8,353,198,424,115,223,000 | 33.00565 | 98 | 0.573517 | false |
Snyder005/StatisticalMethods | examples/XrayImage/cluster.py | 10 | 5264 | import astropy.io.fits as pyfits
import numpy as np
import os
# ====================================================================
# Functions for realizing the model:
def beta_model_profile(r, S0, rc, beta):
'''
The fabled beta model, radial profile S(r)
'''
return S0 * (1.0 + (r/rc)**2)**(-3.0*beta + 0.5)
def beta_model_image(x, y, x0, y0, S0, rc, beta):
'''
Here, x and y are arrays ("meshgrids" or "ramps") containing x and y pixel numbers,
and the other arguments are galaxy cluster beta model parameters.
Returns a surface brightness image of the same shape as x and y.
'''
return beta_model_profile(np.sqrt((x-x0)**2 + (y-y0)**2), S0, rc, beta)
def model_image(x, y, ex, pb, x0, y0, S0, rc, beta, b):
'''
Here, x, y, ex and pb are images, all of the same shape, and the other args are
cluster model and X-ray background parameters. ex is the (constant) exposure map
and pb is the (constant) particle background map.
'''
return (beta_model_image(x, y, x0, y0, S0, rc, beta) + b) * ex + pb
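# Illustrative sketch (not part of the original analysis): evaluating the beta
# model on a small pixel grid; the parameter values are placeholders chosen
# only to show the call signature.
def _example_beta_image():
    demo_y, demo_x = np.mgrid[0:5, 0:5]
    # a 5x5 surface brightness image centred on (x0, y0) = (2, 2)
    return beta_model_image(demo_x, demo_y, x0=2.0, y0=2.0, S0=1.0, rc=1.5, beta=0.67)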
# ====================================================================
class XrayData:
def __init__(self):
self.pars = np.zeros(6)
return
def read_in_data(self):
# Download the data if we don't already have it
self.targdir = 'a1835_xmm/'
os.system('mkdir -p ' + self.targdir)
imagefile = 'P0098010101M2U009IMAGE_3000.FTZ'
expmapfile = 'P0098010101M2U009EXPMAP3000.FTZ'
bkgmapfile = 'P0098010101M2X000BKGMAP3000.FTZ'
remotedir = 'http://heasarc.gsfc.nasa.gov/FTP/xmm/data/rev0/0098010101/PPS/'
for filename in [imagefile,expmapfile,bkgmapfile]:
path = self.targdir + filename
url = remotedir + filename
if not os.path.isfile(path): # i.e. if the file does not exist already:
                os.system('wget -nd -O ' + path + ' ' + url)
# Read in the data
self.imfits = pyfits.open(self.targdir + imagefile)
self.im = self.imfits[0].data
self.pbfits = pyfits.open(self.targdir + bkgmapfile)
self.pb = self.pbfits[0].data
self.exfits = pyfits.open(self.targdir + expmapfile)
self.ex = self.exfits[0].data
return
def set_up_maps(self):
# Make x and y ramps
self.x = np.array([np.arange(self.im.shape[0]) for j in np.arange(self.im.shape[1])])
self.y = np.array([[j for i in np.arange(self.im.shape[1])] for j in np.arange(self.im.shape[0])])
### mask a list of circular regions covering non-cluster sources
maskfile = 'M2ptsrc.txt'
mask = np.loadtxt(self.targdir + maskfile)
for reg in mask:
distance2 = (self.x-(reg[0]-1.0))**2 + (self.y-(reg[1]-1.0))**2
self.ex[distance2 <= reg[2]**2] = 0.0
# helpful mask image to keep track of which pixels we can ignore
self.mask = self.ex * 0.0
self.mask[self.ex > 0.0] = 1.0
return
def set_pars(self,x0,y0,S0,rc,beta,b):
self.pars[0] = x0
self.pars[1] = y0
self.pars[2] = S0
self.pars[3] = rc
self.pars[4] = beta
self.pars[5] = b
return
def make_mean_image(self):
x0 = self.pars[0]
y0 = self.pars[1]
S0 = self.pars[2]
rc = self.pars[3]
beta = self.pars[4]
b = self.pars[5]
self.mu = model_image(self.x,self.y,self.ex,self.pb,x0,y0,S0,rc,beta,b)
return
def make_mock_data(self):
self.mock = np.random.poisson(self.mu,self.mu.shape)
return
def evaluate_log_prior(self):
# Uniform in all parameters...
return 0.0
def evaluate_log_likelihood(self):
self.make_mean_image()
# Return un-normalized Poisson sampling distribution:
# log (\mu^N e^{-\mu} / N!) = N log \mu - \mu + constant
return np.sum(self.im * np.log(self.mu) - self.mu)
def evaluate_unnormalised_log_posterior(self,x0,y0,S0,rc,beta,b):
self.set_pars(x0,y0,S0,rc,beta,b)
return self.evaluate_log_likelihood() + self.evaluate_log_prior()
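# Illustrative sketch (not part of the original code): the intended end-to-end
# workflow for XrayData. Calling it downloads the XMM data; the parameter
# values passed to the posterior are arbitrary placeholders.
def _example_workflow():
    data = XrayData()
    data.read_in_data()
    data.set_up_maps()
    # arguments are (x0, y0, S0, rc, beta, b)
    return data.evaluate_unnormalised_log_posterior(328.0, 348.0, 2.5, 12.0, 0.67, 5e-7)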
# ====================================================================
'''
Adam's routines for input into MCMC:
def lnpost(params, data):
# assumes S0 is a free parameter
x0 = params[0]
y0 = params[1]
S0 = params[2]
rc = params[3]
beta = params[4]
bg = params[5]
if x0 < 0. or x0 >= data.im.shape[0] or y0 < 0. or y0 > data.im.shape[1] or S0 <= 0. or rc <= 0. or beta <= 0.0:
return -np.inf
mod = modelImage(data, x0, y0, S0, rc, beta, bg)
if np.min(mod) <= 0.0:
return -np.inf
return np.sum( (-mod + data.im * np.log(mod)) * data.mask )
def lnpost2(params, data):
# assumes log(S0) is a free parameter
x0 = params[0]
y0 = params[1]
S0 = np.exp(params[2])
rc = params[3]
beta = params[4]
bg = params[5]
if x0 < 0. or x0 >= data.im.shape[0] or y0 < 0. or y0 > data.im.shape[1] or S0 <= 0. or rc <= 0. or beta <= 0.0:
return -np.inf
mod = modelImage(data, x0, y0, S0, rc, beta, bg)
if np.min(mod) <= 0.0:
return -np.inf
return np.sum( (-mod + data.im * np.log(mod)) * data.mask )
'''
| gpl-2.0 | 320,709,109,385,701,400 | 77,539,614,803,731,860 | 31.294479 | 116 | 0.556231 | false |
beni55/django | tests/template_tests/filter_tests/test_unordered_list.py | 14 | 6527 | from django.template.defaultfilters import unordered_list
from django.test import SimpleTestCase, ignore_warnings
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import python_2_unicode_compatible
from django.utils.safestring import mark_safe
from ..utils import setup
class UnorderedListTests(SimpleTestCase):
@setup({'unordered_list01': '{{ a|unordered_list }}'})
def test_unordered_list01(self):
output = self.engine.render_to_string('unordered_list01', {'a': ['x>', ['<y']]})
        self.assertEqual(output, '\t<li>x&gt;\n\t<ul>\n\t\t<li>&lt;y</li>\n\t</ul>\n\t</li>')
@ignore_warnings(category=RemovedInDjango20Warning)
@setup({'unordered_list02': '{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}'})
def test_unordered_list02(self):
output = self.engine.render_to_string('unordered_list02', {'a': ['x>', ['<y']]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
@setup({'unordered_list03': '{{ a|unordered_list }}'})
def test_unordered_list03(self):
output = self.engine.render_to_string('unordered_list03', {'a': ['x>', [mark_safe('<y')]]})
        self.assertEqual(output, '\t<li>x&gt;\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
@setup({'unordered_list04': '{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}'})
def test_unordered_list04(self):
output = self.engine.render_to_string('unordered_list04', {'a': ['x>', [mark_safe('<y')]]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
@setup({'unordered_list05': '{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}'})
def test_unordered_list05(self):
output = self.engine.render_to_string('unordered_list05', {'a': ['x>', ['<y']]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
@ignore_warnings(category=RemovedInDjango20Warning)
class DeprecatedUnorderedListSyntaxTests(SimpleTestCase):
@setup({'unordered_list01': '{{ a|unordered_list }}'})
def test_unordered_list01(self):
output = self.engine.render_to_string('unordered_list01', {'a': ['x>', [['<y', []]]]})
        self.assertEqual(output, '\t<li>x&gt;\n\t<ul>\n\t\t<li>&lt;y</li>\n\t</ul>\n\t</li>')
@setup({'unordered_list02': '{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}'})
def test_unordered_list02(self):
output = self.engine.render_to_string('unordered_list02', {'a': ['x>', [['<y', []]]]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
@setup({'unordered_list03': '{{ a|unordered_list }}'})
def test_unordered_list03(self):
output = self.engine.render_to_string('unordered_list03', {'a': ['x>', [[mark_safe('<y'), []]]]})
        self.assertEqual(output, '\t<li>x&gt;\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
@setup({'unordered_list04': '{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}'})
def test_unordered_list04(self):
output = self.engine.render_to_string('unordered_list04', {'a': ['x>', [[mark_safe('<y'), []]]]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
@setup({'unordered_list05': '{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}'})
def test_unordered_list05(self):
output = self.engine.render_to_string('unordered_list05', {'a': ['x>', [['<y', []]]]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
class FunctionTests(SimpleTestCase):
def test_list(self):
self.assertEqual(unordered_list(['item 1', 'item 2']), '\t<li>item 1</li>\n\t<li>item 2</li>')
def test_nested(self):
self.assertEqual(
unordered_list(['item 1', ['item 1.1']]),
'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t</ul>\n\t</li>',
)
def test_nested2(self):
self.assertEqual(
unordered_list(['item 1', ['item 1.1', 'item1.2'], 'item 2']),
'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t\t<li>item1.2'
'</li>\n\t</ul>\n\t</li>\n\t<li>item 2</li>',
)
def test_nested_multiple(self):
self.assertEqual(
unordered_list(['item 1', ['item 1.1', ['item 1.1.1', ['item 1.1.1.1']]]]),
'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1\n\t\t<ul>\n\t\t\t<li>'
'item 1.1.1\n\t\t\t<ul>\n\t\t\t\t<li>item 1.1.1.1</li>\n\t\t\t'
'</ul>\n\t\t\t</li>\n\t\t</ul>\n\t\t</li>\n\t</ul>\n\t</li>',
)
def test_nested_multiple2(self):
self.assertEqual(
unordered_list(['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]),
'\t<li>States\n\t<ul>\n\t\t<li>Kansas\n\t\t<ul>\n\t\t\t<li>'
'Lawrence</li>\n\t\t\t<li>Topeka</li>\n\t\t</ul>\n\t\t</li>'
'\n\t\t<li>Illinois</li>\n\t</ul>\n\t</li>',
)
def test_ulitem(self):
@python_2_unicode_compatible
class ULItem(object):
def __init__(self, title):
self.title = title
def __str__(self):
return 'ulitem-%s' % str(self.title)
a = ULItem('a')
b = ULItem('b')
self.assertEqual(unordered_list([a, b]), '\t<li>ulitem-a</li>\n\t<li>ulitem-b</li>')
def item_generator():
yield a
yield b
self.assertEqual(unordered_list(item_generator()), '\t<li>ulitem-a</li>\n\t<li>ulitem-b</li>')
@ignore_warnings(category=RemovedInDjango20Warning)
def test_legacy(self):
"""
Old format for unordered lists should still work
"""
self.assertEqual(unordered_list(['item 1', []]), '\t<li>item 1</li>')
self.assertEqual(
unordered_list(['item 1', [['item 1.1', []]]]),
'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t</ul>\n\t</li>',
)
self.assertEqual(
unordered_list(['item 1', [['item 1.1', []],
['item 1.2', []]]]), '\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1'
'</li>\n\t\t<li>item 1.2</li>\n\t</ul>\n\t</li>',
)
self.assertEqual(
unordered_list(['States', [['Kansas', [['Lawrence', []], ['Topeka', []]]], ['Illinois', []]]]),
'\t<li>States\n\t<ul>\n\t\t<li>Kansas\n\t\t<ul>\n\t\t\t<li>Lawrence</li>'
'\n\t\t\t<li>Topeka</li>\n\t\t</ul>\n\t\t</li>\n\t\t<li>Illinois</li>\n\t</ul>\n\t</li>',
)
| bsd-3-clause | -8,774,713,137,577,228,000 | 1,549,953,191,010,348,000 | 44.643357 | 107 | 0.552168 | false |
Veske/POL-POM-5 | phoenicis-bash/src/main/python/BashBinder/CommandParser.py | 3 | 9173 | #!/usr/bin/env python
# coding=utf-8
# Copyright (C) 2015 Pâris Quentin
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
from Environment.EnvironmentFormatter import EnvironmentFormatter
from com.playonlinux.framework import Downloader
from com.playonlinux.core.scripts import ScriptFailureException
from com.playonlinux.framework import WineVersion
from com.playonlinux.framework import Wine
from com.playonlinux.framework import WineShortcut
from com.playonlinux.core.utils import Architecture
from java.net import URL
from java.io import File, FileOutputStream
class CommandParser(object):
def __init__(self, setupWindowManager, command):
self.command = command
self.splitCommand = self.command.split("\t")
self.setupWindowManager = setupWindowManager
def getCookie(self):
return self.splitCommand[0]
def getCommand(self):
return self.splitCommand[1]
def executeCommand(self):
commandExecutor = CommandParser.CommandExecutor(self.splitCommand, self.setupWindowManager)
return getattr(commandExecutor, self.getCommand())()
class CommandExecutor():
def __init__(self, command, setupWindowManager):
self.command = command
self.setupWindowManager = setupWindowManager
def POL_SetupWindow_Init(self):
setupWindowId = self.command[2]
if("TITLE" in os.environ.keys()):
windowTitle = os.environ["TITLE"]
else:
windowTitle = "${application.name} Wizard";
self.setupWindowManager.newWindow(setupWindowId, windowTitle).init()
def POL_SetupWindow_message(self):
setupWindowId = self.command[2]
textToShow = self.command[3]
self.setupWindowManager.getWindow(setupWindowId).message(textToShow)
def POL_SetupWindow_presentation(self):
setupWindowId = self.command[2]
programName = self.command[3]
programEditor = self.command[4]
editorURL = self.command[5]
scriptorName = self.command[6]
prefixName = self.command[7]
self.setupWindowManager.getWindow(setupWindowId).presentation(programName, programEditor, editorURL, scriptorName, prefixName)
def POL_SetupWindow_free_presentation(self):
setupWindowId = self.command[2]
textToShow = self.command[3]
self.setupWindowManager.getWindow(setupWindowId).presentation(textToShow)
def POL_SetupWindow_wait(self):
setupWindowId = self.command[2]
textToShow = self.command[3]
self.setupWindowManager.getWindow(setupWindowId).wait(textToShow)
def POL_SetupWindow_browse(self):
setupWindowId = self.command[2]
textToShow = self.command[3]
            try:
                currentDirectory = self.command[4]
            except IndexError:
                currentDirectory = ""
            # allowedFiles was referenced but never assigned; treat it as an
            # optional trailing argument of the protocol line
            try:
                allowedFiles = self.command[5]
            except IndexError:
                allowedFiles = ""
            return self.setupWindowManager.getWindow(setupWindowId).browse(textToShow, currentDirectory, allowedFiles)
def POL_SetupWindow_textbox(self):
setupWindowId = self.command[2]
textToShow = self.command[3]
try:
defaultValue = self.command[4]
except IndexError:
defaultValue = ""
return self.setupWindowManager.getWindow(setupWindowId).textbox(textToShow, defaultValue)
def POL_SetupWindow_menu(self):
setupWindowId = self.command[2]
textToShow = self.command[3]
try:
separator = self.command[5]
except IndexError:
separator = "~"
items = self.command[4].split(separator)
return self.setupWindowManager.getWindow(setupWindowId).menu(textToShow, items)
def POL_SetupWindow_Close(self):
setupWindowId = self.command[2]
self.setupWindowManager.getWindow(setupWindowId).close()
def POL_Download(self):
setupWindowId = self.command[2]
url = self.command[3]
currentDirectory = self.command[4]
try:
checkSum = self.command[5]
except IndexError:
checkSum = ""
setupWindow = self.setupWindowManager.getWindow(setupWindowId)
localFile = os.path.join(currentDirectory,
Downloader.wizard(setupWindow).findFileNameFromURL(URL(url)))
downloader = Downloader.wizard(setupWindow).get(url, localFile)
if(checkSum != ""):
downloader.check(checkSum)
def POL_SetupWindow_licence(self):
setupWindowId = self.command[2]
textToShow = self.command[3]
licenceFilePath = self.command[5]
self.setupWindowManager.getWindow(setupWindowId).licenceFile(textToShow, licenceFilePath)
def POL_Throw(self):
raise ScriptFailureException(self.command[3])
def POL_Print(self):
message = self.command[3]
self.setupWindowManager.template.log(message)
def POL_Wine_InstallVersion(self):
setupWindowId = self.command[2]
version = self.command[3]
arch = self.command[4]
wineVersion = WineVersion(version, "upstream-%s" % arch,
self.setupWindowManager.getWindow(setupWindowId))
wineVersion.install()
def POL_Wine_PrefixCreate(self):
setupWindowId = self.command[2]
setupWindow = self.setupWindowManager.getWindow(setupWindowId)
prefixName = self.command[3]
version = self.command[4]
try:
arch = self.command[5]
arch = str(Architecture.fromWinePackageName(arch).name())
except IndexError:
arch = None
if(arch is not None):
Wine.wizard(setupWindow).selectPrefix(prefixName).createPrefix(version, "upstream", arch)
else:
Wine.wizard(setupWindow).selectPrefix(prefixName).createPrefix(version, arch)
def POL_Wine(self):
setupWindowId = self.command[2]
setupWindow = self.setupWindowManager.getWindow(setupWindowId)
workingDirectory = self.command[3]
prefixName = self.command[4]
fifoOut = self.command[5]
fifoErr = self.command[6]
env = EnvironmentFormatter.getEnvironmentVarsFromBashBase64EncodedString(self.command[7])
prgmName = self.command[8]
args = self.command[9::1]
return Wine.wizard(setupWindow).selectPrefix(prefixName)\
.withErrorStream(FileOutputStream(File(fifoErr)))\
.withOutputStream(FileOutputStream(File(fifoOut)))\
.runForeground(
workingDirectory,
prgmName,
args,
env
).getLastReturnCode()
def POL_Config_PrefixRead(self):
setupWindowId = self.command[2]
setupWindow = self.setupWindowManager.getWindow(setupWindowId)
prefixName = self.command[3]
key = self.command[4]
return Wine.wizard(setupWindow).selectPrefix(prefixName).config().readValue(key)
def POL_Config_PrefixWrite(self):
setupWindowId = self.command[2]
setupWindow = self.setupWindowManager.getWindow(setupWindowId)
prefixName = self.command[3]
key = self.command[4]
value = self.command[5]
            return Wine.wizard(setupWindow).selectPrefix(prefixName).config().writeValue(key, value)
def POL_Shortcut(self):
setupWindowId = self.command[2]
setupWindow = self.setupWindowManager.getWindow(setupWindowId)
winePrefix = self.command[3]
binary = self.command[4]
shortcutName = self.command[5]
websiteIcon = self.command[6]
argument = self.command[7]
categories = self.command[8]
WineShortcut.wizard(setupWindow)\
.withArguments([argument])\
.withExecutableName(binary)\
.withWinePrefix(winePrefix)\
.withName(shortcutName)\
.create() | gpl-3.0 | 3,413,608,956,706,181,600 | -638,910,320,269,110,500 | 35.114173 | 138 | 0.618295 | false |
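# Illustrative sketch (not part of PlayOnLinux): commands reach CommandParser
# as one tab-separated line -- cookie, command name, then positional arguments.
def _example_dispatch(setupWindowManager):
    """Route one hypothetical protocol line through the parser."""
    raw = "COOKIE\tPOL_SetupWindow_message\twindow0\tHello world"
    return CommandParser(setupWindowManager, raw).executeCommand()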
Kaisuke5/chainer | chainer/functions/array/split_axis.py | 13 | 3058 | import collections
import six
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class SplitAxis(function.Function):
"""Function that splits multiple arrays towards the specified axis."""
def __init__(self, indices_or_sections, axis):
if not isinstance(indices_or_sections, (int, collections.Iterable)):
raise TypeError('indices_or_sections must be integer or 1-D array')
self.indices_or_sections = indices_or_sections
self.axis = axis
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
type_check.expect(in_types[0].ndim >= self.axis)
if isinstance(self.indices_or_sections, collections.Iterable):
max_index = type_check.Variable(
self.indices_or_sections[-1], 'max_index')
type_check.expect(in_types[0].shape[self.axis] > max_index)
else:
sections = type_check.Variable(
self.indices_or_sections, 'sections')
type_check.expect(in_types[0].shape[self.axis] % sections == 0)
def forward(self, x):
if isinstance(self.indices_or_sections, collections.Iterable):
cdimx = x[0].shape[self.axis]
ind = list(self.indices_or_sections)
ind.append(cdimx)
prev_i = 0
for i in ind:
cdimy = max(0, min(i, cdimx) - prev_i)
if cdimy == 0:
raise ValueError('Not support if shape contains 0')
prev_i = i
xp = cuda.get_array_module(*x)
return tuple(xp.split(x[0], self.indices_or_sections, self.axis))
def backward(self, x, gys):
xp = cuda.get_array_module(*x)
if any(gy is None for gy in gys):
gx = xp.zeros_like(x[0])
gxs = xp.split(gx, self.indices_or_sections, self.axis)
for gxi, gy in six.moves.zip(gxs, gys):
if gy is None:
continue
gxi[:] = gy
return gx,
else:
return xp.concatenate(gys, axis=self.axis),
def split_axis(x, indices_or_sections, axis):
"""Splits given variables along an axis.
Args:
x (tuple of Variables): Variables to be split.
indices_or_sections (int or 1-D array): If this argument is an integer,
N, the array will be divided into N equal arrays along axis.
If it is a 1-D array of sorted integers, it
indicates the positions where the array is split.
axis (int): Axis that the input array is split along.
Returns:
``tuple`` or ``Variable``: Tuple of :class:`~chainer.Variable` objects
if the number of outputs is more than 1 or
:class:`~chainer.Variable` otherwise.
.. note::
This function raises ``ValueError`` if at least
    one of the outputs is split to zero-size
(i.e. `axis`-th value of its shape is zero).
"""
return SplitAxis(indices_or_sections, axis)(x)
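# Illustrative sketch (not from chainer's documentation): splitting a (2, 6)
# array into three (2, 2) chunks along axis 1.
def _example_split():
    import numpy
    from chainer import Variable
    x = Variable(numpy.arange(12, dtype=numpy.float32).reshape(2, 6))
    y0, y1, y2 = split_axis(x, 3, axis=1)
    return y0.data.shape  # (2, 2)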
| mit | 8,675,845,474,331,301,000 | 2,452,910,300,085,180,000 | 35.843373 | 79 | 0.591236 | false |
abhishekgahlot/youtube-dl | youtube_dl/extractor/testtube.py | 109 | 3295 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
qualities,
)
class TestTubeIE(InfoExtractor):
_VALID_URL = r'https?://testtube\.com/[^/?#]+/(?P<id>[^/?#]+)'
_TESTS = [{
'url': 'https://testtube.com/dnews/5-weird-ways-plants-can-eat-animals?utm_source=FB&utm_medium=DNews&utm_campaign=DNewsSocial',
'info_dict': {
'id': '60163',
'display_id': '5-weird-ways-plants-can-eat-animals',
'duration': 275,
'ext': 'webm',
'title': '5 Weird Ways Plants Can Eat Animals',
'description': 'Why have some plants evolved to eat meat?',
'thumbnail': 're:^https?://.*\.jpg$',
'uploader': 'DNews',
'uploader_id': 'dnews',
},
}, {
'url': 'https://testtube.com/iflscience/insane-jet-ski-flipping',
'info_dict': {
'id': 'fAGfJ4YjVus',
'ext': 'mp4',
'title': 'Flipping Jet-Ski Skills | Outrageous Acts of Science',
'uploader': 'Science Channel',
'uploader_id': 'ScienceChannel',
'upload_date': '20150203',
'description': 'md5:e61374030015bae1d2e22f096d4769d6',
}
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
youtube_url = self._html_search_regex(
r'<iframe[^>]+src="((?:https?:)?//www.youtube.com/embed/[^"]+)"',
webpage, 'youtube iframe', default=None)
if youtube_url:
return self.url_result(youtube_url, 'Youtube', video_id=display_id)
video_id = self._search_regex(
r"player\.loadRevision3Item\('video_id',\s*([0-9]+)\);",
webpage, 'video ID')
all_info = self._download_json(
'https://testtube.com/api/getPlaylist.json?api_key=ba9c741bce1b9d8e3defcc22193f3651b8867e62&codecs=h264,vp8,theora&video_id=%s' % video_id,
video_id)
info = all_info['items'][0]
formats = []
for vcodec, fdatas in info['media'].items():
for name, fdata in fdatas.items():
formats.append({
'format_id': '%s-%s' % (vcodec, name),
'url': fdata['url'],
'vcodec': vcodec,
'tbr': fdata.get('bitrate'),
})
self._sort_formats(formats)
duration = int_or_none(info.get('duration'))
images = info.get('images')
thumbnails = None
preference = qualities(['mini', 'small', 'medium', 'large'])
if images:
thumbnails = [{
'id': thumbnail_id,
'url': img_url,
'preference': preference(thumbnail_id)
} for thumbnail_id, img_url in images.items()]
return {
'id': video_id,
'display_id': display_id,
'title': info['title'],
'description': info.get('summary'),
'thumbnails': thumbnails,
'uploader': info.get('show', {}).get('name'),
'uploader_id': info.get('show', {}).get('slug'),
'duration': duration,
'formats': formats,
}
| unlicense | 5,669,819,765,768,407,000 | 9,158,713,306,151,299,000 | 35.611111 | 151 | 0.515023 | false |
OndrejIT/pyload | module/plugins/crypter/GoogledriveComDereferer.py | 6 | 2747 | # -*- coding: utf-8 -*-
from module.network.HTTPRequest import BadHeader
from ..internal.Crypter import Crypter
from ..internal.misc import json
class GoogledriveComDereferer(Crypter):
__name__ = "GoogledriveComDereferer"
__type__ = "crypter"
__version__ = "0.01"
__status__ = "testing"
__pattern__ = r'https?://(?:www\.)?(?:drive|docs)\.google\.com/open\?(?:.+;)?id=(?P<ID>[-\w]+)'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("folder_per_package", "Default;Yes;No", "Create folder for each package", "Default"),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10)]
__description__ = """Drive.google.com dereferer plugin"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "[email protected]"),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
NAME_PATTERN = r"folderName: '(?P<N>.+?)'"
OFFLINE_PATTERN = r'<TITLE>'
API_URL = "https://www.googleapis.com/drive/v3/"
API_KEY = "AIzaSyAcA9c4evtwSY1ifuvzo6HKBkeot5Bk_U4"
def api_response(self, cmd, **kwargs):
kwargs['key'] = self.API_KEY
try:
json_data = json.loads(self.load("%s%s" % (self.API_URL, cmd),
get=kwargs))
self.log_debug("API response: %s" % json_data)
return json_data
except BadHeader, e:
try:
json_data = json.loads(e.content)
self.log_error("API Error: %s" % cmd,
json_data['error']['message'],
"ID: %s" % self.info['pattern']['ID'],
"Error code: %s" % e.code)
except ValueError:
self.log_error("API Error: %s" % cmd,
e,
"ID: %s" % self.info['pattern']['ID'],
"Error code: %s" % e.code)
return None
def decrypt(self, pyfile):
json_data = self.api_response("files/%s" % self.info['pattern']['ID'])
if json_data is None:
self.fail("API error")
if 'error' in json_data:
if json_data['error']['code'] == 404:
self.offline()
else:
self.fail(json_data['error']['message'])
link = "https://drive.google.com/%s/%s" % \
(("file/d" if json_data['mimeType'] != "application/vnd.google-apps.folder" else "drive/folders"),
self.info['pattern']['ID'])
self.packages = [(pyfile.package().folder, [link], pyfile.package().name)]
| gpl-3.0 | -8,952,169,316,261,700,000 | -6,739,934,925,522,651,000 | 37.690141 | 113 | 0.502366 | false |
phoebusliang/parallel-lettuce | tests/integration/lib/Django-1.2.5/django/core/xheaders.py | 518 | 1157 | """
Pages in Django are served up with custom HTTP headers containing useful
information about those pages -- namely, the content type and object ID.
This module contains utility functions for retrieving and doing interesting
things with these special "X-Headers" (so called because the HTTP spec demands
that custom headers are prefixed with "X-").
Next time you're at slashdot.org, watch out for X-Fry and X-Bender. :)
"""
def populate_xheaders(request, response, model, object_id):
"""
Adds the "X-Object-Type" and "X-Object-Id" headers to the given
HttpResponse according to the given model and object_id -- but only if the
given HttpRequest object has an IP address within the INTERNAL_IPS setting
or if the request is from a logged in staff member.
"""
from django.conf import settings
if (request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS
or (hasattr(request, 'user') and request.user.is_active
and request.user.is_staff)):
response['X-Object-Type'] = "%s.%s" % (model._meta.app_label, model._meta.object_name.lower())
response['X-Object-Id'] = str(object_id)
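# Illustrative sketch (an assumption, not Django source): a view that tags its
# response with the X-Headers for some hypothetical "page" model instance.
def _example_view(request, page):
    from django.http import HttpResponse
    response = HttpResponse(page.content)
    populate_xheaders(request, response, page.__class__, page.pk)
    return response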
| gpl-3.0 | 3,335,908,681,528,135,700 | 2,400,864,259,477,033,000 | 47.208333 | 102 | 0.707001 | false |
JPJPJPOPOP/zulip | scripts/nagios/cron_file_helper.py | 52 | 1044 | import time
# Avoid requiring the typing module to be installed
if False:
from typing import Tuple
def nagios_from_file(results_file):
# type: (str) -> Tuple[int, str]
"""Returns a nagios-appropriate string and return code obtained by
parsing the desired file on disk. The file on disk should be of format
    %s|%s|%s|%s % (timestamp, ret_code, state, data)
This file is created by various nagios checking cron jobs such as
check-rabbitmq-queues and check-rabbitmq-consumers"""
data = open(results_file).read().strip()
pieces = data.split('|')
if not len(pieces) == 4:
state = 'UNKNOWN'
ret = 3
data = "Results file malformed"
else:
timestamp = int(pieces[0])
time_diff = time.time() - timestamp
if time_diff > 60 * 2:
ret = 3
state = 'UNKNOWN'
data = "Results file is stale"
else:
ret = int(pieces[1])
state = pieces[2]
data = pieces[3]
return (ret, "%s: %s" % (state, data))
| apache-2.0 | 5,620,525,675,582,215,000 | 1,055,059,522,564,094,700 | 27.216216 | 74 | 0.588123 | false |
hachreak/invenio-ext | invenio_ext/sqlalchemy/types/marshal_binary.py | 5 | 1985 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Implement compressed column type."""
from sqlalchemy.types import LargeBinary, TypeDecorator
from invenio_utils.serializers import ZlibMarshal
class MarshalBinary(TypeDecorator):
"""Implement compressed column type."""
impl = LargeBinary
def __init__(self, default_value, force_type=None, *args, **kwargs):
"""Initialize default value and type."""
super(MarshalBinary, self).__init__(*args, **kwargs)
self.default_value = default_value
self.force_type = force_type if force_type is not None else lambda x: x
def process_bind_param(self, value, dialect):
"""Compress data in column."""
if value is not None:
value = ZlibMarshal.dumps(self.force_type(value))
return value
return value
def process_result_value(self, value, dialect):
"""Load comressed data from column."""
if value is not None:
try:
value = ZlibMarshal.loads(value)
except:
value = None
return value if value is not None else \
(self.default_value() if callable(self.default_value) else
self.default_value)
| gpl-2.0 | -7,961,685,805,471,745,000 | -9,120,792,316,337,065,000 | 35.090909 | 79 | 0.671033 | false |
CanalTP/navitia | source/jormungandr/tests/kirin_realtime_tests.py | 1 | 155835 | # Copyright (c) 2001-2015, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
# Note: the tests_mechanism should be the first
# import for the conf to be loaded correctly when only this test is ran
from __future__ import absolute_import
from copy import deepcopy
from datetime import datetime
import uuid
from tests.tests_mechanism import dataset
from jormungandr.utils import str_to_time_stamp, make_namedtuple
from tests import gtfs_realtime_pb2, kirin_pb2
from tests.check_utils import (
get_not_null,
journey_basic_query,
isochrone_basic_query,
get_used_vj,
get_arrivals,
get_valid_time,
is_valid_disruption,
check_journey,
Journey,
Section,
SectionStopDT,
is_valid_graphical_isochrone,
sub_query,
has_the_disruption,
get_disruptions_by_id,
)
from tests.rabbitmq_utils import RabbitMQCnxFixture, rt_topic
from shapely.geometry import asShape
UpdatedStopTime = make_namedtuple(
'UpdatedStopTime',
'stop_id',
'arrival',
'departure',
arrival_delay=0,
departure_delay=0,
message=None,
departure_skipped=False,
arrival_skipped=False,
is_added=False,
is_detour=False,
)
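# Illustrative sketch (values are arbitrary): one delayed stop-time entry, in
# the shape consumed by the send_mock() calls in the tests below.
def _example_updated_stop_time():
    return UpdatedStopTime(
        "stop_point:stopB",
        arrival=tstamp("20120614T080224"),
        departure=tstamp("20120614T080225"),
        arrival_delay=84,
        departure_delay=85,
        message='cow on tracks',
    )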
class MockKirinDisruptionsFixture(RabbitMQCnxFixture):
"""
    Mock a kirin disruption message, in order to check the api
"""
def _make_mock_item(self, *args, **kwargs):
return make_mock_kirin_item(*args, **kwargs)
def tstamp(str):
"""just to have clearer tests"""
return str_to_time_stamp(str)
def _dt(h, m, s):
"""syntaxic sugar"""
return datetime(1900, 1, 1, hour=h, minute=m, second=s)
MAIN_ROUTING_TEST_SETTING = {
'main_routing_test': {'kraken_args': ['--BROKER.rt_topics=' + rt_topic, 'spawn_maintenance_worker']}
}
@dataset(MAIN_ROUTING_TEST_SETTING)
class TestKirinOnVJDeletion(MockKirinDisruptionsFixture):
def test_vj_deletion(self):
"""
send a mock kirin vj cancellation and test that the vj is not taken
"""
response = self.query_region(journey_basic_query + "&data_freshness=realtime")
isochrone = self.query_region(isochrone_basic_query + "&data_freshness=realtime")
# with no cancellation, we have 2 journeys, one direct and one with the vj:A:0
assert get_arrivals(response) == ['20120614T080222', '20120614T080436']
assert get_used_vj(response) == [['vehicle_journey:vjA'], []]
# Disruption impacting lines A, B, C starts at 06:00 and ends at 11:59:59
# Get VJ at 12:00 and disruption doesn't appear
pt_response = self.query_region('vehicle_journeys/vehicle_journey:vjA?_current_datetime=20120614T120000')
assert len(pt_response['disruptions']) == 0
is_valid_graphical_isochrone(isochrone, self.tester, isochrone_basic_query + "&data_freshness=realtime")
geojson = isochrone['isochrones'][0]['geojson']
multi_poly = asShape(geojson)
# we have 3 departures and 1 disruption (linked to line A departure)
departures = self.query_region("stop_points/stop_point:stopB/departures?_current_datetime=20120614T0800")
assert len(departures['disruptions']) == 1
assert len(departures['departures']) == 4
# A new disruption impacting vjA is created between 08:01:00 and 08:01:01
self.send_mock("vjA", "20120614", 'canceled', disruption_id='disruption_bob')
def _check_train_cancel_disruption(dis):
is_valid_disruption(dis, chaos_disrup=False)
assert dis['contributor'] == rt_topic
assert dis['disruption_id'] == 'disruption_bob'
assert dis['severity']['effect'] == 'NO_SERVICE'
assert len(dis['impacted_objects']) == 1
ptobj = dis['impacted_objects'][0]['pt_object']
assert ptobj['embedded_type'] == 'trip'
assert ptobj['id'] == 'vjA'
assert ptobj['name'] == 'vjA'
# for cancellation we do not output the impacted stops
assert 'impacted_stops' not in dis['impacted_objects'][0]
# We should see the disruption
pt_response = self.query_region('vehicle_journeys/vehicle_journey:vjA?_current_datetime=20120614T1337')
assert len(pt_response['disruptions']) == 1
_check_train_cancel_disruption(pt_response['disruptions'][0])
# and we should be able to query for the vj's disruption
disrup_response = self.query_region('vehicle_journeys/vehicle_journey:vjA/disruptions')
assert len(disrup_response['disruptions']) == 1
_check_train_cancel_disruption(disrup_response['disruptions'][0])
traffic_reports_response = self.query_region('traffic_reports?_current_datetime=20120614T0800')
traffic_reports = get_not_null(traffic_reports_response, 'traffic_reports')
assert len(traffic_reports) == 1
vjs = get_not_null(traffic_reports[0], "vehicle_journeys")
assert len(vjs) == 1
assert vjs[0]['id'] == 'vehicle_journey:vjA'
new_response = self.query_region(journey_basic_query + "&data_freshness=realtime")
assert set(get_arrivals(new_response)) == set(['20120614T080436', '20120614T080223'])
assert get_used_vj(new_response) == [['vehicle_journey:vjM'], []]
isochrone_realtime = self.query_region(isochrone_basic_query + "&data_freshness=realtime")
is_valid_graphical_isochrone(
isochrone_realtime, self.tester, isochrone_basic_query + "&data_freshness=realtime"
)
geojson_realtime = isochrone_realtime['isochrones'][0]['geojson']
multi_poly_realtime = asShape(geojson_realtime)
isochrone_base_schedule = self.query_region(isochrone_basic_query + "&data_freshness=base_schedule")
is_valid_graphical_isochrone(
isochrone_base_schedule, self.tester, isochrone_basic_query + "&data_freshness=base_schedule"
)
geojson_base_schedule = isochrone_base_schedule['isochrones'][0]['geojson']
multi_poly_base_schedule = asShape(geojson_base_schedule)
assert not multi_poly.difference(multi_poly_realtime).is_empty
assert multi_poly.equals(multi_poly_base_schedule)
# We have one less departure (vjA because of disruption)
# The disruption doesn't appear because the lines departing aren't impacted during the period
departures = self.query_region("stop_points/stop_point:stopB/departures?_current_datetime=20120614T0800")
assert len(departures['disruptions']) == 0
assert len(departures['departures']) == 3
# We still have 2 passages in base schedule, but we have the new disruption
departures = self.query_region(
"stop_points/stop_point:stopB/departures?_current_datetime=20120614T0800&data_freshness=base_schedule"
)
assert len(departures['disruptions']) == 2
assert len(departures['departures']) == 4
        # it should not have changed anything for the theoretical (base schedule) journeys
new_base = self.query_region(journey_basic_query + "&data_freshness=base_schedule")
assert get_arrivals(new_base) == ['20120614T080222', '20120614T080436']
assert get_used_vj(new_base) == [['vehicle_journey:vjA'], []]
# see http://jira.canaltp.fr/browse/NAVP-266,
# _current_datetime is needed to make it work
# assert len(new_base['disruptions']) == 1
# remove links as the calling url is not the same
for j in new_base['journeys']:
j.pop('links', None)
for j in response['journeys']:
j.pop('links', None)
assert new_base['journeys'] == response['journeys']
@dataset(MAIN_ROUTING_TEST_SETTING)
class TestMainStopAreaWeightFactorWithKirinUpdate(MockKirinDisruptionsFixture):
def test_main_stop_area_weight_factor_with_kirin_update(self):
response = self.query_region("places?type[]=stop_area&q=stop")
places = response['places']
assert len(places) == 3
assert places[0]['id'] == 'stopA'
assert places[1]['id'] == 'stopB'
assert places[2]['id'] == 'stopC'
# only used to activate the autocomplete rebuild process
self.send_mock("id", "20120614", 'type', disruption_id='disruption_bob')
response = self.query_region("places?type[]=stop_area&q=stop&_main_stop_area_weight_factor=5")
places = response['places']
assert len(places) == 3
assert places[0]['id'] == 'stopC'
assert places[1]['id'] == 'stopA'
assert places[2]['id'] == 'stopB'
@dataset(MAIN_ROUTING_TEST_SETTING)
class TestAutocompleteOnWaysWithKirinUpdate(MockKirinDisruptionsFixture):
def test_autocomplete_on_ways_with_kirin_update(self):
response = self.query_region("places?&q=rue ts")
places = response['places']
assert len(places) == 1
assert places[0]['embedded_type'] == 'address'
assert places[0]['name'] == 'rue ts (Condom)'
# only used to activate the autocomplete rebuild process
self.send_mock("id", "20120614", 'type', disruption_id='disruption_bob')
# After injection of realtime, we should not return way with visible=false.
response = self.query_region("places?&q=rue ts")
places = response['places']
assert len(places) == 1
assert places[0]['embedded_type'] == 'address'
assert places[0]['name'] == 'rue ts (Condom)'
@dataset(MAIN_ROUTING_TEST_SETTING)
class TestKirinOnVJDelay(MockKirinDisruptionsFixture):
def test_vj_delay(self):
"""
        send a mock kirin vj delay and test that the delay is taken into account
"""
response = self.query_region(journey_basic_query + "&data_freshness=realtime")
# with no cancellation, we have 2 journeys, one direct and one with the vj:A:0
assert get_arrivals(response) == ['20120614T080222', '20120614T080436']
assert get_used_vj(response) == [['vehicle_journey:vjA'], []]
# we have 3 departures and 1 disruption (linked to the first passage)
departures = self.query_region("stop_points/stop_point:stopB/departures?_current_datetime=20120614T0800")
assert len(departures['disruptions']) == 1
assert len(departures['departures']) == 4
assert departures['departures'][0]['stop_date_time']['departure_date_time'] == '20120614T080100'
pt_response = self.query_region('vehicle_journeys')
initial_nb_vehicle_journeys = len(pt_response['vehicle_journeys'])
assert initial_nb_vehicle_journeys == 9
# no disruption yet
pt_response = self.query_region('vehicle_journeys/vehicle_journey:vjA?_current_datetime=20120614T1337')
assert len(pt_response['disruptions']) == 0
self.send_mock(
"vjA",
"20120614",
'modified',
[
UpdatedStopTime(
"stop_point:stopB",
arrival=tstamp("20120614T080224"),
departure=tstamp("20120614T080225"),
arrival_delay=60 + 24,
departure_delay=60 + 25,
message='cow on tracks',
),
UpdatedStopTime(
"stop_point:stopA",
arrival=tstamp("20120614T080400"),
departure=tstamp("20120614T080400"),
arrival_delay=3 * 60 + 58,
departure_delay=3 * 60 + 58,
),
],
disruption_id='vjA_delayed',
)
# A new vj is created, which the vj with the impact of the disruption
pt_response = self.query_region('vehicle_journeys')
assert len(pt_response['vehicle_journeys']) == (initial_nb_vehicle_journeys + 1)
vj_ids = [vj['id'] for vj in pt_response['vehicle_journeys']]
assert 'vehicle_journey:vjA:modified:0:vjA_delayed' in vj_ids
def _check_train_delay_disruption(dis):
is_valid_disruption(dis, chaos_disrup=False)
assert dis['disruption_id'] == 'vjA_delayed'
assert dis['severity']['effect'] == 'SIGNIFICANT_DELAYS'
assert len(dis['impacted_objects']) == 1
ptobj = dis['impacted_objects'][0]['pt_object']
assert ptobj['embedded_type'] == 'trip'
assert ptobj['id'] == 'vjA'
assert ptobj['name'] == 'vjA'
# for delay we should have detail on the impacted stops
impacted_objs = get_not_null(dis['impacted_objects'][0], 'impacted_stops')
assert len(impacted_objs) == 2
imp_obj1 = impacted_objs[0]
assert get_valid_time(get_not_null(imp_obj1, 'amended_arrival_time')) == _dt(h=8, m=2, s=24)
assert get_valid_time(get_not_null(imp_obj1, 'amended_departure_time')) == _dt(h=8, m=2, s=25)
assert get_not_null(imp_obj1, 'cause') == 'cow on tracks'
assert get_not_null(imp_obj1, 'departure_status') == 'delayed'
assert get_not_null(imp_obj1, 'arrival_status') == 'delayed'
assert get_not_null(imp_obj1, 'stop_time_effect') == 'delayed'
assert get_valid_time(get_not_null(imp_obj1, 'base_arrival_time')) == _dt(8, 1, 0)
assert get_valid_time(get_not_null(imp_obj1, 'base_departure_time')) == _dt(8, 1, 0)
imp_obj2 = impacted_objs[1]
assert get_valid_time(get_not_null(imp_obj2, 'amended_arrival_time')) == _dt(h=8, m=4, s=0)
assert get_valid_time(get_not_null(imp_obj2, 'amended_departure_time')) == _dt(h=8, m=4, s=0)
assert imp_obj2['cause'] == ''
assert get_not_null(imp_obj1, 'stop_time_effect') == 'delayed'
assert get_not_null(imp_obj1, 'departure_status') == 'delayed'
assert get_not_null(imp_obj1, 'arrival_status') == 'delayed'
assert get_valid_time(get_not_null(imp_obj2, 'base_departure_time')) == _dt(8, 1, 2)
assert get_valid_time(get_not_null(imp_obj2, 'base_arrival_time')) == _dt(8, 1, 2)
# we should see the disruption
pt_response = self.query_region('vehicle_journeys/vehicle_journey:vjA?_current_datetime=20120614T1337')
assert len(pt_response['disruptions']) == 1
_check_train_delay_disruption(pt_response['disruptions'][0])
        # Line M, added later for shared-section tests, is forbidden here so it does not disturb this test
new_response = self.query_region(journey_basic_query + "&data_freshness=realtime&forbidden_uris[]=M&")
assert get_arrivals(new_response) == ['20120614T080436', '20120614T080520']
assert get_used_vj(new_response) == [[], ['vehicle_journey:vjA:modified:0:vjA_delayed']]
pt_journey = new_response['journeys'][1]
check_journey(
pt_journey,
Journey(
sections=[
Section(
departure_date_time='20120614T080208',
arrival_date_time='20120614T080225',
base_departure_date_time=None,
base_arrival_date_time=None,
stop_date_times=[],
),
Section(
departure_date_time='20120614T080225',
arrival_date_time='20120614T080400',
base_departure_date_time='20120614T080100',
base_arrival_date_time='20120614T080102',
stop_date_times=[
SectionStopDT(
departure_date_time='20120614T080225',
arrival_date_time='20120614T080224',
base_departure_date_time='20120614T080100',
base_arrival_date_time='20120614T080100',
),
SectionStopDT(
departure_date_time='20120614T080400',
arrival_date_time='20120614T080400',
base_departure_date_time='20120614T080102',
base_arrival_date_time='20120614T080102',
),
],
),
Section(
departure_date_time='20120614T080400',
arrival_date_time='20120614T080520',
base_departure_date_time=None,
base_arrival_date_time=None,
stop_date_times=[],
),
]
),
)
# it should not have changed anything for the theoretical (base) schedule
new_base = self.query_region(journey_basic_query + "&data_freshness=base_schedule")
assert get_arrivals(new_base) == ['20120614T080222', '20120614T080436']
assert get_used_vj(new_base) == [['vehicle_journey:vjA'], []]
# we have one delayed departure
departures = self.query_region("stop_points/stop_point:stopB/departures?_current_datetime=20120614T0800")
assert len(departures['disruptions']) == 2
assert len(departures['departures']) == 4
assert departures['departures'][1]['stop_date_time']['departure_date_time'] == '20120614T080225'
# Same as realtime except the departure date time
departures = self.query_region(
"stop_points/stop_point:stopB/departures?_current_datetime=20120614T0800&data_freshness=base_schedule"
)
assert len(departures['disruptions']) == 2
assert len(departures['departures']) == 4
assert departures['departures'][0]['stop_date_time']['departure_date_time'] == '20120614T080100'
# We send the same disruption again
self.send_mock(
"vjA",
"20120614",
'modified',
[
UpdatedStopTime(
"stop_point:stopB",
arrival=tstamp("20120614T080224"),
departure=tstamp("20120614T080225"),
arrival_delay=60 + 24,
departure_delay=60 + 25,
message='cow on tracks',
),
UpdatedStopTime(
"stop_point:stopA",
arrival=tstamp("20120614T080400"),
departure=tstamp("20120614T080400"),
arrival_delay=3 * 60 + 58,
departure_delay=3 * 60 + 58,
),
],
disruption_id='vjA_delayed',
)
# A new vj is created, but a useless vj has been cleaned, so the number of vj does not change
pt_response = self.query_region('vehicle_journeys')
assert len(pt_response['vehicle_journeys']) == (initial_nb_vehicle_journeys + 1)
pt_response = self.query_region('vehicle_journeys/vehicle_journey:vjA?_current_datetime=20120614T1337')
assert len(pt_response['disruptions']) == 1
_check_train_delay_disruption(pt_response['disruptions'][0])
# So the first real-time vj created for the first disruption should be deactivated
# In order not to disturb the test, line M (added later for the shared-section tests) is forbidden here
new_response = self.query_region(journey_basic_query + "&data_freshness=realtime&forbidden_uris[]=M&")
assert get_arrivals(new_response) == ['20120614T080436', '20120614T080520']
assert get_used_vj(new_response) == [[], ['vehicle_journey:vjA:modified:1:vjA_delayed']]
# it should not have changed anything for the theoretical (base) schedule
new_base = self.query_region(journey_basic_query + "&data_freshness=base_schedule")
assert get_arrivals(new_base) == ['20120614T080222', '20120614T080436']
assert get_used_vj(new_base) == [['vehicle_journey:vjA'], []]
# we then try to send a delay on another train.
# we should not have lost the first delay
self.send_mock(
"vjB",
"20120614",
'modified',
[
UpdatedStopTime(
"stop_point:stopB",
tstamp("20120614T180224"),
tstamp("20120614T180225"),
arrival_delay=60 + 24,
departure_delay=60 + 25,
),
UpdatedStopTime(
"stop_point:stopA",
tstamp("20120614T180400"),
tstamp("20120614T180400"),
message="bob's in the place",
),
],
)
pt_response = self.query_region('vehicle_journeys/vehicle_journey:vjA?_current_datetime=20120614T1337')
assert len(pt_response['disruptions']) == 1
_check_train_delay_disruption(pt_response['disruptions'][0])
# we should also have the disruption on vjB
assert (
len(
self.query_region('vehicle_journeys/vehicle_journey:vjB?_current_datetime=20120614T1337')[
'disruptions'
]
)
== 1
)
###################################
# We now send a partial delete on B
###################################
self.send_mock(
"vjA",
"20120614",
'modified',
[
UpdatedStopTime(
"stop_point:stopB", arrival=tstamp("20120614T080100"), departure=tstamp("20120614T080100")
),
UpdatedStopTime(
"stop_point:stopA",
arrival=tstamp("20120614T080102"),
departure=tstamp("20120614T080102"),
message='cow on tracks',
arrival_skipped=True,
),
],
disruption_id='vjA_skip_A',
)
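# Assumed semantics of this partial delete: only arrival_skipped=True is sent for
# stopA, so its arrival should be 'deleted' while its departure stays 'unchanged',
# and the trip-level effect should become REDUCED_SERVICE (all checked below).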
# A new vj is created
vjs = self.query_region('vehicle_journeys?_current_datetime=20120614T1337')
assert len(vjs['vehicle_journeys']) == (initial_nb_vehicle_journeys + 2)
vjA = self.query_region('vehicle_journeys/vehicle_journey:vjA?_current_datetime=20120614T1337')
# we now have 2 disruptions on vjA
assert len(vjA['disruptions']) == 2
all_dis = {d['id']: d for d in vjA['disruptions']}
assert 'vjA_skip_A' in all_dis
dis = all_dis['vjA_skip_A']
is_valid_disruption(dis, chaos_disrup=False)
assert dis['disruption_id'] == 'vjA_skip_A'
assert dis['severity']['effect'] == 'REDUCED_SERVICE'
assert len(dis['impacted_objects']) == 1
ptobj = dis['impacted_objects'][0]['pt_object']
assert ptobj['embedded_type'] == 'trip'
assert ptobj['id'] == 'vjA'
assert ptobj['name'] == 'vjA'
# for this partial delete we should also have details on the impacted stops
impacted_objs = get_not_null(dis['impacted_objects'][0], 'impacted_stops')
assert len(impacted_objs) == 2
imp_obj1 = impacted_objs[0]
assert get_valid_time(get_not_null(imp_obj1, 'amended_arrival_time')) == _dt(8, 1, 0)
assert get_valid_time(get_not_null(imp_obj1, 'amended_departure_time')) == _dt(8, 1, 0)
assert get_not_null(imp_obj1, 'stop_time_effect') == 'unchanged'
assert get_not_null(imp_obj1, 'arrival_status') == 'unchanged'
assert get_not_null(imp_obj1, 'departure_status') == 'unchanged'
assert get_valid_time(get_not_null(imp_obj1, 'base_arrival_time')) == _dt(8, 1, 0)
assert get_valid_time(get_not_null(imp_obj1, 'base_departure_time')) == _dt(8, 1, 0)
imp_obj2 = impacted_objs[1]
assert 'amended_arrival_time' not in imp_obj2
assert get_not_null(imp_obj2, 'cause') == 'cow on tracks'
assert get_not_null(imp_obj2, 'stop_time_effect') == 'deleted' # the stoptime is marked as deleted
assert get_not_null(imp_obj2, 'arrival_status') == 'deleted'
assert get_not_null(imp_obj2, 'departure_status') == 'unchanged' # the departure is not changed
assert get_valid_time(get_not_null(imp_obj2, 'base_departure_time')) == _dt(8, 1, 2)
assert get_valid_time(get_not_null(imp_obj2, 'base_arrival_time')) == _dt(8, 1, 2)
@dataset(MAIN_ROUTING_TEST_SETTING)
class TestKirinOnVJDelayDayAfter(MockKirinDisruptionsFixture):
def test_vj_delay_day_after(self):
"""
send a mock Kirin disruption delaying the vj to the day after, and check that the delayed vj is not used on the original day
"""
response = self.query_region(journey_basic_query + "&data_freshness=realtime")
pt_response = self.query_region('vehicle_journeys/vehicle_journey:vjA?_current_datetime=20120614T1337')
# with no disruption yet, we have 2 journeys: one direct walk and one using vjA
assert get_arrivals(response) == ['20120614T080222', '20120614T080436'] # pt_walk + vj 08:01
assert get_used_vj(response) == [['vjA'], []]
pt_response = self.query_region('vehicle_journeys')
initial_nb_vehicle_journeys = len(pt_response['vehicle_journeys'])
assert initial_nb_vehicle_journeys == 9
# check that we have the next vj
s_coord = "0.0000898312;0.0000898312" # coordinate of S in the dataset
r_coord = "0.00188646;0.00071865" # coordinate of R in the dataset
journey_later_query = "journeys?from={from_coord}&to={to_coord}&datetime={datetime}".format(
from_coord=s_coord, to_coord=r_coord, datetime="20120614T080500"
)
later_response = self.query_region(journey_later_query + "&data_freshness=realtime")
assert get_arrivals(later_response) == ['20120614T080936', '20120614T180222'] # pt_walk + vj 18:01
assert get_used_vj(later_response) == [[], ['vehicle_journey:vjB']]
# no disruption yet
pt_response = self.query_region('vehicle_journeys/vehicle_journey:vjA?_current_datetime=20120614T1337')
assert len(pt_response['disruptions']) == 0
# sending disruption delaying VJ to the next day
self.send_mock(
"vjA",
"20120614",
'modified',
[
UpdatedStopTime("stop_point:stopB", tstamp("20120615T070224"), tstamp("20120615T070224")),
UpdatedStopTime("stop_point:stopA", tstamp("20120615T070400"), tstamp("20120615T070400")),
],
disruption_id='96231_2015-07-28_0',
effect='unknown',
)
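# Note: the circulation date sent stays "20120614" (the base day of the trip) even
# though the amended stop times fall on 20120615, so the delayed vj should only be
# reachable when querying the day after (checked at the end of this test).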
# A new vj is created
pt_response = self.query_region('vehicle_journeys')
assert len(pt_response['vehicle_journeys']) == (initial_nb_vehicle_journeys + 1)
vj_ids = [vj['id'] for vj in pt_response['vehicle_journeys']]
assert 'vehicle_journey:vjA:modified:0:96231_2015-07-28_0' in vj_ids
# we should see the disruption
pt_response = self.query_region('vehicle_journeys/vehicle_journey:vjA?_current_datetime=20120614T1337')
assert len(pt_response['disruptions']) == 1
is_valid_disruption(pt_response['disruptions'][0], chaos_disrup=False)
assert pt_response['disruptions'][0]['disruption_id'] == '96231_2015-07-28_0'
# In order not to disturb the test, line M (added later for the shared-section tests) is forbidden here
new_response = self.query_region(journey_basic_query + "&data_freshness=realtime&forbidden_uris[]=M&")
assert get_arrivals(new_response) == ['20120614T080436', '20120614T180222'] # pt_walk + vj 18:01
assert get_used_vj(new_response) == [[], ['vjB']]
# it should not have changed anything for the base-schedule
new_base = self.query_region(journey_basic_query + "&data_freshness=base_schedule")
assert get_arrivals(new_base) == ['20120614T080222', '20120614T080436']
assert get_used_vj(new_base) == [['vjA'], []]
# the day after, we can use the delayed vj
journey_day_after_query = "journeys?from={from_coord}&to={to_coord}&datetime={datetime}".format(
from_coord=s_coord, to_coord=r_coord, datetime="20120615T070000"
)
day_after_response = self.query_region(journey_day_after_query + "&data_freshness=realtime")
assert get_arrivals(day_after_response) == [
'20120615T070436',
'20120615T070520',
] # pt_walk + rt 07:02:24
assert get_used_vj(day_after_response) == [[], ['vehicle_journey:vjA:modified:0:96231_2015-07-28_0']]
# it should not have changed anything for the theoretical (base) schedule the day after
day_after_base = self.query_region(journey_day_after_query + "&data_freshness=base_schedule")
assert get_arrivals(day_after_base) == ['20120615T070436', '20120615T080222']
assert get_used_vj(day_after_base) == [[], ['vjA']]
@dataset(MAIN_ROUTING_TEST_SETTING)
class TestKirinOnVJOnTime(MockKirinDisruptionsFixture):
def test_vj_on_time(self):
"""
We don't want to output an on-time disruption on journeys,
departures, arrivals and route_schedules (nor on
stop_schedules, but no vj disruption is output there for
the moment).
"""
disruptions_before = self.query_region('disruptions?_current_datetime=20120614T080000')
nb_disruptions_before = len(disruptions_before['disruptions'])
# New disruption same as base schedule
self.send_mock(
"vjA",
"20120614",
'modified',
[
UpdatedStopTime(
"stop_point:stopB",
arrival_delay=0,
departure_delay=0,
arrival=tstamp("20120614T080100"),
departure=tstamp("20120614T080100"),
message='on time',
),
UpdatedStopTime(
"stop_point:stopA",
arrival_delay=0,
departure_delay=0,
arrival=tstamp("20120614T080102"),
departure=tstamp("20120614T080102"),
),
],
disruption_id='vjA_on_time',
effect='unknown',
)
# We have a new disruption
disruptions_after = self.query_region('disruptions?_current_datetime=20120614T080000')
assert nb_disruptions_before + 1 == len(disruptions_after['disruptions'])
assert has_the_disruption(disruptions_after, 'vjA_on_time')
# it's not in journeys
journey_query = journey_basic_query + "&data_freshness=realtime&_current_datetime=20120614T080000"
response = self.query_region(journey_query)
assert not has_the_disruption(response, 'vjA_on_time')
self.is_valid_journey_response(response, journey_query)
assert response['journeys'][0]['sections'][1]['data_freshness'] == 'realtime'
# it's not in departures
response = self.query_region(
"stop_points/stop_point:stopB/departures?_current_datetime=20120614T080000&data_freshness=realtime"
)
assert not has_the_disruption(response, 'vjA_on_time')
assert response['departures'][0]['stop_date_time']['data_freshness'] == 'realtime'
# it's not in arrivals
response = self.query_region(
"stop_points/stop_point:stopA/arrivals?_current_datetime=20120614T080000&data_freshness=realtime"
)
assert not has_the_disruption(response, 'vjA_on_time')
assert response['arrivals'][0]['stop_date_time']['data_freshness'] == 'realtime'
# it's not in stop_schedules
response = self.query_region(
"stop_points/stop_point:stopB/lines/A/stop_schedules?_current_datetime=20120614T080000&data_freshness=realtime"
)
assert not has_the_disruption(response, 'vjA_on_time')
assert response['stop_schedules'][0]['date_times'][0]['data_freshness'] == 'realtime'
assert response['stop_schedules'][0]['date_times'][0]['base_date_time'] == '20120614T080100'
assert response['stop_schedules'][0]['date_times'][0]['date_time'] == '20120614T080100'
# it's not in terminus_schedules
response = self.query_region(
"stop_points/stop_point:stopB/lines/A/terminus_schedules?_current_datetime=20120614T080000&data_freshness=realtime"
)
assert not has_the_disruption(response, 'vjA_on_time')
assert response['terminus_schedules'][0]['date_times'][0]['data_freshness'] == 'realtime'
assert response['terminus_schedules'][0]['date_times'][0]['base_date_time'] == '20120614T080100'
assert response['terminus_schedules'][0]['date_times'][0]['date_time'] == '20120614T080100'
# it's not in route_schedules
response = self.query_region(
"stop_points/stop_point:stopB/lines/A/route_schedules?_current_datetime=20120614T080000&data_freshness=realtime"
)
assert not has_the_disruption(response, 'vjA_on_time')
# no realtime flags on route_schedules yet
# New disruption one second late
self.send_mock(
"vjA",
"20120614",
'modified',
[
UpdatedStopTime(
"stop_point:stopB",
arrival_delay=1,
departure_delay=1,
arrival=tstamp("20120614T080101"),
departure=tstamp("20120614T080101"),
message='on time',
),
UpdatedStopTime(
"stop_point:stopA",
arrival_delay=1,
departure_delay=1,
arrival=tstamp("20120614T080103"),
departure=tstamp("20120614T080103"),
),
],
disruption_id='vjA_late',
)
# We have a new disruption
disruptions_after = self.query_region('disruptions?_current_datetime=20120614T080000')
assert nb_disruptions_before + 2 == len(disruptions_after['disruptions'])
assert has_the_disruption(disruptions_after, 'vjA_late')
# it's in journeys
response = self.query_region(journey_query)
assert has_the_disruption(response, 'vjA_late')
self.is_valid_journey_response(response, journey_query)
assert response['journeys'][0]['sections'][1]['data_freshness'] == 'realtime'
# it's in departures
response = self.query_region(
"stop_points/stop_point:stopB/departures?_current_datetime=20120614T080000&data_freshness=realtime"
)
assert has_the_disruption(response, 'vjA_late')
assert response['departures'][0]['stop_date_time']['departure_date_time'] == '20120614T080101'
assert response['departures'][0]['stop_date_time']['data_freshness'] == 'realtime'
# it's in arrivals
response = self.query_region(
"stop_points/stop_point:stopA/arrivals?_current_datetime=20120614T080000&data_freshness=realtime"
)
assert has_the_disruption(response, 'vjA_late')
assert response['arrivals'][0]['stop_date_time']['arrival_date_time'] == '20120614T080103'
assert response['arrivals'][0]['stop_date_time']['data_freshness'] == 'realtime'
# it's in stop_schedules
response = self.query_region(
"stop_points/stop_point:stopB/lines/A/stop_schedules?_current_datetime=20120614T080000&data_freshness=realtime"
)
assert has_the_disruption(response, 'vjA_late')
assert response['stop_schedules'][0]['date_times'][0]['links'][1]['type'] == 'disruption'
assert response['stop_schedules'][0]['date_times'][0]['date_time'] == '20120614T080101'
assert response['stop_schedules'][0]['date_times'][0]['base_date_time'] == '20120614T080100'
assert response['stop_schedules'][0]['date_times'][0]['data_freshness'] == 'realtime'
# it's in terminus_schedules
response = self.query_region(
"stop_points/stop_point:stopB/lines/A/terminus_schedules?_current_datetime=20120614T080000&data_freshness=realtime"
)
assert has_the_disruption(response, 'vjA_late')
assert response['terminus_schedules'][0]['date_times'][0]['links'][1]['type'] == 'disruption'
assert response['terminus_schedules'][0]['date_times'][0]['date_time'] == '20120614T080101'
assert response['terminus_schedules'][0]['date_times'][0]['base_date_time'] == '20120614T080100'
assert response['terminus_schedules'][0]['date_times'][0]['data_freshness'] == 'realtime'
# it's in route_schedules
response = self.query_region(
"stop_points/stop_point:stopB/lines/A/route_schedules?_current_datetime=20120614T080000&data_freshness=realtime"
)
assert has_the_disruption(response, 'vjA_late')
# no realtime flags on route_schedules yet
MAIN_ROUTING_TEST_SETTING_NO_ADD = {
'main_routing_test': {
'kraken_args': [
'--BROKER.rt_topics=' + rt_topic,
'spawn_maintenance_worker',
] # also check that by default 'is_realtime_add_enabled=0'
}
}
MAIN_ROUTING_TEST_SETTING = deepcopy(MAIN_ROUTING_TEST_SETTING_NO_ADD)
MAIN_ROUTING_TEST_SETTING['main_routing_test']['kraken_args'].append('--GENERAL.is_realtime_add_enabled=1')
MAIN_ROUTING_TEST_SETTING['main_routing_test']['kraken_args'].append('--GENERAL.is_realtime_add_trip_enabled=1')
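# Effective kraken_args after the two appends above (sketch; rt_topic is a variable
# defined earlier in this file):
#   ['--BROKER.rt_topics=<rt_topic>', 'spawn_maintenance_worker',
#    '--GENERAL.is_realtime_add_enabled=1', '--GENERAL.is_realtime_add_trip_enabled=1']
# deepcopy() keeps MAIN_ROUTING_TEST_SETTING_NO_ADD untouched for TestKrakenNoAdd below.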
@dataset(MAIN_ROUTING_TEST_SETTING)
class TestKirinOnNewStopTimeAtTheEnd(MockKirinDisruptionsFixture):
def test_add_and_delete_one_stop_time_at_the_end(self):
"""
1. create a new stop_time to add a final stop at C and
test that a new journey is possible with a section of type public_transport from B to C
2. delete the added stop_time and verify that the public_transport section is absent
3. delete the stop_time again and verify that the public_transport section is still absent
"""
disruptions_before = self.query_region('disruptions?_current_datetime=20120614T080000')
nb_disruptions_before = len(disruptions_before['disruptions'])
# New disruption with two stop_times same as base schedule and
# a new stop_time on stop_point:stopC added at the end
self.send_mock(
"vjA",
"20120614",
'modified',
[
UpdatedStopTime(
"stop_point:stopB",
arrival_delay=0,
departure_delay=0,
arrival=tstamp("20120614T080100"),
departure=tstamp("20120614T080100"),
message='on time',
),
UpdatedStopTime(
"stop_point:stopA",
arrival_delay=0,
departure_delay=0,
arrival=tstamp("20120614T080102"),
departure=tstamp("20120614T080102"),
),
UpdatedStopTime(
"stop_point:stopC",
arrival_delay=0,
departure_delay=0,
is_added=True,
arrival=tstamp("20120614T080104"),
departure=tstamp("20120614T080104"),
),
],
disruption_id='new_stop_time',
)
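# is_added=True marks stopC as a brand-new stop on the trip; the expected
# trip-level severity for such an addition is MODIFIED_SERVICE (checked below).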
# We have a new disruption to add a new stop_time at stop_point:stopC in vehicle_journey 'vjA'
disruptions_after = self.query_region('disruptions?_current_datetime=20120614T080000')
assert nb_disruptions_before + 1 == len(disruptions_after['disruptions'])
assert has_the_disruption(disruptions_after, 'new_stop_time')
last_disrupt = disruptions_after['disruptions'][-1]
assert last_disrupt['severity']['effect'] == 'MODIFIED_SERVICE'
journey_query = journey_basic_query + "&data_freshness=realtime&_current_datetime=20120614T080000"
response = self.query_region(journey_query)
assert has_the_disruption(response, 'new_stop_time')
self.is_valid_journey_response(response, journey_query)
assert response['journeys'][0]['sections'][1]['data_freshness'] == 'realtime'
assert response['journeys'][0]['sections'][1]['display_informations']['physical_mode'] == 'Tramway'
B_C_query = "journeys?from={from_coord}&to={to_coord}&datetime={datetime}".format(
from_coord='stop_point:stopB', to_coord='stop_point:stopC', datetime='20120614T080000'
)
# The result with base_schedule should not have a journey with public_transport from B to C
base_journey_query = B_C_query + "&data_freshness=base_schedule&_current_datetime=20120614T080000"
response = self.query_region(base_journey_query)
assert not has_the_disruption(response, 'new_stop_time')
self.is_valid_journey_response(response, base_journey_query)
assert len(response['journeys']) == 1 # check we only have one journey
assert len(response['journeys'][0]['sections']) == 1
assert response['journeys'][0]['sections'][0]['type'] == 'street_network'
assert 'data_freshness' not in response['journeys'][0]['sections'][0] # means it's base_schedule
# The result with realtime should have a journey with public_transport from B to C
rt_journey_query = B_C_query + "&data_freshness=realtime&_current_datetime=20120614T080000"
response = self.query_region(rt_journey_query)
assert has_the_disruption(response, 'new_stop_time')
self.is_valid_journey_response(response, rt_journey_query)
assert len(response['journeys']) == 2 # check there's a new journey possible
assert response['journeys'][0]['sections'][0]['data_freshness'] == 'realtime'
assert response['journeys'][0]['sections'][0]['type'] == 'public_transport'
assert response['journeys'][0]['sections'][0]['to']['id'] == 'stop_point:stopC'
assert response['journeys'][0]['sections'][0]['duration'] == 4
assert response['journeys'][0]['status'] == 'MODIFIED_SERVICE'
assert 'data_freshness' not in response['journeys'][1]['sections'][0] # means it's base_schedule
assert response['journeys'][1]['sections'][0]['type'] == 'street_network'
# New disruption with a deleted stop_time recently added at stop_point:stopC
self.send_mock(
"vjA",
"20120614",
'modified',
[
UpdatedStopTime(
"stop_point:stopC",
arrival_delay=0,
departure_delay=0,
arrival=tstamp("20120614T080104"),
departure=tstamp("20120614T080104"),
message='stop_time deleted',
arrival_skipped=True,
)
],
disruption_id='deleted_stop_time',
)
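# Assumed behaviour: skipping the arrival of a previously *added* stop removes it
# from the realtime vj entirely, so the B->C public_transport journey should
# disappear; re-sending the same deletion afterwards must be idempotent.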
# We have a new disruption with a deleted stop_time at stop_point:stopC in vehicle_journey 'vjA'
disruptions_with_deleted = self.query_region('disruptions?_current_datetime=20120614T080000')
assert len(disruptions_after['disruptions']) + 1 == len(disruptions_with_deleted['disruptions'])
assert has_the_disruption(disruptions_with_deleted, 'deleted_stop_time')
# The result with realtime should not have a journey with public_transport from B to C
# since the added stop_time has been deleted by the last disruption
rt_journey_query = B_C_query + "&data_freshness=realtime&_current_datetime=20120614T080000"
response = self.query_region(rt_journey_query)
assert not has_the_disruption(response, 'new_stop_time')
self.is_valid_journey_response(response, rt_journey_query)
assert len(response['journeys']) == 1
assert len(response['journeys'][0]['sections']) == 1
assert response['journeys'][0]['sections'][0]['type'] == 'street_network'
assert 'data_freshness' not in response['journeys'][0]['sections'][0]
# New disruption deleting the already-deleted stop_time at stop_point:stopC
self.send_mock(
"vjA",
"20120614",
'modified',
[
UpdatedStopTime(
"stop_point:stopC",
arrival_delay=0,
departure_delay=0,
arrival=tstamp("20120614T080104"),
departure=tstamp("20120614T080104"),
message='stop_time deleted',
arrival_skipped=True,
)
],
disruption_id='re_deleted_stop_time',
)
# We have a new disruption with a deleted stop_time at stop_point:stopC in vehicle_journey 'vjA'
disruptions_with_deleted = self.query_region('disruptions?_current_datetime=20120614T080000')
assert len(disruptions_after['disruptions']) + 2 == len(disruptions_with_deleted['disruptions'])
assert has_the_disruption(disruptions_with_deleted, 're_deleted_stop_time')
# The result with realtime should not have a journey with public_transport from B to C
rt_journey_query = B_C_query + "&data_freshness=realtime&_current_datetime=20120614T080000"
response = self.query_region(rt_journey_query)
assert not has_the_disruption(response, 'new_stop_time')
self.is_valid_journey_response(response, rt_journey_query)
assert len(response['journeys']) == 1
@dataset(MAIN_ROUTING_TEST_SETTING)
class TestKirinReadTripEffectFromTripUpdate(MockKirinDisruptionsFixture):
def test_read_trip_effect_from_tripupdate(self):
disruptions_before = self.query_region('disruptions?_current_datetime=20120614T080000')
nb_disruptions_before = len(disruptions_before['disruptions'])
assert nb_disruptions_before == 12
vjs_before = self.query_region('vehicle_journeys')
assert len(vjs_before['vehicle_journeys']) == 9
self.send_mock(
"vjA",
"20120614",
'modified',
[
UpdatedStopTime(
"stop_point:stopB",
arrival=tstamp("20120614T080224"),
departure=tstamp("20120614T080225"),
arrival_delay=0,
departure_delay=0,
),
UpdatedStopTime(
"stop_point:stopA",
arrival=tstamp("20120614T080400"),
departure=tstamp("20120614T080400"),
message='stop_time deleted',
arrival_skipped=True,
departure_skipped=True,
),
],
disruption_id='reduced_service_vjA',
effect='reduced_service',
)
disrupts = self.query_region('disruptions?_current_datetime=20120614T080000')
assert len(disrupts['disruptions']) == 13
assert has_the_disruption(disrupts, 'reduced_service_vjA')
last_disrupt = disrupts['disruptions'][-1]
assert last_disrupt['severity']['effect'] == 'REDUCED_SERVICE'
assert last_disrupt['severity']['name'] == 'reduced service'
vjs_after = self.query_region('vehicle_journeys')
# we got a new vj due to the disruption, which means the disruption is handled correctly
assert len(vjs_after['vehicle_journeys']) == 10
@dataset(MAIN_ROUTING_TEST_SETTING)
class TestKirinSchedulesNewStopTimeInBetween(MockKirinDisruptionsFixture):
def test_schedules_add_one_stop_time(self):
"""
Checking that when a stop is added on a trip, /departures and /stop_schedules are updated
"""
disruptions_before = self.query_region('disruptions?_current_datetime=20120614T080000')
base_query = 'stop_areas/stopC/{api}?from_datetime={dt}&_current_datetime={dt}&data_freshness={df}'
departures = self.query_region(base_query.format(api='departures', dt='20120614T080100', df='realtime'))
assert len(departures['departures']) == 0
stop_schedules = self.query_region(
base_query.format(api='stop_schedules', dt='20120614T080100', df='realtime')
)
assert len(stop_schedules['stop_schedules']) == 1
assert stop_schedules['stop_schedules'][0]['display_informations']['label'] == '1D'
assert not stop_schedules['stop_schedules'][0]['date_times']
# New disruption with a new stop_time in between B and A of the VJ = vjA
self.send_mock(
"vjA",
"20120614",
'modified',
[
UpdatedStopTime(
"stop_point:stopB",
arrival=tstamp("20120614T080100"),
departure=tstamp("20120614T080100"),
arrival_delay=0,
departure_delay=0,
),
UpdatedStopTime(
"stop_point:stopC",
arrival_delay=0,
departure_delay=0,
is_added=True,
arrival=tstamp("20120614T080330"),
departure=tstamp("20120614T080331"),
),
UpdatedStopTime(
"stop_point:stopA",
arrival=tstamp("20120614T080400"),
departure=tstamp("20120614T080400"),
arrival_delay=3 * 60 + 58,
departure_delay=3 * 60 + 58,
),
],
disruption_id='vjA_delayed_with_new_stop_time',
effect='modified',
)
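# Adding a stop in between appears to create a new route for the amended vj:
# base_schedule then exposes 2 stop_schedules on stopC (both without date_times),
# while realtime exposes the new 08:03:31 departure on the amended route (checked below).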
disruptions_after = self.query_region('disruptions?_current_datetime=20120614T080000')
assert len(disruptions_before['disruptions']) + 1 == len(disruptions_after['disruptions'])
assert has_the_disruption(disruptions_after, 'vjA_delayed_with_new_stop_time')
# still nothing for base_schedule
departures = self.query_region(
base_query.format(api='departures', dt='20120614T080100', df='base_schedule')
)
assert len(departures['departures']) == 0
stop_schedules = self.query_region(
base_query.format(api='stop_schedules', dt='20120614T080100', df='base_schedule')
)
assert len(stop_schedules['stop_schedules']) == 2 # a new route is linked (not used in base_schedule)
assert not stop_schedules['stop_schedules'][0]['date_times']
assert not stop_schedules['stop_schedules'][1]['date_times']
# departures updated in realtime
departures = self.query_region(base_query.format(api='departures', dt='20120614T080100', df='realtime'))
assert len(departures['departures']) == 1
assert departures['departures'][0]['stop_date_time']['data_freshness'] == 'realtime'
assert (
departures['departures'][0]['stop_date_time']['arrival_date_time'] == '20120614T080330'
) # new stop
assert departures['departures'][0]['stop_date_time']['departure_date_time'] == '20120614T080331'
assert 'vjA_delayed_with_new_stop_time' in [
l['id'] for l in departures['departures'][0]['display_informations']['links']
] # link to disruption
assert 'vjA_delayed_with_new_stop_time' in [
d['id'] for d in departures['disruptions']
] # disruption in collection
# stop_schedules updated in realtime
stop_schedules = self.query_region(
base_query.format(api='stop_schedules', dt='20120614T080100', df='realtime')
)
assert len(stop_schedules['stop_schedules']) == 2
assert stop_schedules['stop_schedules'][1]['display_informations']['label'] == '1D'
assert not stop_schedules['stop_schedules'][1]['date_times'] # still no departure on other route
assert stop_schedules['stop_schedules'][0]['display_informations']['label'] == '1A'
assert stop_schedules['stop_schedules'][0]['date_times'][0]['data_freshness'] == 'realtime'
assert (
stop_schedules['stop_schedules'][0]['date_times'][0]['date_time'] == '20120614T080331'
) # new departure
assert 'vjA_delayed_with_new_stop_time' in [
l['id'] for l in stop_schedules['stop_schedules'][0]['date_times'][0]['links']
] # link to disruption
assert 'vjA_delayed_with_new_stop_time' in [
d['id'] for d in departures['disruptions']
] # disruption in collection
@dataset(MAIN_ROUTING_TEST_SETTING)
class TestKirinOnNewStopTimeInBetween(MockKirinDisruptionsFixture):
def test_add_modify_and_delete_one_stop_time(self):
"""
1. Create a disruption with delay on VJ = vjA (with stop_time B and A) and verify the journey
for a query from S to R: S-> walk-> B -> public_transport -> A -> walk -> R
2. Add a new stop_time (stop_point C) in between B and A in the VJ = vjA and verify the journey as above
3. Verify the journey for a query from S to C: S-> walk-> B -> public_transport -> C
4. Delete the added stop_time and re-verify the journey for the query in 3.
"""
# New disruption with a delay of VJ = vjA
self.send_mock(
"vjA",
"20120614",
'modified',
[
UpdatedStopTime(
"stop_point:stopB",
arrival=tstamp("20120614T080224"),
departure=tstamp("20120614T080225"),
arrival_delay=60 + 24,
departure_delay=60 + 25,
message='cow on tracks',
),
UpdatedStopTime(
"stop_point:stopA",
arrival=tstamp("20120614T080400"),
departure=tstamp("20120614T080400"),
arrival_delay=3 * 60 + 58,
departure_delay=3 * 60 + 58,
),
],
disruption_id='vjA_delayed',
)
# Verify disruptions
disrupts = self.query_region('disruptions?_current_datetime=20120614T080000')
assert len(disrupts['disruptions']) == 13
assert has_the_disruption(disrupts, 'vjA_delayed')
# Query from S to R: the journey without delay (departure from B at 20120614T080100
# and arrival at A at 20120614T080102) is returned
response = self.query_region(journey_basic_query + "&data_freshness=realtime")
assert len(response['journeys']) == 2
assert len(response['journeys'][0]['sections']) == 3
assert len(response['journeys'][1]['sections']) == 1
assert response['journeys'][0]['sections'][1]['type'] == 'public_transport'
assert response['journeys'][0]['sections'][1]['data_freshness'] == 'base_schedule'
assert response['journeys'][0]['sections'][1]['departure_date_time'] == '20120614T080101'
assert response['journeys'][0]['sections'][1]['arrival_date_time'] == '20120614T080103'
assert len(response['journeys'][0]['sections'][1]['stop_date_times']) == 2
assert response['journeys'][0]['sections'][0]['type'] == 'street_network'
# A new request with a departure 2 minutes later gives us the delayed journey
response = self.query_region(sub_query + "&data_freshness=realtime&datetime=20120614T080200")
assert len(response['journeys']) == 2
assert len(response['journeys'][0]['sections']) == 3
assert len(response['journeys'][1]['sections']) == 1
assert response['journeys'][0]['sections'][1]['type'] == 'public_transport'
assert response['journeys'][0]['sections'][1]['data_freshness'] == 'realtime'
assert response['journeys'][0]['sections'][1]['departure_date_time'] == '20120614T080225'
assert response['journeys'][0]['sections'][1]['arrival_date_time'] == '20120614T080400'
assert len(response['journeys'][0]['sections'][1]['stop_date_times']) == 2
assert response['journeys'][0]['sections'][0]['type'] == 'street_network'
# New disruption with a new stop_time in between B and A of the VJ = vjA
self.send_mock(
"vjA",
"20120614",
'modified',
[
UpdatedStopTime(
"stop_point:stopB",
arrival=tstamp("20120614T080224"),
departure=tstamp("20120614T080225"),
arrival_delay=60 + 24,
departure_delay=60 + 25,
message='cow on tracks',
),
UpdatedStopTime(
"stop_point:stopC",
arrival_delay=0,
departure_delay=0,
is_added=True,
arrival=tstamp("20120614T080330"),
departure=tstamp("20120614T080330"),
),
UpdatedStopTime(
"stop_point:stopA",
arrival=tstamp("20120614T080400"),
departure=tstamp("20120614T080400"),
arrival_delay=3 * 60 + 58,
departure_delay=3 * 60 + 58,
),
],
disruption_id='vjA_delayed_with_new_stop_time',
effect='modified',
)
# Verify disruptions
disrupts = self.query_region('disruptions?_current_datetime=20120614T080000')
assert len(disrupts['disruptions']) == 14
assert has_the_disruption(disrupts, 'vjA_delayed_with_new_stop_time')
last_disrupt = disrupts['disruptions'][-1]
assert last_disrupt['severity']['effect'] == 'MODIFIED_SERVICE'
# the journey now includes the new stop_time in its public_transport section
response = self.query_region(sub_query + "&data_freshness=realtime&datetime=20120614T080200")
assert len(response['journeys']) == 2
assert len(response['journeys'][0]['sections']) == 3
assert len(response['journeys'][1]['sections']) == 1
assert response['journeys'][0]['sections'][1]['type'] == 'public_transport'
assert response['journeys'][0]['sections'][1]['data_freshness'] == 'realtime'
assert response['journeys'][0]['sections'][1]['departure_date_time'] == '20120614T080225'
assert response['journeys'][0]['sections'][1]['arrival_date_time'] == '20120614T080400'
assert len(response['journeys'][0]['sections'][1]['stop_date_times']) == 3
assert (
response['journeys'][0]['sections'][1]['stop_date_times'][1]['stop_point']['name']
== 'stop_point:stopC'
)
assert response['journeys'][0]['sections'][0]['type'] == 'street_network'
# Query from S to C: Uses a public_transport from B to C
S_to_C_query = "journeys?from={from_coord}&to={to_coord}".format(
from_coord='0.0000898312;0.0000898312', to_coord='stop_point:stopC'
)
base_journey_query = S_to_C_query + "&data_freshness=realtime&datetime=20120614T080200"
response = self.query_region(base_journey_query)
assert len(response['journeys']) == 2
assert len(response['journeys'][0]['sections']) == 2
assert len(response['journeys'][1]['sections']) == 1
assert response['journeys'][0]['sections'][1]['type'] == 'public_transport'
assert response['journeys'][0]['sections'][1]['data_freshness'] == 'realtime'
assert response['journeys'][0]['sections'][1]['departure_date_time'] == '20120614T080225'
assert response['journeys'][0]['sections'][1]['arrival_date_time'] == '20120614T080330'
# New disruption with a deleted stop_time recently added at stop_point:stopC
self.send_mock(
"vjA",
"20120614",
'modified',
[
UpdatedStopTime(
"stop_point:stopC",
arrival_delay=0,
departure_delay=0,
arrival=tstamp("20120614T080330"),
departure=tstamp("20120614T080330"),
message='stop_time deleted',
arrival_skipped=True,
)
],
disruption_id='deleted_stop_time',
)
# Verify disruptions
disrupts = self.query_region('disruptions?_current_datetime=20120614T080000')
assert len(disrupts['disruptions']) == 15
assert has_the_disruption(disrupts, 'deleted_stop_time')
# the journey no longer has a public_transport section
response = self.query_region(base_journey_query)
assert len(response['journeys']) == 1
assert len(response['journeys'][0]['sections']) == 1
assert response['journeys'][0]['type'] == 'best'
@dataset(MAIN_ROUTING_TEST_SETTING)
class TestKirinOnNewStopTimeAtTheBeginning(MockKirinDisruptionsFixture):
def test_add_modify_and_delete_one_stop_time(self):
"""
1. create a new stop_time to add a new first stop at C and
test that a new journey is possible with a section of type public_transport from C to A
2. delete the added stop_time and verify that the public_transport section is absent
"""
# Verify disruptions
disrupts = self.query_region('disruptions?_current_datetime=20120614T080000')
assert len(disrupts['disruptions']) == 12
C_to_A_query = "journeys?from={from_coord}&to={to_coord}".format(
from_coord='stop_point:stopC', to_coord='stop_point:stopA'
)
# Query from C to A: the journey doesn't have any public_transport
base_journey_query = C_to_A_query + "&data_freshness=realtime&datetime=20120614T080000&walking_speed=0.7"
response = self.query_region(base_journey_query)
assert len(response['journeys']) == 1
assert len(response['journeys'][0]['sections']) == 1
assert response['journeys'][0]['sections'][0]['type'] == 'street_network'
assert 'data_freshness' not in response['journeys'][0]['sections'][0]
assert response['journeys'][0]['durations']['walking'] == 127
# New disruption with two stop_times same as base schedule and
# a new stop_time on stop_point:stopC added at the beginning
self.send_mock(
"vjA",
"20120614",
'modified',
[
UpdatedStopTime(
"stop_point:stopC",
arrival_delay=0,
departure_delay=0,
is_added=True,
arrival=tstamp("20120614T080000"),
departure=tstamp("20120614T080000"),
),
UpdatedStopTime(
"stop_point:stopB",
arrival_delay=0,
departure_delay=0,
arrival=tstamp("20120614T080001"),
departure=tstamp("20120614T080001"),
message='on time',
),
UpdatedStopTime(
"stop_point:stopA",
arrival_delay=0,
departure_delay=0,
arrival=tstamp("20120614T080002"),
departure=tstamp("20120614T080002"),
),
],
disruption_id='new_stop_time',
effect='delayed',
)
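# Here the effect sent is 'delayed', so even though a stop is added, the
# disruption is expected to surface as SIGNIFICANT_DELAYS / 'trip delayed'
# rather than MODIFIED_SERVICE (checked just below).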
# Verify disruptions
disrupts = self.query_region('disruptions?_current_datetime=20120614T080000')
assert len(disrupts['disruptions']) == 13
assert has_the_disruption(disrupts, 'new_stop_time')
last_disruption = disrupts['disruptions'][-1]
assert last_disruption['impacted_objects'][0]['impacted_stops'][0]['arrival_status'] == 'added'
assert last_disruption['impacted_objects'][0]['impacted_stops'][0]['departure_status'] == 'added'
assert last_disruption['severity']['effect'] == 'SIGNIFICANT_DELAYS'
assert last_disruption['severity']['name'] == 'trip delayed'
# Query from C to A: the journey should have a public_transport section from C to A
response = self.query_region(base_journey_query)
assert len(response['journeys']) == 2
assert len(response['journeys'][1]['sections']) == 1
assert response['journeys'][0]['sections'][0]['type'] == 'public_transport'
assert response['journeys'][0]['sections'][0]['data_freshness'] == 'realtime'
assert response['journeys'][0]['sections'][0]['departure_date_time'] == '20120614T080000'
assert response['journeys'][0]['sections'][0]['arrival_date_time'] == '20120614T080002'
assert response['journeys'][1]['sections'][0]['type'] == 'street_network'
# New disruption with a deleted stop_time recently added at stop_point:stopC
self.send_mock(
"vjA",
"20120614",
'modified',
[
UpdatedStopTime(
"stop_point:stopC",
arrival_delay=0,
departure_delay=0,
arrival=tstamp("20120614T080000"),
departure=tstamp("20120614T080000"),
message='stop_time deleted',
arrival_skipped=True,
)
],
disruption_id='deleted_stop_time',
)
# Verify disruptions
disrupts = self.query_region('disruptions?_current_datetime=20120614T080000')
assert len(disrupts['disruptions']) == 14
assert has_the_disruption(disrupts, 'deleted_stop_time')
last_disruption = disrupts['disruptions'][-1]
assert last_disruption['impacted_objects'][0]['impacted_stops'][0]['arrival_status'] == 'deleted'
assert (
last_disruption['impacted_objects'][0]['impacted_stops'][0]['departure_status'] == 'unchanged'
) # only arrival_skipped was sent for stopC, so the departure stays 'unchanged'
assert last_disruption['severity']['effect'] == 'REDUCED_SERVICE'
assert last_disruption['severity']['name'] == 'reduced service'
response = self.query_region(base_journey_query)
assert len(response['journeys']) == 1
assert len(response['journeys'][0]['sections']) == 1
assert response['journeys'][0]['sections'][0]['type'] == 'street_network'
assert 'data_freshness' not in response['journeys'][0]['sections'][0]
assert response['journeys'][0]['durations']['walking'] == 127
pt_response = self.query_region('vehicle_journeys/vehicle_journey:vjA?_current_datetime=20120614T080000')
assert len(pt_response['disruptions']) == 2
@dataset(MAIN_ROUTING_TEST_SETTING_NO_ADD)
class TestKrakenNoAdd(MockKirinDisruptionsFixture):
def test_no_rt_add_possible(self):
"""
trying to add a new_stop_time without allowing it in kraken:
test that it is ignored
(same scenario as test_add_and_delete_one_stop_time_at_the_end(), but a different result is expected)
"""
disruptions_before = self.query_region('disruptions?_current_datetime=20120614T080000')
nb_disruptions_before = len(disruptions_before['disruptions'])
# New disruption same as base schedule
self.send_mock(
"vjA",
"20120614",
'modified',
[
UpdatedStopTime(
"stop_point:stopB",
arrival_delay=0,
departure_delay=0,
arrival=tstamp("20120614T080100"),
departure=tstamp("20120614T080100"),
message='on time',
),
UpdatedStopTime(
"stop_point:stopA",
arrival_delay=0,
departure_delay=0,
arrival=tstamp("20120614T080102"),
departure=tstamp("20120614T080102"),
),
UpdatedStopTime(
"stop_point:stopC",
arrival_delay=0,
departure_delay=0,
is_added=True,
arrival=tstamp("20120614T080104"),
departure=tstamp("20120614T080104"),
),
],
disruption_id='new_stop_time',
)
# No new disruption
disruptions_after = self.query_region('disruptions?_current_datetime=20120614T080000')
assert nb_disruptions_before == len(disruptions_after['disruptions'])
assert not has_the_disruption(disruptions_after, 'new_stop_time')
journey_query = journey_basic_query + "&data_freshness=realtime&_current_datetime=20120614T080000"
response = self.query_region(journey_query)
assert not has_the_disruption(response, 'new_stop_time')
self.is_valid_journey_response(response, journey_query)
assert response['journeys'][0]['sections'][1]['data_freshness'] == 'base_schedule'
B_C_query = "journeys?from={from_coord}&to={to_coord}&datetime={datetime}".format(
from_coord='stop_point:stopB', to_coord='stop_point:stopC', datetime='20120614T080000'
)
base_journey_query = B_C_query + "&data_freshness=base_schedule&_current_datetime=20120614T080000"
response = self.query_region(base_journey_query)
assert not has_the_disruption(response, 'new_stop_time')
self.is_valid_journey_response(response, base_journey_query)
assert len(response['journeys']) == 1 # check we only have one journey
assert 'data_freshness' not in response['journeys'][0]['sections'][0] # means it's base_schedule
rt_journey_query = B_C_query + "&data_freshness=realtime&_current_datetime=20120614T080000"
response = self.query_region(rt_journey_query)
assert not has_the_disruption(response, 'new_stop_time')
self.is_valid_journey_response(response, rt_journey_query)
assert len(response['journeys']) == 1 # check there's no new journey possible
assert 'data_freshness' not in response['journeys'][0]['sections'][0] # means it's base_schedule
@dataset(MAIN_ROUTING_TEST_SETTING)
class TestKirinStopTimeOnDetourAtTheEnd(MockKirinDisruptionsFixture):
def test_stop_time_with_detour_at_the_end(self):
"""
1. create a new_stop_time at C to replace existing one at A so that we have
A deleted_for_detour and C added_for_detour
2. test that a new journey is possible with section type = public_transport from B to C
"""
disruptions_before = self.query_region('disruptions?_current_datetime=20120614T080000')
nb_disruptions_before = len(disruptions_before['disruptions'])
# New disruption with one stop_time same as base schedule, another one deleted and
# a new stop_time on stop_point:stopC added at the end
self.send_mock(
"vjA",
"20120614",
'modified',
[
UpdatedStopTime(
"stop_point:stopB",
arrival_delay=0,
departure_delay=0,
arrival=tstamp("20120614T080100"),
departure=tstamp("20120614T080100"),
message='on time',
),
UpdatedStopTime(
"stop_point:stopA",
arrival_delay=0,
departure_delay=0,
arrival=tstamp("20120614T080102"),
departure=tstamp("20120614T080102"),
arrival_skipped=True,
is_detour=True,
message='deleted for detour',
),
UpdatedStopTime(
"stop_point:stopC",
arrival_delay=0,
departure_delay=0,
arrival=tstamp("20120614T080104"),
departure=tstamp("20120614T080104"),
is_added=True,
is_detour=True,
message='added for detour',
),
],
disruption_id='stop_time_with_detour',
)
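# Assumed detour semantics: is_detour tags the deleted/added pair of stops; only
# the arrival of stopA is skipped here, so its departure should stay 'unchanged',
# and the trip-level severity should be DETOUR (all checked below).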
# Verify disruptions
disrupts = self.query_region('disruptions?_current_datetime=20120614T080000')
assert len(disrupts['disruptions']) == nb_disruptions_before + 1
assert has_the_disruption(disrupts, 'stop_time_with_detour')
last_disrupt = disrupts['disruptions'][-1]
assert last_disrupt['severity']['effect'] == 'DETOUR'
# Verify impacted objects
assert len(last_disrupt['impacted_objects']) == 1
impacted_stops = last_disrupt['impacted_objects'][0]['impacted_stops']
assert len(impacted_stops) == 3
assert bool(impacted_stops[0]['is_detour']) is False
assert impacted_stops[0]['cause'] == 'on time'
assert bool(impacted_stops[1]['is_detour']) is True
assert impacted_stops[1]['cause'] == 'deleted for detour'
assert impacted_stops[1]['departure_status'] == 'unchanged'
assert impacted_stops[1]['arrival_status'] == 'deleted'
assert bool(impacted_stops[2]['is_detour']) is True
assert impacted_stops[2]['cause'] == 'added for detour'
assert impacted_stops[2]['departure_status'] == 'added'
assert impacted_stops[2]['arrival_status'] == 'added'
@dataset(MAIN_ROUTING_TEST_SETTING)
class TestKirinStopTimeOnDetourAndArrivesBeforeDeletedAtTheEnd(MockKirinDisruptionsFixture):
def test_stop_time_with_detour_and_arrival_before_deleted_at_the_end(self):
"""
1. create a new stop_time at C to replace the existing one at A, so that we have A deleted_for_detour
and C added_for_detour with an arrival time earlier than that of A (deleted)
2. Kraken accepts this disruption
"""
disruptions_before = self.query_region('disruptions?_current_datetime=20120614T080000')
nb_disruptions_before = len(disruptions_before['disruptions'])
# New disruption with one stop_time same as base schedule, another one deleted and
# a new stop_time on stop_point:stopC added at the end
self.send_mock(
"vjA",
"20120614",
'modified',
[
UpdatedStopTime(
"stop_point:stopB",
arrival_delay=0,
departure_delay=0,
arrival=tstamp("20120614T080100"),
departure=tstamp("20120614T080100"),
message='on time',
),
UpdatedStopTime(
"stop_point:stopA",
arrival_delay=0,
departure_delay=0,
arrival=tstamp("20120614T080102"),
departure=tstamp("20120614T080102"),
arrival_skipped=True,
departure_skipped=True,
is_detour=True,
message='deleted for detour',
),
UpdatedStopTime(
"stop_point:stopC",
arrival_delay=0,
departure_delay=0,
arrival=tstamp("20120614T080101"),
departure=tstamp("20120614T080101"),
is_added=True,
is_detour=True,
message='added for detour',
),
],
disruption_id='stop_time_with_detour',
)
# Verify disruptions
disrupts = self.query_region('disruptions?_current_datetime=20120614T080000')
assert len(disrupts['disruptions']) == nb_disruptions_before + 1
assert has_the_disruption(disrupts, 'stop_time_with_detour')
last_disrupt = disrupts['disruptions'][-1]
assert last_disrupt['severity']['effect'] == 'DETOUR'
# Verify impacted objects
assert len(last_disrupt['impacted_objects']) == 1
impacted_stops = last_disrupt['impacted_objects'][0]['impacted_stops']
assert len(impacted_stops) == 3
assert bool(impacted_stops[0]['is_detour']) is False
assert impacted_stops[0]['cause'] == 'on time'
assert bool(impacted_stops[1]['is_detour']) is True
assert impacted_stops[1]['cause'] == 'deleted for detour'
assert impacted_stops[1]['departure_status'] == 'deleted'
assert impacted_stops[1]['arrival_status'] == 'deleted'
assert bool(impacted_stops[2]['is_detour']) is True
assert impacted_stops[2]['cause'] == 'added for detour'
assert impacted_stops[2]['departure_status'] == 'added'
assert impacted_stops[2]['arrival_status'] == 'added'
B_C_query = "journeys?from={from_coord}&to={to_coord}&datetime={datetime}".format(
from_coord='stop_point:stopB', to_coord='stop_point:stopC', datetime='20120614T080000'
)
# Query with data_freshness=base_schedule
base_journey_query = B_C_query + "&data_freshness=base_schedule&_current_datetime=20120614T080000"
# There is no public transport from B to C
response = self.query_region(base_journey_query)
assert len(response['journeys']) == 1
assert response['journeys'][0]['type'] == 'best'
assert 'data_freshness' not in response['journeys'][0]['sections'][0] # means it's base_schedule
# Query with data_freshness=realtime
base_journey_query = B_C_query + "&data_freshness=realtime&_current_datetime=20120614T080000"
# There is a public transport from B to C with realtime having only two stop_date_times
# as the deleted-for-detour stop should not be displayed
response = self.query_region(base_journey_query)
assert len(response['journeys']) == 2
assert response['journeys'][0]['status'] == 'DETOUR'
assert response['journeys'][0]['sections'][0]['type'] == 'public_transport'
assert len(response['journeys'][0]['sections'][0]['stop_date_times']) == 2
assert response['journeys'][0]['sections'][0]['data_freshness'] == 'realtime'
assert response['journeys'][0]['sections'][0]['display_informations']['physical_mode'] == 'Tramway'
assert has_the_disruption(response, 'stop_time_with_detour')
# Tramway is the first physical_mode in NTFS, but we might pick mode in a smarter way in the future
response = self.query_region('physical_modes')
assert response['physical_modes'][0]['name'] == 'Tramway'
# Check attributes of deleted stop_time in the concerned vehicle_journey
vj_query = 'vehicle_journeys/{vj}?_current_datetime={dt}'.format(
vj='vehicle_journey:vjA:modified:0:stop_time_with_detour', dt='20120614T080000'
)
response = self.query_region(vj_query)
assert has_the_disruption(response, 'stop_time_with_detour')
assert len(response['vehicle_journeys']) == 1
assert len(response['vehicle_journeys'][0]['stop_times']) == 3
assert response['vehicle_journeys'][0]['stop_times'][1]['drop_off_allowed'] is False
assert response['vehicle_journeys'][0]['stop_times'][1]['pickup_allowed'] is False
@dataset(MAIN_ROUTING_TEST_SETTING)
class TestKirinAddNewTrip(MockKirinDisruptionsFixture):
def test_add_new_trip(self):
"""
0. test that no PT-Ref object related to the new trip exists and that no PT-journey exists
1. create a new trip
2. test that journey is possible using this new trip
3. test some PT-Ref objects were created
4. test that /pt_objects returns those objects
5. test that PT-Ref filters are working
6. test /departures and stop_schedules
"""
disruption_query = 'disruptions?_current_datetime={dt}'.format(dt='20120614T080000')
disruptions_before = self.query_region(disruption_query)
nb_disruptions_before = len(disruptions_before['disruptions'])
# /journeys before (only direct walk)
C_B_query = (
"journeys?from={f}&to={to}&data_freshness=realtime&"
"datetime={dt}&_current_datetime={dt}".format(
f='stop_point:stopC', to='stop_point:stopB', dt='20120614T080000'
)
)
response = self.query_region(C_B_query)
assert not has_the_disruption(response, 'new_trip')
self.is_valid_journey_response(response, C_B_query)
assert len(response['journeys']) == 1
assert 'non_pt_walking' in response['journeys'][0]['tags']
# /pt_objects before
ptobj_query = 'pt_objects?q={q}&_current_datetime={dt}'.format(q='adi', dt='20120614T080000') # deliberately fuzzy query, matching the 'additional service' objects created below
response = self.query_region(ptobj_query)
assert 'pt_objects' not in response
# Check that no vehicle_journey exists on the future realtime-trip
vj_query = 'vehicle_journeys/{vj}?_current_datetime={dt}'.format(
vj='vehicle_journey:additional-trip:modified:0:new_trip', dt='20120614T080000'
)
response, status = self.query_region(vj_query, check=False)
assert status == 404
assert 'vehicle_journeys' not in response
# Check that no additional line exists
line_query = 'lines/{l}?_current_datetime={dt}'.format(l='line:stopC_stopB', dt='20120614T080000')
response, status = self.query_region(line_query, check=False)
assert status == 404
assert 'lines' not in response
# Check that PT-Ref filter fails as no object exists
vj_filter_query = 'commercial_modes/{cm}/vehicle_journeys?_current_datetime={dt}'.format(
cm='commercial_mode:additional_service', dt='20120614T080000'
)
response, status = self.query_region(vj_filter_query, check=False)
assert status == 404
assert response['error']['message'] == 'ptref : Filters: Unable to find object'
network_filter_query = 'vehicle_journeys/{vj}/networks?_current_datetime={dt}'.format(
vj='vehicle_journey:additional-trip:modified:0:new_trip', dt='20120614T080000'
)
response, status = self.query_region(network_filter_query, check=False)
assert status == 404
assert response['error']['message'] == 'ptref : Filters: Unable to find object'
# Check that no departure exists on stop_point stop_point:stopC for either base_schedule or realtime
departure_query = "stop_points/stop_point:stopC/departures?_current_datetime=20120614T080000"
departures = self.query_region(departure_query + '&data_freshness=base_schedule')
assert len(departures['departures']) == 0
departures = self.query_region(departure_query + '&data_freshness=realtime')
assert len(departures['departures']) == 0
# Check stop_schedules on stop_point:stopC: the date_times list is empty
ss_on_sp_query = "stop_points/stop_point:stopC/stop_schedules?_current_datetime=20120614T080000"
stop_schedules = self.query_region(ss_on_sp_query + '&data_freshness=realtime')
assert len(stop_schedules['stop_schedules']) == 1
assert stop_schedules['stop_schedules'][0]['links'][0]['type'] == 'line'
assert stop_schedules['stop_schedules'][0]['links'][0]['id'] == 'D'
assert len(stop_schedules['stop_schedules'][0]['date_times']) == 0
# Check that no stop_schedule exists on line:stopC_stopB for stop_point stop_point:stopC
ss_on_line_query = (
"stop_points/stop_point:stopC/lines/line:stopC_stopB/"
"stop_schedules?_current_datetime=20120614T080000"
)
stop_schedules, status = self.query_region(ss_on_line_query + '&data_freshness=realtime', check=False)
assert status == 404
assert len(stop_schedules['stop_schedules']) == 0
# New disruption, a new trip without headsign with 2 stop_times in realtime
self.send_mock(
"additional-trip",
"20120614",
'added',
[
UpdatedStopTime(
"stop_point:stopC",
arrival_delay=0,
departure_delay=0,
is_added=True,
arrival=tstamp("20120614T080100"),
departure=tstamp("20120614T080100"),
message='on time',
),
UpdatedStopTime(
"stop_point:stopB",
arrival_delay=0,
departure_delay=0,
is_added=True,
arrival=tstamp("20120614T080102"),
departure=tstamp("20120614T080102"),
),
],
disruption_id='new_trip',
effect='additional_service',
physical_mode_id='physical_mode:Bus', # this physical mode exists in kraken
)
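# Assumed behaviour for an 'added' trip: besides the vehicle_journey itself,
# Kraken auto-creates the supporting PT-Ref objects (network and commercial_mode
# 'additional_service', line and route 'stopC_stopB'), all checked below.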
# Check the new disruption 'new_trip' adding the trip 'additional-trip'
disruptions_after = self.query_region(disruption_query)
assert nb_disruptions_before + 1 == len(disruptions_after['disruptions'])
new_trip_disruptions = get_disruptions_by_id(disruptions_after, 'new_trip')
assert len(new_trip_disruptions) == 1
new_trip_disrupt = new_trip_disruptions[0]
assert new_trip_disrupt['id'] == 'new_trip'
assert new_trip_disrupt['severity']['effect'] == 'ADDITIONAL_SERVICE'
assert len(new_trip_disrupt['impacted_objects'][0]['impacted_stops']) == 2
assert all(
[
(s['departure_status'] == 'added' and s['arrival_status'] == 'added')
for s in new_trip_disrupt['impacted_objects'][0]['impacted_stops']
]
)
assert new_trip_disrupt['application_periods'][0]['begin'] == '20120614T080100'
assert new_trip_disrupt['application_periods'][0]['end'] == '20120614T080102'
# Check that a PT journey now exists
response = self.query_region(C_B_query)
assert has_the_disruption(response, 'new_trip')
self.is_valid_journey_response(response, C_B_query)
assert len(response['journeys']) == 2
pt_journey = response['journeys'][0]
assert 'non_pt_walking' not in pt_journey['tags']
assert pt_journey['status'] == 'ADDITIONAL_SERVICE'
assert pt_journey['sections'][0]['data_freshness'] == 'realtime'
assert pt_journey['sections'][0]['display_informations']['commercial_mode'] == 'additional service'
assert pt_journey['sections'][0]['display_informations']['physical_mode'] == 'Bus'
# Check date_times
assert pt_journey['sections'][0]['departure_date_time'] == '20120614T080100'
assert pt_journey['sections'][0]['arrival_date_time'] == '20120614T080102'
assert pt_journey['sections'][0]['stop_date_times'][0]['arrival_date_time'] == '20120614T080100'
assert pt_journey['sections'][0]['stop_date_times'][-1]['arrival_date_time'] == '20120614T080102'
# Check /pt_objects after: new objects created
response = self.query_region(ptobj_query)
assert len(response['pt_objects']) == 4
assert len([o for o in response['pt_objects'] if o['id'] == 'network:additional_service']) == 1
assert len([o for o in response['pt_objects'] if o['id'] == 'commercial_mode:additional_service']) == 1
assert len([o for o in response['pt_objects'] if o['id'] == 'line:stopC_stopB']) == 1
assert len([o for o in response['pt_objects'] if o['id'] == 'route:stopC_stopB']) == 1
# Check that the vehicle_journey has been created
response = self.query_region(vj_query)
assert has_the_disruption(response, 'new_trip')
assert len(response['vehicle_journeys']) == 1
# Check that name and headsign are empty
assert response['vehicle_journeys'][0]['name'] == ''
assert response['vehicle_journeys'][0]['headsign'] == ''
assert response['vehicle_journeys'][0]['disruptions'][0]['id'] == 'new_trip'
assert len(response['vehicle_journeys'][0]['stop_times']) == 2
assert response['vehicle_journeys'][0]['stop_times'][0]['drop_off_allowed'] is True
assert response['vehicle_journeys'][0]['stop_times'][0]['pickup_allowed'] is True
# Check that the new line has been created with necessary information
response = self.query_region(line_query)
assert len(response['lines']) == 1
assert response['lines'][0]['name'] == 'stopC - stopB'
assert response['lines'][0]['network']['id'] == 'network:additional_service'
assert response['lines'][0]['commercial_mode']['id'] == 'commercial_mode:additional_service'
assert response['lines'][0]['routes'][0]['id'] == 'route:stopC_stopB'
assert response['lines'][0]['routes'][0]['name'] == 'stopC - stopB'
assert response['lines'][0]['routes'][0]['direction']['id'] == 'stopB'
assert response['lines'][0]['routes'][0]['direction_type'] == 'forward'
# Check that objects created are linked in PT-Ref filter
response = self.query_region(vj_filter_query)
assert has_the_disruption(response, 'new_trip')
assert len(response['vehicle_journeys']) == 1
        # Check that the newly created vehicle_journey is correctly filtered by &since and &until
        # Note: for backward compatibility, &data_freshness=base_schedule is also accepted
        # and works with &since and &until
vj_base_query = (
'commercial_modes/commercial_mode:additional_service/vehicle_journeys?'
'_current_datetime={dt}&since={sin}&until={un}&data_freshness={df}'
)
response, status = self.query_region(
vj_base_query.format(
dt='20120614T080000', sin='20120614T080100', un='20120614T080102', df='base_schedule'
),
check=False,
)
assert status == 404
assert 'vehicle_journeys' not in response
response = self.query_region(
vj_base_query.format(
dt='20120614T080000', sin='20120614T080100', un='20120614T080102', df='realtime'
)
)
assert len(response['vehicle_journeys']) == 1
response, status = self.query_region(
vj_base_query.format(
dt='20120614T080000', sin='20120614T080101', un='20120614T080102', df='realtime'
),
check=False,
)
assert status == 404
assert 'vehicle_journeys' not in response
response = self.query_region(network_filter_query)
assert len(response['networks']) == 1
assert response['networks'][0]['name'] == 'additional service'
        # Check that no departures exist on stop_point stop_point:stopC for base_schedule
departures = self.query_region(departure_query + '&data_freshness=base_schedule')
assert len(departures['departures']) == 0
        # Check that departures on stop_point stop_point:stopC exist with the disruption
departures = self.query_region(departure_query + '&data_freshness=realtime')
assert len(departures['disruptions']) == 1
assert departures['disruptions'][0]['disruption_uri'] == 'new_trip'
assert departures['departures'][0]['display_informations']['name'] == 'stopC - stopB'
        # Check that for the stop_schedule on line "line:stopC_stopB" and stop_point stop_point:stopC
        # the base_schedule date_times list is empty.
stop_schedules = self.query_region(ss_on_line_query + '&data_freshness=base_schedule')
assert len(stop_schedules['stop_schedules']) == 1
assert stop_schedules['stop_schedules'][0]['links'][0]['type'] == 'line'
assert stop_schedules['stop_schedules'][0]['links'][0]['id'] == 'line:stopC_stopB'
assert len(stop_schedules['stop_schedules'][0]['date_times']) == 0
        # Check that the stop_schedule on line "line:stopC_stopB" and stop_point stop_point:stopC
        # exists with the disruption.
stop_schedules = self.query_region(ss_on_line_query + '&data_freshness=realtime')
assert len(stop_schedules['stop_schedules']) == 1
assert stop_schedules['stop_schedules'][0]['links'][0]['id'] == 'line:stopC_stopB'
assert len(stop_schedules['disruptions']) == 1
assert stop_schedules['disruptions'][0]['uri'] == 'new_trip'
assert len(stop_schedules['stop_schedules'][0]['date_times']) == 1
assert stop_schedules['stop_schedules'][0]['date_times'][0]['date_time'] == '20120614T080100'
assert stop_schedules['stop_schedules'][0]['date_times'][0]['data_freshness'] == 'realtime'
# Check stop_schedules on stop_point stop_point:stopC for base_schedule
# Date_times list is empty for both stop_schedules
stop_schedules = self.query_region(ss_on_sp_query + '&data_freshness=base_schedule')
assert len(stop_schedules['stop_schedules']) == 2
assert stop_schedules['stop_schedules'][0]['links'][0]['type'] == 'line'
assert stop_schedules['stop_schedules'][0]['links'][0]['id'] == 'D'
assert len(stop_schedules['stop_schedules'][0]['date_times']) == 0
assert stop_schedules['stop_schedules'][1]['links'][0]['type'] == 'line'
assert stop_schedules['stop_schedules'][1]['links'][0]['id'] == 'line:stopC_stopB'
assert len(stop_schedules['stop_schedules'][1]['date_times']) == 0
# Check stop_schedules on stop_point stop_point:stopC for realtime
# Date_times list is empty for line 'D' but not for the new line added
stop_schedules = self.query_region(ss_on_sp_query + '&data_freshness=realtime')
assert len(stop_schedules['stop_schedules']) == 2
assert stop_schedules['stop_schedules'][0]['links'][0]['type'] == 'line'
assert stop_schedules['stop_schedules'][0]['links'][0]['id'] == 'D'
assert len(stop_schedules['stop_schedules'][0]['date_times']) == 0
assert stop_schedules['stop_schedules'][1]['links'][0]['type'] == 'line'
assert stop_schedules['stop_schedules'][1]['links'][0]['id'] == 'line:stopC_stopB'
assert len(stop_schedules['stop_schedules'][1]['date_times']) == 1
assert stop_schedules['stop_schedules'][1]['date_times'][0]['date_time'] == '20120614T080100'
assert stop_schedules['stop_schedules'][1]['date_times'][0]['data_freshness'] == 'realtime'
# Check stop_schedules on stop_area stopC for base_schedule
# Date_times list is empty for both stop_schedules
ss_on_sa_query = "stop_areas/stopC/stop_schedules?_current_datetime=20120614T080000"
stop_schedules = self.query_region(ss_on_sa_query + '&data_freshness=base_schedule')
assert len(stop_schedules['stop_schedules']) == 2
assert stop_schedules['stop_schedules'][0]['links'][0]['type'] == 'line'
assert stop_schedules['stop_schedules'][0]['links'][0]['id'] == 'D'
assert len(stop_schedules['stop_schedules'][0]['date_times']) == 0
assert stop_schedules['stop_schedules'][1]['links'][0]['type'] == 'line'
assert stop_schedules['stop_schedules'][1]['links'][0]['id'] == 'line:stopC_stopB'
assert len(stop_schedules['stop_schedules'][1]['date_times']) == 0
# Check stop_schedules on stop_area stopC for realtime
# Date_times list is empty for line 'D' but not for the new line added
ss_on_sa_query = "stop_areas/stopC/stop_schedules?_current_datetime=20120614T080000"
stop_schedules = self.query_region(ss_on_sa_query + '&data_freshness=realtime')
assert len(stop_schedules['stop_schedules']) == 2
assert stop_schedules['stop_schedules'][0]['links'][0]['type'] == 'line'
assert stop_schedules['stop_schedules'][0]['links'][0]['id'] == 'D'
assert len(stop_schedules['stop_schedules'][0]['date_times']) == 0
assert stop_schedules['stop_schedules'][1]['links'][0]['type'] == 'line'
assert stop_schedules['stop_schedules'][1]['links'][0]['id'] == 'line:stopC_stopB'
assert len(stop_schedules['stop_schedules'][1]['date_times']) == 1
assert stop_schedules['stop_schedules'][1]['date_times'][0]['date_time'] == '20120614T080100'
assert stop_schedules['stop_schedules'][1]['date_times'][0]['data_freshness'] == 'realtime'
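# Illustrative sketch (hypothetical helper, not wired into the tests below):
# the recurring "query, expect 404, check the ptref message" pattern could be
# factored out like this, reusing the query_region(check=False) signature
# already used throughout this file.
def assert_ptref_unknown_object(tester, query):
    # tester is any fixture exposing query_region(..., check=False)
    resp, status = tester.query_region(query, check=False)
    assert status == 404
    assert resp['error']['message'] == 'ptref : Filters: Unable to find object'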
@dataset(MAIN_ROUTING_TEST_SETTING)
class TestPtRefOnAddedTrip(MockKirinDisruptionsFixture):
def test_ptref_on_added_trip(self):
"""
        1. Test all possible ptref calls with/without filters before adding a new trip
        2. Test all possible ptref calls with/without filters after adding a new trip
        3. Test all possible ptref calls with/without filters after modifying the recently added trip
        Note: physical_mode is present in the gtfs-rt whereas for network and commercial_mode default values are used
"""
disruption_query = 'disruptions?_current_datetime={dt}'.format(dt='20120614T080000')
disruptions_before = self.query_region(disruption_query)
nb_disruptions_before = len(disruptions_before['disruptions'])
        # Verify that the network, line and commercial_mode of the trip to be added later are absent
resp, status = self.query_region("networks/network:additional_service", check=False)
assert status == 404
assert resp['error']['message'] == 'ptref : Filters: Unable to find object'
resp, status = self.query_region("lines/line:stopC_stopB", check=False)
assert status == 404
assert resp['error']['message'] == 'ptref : Filters: Unable to find object'
resp, status = self.query_region("commercial_modes/commercial_mode:additional_service", check=False)
assert status == 404
assert resp['error']['message'] == 'ptref : Filters: Unable to find object'
# The following ptref search should work with base-schedule data.
# network <-> datasets
resp = self.query_region("networks/base_network/datasets")
assert resp["datasets"][0]["id"] == "default:dataset"
resp = self.query_region("datasets/default:dataset/networks")
assert resp["networks"][0]["id"] == "base_network"
# line <-> company
resp = self.query_region("lines/A/companies")
assert resp["companies"][0]["id"] == "base_company"
resp = self.query_region("companies/base_company/lines")
assert resp["lines"][0]["id"] == "A"
# company <-> commercial_modes
resp = self.query_region("companies/base_company/commercial_modes")
assert resp['commercial_modes'][0]['id'] == '0x0'
resp = self.query_region("commercial_modes/0x0/companies")
assert resp["companies"][0]["id"] == "base_company"
# route <-> dataset
resp = self.query_region("routes/B:3/datasets")
assert resp["datasets"][0]["id"] == "default:dataset"
resp = self.query_region("datasets/default:dataset/routes")
routes = [rt["id"] for rt in resp["routes"]]
assert "B:3" in routes
# vehicle_journey <-> company
resp = self.query_region("vehicle_journeys/vehicle_journey:vjA/companies")
assert resp["companies"][0]["id"] == "base_company"
resp = self.query_region("companies/base_company/vehicle_journeys")
assert len(resp["vehicle_journeys"]) == 9
# network <-> contributor
resp = self.query_region("networks/base_network/contributors")
assert resp["contributors"][0]["id"] == "default:contributor"
resp = self.query_region("contributors/default:contributor/networks")
assert resp["networks"][0]["id"] == "base_network"
# New disruption, a new trip with 2 stop_times in realtime
self.send_mock(
"additional-trip",
"20120614",
"added",
[
UpdatedStopTime(
"stop_point:stopC",
arrival_delay=0,
departure_delay=0,
is_added=True,
arrival=tstamp("20120614T080100"),
departure=tstamp("20120614T080100"),
message='on time',
),
UpdatedStopTime(
"stop_point:stopB",
arrival_delay=0,
departure_delay=0,
is_added=True,
arrival=tstamp("20120614T080102"),
departure=tstamp("20120614T080102"),
),
],
disruption_id="new_trip",
effect="additional_service",
physical_mode_id="physical_mode:Bus", # this physical mode exists in kraken
)
# Check new disruption 'additional-trip' to add a new trip
disruptions_after = self.query_region(disruption_query)
assert nb_disruptions_before + 1 == len(disruptions_after['disruptions'])
# Verify that network, line, commercial_mode of the new trip are present
resp = self.query_region("networks/network:additional_service")
assert "networks" in resp
resp = self.query_region("lines/line:stopC_stopB")
assert "lines" in resp
resp = self.query_region("commercial_modes/commercial_mode:additional_service")
assert "commercial_modes" in resp
resp = self.query_region("networks/network:additional_service/physical_modes")
assert resp["physical_modes"][0]["id"] == "physical_mode:Bus"
resp = self.query_region("physical_modes/physical_mode:Bus/networks")
networks = [nw["id"] for nw in resp["networks"]]
assert "network:additional_service" in networks
# network by line should work
resp = self.query_region("lines/line:stopC_stopB/networks")
assert resp["networks"][0]["id"] == "network:additional_service"
# The physical_mode sent in gtfs-rt should be present in the new line added
resp = self.query_region("lines/line:stopC_stopB/physical_modes")
assert resp["physical_modes"][0]["id"] == "physical_mode:Bus"
# The default commercial_mode used for a new line should be present
resp = self.query_region("lines/line:stopC_stopB/commercial_modes")
assert resp["commercial_modes"][0]["id"] == "commercial_mode:additional_service"
        # A newly added line should have a route and a vehicle_journey
resp = self.query_region("lines/line:stopC_stopB/routes")
assert resp["routes"][0]["id"] == "route:stopC_stopB"
resp = self.query_region("lines/line:stopC_stopB/vehicle_journeys")
assert resp["vehicle_journeys"][0]["id"] == "vehicle_journey:additional-trip:modified:0:new_trip"
# Name and headsign are empty
assert resp["vehicle_journeys"][0]["name"] == ""
assert resp["vehicle_journeys"][0]["headsign"] == ""
        # We should be able to get the line from the recently added vehicle_journey
resp = self.query_region("vehicle_journeys/vehicle_journey:additional-trip:modified:0:new_trip/lines")
assert resp["lines"][0]["id"] == "line:stopC_stopB"
        # We should be able to get the physical_mode sent in gtfs-rt from the recently added vehicle_journey
resp = self.query_region(
"vehicle_journeys/vehicle_journey:additional-trip:modified:0:new_trip/physical_modes"
)
assert resp["physical_modes"][0]["id"] == "physical_mode:Bus"
# The following ptref search should work with a trip added.
# network <-> datasets
resp = self.query_region("networks/network:additional_service/datasets")
assert resp["datasets"][0]["id"] == "default:dataset"
resp = self.query_region("datasets/default:dataset/networks")
networks = [nw["id"] for nw in resp["networks"]]
assert "network:additional_service" in networks
# route <-> dataset
resp = self.query_region("routes/route:stopC_stopB/datasets")
assert resp["datasets"][0]["id"] == "default:dataset"
resp = self.query_region("datasets/default:dataset/routes")
routes = [rt["id"] for rt in resp["routes"]]
assert "route:stopC_stopB" in routes
# route <-> physical_mode
resp = self.query_region("routes/route:stopC_stopB/physical_modes")
assert resp["physical_modes"][0]["id"] == "physical_mode:Bus"
resp = self.query_region("physical_modes/physical_mode:Bus/routes")
routes = [rt["id"] for rt in resp["routes"]]
assert "route:stopC_stopB" in routes
# route <-> stop_point
resp = self.query_region("routes/route:stopC_stopB/stop_points")
sps = [sp["id"] for sp in resp["stop_points"]]
assert "stop_point:stopC" in sps
assert "stop_point:stopB" in sps
resp = self.query_region("stop_points/stop_point:stopC/routes")
routes = [rt["id"] for rt in resp["routes"]]
assert "route:stopC_stopB" in routes
resp = self.query_region("stop_points/stop_point:stopB/routes")
routes = [rt["id"] for rt in resp["routes"]]
assert "route:stopC_stopB" in routes
# network <-> contributor
resp = self.query_region("networks/network:additional_service/contributors")
assert resp["contributors"][0]["id"] == "default:contributor"
resp = self.query_region("contributors/default:contributor/networks")
networks = [nw["id"] for nw in resp["networks"]]
assert "network:additional_service" in networks
# line <-> company
resp = self.query_region("lines/line:stopC_stopB/companies")
assert resp["companies"][0]["id"] == "base_company"
resp = self.query_region("companies/base_company/lines")
assert resp["lines"][7]["id"] == "line:stopC_stopB"
# vehicle_journey <-> company
resp = self.query_region(
"vehicle_journeys/vehicle_journey:additional-trip:modified:0:new_trip/companies"
)
assert resp["companies"][0]["id"] == "base_company"
resp = self.query_region("companies/base_company/vehicle_journeys")
vjs = [vj["id"] for vj in resp["vehicle_journeys"]]
assert "vehicle_journey:additional-trip:modified:0:new_trip" in vjs
# commercial_mode <-> company
resp = self.query_region("commercial_modes/commercial_mode:additional_service/companies")
assert resp["companies"][0]["id"] == "base_company"
resp = self.query_region("companies/base_company/commercial_modes")
commercial_modes = [cm["id"] for cm in resp["commercial_modes"]]
assert "commercial_mode:additional_service" in commercial_modes
# stop_point <-> dataset
resp = self.query_region("stop_points/stop_point:stopC/datasets")
assert resp["datasets"][0]["id"] == "default:dataset"
resp = self.query_region("stop_points/stop_point:stopB/datasets")
assert resp["datasets"][0]["id"] == "default:dataset"
resp = self.query_region("datasets/default:dataset/stop_points")
sps = [sp["id"] for sp in resp["stop_points"]]
assert "stop_point:stopC" in sps
assert "stop_point:stopB" in sps
@dataset(MAIN_ROUTING_TEST_SETTING)
class TestKirinAddNewTripWithWrongPhysicalMode(MockKirinDisruptionsFixture):
def test_add_new_trip_with_wrong_physical_mode(self):
"""
        1. send a disruption to create a new trip with a physical_mode absent from kraken
        2. check journeys, disruptions and PT-Ref objects to verify that no trip is added
"""
disruption_query = 'disruptions?_current_datetime={dt}'.format(dt='20120614T080000')
disruptions_before = self.query_region(disruption_query)
nb_disruptions_before = len(disruptions_before['disruptions'])
# New disruption, a new trip with 2 stop_times in realtime
self.send_mock(
"additional-trip",
"20120614",
'added',
[
UpdatedStopTime(
"stop_point:stopC",
arrival_delay=0,
departure_delay=0,
is_added=True,
arrival=tstamp("20120614T080100"),
departure=tstamp("20120614T080100"),
message='on time',
),
UpdatedStopTime(
"stop_point:stopB",
arrival_delay=0,
departure_delay=0,
is_added=True,
arrival=tstamp("20120614T080102"),
departure=tstamp("20120614T080102"),
),
],
disruption_id='new_trip',
effect='additional_service',
physical_mode_id='physical_mode:Toto', # this physical mode doesn't exist in kraken
)
# Check there is no new disruption
disruptions_after = self.query_region(disruption_query)
assert nb_disruptions_before == len(disruptions_after['disruptions'])
        # /journeys: as no PT trip was added, only the direct walk is returned.
C_B_query = (
"journeys?from={f}&to={to}&data_freshness=realtime&"
"datetime={dt}&_current_datetime={dt}".format(
f='stop_point:stopC', to='stop_point:stopB', dt='20120614T080000'
)
)
response = self.query_region(C_B_query)
assert not has_the_disruption(response, 'new_trip')
self.is_valid_journey_response(response, C_B_query)
assert len(response['journeys']) == 1
assert 'non_pt_walking' in response['journeys'][0]['tags']
# Check that no vehicle_journey is added
vj_query = 'vehicle_journeys/{vj}?_current_datetime={dt}'.format(
vj='vehicle_journey:additional-trip:modified:0:new_trip', dt='20120614T080000'
)
response, status = self.query_region(vj_query, check=False)
assert status == 404
assert 'vehicle_journeys' not in response
@dataset(MAIN_ROUTING_TEST_SETTING)
class TestKirinAddNewTripWithoutPhysicalMode(MockKirinDisruptionsFixture):
def test_add_new_trip_without_physical_mode(self):
"""
        1. send a disruption to create a new trip without any physical_mode
        2. check the physical_mode of the journey
"""
disruption_query = 'disruptions?_current_datetime={dt}'.format(dt='20120614T080000')
disruptions_before = self.query_region(disruption_query)
nb_disruptions_before = len(disruptions_before['disruptions'])
# New disruption, a new trip with 2 stop_times in realtime
self.send_mock(
"additional-trip",
"20120614",
'added',
[
UpdatedStopTime(
"stop_point:stopC",
arrival_delay=0,
departure_delay=0,
is_added=True,
arrival=tstamp("20120614T080100"),
departure=tstamp("20120614T080100"),
message='on time',
),
UpdatedStopTime(
"stop_point:stopB",
arrival_delay=0,
departure_delay=0,
is_added=True,
arrival=tstamp("20120614T080102"),
departure=tstamp("20120614T080102"),
),
],
disruption_id='new_trip',
effect='additional_service',
)
# Check that a new disruption is added
disruptions_after = self.query_region(disruption_query)
assert nb_disruptions_before + 1 == len(disruptions_after['disruptions'])
C_B_query = (
"journeys?from={f}&to={to}&data_freshness=realtime&"
"datetime={dt}&_current_datetime={dt}".format(
f='stop_point:stopC', to='stop_point:stopB', dt='20120614T080000'
)
)
        # Check that a PT journey exists with the first physical_mode in the NTFS ('Tramway')
response = self.query_region(C_B_query)
assert has_the_disruption(response, 'new_trip')
self.is_valid_journey_response(response, C_B_query)
assert len(response['journeys']) == 2
pt_journey = response['journeys'][0]
assert 'non_pt_walking' not in pt_journey['tags']
assert pt_journey['status'] == 'ADDITIONAL_SERVICE'
assert pt_journey['sections'][0]['data_freshness'] == 'realtime'
assert pt_journey['sections'][0]['display_informations']['commercial_mode'] == 'additional service'
assert pt_journey['sections'][0]['display_informations']['physical_mode'] == 'Tramway'
@dataset(MAIN_ROUTING_TEST_SETTING)
class TestKirinUpdateTripWithPhysicalMode(MockKirinDisruptionsFixture):
def test_update_trip_with_physical_mode(self):
"""
1. send a disruption with a physical_mode to update a trip
2. check physical_mode of journey
"""
        # we have 9 vehicle_journeys
pt_response = self.query_region('vehicle_journeys')
initial_nb_vehicle_journeys = len(pt_response['vehicle_journeys'])
assert initial_nb_vehicle_journeys == 9
disruption_query = 'disruptions?_current_datetime={dt}'.format(dt='20120614T080000')
disruptions_before = self.query_region(disruption_query)
nb_disruptions_before = len(disruptions_before['disruptions'])
# physical_mode of base vehicle_journey
pt_response = self.query_region(
'vehicle_journeys/vehicle_journey:vjA/physical_modes?_current_datetime=20120614T1337'
)
assert len(pt_response['physical_modes']) == 1
assert pt_response['physical_modes'][0]['name'] == 'Tramway'
self.send_mock(
"vjA",
"20120614",
'modified',
[
UpdatedStopTime(
"stop_point:stopB",
arrival=tstamp("20120614T080224"),
departure=tstamp("20120614T080225"),
arrival_delay=60 + 24,
departure_delay=60 + 25,
message='cow on tracks',
),
UpdatedStopTime(
"stop_point:stopA",
arrival=tstamp("20120614T080400"),
departure=tstamp("20120614T080400"),
arrival_delay=3 * 60 + 58,
departure_delay=3 * 60 + 58,
),
],
disruption_id='vjA_delayed',
physical_mode_id='physical_mode:Bus', # this physical mode exists in kraken
)
# Check that a new disruption is added
disruptions_after = self.query_region(disruption_query)
assert nb_disruptions_before + 1 == len(disruptions_after['disruptions'])
# A new vj is created
pt_response = self.query_region('vehicle_journeys')
assert len(pt_response['vehicle_journeys']) == (initial_nb_vehicle_journeys + 1)
# physical_mode of the newly created vehicle_journey is the base vehicle_journey physical mode (Tramway)
pt_response = self.query_region(
'vehicle_journeys/vehicle_journey:vjA:modified:0:vjA_delayed/physical_modes'
)
assert len(pt_response['physical_modes']) == 1
assert pt_response['physical_modes'][0]['name'] == 'Tramway'
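# Note on the behaviour verified just above (a reading of the assertions, not
# new API): for a 'modified' trip the physical_mode_id carried by the gtfs-rt
# feed is ignored, and the realtime vj keeps the base vj's mode (Tramway).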
@dataset(MAIN_ROUTING_TEST_SETTING)
class TestKirinAddTripWithHeadSign(MockKirinDisruptionsFixture):
def test_add_trip_with_headsign(self):
"""
1. send a disruption with a headsign to add a trip
2. check that headsign is present in journey.section.display_informations
"""
disruption_query = 'disruptions?_current_datetime={dt}'.format(dt='20120614T080000')
disruptions_before = self.query_region(disruption_query)
nb_disruptions_before = len(disruptions_before['disruptions'])
# New disruption, a new trip with 2 stop_times in realtime
self.send_mock(
"additional-trip",
"20120614",
'added',
[
UpdatedStopTime(
"stop_point:stopC",
arrival_delay=0,
departure_delay=0,
is_added=True,
arrival=tstamp("20120614T080100"),
departure=tstamp("20120614T080100"),
message='on time',
),
UpdatedStopTime(
"stop_point:stopB",
arrival_delay=0,
departure_delay=0,
is_added=True,
arrival=tstamp("20120614T080102"),
departure=tstamp("20120614T080102"),
),
],
disruption_id='new_trip',
effect='additional_service',
headsign='trip_headsign',
)
# Check that a new disruption is added
disruptions_after = self.query_region(disruption_query)
assert nb_disruptions_before + 1 == len(disruptions_after['disruptions'])
C_B_query = (
"journeys?from={f}&to={to}&data_freshness=realtime&"
"datetime={dt}&_current_datetime={dt}".format(
f='stop_point:stopC', to='stop_point:stopB', dt='20120614T080000'
)
)
# Check that a PT journey exists with trip_headsign in display_informations
response = self.query_region(C_B_query)
assert has_the_disruption(response, 'new_trip')
self.is_valid_journey_response(response, C_B_query)
assert len(response['journeys']) == 2
pt_journey = response['journeys'][0]
assert pt_journey['status'] == 'ADDITIONAL_SERVICE'
assert pt_journey['sections'][0]['data_freshness'] == 'realtime'
assert pt_journey['sections'][0]['display_informations']['headsign'] == 'trip_headsign'
# Check the vehicle_journey created by real-time
new_vj = self.query_region('vehicle_journeys/vehicle_journey:additional-trip:modified:0:new_trip')
assert len(new_vj['vehicle_journeys']) == 1
        assert new_vj['vehicle_journeys'][0]['name'] == 'trip_headsign'
        assert new_vj['vehicle_journeys'][0]['headsign'] == 'trip_headsign'
@dataset(MAIN_ROUTING_TEST_SETTING_NO_ADD)
class TestKirinAddNewTripBlocked(MockKirinDisruptionsFixture):
def test_add_new_trip_blocked(self):
"""
Disable realtime trip-add in Kraken
1. send a disruption to create a new trip
2. test that no journey is possible using this new trip
3. test that no PT-Ref objects were created
4. test that /pt_objects doesn't return objects
5. test that PT-Ref filters find nothing
6. test /departures and stop_schedules
"""
disruption_query = 'disruptions?_current_datetime={dt}'.format(dt='20120614T080000')
disruptions_before = self.query_region(disruption_query)
nb_disruptions_before = len(disruptions_before['disruptions'])
# New disruption, a new trip with 2 stop_times in realtime
self.send_mock(
"additional-trip",
"20120614",
'added',
[
UpdatedStopTime(
"stop_point:stopC",
arrival_delay=0,
departure_delay=0,
is_added=True,
arrival=tstamp("20120614T080100"),
departure=tstamp("20120614T080100"),
message='on time',
),
UpdatedStopTime(
"stop_point:stopB",
arrival_delay=0,
departure_delay=0,
is_added=True,
arrival=tstamp("20120614T080102"),
departure=tstamp("20120614T080102"),
),
],
disruption_id='new_trip',
effect='additional_service',
)
# Check there is no new disruption
disruptions_after = self.query_region(disruption_query)
assert nb_disruptions_before == len(disruptions_after['disruptions'])
# /journeys before (only direct walk)
C_B_query = (
"journeys?from={f}&to={to}&data_freshness=realtime&"
"datetime={dt}&_current_datetime={dt}".format(
f='stop_point:stopC', to='stop_point:stopB', dt='20120614T080000'
)
)
response = self.query_region(C_B_query)
assert not has_the_disruption(response, 'new_trip')
self.is_valid_journey_response(response, C_B_query)
assert len(response['journeys']) == 1
assert 'non_pt_walking' in response['journeys'][0]['tags']
# /pt_objects before
        ptobj_query = 'pt_objects?q={q}&_current_datetime={dt}'.format(q='adi', dt='20120614T080000')  # 'adi' is a deliberately partial/typo query matching the 'additional service' objects
response = self.query_region(ptobj_query)
assert 'pt_objects' not in response
# Check that no vehicle_journey exists on the future realtime-trip
vj_query = 'vehicle_journeys/{vj}?_current_datetime={dt}'.format(
vj='vehicle_journey:additional-trip:modified:0:new_trip', dt='20120614T080000'
)
response, status = self.query_region(vj_query, check=False)
assert status == 404
assert 'vehicle_journeys' not in response
# Check that no additional line exists
line_query = 'lines/{l}?_current_datetime={dt}'.format(l='line:stopC_stopB', dt='20120614T080000')
response, status = self.query_region(line_query, check=False)
assert status == 404
assert 'lines' not in response
# Check that PT-Ref filter fails as no object exists
vj_filter_query = 'commercial_modes/{cm}/vehicle_journeys?_current_datetime={dt}'.format(
cm='commercial_mode:additional_service', dt='20120614T080000'
)
response, status = self.query_region(vj_filter_query, check=False)
assert status == 404
assert response['error']['message'] == 'ptref : Filters: Unable to find object'
network_filter_query = 'vehicle_journeys/{vj}/networks?_current_datetime={dt}'.format(
vj='vehicle_journey:additional-trip:modified:0:new_trip', dt='20120614T080000'
)
response, status = self.query_region(network_filter_query, check=False)
assert status == 404
assert response['error']['message'] == 'ptref : Filters: Unable to find object'
        # Check that no departures exist on stop_point stop_point:stopC
departure_query = "stop_points/stop_point:stopC/departures?_current_datetime=20120614T080000"
departures = self.query_region(departure_query)
assert len(departures['departures']) == 0
        # Check that no stop_schedule exists on line:stopC_stopB and stop_point stop_point:stopC
ss_query = (
"stop_points/stop_point:stopC/lines/line:stopC_stopB/"
"stop_schedules?_current_datetime=20120614T080000&data_freshness=realtime"
)
stop_schedules, status = self.query_region(ss_query, check=False)
assert status == 404
assert len(stop_schedules['stop_schedules']) == 0
@dataset(MAIN_ROUTING_TEST_SETTING)
class TestKirinAddNewTripPresentInNavitiaTheSameDay(MockKirinDisruptionsFixture):
def test_add_new_trip_present_in_navitia_the_same_day(self):
disruption_query = 'disruptions?_current_datetime={dt}'.format(dt='20120614T080000')
disruptions_before = self.query_region(disruption_query)
nb_disruptions_before = len(disruptions_before['disruptions'])
# The vehicle_journey vjA is present in navitia
pt_response = self.query_region('vehicle_journeys/vehicle_journey:vjA?_current_datetime=20120614T1337')
assert len(pt_response['vehicle_journeys']) == 1
assert len(pt_response['disruptions']) == 0
# New disruption, a new trip with vehicle_journey id = vjA and having 2 stop_times in realtime
self.send_mock(
"vjA",
"20120614",
'added',
[
UpdatedStopTime(
"stop_point:stopC",
arrival_delay=0,
departure_delay=0,
is_added=True,
arrival=tstamp("20120614T080100"),
departure=tstamp("20120614T080100"),
message='on time',
),
UpdatedStopTime(
"stop_point:stopB",
arrival_delay=0,
departure_delay=0,
is_added=True,
arrival=tstamp("20120614T080102"),
departure=tstamp("20120614T080102"),
),
],
disruption_id='new_trip',
effect='additional_service',
)
        # Check that no new disruption is added
disruptions_after = self.query_region(disruption_query)
assert nb_disruptions_before == len(disruptions_after['disruptions'])
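# Contrast with the next test: an 'added' feed reusing a trip id that already
# circulates on that date (vjA on 20120614, just above) is rejected, while the
# same feed dated 20120620 (below) is accepted and creates a disruption.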
@dataset(MAIN_ROUTING_TEST_SETTING)
class TestKirinAddNewTripPresentInNavitiaWithAShift(MockKirinDisruptionsFixture):
def test_add_new_trip_present_in_navitia_with_a_shift(self):
disruption_query = 'disruptions?_current_datetime={dt}'.format(dt='20120614T080000')
disruptions_before = self.query_region(disruption_query)
nb_disruptions_before = len(disruptions_before['disruptions'])
# The vehicle_journey vjA is present in navitia
pt_response = self.query_region('vehicle_journeys/vehicle_journey:vjA?_current_datetime=20120614T1337')
assert len(pt_response['vehicle_journeys']) == 1
assert len(pt_response['disruptions']) == 0
# New disruption, a new trip with meta vehicle journey id = vjA and having 2 stop_times in realtime
self.send_mock(
"vjA",
"20120620",
'added',
[
UpdatedStopTime(
"stop_point:stopC",
arrival_delay=0,
departure_delay=0,
is_added=True,
arrival=tstamp("20120620T080100"),
departure=tstamp("20120620T080100"),
message='on time',
),
UpdatedStopTime(
"stop_point:stopB",
arrival_delay=0,
departure_delay=0,
is_added=True,
arrival=tstamp("20120620T080102"),
departure=tstamp("20120620T080102"),
),
],
disruption_id='new_trip',
effect='additional_service',
)
        # The new trip is accepted because it does not run on the same day as the base vj,
        # so a disruption is added
disruptions_after = self.query_region(disruption_query)
assert nb_disruptions_before + 1 == len(disruptions_after['disruptions'])
@dataset(MAIN_ROUTING_TEST_SETTING)
class TestKirinDelayPassMidnightTowardsNextDay(MockKirinDisruptionsFixture):
def test_delay_pass_midnight_towards_next_day(self):
"""
Relates to "test_cots_update_trip_with_delay_pass_midnight_on_first_station" in kirin
        1. Add a disruption with a delay at the second station (stop_point:stopA) so that there is a pass midnight
        2. Verify disruption count, vehicle_journeys count and journey
        3. Update the disruption so that the departure station stop_point:stopB is replaced by stop_point:stopC,
           with a delay such that there is no longer a pass midnight
        4. Verify disruption count, vehicle_journeys count and journey
        Note: '&forbidden_uris[]=PM' is used to exclude line 'PM' and its vj 'vjPM' from /journeys
"""
        disruption_query = 'disruptions?_current_datetime={dt}'.format(dt='20120614T080000')
initial_nb_disruptions = len(self.query_region(disruption_query)['disruptions'])
pt_response = self.query_region('vehicle_journeys')
initial_nb_vehicle_journeys = len(pt_response['vehicle_journeys'])
empty_query = (
"journeys?from={f}&to={to}&data_freshness=realtime&max_duration_to_pt=0&"
"datetime={dt}&_current_datetime={dt}&forbidden_uris[]=PM"
)
        # Check journeys in realtime for 20120615 (the day of the future disruption) from B to A
        # vjB circulates every day with departure at 18:01:00 and arrival at 18:01:02
ba_15T18_journey_query = empty_query.format(
f='stop_point:stopB', to='stop_point:stopA', dt='20120615T180000'
)
response = self.query_region(ba_15T18_journey_query)
assert len(response['journeys']) == 1
assert response['journeys'][0]['departure_date_time'] == '20120615T180100'
assert response['journeys'][0]['arrival_date_time'] == '20120615T180102'
assert response['journeys'][0]['sections'][0]['display_informations']['name'] == 'B'
assert response['journeys'][0]['sections'][0]['data_freshness'] == 'base_schedule'
        # vjB circulates the day before with departure at 18:01:00 and arrival at 18:01:02
ba_14T18_journey_query = empty_query.format(
f='stop_point:stopB', to='stop_point:stopA', dt='20120614T180000'
)
response = self.query_region(ba_14T18_journey_query)
assert len(response['journeys']) == 1
assert response['journeys'][0]['departure_date_time'] == '20120614T180100'
assert response['journeys'][0]['arrival_date_time'] == '20120614T180102'
assert response['journeys'][0]['sections'][0]['display_informations']['name'] == 'B'
assert response['journeys'][0]['sections'][0]['data_freshness'] == 'base_schedule'
        # vjB circulates the day after with departure at 18:01:00 and arrival at 18:01:02
ba_16T18_journey_query = empty_query.format(
f='stop_point:stopB', to='stop_point:stopA', dt='20120616T180000'
)
response = self.query_region(ba_16T18_journey_query)
assert len(response['journeys']) == 1
assert response['journeys'][0]['departure_date_time'] == '20120616T180100'
assert response['journeys'][0]['arrival_date_time'] == '20120616T180102'
assert response['journeys'][0]['sections'][0]['display_informations']['name'] == 'B'
assert response['journeys'][0]['sections'][0]['data_freshness'] == 'base_schedule'
        # A new disruption with a delay on the arrival station, creating a pass midnight
self.send_mock(
"vjB",
"20120615",
'modified',
[
UpdatedStopTime(
"stop_point:stopB",
arrival=tstamp("20120615T180100"),
departure=tstamp("20120615T180100"),
arrival_delay=0,
departure_delay=0,
),
UpdatedStopTime(
"stop_point:stopA",
arrival=tstamp("20120616T010102"),
departure=tstamp("20120616T010102"),
arrival_delay=7 * 60 * 60,
message="Delayed to have pass midnight",
),
],
disruption_id='stop_time_with_detour',
effect='delayed',
)
# A new disruption is added
disruptions_after = self.query_region(disruption_query)
assert initial_nb_disruptions + 1 == len(disruptions_after['disruptions'])
# A new vehicle_journey is added
pt_response = self.query_region('vehicle_journeys')
assert initial_nb_vehicle_journeys + 1 == len(pt_response['vehicle_journeys'])
        # Check journeys in realtime for 20120615, the day of the disruption, from B to A
        # vjB circulates with departure at 20120615T18:01:00 and arrival at 20120616T01:01:02
response = self.query_region(ba_15T18_journey_query + '&forbidden_uris[]=PM')
assert len(response['journeys']) == 1
assert response['journeys'][0]['departure_date_time'] == '20120615T180100'
assert response['journeys'][0]['arrival_date_time'] == '20120616T010102'
assert response['journeys'][0]['sections'][0]['display_informations']['name'] == 'B'
assert response['journeys'][0]['sections'][0]['base_departure_date_time'] == '20120615T180100'
assert response['journeys'][0]['sections'][0]['departure_date_time'] == '20120615T180100'
assert response['journeys'][0]['sections'][0]['base_arrival_date_time'] == '20120615T180102'
assert response['journeys'][0]['sections'][0]['arrival_date_time'] == '20120616T010102'
assert response['journeys'][0]['sections'][0]['data_freshness'] == 'realtime'
        # vjB circulates the day before with departure at 18:01:00 and arrival at 18:01:02
response = self.query_region(ba_14T18_journey_query)
assert len(response['journeys']) == 1
assert response['journeys'][0]['departure_date_time'] == '20120614T180100'
assert response['journeys'][0]['arrival_date_time'] == '20120614T180102'
assert response['journeys'][0]['sections'][0]['display_informations']['name'] == 'B'
assert response['journeys'][0]['sections'][0]['data_freshness'] == 'base_schedule'
        # vjB circulates the day after with departure at 18:01:00 and arrival at 18:01:02
response = self.query_region(ba_16T18_journey_query)
assert len(response['journeys']) == 1
assert response['journeys'][0]['departure_date_time'] == '20120616T180100'
assert response['journeys'][0]['arrival_date_time'] == '20120616T180102'
assert response['journeys'][0]['sections'][0]['display_informations']['name'] == 'B'
assert response['journeys'][0]['sections'][0]['data_freshness'] == 'base_schedule'
        # The disruption is modified with the first station deleted for detour and a delay such that there is no longer a pass midnight
self.send_mock(
"vjB",
"20120615",
'modified',
[
UpdatedStopTime(
"stop_point:stopB",
arrival=tstamp("20120615T000100"),
departure=tstamp("20120615T180100"),
arrival_delay=0,
departure_delay=0,
arrival_skipped=True,
departure_skipped=True,
is_detour=True,
message='deleted for detour',
),
UpdatedStopTime(
"stop_point:stopC",
arrival_delay=0,
departure_delay=0,
arrival=tstamp("20120616T003000"),
departure=tstamp("20120616T003000"),
is_added=True,
is_detour=True,
message='added for detour',
),
UpdatedStopTime(
"stop_point:stopA",
arrival=tstamp("20120616T010102"),
departure=tstamp("20120616T010102"),
arrival_delay=7 * 60 * 60,
message="No more pass midnight",
),
],
disruption_id='stop_time_with_detour',
effect='delayed',
)
# The disruption created above is modified so no disruption is added
disruptions_after = self.query_region(disruption_query)
assert initial_nb_disruptions + 1 == len(disruptions_after['disruptions'])
# The disruption created above is modified so no vehicle_journey is added
pt_response = self.query_region('vehicle_journeys')
assert initial_nb_vehicle_journeys + 1 == len(pt_response['vehicle_journeys'])
        # A query at 20120615T180000 waits until 00:30:00 the next day
        # vjB circulates on 20120616 with departure at 00:30:00 and arrival at 01:01:02
ca_15T18_journey_query = empty_query.format(
f='stop_point:stopC', to='stop_point:stopA', dt='20120615T180000'
)
response = self.query_region(ca_15T18_journey_query)
assert len(response['journeys']) == 1
assert response['journeys'][0]['departure_date_time'] == '20120616T003000'
assert response['journeys'][0]['arrival_date_time'] == '20120616T010102'
assert response['journeys'][0]['sections'][0]['display_informations']['name'] == 'B'
assert response['journeys'][0]['sections'][0]['data_freshness'] == 'realtime'
assert len(response['journeys'][0]['sections'][0]['stop_date_times']) == 2
        # vjB circulates the day before with departure at 18:01:00 and arrival at 18:01:02
response = self.query_region(ba_14T18_journey_query)
assert len(response['journeys']) == 1
assert response['journeys'][0]['departure_date_time'] == '20120614T180100'
assert response['journeys'][0]['arrival_date_time'] == '20120614T180102'
assert response['journeys'][0]['sections'][0]['display_informations']['name'] == 'B'
assert response['journeys'][0]['sections'][0]['data_freshness'] == 'base_schedule'
        # vjB circulates the day after with departure at 18:01:00 and arrival at 18:01:02
response = self.query_region(ba_16T18_journey_query)
assert len(response['journeys']) == 1
assert response['journeys'][0]['departure_date_time'] == '20120616T180100'
assert response['journeys'][0]['arrival_date_time'] == '20120616T180102'
assert response['journeys'][0]['sections'][0]['display_informations']['name'] == 'B'
assert response['journeys'][0]['sections'][0]['base_departure_date_time'] == '20120616T180100'
assert response['journeys'][0]['sections'][0]['base_arrival_date_time'] == '20120616T180102'
assert response['journeys'][0]['sections'][0]['data_freshness'] == 'base_schedule'
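# Worked example of the delay arithmetic used above: the base arrival at stopA
# is 20120615T180102; arrival_delay=7*60*60 (7 hours) moves it to
# 20120616T010102, which is what introduces the pass midnight.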
@dataset(MAIN_ROUTING_TEST_SETTING)
class TestKirinDelayOnBasePassMidnightTowardsNextDay(MockKirinDisruptionsFixture):
def test_delay_on_base_pass_midnight_towards_next_day(self):
"""
Relates to "test_cots_update_trip_with_delay_pass_midnight_on_first_station" in kirin
        Test on a vehicle_journey departing from stop_point:stopB at 23:55:00 and arriving
        at stop_point:stopA at 00:01:00 the next day.
1. Verify disruption count, vehicle_journeys count and journey
        2. Add a disruption with a 2-minute delay at the first station (stop_point:stopB) so that
           there is still a pass midnight
        3. Update the disruption with a 6-minute delay at the first station and a 5-minute delay
           at the second station, so that there is no longer a pass midnight and the departure is the day after
        4. Update the disruption with a smaller delay on the first station and an advance on the arrival
           station, so that there is no pass midnight and the departure is on the same day as in base_schedule
"""
def journey_base_schedule_for_day_before(resp):
assert resp['journeys'][0]['departure_date_time'] == '20120614T235500'
assert resp['journeys'][0]['arrival_date_time'] == '20120615T000100'
assert resp['journeys'][0]['sections'][0]['display_informations']['name'] == 'PM'
assert resp['journeys'][0]['sections'][0]['data_freshness'] == 'base_schedule'
def journey_base_schedule_for_next_day(resp):
assert resp['journeys'][0]['departure_date_time'] == '20120616T235500'
assert resp['journeys'][0]['arrival_date_time'] == '20120617T000100'
assert resp['journeys'][0]['sections'][0]['display_informations']['name'] == 'PM'
assert resp['journeys'][0]['sections'][0]['data_freshness'] == 'base_schedule'
        disruption_query = 'disruptions?_current_datetime={dt}'.format(dt='20120615T080000')
initial_nb_disruptions = len(self.query_region(disruption_query)['disruptions'])
pt_response = self.query_region('vehicle_journeys')
initial_nb_vehicle_journeys = len(pt_response['vehicle_journeys'])
empty_query = (
"journeys?from={f}&to={to}&data_freshness=realtime&max_duration_to_pt=0&"
"datetime={dt}&_current_datetime={dt}"
)
        # Check journeys in realtime for 20120615 (the day of the future disruption) from B to A
        # vjPM circulates every day with departure at 23:55:00 and arrival at 00:01:00 the next day
ba_15T23_journey_query = empty_query.format(
f='stop_point:stopB', to='stop_point:stopA', dt='20120615T235000'
)
response = self.query_region(ba_15T23_journey_query)
assert len(response['journeys']) == 1
assert response['journeys'][0]['departure_date_time'] == '20120615T235500'
assert response['journeys'][0]['arrival_date_time'] == '20120616T000100'
assert response['journeys'][0]['sections'][0]['display_informations']['name'] == 'PM'
assert response['journeys'][0]['sections'][0]['data_freshness'] == 'base_schedule'
        # vjPM circulates the day before with departure at 23:55:00 and arrival at 00:01:00 the following day
ba_14T23_journey_query = empty_query.format(
f='stop_point:stopB', to='stop_point:stopA', dt='20120614T235000'
)
response = self.query_region(ba_14T23_journey_query)
assert len(response['journeys']) == 1
journey_base_schedule_for_day_before(response)
        # vjPM circulates the day after with departure at 23:55:00 and arrival at 00:01:00 the following day
ba_16T23_journey_query = empty_query.format(
f='stop_point:stopB', to='stop_point:stopA', dt='20120616T235000'
)
response = self.query_region(ba_16T23_journey_query)
assert len(response['journeys']) == 1
journey_base_schedule_for_next_day(response)
# A new disruption with a delay on departure station before midnight
self.send_mock(
"vjPM",
"20120615",
'modified',
[
UpdatedStopTime(
"stop_point:stopB",
arrival=tstamp("20120615T235700"),
departure=tstamp("20120615T235700"),
arrival_delay=2 * 60,
departure_delay=2 * 60,
message="Delay before pass midnight",
),
UpdatedStopTime(
"stop_point:stopA",
arrival=tstamp("20120616T000100"),
departure=tstamp("20120616T000100"),
arrival_delay=0,
),
],
disruption_id='delay_before_pm',
effect='delayed',
)
# A new disruption is added
disruptions_after = self.query_region(disruption_query)
assert initial_nb_disruptions + 1 == len(disruptions_after['disruptions'])
# Now we have 1 more vehicle_journey than before
pt_response = self.query_region('vehicle_journeys')
assert initial_nb_vehicle_journeys + 1 == len(pt_response['vehicle_journeys'])
        # Check journeys in realtime for 20120615, the day of the disruption, from B to A
        # vjPM circulates with departure at 23:57:00 and arrival at 00:01:00 the day after
response = self.query_region(ba_15T23_journey_query)
assert len(response['journeys']) == 1
assert response['journeys'][0]['departure_date_time'] == '20120615T235700'
assert response['journeys'][0]['arrival_date_time'] == '20120616T000100'
assert response['journeys'][0]['sections'][0]['display_informations']['name'] == 'PM'
assert response['journeys'][0]['sections'][0]['base_departure_date_time'] == '20120615T235500'
assert response['journeys'][0]['sections'][0]['departure_date_time'] == '20120615T235700'
assert response['journeys'][0]['sections'][0]['base_arrival_date_time'] == '20120616T000100'
assert response['journeys'][0]['sections'][0]['arrival_date_time'] == '20120616T000100'
assert response['journeys'][0]['sections'][0]['data_freshness'] == 'realtime'
        # vjPM circulates the day before with departure at 23:55:00 and arrival at 00:01:00 the following day
response = self.query_region(ba_14T23_journey_query)
assert len(response['journeys']) == 1
journey_base_schedule_for_day_before(response)
        # vjPM circulates the day after with departure at 23:55:00 and arrival at 00:01:00 the following day
response = self.query_region(ba_16T23_journey_query)
assert len(response['journeys']) == 1
journey_base_schedule_for_next_day(response)
        # The disruption is modified with a delay on the first station so that there is no longer
        # a pass midnight and the departure is the day after
self.send_mock(
"vjPM",
"20120615",
'modified',
[
UpdatedStopTime(
"stop_point:stopB",
arrival=tstamp("20120616T000100"),
departure=tstamp("20120616T000100"),
arrival_delay=6 * 60,
departure_delay=6 * 60,
message="Departure the next day",
),
UpdatedStopTime(
"stop_point:stopA",
arrival=tstamp("20120616T000600"),
departure=tstamp("20120616T000600"),
arrival_delay=5 * 60,
message="Arrival delayed",
),
],
disruption_id='delay_before_pm',
effect='delayed',
)
# The disruption created above is modified so no disruption is added
disruptions_after = self.query_region(disruption_query)
assert initial_nb_disruptions + 1 == len(disruptions_after['disruptions'])
        # We still have 1 more vehicle_journey than initially, as the realtime vj is deleted and a new one is added
pt_response = self.query_region('vehicle_journeys')
assert initial_nb_vehicle_journeys + 1 == len(pt_response['vehicle_journeys'])
        # Check journeys in realtime for 20120615, the day of the disruption, from B to A
        # vjPM circulates with departure at 00:01:00 and arrival at 00:06:00 the day after
response = self.query_region(ba_15T23_journey_query)
assert len(response['journeys']) == 1
assert response['journeys'][0]['departure_date_time'] == '20120616T000100'
assert response['journeys'][0]['arrival_date_time'] == '20120616T000600'
assert response['journeys'][0]['sections'][0]['display_informations']['name'] == 'PM'
assert response['journeys'][0]['sections'][0]['base_departure_date_time'] == '20120615T235500'
assert response['journeys'][0]['sections'][0]['departure_date_time'] == '20120616T000100'
assert response['journeys'][0]['sections'][0]['base_arrival_date_time'] == '20120616T000100'
assert response['journeys'][0]['sections'][0]['arrival_date_time'] == '20120616T000600'
assert response['journeys'][0]['sections'][0]['data_freshness'] == 'realtime'
        # vjPM circulates the day before with departure at 23:55:00 and arrival at 00:01:00 the following day
response = self.query_region(ba_14T23_journey_query)
assert len(response['journeys']) == 1
journey_base_schedule_for_day_before(response)
        # vjPM circulates the day after with departure at 23:55:00 and arrival at 00:01:00 the following day
response = self.query_region(ba_16T23_journey_query)
assert len(response['journeys']) == 1
journey_base_schedule_for_next_day(response)
        # The disruption is modified with a smaller delay on the first station and an advance on the
        # arrival station, so that there is no pass midnight and the departure is on the same day as in base_schedule
self.send_mock(
"vjPM",
"20120615",
'modified',
[
UpdatedStopTime(
"stop_point:stopB",
arrival=tstamp("20120615T235600"),
departure=tstamp("20120615T235600"),
arrival_delay=1 * 60,
departure_delay=1 * 60,
message="Departure the same day",
),
UpdatedStopTime(
"stop_point:stopA",
arrival=tstamp("20120615T235900"),
departure=tstamp("20120615T235900"),
arrival_delay=-2 * 60,
message="Arrival advanced",
),
],
disruption_id='delay_before_pm',
effect='delayed',
)
# The disruption created above is modified so no disruption is added
disruptions_after = self.query_region(disruption_query)
assert initial_nb_disruptions + 1 == len(disruptions_after['disruptions'])
        # We still have 1 more vehicle_journey than initially, as the realtime vj is deleted and a new one is added
pt_response = self.query_region('vehicle_journeys')
assert initial_nb_vehicle_journeys + 1 == len(pt_response['vehicle_journeys'])
        # Check journeys in realtime for 20120615, the day of the disruption, from B to A
        # vjPM circulates with departure at 23:56:00 and arrival at 23:59:00 the same day
response = self.query_region(ba_15T23_journey_query)
assert len(response['journeys']) == 1
assert response['journeys'][0]['departure_date_time'] == '20120615T235600'
assert response['journeys'][0]['arrival_date_time'] == '20120615T235900'
assert response['journeys'][0]['sections'][0]['display_informations']['name'] == 'PM'
assert response['journeys'][0]['sections'][0]['base_departure_date_time'] == '20120615T235500'
assert response['journeys'][0]['sections'][0]['departure_date_time'] == '20120615T235600'
assert response['journeys'][0]['sections'][0]['base_arrival_date_time'] == '20120616T000100'
assert response['journeys'][0]['sections'][0]['arrival_date_time'] == '20120615T235900'
assert response['journeys'][0]['sections'][0]['data_freshness'] == 'realtime'
        # vjPM circulates the day before with departure at 23:55:00 and arrival at 00:01:00 the following day
response = self.query_region(ba_14T23_journey_query)
assert len(response['journeys']) == 1
journey_base_schedule_for_day_before(response)
        # vjPM circulates the day after with departure at 23:55:00 and arrival at 00:01:00 the following day
response = self.query_region(ba_16T23_journey_query)
assert len(response['journeys']) == 1
journey_base_schedule_for_next_day(response)
def make_mock_kirin_item(
vj_id,
date,
status='canceled',
new_stop_time_list=[],
disruption_id=None,
effect=None,
physical_mode_id=None,
headsign=None,
):
feed_message = gtfs_realtime_pb2.FeedMessage()
feed_message.header.gtfs_realtime_version = '1.0'
feed_message.header.incrementality = gtfs_realtime_pb2.FeedHeader.DIFFERENTIAL
feed_message.header.timestamp = 0
entity = feed_message.entity.add()
entity.id = disruption_id or "{}".format(uuid.uuid1())
trip_update = entity.trip_update
trip = trip_update.trip
trip.trip_id = vj_id
trip.start_date = date
trip.Extensions[kirin_pb2.contributor] = rt_topic
if headsign:
trip_update.Extensions[kirin_pb2.headsign] = headsign
if physical_mode_id:
trip_update.vehicle.Extensions[kirin_pb2.physical_mode_id] = physical_mode_id
if effect == 'unknown':
trip_update.Extensions[kirin_pb2.effect] = gtfs_realtime_pb2.Alert.UNKNOWN_EFFECT
elif effect == 'modified':
trip_update.Extensions[kirin_pb2.effect] = gtfs_realtime_pb2.Alert.MODIFIED_SERVICE
elif effect == 'delayed':
trip_update.Extensions[kirin_pb2.effect] = gtfs_realtime_pb2.Alert.SIGNIFICANT_DELAYS
elif effect == 'detour':
trip_update.Extensions[kirin_pb2.effect] = gtfs_realtime_pb2.Alert.DETOUR
elif effect == 'reduced_service':
trip_update.Extensions[kirin_pb2.effect] = gtfs_realtime_pb2.Alert.REDUCED_SERVICE
elif effect == 'additional_service':
trip_update.Extensions[kirin_pb2.effect] = gtfs_realtime_pb2.Alert.ADDITIONAL_SERVICE
if status == 'canceled':
# TODO: remove this deprecated code (for retrocompatibility with Kirin < 0.8.0 only)
trip.schedule_relationship = gtfs_realtime_pb2.TripDescriptor.CANCELED
elif status in ['modified', 'added']:
# TODO: remove this deprecated code (for retrocompatibility with Kirin < 0.8.0 only)
if status == 'modified':
trip.schedule_relationship = gtfs_realtime_pb2.TripDescriptor.SCHEDULED
elif status == 'added':
trip.schedule_relationship = gtfs_realtime_pb2.TripDescriptor.ADDED
for st in new_stop_time_list:
stop_time_update = trip_update.stop_time_update.add()
stop_time_update.stop_id = st.stop_id
stop_time_update.arrival.time = st.arrival
stop_time_update.arrival.delay = st.arrival_delay
stop_time_update.departure.time = st.departure
stop_time_update.departure.delay = st.departure_delay
def get_stop_time_status(is_skipped=False, is_added=False, is_detour=False):
if is_skipped:
if is_detour:
return kirin_pb2.DELETED_FOR_DETOUR
return kirin_pb2.DELETED
if is_added:
if is_detour:
return kirin_pb2.ADDED_FOR_DETOUR
return kirin_pb2.ADDED
return kirin_pb2.SCHEDULED
stop_time_update.arrival.Extensions[kirin_pb2.stop_time_event_status] = get_stop_time_status(
st.arrival_skipped, st.is_added, st.is_detour
)
stop_time_update.departure.Extensions[kirin_pb2.stop_time_event_status] = get_stop_time_status(
st.departure_skipped, st.is_added, st.is_detour
)
if st.message:
stop_time_update.Extensions[kirin_pb2.stoptime_message] = st.message
else:
# TODO
pass
return feed_message.SerializeToString()
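# Illustrative sketch (hypothetical helper, not used by the test suite): how a
# feed produced by make_mock_kirin_item() could be decoded for debugging. It
# only relies on gtfs_realtime_pb2, already imported by this module.
def _debug_print_feed(raw_feed):
    # ParseFromString is the standard protobuf entry point for deserializing.
    feed = gtfs_realtime_pb2.FeedMessage()
    feed.ParseFromString(raw_feed)
    for entity in feed.entity:
        for stu in entity.trip_update.stop_time_update:
            # Each stop_time_update carries absolute times plus delays.
            print(stu.stop_id, stu.arrival.time, stu.departure.time)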
| agpl-3.0 | -7,856,919,964,186,165,000 | 4,785,948,325,611,432,000 | 47.759387 | 127 | 0.599788 | false |
ximenesuk/openmicroscopy | components/tools/OmeroWeb/omeroweb/webclient/controller/history.py | 3 | 8170 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
#
# Copyright (c) 2008-2011 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008.
#
# Version: 1.0
#
import calendar
import datetime
import time
from django.conf import settings
from django.core.urlresolvers import reverse
from webclient.controller import BaseController
class BaseCalendar(BaseController):
day = None
month = None
year = None
next_month = None
next_month_name = None
last_month = None
last_month_name = None
next_year = None
last_year = None
def __init__(self, conn, year=None, month=None, day=None, eid=None, **kw):
BaseController.__init__(self, conn)
self.year = int(year)
self.month = int(month)
if eid is None:
self.eid = self.conn.getEventContext().userId
else:
self.eid = eid
if day:
self.day = int(day)
date = datetime.datetime.strptime(("%i-%i-%i" % (self.year, self.month, self.day)), "%Y-%m-%d")
self.displayDate = '%s %s' % (date.strftime("%A, %d"), date.strftime("%B %Y"))
self.nameday = date.strftime("%A")
else:
date = datetime.datetime.strptime(("%i-%i" % (self.year, self.month)), "%Y-%m")
def create_calendar(self):
calendar.setfirstweekday(settings.FIRST_DAY_OF_WEEK)
now = datetime.datetime(self.year, self.month, 1)
if self.month == 12:
self.next_month = now.replace(year=now.year+1, month=1)
self.next_year = self.year+1
else:
self.next_month = now.replace(month=now.month+1)
self.next_year = self.year
if self.month == 1:
self.last_month = now.replace(year=self.year-1, month=12)
self.last_year = self.year-1
else:
self.last_month = now.replace(month=now.month-1)
self.last_year = self.year
next_month_name = self.next_month.strftime('%B')
last_month_name = self.last_month.strftime('%B')
self.week_day_labels = [x for x in calendar.weekheader(5).split(' ') if x != '']
self.current_month = datetime.datetime(self.year, self.month, 1)
self.month_name = calendar.month_name[self.month]
if self.month == 12:
self.next_month = self.current_month.replace(year=self.year+1, month=1)
else:
self.next_month = self.current_month.replace(month=self.current_month.month+1)
self.next_month_name = self.next_month.strftime('%B')
if self.month == 1:
self.last_month = self.current_month.replace(year=self.year-1, month=12)
else:
self.last_month = self.current_month.replace(month=self.current_month.month-1)
self.last_month_name = self.last_month.strftime('%B')
self.cal_weeks = calendar.monthcalendar(self.year, self.month)
self.monthrange = calendar.monthrange(self.year, self.month)[1]
self.cal_days = []
items = self.calendar_items(self.month, self.monthrange)
for week,day in [(week,day) for week in xrange(0,len(self.cal_weeks)) for day in xrange(0,7)]:
imgCounter = 0
rdCounter = 0
dsCounter = 0
prCounter = 0
d = int(self.cal_weeks[week][day])
if d > 0:
t_items = {'image':[], 'dataset':[], 'project':[]}
for item in items.get(d):
if item.get('type') == 'ome.model.core.Image':
try:
t_items['image'].index(item.get('id'))
except:
imgCounter += 1
t_items['image'].append(item.get('id'))
elif item.get('type') == 'ome.model.containers.Dataset':
try:
t_items['dataset'].index(item.get('id'))
except:
dsCounter += 1
t_items['dataset'].append(item.get('id'))
elif item.get('type') == 'ome.model.containers.Project':
try:
t_items['project'].index(item.get('id'))
except:
prCounter += 1
t_items['project'].append(item.get('id'))
self.cal_days.append({'day':self.cal_weeks[week][day], 'counter': {'imgCounter':imgCounter, 'dsCounter':dsCounter, 'prCounter':prCounter }})
else:
self.cal_days.append({'day':self.cal_weeks[week][day], 'counter': {}})
self.cal_weeks[week][day] = {'cell': self.cal_days[-1]}
def calendar_items(self, month, monthrange):
if month < 10:
mn = '0%i' % month
else:
mn = month
d1 = datetime.datetime.strptime(("%i-%s-01 00:00:00" % (self.year, mn)), "%Y-%m-%d %H:%M:%S")
d2 = datetime.datetime.strptime(("%i-%s-%i 23:59:59" % (self.year, mn, monthrange)), "%Y-%m-%d %H:%M:%S")
start = long(time.mktime(d1.timetuple())+1e-6*d1.microsecond)*1000
end = long(time.mktime(d2.timetuple())+1e-6*d2.microsecond)*1000
all_logs = self.conn.getEventsByPeriod(start, end, self.eid)
items = dict()
for d in xrange(1,monthrange+1):
items[d] = list()
for i in all_logs:
for d in items:
if time.gmtime(i.event.time.val / 1000).tm_mday == d:
items[d].append({'id':i.entityId.val, 'type': i.entityType.val, 'action': i.action.val})
return items
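# Illustration (not in the original source; the 'action' value shown is
# hypothetical): the result maps each day of the month to the event-log
# entries recorded on that day, e.g.
#   {1: [{'id': 42, 'type': 'ome.model.core.Image', 'action': 'INSERT'}],
#    2: [], ...}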
def month_range(self, year, month):
if month == 12:
year += 1
month = 1
else:
month += 1
return (datetime.date(year, month, 1), datetime.date(year, month, 1)-datetime.timedelta(days=1))
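# Illustration (not in the original source):
#   month_range(2011, 1) -> (datetime.date(2011, 2, 1), datetime.date(2011, 1, 31))
# i.e. the first day of the following month and the last day of the given month.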
def get_items(self, page=None):
if self.month < 10:
mn = '0%i' % self.month
else:
mn = self.month
if self.day < 10:
dy = '0%i' % self.day
else:
dy = self.day
d1 = datetime.datetime.strptime(('%i-%s-%s 00:00:00' % (self.year, mn, dy)), "%Y-%m-%d %H:%M:%S")
d2 = datetime.datetime.strptime(('%i-%s-%s 23:59:59' % (self.year, mn, dy)), "%Y-%m-%d %H:%M:%S")
start = long(time.mktime(d1.timetuple())+1e-6*d1.microsecond)*1000
end = long(time.mktime(d2.timetuple())+1e-6*d2.microsecond)*1000
self.day_items = list()
self.day_items_size = 0
self.total_items_size = self.conn.countDataByPeriod(start, end, self.eid)
obj_logs = self.conn.getDataByPeriod(start=start, end=end, eid=self.eid, page=page)
obj_logs_counter = self.total_items_size  # same query as above; reuse the count
if len(obj_logs['image']) > 0 or len(obj_logs['dataset']) > 0 or len(obj_logs['project']) > 0:
self.day_items.append({'project':obj_logs['project'], 'dataset':obj_logs['dataset'], 'image':obj_logs['image']})
self.day_items_size = len(obj_logs['project'])+len(obj_logs['dataset'])+len(obj_logs['image'])
self.paging = self.doPaging(page, self.day_items_size, obj_logs_counter)
| gpl-2.0 | -8,691,938,284,445,984,000 | -8,308,058,906,124,230,000 | 39.445545 | 156 | 0.552509 | false |
Shadow6363/Competitions | 2015/Advent of Code/Day23.py | 1 | 1537 | # -*- coding: utf-8 -*-
import sys
class ChristmasComputer(object):
def __init__(self, a=0, b=0):
self.registers = {
'a': a,
'b': b,
'pc': 0
}
def hlf(self, r):
self.registers[r] /= 2
self.registers['pc'] += 1
def tpl(self, r):
self.registers[r] *= 3
self.registers['pc'] += 1
def inc(self, r):
self.registers[r] += 1
self.registers['pc'] += 1
def jmp(self, offset):
offset = int(offset)
self.registers['pc'] += offset
def jie(self, r, offset):
offset = int(offset)
if self.registers[r] % 2 == 0:
self.registers['pc'] += offset
else:
self.registers['pc'] += 1
def jio(self, r, offset):
offset = int(offset)
if self.registers[r] == 1:
self.registers['pc'] += offset
else:
self.registers['pc'] += 1
def main():
instructions = [instruction.strip().split(' ', 1) for instruction in sys.stdin]
computer = ChristmasComputer(1)
instruction_map = {
'hlf': computer.hlf,
'tpl': computer.tpl,
'inc': computer.inc,
'jmp': computer.jmp,
'jie': computer.jie,
'jio': computer.jio
}
while computer.registers['pc'] < len(instructions):
instruction, arg = instructions[computer.registers['pc']]
instruction_map[instruction](*arg.split(', '))
print computer.registers['b']
if __name__ == '__main__':
main()
| unlicense | -6,450,575,291,623,274,000 | -2,600,580,917,889,875,000 | 22.287879 | 83 | 0.510085 | false |
Nikoli/youtube-dl | youtube_dl/extractor/googlesearch.py | 168 | 1699 | from __future__ import unicode_literals
import itertools
import re
from .common import SearchInfoExtractor
from ..compat import (
compat_urllib_parse,
)
class GoogleSearchIE(SearchInfoExtractor):
IE_DESC = 'Google Video search'
_MAX_RESULTS = 1000
IE_NAME = 'video.google:search'
_SEARCH_KEY = 'gvsearch'
_TEST = {
'url': 'gvsearch15:python language',
'info_dict': {
'id': 'python language',
'title': 'python language',
},
'playlist_count': 15,
}
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
entries = []
res = {
'_type': 'playlist',
'id': query,
'title': query,
}
for pagenum in itertools.count():
result_url = (
'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en'
% (compat_urllib_parse.quote_plus(query), pagenum * 10))
webpage = self._download_webpage(
result_url, 'gvsearch:' + query,
note='Downloading result page ' + str(pagenum + 1))
for hit_idx, mobj in enumerate(re.finditer(
r'<h3 class="r"><a href="([^"]+)"', webpage)):
# Skip playlists
if not re.search(r'id="vidthumb%d"' % (hit_idx + 1), webpage):
continue
entries.append({
'_type': 'url',
'url': mobj.group(1)
})
if (len(entries) >= n) or not re.search(r'id="pnnext"', webpage):
res['entries'] = entries[:n]
return res
| unlicense | -5,668,476,969,749,798,000 | 5,545,172,477,229,787,000 | 27.79661 | 78 | 0.495586 | false |
HyperBaton/ansible | lib/ansible/modules/cloud/amazon/redshift_subnet_group.py | 10 | 5994 | #!/usr/bin/python
# Copyright 2014 Jens Carl, Hothead Games Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
author:
- "Jens Carl (@j-carl), Hothead Games Inc."
module: redshift_subnet_group
version_added: "2.2"
short_description: manage Redshift cluster subnet groups
description:
- Create, modifies, and deletes Redshift cluster subnet groups.
options:
state:
description:
- Specifies whether the subnet should be present or absent.
required: true
choices: ['present', 'absent' ]
type: str
group_name:
description:
- Cluster subnet group name.
required: true
aliases: ['name']
type: str
group_description:
description:
- Database subnet group description.
aliases: ['description']
type: str
group_subnets:
description:
- List of subnet IDs that make up the cluster subnet group.
aliases: ['subnets']
type: list
elements: str
requirements: [ 'boto' ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Create a Redshift subnet group
- local_action:
module: redshift_subnet_group
state: present
group_name: redshift-subnet
group_description: Redshift subnet
group_subnets:
- 'subnet-aaaaa'
- 'subnet-bbbbb'
# Remove subnet group
- redshift_subnet_group:
state: absent
group_name: redshift-subnet
'''
RETURN = '''
group:
description: dictionary containing all Redshift subnet group information
returned: success
type: complex
contains:
name:
description: name of the Redshift subnet group
returned: success
type: str
sample: "redshift_subnet_group_name"
vpc_id:
description: Id of the VPC where the subnet is located
returned: success
type: str
sample: "vpc-aabb1122"
'''
try:
import boto
import boto.redshift
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
group_name=dict(required=True, aliases=['name']),
group_description=dict(required=False, aliases=['description']),
group_subnets=dict(required=False, aliases=['subnets'], type='list'),
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto v2.9.0+ required for this module')
state = module.params.get('state')
group_name = module.params.get('group_name')
group_description = module.params.get('group_description')
group_subnets = module.params.get('group_subnets')
if state == 'present':
for required in ('group_name', 'group_description', 'group_subnets'):
if not module.params.get(required):
module.fail_json(msg=str("parameter %s required for state='present'" % required))
else:
for not_allowed in ('group_description', 'group_subnets'):
if module.params.get(not_allowed):
module.fail_json(msg=str("parameter %s not allowed for state='absent'" % not_allowed))
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg=str("Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file"))
# Connect to the Redshift endpoint.
try:
conn = connect_to_aws(boto.redshift, region, **aws_connect_params)
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
try:
changed = False
exists = False
group = None
try:
matching_groups = conn.describe_cluster_subnet_groups(group_name, max_records=100)
exists = len(matching_groups) > 0
except boto.exception.JSONResponseError as e:
if e.body['Error']['Code'] != 'ClusterSubnetGroupNotFoundFault':
# if e.code != 'ClusterSubnetGroupNotFoundFault':
module.fail_json(msg=str(e))
if state == 'absent':
if exists:
conn.delete_cluster_subnet_group(group_name)
changed = True
else:
if not exists:
new_group = conn.create_cluster_subnet_group(group_name, group_description, group_subnets)
group = {
'name': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult']
['ClusterSubnetGroup']['ClusterSubnetGroupName'],
'vpc_id': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult']
['ClusterSubnetGroup']['VpcId'],
}
else:
changed_group = conn.modify_cluster_subnet_group(group_name, group_subnets, description=group_description)
group = {
'name': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult']
['ClusterSubnetGroup']['ClusterSubnetGroupName'],
'vpc_id': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult']
['ClusterSubnetGroup']['VpcId'],
}
changed = True
except boto.exception.JSONResponseError as e:
module.fail_json(msg=str(e))
module.exit_json(changed=changed, group=group)
if __name__ == '__main__':
main()
| gpl-3.0 | -707,505,780,483,979,000 | 1,861,939,558,011,652,000 | 31.934066 | 157 | 0.629796 | false |
tinloaf/home-assistant | tests/components/test_mythicbeastsdns.py | 12 | 2026 | """Test the Mythic Beasts DNS component."""
import logging
import asynctest
from homeassistant.setup import async_setup_component
from homeassistant.components import mythicbeastsdns
_LOGGER = logging.getLogger(__name__)
async def mbddns_update_mock(domain, password, host, ttl=60, session=None):
"""Mock out mythic beasts updater."""
if password == 'incorrect':
_LOGGER.error("Updating Mythic Beasts failed: Not authenticated")
return False
if host[0] == '$':
_LOGGER.error("Updating Mythic Beasts failed: Invalid Character")
return False
return True
@asynctest.mock.patch('mbddns.update', new=mbddns_update_mock)
async def test_update(hass):
"""Run with correct values and check true is returned."""
result = await async_setup_component(
hass,
mythicbeastsdns.DOMAIN,
{
mythicbeastsdns.DOMAIN: {
'domain': 'example.org',
'password': 'correct',
'host': 'hass'
}
}
)
assert result
@asynctest.mock.patch('mbddns.update', new=mbddns_update_mock)
async def test_update_fails_if_wrong_token(hass):
"""Run with incorrect token and check false is returned."""
result = await async_setup_component(
hass,
mythicbeastsdns.DOMAIN,
{
mythicbeastsdns.DOMAIN: {
'domain': 'example.org',
'password': 'incorrect',
'host': 'hass'
}
}
)
assert not result
@asynctest.mock.patch('mbddns.update', new=mbddns_update_mock)
async def test_update_fails_if_invalid_host(hass):
"""Run with invalid characters in host and check false is returned."""
result = await async_setup_component(
hass,
mythicbeastsdns.DOMAIN,
{
mythicbeastsdns.DOMAIN: {
'domain': 'example.org',
'password': 'correct',
'host': '$hass'
}
}
)
assert not result
| apache-2.0 | 3,673,356,959,304,665,600 | 8,421,696,408,952,272,000 | 27.942857 | 75 | 0.594274 | false |
anryko/ansible | lib/ansible/modules/packaging/os/sorcery.py | 52 | 20201 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015-2016, Vlad Glagolev <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: sorcery
short_description: Package manager for Source Mage GNU/Linux
description:
- Manages "spells" on Source Mage GNU/Linux using I(sorcery) toolchain
author: "Vlad Glagolev (@vaygr)"
version_added: "2.3"
notes:
- When all three components are selected, the update proceeds in the sequence
Sorcery -> Grimoire(s) -> Spell(s); you cannot override it.
- grimoire handling (i.e. add/remove, including SCM/rsync versions) is not
yet supported.
requirements:
- bash
options:
name:
description:
- Name of the spell
- multiple names can be given, separated by commas
- special value '*' in conjunction with states C(latest) or
C(rebuild) will update or rebuild the whole system respectively
aliases: ["spell"]
state:
description:
- Whether to cast, dispel or rebuild a package
- state C(cast) is equivalent to C(present), not C(latest)
- state C(latest) always triggers C(update_cache=yes)
- state C(rebuild) implies casting all specified spells, not only
those that existed before
choices: ["present", "latest", "absent", "cast", "dispelled", "rebuild"]
default: "present"
depends:
description:
- Comma-separated list of _optional_ dependencies to build a spell
(or make sure it is built) with; use +/- in front of dependency
to turn it on/off ('+' is optional though)
- this option is ignored if C(name) parameter is equal to '*' or
contains more than one spell
- providers must be supplied in the form recognized by Sorcery, e.g.
'openssl(SSL)'
update:
description:
- Whether or not to update sorcery scripts at the very first stage
type: bool
default: 'no'
update_cache:
description:
- Whether or not to update grimoire collection before casting spells
type: bool
default: 'no'
aliases: ["update_codex"]
cache_valid_time:
description:
- Time in seconds to invalidate grimoire collection on update
- especially useful for SCM and rsync grimoires
- makes sense only in pair with C(update_cache)
'''
EXAMPLES = '''
# Make sure spell 'foo' is installed
- sorcery:
spell: foo
state: present
# Make sure spells 'foo', 'bar' and 'baz' are removed
- sorcery:
spell: foo,bar,baz
state: absent
# Make sure spell 'foo' with dependencies 'bar' and 'baz' is installed
- sorcery:
spell: foo
depends: bar,baz
state: present
# Make sure spell 'foo' with 'bar' and without 'baz' dependencies is installed
- sorcery:
spell: foo
depends: +bar,-baz
state: present
# Make sure spell 'foo' with libressl (providing SSL) dependency is installed
- sorcery:
spell: foo
depends: libressl(SSL)
state: present
# Playbook: make sure spells with/without required dependencies (if any) are installed
- sorcery:
name: "{{ item.spell }}"
depends: "{{ item.depends | default(None) }}"
state: present
loop:
- { spell: 'vifm', depends: '+file,-gtk+2' }
- { spell: 'fwknop', depends: 'gpgme' }
- { spell: 'pv,tnftp,tor' }
# Install the latest version of spell 'foo' using regular glossary
- sorcery:
name: foo
state: latest
# Rebuild spell 'foo'
- sorcery:
spell: foo
state: rebuild
# Rebuild the whole system, but update Sorcery and Codex first
- sorcery:
spell: '*'
state: rebuild
update: yes
update_cache: yes
# Refresh the grimoire collection if it's 1 day old using native sorcerous alias
- sorcery:
update_codex: yes
cache_valid_time: 86400
# Update only Sorcery itself
- sorcery:
update: yes
'''
RETURN = '''
'''
import datetime
import fileinput
import os
import re
import shutil
import sys
# auto-filled at module init
SORCERY = {
'sorcery': None,
'scribe': None,
'cast': None,
'dispel': None,
'gaze': None
}
SORCERY_LOG_DIR = "/var/log/sorcery"
SORCERY_STATE_DIR = "/var/state/sorcery"
def get_sorcery_ver(module):
""" Get Sorcery version. """
cmd_sorcery = "%s --version" % SORCERY['sorcery']
rc, stdout, stderr = module.run_command(cmd_sorcery)
if rc != 0 or not stdout:
module.fail_json(msg="unable to get Sorcery version")
return stdout.strip()
def codex_fresh(codex, module):
""" Check if grimoire collection is fresh enough. """
if not module.params['cache_valid_time']:
return False
timedelta = datetime.timedelta(seconds=module.params['cache_valid_time'])
for grimoire in codex:
lastupdate_path = os.path.join(SORCERY_STATE_DIR,
grimoire + ".lastupdate")
try:
mtime = os.stat(lastupdate_path).st_mtime
except Exception:
return False
lastupdate_ts = datetime.datetime.fromtimestamp(mtime)
# if any grimoire is not fresh, we invalidate the Codex
if lastupdate_ts + timedelta < datetime.datetime.now():
return False
return True
def codex_list(module):
""" List valid grimoire collection. """
codex = {}
cmd_scribe = "%s index" % SORCERY['scribe']
rc, stdout, stderr = module.run_command(cmd_scribe)
if rc != 0:
module.fail_json(msg="unable to list grimoire collection, fix your Codex")
rex = re.compile(r"^\s*\[\d+\] : (?P<grim>[\w\-+.]+) : [\w\-+./]+(?: : (?P<ver>[\w\-+.]+))?\s*$")
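# hypothetical example of a line this regex matches (format assumed from
# 'scribe index' output): " [1] : stable : /var/lib/sorcery/codex/stable : 0.62"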
# drop 4-line header and empty trailing line
for line in stdout.splitlines()[4:-1]:
match = rex.match(line)
if match:
codex[match.group('grim')] = match.group('ver')
if not codex:
module.fail_json(msg="no grimoires to operate on; add at least one")
return codex
def update_sorcery(module):
""" Update sorcery scripts.
This runs 'sorcery update' ('sorcery -u'). Check mode always returns a
positive change value.
"""
changed = False
if module.check_mode:
if not module.params['name'] and not module.params['update_cache']:
module.exit_json(changed=True, msg="would have updated Sorcery")
else:
sorcery_ver = get_sorcery_ver(module)
cmd_sorcery = "%s update" % SORCERY['sorcery']
rc, stdout, stderr = module.run_command(cmd_sorcery)
if rc != 0:
module.fail_json(msg="unable to update Sorcery: " + stdout)
if sorcery_ver != get_sorcery_ver(module):
changed = True
if not module.params['name'] and not module.params['update_cache']:
module.exit_json(changed=changed,
msg="successfully updated Sorcery")
def update_codex(module):
""" Update grimoire collections.
This runs 'scribe update'. Check mode always returns a positive change
value when 'cache_valid_time' is used.
"""
params = module.params
changed = False
codex = codex_list(module)
fresh = codex_fresh(codex, module)
if module.check_mode:
if not params['name']:
if not fresh:
changed = True
module.exit_json(changed=changed, msg="would have updated Codex")
elif not fresh or (params['name'] and params['state'] == 'latest'):
# SILENT is required as a workaround for query() in libgpg
module.run_command_environ_update.update(dict(SILENT='1'))
cmd_scribe = "%s update" % SORCERY['scribe']
rc, stdout, stderr = module.run_command(cmd_scribe)
if rc != 0:
module.fail_json(msg="unable to update Codex: " + stdout)
if codex != codex_list(module):
changed = True
if not params['name']:
module.exit_json(changed=changed,
msg="successfully updated Codex")
def match_depends(module):
""" Check for matching dependencies.
This inspects a spell's dependencies against the desired states and returns
'False' if a recast is needed to match them. It also adds the required lines
to the system-wide depends file for the proper recast procedure.
"""
params = module.params
spells = params['name']
depends = {}
depends_ok = True
if len(spells) > 1 or not params['depends']:
return depends_ok
spell = spells[0]
if module.check_mode:
sorcery_depends_orig = os.path.join(SORCERY_STATE_DIR, "depends")
sorcery_depends = os.path.join(SORCERY_STATE_DIR, "depends.check")
try:
shutil.copy2(sorcery_depends_orig, sorcery_depends)
except IOError:
module.fail_json(msg="failed to copy depends.check file")
else:
sorcery_depends = os.path.join(SORCERY_STATE_DIR, "depends")
rex = re.compile(r"^(?P<status>\+?|\-){1}(?P<depend>[a-z0-9]+[a-z0-9_\-\+\.]*(\([A-Z0-9_\-\+\.]+\))*)$")
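# matches e.g. '+bar', '-baz', or a provider spec such as 'openssl(SSL)'
# (examples taken from the module documentation above)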
for d in params['depends'].split(','):
match = rex.match(d)
if not match:
module.fail_json(msg="wrong depends line for spell '%s'" % spell)
# normalize status
if not match.group('status') or match.group('status') == '+':
status = 'on'
else:
status = 'off'
depends[match.group('depend')] = status
# drop providers spec
depends_list = [s.split('(')[0] for s in depends]
cmd_gaze = "%s -q version %s" % (SORCERY['gaze'], ' '.join(depends_list))
rc, stdout, stderr = module.run_command(cmd_gaze)
if rc != 0:
module.fail_json(msg="wrong dependencies for spell '%s'" % spell)
fi = fileinput.input(sorcery_depends, inplace=True)
try:
try:
for line in fi:
if line.startswith(spell + ':'):
match = None
for d in depends:
# when local status is 'off' and dependency is provider,
# use only provider value
d_offset = d.find('(')
if d_offset == -1:
d_p = ''
else:
d_p = re.escape(d[d_offset:])
# .escape() is needed mostly for the spells like 'libsigc++'
rex = re.compile("%s:(?:%s|%s):(?P<lstatus>on|off):optional:" %
(re.escape(spell), re.escape(d), d_p))
match = rex.match(line)
# we matched the line "spell:dependency:on|off:optional:"
if match:
# if we also matched the local status, mark dependency
# as empty and put it back into depends file
if match.group('lstatus') == depends[d]:
depends[d] = None
sys.stdout.write(line)
# status is not that we need, so keep this dependency
# in the list for further reverse switching;
# stop and process the next line in both cases
break
if not match:
sys.stdout.write(line)
else:
sys.stdout.write(line)
except IOError:
module.fail_json(msg="I/O error on the depends file")
finally:
fi.close()
depends_new = [v for v in depends if depends[v]]
if depends_new:
try:
try:
fl = open(sorcery_depends, 'a')
for k in depends_new:
fl.write("%s:%s:%s:optional::\n" % (spell, k, depends[k]))
except IOError:
module.fail_json(msg="I/O error on the depends file")
finally:
fl.close()
depends_ok = False
if module.check_mode:
try:
os.remove(sorcery_depends)
except IOError:
module.fail_json(msg="failed to clean up depends.backup file")
return depends_ok
def manage_spells(module):
""" Cast or dispel spells.
This manages the whole system ('*'), a list of spells or a single spell.
Command 'cast' is used to install or rebuild spells, while 'dispel' takes
care of their removal from the system.
"""
params = module.params
spells = params['name']
sorcery_queue = os.path.join(SORCERY_LOG_DIR, "queue/install")
if spells == '*':
if params['state'] == 'latest':
# back up original queue
try:
os.rename(sorcery_queue, sorcery_queue + ".backup")
except IOError:
module.fail_json(msg="failed to backup the update queue")
# see update_codex()
module.run_command_environ_update.update(dict(SILENT='1'))
cmd_sorcery = "%s queue"
rc, stdout, stderr = module.run_command(cmd_sorcery)
if rc != 0:
module.fail_json(msg="failed to generate the update queue")
try:
queue_size = os.stat(sorcery_queue).st_size
except Exception:
module.fail_json(msg="failed to read the update queue")
if queue_size != 0:
if module.check_mode:
try:
os.rename(sorcery_queue + ".backup", sorcery_queue)
except IOError:
module.fail_json(msg="failed to restore the update queue")
module.exit_json(changed=True, msg="would have updated the system")
cmd_cast = "%s --queue" % SORCERY['cast']
rc, stdout, stderr = module.run_command(cmd_cast)
if rc != 0:
module.fail_json(msg="failed to update the system")
module.exit_json(changed=True, msg="successfully updated the system")
else:
module.exit_json(changed=False, msg="the system is already up to date")
elif params['state'] == 'rebuild':
if module.check_mode:
module.exit_json(changed=True, msg="would have rebuilt the system")
cmd_sorcery = "%s rebuild" % SORCERY['sorcery']
rc, stdout, stderr = module.run_command(cmd_sorcery)
if rc != 0:
module.fail_json(msg="failed to rebuild the system: " + stdout)
module.exit_json(changed=True, msg="successfully rebuilt the system")
else:
module.fail_json(msg="unsupported operation on '*' name value")
else:
if params['state'] in ('present', 'latest', 'rebuild', 'absent'):
# extract versions from the 'gaze' command
cmd_gaze = "%s -q version %s" % (SORCERY['gaze'], ' '.join(spells))
rc, stdout, stderr = module.run_command(cmd_gaze)
# fail if any of spells cannot be found
if rc != 0:
module.fail_json(msg="failed to locate spell(s) in the list (%s)" %
', '.join(spells))
cast_queue = []
dispel_queue = []
rex = re.compile(r"[^|]+\|[^|]+\|(?P<spell>[^|]+)\|(?P<grim_ver>[^|]+)\|(?P<inst_ver>[^$]+)")
# drop 2-line header and empty trailing line
for line in stdout.splitlines()[2:-1]:
match = rex.match(line)
cast = False
if params['state'] == 'present':
# spell is not installed..
if match.group('inst_ver') == '-':
# ..so set up depends reqs for it
match_depends(module)
cast = True
# spell is installed..
else:
# ..but does not conform depends reqs
if not match_depends(module):
cast = True
elif params['state'] == 'latest':
# grimoire and installed versions do not match..
if match.group('grim_ver') != match.group('inst_ver'):
# ..so check for depends reqs first and set them up
match_depends(module)
cast = True
# grimoire and installed versions match..
else:
# ..but the spell does not conform depends reqs
if not match_depends(module):
cast = True
elif params['state'] == 'rebuild':
cast = True
# 'absent'
else:
if match.group('inst_ver') != '-':
dispel_queue.append(match.group('spell'))
if cast:
cast_queue.append(match.group('spell'))
if cast_queue:
if module.check_mode:
module.exit_json(changed=True, msg="would have cast spell(s)")
cmd_cast = "%s -c %s" % (SORCERY['cast'], ' '.join(cast_queue))
rc, stdout, stderr = module.run_command(cmd_cast)
if rc != 0:
module.fail_json(msg="failed to cast spell(s): %s" + stdout)
module.exit_json(changed=True, msg="successfully cast spell(s)")
elif params['state'] != 'absent':
module.exit_json(changed=False, msg="spell(s) are already cast")
if dispel_queue:
if module.check_mode:
module.exit_json(changed=True, msg="would have dispelled spell(s)")
cmd_dispel = "%s %s" % (SORCERY['dispel'], ' '.join(dispel_queue))
rc, stdout, stderr = module.run_command(cmd_dispel)
if rc != 0:
module.fail_json(msg="failed to dispel spell(s): %s" + stdout)
module.exit_json(changed=True, msg="successfully dispelled spell(s)")
else:
module.exit_json(changed=False, msg="spell(s) are already dispelled")
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(default=None, aliases=['spell'], type='list'),
state=dict(default='present', choices=['present', 'latest',
'absent', 'cast', 'dispelled', 'rebuild']),
depends=dict(default=None),
update=dict(default=False, type='bool'),
update_cache=dict(default=False, aliases=['update_codex'], type='bool'),
cache_valid_time=dict(default=0, type='int')
),
required_one_of=[['name', 'update', 'update_cache']],
supports_check_mode=True
)
if os.geteuid() != 0:
module.fail_json(msg="root privileges are required for this operation")
for c in SORCERY:
SORCERY[c] = module.get_bin_path(c, True)
# prepare environment: run sorcery commands without asking questions
module.run_command_environ_update = dict(PROMPT_DELAY='0', VOYEUR='0')
params = module.params
# normalize 'state' parameter
if params['state'] in ('present', 'cast'):
params['state'] = 'present'
elif params['state'] in ('absent', 'dispelled'):
params['state'] = 'absent'
if params['update']:
update_sorcery(module)
if params['update_cache'] or params['state'] == 'latest':
update_codex(module)
if params['name']:
manage_spells(module)
# import module snippets
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
| gpl-3.0 | 4,691,759,462,778,605,000 | -2,175,672,857,582,678,000 | 30.31938 | 108 | 0.549923 | false |
ashrith/dpkt | dpkt/ssl.py | 3 | 19686 | # $Id: ssl.py 90 2014-04-02 22:06:23Z [email protected] $
# Portion Copyright 2012 Google Inc. All rights reserved.
"""Secure Sockets Layer / Transport Layer Security."""
import dpkt
import ssl_ciphersuites
import struct
import binascii
import traceback
import datetime
#
# Note from April 2011: [email protected] added code that parses SSL3/TLS messages more in depth.
#
# Jul 2012: [email protected] modified and extended SSL support further.
#
class SSL2(dpkt.Packet):
__hdr__ = (
('len', 'H', 0),
('msg', 's', ''),
('pad', 's', ''),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.len & 0x8000:
n = self.len = self.len & 0x7FFF
self.msg, self.data = self.data[:n], self.data[n:]
else:
n = self.len = self.len & 0x3FFF
padlen = ord(self.data[0])
self.msg = self.data[1:1+n]
self.pad = self.data[1+n:1+n+padlen]
self.data = self.data[1+n+padlen:]
# SSLv3/TLS versions
SSL3_V = 0x0300
TLS1_V = 0x0301
TLS11_V = 0x0302
TLS12_V = 0x0303
ssl3_versions_str = {
SSL3_V: 'SSL3',
TLS1_V: 'TLS 1.0',
TLS11_V: 'TLS 1.1',
TLS12_V: 'TLS 1.2'
}
SSL3_VERSION_BYTES = set(('\x03\x00', '\x03\x01', '\x03\x02', '\x03\x03'))
# Alert levels
SSL3_AD_WARNING = 1
SSL3_AD_FATAL = 2
alert_level_str = {
SSL3_AD_WARNING: 'SSL3_AD_WARNING',
SSL3_AD_FATAL: 'SSL3_AD_FATAL'
}
# SSL3 alert descriptions
SSL3_AD_CLOSE_NOTIFY = 0
SSL3_AD_UNEXPECTED_MESSAGE = 10 # fatal
SSL3_AD_BAD_RECORD_MAC = 20 # fatal
SSL3_AD_DECOMPRESSION_FAILURE = 30 # fatal
SSL3_AD_HANDSHAKE_FAILURE = 40 # fatal
SSL3_AD_NO_CERTIFICATE = 41
SSL3_AD_BAD_CERTIFICATE = 42
SSL3_AD_UNSUPPORTED_CERTIFICATE = 43
SSL3_AD_CERTIFICATE_REVOKED = 44
SSL3_AD_CERTIFICATE_EXPIRED = 45
SSL3_AD_CERTIFICATE_UNKNOWN = 46
SSL3_AD_ILLEGAL_PARAMETER = 47 # fatal
# TLS1 alert descriptions
TLS1_AD_DECRYPTION_FAILED = 21
TLS1_AD_RECORD_OVERFLOW = 22
TLS1_AD_UNKNOWN_CA = 48 # fatal
TLS1_AD_ACCESS_DENIED = 49 # fatal
TLS1_AD_DECODE_ERROR = 50 # fatal
TLS1_AD_DECRYPT_ERROR = 51
TLS1_AD_EXPORT_RESTRICTION = 60 # fatal
TLS1_AD_PROTOCOL_VERSION = 70 # fatal
TLS1_AD_INSUFFICIENT_SECURITY = 71 # fatal
TLS1_AD_INTERNAL_ERROR = 80 # fatal
TLS1_AD_USER_CANCELLED = 90
TLS1_AD_NO_RENEGOTIATION = 100
#/* codes 110-114 are from RFC3546 */
TLS1_AD_UNSUPPORTED_EXTENSION = 110
TLS1_AD_CERTIFICATE_UNOBTAINABLE = 111
TLS1_AD_UNRECOGNIZED_NAME = 112
TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE = 113
TLS1_AD_BAD_CERTIFICATE_HASH_VALUE = 114
TLS1_AD_UNKNOWN_PSK_IDENTITY = 115 # fatal
# Mapping alert types to strings
alert_description_str = {
SSL3_AD_CLOSE_NOTIFY: 'SSL3_AD_CLOSE_NOTIFY',
SSL3_AD_UNEXPECTED_MESSAGE: 'SSL3_AD_UNEXPECTED_MESSAGE',
SSL3_AD_BAD_RECORD_MAC: 'SSL3_AD_BAD_RECORD_MAC',
SSL3_AD_DECOMPRESSION_FAILURE: 'SSL3_AD_DECOMPRESSION_FAILURE',
SSL3_AD_HANDSHAKE_FAILURE: 'SSL3_AD_HANDSHAKE_FAILURE',
SSL3_AD_NO_CERTIFICATE: 'SSL3_AD_NO_CERTIFICATE',
SSL3_AD_BAD_CERTIFICATE: 'SSL3_AD_BAD_CERTIFICATE',
SSL3_AD_UNSUPPORTED_CERTIFICATE: 'SSL3_AD_UNSUPPORTED_CERTIFICATE',
SSL3_AD_CERTIFICATE_REVOKED: 'SSL3_AD_CERTIFICATE_REVOKED',
SSL3_AD_CERTIFICATE_EXPIRED: 'SSL3_AD_CERTIFICATE_EXPIRED',
SSL3_AD_CERTIFICATE_UNKNOWN: 'SSL3_AD_CERTIFICATE_UNKNOWN',
SSL3_AD_ILLEGAL_PARAMETER: 'SSL3_AD_ILLEGAL_PARAMETER',
TLS1_AD_DECRYPTION_FAILED: 'TLS1_AD_DECRYPTION_FAILED',
TLS1_AD_RECORD_OVERFLOW: 'TLS1_AD_RECORD_OVERFLOW',
TLS1_AD_UNKNOWN_CA: 'TLS1_AD_UNKNOWN_CA',
TLS1_AD_ACCESS_DENIED: 'TLS1_AD_ACCESS_DENIED',
TLS1_AD_DECODE_ERROR: 'TLS1_AD_DECODE_ERROR',
TLS1_AD_DECRYPT_ERROR: 'TLS1_AD_DECRYPT_ERROR',
TLS1_AD_EXPORT_RESTRICTION: 'TLS1_AD_EXPORT_RESTRICTION',
TLS1_AD_PROTOCOL_VERSION: 'TLS1_AD_PROTOCOL_VERSION',
TLS1_AD_INSUFFICIENT_SECURITY: 'TLS1_AD_INSUFFICIENT_SECURITY',
TLS1_AD_INTERNAL_ERROR: 'TLS1_AD_INTERNAL_ERROR',
TLS1_AD_USER_CANCELLED: 'TLS1_AD_USER_CANCELLED',
TLS1_AD_NO_RENEGOTIATION: 'TLS1_AD_NO_RENEGOTIATION',
TLS1_AD_UNSUPPORTED_EXTENSION: 'TLS1_AD_UNSUPPORTED_EXTENSION',
TLS1_AD_CERTIFICATE_UNOBTAINABLE: 'TLS1_AD_CERTIFICATE_UNOBTAINABLE',
TLS1_AD_UNRECOGNIZED_NAME: 'TLS1_AD_UNRECOGNIZED_NAME',
TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE: 'TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE',
TLS1_AD_BAD_CERTIFICATE_HASH_VALUE: 'TLS1_AD_BAD_CERTIFICATE_HASH_VALUE',
TLS1_AD_UNKNOWN_PSK_IDENTITY: 'TLS1_AD_UNKNOWN_PSK_IDENTITY'
}
# struct format strings for parsing buffer lengths
# don't forget, you have to pad a 3-byte value with \x00
_SIZE_FORMATS = ['!B', '!H', '!I', '!I']
def parse_variable_array(buf, lenbytes):
"""
Parse an array described using the 'Type name<x..y>' syntax from the spec
Read a length at the start of buf, and returns that many bytes
after, in a tuple with the TOTAL bytes consumed (including the size). This
does not check that the array is the right length for any given datatype.
"""
# first have to figure out how to parse length
assert lenbytes <= 4 # pretty sure 4 is impossible, too
size_format = _SIZE_FORMATS[lenbytes - 1]
padding = '\x00' if lenbytes == 3 else ''
# read off the length
size = struct.unpack(size_format, padding + buf[:lenbytes])[0]
# read the actual data
data = buf[lenbytes:lenbytes + size]
# if len(data) != size: insufficient data
return data, size + lenbytes
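# Hedged usage sketch (not in the original source): a 1-byte length prefix
# followed by that many payload bytes, e.g.
#   parse_variable_array('\x03abcXYZ', 1) -> ('abc', 4)
# where 4 is the payload length (3) plus the size of the length field (1).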
class SSL3Exception(Exception):
pass
class TLSRecord(dpkt.Packet):
"""
SSLv3 or TLSv1+ packet.
In addition to the fields specified in the header, there are
compressed and decrypted fields, indicating whether, in the language
of the spec, this is a TLSPlaintext, TLSCompressed, or
TLSCiphertext. The application will have to figure out when it's
appropriate to change these values.
"""
__hdr__ = (
('type', 'B', 0),
('version', 'H', 0),
('length', 'H', 0),
)
def __init__(self, *args, **kwargs):
# assume plaintext unless specified otherwise in arguments
self.compressed = kwargs.pop('compressed', False)
self.encrypted = kwargs.pop('encrypted', False)
# parent constructor
dpkt.Packet.__init__(self, *args, **kwargs)
# make sure length and data are consistent
self.length = len(self.data)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
header_length = self.__hdr_len__
self.data = buf[header_length:header_length+self.length]
# make sure buffer was long enough
if len(self.data) != self.length:
raise dpkt.NeedData('TLSRecord data was too short.')
# assume compressed and encrypted when it's been parsed from
# raw data
self.compressed = True
self.encrypted = True
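# Illustrative sketch (not in the original source): constructing a record
# from keyword arguments leaves it marked plaintext, while parsing raw bytes
# marks it compressed/encrypted until the application changes the flags.
#   rec = TLSRecord(type=23, version=0x0301, data='abcdefgh')
#   assert not rec.compressed and not rec.encrypted
#   parsed = TLSRecord('\x17\x03\x01\x00\x08abcdefgh')
#   assert parsed.compressed and parsed.encrypted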
class TLSChangeCipherSpec(dpkt.Packet):
"""
ChangeCipherSpec message is just a single byte with value 1
"""
__hdr__ = (('type', 'B', 1),)
class TLSAppData(str):
"""
As far as TLSRecord is concerned, AppData is just an opaque blob.
"""
pass
class TLSAlert(dpkt.Packet):
__hdr__ = (
('level', 'B', 1),
('description', 'B', 0),
)
class TLSHelloRequest(dpkt.Packet):
__hdr__ = tuple()
class TLSClientHello(dpkt.Packet):
__hdr__ = (
('version', 'H', 0x0301),
('random', '32s', '\x00'*32),
) # the rest is variable-length and has to be done manually
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
# now session, cipher suites, extensions are in self.data
self.session_id, pointer = parse_variable_array(self.data, 1)
# print 'pointer',pointer
# handle ciphersuites
ciphersuites, parsed = parse_variable_array(self.data[pointer:], 2)
pointer += parsed
self.num_ciphersuites = len(ciphersuites) / 2
# check len(ciphersuites) % 2 == 0 ?
# compression methods
compression_methods, parsed = parse_variable_array(
self.data[pointer:], 1)
pointer += parsed
self.num_compression_methods = parsed - 1
self.compression_methods = map(ord, compression_methods)
# extensions
class TLSServerHello(dpkt.Packet):
__hdr__ = (
('version', 'H', 0x0301),
('random', '32s', '\x00'*32),
) # session is variable, forcing rest to be manual
def unpack(self, buf):
try:
dpkt.Packet.unpack(self, buf)
self.session_id, pointer = parse_variable_array(self.data, 1)
# single cipher suite
self.cipher_suite = struct.unpack('!H', self.data[pointer:pointer+2])[0]
pointer += 2
# single compression method
self.compression = struct.unpack('!B', self.data[pointer:pointer+1])[0]
pointer += 1
# ignore extensions for now
except struct.error:
# probably data too short
raise dpkt.NeedData
class TLSUnknownHandshake(dpkt.Packet):
__hdr__ = tuple()
TLSCertificate = TLSUnknownHandshake
TLSServerKeyExchange = TLSUnknownHandshake
TLSCertificateRequest = TLSUnknownHandshake
TLSServerHelloDone = TLSUnknownHandshake
TLSCertificateVerify = TLSUnknownHandshake
TLSClientKeyExchange = TLSUnknownHandshake
TLSFinished = TLSUnknownHandshake
# mapping of handshake type ids to their names
# and the classes that implement them
HANDSHAKE_TYPES = {
0: ('HelloRequest', TLSHelloRequest),
1: ('ClientHello', TLSClientHello),
2: ('ServerHello', TLSServerHello),
11: ('Certificate', TLSCertificate),
12: ('ServerKeyExchange', TLSServerKeyExchange),
13: ('CertificateRequest', TLSCertificateRequest),
14: ('ServerHelloDone', TLSServerHelloDone),
15: ('CertificateVerify', TLSCertificateVerify),
16: ('ClientKeyExchange', TLSClientKeyExchange),
20: ('Finished', TLSFinished),
}
class TLSHandshake(dpkt.Packet):
'''
A TLS Handshake message
This goes for all messages encapsulated in the Record layer, but especially
important for handshakes and app data: A message may be spread across a
number of TLSRecords, in addition to the possibility of there being more
than one in a given Record. You have to put together the contents of
TLSRecords yourself.
'''
# struct.unpack can't handle the 3-byte int, so we parse it as bytes
# (and store it as bytes so dpkt doesn't get confused), and turn it into
# an int in a user-facing property
__hdr__ = (
('type', 'B', 0),
('length_bytes', '3s', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
# Wait, might there be more than one message of self.type?
embedded_type = HANDSHAKE_TYPES.get(self.type, None)
if embedded_type is None:
raise SSL3Exception('Unknown or invalid handshake type %d' %
self.type)
# only take the right number of bytes
self.data = self.data[:self.length]
if len(self.data) != self.length:
raise dpkt.NeedData
# get class out of embedded_type tuple
self.data = embedded_type[1](self.data)
@property
def length(self):
return struct.unpack('!I', '\x00' + self.length_bytes)[0]
RECORD_TYPES = {
20: TLSChangeCipherSpec,
21: TLSAlert,
22: TLSHandshake,
23: TLSAppData,
}
class SSLFactory(object):
def __new__(cls, buf):
v = buf[1:3]
if v in SSL3_VERSION_BYTES:
return SSL3(buf)
# SSL2 has no characteristic header or magic bytes, so we just assume
# that the msg is an SSL2 msg if it is not detected as SSL3+
return SSL2(buf)
def TLSMultiFactory(buf):
'''
Attempt to parse one or more TLSRecord's out of buf
Args:
buf: string containing SSL/TLS messages. May have an incomplete record
on the end
Returns:
[TLSRecord]
int, total bytes consumed, != len(buf) if an incomplete record was left at
the end.
Raises SSL3Exception.
'''
i, n = 0, len(buf)
msgs = []
while i < n:
v = buf[i+1:i+3]
if v in SSL3_VERSION_BYTES:
try:
msg = TLSRecord(buf[i:])
msgs.append(msg)
except dpkt.NeedData:
break
else:
raise SSL3Exception('Bad TLS version in buf: %r' % buf[i:i+5])
i += len(msg)
return msgs, i
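# Hedged usage sketch (not in the original source): feed a reassembled TCP
# stream and keep any unparsed tail for the next call, e.g.
#   records, parsed = TLSMultiFactory(stream_buf)
#   stream_buf = stream_buf[parsed:]  # retains an incomplete trailing record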
import unittest
_hexdecode = binascii.a2b_hex
class TLSRecordTest(unittest.TestCase):
"""
Test basic TLSRecord functionality
For this test, the contents of the record doesn't matter, since we're not
parsing the next layer.
"""
def setUp(self):
# add some extra data, to make sure length is parsed correctly
self.p = TLSRecord('\x17\x03\x01\x00\x08abcdefghzzzzzzzzzzz')
def testContentType(self):
self.assertEqual(self.p.type, 23)
def testVersion(self):
self.assertEqual(self.p.version, 0x0301)
def testLength(self):
self.assertEqual(self.p.length, 8)
def testData(self):
self.assertEqual(self.p.data, 'abcdefgh')
def testInitialFlags(self):
self.assertTrue(self.p.compressed)
self.assertTrue(self.p.encrypted)
def testRepack(self):
p2 = TLSRecord(type=23, version=0x0301, data='abcdefgh')
self.assertEqual(p2.type, 23)
self.assertEqual(p2.version, 0x0301)
self.assertEqual(p2.length, 8)
self.assertEqual(p2.data, 'abcdefgh')
self.assertEqual(p2.pack(), self.p.pack())
def testTotalLength(self):
# that len(p) includes header
self.assertEqual(len(self.p), 13)
def testRaisesNeedDataWhenBufIsShort(self):
self.assertRaises(
dpkt.NeedData,
TLSRecord,
'\x16\x03\x01\x00\x10abc')
class TLSChangeCipherSpecTest(unittest.TestCase):
"It's just a byte. This will be quick, I promise"
def setUp(self):
self.p = TLSChangeCipherSpec('\x01')
def testParses(self):
self.assertEqual(self.p.type, 1)
def testTotalLength(self):
self.assertEqual(len(self.p), 1)
class TLSAppDataTest(unittest.TestCase):
"AppData is basically just a string"
def testValue(self):
d = TLSAppData('abcdefgh')
self.assertEqual(d, 'abcdefgh')
class TLSHandshakeTest(unittest.TestCase):
def setUp(self):
self.h = TLSHandshake('\x00\x00\x00\x01\xff')
def testCreatedInsideMessage(self):
self.assertTrue(isinstance(self.h.data, TLSHelloRequest))
def testLength(self):
self.assertEqual(self.h.length, 0x01)
def testRaisesNeedData(self):
self.assertRaises(dpkt.NeedData, TLSHandshake, '\x00\x00\x01\x01')
class ClientHelloTest(unittest.TestCase):
'This data is extracted from and verified by Wireshark'
def setUp(self):
self.data = _hexdecode(
"01000199" # handshake header
"0301" # version
"5008220ce5e0e78b6891afe204498c9363feffbe03235a2d9e05b7d990eb708d" # rand
"2009bc0192e008e6fa8fe47998fca91311ba30ddde14a9587dc674b11c3d3e5ed1" # session id
# cipher suites
"005400ffc00ac0140088008700390038c00fc00500840035c007c009c011c0130045004400330032c00cc00ec002c0040096004100050004002fc008c01200160013c00dc003feff000ac006c010c00bc00100020001"
"0100" # compresssion methods
# extensions
"00fc0000000e000c0000096c6f63616c686f7374000a00080006001700180019000b00020100002300d0a50b2e9f618a9ea9bf493ef49b421835cd2f6b05bbe1179d8edf70d58c33d656e8696d36d7e7e0b9d3ecc0e4de339552fa06c64c0fcb550a334bc43944e2739ca342d15a9ebbe981ac87a0d38160507d47af09bdc16c5f0ee4cdceea551539382333226048a026d3a90a0535f4a64236467db8fee22b041af986ad0f253bc369137cd8d8cd061925461d7f4d7895ca9a4181ab554dad50360ac31860e971483877c9335ac1300c5e78f3e56f3b8e0fc16358fcaceefd5c8d8aaae7b35be116f8832856ca61144fcdd95e071b94d0cf7233740000"
"FFFFFFFFFFFFFFFF") # random garbage
self.p = TLSHandshake(self.data)
def testClientHelloConstructed(self):
'Make sure the correct class was constructed'
#print self.p
self.assertTrue(isinstance(self.p.data, TLSClientHello))
# def testClientDateCorrect(self):
# self.assertEqual(self.p.random_unixtime, 1342710284)
def testClientRandomCorrect(self):
self.assertEqual(self.p.data.random,
_hexdecode('5008220ce5e0e78b6891afe204498c9363feffbe03235a2d9e05b7d990eb708d'))
def testCipherSuiteLength(self):
# we won't bother testing the identity of each cipher suite in the list.
self.assertEqual(self.p.data.num_ciphersuites, 42)
#self.assertEqual(len(self.p.ciphersuites), 42)
def testSessionId(self):
self.assertEqual(self.p.data.session_id,
_hexdecode('09bc0192e008e6fa8fe47998fca91311ba30ddde14a9587dc674b11c3d3e5ed1'))
def testCompressionMethods(self):
self.assertEqual(self.p.data.num_compression_methods, 1)
def testTotalLength(self):
self.assertEqual(len(self.p), 413)
class ServerHelloTest(unittest.TestCase):
'Again, from Wireshark'
def setUp(self):
self.data = _hexdecode('0200004d03015008220c8ec43c5462315a7c99f5d5b6bff009ad285b51dc18485f352e9fdecd2009bc0192e008e6fa8fe47998fca91311ba30ddde14a9587dc674b11c3d3e5ed10002000005ff01000100')
self.p = TLSHandshake(self.data)
def testConstructed(self):
self.assertTrue(isinstance(self.p.data, TLSServerHello))
# def testDateCorrect(self):
# self.assertEqual(self.p.random_unixtime, 1342710284)
def testRandomCorrect(self):
self.assertEqual(self.p.data.random,
_hexdecode('5008220c8ec43c5462315a7c99f5d5b6bff009ad285b51dc18485f352e9fdecd'))
def testCipherSuite(self):
self.assertEqual(
ssl_ciphersuites.BY_CODE[self.p.data.cipher_suite].name,
'TLS_RSA_WITH_NULL_SHA')
def testTotalLength(self):
self.assertEqual(len(self.p), 81)
class TLSMultiFactoryTest(unittest.TestCase):
"Made up test data"
def setUp(self):
self.data = _hexdecode('1703010010' # header 1
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' # data 1
'1703010010' # header 2
'BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB' # data 2
'1703010010' # header 3
'CCCCCCCC') # data 3 (incomplete)
self.msgs, self.bytes_parsed = TLSMultiFactory(self.data)
def testNumMessages(self):
# only complete messages should be parsed, incomplete ones left
# in buffer
self.assertEqual(len(self.msgs), 2)
def testBytesParsed(self):
self.assertEqual(self.bytes_parsed, (5 + 16) * 2)
def testFirstMsgData(self):
self.assertEqual(self.msgs[0].data, _hexdecode('AA' * 16))
def testSecondMsgData(self):
self.assertEqual(self.msgs[1].data, _hexdecode('BB' * 16))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -4,770,636,658,246,940,000 | 4,348,720,573,657,617,000 | 34.153571 | 518 | 0.644621 | false |
junhuac/MQUIC | src/build/rmdir_and_stamp.py | 11 | 1412 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wipes out a directory recursively and then touches a stamp file.
This odd pairing of operations is used to support build scripts which
slurp up entire directories (e.g. build/android/javac.py when handling
generated sources) as inputs.
The general pattern of use is:
- Add a target which generates |gen_sources| into |out_path| from |inputs|.
- Include |stamp_file| as an input for that target or any of its rules which
generate files in |out_path|.
- Add an action which depends on |inputs| and which outputs |stamp_file|;
the action should run this script and pass |out_path| and |stamp_file| as
its arguments.
The net result is that you will force |out_path| to be wiped and all
|gen_sources| to be regenerated any time any file in |inputs| changes.
See //mojo/mojom_bindings_generator.gypi for an example use case.
"""
import errno
import os
import shutil
import sys
def Main(dst_dir, stamp_file):
try:
shutil.rmtree(os.path.normpath(dst_dir))
except OSError as e:
# Ignore only "not found" errors.
if e.errno != errno.ENOENT:
raise e
with open(stamp_file, 'a'):
os.utime(stamp_file, None)
if __name__ == '__main__':
sys.exit(Main(sys.argv[1], sys.argv[2]))
| mit | 4,978,760,312,448,803,000 | -6,052,035,027,725,673,000 | 30.377778 | 78 | 0.715297 | false |
xarisd/honcho | honcho/test/unit/test_environ.py | 3 | 8050 | # coding=utf-8
import textwrap
from ..helpers import TestCase
from honcho import environ
from honcho import compat
ENVFILE_FIXTURES = [
[
"""
FOO=bar
""",
{'FOO': 'bar'}
],
[
"""
FOO=bar
BAZ=qux
""",
{'FOO': 'bar', 'BAZ': 'qux'}
],
[
# No newline at EOF
"""
FOO=bar""",
{'FOO': 'bar'}
],
[
# Comments
"""
#commented: command
""",
{}
],
[
# Invalid characters
"""
-foo=command
""",
{}
],
[
# Single quoted
"""
MYVAR='hello"world'
""",
{'MYVAR': 'hello"world'}
],
[
# Double quoted
"""
MYVAR="hello'world"
""",
{'MYVAR': "hello'world"}
],
[
# Quotation mark surrounded
r"""
MYVAR='"surrounded"'
""",
{'MYVAR': '"surrounded"'}
],
[
# Escaped quotation mark surrounded
r"""
MYVAR=\"escaped\"
""",
{'MYVAR': '"escaped"'}
],
[
# At-sign in value
r"""
[email protected]
""",
{'MYVAR': '[email protected]'}
],
[
# Much punctuation in value
r"""
MYVAR=~pun|u@|0n$=
""",
{'MYVAR': '~pun|u@|0n$='}
],
[
# Unicode values
r"""
MYVAR=⋃ñᴉ—☪ó∂ǝ
""",
{'MYVAR': '⋃ñᴉ—☪ó∂ǝ'}
],
[
# Unicode keys
r"""
ṀẎṾẠṚ=value
""",
{}
],
[
# Quoted space in value
r"""
MYVAR='sp ace'
""",
{'MYVAR': 'sp ace'}
],
[
# Escaped characters in value
r"""
TABS='foo\tbar'
NEWLINES='foo\nbar'
DOLLAR='foo\$bar'
""",
{'TABS': 'foo\tbar',
'NEWLINES': 'foo\nbar',
'DOLLAR': 'foo\\$bar'}
],
]
PROCFILE_FIXTURES = [
[
# Simple
"""
web: command
""",
{'web': 'command'}
],
[
# Simple 2
"""
foo: python foo.py
bar: python bar.py
""",
{'foo': 'python foo.py', 'bar': 'python bar.py'}
],
[
# No newline at EOF
"""
web: command""",
{'web': 'command'}
],
[
# Comments
"""
#commented: command
""",
{}
],
[
# Invalid characters
"""
-foo: command
""",
{}
],
[
# Shell metacharacters
"""
web: sh -c "echo $FOOBAR" >/dev/null 2>&1
""",
{'web': 'sh -c "echo $FOOBAR" >/dev/null 2>&1'}
],
]
class TestEnviron(TestCase):
def test_environ_parse(self):
for content, commands in ENVFILE_FIXTURES:
content = textwrap.dedent(content)
result = environ.parse(content)
self.assertEqual(result, commands)
class TestProcfileParse(TestCase):
def test_parse_procfiles(self):
for content, processes in PROCFILE_FIXTURES:
content = textwrap.dedent(content)
p = environ.parse_procfile(content)
self.assertEqual(p.processes, processes)
def test_procfile_ordered(self):
content = textwrap.dedent("""
one: onecommand
two: twocommand
three: twocommand
four: fourcommand
""")
p = environ.parse_procfile(content)
order = [k for k in p.processes]
self.assertEqual(['one', 'two', 'three', 'four'], order)
class TestProcfile(TestCase):
def test_init(self):
p = environ.Procfile()
self.assertEqual(0, len(p.processes))
def test_add_process(self):
p = environ.Procfile()
p.add_process('foo', 'echo 123')
self.assertEqual('echo 123', p.processes['foo'])
def test_add_process_ensures_unique_name(self):
p = environ.Procfile()
p.add_process('foo', 'echo 123')
self.assertRaises(AssertionError, p.add_process, 'foo', 'echo 123')
def ep(*args, **kwargs):
return environ.expand_processes(compat.OrderedDict(args), **kwargs)
class TestExpandProcesses(TestCase):
def test_name(self):
p = ep(("foo", "some command"))
self.assertEqual(1, len(p))
self.assertEqual("foo.1", p[0].name)
def test_name_multiple(self):
p = ep(("foo", "some command"), ("bar", "another command"))
self.assertEqual(2, len(p))
self.assertEqual("foo.1", p[0].name)
self.assertEqual("bar.1", p[1].name)
def test_name_concurrency(self):
p = ep(("foo", "some command"), concurrency={"foo": 3})
self.assertEqual(3, len(p))
self.assertEqual("foo.1", p[0].name)
self.assertEqual("foo.2", p[1].name)
self.assertEqual("foo.3", p[2].name)
def test_name_concurrency_multiple(self):
p = ep(("foo", "some command"), ("bar", "another command"),
concurrency={"foo": 3, "bar": 2})
self.assertEqual(5, len(p))
self.assertEqual("foo.1", p[0].name)
self.assertEqual("foo.2", p[1].name)
self.assertEqual("foo.3", p[2].name)
self.assertEqual("bar.1", p[3].name)
self.assertEqual("bar.2", p[4].name)
def test_command(self):
p = ep(("foo", "some command"))
self.assertEqual("some command", p[0].cmd)
def test_port_not_defaulted(self):
p = ep(("foo", "some command"))
self.assertEqual({}, p[0].env)
def test_port(self):
p = ep(("foo", "some command"), port=8000)
self.assertEqual({"PORT": "8000"}, p[0].env)
def test_port_multiple(self):
p = ep(("foo", "some command"),
("bar", "another command"),
port=8000)
self.assertEqual({"PORT": "8000"}, p[0].env)
self.assertEqual({"PORT": "8100"}, p[1].env)
def test_port_from_env(self):
p = ep(("foo", "some command"),
("bar", "another command"),
env={"PORT": 8000})
self.assertEqual({"PORT": "8000"}, p[0].env)
self.assertEqual({"PORT": "8100"}, p[1].env)
def test_port_from_env_coerced_to_number(self):
p = ep(("foo", "some command"), env={"PORT": "5000"})
self.assertEqual({"PORT": "5000"}, p[0].env)
def test_port_from_env_overrides(self):
p = ep(("foo", "some command"), env={"PORT": 5000}, port=8000)
self.assertEqual({"PORT": "5000"}, p[0].env)
def test_port_concurrency(self):
p = ep(("foo", "some command"),
("bar", "another command"),
concurrency={"foo": 3, "bar": 2},
port=4000)
self.assertEqual({"PORT": "4000"}, p[0].env)
self.assertEqual({"PORT": "4001"}, p[1].env)
self.assertEqual({"PORT": "4002"}, p[2].env)
self.assertEqual({"PORT": "4100"}, p[3].env)
self.assertEqual({"PORT": "4101"}, p[4].env)
def test_quiet(self):
p = ep(("foo", "some command"), quiet=["foo", "bar"])
self.assertEqual(True, p[0].quiet)
def test_quiet_multiple(self):
p = ep(("foo", "some command"),
("bar", "another command"),
quiet=["foo"])
self.assertEqual(True, p[0].quiet)
self.assertEqual(False, p[1].quiet)
def test_env(self):
p = ep(("foo", "some command"),
env={"ANIMAL": "giraffe", "DEBUG": "false"})
self.assertEqual("giraffe", p[0].env["ANIMAL"])
self.assertEqual("false", p[0].env["DEBUG"])
def test_env_multiple(self):
p = ep(("foo", "some command"),
("bar", "another command"),
env={"ANIMAL": "giraffe", "DEBUG": "false"})
self.assertEqual("giraffe", p[0].env["ANIMAL"])
self.assertEqual("false", p[0].env["DEBUG"])
self.assertEqual("giraffe", p[1].env["ANIMAL"])
self.assertEqual("false", p[1].env["DEBUG"])
| mit | -7,420,638,664,246,773,000 | 1,712,043,351,245,498,600 | 24.522293 | 75 | 0.482406 | false |
sebadiaz/rethinkdb | test/common/vcoptparse.py | 32 | 9768 | # Copyright 2010-2012 RethinkDB, all rights reserved.
"""
vcoptparse is short for Value-Centric Option Parser. It's a tiny argument parsing library. It has
fewer features than optparse or argparse, but it kicks more ass.
optparse and argparse allow the client to specify the flags that should be parsed, and as an
afterthought specify what keys should appear in the options dictionary when the parse is over.
vcoptparse works the other way around: you specify the keys and how to determine the keys from the
command line. That's why it's called "value-centric".
Here is a simple example:
>>> op = OptParser()
>>> op["verbose"] = BoolFlag("--verbose")
>>> op["count"] = IntFlag("--count", 5) # Default count is 5
>>> op["infiles"] = ManyPositionalArgs()
>>> op.parse(["foo.py", "--count", "5", "file1.txt", "file2.txt"])
{'count': 5, 'verbose': False, 'infiles': ['file1.txt', 'file2.txt']}
"""
class NoValueClass(object):
pass
NoValue = NoValueClass()
class Arg(object):
pass
class OptError(StandardError):
pass
class OptParser(object):
def __init__(self):
self.parsers_by_key = {}
self.parsers_in_order = []
def __setitem__(self, key, parser):
assert isinstance(parser, Arg)
if key in self.parsers_by_key: del self[key]
assert parser not in self.parsers_by_key.values()
self.parsers_by_key[key] = parser
self.parsers_in_order.append((key, parser))
def __getitem__(self, key):
return self.parsers_by_key[key]
def __delitem__(self, key):
self.parsers_in_order.remove((key, self.parsers_by_key[key]))
del self.parsers_by_key[key]
def parse(self, args):
args = args[1:] # Cut off name of program
values = dict((key, NoValue) for key in self.parsers_by_key.keys())
def name_for_key(key):
return getattr(self.parsers_by_key[key], "name", key)
def set_value(key, new_value):
combiner = getattr(self.parsers_by_key[key], "combiner", enforce_one_combiner)
try:
values[key] = combiner(values[key], new_value)
except OptError as e:
raise OptError(str(e) % {"name": name_for_key(key)})
# Build flag table
flags = {}
for key, parser in self.parsers_in_order:
if hasattr(parser, "flags"):
for flag in parser.flags:
assert flag.startswith("-")
if flag in flags:
raise ValueError("The flag %r has two different meanings." % flag)
flags[flag] = (key, parser)
# Handle flag arguments and store positional arguments
positionals = []
while args:
arg = args.pop(0)
if arg.startswith("-"):
if arg in flags:
key, parser = flags[arg]
set_value(key, parser.flag(arg, args))
else:
raise OptError("Don't know how to handle flag %r" % arg)
else:
positionals.append(arg)
# Handle positional arguments
for key, parser in self.parsers_in_order:
if hasattr(parser, "positional"):
set_value(key, parser.positional(positionals))
if positionals:
raise OptError("Unexpected extra positional argument(s): %s" % ", ".join(repr(x) for x in positionals))
# Apply defaults
for key, parser in self.parsers_by_key.iteritems():
if values[key] is NoValue:
if hasattr(parser, "default") and parser.default is not NoValue:
values[key] = parser.default
else:
raise OptError("You need to specify a value for %r" % name_for_key(key))
return values
# Combiners (indicate how to combine repeat specifications of the same flag)
def most_recent_combiner(old, new):
return new
def enforce_one_combiner(old, new):
if old is not NoValue:
raise OptError("%(name)r should only be specified once.")
return new
def append_combiner(old, new):
if old is NoValue: old = []
return old + [new]
# Converters (indicate how to convert from string arguments to values)
def bool_converter(x):
if x.lower() in ["yes", "true", "y", "t"]: return True
elif x.lower() in ["no", "false", "n", "f"]: return False
else: raise OptError("Expected a yes/no value. Got %r." % x)
def int_converter(x):
try: return int(x)
except ValueError: raise OptError("Expected an integer. Got %r." % x)
def float_converter(x):
try: return float(x)
except ValueError: raise OptError("Expected a float. Got %r." % x)
def choice_converter(choices):
def check(x):
if x in choices: return x
else: raise OptError("Expected one of %s. Got %r." % (", ".join(choices), x))
return check
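# Illustrative sketch (not part of the original module): combiners and
# converters plug into the flag classes defined below. For example, a flag
# that may be repeated and whose values must come from a fixed set could be
# declared as:
#
#   op = OptParser()
#   op["colors"] = ValueFlag("--color",
#                            converter=choice_converter(["red", "blue"]),
#                            combiner=append_combiner,
#                            default=[])
#   op.parse(["prog", "--color", "red", "--color", "blue"])
#   # -> {'colors': ['red', 'blue']}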
# Standard argument parsers for common situations
class BoolFlag(Arg):
def __init__(self, arg, invert=False):
assert isinstance(invert, bool)
self.flags = [arg]
self.default = invert
def flag(self, flag, args):
return not self.default
class ChoiceFlags(Arg):
def __init__(self, choices, default = NoValue):
assert all(isinstance(x, str) for x in choices)
self.flags = choices
self.default = default
def flag(self, flag, args):
return flag.lstrip("-")
class ValueFlag(Arg):
def __init__(self, name, converter = str, default = NoValue, combiner = enforce_one_combiner):
assert isinstance(name, str)
assert callable(converter)
assert callable(combiner)
self.flags = [name]
self.converter = converter
self.combiner = combiner
self.default = default
def flag(self, flag, args):
try: value = args.pop(0)
except IndexError:
raise OptError("Flag %r expects an argument." % flag)
try: value2 = self.converter(value)
except OptError as e:
raise OptError("Problem in argument to flag %r: %s" % (flag, e))
return value2
class StringFlag(ValueFlag):
def __init__(self, name, default = NoValue):
ValueFlag.__init__(self, name, str, default = default)
class IntFlag(ValueFlag):
def __init__(self, name, default = NoValue):
ValueFlag.__init__(self, name, int_converter, default = default)
class FloatFlag(ValueFlag):
def __init__(self, name, default = NoValue):
ValueFlag.__init__(self, name, float_converter, default = default)
class ChoiceFlag(ValueFlag):
def __init__(self, name, choices, default = NoValue):
ValueFlag.__init__(self, name, choice_converter(choices), default = default)
class MultiValueFlag(Arg):
def __init__(self, name, converters = [str], default = NoValue, combiner = enforce_one_combiner):
assert isinstance(name, str)
assert all(callable(x) for x in converters)
assert callable(combiner)
self.flags = [name]
self.converters = converters
self.combiner = combiner
self.default = default
def flag(self, flag, args):
new_values = ()
args_gotten = 0
for converter in self.converters:
try: value = args.pop(0)
except IndexError:
raise OptError("Flag %r expects %d argument(s), but only got %d." % (flag, len(self.converters), args_gotten))
try: value2 = converter(value)
except OptError as e:
raise OptError("Problem in argument %d to flag %r: %s" % (args_gotten + 1, flag, e))
new_values += (value2, )
args_gotten += 1
return new_values
class AllArgsAfterFlag(Arg):
def __init__(self, name, converter = str, default = NoValue):
assert isinstance(name, str)
assert callable(converter)
self.flags = [name]
self.converter = converter
self.default = default
def flag(self, flag, args):
args2 = []
for arg in args:
try: args2.append(self.converter(arg))
except OptError as e: raise OptError("For %r: %s" % (flag, e))
del args[:] # We consume all arguments remaining
return args2
class PositionalArg(Arg):
def __init__(self, name = None, converter = str, default = NoValue):
assert callable(converter)
self.name = name
self.converter = converter
self.default = default
def positional(self, args):
try: value = args.pop(0)
except IndexError:
if self.default is NoValue:
if self.name is None:
raise OptError("Too few positional arguments.")
else:
raise OptError("Too few positional arguments; need a value for %r." % self.name)
else:
return NoValue
try: value2 = self.converter(value)
except OptError as e:
if self.name is None: raise
else: raise OptError("For %r: %s" % (self.name, e))
return value2
class ManyPositionalArgs(Arg):
def __init__(self, name = None, converter = str):
assert callable(converter)
self.name = name
self.converter = converter
def positional(self, args):
args2 = []
for arg in args:
try: args2.append(self.converter(arg))
except OptError as e:
if self.name is None: raise
else: raise OptError("For %r: %s" % (self.name, e))
del args[:] # We consume all arguments remaining
return args2
| agpl-3.0 | -2,899,844,262,816,232,400 | 2,921,337,883,612,796,400 | 32.916667 | 126 | 0.586302 | false |
skython/eXe | twisted/internet/iocpreactor/process.py | 14 | 15266 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""Support for IReactorProcess for the IOCP proactor.
API Stability: unstable
Maintainer: U{Justin Johnson<mailto:[email protected]>}
This code is potentially unstable. I have performed numerous tests
but couldn't get someone knowledgeable about win32 to review it.
If you run into problems please submit a bug report to
http://twistedmatrix.com/bugs.
"""
# Win32 imports
import win32api
import win32gui
import win32con
import win32file
import win32pipe
import win32process
import win32security
from win32event import CreateEvent, SetEvent, WaitForSingleObject
from win32event import MsgWaitForMultipleObjects, WAIT_OBJECT_0
from win32event import WAIT_TIMEOUT, INFINITE, QS_ALLINPUT, QS_POSTMESSAGE
from win32event import QS_ALLEVENTS
# Zope & Twisted imports
from zope.interface import implements
from twisted.internet import error
from twisted.python import failure, components
from twisted.python.win32 import cmdLineQuote
from twisted.internet.interfaces import IProcessTransport, IConsumer
# sibling imports
import ops
import process_waiter
# System imports
import os
import sys
import time
import itertools
# Counter for uniquely identifying pipes
counter = itertools.count(1)
class Process(object):
"""A process that integrates with the Twisted event loop.
See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/dllproc/base/creating_a_child_process_with_redirected_input_and_output.asp
for more info on how to create processes in Windows and access their
stdout/err/in. Another good source is http://www.informit.com/articles/article.asp?p=362660&seqNum=2.
Issues:
If your subprocess is a python program, you need to:
- Run python.exe with the '-u' command line option - this turns on
unbuffered I/O. Buffering stdout/err/in can cause problems, see e.g.
http://support.microsoft.com/default.aspx?scid=kb;EN-US;q1903
- (is this still true?) If you don't want Windows messing with data passed over
stdin/out/err, set the pipes to be in binary mode::
        import os, sys, msvcrt
msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
"""
implements(IProcessTransport, IConsumer)
# I used this size because abstract.ConnectedSocket did. I don't
# know why though.
bufferSize = 2**2**2**2
# Per http://www-128.ibm.com/developerworks/linux/library/l-rt4/,
# an extra 24 bytes are needed to handle write header. I haven't seen
# any problems not having the extra 24 bytes though, so I'm not
# adding it to the size. I comment here just in case it is discovered
# to be necessary down the road.
pipeBufferSize = bufferSize
def __init__(self, reactor, protocol, command, args, environment, path):
self.reactor = reactor
self.protocol = protocol
self.outBuffer = reactor.AllocateReadBuffer(self.bufferSize)
self.errBuffer = reactor.AllocateReadBuffer(self.bufferSize)
# This is the buffer for *reading* stdin, which is only done to
# determine if the other end of the pipe was closed.
self.inBuffer = reactor.AllocateReadBuffer(self.bufferSize)
# IO operation classes
self.readOutOp = ops.ReadOutOp(self)
self.readErrOp = ops.ReadErrOp(self)
self.readInOp = ops.ReadInOp(self)
self.writeInOp = ops.WriteInOp(self)
self.writeBuffer = ""
self.writing = False
self.finished = False
self.offset = 0
self.writeBufferedSize = 0
self.closingStdin = False
self.closedStdin = False
self.closedStdout = False
self.closedStderr = False
# Stdio handles
self.hChildStdinRd = None
self.hChildStdinWr = None
self.hChildStdinWrDup = None
self.hChildStdoutRd = None
self.hChildStdoutWr = None
self.hChildStdoutRdDup = None
self.hChildStderrRd = None
self.hChildStderrWr = None
self.hChildStderrRdDup = None
self.closedNotifies = 0 # increments to 3 (for stdin, stdout, stderr)
self.closed = False # set to true when all 3 handles close
        self.exited = False # set to true when the WFMO thread is signalled by the process handle. See doWaitForProcessExit.
# Set the bInheritHandle flag so pipe handles are inherited.
saAttr = win32security.SECURITY_ATTRIBUTES()
saAttr.bInheritHandle = 1
currentPid = win32api.GetCurrentProcess() # -1 which stands for current process
self.pid = os.getpid() # unique pid for pipe naming
# Create a pipe for the child process's STDOUT.
self.stdoutPipeName = r"\\.\pipe\twisted-iocp-stdout-%d-%d-%d" % (self.pid, counter.next(), time.time())
self.hChildStdoutRd = win32pipe.CreateNamedPipe(
self.stdoutPipeName,
win32con.PIPE_ACCESS_INBOUND | win32con.FILE_FLAG_OVERLAPPED, # open mode
win32con.PIPE_TYPE_BYTE, # pipe mode
1, # max instances
self.pipeBufferSize, # out buffer size
self.pipeBufferSize, # in buffer size
0, # timeout
saAttr)
self.hChildStdoutWr = win32file.CreateFile(
self.stdoutPipeName,
win32con.GENERIC_WRITE,
win32con.FILE_SHARE_READ|win32con.FILE_SHARE_WRITE,
saAttr,
win32con.OPEN_EXISTING,
win32con.FILE_FLAG_OVERLAPPED,
            0)
# Create noninheritable read handle and close the inheritable read
# handle.
self.hChildStdoutRdDup = win32api.DuplicateHandle(
currentPid, self.hChildStdoutRd,
currentPid, 0,
0,
win32con.DUPLICATE_SAME_ACCESS)
        win32api.CloseHandle(self.hChildStdoutRd)
self.hChildStdoutRd = self.hChildStdoutRdDup
# Create a pipe for the child process's STDERR.
self.stderrPipeName = r"\\.\pipe\twisted-iocp-stderr-%d-%d-%d" % (self.pid, counter.next(), time.time())
self.hChildStderrRd = win32pipe.CreateNamedPipe(
self.stderrPipeName,
win32con.PIPE_ACCESS_INBOUND | win32con.FILE_FLAG_OVERLAPPED, # open mode
win32con.PIPE_TYPE_BYTE, # pipe mode
1, # max instances
self.pipeBufferSize, # out buffer size
self.pipeBufferSize, # in buffer size
0, # timeout
saAttr)
self.hChildStderrWr = win32file.CreateFile(
self.stderrPipeName,
win32con.GENERIC_WRITE,
win32con.FILE_SHARE_READ|win32con.FILE_SHARE_WRITE,
saAttr,
win32con.OPEN_EXISTING,
win32con.FILE_FLAG_OVERLAPPED,
            0)
# Create noninheritable read handle and close the inheritable read
# handle.
self.hChildStderrRdDup = win32api.DuplicateHandle(
currentPid, self.hChildStderrRd,
currentPid, 0,
0,
win32con.DUPLICATE_SAME_ACCESS)
win32api.CloseHandle(self.hChildStderrRd)
self.hChildStderrRd = self.hChildStderrRdDup
# Create a pipe for the child process's STDIN. This one is opened
# in duplex mode so we can read from it too in order to detect when
# the child closes their end of the pipe.
self.stdinPipeName = r"\\.\pipe\twisted-iocp-stdin-%d-%d-%d" % (self.pid, counter.next(), time.time())
self.hChildStdinWr = win32pipe.CreateNamedPipe(
self.stdinPipeName,
win32con.PIPE_ACCESS_DUPLEX | win32con.FILE_FLAG_OVERLAPPED, # open mode
win32con.PIPE_TYPE_BYTE, # pipe mode
1, # max instances
self.pipeBufferSize, # out buffer size
self.pipeBufferSize, # in buffer size
0, # timeout
saAttr)
self.hChildStdinRd = win32file.CreateFile(
self.stdinPipeName,
win32con.GENERIC_READ,
win32con.FILE_SHARE_READ|win32con.FILE_SHARE_WRITE,
saAttr,
win32con.OPEN_EXISTING,
win32con.FILE_FLAG_OVERLAPPED,
            0)
# Duplicate the write handle to the pipe so it is not inherited.
self.hChildStdinWrDup = win32api.DuplicateHandle(
currentPid, self.hChildStdinWr,
currentPid, 0,
0,
win32con.DUPLICATE_SAME_ACCESS)
win32api.CloseHandle(self.hChildStdinWr)
self.hChildStdinWr = self.hChildStdinWrDup
# set the info structure for the new process. This is where
# we tell the process to use the pipes for stdout/err/in.
StartupInfo = win32process.STARTUPINFO()
StartupInfo.hStdOutput = self.hChildStdoutWr
StartupInfo.hStdError = self.hChildStderrWr
StartupInfo.hStdInput = self.hChildStdinRd
StartupInfo.dwFlags = win32process.STARTF_USESTDHANDLES
# create the process
cmdline = ' '.join([cmdLineQuote(a) for a in args])
self.hProcess, hThread, dwPid, dwTid = win32process.CreateProcess(
command, # name
cmdline, # command line
None, # process security attributes
None, # primary thread security attributes
1, # handles are inherited
0, # creation flags
environment, # if NULL, use parent environment
path, # current directory
StartupInfo) # STARTUPINFO pointer
# close handles which only the child will use
win32file.CloseHandle(self.hChildStderrWr)
win32file.CloseHandle(self.hChildStdoutWr)
win32file.CloseHandle(self.hChildStdinRd)
# Begin reading on stdout and stderr, before we have output on them.
self.readOutOp.initiateOp(self.hChildStdoutRd, self.outBuffer)
self.readErrOp.initiateOp(self.hChildStderrRd, self.errBuffer)
# Read stdin which was opened in duplex mode so we can detect when
# the child closed their end of the pipe.
self.readInOp.initiateOp(self.hChildStdinWr, self.inBuffer)
# When the process is done, call connectionLost().
# This function returns right away. Note I call this after
# protocol.makeConnection to ensure that the protocol doesn't
# have processEnded called before protocol.makeConnection.
self.reactor.processWaiter.beginWait(self.reactor, self.hProcess, self)
# notify protocol by calling protocol.makeConnection and specifying
# ourself as the transport.
self.protocol.makeConnection(self)
def signalProcess(self, signalID):
if signalID in ("INT", "TERM", "KILL"):
win32process.TerminateProcess(self.hProcess, 1)
def startWriting(self):
if not self.writing:
self.writing = True
            b = buffer(self.writeBuffer, self.offset, self.bufferSize)
self.writeInOp.initiateOp(self.hChildStdinWr, b)
def stopWriting(self):
self.writing = False
def writeDone(self, bytes):
self.writing = False
self.offset += bytes
self.writeBufferedSize -= bytes
if self.offset == len(self.writeBuffer):
self.writeBuffer = ""
self.offset = 0
if self.writeBuffer == "":
self.writing = False
# If there's nothing else to write and we're closing,
# do it now.
if self.closingStdin:
self._closeStdin()
self.connectionLostNotify()
else:
self.startWriting()
def write(self, data):
"""Write data to the process' stdin."""
self.writeBuffer += data
self.writeBufferedSize += len(data)
if not self.writing:
self.startWriting()
def writeSequence(self, seq):
"""Write a list of strings to the physical connection.
If possible, make sure that all of the data is written to
the socket at once, without first copying it all into a
single string.
"""
self.write("".join(seq))
def closeStdin(self):
"""Close the process' stdin."""
if not self.closingStdin:
self.closingStdin = True
if not self.writing:
self._closeStdin()
self.connectionLostNotify()
def _closeStdin(self):
if hasattr(self, "hChildStdinWr"):
win32file.CloseHandle(self.hChildStdinWr)
del self.hChildStdinWr
self.closingStdin = False
self.closedStdin = True
def closeStderr(self):
if hasattr(self, "hChildStderrRd"):
win32file.CloseHandle(self.hChildStderrRd)
del self.hChildStderrRd
self.closedStderr = True
self.connectionLostNotify()
def closeStdout(self):
if hasattr(self, "hChildStdoutRd"):
win32file.CloseHandle(self.hChildStdoutRd)
del self.hChildStdoutRd
self.closedStdout = True
self.connectionLostNotify()
def loseConnection(self):
"""Close the process' stdout, in and err."""
self.closeStdin()
self.closeStdout()
self.closeStderr()
def outConnectionLost(self):
self.closeStdout() # in case process closed it, not us
self.protocol.outConnectionLost()
def errConnectionLost(self):
self.closeStderr() # in case process closed it
self.protocol.errConnectionLost()
def inConnectionLost(self):
self._closeStdin()
self.protocol.inConnectionLost()
self.connectionLostNotify()
def connectionLostNotify(self):
"""Will be called 3 times, for stdout/err/in."""
        self.closedNotifies += 1
        if self.closedNotifies == 3:
            self.closed = True
if self.exited:
self.connectionLost()
def processEnded(self):
self.exited = True
# If all 3 stdio handles are closed, call connectionLost
if self.closed:
self.connectionLost()
def connectionLost(self, reason=None):
"""Shut down resources."""
# Get the exit status and notify the protocol
exitCode = win32process.GetExitCodeProcess(self.hProcess)
if exitCode == 0:
err = error.ProcessDone(exitCode)
else:
err = error.ProcessTerminated(exitCode)
self.protocol.processEnded(failure.Failure(err))
## IConsumer
def registerProducer(self, producer, streaming):
pass
def unregisterProducer(self):
pass
components.backwardsCompatImplements(Process)
| gpl-2.0 | -7,430,044,238,898,812,000 | 946,524,447,457,691,800 | 37.844784 | 147 | 0.627931 | false |
endlessm/chromium-browser | third_party/chromite/cli/cros/cros_deploy_unittest.py | 1 | 3712 | # -*- coding: utf-8 -*-
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module tests the cros deploy command."""
from __future__ import print_function
import sys
from chromite.cli import command_unittest
from chromite.cli import deploy
from chromite.cli.cros import cros_deploy
from chromite.lib import commandline
from chromite.lib import cros_test_lib
from chromite.lib import remote_access
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
# pylint: disable=protected-access
class MockDeployCommand(command_unittest.MockCommand):
"""Mock out the deploy command."""
TARGET = 'chromite.cli.cros.cros_deploy.DeployCommand'
TARGET_CLASS = cros_deploy.DeployCommand
COMMAND = 'deploy'
def __init__(self, *args, **kwargs):
command_unittest.MockCommand.__init__(self, *args, **kwargs)
def Run(self, inst):
command_unittest.MockCommand.Run(self, inst)
class CrosDeployTest(cros_test_lib.MockTempDirTestCase,
cros_test_lib.OutputTestCase):
"""Test calling `cros deploy` with various arguments.
These tests just check that arguments as specified on the command
line are properly passed through to deploy. Testing the
actual update flow should be done in the deploy unit tests.
"""
DEVICE = remote_access.TEST_IP
PACKAGES = ['foo', 'bar']
def SetupCommandMock(self, cmd_args):
"""Setup comand mock."""
self.cmd_mock = MockDeployCommand(
cmd_args, base_args=['--cache-dir', self.tempdir])
self.StartPatcher(self.cmd_mock)
def setUp(self):
"""Patches objects."""
self.cmd_mock = None
self.deploy_mock = self.PatchObject(deploy, 'Deploy', autospec=True)
self.run_inside_chroot_mock = self.PatchObject(
commandline, 'RunInsideChroot', autospec=True)
def VerifyDeployParameters(self, device, packages, **kwargs):
"""Verifies the arguments passed to Deployer.Run().
This function helps verify that command line specifications are
parsed properly.
Args:
device: expected device hostname.
packages: expected packages list.
kwargs: keyword arguments expected in the call to Deployer.Run().
Arguments unspecified here are checked against their default
value for `cros deploy`.
"""
deploy_args, deploy_kwargs = self.deploy_mock.call_args
self.assertEqual(device, deploy_args[0].hostname)
self.assertListEqual(packages, deploy_args[1])
# `cros deploy` default options. Must match AddParser().
expected_kwargs = {
'board': None,
'strip': True,
'emerge': True,
'root': '/',
'clean_binpkg': True,
'emerge_args': None,
'ssh_private_key': None,
'ping': True,
'dry_run': False,
'force': False,
'update': False,
'deep': False,
'deep_rev': False}
# Overwrite defaults with any variations in this test.
expected_kwargs.update(kwargs)
self.assertDictEqual(expected_kwargs, deploy_kwargs)
def testDefaults(self):
"""Tests `cros deploy` default values."""
self.SetupCommandMock([self.DEVICE] + self.PACKAGES)
self.cmd_mock.inst.Run()
self.assertTrue(self.run_inside_chroot_mock.called)
self.VerifyDeployParameters(self.DEVICE, self.PACKAGES)
def testDeployError(self):
"""Tests that DeployErrors are passed through."""
with self.OutputCapturer():
self.SetupCommandMock([self.DEVICE] + self.PACKAGES)
self.deploy_mock.side_effect = deploy.DeployError
with self.assertRaises(deploy.DeployError):
self.cmd_mock.inst.Run()
| bsd-3-clause | 8,417,698,370,178,666,000 | 660,989,584,340,603,900 | 32.142857 | 72 | 0.688039 | false |
msteinhoff/foption-bot | src/python/interaction/irc/commands.py | 1 | 42748 | # -*- coding: UTF-8 -*-
"""
$Id$
$URL$
Copyright (c) 2010 foption
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@since Jan 14, 2011
@author Mario Steinhoff
"""
__version__ = '$Rev$'
list = [
'Nick', 'User', 'Mode', 'Quit',
'Join', 'Part', 'Topic', 'Names', 'Invite', 'Kick',
'Privmsg', 'Notice',
'Motd', 'Who', 'Whois',
'Ping', 'Pong',
'WelcomeReply', 'YourHostReply', 'CreatedReply', 'MyInfoReply',
'BounceReply',
'MotdStartReply', 'MotdReply', 'MotdEndReply',
'AwayReply', 'UniqueOpIsReply', 'ChannelModeIsReply', 'InvitingReply',
'TopicReply', 'NoTopicReply',
'WhoisUserReply', 'WhoisServerReply', 'WhoisOperatorReply',
'WhoisIdleReply', 'WhoisChannelsReply', 'quakenet.WhoisAuthReply',
'WhoisEndReply',
'WhoReply', 'WhoEndReply',
'NamesReply', 'NamesEndReply',
'BanListReply', 'BanListEndReply',
'InviteListReply', 'InviteListEndReply',
    'ExceptListReply', 'ExceptListEndReply',
'NoSuchServerError', 'TooManyTargetsError', 'NoOriginError',
'NoRecipientError', 'NoTextToSendError', 'NoToplevelError',
'WildTopLevelError', 'NoMotdError', 'UnavailableResourceError',
'NeedMoreParamsError', 'AlreadyRegisteredError', 'UnknownModeError',
    'RestrictedError', 'UsersDontMatchError',
'NoSuchNickError', 'NoNicknameGivenError', 'ErroneusNicknameError',
'NicknameInUseError', 'NickCollisionError',
'NoSuchChannelError', 'KeySetError', 'ChannelIsFullError',
'InviteOnlyChannelError', 'BannedFromChannelError', 'BadChannelKeyError',
'BadChannelMaskError', 'NoChannelModesError', 'CannotSendToChannelError',
'TooManyChannelsError', 'UserNotInChannelError', 'NotOnChannelError',
'UserOnChannelError', 'ChanOpPrivilegesNeededError',
]
import string
import random
from core.bot import BotError
from interaction.irc.message import Event
#-------------------------------------------------------------------------------
# Exceptions
#-------------------------------------------------------------------------------
class CommandError(BotError): pass
class MissingArgumentError(CommandError): pass
#-------------------------------------------------------------------------------
# Business Logic
#-------------------------------------------------------------------------------
class Command(object):
"""
High-level API for IRC commands.
"""
def __init__(self, client):
self.client = client
def get_receiver(self):
return self.Receiver(self.client)
def get_sender(self):
return self.Sender(self.client)
class Receiver(object):
"""
IRC command receiver.
Respond to incoming IRC events and dispatch them to all
registered listeners.
"""
def __init__(self, client):
"""
Initialize the receiver object.
@param client: The IRC client instance.
"""
self._listener = []
self.client = client
def add_listener(self, callback):
"""
Add a listener to the receiver instance.
The callback function is called everytime a receive event
occured.
@param callback: A pointer to the callback function.
"""
self._listener.append(callback)
def receive(self, event):
"""
Push a receive event to the command handler.
This will first call the internal command logic and then notice
additional listeners about the event. The event itself can be
modified at any time, altough this is not encouraged.
@param event: The event object.
"""
self._receive(event)
[callback(event) for callback in self._listener]
def _receive(self, event):
"""
Implement general command logic for receive events.
This method can be overriden in sub-classes to implement
module-independent logic.
@param event: The event.
"""
pass
class Sender(object):
def __init__(self, client):
self.client = client
def check_attr(self, attr):
if not hasattr(self, attr):
raise MissingArgumentError(attr)
def create_event(self, container, parameters):
return Event(None, container.token, parameters)
def send(self):
"""
Push a send event to the command handler.
This enables a high-level API for IRC commands. Each command
handler can define class attributes for clean user input and
format input data according to the IRC specifications.
"""
self.client.send_event(self._send())
def _send(self):
"""
Implement general command logic for receive events.
@return An event to send.
"""
pass
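# Usage sketch (illustrative, not part of the original module): client code
# obtains a sender for a command, sets the attributes that its _send() checks
# via check_attr(), and calls send():
#
#   privmsg = client.get_command('Privmsg').get_sender()
#   privmsg.target = '#channel'
#   privmsg.text = 'hello world'
#   privmsg.send()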
"""-------------------------------------------------------------------------
Section: 3.1 Connection Registration
----------------------------------------------------------------------------
The commands described here are used to register a connection with an
IRC server as a user as well as to correctly disconnect.
A "PASS" command is not required for a client connection to be
registered, but it MUST precede the latter of the NICK/USER
combination (for a user connection) or the SERVICE command (for a
service connection). The RECOMMENDED order for a client to register
is as follows:
1. Pass message
2. Nick message 2. Service message
3. User message
Upon success, the client will receive an RPL_WELCOME (for users) or
RPL_YOURESERVICE (for services) message indicating that the
connection is now registered and known the to the entire IRC network.
The reply message MUST contain the full client identifier upon which
it was registered.
----------------------------------------------------------------------------
3.1.1 Password message .................................. 10 - not needed
3.1.2 Nick message ...................................... 10 - needed
3.1.3 User message ...................................... 11 - needed
3.1.4 Oper message ...................................... 12 - not needed
3.1.5 User mode message ................................. 12 - needed
3.1.6 Service message ................................... 13 - not needed
3.1.7 Quit .............................................. 14 - needed
3.1.8 Squit ............................................. 15 - not needed
-------------------------------------------------------------------------"""
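# Registration sketch (illustrative; assumes a freshly connected client):
#
#   nick = client.get_command('Nick').get_sender()
#   nick.nickname = 'Bot'
#   nick.send()
#
#   user = client.get_command('User').get_sender()
#   user.ident = 'bot'
#   user.realname = 'Example Bot'
#   user.send()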
class Nick(Command):
"""
Command: NICK
Parameters: <nickname>
NICK command is used to give user a nickname or change the existing
one.
Numeric Replies:
ERR_NONICKNAMEGIVEN ERR_ERRONEUSNICKNAME
ERR_NICKNAMEINUSE ERR_NICKCOLLISION
ERR_UNAVAILRESOURCE ERR_RESTRICTED
"""
token = 'NICK'
class Receiver(Command.Receiver):
def _receive(self, event):
"""
Update the client's identity with the current nickname.
"""
if event.source.nickname == self.client.me.source.nickname:
self.client.me.rename(event.parameter[0])
class Sender(Command.Sender):
def _send(self):
"""
Send a request to set/change the client's nickname.
"""
self.check_attr('nickname')
return self.create_event(Nick, [self.nickname])
class User(Command):
"""
Command: USER
Parameters: <user> <mode> <unused> <realname>
The USER command is used at the beginning of connection to specify
the username, hostname and realname of a new user.
The <mode> parameter should be a numeric, and can be used to
automatically set user modes when registering with the server. This
parameter is a bitmask, with only 2 bits having any signification: if
the bit 2 is set, the user mode 'w' will be set and if the bit 3 is
set, the user mode 'i' will be set. (See Section 3.1.5 "User
Modes").
The <realname> may contain space characters.
Numeric Replies:
ERR_NEEDMOREPARAMS ERR_ALREADYREGISTRED
"""
    token = 'USER'
class Sender(Command.Sender):
def _send(self):
"""
Register with the IRC server.
"""
self.check_attr('ident')
self.check_attr('realname')
            return self.create_event(User, [self.ident, '0', '*', self.realname])
class Mode(Command):
"""
Because user mode message and channel mode are using the same command,
user mode and channel mode logic are implemented in the same class at
the user section.
Command: MODE
Parameters: <nickname>
*( ( "+" / "-" ) *( "i" / "w" / "o" / "O" / "r" ) )
The user MODE's are typically changes which affect either how the
client is seen by others or what 'extra' messages the client is sent.
[...] If no other parameter is given, then the server will return
the current settings for the nick.
The available modes are as follows:
a - user is flagged as away;
i - marks a users as invisible;
w - user receives wallops;
r - restricted user connection;
o - operator flag;
O - local operator flag;
s - marks a user for receipt of server notices.
Additional modes may be available later on.
[...]
Numeric Replies:
ERR_NEEDMOREPARAMS ERR_USERSDONTMATCH
ERR_UMODEUNKNOWNFLAG RPL_UMODEIS
[...]
Command: MODE
Parameters: <channel> *( ( "-" / "+" ) *<modes> *<modeparams> )
The MODE command is provided so that users may query and change the
characteristics of a channel. For more details on available modes
and their uses, see "Internet Relay Chat: Channel Management" [IRC-
CHAN]. Note that there is a maximum limit of three (3) changes per
command for modes that take a parameter.
Numeric Replies:
ERR_NEEDMOREPARAMS ERR_KEYSET
ERR_NOCHANMODES ERR_CHANOPRIVSNEEDED
ERR_USERNOTINCHANNEL ERR_UNKNOWNMODE
RPL_CHANNELMODEIS
RPL_BANLIST RPL_ENDOFBANLIST
RPL_EXCEPTLIST RPL_ENDOFEXCEPTLIST
RPL_INVITELIST RPL_ENDOFINVITELIST
RPL_UNIQOPIS
"""
token = 'MODE'
class Receiver(Command.Receiver):
def _receive(self, event):
pass
class Sender(Command.Sender):
def _send(self):
pass
class Quit(Command):
"""
3.1.7 Quit
Command: QUIT
Parameters: [ <Quit Message> ]
A client session is terminated with a quit message. The server
acknowledges this by sending an ERROR message to the client.
Numeric Replies:
None.
Example:
QUIT :Gone to have lunch ; Preferred message format.
:[email protected] QUIT :Gone to have lunch ; User
syrk has quit IRC to have lunch.
"""
token = 'QUIT'
class Receiver(Command.Receiver):
def _receive(self, event):
pass
class Sender(Command.Sender):
def _send(self):
"""
Send a quit command with a optional quit message.
@param message: The quit message.
"""
parameter = []
if hasattr(self, 'message') and self.message is not None:
parameter.append(self.message)
return self.create_event(Quit, parameter)
"""
----------------------------------------------------------------------------
Section: 3.2 Channel operations
----------------------------------------------------------------------------
This group of messages is concerned with manipulating channels, their
properties (channel modes), and their contents (typically users).
For this reason, these messages SHALL NOT be made available to
services.
All of these messages are requests which will or will not be granted
by the server. The server MUST send a reply informing the user
whether the request was granted, denied or generated an error. When
the server grants the request, the message is typically sent back
(eventually reformatted) to the user with the prefix set to the user
itself.
----------------------------------------------------------------------------
3.2.1 Join message ...................................... 16 - needed
3.2.2 Part message ...................................... 17 - needed
3.2.3 Channel mode message .............................. 18 - needed
3.2.4 Topic message ..................................... 19 - needed
3.2.5 Names message ..................................... 20 - needed
3.2.6 List message ...................................... 21 - not needed
3.2.7 Invite message .................................... 21 - not needed (maybe implemented in the future)
3.2.8 Kick command ...................................... 22 - needed
-------------------------------------------------------------------------"""
class Join(Command):
"""
Command: JOIN
Parameters: ( <channel> *( "," <channel> ) [ <key> *( "," <key> ) ] )
/ "0"
The JOIN command is used by a user to request to start listening to
the specific channel. Servers MUST be able to parse arguments in the
form of a list of target, but SHOULD NOT use lists when sending JOIN
messages to clients.
Once a user has joined a channel, he receives information about
all commands his server receives affecting the channel. This
includes JOIN, MODE, KICK, PART, QUIT and of course PRIVMSG/NOTICE.
This allows channel members to keep track of the other channel
members, as well as channel modes.
If a JOIN is successful, the user receives a JOIN message as
confirmation and is then sent the channel's topic (using RPL_TOPIC) and
the list of users who are on the channel (using RPL_NAMREPLY), which
MUST include the user joining.
[...]
Numeric Replies:
ERR_NEEDMOREPARAMS ERR_BANNEDFROMCHAN
ERR_INVITEONLYCHAN ERR_BADCHANNELKEY
ERR_CHANNELISFULL ERR_BADCHANMASK
ERR_NOSUCHCHANNEL ERR_TOOMANYCHANNELS
ERR_TOOMANYTARGETS ERR_UNAVAILRESOURCE
RPL_TOPIC
"""
token = 'JOIN'
class Sender(Command.Sender):
def _send(self):
"""
Join a channel.
@param channels: The channel names.
@param keys: The optional channel keys.
"""
            if not hasattr(self, 'channels') or self.channels is None:
parameter = ['0']
else:
parameter = [','.join(self.channels)]
if hasattr(self, 'keys') and self.keys is not None:
parameter.append(','.join(self.keys))
return self.create_event(Join, parameter)
class Part(Command):
"""
Command: PART
Parameters: <channel> *( "," <channel> ) [ <Part Message> ]
The PART command causes the user sending the message to be removed
from the list of active members for all given channels listed in the
parameter string. If a "Part Message" is given, this will be sent
instead of the default message, the nickname. This request is always
granted by the server.
Servers MUST be able to parse arguments in the form of a list of
target, but SHOULD NOT use lists when sending PART messages to
clients.
Numeric Replies:
ERR_NEEDMOREPARAMS ERR_NOSUCHCHANNEL
ERR_NOTONCHANNEL
"""
token = 'PART'
class Sender(Command.Sender):
def _send(self):
"""
Part a channel.
@param channel: The channel name.
@param message: The optional part message.
"""
self.check_attr('channel')
parameter = [self.channel]
            if hasattr(self, 'message') and self.message is not None:
parameter.append(self.message)
return self.create_event(Part, parameter)
class Topic(Command):
"""
Command: TOPIC
Parameters: <channel> [ <topic> ]
The TOPIC command is used to change or view the topic of a channel.
The topic for channel <channel> is returned if there is no <topic>
given. If the <topic> parameter is present, the topic for that
channel will be changed, if this action is allowed for the user
requesting it. If the <topic> parameter is an empty string, the
topic for that channel will be removed.
Numeric Replies:
ERR_NEEDMOREPARAMS ERR_NOTONCHANNEL
RPL_NOTOPIC RPL_TOPIC
ERR_CHANOPRIVSNEEDED ERR_NOCHANMODES
"""
token = 'TOPIC'
class Sender(Command.Sender):
def _send(self):
"""
Get/set a channels topic.
"""
self.check_attr('channel')
parameter = [self.channel]
if hasattr(self, 'topic') and self.topic is not None:
parameter.append(self.topic)
return self.create_event(Topic, parameter)
class Names(Command):
"""
Command: NAMES
Parameters: [ <channel> *( "," <channel> ) [ <target> ] ]
By using the NAMES command, a user can list all nicknames that are
visible to him. For more details on what is visible and what is not,
see "Internet Relay Chat: Channel Management" [IRC-CHAN]. The
<channel> parameter specifies which channel(s) to return information
about. There is no error reply for bad channel names.
If no <channel> parameter is given, a list of all channels and their
occupants is returned. At the end of this list, a list of users who
are visible but either not on any channel or not on a visible channel
are listed as being on `channel' "*".
If the <target> parameter is specified, the request is forwarded to
that server which will generate the reply.
Wildcards are allowed in the <target> parameter.
Numerics:
ERR_TOOMANYMATCHES ERR_NOSUCHSERVER
RPL_NAMREPLY RPL_ENDOFNAMES
"""
token = 'NAMES'
class Sender(Command.Sender):
def _send(self):
"""
Request a NAMES list.
"""
self.check_attr('channels')
return self.create_event(Names, [','.join(self.channels)])
class Invite(Command):
"""
Command: INVITE
Parameters: <nickname> <channel>
The INVITE command is used to invite a user to a channel. The
parameter <nickname> is the nickname of the person to be invited to
the target channel <channel>. There is no requirement that the
channel the target user is being invited to must exist or be a valid
channel. However, if the channel exists, only members of the channel
are allowed to invite other users. When the channel has invite-only
flag set, only channel operators may issue INVITE command.
Only the user inviting and the user being invited will receive
notification of the invitation. Other channel members are not
notified. (This is unlike the MODE changes, and is occasionally the
source of trouble for users.)
Numeric Replies:
ERR_NEEDMOREPARAMS ERR_NOSUCHNICK
ERR_NOTONCHANNEL ERR_USERONCHANNEL
ERR_CHANOPRIVSNEEDED
RPL_INVITING RPL_AWAY
"""
token = 'INVITE'
class Sender(Command.Sender):
def _send(self):
self.check_attr('nickname')
self.check_attr('channel')
return self.create_event(Invite, [self.nickname, self.channel])
class Kick(Command):
"""
Command: KICK
Parameters: <channel> *( "," <channel> ) <user> *( "," <user> )
[<comment>]
The KICK command can be used to request the forced removal of a user
from a channel. It causes the <user> to PART from the <channel> by
force. For the message to be syntactically correct, there MUST be
either one channel parameter and multiple user parameter, or as many
channel parameters as there are user parameters. If a "comment" is
given, this will be sent instead of the default message, the nickname
of the user issuing the KICK.
The server MUST NOT send KICK messages with multiple channels or
users to clients. This is necessarily to maintain backward
compatibility with old client software.
Numeric Replies:
ERR_NEEDMOREPARAMS ERR_NOSUCHCHANNEL
ERR_BADCHANMASK ERR_CHANOPRIVSNEEDED
ERR_USERNOTINCHANNEL ERR_NOTONCHANNEL
"""
token = 'KICK'
class Sender(Command.Sender):
def _send(self):
self.check_attr('channels')
self.check_attr('users')
parameter = [','.join(self.channels), ','.join(self.users)]
if hasattr(self, 'message') and self.message is not None:
parameter.append(self.message)
return self.create_event(Kick, parameter)
"""
----------------------------------------------------------------------------
Section: 3.3 Sending messages
----------------------------------------------------------------------------
The main purpose of the IRC protocol is to provide a base for clients
to communicate with each other. PRIVMSG, NOTICE and SQUERY
(described in Section 3.5 on Service Query and Commands) are the only
messages available which actually perform delivery of a text message
from one client to another - the rest just make it possible and try
to ensure it happens in a reliable and structured manner.
----------------------------------------------------------------------------
3.3.1 Private messages .................................. 23 - needed
3.3.2 Notice ............................................ 24 - needed
-------------------------------------------------------------------------"""
class Privmsg(Command):
"""
Command: PRIVMSG
Parameters: <msgtarget> <text to be sent>
PRIVMSG is used to send private messages between users, as well as to
send messages to channels. <msgtarget> is usually the nickname of
the recipient of the message, or a channel name.
The <msgtarget> parameter may also be a host mask (#<mask>) or server
mask ($<mask>). In both cases the server will only send the PRIVMSG
to those who have a server or host matching the mask. The mask MUST
have at least 1 (one) "." in it and no wildcards following the last
".". This requirement exists to prevent people sending messages to
"#*" or "$*", which would broadcast to all users. Wildcards are the
'*' and '?' characters. This extension to the PRIVMSG command is
only available to operators.
Numeric Replies:
ERR_NORECIPIENT ERR_NOTEXTTOSEND
ERR_CANNOTSENDTOCHAN ERR_NOTOPLEVEL
ERR_WILDTOPLEVEL ERR_TOOMANYTARGETS
ERR_NOSUCHNICK
RPL_AWAY
"""
token = 'PRIVMSG'
class Receiver(Command.Receiver):
def _receive(self, event):
if not event.parameter[0].startswith('#') and event.parameter[1] == 'fotzenscheisse':
self.client.stop()
class Sender(Command.Sender):
def _send(self):
self.check_attr('target')
self.check_attr('text')
return self.create_event(Privmsg, [self.target, self.text])
class Notice(Command):
"""
Command: NOTICE
Parameters: <msgtarget> <text>
The NOTICE command is used similarly to PRIVMSG. The difference
between NOTICE and PRIVMSG is that automatic replies MUST NEVER be
sent in response to a NOTICE message. This rule applies to servers
too - they MUST NOT send any error reply back to the client on
receipt of a notice. The object of this rule is to avoid loops
between clients automatically sending something in response to
something it received.
This command is available to services as well as users.
This is typically used by services, and automatons (clients with
either an AI or other interactive program controlling their actions).
See PRIVMSG for more details on replies and examples.
"""
token = 'NOTICE'
class Sender(Command.Sender):
def _send(self):
self.check_attr('target')
self.check_attr('text')
return self.create_event(Notice, [self.target, self.text])
"""
----------------------------------------------------------------------------
Section: 3.4 Server queries and commands
----------------------------------------------------------------------------
3.4.1 Motd message ...................................... 25 - needed
3.4.2 Lusers message .................................... 25 - not needed
3.4.3 Version message ................................... 26 - not needed
3.4.4 Stats message ..................................... 26 - not needed
3.4.5 Links message ..................................... 27 - not needed
3.4.6 Time message ...................................... 28 - not needed
3.4.7 Connect message ................................... 28 - not needed
3.4.8 Trace message ..................................... 29 - not needed
3.4.9 Admin command ..................................... 30 - not needed
3.4.10 Info command ...................................... 31 - not needed
-------------------------------------------------------------------------"""
class Motd(Command):
"""
Command: MOTD
Parameters: [ <target> ]
The MOTD command is used to get the "Message Of The Day" of the given
server, or current server if <target> is omitted.
Wildcards are allowed in the <target> parameter.
Numeric Replies:
RPL_MOTDSTART RPL_MOTD
RPL_ENDOFMOTD ERR_NOMOTD
"""
token = 'MOTD'
class Sender(Command.Sender):
def _send(self):
parameter = []
if hasattr(self, 'target') and self.target is not None:
parameter.append(self.target)
return self.create_event(Motd, parameter)
"""
----------------------------------------------------------------------------
Section: 3.5 Service query and commands
----------------------------------------------------------------------------
3.5.1 Servlist message .................................. 31 - not needed
3.5.2 Squery ............................................ 32 - not needed
-------------------------------------------------------------------------"""
"""
----------------------------------------------------------------------------
Section: 3.6 User based queries
----------------------------------------------------------------------------
3.6.1 Who query ......................................... 32 - needed
3.6.2 Whois query ....................................... 33 - needed
3.6.3 Whowas ............................................ 34 - not needed
-------------------------------------------------------------------------"""
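# Query sketch (illustrative): request an extended WHOIS for a user:
#
#   whois = client.get_command('Whois').get_sender()
#   whois.user = 'SomeNick'
#   whois.send()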
class Who(Command):
"""
Command: WHO
Parameters: [ <mask> [ "o" ] ]
The WHO command is used by a client to generate a query which returns
a list of information which 'matches' the <mask> parameter given by
the client. In the absence of the <mask> parameter, all visible
(users who aren't invisible (user mode +i) and who don't have a
common channel with the requesting client) are listed. The same
result can be achieved by using a <mask> of "0" or any wildcard which
will end up matching every visible user.
The <mask> passed to WHO is matched against users' host, server, real
name and nickname if the channel <mask> cannot be found.
If the "o" parameter is passed only operators are returned according
to the <mask> supplied.
Numeric Replies:
ERR_NOSUCHSERVER
RPL_WHOREPLY RPL_ENDOFWHO
"""
token = 'WHO'
class Sender(Command.Sender):
def _send(self):
self.check_attr('mask')
parameter = [self.mask]
if hasattr(self, 'operators') and self.operators:
parameter.append('o')
return self.create_event(Who, parameter)
class Whois(Command):
"""
Command: WHOIS
Parameters: [ <target> ] <mask> *( "," <mask> )
This command is used to query information about particular user.
The server will answer this command with several numeric messages
indicating different statuses of each user which matches the mask (if
you are entitled to see them). If no wildcard is present in the
<mask>, any information about that nick which you are allowed to see
is presented.
If the <target> parameter is specified, it sends the query to a
specific server. It is useful if you want to know how long the user
in question has been idle as only local server (i.e., the server the
user is directly connected to) knows that information, while
everything else is globally known.
Wildcards are allowed in the <target> parameter.
Numeric Replies:
ERR_NOSUCHSERVER ERR_NONICKNAMEGIVEN
RPL_WHOISUSER RPL_WHOISCHANNELS
RPL_WHOISCHANNELS RPL_WHOISSERVER
RPL_AWAY RPL_WHOISOPERATOR
RPL_WHOISIDLE ERR_NOSUCHNICK
RPL_ENDOFWHOIS
"""
token = 'WHOIS'
class Sender(Command.Sender):
def _send(self):
self.check_attr('user')
parameter = []
if hasattr(self, 'server') and self.server is not None:
parameter.append(self.server)
# add user 2x for extended whois
parameter.append(self.user)
parameter.append(self.user)
return self.create_event(Whois, parameter)
"""
----------------------------------------------------------------------------
Section: 3.7 Miscellaneous messages
----------------------------------------------------------------------------
3.7.1 Kill message ...................................... 35 - not needed
3.7.2 Ping message ...................................... 36 - needed
3.7.3 Pong message ...................................... 37 - needed
3.7.4 Error ............................................. 37 - not needed
-------------------------------------------------------------------------"""
class Ping(Command):
"""
Command: PING
Parameters: <server1> [ <server2> ]
The PING command is used to test the presence of an active client or
server at the other end of the connection. Servers send a PING
message at regular intervals if no other activity detected coming
from a connection. If a connection fails to respond to a PING
message within a set amount of time, that connection is closed. A
PING message MAY be sent even if the connection is active.
When a PING message is received, the appropriate PONG message MUST be
sent as reply to <server1> (server which sent the PING message out)
as soon as possible. If the <server2> parameter is specified, it
represents the target of the ping, and the message gets forwarded
there.
Numeric Replies:
ERR_NOORIGIN ERR_NOSUCHSERVER
"""
token = 'PING'
class Receiver(Command.Receiver):
def _receive(self, event):
pong = self.client.get_command('Pong').get_sender()
if len(event.parameter) == 1:
pong.server = event.parameter[0]
if len(event.parameter) == 2:
pong.server = event.parameter[0]
pong.server2 = event.parameter[1]
pong.send()
class Pong(Command):
"""
Command: PONG
Parameters: <server> [ <server2> ]
PONG message is a reply to ping message. If parameter <server2> is
given, this message MUST be forwarded to given target. The <server>
parameter is the name of the entity who has responded to PING message
and generated this message.
Numeric Replies:
ERR_NOORIGIN ERR_NOSUCHSERVER
"""
token = 'PONG'
class Sender(Command.Sender):
def _send(self):
self.check_attr('server')
parameter = [self.server]
if hasattr(self, 'server2') and self.server2:
parameter.append(self.server2)
return self.create_event(Pong, parameter)
"""-----------------------------------------------------------------------------
5. Replies .................................................... 43
5.1 Command responses ...................................... 43
5.2 Error Replies .......................................... 53
5.3 Reserved numerics ...................................... 59
Numerics in the range from 001 to 099 are used for client-server
connections only and should never travel between servers.
-----------------------------------------------------------------------------"""
class WelcomeReply(Command):
token = '001'
class YourHostReply(Command):
token = '002'
class CreatedReply(Command):
token = '003'
class MyInfoReply(Command):
token = '004'
class BounceReply(Command):
token = '005'
"""-----------------------------------------------------------------------------
Replies generated in the response to commands are found in the
range from 200 to 399.
-----------------------------------------------------------------------------"""
class AwayReply(Command):
token = '301'
class WhoisUserReply(Command):
token = '311'
class WhoisServerReply(Command):
token = '312'
class WhoisOperatorReply(Command):
token = '313'
class WhoEndReply(Command):
token = '315'
class WhoisIdleReply(Command):
token = '317'
class WhoisEndReply(Command):
token = '318'
class WhoisChannelsReply(Command):
token = '319'
class UniqueOpIsReply(Command):
token = '325'
class ChannelModeIsReply(Command):
token = '324'
class NoTopicReply(Command):
token = '331'
class TopicReply(Command):
token = '332'
class InvitingReply(Command):
token = '341'
class InviteListReply(Command):
token = '346'
class InviteListEndReply(Command):
token = '347'
class ExceptListReply(Command):
token = '348'
class ExceptListEndReply(Command):
token = '349'
class WhoReply(Command):
token = '352'
class NamesReply(Command):
"""
353 RPL_NAMREPLY
"( "=" / "*" / "@" ) <channel>
:[ "@" / "+" ] <nick> *( " " [ "@" / "+" ] <nick> )
"@" is used for secret channels
"*" for private channels, and
"=" for others (public channels).
"""
token = '353'
class NamesEndReply(Command):
token = '366'
class BanListReply(Command):
token = '367'
class BanListEndReply(Command):
token = '368'
class MotdReply(Command):
token = '372'
class MotdStartReply(Command):
token = '375'
class MotdEndReply(Command):
token = '376'
class Receiver(Command.Receiver):
def _receive(self, event):
self.client.post_connect()
"""-----------------------------------------------------------------------------
Error replies are found in the range from 400 to 599.
-----------------------------------------------------------------------------"""
class NoSuchNickError(Command):
token = '401'
class NoSuchServerError(Command):
token = '402'
class NoSuchChannelError(Command):
token = '403'
class CannotSendToChannelError(Command):
token = '404'
class TooManyChannelsError(Command):
token = '405'
class TooManyTargetsError(Command):
token = '407'
class NoOriginError(Command):
token = '409'
class NoRecipientError(Command):
token = '411'
class NoTextToSendError(Command):
token = '412'
class NoToplevelError(Command):
token = '413'
class WildTopLevelError(Command):
token = '414'
class NoMotdError(Command):
token = '422'
class Receiver(Command.Receiver):
def _receive(self, event):
self.client.post_connect()
class NoNicknameGivenError(Command):
token = '431'
class Receiver(Command.Receiver):
def _receive(self, event):
nick = self.client.get_command('Nick').get_sender()
nick.nickname = self.client.config.get('nickname')
if len(nick.nickname) == 0:
nick.nickname = self.client.config.get('anickname')
if len(nick.nickname) == 0:
nick.nickname = 'Bot-' + ''.join(random.choice(string.ascii_uppercase) for x in range(3))
self.client.logger.info(
                'No nickname was given, trying to use %s',
                nick.nickname
)
nick.send()
class ErroneusNicknameError(Command):
token = '432'
class Receiver(Command.Receiver):
def _receive(self, event):
nick = self.client.get_command('Nick').get_sender()
nick.nickname = 'Bot-' + ''.join(random.choice(string.ascii_uppercase) for x in range(3))
self.client.logger.info(
'Requested nickname %s is not valid on network, trying to use %s instead',
self.client.me.source.nickname,
nick.nickname
)
nick.send()
class NicknameInUseError(Command):
token = '433'
class Receiver(Command.Receiver):
def _receive(self, event):
nick = self.client.get_command('Nick').get_sender()
nick.nickname = self.client.config.get('anickname')
if nick.nickname == self.client.me.source.nickname:
# TODO honor NICKLEN from BounceReply
nickname_length = 15 #quakenet default, hardcoded
random_length = 3 #chosen by fair dice roll
nickname_maxlength = nickname_length - random_length
                nick.nickname = nick.nickname[:nickname_maxlength] # truncate so the random suffix still fits
nick.nickname += ''.join(random.choice(string.ascii_uppercase) for x in range(random_length))
self.client.logger.info(
'Requested nickname %s is already used on network, trying to use %s instead',
self.client.me.source.nickname,
nick.nickname
)
nick.send()
class NickCollisionError(Command):
token = '436'
class UnavailableResourceError(Command):
token = '437'
class UserNotInChannelError(Command):
token = '441'
class NotOnChannelError(Command):
token = '442'
class UserOnChannelError(Command):
token = '443'
class NeedMoreParamsError(Command):
token = '461'
class AlreadyRegisteredError(Command):
token = '462'
class KeySetError(Command):
token = '467'
class ChannelIsFullError(Command):
token = '471'
class UnknownModeError(Command):
token = '472'
class InviteOnlyChannelError(Command):
token = '473'
class BannedFromChannelError(Command):
token = '474'
class BadChannelKeyError(Command):
token = '475'
class BadChannelMaskError(Command):
token = '476'
class NoChannelModesError(Command):
token = '477'
class ChanOpPrivilegesNeededError(Command):
token = '482'
class RestrictedError(Command):
token = '484'
class UsersDontMatchError(Command):
token = '502'
| mit | 3,850,343,380,775,073,000 | -539,070,051,087,957,400 | 32.686367 | 112 | 0.544447 | false |
adsorensen/girder | girder/api/v1/resource.py | 1 | 18333 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import six
from ..describe import Description, autoDescribeRoute
from ..rest import Resource as BaseResource, RestException, setResponseHeader, setContentDisposition
from girder.constants import AccessType, TokenScope
from girder.api import access
from girder.utility import parseTimestamp
from girder.utility import ziputil
from girder.utility import path as path_util
from girder.utility.progress import ProgressContext
# Plugins can modify this set to allow other types to be searched
allowedSearchTypes = {'collection', 'folder', 'group', 'item', 'user'}
allowedDeleteTypes = {'collection', 'file', 'folder', 'group', 'item', 'user'}
class Resource(BaseResource):
"""
API Endpoints that deal with operations across multiple resource types.
"""
def __init__(self):
super(Resource, self).__init__()
self.resourceName = 'resource'
self.route('GET', ('search',), self.search)
self.route('GET', ('lookup',), self.lookup)
self.route('GET', (':id',), self.getResource)
self.route('GET', (':id', 'path'), self.path)
self.route('PUT', (':id', 'timestamp'), self.setTimestamp)
self.route('GET', ('download',), self.download)
self.route('POST', ('download',), self.download)
self.route('PUT', ('move',), self.moveResources)
self.route('POST', ('copy',), self.copyResources)
self.route('DELETE', (), self.delete)
@access.public
@autoDescribeRoute(
Description('Search for resources in the system.')
.param('q', 'The search query.')
.param('mode', 'The search mode. Can use either a text search or a '
'prefix-based search.', enum=('text', 'prefix'), required=False,
default='text')
.jsonParam('types', 'A JSON list of resource types to search for, e.g. '
'["user", "folder", "item"].', requireArray=True)
.param('level', 'Minimum required access level.', required=False,
dataType='integer', default=AccessType.READ)
.pagingParams(defaultSort=None, defaultLimit=10)
.errorResponse('Invalid type list format.')
)
def search(self, q, mode, types, level, limit, offset):
level = AccessType.validate(level)
user = self.getCurrentUser()
if mode == 'text':
method = 'textSearch'
else:
method = 'prefixSearch'
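        # Dispatch below to each model's textSearch/prefixSearch implementation via getattr.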
results = {}
for modelName in types:
if modelName not in allowedSearchTypes:
continue
if '.' in modelName:
name, plugin = modelName.rsplit('.', 1)
model = self.model(name, plugin)
else:
model = self.model(modelName)
results[modelName] = [
model.filter(d, user) for d in getattr(model, method)(
query=q, user=user, limit=limit, offset=offset, level=level)
]
return results
def _validateResourceSet(self, resources, allowedModels=None):
"""
Validate a set of resources against a set of allowed models.
Also ensures the requested resource set is not empty.
# TODO jsonschema could replace this probably
:param resources: The set of resources requested.
:param allowedModels: if present, an iterable of models that may be
included in the resources.
"""
if allowedModels:
invalid = set(resources.keys()) - set(allowedModels)
if invalid:
raise RestException('Invalid resource types requested: ' + ', '.join(invalid))
count = sum([len(v) for v in six.viewvalues(resources)])
if not count:
raise RestException('No resources specified.')
def _getResourceModel(self, kind, funcName=None):
"""
Load and return a model with a specific function or throw an exception.
:param kind: the name of the model to load
:param funcName: a function name to ensure that each model contains.
:returns: the loaded model.
"""
try:
model = self.model(kind)
except ImportError:
model = None
if not model or (funcName and not hasattr(model, funcName)):
raise RestException('Invalid resources format.')
return model
@access.public(scope=TokenScope.DATA_READ)
@autoDescribeRoute(
Description('Look up a resource in the data hierarchy by path.')
.param('path',
'The path of the resource. The path must be an absolute Unix '
'path starting with either "/user/[user name]", for a user\'s '
'resources or "/collection/[collection name]", for resources '
'under a collection.')
.param('test',
'Specify whether to return None instead of throwing an '
'exception when path doesn\'t exist.',
required=False, dataType='boolean', default=False)
.errorResponse('Path is invalid.')
.errorResponse('Path refers to a resource that does not exist.')
.errorResponse('Read access was denied for the resource.', 403)
)
def lookup(self, path, test):
return path_util.lookUpPath(path, self.getCurrentUser(), test)['document']
@access.public(scope=TokenScope.DATA_READ)
@autoDescribeRoute(
Description('Get path of a resource.')
.param('id', 'The ID of the resource.', paramType='path')
.param('type', 'The type of the resource (item, file, etc.).')
.errorResponse('ID was invalid.')
.errorResponse('Invalid resource type.')
.errorResponse('Read access was denied for the resource.', 403)
)
def path(self, id, type):
user = self.getCurrentUser()
doc = self._getResource(id, type)
if doc is None:
raise RestException('Invalid resource id.')
return path_util.getResourcePath(type, doc, user=user)
@access.cookie(force=True)
@access.public(scope=TokenScope.DATA_READ)
@autoDescribeRoute(
Description('Download a set of items, folders, collections, and users '
'as a zip archive.')
.notes('This route is also exposed via the POST method because the '
'request parameters can be quite long, and encoding them in the '
'URL (as is standard when using the GET method) can cause the '
'URL to become too long, which causes errors.')
.jsonParam('resources', 'A JSON-encoded set of resources to download. Each type is '
'a list of ids. For example: {"item": [(item id 1), (item id 2)], '
'"folder": [(folder id 1)]}.', requireObject=True)
.param('includeMetadata', 'Include any metadata in JSON files in the '
'archive.', required=False, dataType='boolean', default=False)
.produces('application/zip')
.errorResponse('Unsupported or unknown resource type.')
.errorResponse('Invalid resources format.')
.errorResponse('No resources specified.')
.errorResponse('Resource not found.')
.errorResponse('Read access was denied for a resource.', 403)
)
def download(self, resources, includeMetadata):
"""
Returns a generator function that will be used to stream out a zip
        file containing the listed resources' contents, filtered by
permissions.
"""
user = self.getCurrentUser()
self._validateResourceSet(resources)
# Check that all the resources are valid, so we don't download the zip
# file if it would throw an error.
for kind in resources:
model = self._getResourceModel(kind, 'fileList')
for id in resources[kind]:
if not model.load(id=id, user=user, level=AccessType.READ):
raise RestException('Resource %s %s not found.' % (kind, id))
setResponseHeader('Content-Type', 'application/zip')
setContentDisposition('Resources.zip')
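        # Stream the archive incrementally so large resource sets never need to fit in memory.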
def stream():
zip = ziputil.ZipGenerator()
for kind in resources:
model = self.model(kind)
for id in resources[kind]:
doc = model.load(id=id, user=user, level=AccessType.READ)
for (path, file) in model.fileList(
doc=doc, user=user, includeMetadata=includeMetadata, subpath=True):
for data in zip.addFile(file, path):
yield data
yield zip.footer()
return stream
@access.user(scope=TokenScope.DATA_OWN)
@autoDescribeRoute(
Description('Delete a set of items, folders, or other resources.')
.jsonParam('resources', 'A JSON-encoded set of resources to delete. Each '
'type is a list of ids. For example: {"item": [(item id 1), '
'(item id2)], "folder": [(folder id 1)]}.', requireObject=True)
.param('progress', 'Whether to record progress on this task.',
default=False, required=False, dataType='boolean')
.errorResponse('Unsupported or unknown resource type.')
.errorResponse('Invalid resources format.')
.errorResponse('No resources specified.')
.errorResponse('Resource not found.')
.errorResponse('Admin access was denied for a resource.', 403)
)
def delete(self, resources, progress):
user = self.getCurrentUser()
self._validateResourceSet(resources, allowedDeleteTypes)
total = sum([len(resources[key]) for key in resources])
with ProgressContext(
progress, user=user, title='Deleting resources',
message='Calculating size...') as ctx:
ctx.update(total=total)
current = 0
for kind in resources:
model = self._getResourceModel(kind, 'remove')
for id in resources[kind]:
doc = model.load(id=id, user=user, level=AccessType.ADMIN, exc=True)
# Don't do a subtree count if we weren't asked for progress
if progress:
subtotal = model.subtreeCount(doc)
if subtotal != 1:
total += subtotal - 1
ctx.update(total=total)
model.remove(doc, progress=ctx)
if progress:
current += subtotal
if ctx.progress['data']['current'] != current:
ctx.update(current=current, message='Deleted ' + kind)
def _getResource(self, id, type):
model = self._getResourceModel(type)
return model.load(id=id, user=self.getCurrentUser(), level=AccessType.READ)
@access.admin
@autoDescribeRoute(
Description('Get any resource by ID.')
.param('id', 'The ID of the resource.', paramType='path')
.param('type', 'The type of the resource (item, file, etc.).')
.errorResponse('ID was invalid.')
.errorResponse('Read access was denied for the resource.', 403)
)
def getResource(self, id, type):
return self._getResource(id, type)
@access.admin
@autoDescribeRoute(
Description('Set the created or updated timestamp for a resource.')
.param('id', 'The ID of the resource.', paramType='path')
.param('type', 'The type of the resource (item, file, etc.).')
.param('created', 'The new created timestamp.', required=False)
.param('updated', 'The new updated timestamp.', required=False)
.errorResponse('ID was invalid.')
.errorResponse('Access was denied for the resource.', 403)
)
def setTimestamp(self, id, type, created, updated):
user = self.getCurrentUser()
model = self._getResourceModel(type)
doc = model.load(id=id, user=user, level=AccessType.WRITE, exc=True)
if created is not None:
if 'created' not in doc:
raise RestException('Resource has no "created" field.')
doc['created'] = parseTimestamp(created)
if updated is not None:
if 'updated' not in doc:
raise RestException('Resource has no "updated" field.')
doc['updated'] = parseTimestamp(updated)
return model.filter(model.save(doc), user=user)
def _prepareMoveOrCopy(self, resources, parentType, parentId):
user = self.getCurrentUser()
self._validateResourceSet(resources, ('folder', 'item'))
if resources.get('item') and parentType != 'folder':
raise RestException('Invalid parentType.')
return self.model(parentType).load(parentId, level=AccessType.WRITE, user=user, exc=True)
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Move a set of items and folders.')
.jsonParam('resources', 'A JSON-encoded set of resources to move. Each type '
'is a list of ids. Only folders and items may be specified. '
'For example: {"item": [(item id 1), (item id2)], "folder": '
'[(folder id 1)]}.', requireObject=True)
.param('parentType', 'Parent type for the new parent of these resources.',
enum=('user', 'collection', 'folder'))
.param('parentId', 'Parent ID for the new parent of these resources.')
.param('progress', 'Whether to record progress on this task.',
required=False, default=False, dataType='boolean')
.errorResponse('Unsupported or unknown resource type.')
.errorResponse('Invalid resources format.')
.errorResponse('Resource type not supported.')
.errorResponse('No resources specified.')
.errorResponse('Resource not found.')
.errorResponse('ID was invalid.')
)
def moveResources(self, resources, parentType, parentId, progress):
user = self.getCurrentUser()
parent = self._prepareMoveOrCopy(resources, parentType, parentId)
total = sum([len(resources[key]) for key in resources])
with ProgressContext(
progress, user=user, title='Moving resources',
message='Calculating requirements...', total=total) as ctx:
for kind in resources:
model = self._getResourceModel(kind, 'move')
for id in resources[kind]:
doc = model.load(id=id, user=user, level=AccessType.WRITE, exc=True)
ctx.update(message='Moving %s %s' % (kind, doc.get('name', '')))
if kind == 'item':
if parent['_id'] != doc['folderId']:
model.move(doc, parent)
elif kind == 'folder':
if ((parentType, parent['_id']) !=
(doc['parentCollection'], doc['parentId'])):
model.move(doc, parent, parentType)
ctx.update(increment=1)
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Copy a set of items and folders.')
.jsonParam('resources', 'A JSON-encoded set of resources to copy. Each type '
'is a list of ids. Only folders and items may be specified. '
'For example: {"item": [(item id 1), (item id2)], "folder": '
'[(folder id 1)]}.', requireObject=True)
.param('parentType', 'Parent type for the new parent of these '
'resources.')
.param('parentId', 'Parent ID for the new parent of these resources.')
.param('progress', 'Whether to record progress on this task.',
required=False, default=False, dataType='boolean')
.errorResponse('Unsupported or unknown resource type.')
.errorResponse('Invalid resources format.')
.errorResponse('Resource type not supported.')
.errorResponse('No resources specified.')
.errorResponse('Resource not found.')
.errorResponse('ID was invalid.')
)
def copyResources(self, resources, parentType, parentId, progress):
user = self.getCurrentUser()
parent = self._prepareMoveOrCopy(resources, parentType, parentId)
total = len(resources.get('item', []))
if 'folder' in resources:
model = self._getResourceModel('folder')
for id in resources['folder']:
folder = model.load(id=id, user=user, level=AccessType.READ, exc=True)
total += model.subtreeCount(folder)
with ProgressContext(
progress, user=user, title='Copying resources',
message='Calculating requirements...', total=total) as ctx:
for kind in resources:
model = self._getResourceModel(kind)
for id in resources[kind]:
doc = model.load(id=id, user=user, level=AccessType.READ, exc=True)
ctx.update(message='Copying %s %s' % (kind, doc.get('name', '')))
if kind == 'item':
model.copyItem(doc, folder=parent, creator=user)
ctx.update(increment=1)
elif kind == 'folder':
model.copyFolder(
doc, parent=parent, parentType=parentType, creator=user, progress=ctx)
| apache-2.0 | -1,386,928,141,523,975,200 | 7,578,529,325,625,983,000 | 46.618182 | 100 | 0.590193 | false |
johndamen/pyeasyplot | easyplot/gui/plotsettings.py | 1 | 4044 | from PyQt4 import QtGui, QtCore
from .settings import PlotSettings
from . import basewidgets as bw
from .. import datasets
def _marker_field(**kwargs):
return bw.Dropdown(
['.', ',', 'o', '*',
'+', 'x', 'd', 'D',
'v', '^', '<', '>',
's', 'p', '|', '_'], **kwargs)
def _label_field(*args, **kwargs):
return bw.TextOrNone(*args, **kwargs)
def _linestyle_field(**kwargs):
return bw.Dropdown(['-', '--', '-.', ':'], **kwargs)
def _linewidth_field(*args, **kwargs):
return bw.Float(*args, **kwargs)
def _width_field(*args, **kwargs):
return bw.Float(*args, **kwargs)
def _color_field(*args, **kwargs):
return bw.Color(*args, **kwargs)
def _cmap_field(*args, **kwargs):
return bw.Colormap(*args, **kwargs)
def _alpha_field(*args, **kwargs):
return bw.Float(*args, **kwargs)
def _size_field(*args, **kwargs):
return bw.Float(*args, **kwargs)
class TimeseriesPlotSettings(PlotSettings):
DATA_CLS = datasets.Timeseries
def build(self):
super().build()
self.fields['alpha'] = f = _alpha_field()
f.value_changed.connect(self.change)
self.layout.addRow('alpha', f)
self.fields['color'] = f = _color_field()
f.value_changed.connect(self.change)
self.layout.addRow('color', f)
self.fields['linewidth'] = f = _linewidth_field()
f.value_changed.connect(self.change)
self.layout.addRow('linewidth', f)
self.fields['linestyle'] = f = _linestyle_field()
f.value_changed.connect(self.change)
self.layout.addRow('linestyle', f)
self.fields['label'] = f = _label_field()
f.value_changed.connect(self.change)
self.layout.addRow('label', f)
self.fields['marker'] = f = _marker_field()
f.value_changed.connect(self.change)
self.layout.addRow('marker', f)
class PointsPlotSettings(PlotSettings):
DATA_CLS = datasets.Points
def build(self):
super().build()
self.fields['color'] = f = _color_field()
f.value_changed.connect(self.change)
self.layout.addRow('color', f)
self.fields['s'] = f = _size_field()
f.value_changed.connect(self.change)
self.layout.addRow('pointsize', f)
self.fields['alpha'] = f = _alpha_field()
f.value_changed.connect(self.change)
self.layout.addRow('alpha', f)
class ValuePointsPlotSettings(PlotSettings):
DATA_CLS = datasets.ValuePoints
def build(self):
super().build()
self.fields['cmap'] = f = _cmap_field()
f.value_changed.connect(self.change)
self.layout.addRow('colormap', f)
self.fields['s'] = f = _size_field()
f.value_changed.connect(self.change)
self.layout.addRow('pointsize', f)
self.fields['alpha'] = f = _alpha_field()
f.value_changed.connect(self.change)
self.layout.addRow('alpha', f)
class GridPlotSettings(PlotSettings):
DATA_CLS = datasets.Grid
def build(self):
super().build()
self.fields['cmap'] = f = _cmap_field()
f.value_changed.connect(self.change)
self.layout.addRow('colormap', f)
class IrregularGridPlotSettings(PlotSettings):
DATA_CLS = datasets.IrregularGrid
def build(self):
super().build()
self.fields['cmap'] = f = _cmap_field()
f.value_changed.connect(self.change)
self.layout.addRow('colormap', f)
class VectorDataPlotSettings(PlotSettings):
DATA_CLS = datasets.VectorData
def build(self):
super().build()
self.fields['width'] = f = _width_field()
f.value_changed.connect(self.change)
self.layout.addRow('width', f)
self.fields['color'] = f = _color_field()
f.value_changed.connect(self.change)
self.layout.addRow('color', f)
def get_by_dataset(d):
if not isinstance(d, datasets.Dataset):
raise TypeError('argument must be a dataset')
return globals()[d.__class__.__name__+'PlotSettings'] | gpl-3.0 | -7,759,683,967,337,486,000 | -6,449,146,563,416,020,000 | 24.601266 | 57 | 0.599159 | false |
tectronics/pychess | lib/pychess/Players/ICPlayer.py | 20 | 10779 | from collections import defaultdict
from pychess.compat import Queue
from pychess.Players.Player import Player, PlayerIsDead, TurnInterrupt
from pychess.Utils.Move import parseSAN, toAN
from pychess.Utils.lutils.lmove import ParsingError
from pychess.Utils.Offer import Offer
from pychess.Utils.const import *
from pychess.System.Log import log
class ICPlayer (Player):
__type__ = REMOTE
def __init__ (self, gamemodel, ichandle, gameno, color, name, icrating=None):
Player.__init__(self)
self.offers = {}
self.queue = Queue()
self.okqueue = Queue()
self.setName(name)
self.ichandle = ichandle
self.icrating = icrating
self.color = color
self.gameno = gameno
self.gamemodel = gamemodel
# If some times later FICS creates another game with same wplayer,bplayer,gameno
# this will change to False and boardUpdate messages will be ignored
self.current = True
self.connection = connection = self.gamemodel.connection
self.connections = connections = defaultdict(list)
connections[connection.bm].append(connection.bm.connect_after("boardUpdate", self.__boardUpdate))
connections[connection.bm].append(connection.bm.connect_after("playGameCreated", self.__playGameCreated))
connections[connection.bm].append(connection.bm.connect_after("obsGameCreated", self.__obsGameCreated))
connections[connection.om].append(connection.om.connect("onOfferAdd", self.__onOfferAdd))
connections[connection.om].append(connection.om.connect("onOfferRemove", self.__onOfferRemove))
connections[connection.om].append(connection.om.connect("onOfferDeclined", self.__onOfferDeclined))
connections[connection.cm].append(connection.cm.connect("privateMessage", self.__onPrivateMessage))
def getICHandle (self):
return self.name
@property
def time (self):
        return self.gamemodel.timemodel.getPlayerTime(self.color)
#===========================================================================
# Handle signals from the connection
#===========================================================================
def __playGameCreated (self, bm, ficsgame):
if self.gamemodel.ficsplayers[0] == ficsgame.wplayer and \
self.gamemodel.ficsplayers[1] == ficsgame.bplayer and \
self.gameno == ficsgame.gameno:
log.debug("ICPlayer.__playGameCreated: gameno reappeared: gameno=%s white=%s black=%s" % \
(ficsgame.gameno, ficsgame.wplayer.name, ficsgame.bplayer.name))
self.current = False
def __obsGameCreated (self, bm, ficsgame):
if self.gamemodel.ficsplayers[0] == ficsgame.wplayer and \
self.gamemodel.ficsplayers[1] == ficsgame.bplayer and \
self.gameno == ficsgame.gameno:
log.debug("ICPlayer.__obsGameCreated: gameno reappeared: gameno=%s white=%s black=%s" % \
(ficsgame.gameno, ficsgame.wplayer.name, ficsgame.bplayer.name))
self.current = False
def __onOfferAdd (self, om, offer):
if self.gamemodel.status in UNFINISHED_STATES and not self.gamemodel.isObservationGame():
log.debug("ICPlayer.__onOfferAdd: emitting offer: self.gameno=%s self.name=%s %s" % \
(self.gameno, self.name, offer))
self.offers[offer.index] = offer
self.emit ("offer", offer)
def __onOfferDeclined (self, om, offer):
for offer_ in self.gamemodel.offers.keys():
if offer.type == offer_.type:
offer.param = offer_.param
log.debug("ICPlayer.__onOfferDeclined: emitting decline for %s" % offer)
self.emit("decline", offer)
def __onOfferRemove (self, om, offer):
if offer.index in self.offers:
log.debug("ICPlayer.__onOfferRemove: emitting withdraw: self.gameno=%s self.name=%s %s" % \
(self.gameno, self.name, offer))
self.emit ("withdraw", self.offers[offer.index])
del self.offers[offer.index]
def __onPrivateMessage (self, cm, name, title, isadmin, text):
if name == self.ichandle:
self.emit("offer", Offer(CHAT_ACTION, param=text))
def __boardUpdate (self, bm, gameno, ply, curcol, lastmove, fen, wname, bname, wms, bms):
log.debug("ICPlayer.__boardUpdate: id(self)=%d self=%s %s %s %s %d %d %s %s %d %d" % \
(id(self), self, gameno, wname, bname, ply, curcol, lastmove, fen, wms, bms))
if gameno == self.gameno and len(self.gamemodel.players) >= 2 \
and wname == self.gamemodel.players[0].ichandle \
and bname == self.gamemodel.players[1].ichandle \
and self.current:
log.debug("ICPlayer.__boardUpdate: id=%d self=%s gameno=%s: this is my move" % \
(id(self), self, gameno))
# In some cases (like lost on time) the last move is resent
if ply <= self.gamemodel.ply:
return
if 1-curcol == self.color:
log.debug("ICPlayer.__boardUpdate: id=%d self=%s ply=%d: putting move=%s in queue" % \
(id(self), self, ply, lastmove))
self.queue.put((ply, lastmove))
                # Ensure the fics thread doesn't continue parsing before the
                # game/player thread has received the move.
                # Specifically this ensures that we aren't killed due to end of
                # game before our last move is received
self.okqueue.get(block=True)
#===========================================================================
# Ending the game
#===========================================================================
def __disconnect (self):
if self.connections is None: return
for obj in self.connections:
for handler_id in self.connections[obj]:
if obj.handler_is_connected(handler_id):
obj.disconnect(handler_id)
self.connections = None
def end (self, status, reason):
self.__disconnect()
self.queue.put("del")
def kill (self, reason):
self.__disconnect()
self.queue.put("del")
#===========================================================================
# Send the player move updates
#===========================================================================
def makeMove (self, board1, move, board2):
log.debug("ICPlayer.makemove: id(self)=%d self=%s move=%s board1=%s board2=%s" % \
(id(self), self, move, board1, board2))
if board2 and not self.gamemodel.isObservationGame():
# TODO: Will this work if we just always use CASTLE_SAN?
cn = CASTLE_KK
if board2.variant == FISCHERRANDOMCHESS:
cn = CASTLE_SAN
self.connection.bm.sendMove (toAN (board2, move, castleNotation=cn))
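        # Block until the fics thread hands us the opponent's move or a control token ("del"/"int").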
item = self.queue.get(block=True)
try:
if item == "del":
raise PlayerIsDead
if item == "int":
raise TurnInterrupt
ply, sanmove = item
if ply < board1.ply:
# This should only happen in an observed game
board1 = self.gamemodel.getBoardAtPly(max(ply-1, 0))
log.debug("ICPlayer.makemove: id(self)=%d self=%s from queue got: ply=%d sanmove=%s" % \
(id(self), self, ply, sanmove))
try:
move = parseSAN (board1, sanmove)
log.debug("ICPlayer.makemove: id(self)=%d self=%s parsed move=%s" % \
(id(self), self, move))
except ParsingError as e:
raise
return move
finally:
log.debug("ICPlayer.makemove: id(self)=%d self=%s returning move=%s" % \
(id(self), self, move))
self.okqueue.put("ok")
#===========================================================================
# Interacting with the player
#===========================================================================
def pause (self):
pass
def resume (self):
pass
def setBoard (self, fen):
# setBoard will currently only be called for ServerPlayer when starting
# to observe some game. In this case FICS already knows how the board
# should look, and we don't need to set anything
pass
def playerUndoMoves (self, movecount, gamemodel):
log.debug("ICPlayer.playerUndoMoves: id(self)=%d self=%s, undoing movecount=%d" % \
(id(self), self, movecount))
# If current player has changed so that it is no longer us to move,
# We raise TurnInterruprt in order to let GameModel continue the game
if movecount % 2 == 1 and gamemodel.curplayer != self:
self.queue.put("int")
def putMessage (self, text):
self.connection.cm.tellPlayer (self.name, text)
#===========================================================================
# Offer handling
#===========================================================================
def offerRematch (self):
if self.gamemodel.timed:
min = int(self.gamemodel.timemodel.intervals[0][0])/60
inc = self.gamemodel.timemodel.gain
else:
min = 0
inc = 0
self.connection.om.challenge(self.ichandle,
self.gamemodel.ficsgame.game_type, min, inc,
self.gamemodel.ficsgame.rated)
def offer (self, offer):
log.debug("ICPlayer.offer: self=%s %s" % (repr(self), offer))
if offer.type == TAKEBACK_OFFER:
# only 1 outstanding takeback offer allowed on FICS, so remove any of ours
indexes = self.offers.keys()
for index in indexes:
if self.offers[index].type == TAKEBACK_OFFER:
log.debug("ICPlayer.offer: del self.offers[%s] %s" % (index, offer))
del self.offers[index]
self.connection.om.offer(offer, self.gamemodel.ply)
def offerDeclined (self, offer):
log.debug("ICPlayer.offerDeclined: sending decline for %s" % offer)
self.connection.om.decline(offer)
def offerWithdrawn (self, offer):
pass
def offerError (self, offer, error):
pass
def observe (self):
self.connection.client.run_command("observe %s" % self.ichandle)
| gpl-3.0 | -5,128,197,771,250,149,000 | 9,040,223,025,805,844,000 | 43.726141 | 113 | 0.54634 | false |
florentx/OpenUpgrade | openerp/modules/registry.py | 37 | 17291 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Models registries.
"""
from collections import Mapping
from contextlib import contextmanager
import logging
import threading
import openerp.sql_db
import openerp.osv.orm
import openerp.tools
import openerp.modules.db
import openerp.tools.config
from openerp.tools import assertion_report
_logger = logging.getLogger(__name__)
class Registry(Mapping):
""" Model registry for a particular database.
The registry is essentially a mapping between model names and model
instances. There is one registry instance per database.
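
    Illustrative usage (database and model names are examples only)::

        registry = RegistryManager.get('somedb')
        res_users = registry['res.users']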
"""
def __init__(self, db_name):
super(Registry, self).__init__()
self.models = {} # model name/model instance mapping
self._sql_error = {}
self._store_function = {}
self._init = True
self._init_parent = {}
self._assertion_report = assertion_report.assertion_report()
self.fields_by_model = None
# modules fully loaded (maintained during init phase by `loading` module)
self._init_modules = set()
self.db_name = db_name
self._db = openerp.sql_db.db_connect(db_name)
# special cursor for test mode; None means "normal" mode
self.test_cr = None
        # Indicates whether the registry is fully loaded and ready to be used.
self.ready = False
# Inter-process signaling (used only when openerp.multi_process is True):
# The `base_registry_signaling` sequence indicates the whole registry
# must be reloaded.
# The `base_cache_signaling sequence` indicates all caches must be
# invalidated (i.e. cleared).
self.base_registry_signaling_sequence = None
self.base_cache_signaling_sequence = None
# Flag indicating if at least one model cache has been cleared.
# Useful only in a multi-process context.
self._any_cache_cleared = False
cr = self.cursor()
has_unaccent = openerp.modules.db.has_unaccent(cr)
if openerp.tools.config['unaccent'] and not has_unaccent:
_logger.warning("The option --unaccent was given but no unaccent() function was found in database.")
self.has_unaccent = openerp.tools.config['unaccent'] and has_unaccent
cr.close()
#
# Mapping abstract methods implementation
# => mixin provides methods keys, items, values, get, __eq__, and __ne__
#
def __len__(self):
""" Return the size of the registry. """
return len(self.models)
def __iter__(self):
""" Return an iterator over all model names. """
return iter(self.models)
def __contains__(self, model_name):
""" Test whether the model with the given name exists. """
return model_name in self.models
def __getitem__(self, model_name):
""" Return the model with the given name or raise KeyError if it doesn't exist."""
return self.models[model_name]
def __call__(self, model_name):
""" Same as ``self[model_name]``. """
return self.models[model_name]
def do_parent_store(self, cr):
for o in self._init_parent:
self.get(o)._parent_store_compute(cr)
self._init = False
def obj_list(self):
""" Return the list of model names in this registry."""
return self.keys()
def add(self, model_name, model):
""" Add or replace a model in the registry."""
self.models[model_name] = model
def load(self, cr, module):
""" Load a given module in the registry.
At the Python level, the modules are already loaded, but not yet on a
per-registry level. This method populates a registry with the given
        modules, i.e. it instantiates all the classes of the given module
and registers them in the registry.
"""
models_to_load = [] # need to preserve loading order
# Instantiate registered classes (via the MetaModel automatic discovery
# or via explicit constructor call), and add them to the pool.
for cls in openerp.osv.orm.MetaModel.module_to_models.get(module.name, []):
# models register themselves in self.models
model = cls.create_instance(self, cr)
if model._name not in models_to_load:
# avoid double-loading models whose declaration is split
models_to_load.append(model._name)
return [self.models[m] for m in models_to_load]
def clear_caches(self):
""" Clear the caches
This clears the caches associated to methods decorated with
``tools.ormcache`` or ``tools.ormcache_multi`` for all the models.
"""
for model in self.models.itervalues():
model.clear_caches()
# Special case for ir_ui_menu which does not use openerp.tools.ormcache.
ir_ui_menu = self.models.get('ir.ui.menu')
if ir_ui_menu:
ir_ui_menu.clear_cache()
# Useful only in a multi-process context.
def reset_any_cache_cleared(self):
self._any_cache_cleared = False
# Useful only in a multi-process context.
def any_cache_cleared(self):
return self._any_cache_cleared
@classmethod
def setup_multi_process_signaling(cls, cr):
if not openerp.multi_process:
return None, None
# Inter-process signaling:
# The `base_registry_signaling` sequence indicates the whole registry
# must be reloaded.
# The `base_cache_signaling sequence` indicates all caches must be
# invalidated (i.e. cleared).
cr.execute("""SELECT sequence_name FROM information_schema.sequences WHERE sequence_name='base_registry_signaling'""")
if not cr.fetchall():
cr.execute("""CREATE SEQUENCE base_registry_signaling INCREMENT BY 1 START WITH 1""")
cr.execute("""SELECT nextval('base_registry_signaling')""")
cr.execute("""CREATE SEQUENCE base_cache_signaling INCREMENT BY 1 START WITH 1""")
cr.execute("""SELECT nextval('base_cache_signaling')""")
cr.execute("""
SELECT base_registry_signaling.last_value,
base_cache_signaling.last_value
FROM base_registry_signaling, base_cache_signaling""")
r, c = cr.fetchone()
_logger.debug("Multiprocess load registry signaling: [Registry: # %s] "\
"[Cache: # %s]",
r, c)
return r, c
def enter_test_mode(self):
""" Enter the 'test' mode, where one cursor serves several requests. """
assert self.test_cr is None
self.test_cr = self._db.test_cursor()
RegistryManager.enter_test_mode()
def leave_test_mode(self):
""" Leave the test mode. """
assert self.test_cr is not None
self.test_cr.force_close()
self.test_cr = None
RegistryManager.leave_test_mode()
def cursor(self):
""" Return a new cursor for the database. The cursor itself may be used
as a context manager to commit/rollback and close automatically.
"""
cr = self.test_cr
if cr is not None:
# While in test mode, we use one special cursor across requests. The
# test cursor uses a reentrant lock to serialize accesses. The lock
# is granted here by cursor(), and automatically released by the
# cursor itself in its method close().
cr.acquire()
return cr
return self._db.cursor()
class DummyRLock(object):
""" Dummy reentrant lock, to be used while running rpc and js tests """
def acquire(self):
pass
def release(self):
pass
def __enter__(self):
self.acquire()
def __exit__(self, type, value, traceback):
self.release()
class RegistryManager(object):
""" Model registries manager.
The manager is responsible for creation and deletion of model
registries (essentially database connection/model registry pairs).
"""
# Mapping between db name and model registry.
# Accessed through the methods below.
registries = {}
_lock = threading.RLock()
_saved_lock = None
@classmethod
def lock(cls):
""" Return the current registry lock. """
return cls._lock
@classmethod
def enter_test_mode(cls):
""" Enter the 'test' mode, where the registry is no longer locked. """
assert cls._saved_lock is None
cls._lock, cls._saved_lock = DummyRLock(), cls._lock
@classmethod
def leave_test_mode(cls):
""" Leave the 'test' mode. """
assert cls._saved_lock is not None
cls._lock, cls._saved_lock = cls._saved_lock, None
@classmethod
def get(cls, db_name, force_demo=False, status=None, update_module=False):
""" Return a registry for a given database name."""
with cls.lock():
try:
return cls.registries[db_name]
except KeyError:
return cls.new(db_name, force_demo, status,
update_module)
finally:
# set db tracker - cleaned up at the WSGI
# dispatching phase in openerp.service.wsgi_server.application
threading.current_thread().dbname = db_name
@classmethod
def new(cls, db_name, force_demo=False, status=None,
update_module=False):
""" Create and return a new registry for a given database name.
The (possibly) previous registry for that database name is discarded.
"""
import openerp.modules
with cls.lock():
registry = Registry(db_name)
# Initializing a registry will call general code which will in turn
# call registries.get (this object) to obtain the registry being
# initialized. Make it available in the registries dictionary then
# remove it if an exception is raised.
cls.delete(db_name)
cls.registries[db_name] = registry
try:
with registry.cursor() as cr:
seq_registry, seq_cache = Registry.setup_multi_process_signaling(cr)
registry.base_registry_signaling_sequence = seq_registry
registry.base_cache_signaling_sequence = seq_cache
# This should be a method on Registry
openerp.modules.load_modules(registry._db, force_demo, status, update_module)
except Exception:
del cls.registries[db_name]
raise
# load_modules() above can replace the registry by calling
# indirectly new() again (when modules have to be uninstalled).
# Yeah, crazy.
registry = cls.registries[db_name]
cr = registry.cursor()
try:
registry.do_parent_store(cr)
cr.commit()
finally:
cr.close()
registry.ready = True
if update_module:
# only in case of update, otherwise we'll have an infinite reload loop!
cls.signal_registry_change(db_name)
return registry
@classmethod
def delete(cls, db_name):
"""Delete the registry linked to a given database. """
with cls.lock():
if db_name in cls.registries:
cls.registries[db_name].clear_caches()
del cls.registries[db_name]
@classmethod
def delete_all(cls):
"""Delete all the registries. """
with cls.lock():
for db_name in cls.registries.keys():
cls.delete(db_name)
@classmethod
def clear_caches(cls, db_name):
"""Clear caches
This clears the caches associated to methods decorated with
``tools.ormcache`` or ``tools.ormcache_multi`` for all the models
of the given database name.
This method is given to spare you a ``RegistryManager.get(db_name)``
        that would load the given database if it was not already loaded.
"""
with cls.lock():
if db_name in cls.registries:
cls.registries[db_name].clear_caches()
@classmethod
def check_registry_signaling(cls, db_name):
"""
        Check if the modules have changed and perform all necessary operations to update
the registry of the corresponding database.
:returns: True if changes has been detected in the database and False otherwise.
"""
changed = False
if openerp.multi_process and db_name in cls.registries:
registry = cls.get(db_name)
cr = registry.cursor()
try:
cr.execute("""
SELECT base_registry_signaling.last_value,
base_cache_signaling.last_value
FROM base_registry_signaling, base_cache_signaling""")
r, c = cr.fetchone()
_logger.debug("Multiprocess signaling check: [Registry - old# %s new# %s] "\
"[Cache - old# %s new# %s]",
registry.base_registry_signaling_sequence, r,
registry.base_cache_signaling_sequence, c)
# Check if the model registry must be reloaded (e.g. after the
# database has been updated by another process).
if registry.base_registry_signaling_sequence is not None and registry.base_registry_signaling_sequence != r:
changed = True
_logger.info("Reloading the model registry after database signaling.")
registry = cls.new(db_name)
# Check if the model caches must be invalidated (e.g. after a write
                # occurred on another process). Don't clear right after a registry
                # has been reloaded.
elif registry.base_cache_signaling_sequence is not None and registry.base_cache_signaling_sequence != c:
changed = True
_logger.info("Invalidating all model caches after database signaling.")
registry.clear_caches()
registry.reset_any_cache_cleared()
# One possible reason caches have been invalidated is the
# use of decimal_precision.write(), in which case we need
# to refresh fields.float columns.
for model in registry.models.values():
for column in model._columns.values():
if hasattr(column, 'digits_change'):
column.digits_change(cr)
registry.base_registry_signaling_sequence = r
registry.base_cache_signaling_sequence = c
finally:
cr.close()
return changed
@classmethod
def signal_caches_change(cls, db_name):
if openerp.multi_process and db_name in cls.registries:
# Check the registries if any cache has been cleared and signal it
# through the database to other processes.
registry = cls.get(db_name)
if registry.any_cache_cleared():
_logger.info("At least one model cache has been cleared, signaling through the database.")
cr = registry.cursor()
r = 1
try:
cr.execute("select nextval('base_cache_signaling')")
r = cr.fetchone()[0]
finally:
cr.close()
registry.base_cache_signaling_sequence = r
registry.reset_any_cache_cleared()
@classmethod
def signal_registry_change(cls, db_name):
if openerp.multi_process and db_name in cls.registries:
_logger.info("Registry changed, signaling through the database")
registry = cls.get(db_name)
cr = registry.cursor()
r = 1
try:
cr.execute("select nextval('base_registry_signaling')")
r = cr.fetchone()[0]
finally:
cr.close()
registry.base_registry_signaling_sequence = r
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 8,535,521,494,388,671,000 | 8,223,570,994,028,910,000 | 38.749425 | 126 | 0.592563 | false |
iut-ibk/P8-WSC-GUI | 3dparty/Editra/src/extern/dexml/_util.py | 1 | 4954 | import copy
class Error(Exception):
"""Base exception class for the dexml module."""
pass
class ParseError(Error):
"""Exception raised when XML could not be parsed into objects."""
pass
class RenderError(Error):
"""Exception raised when object could not be rendered into XML."""
pass
class XmlError(Error):
"""Exception raised to encapsulate errors from underlying XML parser."""
pass
class PARSE_DONE:
"""Constant returned by a Field when it has finished parsing."""
pass
class PARSE_MORE:
"""Constant returned by a Field when it wants additional nodes to parse."""
pass
class PARSE_SKIP:
"""Constant returned by a Field when it cannot parse the given node."""
pass
class PARSE_CHILDREN:
"""Constant returned by a Field to parse children from its container tag."""
pass
class Meta:
"""Class holding meta-information about a dexml.Model subclass.
Each dexml.Model subclass has an attribute 'meta' which is an instance
    of this class. That instance holds information about how the model
corresponds to XML, such as its tagname, namespace, and error handling
semantics. You would not ordinarily create an instance of this class;
instead let the ModelMetaclass create one automatically.
These attributes control how the model corresponds to the XML:
* tagname: the name of the tag representing this model
* namespace: the XML namespace in which this model lives
These attributes control parsing/rendering behaviour:
* namespace_prefix: the prefix to use for rendering namespaced tags
* ignore_unknown_elements: ignore unknown elements when parsing
* case_sensitive: match tag/attr names case-sensitively
* order_sensitive: match child tags in order of field definition
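
    Illustrative sketch (assumes a dexml.Model subclass; field definitions omitted)::

        class Person(Model):
            class meta:
                tagname = "person"
                order_sensitive = False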
"""
_defaults = {"tagname":None,
"namespace":None,
"namespace_prefix":None,
"ignore_unknown_elements":True,
"case_sensitive":True,
"order_sensitive":True}
def __init__(self,name,meta_attrs):
for (attr,default) in self._defaults.items():
setattr(self,attr,meta_attrs.get(attr,default))
if self.tagname is None:
self.tagname = name
def _meta_attributes(meta):
"""Extract attributes from a "meta" object."""
meta_attrs = {}
if meta:
for attr in dir(meta):
if not attr.startswith("_"):
meta_attrs[attr] = getattr(meta,attr)
return meta_attrs
class ModelMetaclass(type):
"""Metaclass for dexml.Model and subclasses.
This metaclass is responsible for introspecting Model class definitions
and setting up appropriate default behaviours. For example, this metaclass
sets a Model's default tagname to be equal to the declared class name.
"""
instances = {}
def __new__(mcls,name,bases,attrs):
cls = super(ModelMetaclass,mcls).__new__(mcls,name,bases,attrs)
# Don't do anything if it's not a subclass of Model
parents = [b for b in bases if isinstance(b, ModelMetaclass)]
# HACK
import fields
if not parents:
return cls
# Set up the cls.meta object, inheriting from base classes
meta_attrs = {}
for base in bases:
if isinstance(base,ModelMetaclass) and hasattr(base,"meta"):
meta_attrs.update(_meta_attributes(base.meta))
meta_attrs.pop("tagname",None)
meta_attrs.update(_meta_attributes(attrs.get("meta",None)))
cls.meta = Meta(name,meta_attrs)
# Create ordered list of field objects, telling each about their
# name and containing class. Inherit fields from base classes
# only if not overridden on the class itself.
base_fields = {}
for base in bases:
if not isinstance(base,ModelMetaclass):
continue
for field in base._fields:
if field.field_name not in base_fields:
field = copy.copy(field)
field.model_class = cls
base_fields[field.field_name] = field
cls_fields = []
for (name,value) in attrs.iteritems():
if isinstance(value,fields.Field):
base_fields.pop(name,None)
value.field_name = name
value.model_class = cls
cls_fields.append(value)
cls._fields = base_fields.values() + cls_fields
cls._fields.sort(key=lambda f: f._order_counter)
# Register the new class so we can find it by name later on
mcls.instances[(cls.meta.namespace,cls.meta.tagname)] = cls
return cls
@classmethod
def find_class(mcls,tagname,namespace=None):
"""Find dexml.Model subclass for the given tagname and namespace."""
return mcls.instances.get((namespace,tagname))
| gpl-2.0 | -772,129,963,646,064,600 | -9,125,006,998,751,099,000 | 36.530303 | 80 | 0.636455 | false |
jimi-c/ansible | lib/ansible/modules/network/avi/avi_sslkeyandcertificate.py | 20 | 5892 | #!/usr/bin/python
#
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_sslkeyandcertificate
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of SSLKeyAndCertificate Avi RESTful Object
description:
- This module is used to configure SSLKeyAndCertificate object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
ca_certs:
description:
- Ca certificates in certificate chain.
certificate:
description:
- Sslcertificate settings for sslkeyandcertificate.
required: true
certificate_management_profile_ref:
description:
- It is a reference to an object of type certificatemanagementprofile.
created_by:
description:
- Creator name.
dynamic_params:
description:
- Dynamic parameters needed for certificate management profile.
enckey_base64:
description:
- Encrypted private key corresponding to the private key (e.g.
- Those generated by an hsm such as thales nshield).
enckey_name:
description:
- Name of the encrypted private key (e.g.
- Those generated by an hsm such as thales nshield).
hardwaresecuritymodulegroup_ref:
description:
- It is a reference to an object of type hardwaresecuritymodulegroup.
key:
description:
- Private key.
key_params:
description:
- Sslkeyparams settings for sslkeyandcertificate.
name:
description:
- Name of the object.
required: true
status:
description:
- Enum options - ssl_certificate_finished, ssl_certificate_pending.
- Default value when not specified in API or module is interpreted by Avi Controller as SSL_CERTIFICATE_FINISHED.
tenant_ref:
description:
- It is a reference to an object of type tenant.
type:
description:
- Enum options - ssl_certificate_type_virtualservice, ssl_certificate_type_system, ssl_certificate_type_ca.
- Default value when not specified in API or module is interpreted by Avi Controller as SSL_CERTIFICATE_TYPE_VIRTUALSERVICE.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create a SSL Key and Certificate
avi_sslkeyandcertificate:
controller: 10.10.27.90
username: admin
password: AviNetworks123!
key: |
-----BEGIN PRIVATE KEY-----
....
-----END PRIVATE KEY-----
certificate:
self_signed: true
certificate: |
-----BEGIN CERTIFICATE-----
....
-----END CERTIFICATE-----
type: SSL_CERTIFICATE_TYPE_VIRTUALSERVICE
name: MyTestCert
"""
RETURN = '''
obj:
description: SSLKeyAndCertificate (api/sslkeyandcertificate) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
ca_certs=dict(type='list',),
certificate=dict(type='dict', required=True),
certificate_management_profile_ref=dict(type='str',),
created_by=dict(type='str',),
dynamic_params=dict(type='list',),
enckey_base64=dict(type='str',),
enckey_name=dict(type='str',),
hardwaresecuritymodulegroup_ref=dict(type='str',),
key=dict(type='str', no_log=True,),
key_params=dict(type='dict',),
name=dict(type='str', required=True),
status=dict(type='str',),
tenant_ref=dict(type='str',),
type=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'sslkeyandcertificate',
set(['key']))
if __name__ == '__main__':
main()
| gpl-3.0 | -3,616,715,411,615,654,000 | -1,966,255,721,316,874,200 | 32.862069 | 136 | 0.613714 | false |
jameslovejoy/apportionment | scripts/apportion.py | 1 | 1096 | import math
class Apportion:
populations = {}
seats = {}
def __init__(self):
		with open('../data/2010.csv', 'r') as f:
			for line in f:
				state, pop = [s.strip() for s in line.split(',')]
				self.seats[state] = 1
				self.populations[state] = int(pop)
@classmethod
def find_highest_priority(cls):
highest = 0
highest_state = None
for state in cls.populations:
n = cls.seats[state]
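			# Huntington-Hill priority: population divided by the geometric mean of the current and next seat counts.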
priority = cls.populations[state] / math.sqrt(n*(n+1))
if priority > highest:
highest = priority
highest_state = state
return highest_state
@classmethod
def run(cls):
# 435 seats: Every state gets 1 to start, leaving 385 left to apportion.
for n in range(385):
state = cls.find_highest_priority()
cls.seats[state] += 1
seat_number = 51 + n
print "Assigning Seat {} to {}".format(seat_number, state)
print "Just missed the cut..."
state = cls.find_highest_priority()
print "Seat 436 would be assigned to {}".format(state)
for state in sorted(cls.seats):
print("{}\t{}").format(state.rjust(20), str(cls.seats[state]).rjust(3))
Apportion().run() | mit | 9,031,649,650,366,959,000 | 7,728,455,417,453,197,000 | 22.340426 | 74 | 0.64781 | false |
pasmod/simurg | simurg/scrapper/template.py | 1 | 4965 | from selector_finder import find_selector
from dragnet import content_extractor
from collections import OrderedDict
from unidecode import unidecode
from bs4 import BeautifulSoup
from simurg.clients.fetcher import fetch
from simurg.util import is_valid
import logging
import os.path
import time
import re
def clean_soup(soup):
"""Removes some elements that may negatively affect the
quality of headline extraction
# Arguments
soup: parsed html document
"""
exclude_tags = ['style', 'script', '[document]', 'head', 'title']
[s.extract() for s in soup(exclude_tags)]
def find_headline_element(soup, headline):
"""Finds the headline element on a page based on a headline hint.
# Argument
soup: parsed html page
headline: headline hint to be used
# Returns
el: headline element (None if not found)
"""
clean_soup(soup)
# headline sometimes contains "..." at the end. We eliminate it.
headline = headline[:-4]
if ':' in headline:
headline = headline.split(':')[1]
elems = soup(text=re.compile(re.escape(headline)))
d = {}
for el in elems:
d[el.parent] = el.parent.text.strip()
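    # Prefer the most specific matches: parents with the shortest text are the innermost elements.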
headline_elems = sorted(d, key=lambda k: len(d[k]))
if len(headline_elems) > 0:
return headline_elems
logging.debug('Headline "{}" not found'.format(unidecode(headline)))
return None
def append_html(news, redis_client):
"""Appends an html field to the news, only if the wayback_url is valid and
the url does not already exist in the database.
# Arguments
news: news object as dictionary
# Returns
news: news object with or without html field
"""
if is_valid(news, field='wayback_url'):
fetch_url = news['wayback_url']
else:
fetch_url = news['url']
if not redis_client.exists(news['url']):
news['html'] = fetch(fetch_url)
return news
logging.info('Skipping duplicate url: {}'.format(news['url']))
return news
def append_headline_selector(news):
"""Appends the headline css selector field to the news, only if the html
field exists and is valid.
# Arguments
news: news object as dictionary
# Returns
news: news object with or without headline css selector field
"""
if is_valid(news, field='html'):
soup = BeautifulSoup(news['html'], 'html.parser')
headline_elems = find_headline_element(soup, news['headline'])
if headline_elems:
news['headline_selector'] = find_selector(soup, headline_elems)
return news
logging.debug('Headline css selector could not be found!')
else:
logging.debug('Fetching html page failed. url={}'.
format(news['url']))
return news
def get_base_url(lang='de'):
"""Return the google news url for a specific language
# Arguments
lang: required language for google news
# Returns
url: corresponding google news url for the given language
"""
if lang == 'de':
return 'http://news.google.com/news?ned=de'
if lang == 'en':
return 'http://news.google.com/news?ned=us'
if lang == 'fr':
return 'https://news.google.com/news?ned=fr'
if lang == 'it':
return 'https://news.google.com/news?ned=it'
else:
raise ValueError('unsupported language {}'.format(lang))
def populate(redis_client):
"""Populates the entries in the database with fields such as headline,
body, html and url
# Arguments
        redis_client: client of the language-specific news database
# Returns
news: news objects populated with required fields
"""
keys = redis_client.keys()
folder = 'docs/{}/'.format(redis_client.lang)
for key in keys:
value = redis_client.get(key)
f = folder + value['id'] + '.json'
if os.path.isfile(f):
logging.info('Skipping existing document: {}'.format(f))
continue
if value['wayback_url'] == 'None':
html = fetch(value['url'])
else:
html = fetch(value['wayback_url'])
time.sleep(1)
if html:
soup = BeautifulSoup(html, 'html.parser')
else:
continue
        headline_elems = soup.select(value['headline_selector'])
if len(headline_elems) > 0:
headline = headline_elems[0].text.strip()
else:
logging.debug('Headline can not be refound: url={}, selector={}'
.format(value['url'], value['headline_selector']))
continue
news = OrderedDict()
news['id'] = value['id']
news['timestamp'] = value['timestamp']
news['lang'] = redis_client.lang
news['url'] = value['url']
news['wayback_url'] = value['wayback_url']
news['headline'] = headline.strip()
news['body'] = content_extractor.analyze(html).strip()
yield news
| mit | -1,377,423,109,789,908,200 | 7,906,435,386,137,959,000 | 30.424051 | 78 | 0.611682 | false |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/setuptools/command/test.py | 33 | 8865 | import os
import operator
import sys
import contextlib
import itertools
from distutils.errors import DistutilsError, DistutilsOptionError
from distutils import log
from unittest import TestLoader
from setuptools.extern import six
from setuptools.extern.six.moves import map, filter
from pkg_resources import (resource_listdir, resource_exists, normalize_path,
working_set, _namespace_packages,
add_activation_listener, require, EntryPoint)
from setuptools import Command
from setuptools.py31compat import unittest_main
class ScanningLoader(TestLoader):
def loadTestsFromModule(self, module, pattern=None):
"""Return a suite of all tests cases contained in the given module
If the module is a package, load tests from all the modules in it.
If the module has an ``additional_tests`` function, call it and add
the return value to the tests.
"""
tests = []
tests.append(TestLoader.loadTestsFromModule(self, module))
if hasattr(module, "additional_tests"):
tests.append(module.additional_tests())
if hasattr(module, '__path__'):
for file in resource_listdir(module.__name__, ''):
if file.endswith('.py') and file != '__init__.py':
submodule = module.__name__ + '.' + file[:-3]
else:
if resource_exists(module.__name__, file + '/__init__.py'):
submodule = module.__name__ + '.' + file
else:
continue
tests.append(self.loadTestsFromName(submodule))
if len(tests) != 1:
return self.suiteClass(tests)
else:
return tests[0] # don't create a nested suite for only one return
# adapted from jaraco.classes.properties:NonDataProperty
class NonDataProperty(object):
def __init__(self, fget):
self.fget = fget
def __get__(self, obj, objtype=None):
if obj is None:
return self
return self.fget(obj)
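# Hedged illustration (not part of setuptools): because NonDataProperty only
# defines __get__, it is a non-data descriptor, so assigning an instance
# attribute of the same name shadows the computed value.
#
#   class Demo(object):
#       @NonDataProperty
#       def args(self):
#           return [1, 2]
#
#   d = Demo()
#   d.args        # -> [1, 2], computed by fget
#   d.args = [3]  # plain instance attribute now wins
#   d.args        # -> [3]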
class test(Command):
"""Command to run unit tests after in-place build"""
description = "run unit tests after in-place build"
user_options = [
('test-module=', 'm', "Run 'test_suite' in specified module"),
('test-suite=', 's',
"Run single test, case or suite (e.g. 'module.test_suite')"),
('test-runner=', 'r', "Test runner to use"),
]
def initialize_options(self):
self.test_suite = None
self.test_module = None
self.test_loader = None
self.test_runner = None
def finalize_options(self):
if self.test_suite and self.test_module:
msg = "You may specify a module or a suite, but not both"
raise DistutilsOptionError(msg)
if self.test_suite is None:
if self.test_module is None:
self.test_suite = self.distribution.test_suite
else:
self.test_suite = self.test_module + ".test_suite"
if self.test_loader is None:
self.test_loader = getattr(self.distribution, 'test_loader', None)
if self.test_loader is None:
self.test_loader = "setuptools.command.test:ScanningLoader"
if self.test_runner is None:
self.test_runner = getattr(self.distribution, 'test_runner', None)
@NonDataProperty
def test_args(self):
return list(self._test_args())
def _test_args(self):
if self.verbose:
yield '--verbose'
if self.test_suite:
yield self.test_suite
def with_project_on_sys_path(self, func):
"""
Backward compatibility for project_on_sys_path context.
"""
with self.project_on_sys_path():
func()
@contextlib.contextmanager
def project_on_sys_path(self, include_dists=[]):
with_2to3 = six.PY3 and getattr(self.distribution, 'use_2to3', False)
if with_2to3:
# If we run 2to3 we can not do this inplace:
# Ensure metadata is up-to-date
self.reinitialize_command('build_py', inplace=0)
self.run_command('build_py')
bpy_cmd = self.get_finalized_command("build_py")
build_path = normalize_path(bpy_cmd.build_lib)
# Build extensions
self.reinitialize_command('egg_info', egg_base=build_path)
self.run_command('egg_info')
self.reinitialize_command('build_ext', inplace=0)
self.run_command('build_ext')
else:
# Without 2to3 inplace works fine:
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
ei_cmd = self.get_finalized_command("egg_info")
old_path = sys.path[:]
old_modules = sys.modules.copy()
try:
project_path = normalize_path(ei_cmd.egg_base)
sys.path.insert(0, project_path)
working_set.__init__()
add_activation_listener(lambda dist: dist.activate())
require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
with self.paths_on_pythonpath([project_path]):
yield
finally:
sys.path[:] = old_path
sys.modules.clear()
sys.modules.update(old_modules)
working_set.__init__()
@staticmethod
@contextlib.contextmanager
def paths_on_pythonpath(paths):
"""
Add the indicated paths to the head of the PYTHONPATH environment
variable so that subprocesses will also see the packages at
these paths.
Do this in a context that restores the value on exit.
"""
nothing = object()
orig_pythonpath = os.environ.get('PYTHONPATH', nothing)
current_pythonpath = os.environ.get('PYTHONPATH', '')
try:
prefix = os.pathsep.join(paths)
to_join = filter(None, [prefix, current_pythonpath])
new_path = os.pathsep.join(to_join)
if new_path:
os.environ['PYTHONPATH'] = new_path
yield
finally:
if orig_pythonpath is nothing:
os.environ.pop('PYTHONPATH', None)
else:
os.environ['PYTHONPATH'] = orig_pythonpath
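    # Hedged illustration (not part of setuptools): a subprocess started inside
    # the context sees the prepended paths; 'build/lib' and 'mypkg' are
    # hypothetical names.
    #
    #   with test.paths_on_pythonpath(['build/lib']):
    #       subprocess.check_call([sys.executable, '-c', 'import mypkg'])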
@staticmethod
def install_dists(dist):
"""
Install the requirements indicated by self.distribution and
return an iterable of the dists that were built.
"""
ir_d = dist.fetch_build_eggs(dist.install_requires or [])
tr_d = dist.fetch_build_eggs(dist.tests_require or [])
return itertools.chain(ir_d, tr_d)
def run(self):
installed_dists = self.install_dists(self.distribution)
cmd = ' '.join(self._argv)
if self.dry_run:
self.announce('skipping "%s" (dry run)' % cmd)
return
self.announce('running "%s"' % cmd)
paths = map(operator.attrgetter('location'), installed_dists)
with self.paths_on_pythonpath(paths):
with self.project_on_sys_path():
self.run_tests()
def run_tests(self):
# Purge modules under test from sys.modules. The test loader will
# re-import them from the build location. Required when 2to3 is used
# with namespace packages.
if six.PY3 and getattr(self.distribution, 'use_2to3', False):
module = self.test_suite.split('.')[0]
if module in _namespace_packages:
del_modules = []
if module in sys.modules:
del_modules.append(module)
module += '.'
for name in sys.modules:
if name.startswith(module):
del_modules.append(name)
list(map(sys.modules.__delitem__, del_modules))
exit_kwarg = {} if sys.version_info < (2, 7) else {"exit": False}
test = unittest_main(
None, None, self._argv,
testLoader=self._resolve_as_ep(self.test_loader),
testRunner=self._resolve_as_ep(self.test_runner),
**exit_kwarg
)
if not test.result.wasSuccessful():
msg = 'Test failed: %s' % test.result
self.announce(msg, log.ERROR)
raise DistutilsError(msg)
@property
def _argv(self):
return ['unittest'] + self.test_args
@staticmethod
def _resolve_as_ep(val):
"""
        Load the indicated attribute value, called, as if it were
        specified as an entry point.
"""
if val is None:
return
parsed = EntryPoint.parse("x=" + val)
return parsed.resolve()()
| gpl-3.0 | -8,062,828,841,724,871,000 | 2,998,261,229,861,099,500 | 33.901575 | 79 | 0.576086 | false |
misterdanb/midi.py | midi.py | 1 | 21677 | import sys
import os
import struct
import array
from enum import Enum
def bytes_to_uint16(byte_list):
return struct.unpack('>H', byte_list[:4])[0]
def uint16_to_bytes(value):
return struct.pack('>H', value)
def bytes_to_uint24(byte_list):
return struct.unpack('>I', b'\x00' + byte_list[:3])[0]
def uint24_to_bytes(value):
return struct.pack('>I', value)[1:4]
def bytes_to_uint32(byte_list):
return struct.unpack('>I', byte_list[:4])[0]
def uint32_to_bytes(value):
return struct.pack('>I', value)
def bytes_to_str(byte_list):
return byte_list.decode('utf-8')
def str_to_bytes(value):
return value.encode('utf-8')
def enum_values(enum):
return list(map(lambda x: x.value, enum))
def enum_names(enum):
return list(map(lambda x: x.name, enum))
def decode_variable_length_value(byte_list):
value = 0
tmp_pos = 0
while byte_list[tmp_pos] & 0b10000000 != 0:
value_part = byte_list[tmp_pos] & 0b01111111
value |= value_part
value <<= 7
tmp_pos += 1
value_part = byte_list[tmp_pos] & 0b01111111
value |= value_part
tmp_pos += 1
return(value, tmp_pos)
def encode_variable_length_value(value):
    bytes_repr = bytearray()
    bytes_repr.insert(0, value & 0b01111111)
    value >>= 7
    # Loop while any bits remain; testing only the low 7 bits here would stop
    # too early for values such as 0x4000 whose next 7-bit group is zero.
    while value != 0:
        bytes_repr.insert(0, (value & 0b01111111) | 0b10000000)
        value >>= 7
    return(bytes(bytes_repr))
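# Worked example (hedged, not part of the original module): the variable-length
# quantity stores 7 payload bits per byte, most significant group first, with
# the top bit set on every byte except the last.
#
#   encode_variable_length_value(0x4000)           # -> b'\x81\x80\x00'
#   decode_variable_length_value(b'\x81\x80\x00')  # -> (0x4000, 3)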
class MidiException(Exception):
pass
class MidiFile():
def __init__(self, path):
self.path = path
self.chunks = []
try:
with open(path, 'rb') as midi_file:
midi_data = midi_file.read()
file_pos = 0
while file_pos < len(midi_data):
new_chunk = Chunk(midi_data[file_pos:])
self.chunks.append(new_chunk)
file_pos += 8 + new_chunk.length
except:
raise(MidiException('Could not open midi file'))
def __iter__(self):
for chunk in self.chunks:
yield(chunk)
def __repr__(self):
return('<File: ' + self.path + '>')
def export(self, path='out.mid'):
with open(path, 'wb') as midi_file:
for chunk in self.chunks:
midi_file.write(chunk.to_bytes())
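# Hedged usage sketch (assumes 'song.mid' is a valid midi file on disk):
#
#   midi = MidiFile('song.mid')
#   for chunk in midi:
#       print(chunk)
#       if chunk.chunk_type == ChunkType.m_trk:
#           for mtrk_event in chunk:
#               print(mtrk_event)
#   midi.export('copy.mid')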
class ChunkType(Enum):
m_thd = 'MThd'
m_trk = 'MTrk'
class Chunk():
def __init__(self, byte_list):
self.chunk_type = ChunkType(bytes_to_str(byte_list[:4]))
self.length = bytes_to_uint32(byte_list[4:8])
if self.chunk_type == ChunkType.m_thd:
if self.length == 6:
self.file_format = bytes_to_uint16(byte_list[8:10])
self.tracks_count = bytes_to_uint16(byte_list[10:12])
self.division = bytes_to_uint16(byte_list[12:14])
else:
raise(MidiException('Invalid MThd chunk'))
elif self.chunk_type == ChunkType.m_trk:
self.mtrk_events = []
tmp_pos = 8
while tmp_pos < 8 + self.length:
new_mtrk_event = MTrkEvent(byte_list[tmp_pos:])
self.mtrk_events.append(new_mtrk_event)
tmp_pos += new_mtrk_event.length
def __iter__(self):
if self.chunk_type == ChunkType.m_thd:
yield(None)
else:
for mtrk_event in self.mtrk_events:
yield(mtrk_event)
def __repr__(self):
if self.chunk_type == ChunkType.m_thd:
return('<Chunk Type: ' + self.chunk_type.name + ', ' +
'Length: ' + str(self.length) + ', ' +
'File format: ' + str(self.file_format) + ', ' +
'Tracks count: ' + str(self.tracks_count) + ', ' +
'Division: ' + str(self.division) + '>')
elif self.chunk_type == ChunkType.m_trk:
            return('<Chunk Type: ' + self.chunk_type.name + ', ' +
'Length: ' + str(self.length) + '>')
def to_bytes(self):
bytes_repr = bytearray()
bytes_repr += str_to_bytes(self.chunk_type.value);
bytes_repr += uint32_to_bytes(self.length);
if self.chunk_type == ChunkType.m_thd:
bytes_repr += uint16_to_bytes(self.file_format)
bytes_repr += uint16_to_bytes(self.tracks_count)
bytes_repr += uint16_to_bytes(self.division)
elif self.chunk_type == ChunkType.m_trk:
for mtrk_event in self.mtrk_events:
bytes_repr += mtrk_event.to_bytes()
return(bytes(bytes_repr))
class MTrkEvent():
def __init__(self, byte_list):
self.delta_time, self.length = decode_variable_length_value(byte_list)
tmp_pos = self.length
event_code = byte_list[tmp_pos]
if (event_code & 0b11110000) in enum_values(MidiEventType):
self.event = MidiEvent(byte_list[tmp_pos:])
elif event_code in enum_values(SystemEventType):
self.event = SystemEvent(byte_list[tmp_pos:])
elif event_code == 0b11111111:
self.event = MetaEvent(byte_list[tmp_pos:])
else:
raise(MidiException('No such event'))
self.length += self.event.length
def __repr__(self):
return('<Delta time: ' + str(self.delta_time) + ', ' +
'Event: ' + self.event.__class__.__name__ + '>')
def to_bytes(self):
bytes_repr = bytearray()
bytes_repr += encode_variable_length_value(self.delta_time)
bytes_repr += self.event.to_bytes()
return(bytes(bytes_repr))
class MidiEventType(Enum):
note_off = 0b10000000
note_on = 0b10010000
note_pressure = 0b10100000
control_change = 0b10110000
program_change = 0b11000000
channel_pressure = 0b11010000
pitch_change = 0b11100000
class MidiEvent():
def __init__(self, byte_list):
try:
self.event_type = MidiEventType(byte_list[0] & 0b11110000)
self.channel_number = byte_list[0] & 0b00001111
if self.event_type == MidiEventType.note_off or \
self.event_type == MidiEventType.note_on:
self.note = byte_list[1]
self.velocity = byte_list[2]
self.length = 3
elif self.event_type == MidiEventType.note_pressure:
self.note = byte_list[1]
self.pressure = byte_list[2]
self.length = 3
elif self.event_type == MidiEventType.control_change:
self.control_number = byte_list[1]
self.new_value = byte_list[2]
self.length = 3
elif self.event_type == MidiEventType.program_change:
self.program_number = byte_list[1]
self.length = 2
elif self.event_type == MidiEventType.channel_pressure:
self.channel_pressure = byte_list[1]
self.length = 2
elif self.event_type == MidiEventType.pitch_change:
self.bottom = byte_list[1]
self.next_value = byte_list[2]
self.length = 3
except ValueError:
raise(MidiException('No such midi event type'))
def __repr__(self):
if self.event_type == MidiEventType.note_off or \
self.event_type == MidiEventType.note_on:
return('<Midi event type: ' + self.event_type.name + ', ' +
'Channel number: ' + str(self.channel_number) + ', ' +
'Note number: ' + str(self.note) + ', ' +
'Velocity: ' + str(self.velocity) + '>')
elif self.event_type == MidiEventType.note_pressure:
return('<Midi event type: ' + self.event_type.name + ', ' +
'Channel number: ' + str(self.channel_number) + ', ' +
'Note number: ' + str(self.note) + ', ' +
'Pressure: ' + str(self.pressure) + '>')
elif self.event_type == MidiEventType.control_change:
            return('<Midi event type: ' + self.event_type.name + ', ' +
'Channel number: ' + str(self.channel_number) + ', ' +
'Controller number: ' + str(self.control_number) + ', ' +
'New Value: ' + str(self.new_value) + '>')
elif self.event_type == MidiEventType.program_change:
return('<Midi event type: ' + self.event_type.name + ', ' +
'Channel number: ' + str(self.channel_number) + ', ' +
'New program number: ' + str(self.program_number) + '>')
elif self.event_type == MidiEventType.channel_pressure:
return('<Midi event type: ' + self.event_type.name + ', ' +
'Channel number: ' + str(self.channel_number) + ', ' +
'Pressure: ' + str(self.channel_pressure) + '>')
elif self.event_type == MidiEventType.pitch_change:
return('<Midi event type: ' + self.event_type.name + ', ' +
'Channel: ' + str(self.channel_number) + ', ' +
'Bottom: ' + str(self.bottom) + ', ' +
'Next Value: ' + str(self.next_value) + '>')
def to_bytes(self):
bytes_repr = bytearray()
bytes_repr.append(self.event_type.value | self.channel_number)
if self.event_type == MidiEventType.note_off or \
self.event_type == MidiEventType.note_on:
bytes_repr.append(self.note)
bytes_repr.append(self.velocity)
elif self.event_type == MidiEventType.note_pressure:
bytes_repr.append(self.note)
bytes_repr.append(self.pressure)
elif self.event_type == MidiEventType.control_change:
bytes_repr.append(self.control_number)
bytes_repr.append(self.new_value)
elif self.event_type == MidiEventType.program_change:
bytes_repr.append(self.program_number)
elif self.event_type == MidiEventType.channel_pressure:
bytes_repr.append(self.channel_pressure)
elif self.event_type == MidiEventType.pitch_change:
bytes_repr.append(self.bottom)
bytes_repr.append(self.next_value)
return(bytes(bytes_repr))
class SystemEventType(Enum):
    exclusive = 0b11110000
    common_song_position = 0b11110010
    common_song_select = 0b11110011
    common_tune_request = 0b11110110
    common = 0b11110111
    real_time_timing_clock = 0b11111000
    real_time_start = 0b11111010
    real_time_continue = 0b11111011
    real_time_stop = 0b11111100
    real_time_active_sensing = 0b11111110
    # 0xff doubles as the meta event prefix in midi files, so MTrkEvent routes
    # it to MetaEvent first; the member still has to exist because SystemEvent
    # references it in its dispatch chains below.
    real_time_reset = 0b11111111
class SystemEvent():
def __init__(self, byte_list):
try:
self.event_type = SystemEventType(byte_list[0])
if self.event_type == SystemEventType.exclusive or \
self.event_type == SystemEventType.common:
self.length = 2
tmp_pos = 1
while byte_list[tmp_pos] != SystemEventType.common.value:
tmp_pos += 1
self.length += 1
self.payload = byte_list[1:self.length - 1]
elif self.event_type == SystemEventType.common_song_position:
self.length = 3
elif self.event_type == SystemEventType.common_song_select:
self.length = 2
elif self.event_type == SystemEventType.common_tune_request:
self.length = 1
elif self.event_type == SystemEventType.real_time_timing_clock:
self.length = 1
elif self.event_type == SystemEventType.real_time_start:
self.length = 1
elif self.event_type == SystemEventType.real_time_continue:
self.length = 1
elif self.event_type == SystemEventType.real_time_stop:
self.length = 1
elif self.event_type == SystemEventType.real_time_active_sensing:
self.length = 1
elif self.event_type == SystemEventType.real_time_reset:
self.length = 1
except ValueError:
raise(MidiException('No such system event type'))
def __repr__(self):
if self.event_type == SystemEventType.exclusive or \
self.event_type == SystemEventType.common:
return('<System event type: ' + self.event_type.name + ', ' +
'Payload: ' + str(self.payload) + '>')
else:
return('<System event type: ' + self.event_type.name + '>')
def to_bytes(self):
bytes_repr = bytearray()
bytes_repr.append(self.event_type.value)
if self.event_type == SystemEventType.exclusive or \
self.event_type == SystemEventType.common:
bytes_repr += self.payload
bytes_repr.append(SystemEventType.common.value)
        elif self.event_type == SystemEventType.common_song_position:
            # todo: the 14-bit song position is not kept by the parser yet,
            # so emit two zero data bytes as a placeholder
            bytes_repr.append(0)
            bytes_repr.append(0)
        elif self.event_type == SystemEventType.common_song_select:
            # todo: the song number is not kept by the parser yet, so emit
            # one zero data byte as a placeholder
            bytes_repr.append(0)
elif self.event_type == SystemEventType.common_tune_request:
pass
elif self.event_type == SystemEventType.real_time_timing_clock:
pass
elif self.event_type == SystemEventType.real_time_start:
pass
elif self.event_type == SystemEventType.real_time_continue:
pass
elif self.event_type == SystemEventType.real_time_stop:
pass
elif self.event_type == SystemEventType.real_time_active_sensing:
pass
elif self.event_type == SystemEventType.real_time_reset:
pass
return(bytes(bytes_repr))
class MetaEventType(Enum):
sequence_number = 0b00000000
text = 0b00000001
copyright_notice = 0b00000010
text_sequence_or_track_name = 0b00000011
instrument_name = 0b00000100
lyric = 0b00000101
marker = 0b0000110
cue_point = 0b00000111
channel_prefix = 0b00100000
end_of_track = 0b00101111
tempo = 0b01010001
smpte_offset = 0b01010100
time_signature = 0b01011000
key_signature = 0b01011001
sequencer_specific_payload = 0b01111111
class MetaEvent():
def __init__(self, byte_list):
if byte_list[0] == 0b11111111:
try:
self.event_type = MetaEventType(byte_list[1])
self.payload_length, self.length = decode_variable_length_value(byte_list[2:])
tmp_pos = 2 + self.length
payload = byte_list[tmp_pos:tmp_pos + self.payload_length]
if self.event_type == MetaEventType.sequence_number:
self.sequence_number = bytes_to_uint16(payload)
elif self.event_type == MetaEventType.text:
self.text = bytes_to_str(payload)
elif self.event_type == MetaEventType.copyright_notice:
self.copyright_notice = bytes_to_str(payload)
elif self.event_type == MetaEventType.text_sequence_or_track_name:
self.text_sequence_or_track_name = bytes_to_str(payload)
elif self.event_type == MetaEventType.instrument_name:
self.instrument_name = bytes_to_str(payload)
elif self.event_type == MetaEventType.lyric:
self.lyric = bytes_to_str(payload)
elif self.event_type == MetaEventType.marker:
self.marker = bytes_to_str(payload)
elif self.event_type == MetaEventType.cue_point:
self.cue_point = bytes_to_str(payload)
elif self.event_type == MetaEventType.channel_prefix:
self.channel_prefix = payload[0]
elif self.event_type == MetaEventType.end_of_track:
pass
elif self.event_type == MetaEventType.tempo:
self.tempo = bytes_to_uint24(payload)
elif self.event_type == MetaEventType.smpte_offset:
self.smpte_offset = payload
elif self.event_type == MetaEventType.time_signature:
self.time_signature = payload
elif self.event_type == MetaEventType.key_signature:
self.key_signature = payload
elif self.event_type == MetaEventType.sequencer_specific_payload:
self.sequencer_specific_payload = payload
self.length += 2 + self.payload_length
except:
raise(MidiException('No such meta event'))
else:
raise(MidiException('Not a meta event'))
def __repr__(self):
if self.event_type == MetaEventType.sequence_number:
return('<Meta event type: ' + self.event_type.name + ', ' +
'Sequence number: ' + str(self.sequence_number) + '>')
elif self.event_type == MetaEventType.text:
return('<Meta event type: ' + self.event_type.name + ', ' +
'Text: ' + self.text + '>')
elif self.event_type == MetaEventType.copyright_notice:
return('<Meta event type: ' + self.event_type.name + ', ' +
'Copyright notice: ' + self.copyright_notice + '>')
elif self.event_type == MetaEventType.text_sequence_or_track_name:
return('<Meta event type: ' + self.event_type.name + ', ' +
'Text sequence or track name: ' + self.text_sequence_or_track_name + '>')
elif self.event_type == MetaEventType.instrument_name:
return('<Meta event type: ' + self.event_type.name + ', ' +
'Instrument name: ' + self.instrument_name + '>')
elif self.event_type == MetaEventType.lyric:
return('<Meta event type: ' + self.event_type.name + ', ' +
'Lyric: ' + self.lyric + '>')
elif self.event_type == MetaEventType.marker:
return('<Meta event type: ' + self.event_type.name + ', ' +
'Marker: ' + self.marker + '>')
elif self.event_type == MetaEventType.cue_point:
return('<Meta event type: ' + self.event_type.name + ', ' +
'Cue point: ' + self.cue_point + '>')
elif self.event_type == MetaEventType.channel_prefix:
return('<Meta event type: ' + self.event_type.name + ', ' +
'Channel prefix: ' + str(self.channel_prefix) + '>')
elif self.event_type == MetaEventType.end_of_track:
return('<Meta event type: ' + self.event_type.name + '>')
elif self.event_type == MetaEventType.tempo:
return('<Meta event type: ' + self.event_type.name + ', ' +
'Tempo: ' + str(self.tempo) + '>')
elif self.event_type == MetaEventType.smpte_offset:
return('<Meta event type: ' + self.event_type.name + ', ' +
'SMPTE offset: ' + str(self.smpte_offset) + '>')
elif self.event_type == MetaEventType.time_signature:
return('<Meta event type: ' + self.event_type.name + ', ' +
'Time signature: ' + str(self.time_signature) + '>')
elif self.event_type == MetaEventType.key_signature:
return('<Meta event type: ' + self.event_type.name + ', ' +
'Key signature: ' + str(self.key_signature) + '>')
elif self.event_type == MetaEventType.sequencer_specific_payload:
return('<Meta event type: ' + self.event_type.name + ', ' +
'Sequencer specific payload: ' + str(self.sequencer_specific_payload) + '>')
def to_bytes(self):
bytes_repr = bytearray()
bytes_repr.append(0b11111111)
bytes_repr.append(self.event_type.value)
bytes_repr += encode_variable_length_value(self.payload_length)
if self.event_type == MetaEventType.sequence_number:
bytes_repr += uint16_to_bytes(self.sequence_number)
elif self.event_type == MetaEventType.text:
bytes_repr += str_to_bytes(self.text)
elif self.event_type == MetaEventType.copyright_notice:
            bytes_repr += str_to_bytes(self.copyright_notice)
elif self.event_type == MetaEventType.text_sequence_or_track_name:
bytes_repr += str_to_bytes(self.text_sequence_or_track_name)
elif self.event_type == MetaEventType.instrument_name:
bytes_repr += str_to_bytes(self.instrument_name)
elif self.event_type == MetaEventType.lyric:
bytes_repr += str_to_bytes(self.lyric)
elif self.event_type == MetaEventType.marker:
bytes_repr += str_to_bytes(self.marker)
elif self.event_type == MetaEventType.cue_point:
bytes_repr += str_to_bytes(self.cue_point)
elif self.event_type == MetaEventType.channel_prefix:
# this is not looking too safe
bytes_repr.append(self.channel_prefix)
elif self.event_type == MetaEventType.end_of_track:
pass
elif self.event_type == MetaEventType.tempo:
bytes_repr += uint24_to_bytes(self.tempo)
elif self.event_type == MetaEventType.smpte_offset:
bytes_repr += self.smpte_offset
elif self.event_type == MetaEventType.time_signature:
bytes_repr += self.time_signature
elif self.event_type == MetaEventType.key_signature:
bytes_repr += self.key_signature
elif self.event_type == MetaEventType.sequencer_specific_payload:
bytes_repr += self.sequencer_specific_payload
return(bytes(bytes_repr))
| gpl-3.0 | -2,492,258,582,125,247,500 | 2,765,832,044,542,811,600 | 39.068392 | 95 | 0.565069 | false |
braams/shtoom | shtoom/ui/qtui/shtoommainwindow.py | 1 | 7392 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'shtoommainwindow.ui'
#
# Created: Wed Jan 19 15:16:20 2005
# by: The PyQt User Interface Compiler (pyuic) 3.13
#
# WARNING! All changes made in this file will be lost!
from qt import *
class ShtoomMainWindow(QMainWindow):
def __init__(self,parent = None,name = None,fl = 0):
QMainWindow.__init__(self,parent,name,fl)
self.statusBar()
if not name:
self.setName("ShtoomMainWindow")
self.setCentralWidget(QWidget(self,"qt_central_widget"))
ShtoomMainWindowLayout = QVBoxLayout(self.centralWidget(),11,6,"ShtoomMainWindowLayout")
layout4 = QHBoxLayout(None,0,6,"layout4")
self.textLabel1 = QLabel(self.centralWidget(),"textLabel1")
self.textLabel1.setAlignment(QLabel.AlignVCenter | QLabel.AlignRight)
layout4.addWidget(self.textLabel1)
self.addressComboBox = QComboBox(0,self.centralWidget(),"addressComboBox")
self.addressComboBox.setSizePolicy(QSizePolicy(QSizePolicy.Expanding,QSizePolicy.Minimum,0,0,self.addressComboBox.sizePolicy().hasHeightForWidth()))
self.addressComboBox.setEditable(1)
layout4.addWidget(self.addressComboBox)
self.lookupButton = QPushButton(self.centralWidget(),"lookupButton")
self.lookupButton.setSizePolicy(QSizePolicy(QSizePolicy.Minimum,QSizePolicy.Minimum,0,0,self.lookupButton.sizePolicy().hasHeightForWidth()))
self.lookupButton.setMaximumSize(QSize(25,32767))
layout4.addWidget(self.lookupButton)
ShtoomMainWindowLayout.addLayout(layout4)
layout2 = QHBoxLayout(None,0,6,"layout2")
self.callButton = QPushButton(self.centralWidget(),"callButton")
self.callButton.setSizePolicy(QSizePolicy(QSizePolicy.Minimum,QSizePolicy.Minimum,0,0,self.callButton.sizePolicy().hasHeightForWidth()))
layout2.addWidget(self.callButton)
self.hangupButton = QPushButton(self.centralWidget(),"hangupButton")
self.hangupButton.setSizePolicy(QSizePolicy(QSizePolicy.Minimum,QSizePolicy.Minimum,0,0,self.hangupButton.sizePolicy().hasHeightForWidth()))
layout2.addWidget(self.hangupButton)
self.registerButton = QPushButton(self.centralWidget(),"registerButton")
self.registerButton.setSizePolicy(QSizePolicy(QSizePolicy.Minimum,QSizePolicy.Minimum,0,0,self.registerButton.sizePolicy().hasHeightForWidth()))
layout2.addWidget(self.registerButton)
ShtoomMainWindowLayout.addLayout(layout2)
self.statusLabel = QLabel(self.centralWidget(),"statusLabel")
ShtoomMainWindowLayout.addWidget(self.statusLabel)
self.fileDTMFAction = QAction(self,"fileDTMFAction")
self.fileDTMFAction.setEnabled(1)
self.fileDebugAction = QAction(self,"fileDebugAction")
self.fileDebugAction.setEnabled(1)
self.fileExitAction = QAction(self,"fileExitAction")
self.helpAboutAction = QAction(self,"helpAboutAction")
self.editPreferencesAction = QAction(self,"editPreferencesAction")
self.MenuBar = QMenuBar(self,"MenuBar")
self.fileMenu = QPopupMenu(self)
self.fileDTMFAction.addTo(self.fileMenu)
self.fileDebugAction.addTo(self.fileMenu)
self.fileMenu.insertSeparator()
self.fileExitAction.addTo(self.fileMenu)
self.MenuBar.insertItem(QString(""),self.fileMenu,1)
self.Edit = QPopupMenu(self)
self.editPreferencesAction.addTo(self.Edit)
self.MenuBar.insertItem(QString(""),self.Edit,2)
self.helpMenu = QPopupMenu(self)
self.helpAboutAction.addTo(self.helpMenu)
self.MenuBar.insertItem(QString(""),self.helpMenu,3)
self.languageChange()
self.resize(QSize(343,156).expandedTo(self.minimumSizeHint()))
self.clearWState(Qt.WState_Polished)
self.connect(self.fileDTMFAction,SIGNAL("activated()"),self.fileDTMF)
self.connect(self.fileDebugAction,SIGNAL("activated()"),self.fileDebugging)
self.connect(self.editPreferencesAction,SIGNAL("activated()"),self.editPreferences)
self.connect(self.fileExitAction,SIGNAL("activated()"),self.fileExit)
self.connect(self.helpAboutAction,SIGNAL("activated()"),self.helpAbout)
self.connect(self.callButton,SIGNAL("clicked()"),self.callButton_clicked)
self.connect(self.hangupButton,SIGNAL("clicked()"),self.hangupButton_clicked)
self.connect(self.registerButton,SIGNAL("clicked()"),self.registerButton_clicked)
self.connect(self.lookupButton,SIGNAL("clicked()"),self.lookupButton_clicked)
def languageChange(self):
self.setCaption(self.__tr("Shtoom"))
self.textLabel1.setText(self.__tr("Address"))
self.lookupButton.setText(self.__tr("..."))
self.callButton.setText(self.__tr("Call"))
self.hangupButton.setText(self.__tr("Hang Up"))
self.registerButton.setText(self.__tr("Register"))
self.statusLabel.setText(QString.null)
self.fileDTMFAction.setText(self.__tr("DTMF"))
self.fileDTMFAction.setMenuText(self.__tr("DTMF"))
self.fileDTMFAction.setToolTip(self.__tr("Show DTMF Window"))
self.fileDTMFAction.setAccel(self.__tr("Ctrl+D"))
self.fileDebugAction.setText(self.__tr("Debug Log"))
self.fileDebugAction.setMenuText(self.__tr("Debug Log"))
self.fileDebugAction.setToolTip(self.__tr("Show Debugging Log"))
self.fileDebugAction.setAccel(self.__tr("Ctrl+O"))
self.fileExitAction.setText(self.__tr("Exit"))
self.fileExitAction.setMenuText(self.__tr("Exit"))
self.fileExitAction.setAccel(QString.null)
self.helpAboutAction.setText(self.__tr("About"))
self.helpAboutAction.setMenuText(self.__tr("About"))
self.helpAboutAction.setAccel(QString.null)
self.editPreferencesAction.setText(self.__tr("Preferences"))
self.editPreferencesAction.setMenuText(self.__tr("Preferences"))
self.editPreferencesAction.setAccel(self.__tr("Ctrl+P"))
if self.MenuBar.findItem(1):
self.MenuBar.findItem(1).setText(self.__tr("File"))
if self.MenuBar.findItem(2):
self.MenuBar.findItem(2).setText(self.__tr("Edit"))
if self.MenuBar.findItem(3):
self.MenuBar.findItem(3).setText(self.__tr("Help"))
def fileDTMF(self):
print "ShtoomMainWindow.fileDTMF(): Not implemented yet"
def fileDebugging(self):
print "ShtoomMainWindow.fileDebugging(): Not implemented yet"
def fileExit(self):
print "ShtoomMainWindow.fileExit(): Not implemented yet"
def editPreferences(self):
print "ShtoomMainWindow.editPreferences(): Not implemented yet"
def helpAbout(self):
print "ShtoomMainWindow.helpAbout(): Not implemented yet"
def callButton_clicked(self):
print "ShtoomMainWindow.callButton_clicked(): Not implemented yet"
def hangupButton_clicked(self):
print "ShtoomMainWindow.hangupButton_clicked(): Not implemented yet"
def registerButton_clicked(self):
print "ShtoomMainWindow.registerButton_clicked(): Not implemented yet"
def lookupButton_clicked(self):
print "ShtoomMainWindow.lookupButton_clicked(): Not implemented yet"
def __tr(self,s,c = None):
return qApp.translate("ShtoomMainWindow",s,c)
| lgpl-2.1 | -1,418,590,952,217,392,000 | -651,073,081,880,459,900 | 43.263473 | 156 | 0.700352 | false |
hkariti/ansible | lib/ansible/modules/network/avi/avi_cluster.py | 26 | 3935 | #!/usr/bin/python
#
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
#
# Copyright: (c) 2017 Gaurav Rastogi, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_cluster
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of Cluster Avi RESTful Object
description:
- This module is used to configure Cluster object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
name:
description:
- Name of the object.
required: true
nodes:
description:
- List of clusternode.
rejoin_nodes_automatically:
description:
- Re-join cluster nodes automatically in the event one of the node is reset to factory.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
type: bool
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
virtual_ip:
description:
- A virtual ip address.
- This ip address will be dynamically reconfigured so that it always is the ip of the cluster leader.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create Cluster object
avi_cluster:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_cluster
"""
RETURN = '''
obj:
description: Cluster (api/cluster) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
name=dict(type='str', required=True),
nodes=dict(type='list',),
rejoin_nodes_automatically=dict(type='bool',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
virtual_ip=dict(type='dict',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'cluster',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 | -8,221,304,636,462,987,000 | -3,814,945,574,266,707,000 | 30.733871 | 113 | 0.610165 | false |
jiahaoliang/group-based-policy | gbpservice/nfp/service_vendor_agents/haproxy/haproxy-agent/src/local.py | 101 | 1745 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Greenthread local storage of variables using weak references"""
import weakref
from eventlet import corolocal
class WeakLocal(corolocal.local):
def __getattribute__(self, attr):
rval = corolocal.local.__getattribute__(self, attr)
if rval:
# NOTE(mikal): this bit is confusing. What is stored is a weak
# reference, not the value itself. We therefore need to lookup
# the weak reference and return the inner value here.
rval = rval()
return rval
def __setattr__(self, attr, value):
value = weakref.ref(value)
return corolocal.local.__setattr__(self, attr, value)
# NOTE(mikal): the name "store" should be deprecated in the future
store = WeakLocal()
# A "weak" store uses weak references and allows an object to fall out of scope
# when it falls out of scope in the code that uses the thread local storage. A
# "strong" store will hold a reference to the object so that it never falls out
# of scope.
weak_store = WeakLocal()
strong_store = corolocal.local
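# Hedged illustration (not part of the original module): values kept in
# weak_store vanish once the caller drops its last strong reference, while
# strong_store would keep them alive.
#
#   class Session(object):
#       pass
#
#   s = Session()
#   weak_store.session = s        # stores weakref.ref(s) under the hood
#   assert weak_store.session is s
#   del s                         # no strong refs left (CPython collects now)
#   assert weak_store.session is None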
| apache-2.0 | 4,687,735,678,810,720,000 | 5,540,456,532,889,170,000 | 35.354167 | 79 | 0.699713 | false |
barnsnake351/nova | nova/tests/unit/scheduler/filters/test_availability_zone_filters.py | 57 | 2170 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.scheduler.filters import availability_zone_filter
from nova import test
from nova.tests.unit.scheduler import fakes
@mock.patch('nova.scheduler.filters.utils.aggregate_metadata_get_by_host')
class TestAvailabilityZoneFilter(test.NoDBTestCase):
def setUp(self):
super(TestAvailabilityZoneFilter, self).setUp()
self.filt_cls = availability_zone_filter.AvailabilityZoneFilter()
@staticmethod
def _make_zone_request(zone):
return {
'context': mock.sentinel.ctx,
'request_spec': {
'instance_properties': {
'availability_zone': zone
}
}
}
def test_availability_zone_filter_same(self, agg_mock):
agg_mock.return_value = {'availability_zone': 'nova'}
request = self._make_zone_request('nova')
host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(self.filt_cls.host_passes(host, request))
def test_availability_zone_filter_same_comma(self, agg_mock):
agg_mock.return_value = {'availability_zone': 'nova,nova2'}
request = self._make_zone_request('nova')
host = fakes.FakeHostState('host1', 'node1', {})
self.assertTrue(self.filt_cls.host_passes(host, request))
def test_availability_zone_filter_different(self, agg_mock):
agg_mock.return_value = {'availability_zone': 'nova'}
request = self._make_zone_request('bad')
host = fakes.FakeHostState('host1', 'node1', {})
self.assertFalse(self.filt_cls.host_passes(host, request))
| apache-2.0 | -2,229,624,942,153,084,400 | 8,143,637,773,980,883,000 | 39.185185 | 78 | 0.664977 | false |
N3da/incubator-airflow | airflow/contrib/hooks/spark_submit_hook.py | 6 | 10915 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import subprocess
import re
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
log = logging.getLogger(__name__)
class SparkSubmitHook(BaseHook):
"""
This hook is a wrapper around the spark-submit binary to kick off a spark-submit job.
It requires that the "spark-submit" binary is in the PATH or the spark_home to be
supplied.
:param conf: Arbitrary Spark configuration properties
:type conf: dict
:param conn_id: The connection id as configured in Airflow administration. When an
invalid connection_id is supplied, it will default to yarn.
:type conn_id: str
:param files: Upload additional files to the container running the job, separated by a
comma. For example hive-site.xml.
:type files: str
:param py_files: Additional python files used by the job, can be .zip, .egg or .py.
:type py_files: str
:param jars: Submit additional jars to upload and place them in executor classpath.
:type jars: str
:param java_class: the main class of the Java application
:type java_class: str
:param total_executor_cores: (Standalone & Mesos only) Total cores for all executors (Default: all the available cores on the worker)
:type total_executor_cores: int
:param executor_cores: (Standalone & YARN only) Number of cores per executor (Default: 2)
:type executor_cores: int
:param executor_memory: Memory per executor (e.g. 1000M, 2G) (Default: 1G)
:type executor_memory: str
:param driver_memory: Memory allocated to the driver (e.g. 1000M, 2G) (Default: 1G)
:type driver_memory: str
:param keytab: Full path to the file that contains the keytab
:type keytab: str
:param principal: The name of the kerberos principal used for keytab
:type principal: str
    :param name: Name of the job (default: default-name)
:type name: str
:param num_executors: Number of executors to launch
:type num_executors: int
:param application_args: Arguments for the application being submitted
:type application_args: list
:param verbose: Whether to pass the verbose flag to spark-submit process for debugging
:type verbose: bool
"""
def __init__(self,
conf=None,
conn_id='spark_default',
files=None,
py_files=None,
jars=None,
java_class=None,
total_executor_cores=None,
executor_cores=None,
executor_memory=None,
driver_memory=None,
keytab=None,
principal=None,
name='default-name',
num_executors=None,
application_args=None,
verbose=False):
self._conf = conf
self._conn_id = conn_id
self._files = files
self._py_files = py_files
self._jars = jars
self._java_class = java_class
self._total_executor_cores = total_executor_cores
self._executor_cores = executor_cores
self._executor_memory = executor_memory
self._driver_memory = driver_memory
self._keytab = keytab
self._principal = principal
self._name = name
self._num_executors = num_executors
self._application_args = application_args
self._verbose = verbose
self._sp = None
self._yarn_application_id = None
self._connection = self._resolve_connection()
self._is_yarn = 'yarn' in self._connection['master']
def _resolve_connection(self):
# Build from connection master or default to yarn if not available
conn_data = {'master': 'yarn',
'queue': None,
'deploy_mode': None,
'spark_home': None,
'spark_binary': 'spark-submit'}
try:
# Master can be local, yarn, spark://HOST:PORT or mesos://HOST:PORT
conn = self.get_connection(self._conn_id)
if conn.port:
conn_data['master'] = "{}:{}".format(conn.host, conn.port)
else:
conn_data['master'] = conn.host
# Determine optional yarn queue from the extra field
extra = conn.extra_dejson
conn_data['queue'] = extra.get('queue', None)
conn_data['deploy_mode'] = extra.get('deploy-mode', None)
conn_data['spark_home'] = extra.get('spark-home', None)
conn_data['spark_binary'] = extra.get('spark-binary', 'spark-submit')
except AirflowException:
logging.debug(
"Could not load connection string {}, defaulting to {}".format(
self._conn_id, conn_data['master']
)
)
return conn_data
def get_conn(self):
pass
def _build_command(self, application):
"""
Construct the spark-submit command to execute.
:param application: command to append to the spark-submit command
:type application: str
:return: full command to be executed
"""
# If the spark_home is passed then build the spark-submit executable path using
# the spark_home; otherwise assume that spark-submit is present in the path to
# the executing user
if self._connection['spark_home']:
connection_cmd = [os.path.join(self._connection['spark_home'], 'bin', self._connection['spark_binary'])]
else:
connection_cmd = [self._connection['spark_binary']]
        # The url of the spark master
connection_cmd += ["--master", self._connection['master']]
if self._conf:
for key in self._conf:
connection_cmd += ["--conf", "{}={}".format(key, str(self._conf[key]))]
if self._files:
connection_cmd += ["--files", self._files]
if self._py_files:
connection_cmd += ["--py-files", self._py_files]
if self._jars:
connection_cmd += ["--jars", self._jars]
if self._num_executors:
connection_cmd += ["--num-executors", str(self._num_executors)]
if self._total_executor_cores:
connection_cmd += ["--total-executor-cores", str(self._total_executor_cores)]
if self._executor_cores:
connection_cmd += ["--executor-cores", str(self._executor_cores)]
if self._executor_memory:
connection_cmd += ["--executor-memory", self._executor_memory]
if self._driver_memory:
connection_cmd += ["--driver-memory", self._driver_memory]
if self._keytab:
connection_cmd += ["--keytab", self._keytab]
if self._principal:
connection_cmd += ["--principal", self._principal]
if self._name:
connection_cmd += ["--name", self._name]
if self._java_class:
connection_cmd += ["--class", self._java_class]
if self._verbose:
connection_cmd += ["--verbose"]
if self._connection['queue']:
connection_cmd += ["--queue", self._connection['queue']]
if self._connection['deploy_mode']:
connection_cmd += ["--deploy-mode", self._connection['deploy_mode']]
# The actual script to execute
connection_cmd += [application]
        # Append any application arguments; a multi-word argument is split on
        # whitespace so each token becomes its own command line entry
if self._application_args:
for arg in self._application_args:
if len(arg.split()) > 1:
for splitted_option in arg.split():
connection_cmd += [splitted_option]
else:
connection_cmd += [arg]
logging.debug("Spark-Submit cmd: {}".format(connection_cmd))
return connection_cmd
def submit(self, application="", **kwargs):
"""
Remote Popen to execute the spark-submit job
:param application: Submitted application, jar or py file
:type application: str
:param kwargs: extra arguments to Popen (see subprocess.Popen)
"""
spark_submit_cmd = self._build_command(application)
self._sp = subprocess.Popen(spark_submit_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**kwargs)
# Using two iterators here to support 'real-time' logging
sources = [self._sp.stdout, self._sp.stderr]
for source in sources:
self._process_log(iter(source.readline, b''))
output, stderr = self._sp.communicate()
if self._sp.returncode:
raise AirflowException(
"Cannot execute: {}. Error code is: {}. Output: {}, Stderr: {}".format(
spark_submit_cmd, self._sp.returncode, output, stderr
)
)
def _process_log(self, itr):
"""
Processes the log files and extracts useful information out of it
:param itr: An iterator which iterates over the input of the subprocess
"""
# Consume the iterator
for line in itr:
line = line.decode('utf-8').strip()
# If we run yarn cluster mode, we want to extract the application id from
# the logs so we can kill the application when we stop it unexpectedly
if self._is_yarn and self._connection['deploy_mode'] == 'cluster':
match = re.search('(application[0-9_]+)', line)
if match:
self._yarn_application_id = match.groups()[0]
# Pass to logging
logging.info(line)
def on_kill(self):
if self._sp and self._sp.poll() is None:
logging.info('Sending kill signal to {}'.format(self._connection['spark_binary']))
self._sp.kill()
if self._yarn_application_id:
logging.info('Killing application on YARN')
kill_cmd = "yarn application -kill {0}".format(self._yarn_application_id).split()
yarn_kill = subprocess.Popen(kill_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
logging.info("YARN killed with return code: {0}".format(yarn_kill.wait()))
| apache-2.0 | 6,804,328,194,732,230,000 | 8,516,041,179,863,978,000 | 40.344697 | 137 | 0.587998 | false |
NeilBryant/check_mk | doc/treasures/wato_geo_fields.py | 6 | 2020 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 [email protected] |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# ails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
# Place this file in ~/local/share/check_mk/web/plugins/wato to get two new fields in the WATO host properties.
# These fields can be used to add latitude and longitude information, useful for the NagVis Geomap.
declare_host_attribute(
NagiosTextAttribute(
"lat",
"_LAT",
"Latitude",
"Latitude",
),
show_in_table = False,
show_in_folder = False,
)
declare_host_attribute(
NagiosTextAttribute(
"long",
"_LONG",
"Longitude",
"Longitude",
),
show_in_table = False,
show_in_folder = False,
)
| gpl-2.0 | 6,379,861,087,450,581,000 | -3,642,297,114,961,537,500 | 39.4 | 111 | 0.50099 | false |
Glasgow2015/team-10 | env/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/big5freq.py | 3133 | 82594 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Big5 frequency table
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
#
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) = 2.98
# Random Distribution Ratio = 512/(5401-512) = 0.105
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR
BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75
#Char to FreqOrder table
BIG5_TABLE_SIZE = 5376
Big5CharToFreqOrder = (
1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16
3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32
1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48
63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64
3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80
4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96
5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112
630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128
179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144
995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160
2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176
1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192
3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208
706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240
3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256
2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272
437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288
3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304
1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320
5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336
266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352
5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368
1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384
32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400
188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416
3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432
3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448
324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464
2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480
2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496
314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512
287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528
3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544
1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560
1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576
1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592
2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608
265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624
4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640
1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656
5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672
2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688
383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704
98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720
523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736
710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752
5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768
379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784
1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800
585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816
690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832
5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848
1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864
544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880
3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896
4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912
3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928
279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944
610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960
1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976
4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992
3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008
3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024
2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040
5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056
3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072
5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088
1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104
2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120
1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136
78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152
1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168
4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184
3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200
534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216
165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232
626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248
2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264
5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280
1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296
2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312
1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328
1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344
5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360
5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376
5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392
3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408
4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424
4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440
2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456
5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472
3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488
598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504
5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520
5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536
1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552
2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568
3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584
4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600
5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616
3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632
4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648
1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664
1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680
4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696
1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712
240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728
1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744
1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760
3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776
619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792
5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808
2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824
1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840
1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856
5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872
829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888
4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904
375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920
2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936
444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952
1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968
1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984
730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000
4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016
4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032
1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048
3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064
5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080
5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096
1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112
2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128
1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144
3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160
2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176
3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192
2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208
4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224
4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240
3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256
97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272
3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288
424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304
3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320
4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336
3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352
1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368
5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384
199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400
5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416
1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432
391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448
4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464
4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480
397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496
2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512
2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528
3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544
1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560
4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576
2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592
1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608
1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624
2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640
3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656
1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672
5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688
1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704
4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720
1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736
135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752
1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768
4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784
4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800
2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816
1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832
4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848
660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864
5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880
2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896
3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912
4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928
790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944
5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960
5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976
1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992
4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008
4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024
2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040
3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056
3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072
2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088
1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104
4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120
3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136
3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152
2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168
4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184
5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200
3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216
2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232
3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248
1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264
2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280
3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296
4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312
2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328
2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344
5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360
1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376
2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392
1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408
3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424
4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440
2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456
3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472
3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488
2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504
4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520
2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536
3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552
4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568
5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584
3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600
194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616
1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632
4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648
1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664
4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680
5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696
510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712
5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728
5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744
2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760
3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776
2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792
2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808
681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824
1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840
4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856
3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872
3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888
838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904
2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920
625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936
2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952
4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968
1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984
4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000
1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016
3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032
574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048
3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064
5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080
5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096
3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112
3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128
1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144
2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160
5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176
1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192
1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208
3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224
919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240
1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256
4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272
5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288
2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304
3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320
516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336
1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352
2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368
2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384
5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400
5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416
5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432
2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448
2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464
1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480
4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496
3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512
3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528
4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544
4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560
2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576
2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592
5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608
4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624
5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640
4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656
502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672
121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688
1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704
3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720
4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736
1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752
5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768
2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784
2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800
3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816
5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832
1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848
3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864
5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880
1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896
5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912
2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928
3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944
2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960
3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976
3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992
3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008
4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024
803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040
2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056
4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072
3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088
5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104
1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120
5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136
425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152
1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168
479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184
4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200
1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216
4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232
1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248
433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264
3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280
4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296
5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312
938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328
3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344
890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360
2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376  # last 512
# Everything below is of no interest for detection purposes
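# Hedged usage sketch (illustrative only; the identifiers below are
# assumptions, not names defined in this file): chardet-style distribution
# analysers look up a character's frequency order in a table like the one
# above and count only orders below 512 as "frequent", which is why the
# remaining entries never influence detection.  Roughly:
#
#     order = char_to_freq_order[char_index]   # hypothetical table name
#     total_chars += 1
#     if 0 <= order < 512:
#         freq_chars += 1
#     # Confidence grows with the share of frequent characters, scaled by a
#     # typical distribution ratio for the target encoding:
#     confidence = freq_chars / ((total_chars - freq_chars) * typical_ratio)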
2522,1613,4812,5799,3345,3945,2523,5800,4162,5801,1637,4163,2471,4813,3946,5802, # 5392
2500,3034,3800,5803,5804,2195,4814,5805,2163,5806,5807,5808,5809,5810,5811,5812, # 5408
5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828, # 5424
5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844, # 5440
5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856,5857,5858,5859,5860, # 5456
5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872,5873,5874,5875,5876, # 5472
5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888,5889,5890,5891,5892, # 5488
5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,5906,5907,5908, # 5504
5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,5921,5922,5923,5924, # 5520
5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936,5937,5938,5939,5940, # 5536
5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952,5953,5954,5955,5956, # 5552
5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968,5969,5970,5971,5972, # 5568
5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984,5985,5986,5987,5988, # 5584
5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004, # 5600
6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020, # 5616
6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032,6033,6034,6035,6036, # 5632
6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052, # 5648
6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068, # 5664
6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084, # 5680
6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100, # 5696
6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116, # 5712
6117,6118,6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,6132, # 5728
6133,6134,6135,6136,6137,6138,6139,6140,6141,6142,6143,6144,6145,6146,6147,6148, # 5744
6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,6164, # 5760
6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,6180, # 5776
6181,6182,6183,6184,6185,6186,6187,6188,6189,6190,6191,6192,6193,6194,6195,6196, # 5792
6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,6211,6212, # 5808
6213,6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,3670,6224,6225,6226,6227, # 5824
6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,6242,6243, # 5840
6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,6254,6255,6256,6257,6258,6259, # 5856
6260,6261,6262,6263,6264,6265,6266,6267,6268,6269,6270,6271,6272,6273,6274,6275, # 5872
6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,4815,6286,6287,6288,6289,6290, # 5888
6291,6292,4816,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,6303,6304,6305, # 5904
6306,6307,6308,6309,6310,6311,4817,4818,6312,6313,6314,6315,6316,6317,6318,4819, # 5920
6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,6334, # 5936
6335,6336,6337,4820,6338,6339,6340,6341,6342,6343,6344,6345,6346,6347,6348,6349, # 5952
6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,6364,6365, # 5968
6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,6380,6381, # 5984
6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,6396,6397, # 6000
6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,3441,6411,6412, # 6016
6413,6414,6415,6416,6417,6418,6419,6420,6421,6422,6423,6424,6425,4440,6426,6427, # 6032
6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,6439,6440,6441,6442,6443, # 6048
6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,4821,6455,6456,6457,6458, # 6064
6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,6473,6474, # 6080
6475,6476,6477,3947,3948,6478,6479,6480,6481,3272,4441,6482,6483,6484,6485,4442, # 6096
6486,6487,6488,6489,6490,6491,6492,6493,6494,6495,6496,4822,6497,6498,6499,6500, # 6112
6501,6502,6503,6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516, # 6128
6517,6518,6519,6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532, # 6144
6533,6534,6535,6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548, # 6160
6549,6550,6551,6552,6553,6554,6555,6556,2784,6557,4823,6558,6559,6560,6561,6562, # 6176
6563,6564,6565,6566,6567,6568,6569,3949,6570,6571,6572,4824,6573,6574,6575,6576, # 6192
6577,6578,6579,6580,6581,6582,6583,4825,6584,6585,6586,3950,2785,6587,6588,6589, # 6208
6590,6591,6592,6593,6594,6595,6596,6597,6598,6599,6600,6601,6602,6603,6604,6605, # 6224
6606,6607,6608,6609,6610,6611,6612,4826,6613,6614,6615,4827,6616,6617,6618,6619, # 6240
6620,6621,6622,6623,6624,6625,4164,6626,6627,6628,6629,6630,6631,6632,6633,6634, # 6256
3547,6635,4828,6636,6637,6638,6639,6640,6641,6642,3951,2984,6643,6644,6645,6646, # 6272
6647,6648,6649,4165,6650,4829,6651,6652,4830,6653,6654,6655,6656,6657,6658,6659, # 6288
6660,6661,6662,4831,6663,6664,6665,6666,6667,6668,6669,6670,6671,4166,6672,4832, # 6304
3952,6673,6674,6675,6676,4833,6677,6678,6679,4167,6680,6681,6682,3198,6683,6684, # 6320
6685,6686,6687,6688,6689,6690,6691,6692,6693,6694,6695,6696,6697,4834,6698,6699, # 6336
6700,6701,6702,6703,6704,6705,6706,6707,6708,6709,6710,6711,6712,6713,6714,6715, # 6352
6716,6717,6718,6719,6720,6721,6722,6723,6724,6725,6726,6727,6728,6729,6730,6731, # 6368
6732,6733,6734,4443,6735,6736,6737,6738,6739,6740,6741,6742,6743,6744,6745,4444, # 6384
6746,6747,6748,6749,6750,6751,6752,6753,6754,6755,6756,6757,6758,6759,6760,6761, # 6400
6762,6763,6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777, # 6416
6778,6779,6780,6781,4168,6782,6783,3442,6784,6785,6786,6787,6788,6789,6790,6791, # 6432
4169,6792,6793,6794,6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806, # 6448
6807,6808,6809,6810,6811,4835,6812,6813,6814,4445,6815,6816,4446,6817,6818,6819, # 6464
6820,6821,6822,6823,6824,6825,6826,6827,6828,6829,6830,6831,6832,6833,6834,6835, # 6480
3548,6836,6837,6838,6839,6840,6841,6842,6843,6844,6845,6846,4836,6847,6848,6849, # 6496
6850,6851,6852,6853,6854,3953,6855,6856,6857,6858,6859,6860,6861,6862,6863,6864, # 6512
6865,6866,6867,6868,6869,6870,6871,6872,6873,6874,6875,6876,6877,3199,6878,6879, # 6528
6880,6881,6882,4447,6883,6884,6885,6886,6887,6888,6889,6890,6891,6892,6893,6894, # 6544
6895,6896,6897,6898,6899,6900,6901,6902,6903,6904,4170,6905,6906,6907,6908,6909, # 6560
6910,6911,6912,6913,6914,6915,6916,6917,6918,6919,6920,6921,6922,6923,6924,6925, # 6576
6926,6927,4837,6928,6929,6930,6931,6932,6933,6934,6935,6936,3346,6937,6938,4838, # 6592
6939,6940,6941,4448,6942,6943,6944,6945,6946,4449,6947,6948,6949,6950,6951,6952, # 6608
6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,6967,6968, # 6624
6969,6970,6971,6972,6973,6974,6975,6976,6977,6978,6979,6980,6981,6982,6983,6984, # 6640
6985,6986,6987,6988,6989,6990,6991,6992,6993,6994,3671,6995,6996,6997,6998,4839, # 6656
6999,7000,7001,7002,3549,7003,7004,7005,7006,7007,7008,7009,7010,7011,7012,7013, # 6672
7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,7028,7029, # 6688
7030,4840,7031,7032,7033,7034,7035,7036,7037,7038,4841,7039,7040,7041,7042,7043, # 6704
7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,7059, # 6720
7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,2985,7071,7072,7073,7074, # 6736
7075,7076,7077,7078,7079,7080,4842,7081,7082,7083,7084,7085,7086,7087,7088,7089, # 6752
7090,7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105, # 6768
7106,7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,4450,7119,7120, # 6784
7121,7122,7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136, # 6800
7137,7138,7139,7140,7141,7142,7143,4843,7144,7145,7146,7147,7148,7149,7150,7151, # 6816
7152,7153,7154,7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167, # 6832
7168,7169,7170,7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183, # 6848
7184,7185,7186,7187,7188,4171,4172,7189,7190,7191,7192,7193,7194,7195,7196,7197, # 6864
7198,7199,7200,7201,7202,7203,7204,7205,7206,7207,7208,7209,7210,7211,7212,7213, # 6880
7214,7215,7216,7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229, # 6896
7230,7231,7232,7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245, # 6912
7246,7247,7248,7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261, # 6928
7262,7263,7264,7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277, # 6944
7278,7279,7280,7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293, # 6960
7294,7295,7296,4844,7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308, # 6976
7309,7310,7311,7312,7313,7314,7315,7316,4451,7317,7318,7319,7320,7321,7322,7323, # 6992
7324,7325,7326,7327,7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339, # 7008
7340,7341,7342,7343,7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,4173,7354, # 7024
7355,4845,7356,7357,7358,7359,7360,7361,7362,7363,7364,7365,7366,7367,7368,7369, # 7040
7370,7371,7372,7373,7374,7375,7376,7377,7378,7379,7380,7381,7382,7383,7384,7385, # 7056
7386,7387,7388,4846,7389,7390,7391,7392,7393,7394,7395,7396,7397,7398,7399,7400, # 7072
7401,7402,7403,7404,7405,3672,7406,7407,7408,7409,7410,7411,7412,7413,7414,7415, # 7088
7416,7417,7418,7419,7420,7421,7422,7423,7424,7425,7426,7427,7428,7429,7430,7431, # 7104
7432,7433,7434,7435,7436,7437,7438,7439,7440,7441,7442,7443,7444,7445,7446,7447, # 7120
7448,7449,7450,7451,7452,7453,4452,7454,3200,7455,7456,7457,7458,7459,7460,7461, # 7136
7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,7472,7473,7474,4847,7475,7476, # 7152
7477,3133,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,7488,7489,7490,7491, # 7168
7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,3347,7503,7504,7505,7506, # 7184
7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,7520,7521,4848, # 7200
7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,7536,7537, # 7216
7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,3801,4849,7550,7551, # 7232
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567, # 7248
7568,7569,3035,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582, # 7264
7583,7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598, # 7280
7599,7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614, # 7296
7615,7616,4850,7617,7618,3802,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628, # 7312
7629,7630,7631,7632,4851,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643, # 7328
7644,7645,7646,7647,7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659, # 7344
7660,7661,7662,7663,7664,7665,7666,7667,7668,7669,7670,4453,7671,7672,7673,7674, # 7360
7675,7676,7677,7678,7679,7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690, # 7376
7691,7692,7693,7694,7695,7696,7697,3443,7698,7699,7700,7701,7702,4454,7703,7704, # 7392
7705,7706,7707,7708,7709,7710,7711,7712,7713,2472,7714,7715,7716,7717,7718,7719, # 7408
7720,7721,7722,7723,7724,7725,7726,7727,7728,7729,7730,7731,3954,7732,7733,7734, # 7424
7735,7736,7737,7738,7739,7740,7741,7742,7743,7744,7745,7746,7747,7748,7749,7750, # 7440
3134,7751,7752,4852,7753,7754,7755,4853,7756,7757,7758,7759,7760,4174,7761,7762, # 7456
7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,7777,7778, # 7472
7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,7792,7793,7794, # 7488
7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,4854,7806,7807,7808,7809, # 7504
7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824,7825, # 7520
4855,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7536
7841,7842,7843,7844,7845,7846,7847,3955,7848,7849,7850,7851,7852,7853,7854,7855, # 7552
7856,7857,7858,7859,7860,3444,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870, # 7568
7871,7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886, # 7584
7887,7888,7889,7890,7891,4175,7892,7893,7894,7895,7896,4856,4857,7897,7898,7899, # 7600
7900,2598,7901,7902,7903,7904,7905,7906,7907,7908,4455,7909,7910,7911,7912,7913, # 7616
7914,3201,7915,7916,7917,7918,7919,7920,7921,4858,7922,7923,7924,7925,7926,7927, # 7632
7928,7929,7930,7931,7932,7933,7934,7935,7936,7937,7938,7939,7940,7941,7942,7943, # 7648
7944,7945,7946,7947,7948,7949,7950,7951,7952,7953,7954,7955,7956,7957,7958,7959, # 7664
7960,7961,7962,7963,7964,7965,7966,7967,7968,7969,7970,7971,7972,7973,7974,7975, # 7680
7976,7977,7978,7979,7980,7981,4859,7982,7983,7984,7985,7986,7987,7988,7989,7990, # 7696
7991,7992,7993,7994,7995,7996,4860,7997,7998,7999,8000,8001,8002,8003,8004,8005, # 7712
8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,8016,4176,8017,8018,8019,8020, # 7728
8021,8022,8023,4861,8024,8025,8026,8027,8028,8029,8030,8031,8032,8033,8034,8035, # 7744
8036,4862,4456,8037,8038,8039,8040,4863,8041,8042,8043,8044,8045,8046,8047,8048, # 7760
8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,8064, # 7776
8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,8080, # 7792
8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,8096, # 7808
8097,8098,8099,4864,4177,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110, # 7824
8111,8112,8113,8114,8115,8116,8117,8118,8119,8120,4178,8121,8122,8123,8124,8125, # 7840
8126,8127,8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141, # 7856
8142,8143,8144,8145,4865,4866,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155, # 7872
8156,8157,8158,8159,8160,8161,8162,8163,8164,8165,4179,8166,8167,8168,8169,8170, # 7888
8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181,4457,8182,8183,8184,8185, # 7904
8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197,8198,8199,8200,8201, # 7920
8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213,8214,8215,8216,8217, # 7936
8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229,8230,8231,8232,8233, # 7952
8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245,8246,8247,8248,8249, # 7968
8250,8251,8252,8253,8254,8255,8256,3445,8257,8258,8259,8260,8261,8262,4458,8263, # 7984
8264,8265,8266,8267,8268,8269,8270,8271,8272,4459,8273,8274,8275,8276,3550,8277, # 8000
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,4460,8290,8291,8292, # 8016
8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,4867, # 8032
8308,8309,8310,8311,8312,3551,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322, # 8048
8323,8324,8325,8326,4868,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337, # 8064
8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353, # 8080
8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,4869,4461,8364,8365,8366,8367, # 8096
8368,8369,8370,4870,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382, # 8112
8383,8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398, # 8128
8399,8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,4871,8411,8412,8413, # 8144
8414,8415,8416,8417,8418,8419,8420,8421,8422,4462,8423,8424,8425,8426,8427,8428, # 8160
8429,8430,8431,8432,8433,2986,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443, # 8176
8444,8445,8446,8447,8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459, # 8192
8460,8461,8462,8463,8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475, # 8208
8476,8477,8478,4180,8479,8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490, # 8224
8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506, # 8240
8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522, # 8256
8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538, # 8272
8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554, # 8288
8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,4872,8565,8566,8567,8568,8569, # 8304
8570,8571,8572,8573,4873,8574,8575,8576,8577,8578,8579,8580,8581,8582,8583,8584, # 8320
8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597,8598,8599,8600, # 8336
8601,8602,8603,8604,8605,3803,8606,8607,8608,8609,8610,8611,8612,8613,4874,3804, # 8352
8614,8615,8616,8617,8618,8619,8620,8621,3956,8622,8623,8624,8625,8626,8627,8628, # 8368
8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,2865,8639,8640,8641,8642,8643, # 8384
8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,4463,8657,8658, # 8400
8659,4875,4876,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672, # 8416
8673,8674,8675,8676,8677,8678,8679,8680,8681,4464,8682,8683,8684,8685,8686,8687, # 8432
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703, # 8448
8704,8705,8706,8707,8708,8709,2261,8710,8711,8712,8713,8714,8715,8716,8717,8718, # 8464
8719,8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,4181, # 8480
8734,8735,8736,8737,8738,8739,8740,8741,8742,8743,8744,8745,8746,8747,8748,8749, # 8496
8750,8751,8752,8753,8754,8755,8756,8757,8758,8759,8760,8761,8762,8763,4877,8764, # 8512
8765,8766,8767,8768,8769,8770,8771,8772,8773,8774,8775,8776,8777,8778,8779,8780, # 8528
8781,8782,8783,8784,8785,8786,8787,8788,4878,8789,4879,8790,8791,8792,4880,8793, # 8544
8794,8795,8796,8797,8798,8799,8800,8801,4881,8802,8803,8804,8805,8806,8807,8808, # 8560
8809,8810,8811,8812,8813,8814,8815,3957,8816,8817,8818,8819,8820,8821,8822,8823, # 8576
8824,8825,8826,8827,8828,8829,8830,8831,8832,8833,8834,8835,8836,8837,8838,8839, # 8592
8840,8841,8842,8843,8844,8845,8846,8847,4882,8848,8849,8850,8851,8852,8853,8854, # 8608
8855,8856,8857,8858,8859,8860,8861,8862,8863,8864,8865,8866,8867,8868,8869,8870, # 8624
8871,8872,8873,8874,8875,8876,8877,8878,8879,8880,8881,8882,8883,8884,3202,8885, # 8640
8886,8887,8888,8889,8890,8891,8892,8893,8894,8895,8896,8897,8898,8899,8900,8901, # 8656
8902,8903,8904,8905,8906,8907,8908,8909,8910,8911,8912,8913,8914,8915,8916,8917, # 8672
8918,8919,8920,8921,8922,8923,8924,4465,8925,8926,8927,8928,8929,8930,8931,8932, # 8688
4883,8933,8934,8935,8936,8937,8938,8939,8940,8941,8942,8943,2214,8944,8945,8946, # 8704
8947,8948,8949,8950,8951,8952,8953,8954,8955,8956,8957,8958,8959,8960,8961,8962, # 8720
8963,8964,8965,4884,8966,8967,8968,8969,8970,8971,8972,8973,8974,8975,8976,8977, # 8736
8978,8979,8980,8981,8982,8983,8984,8985,8986,8987,8988,8989,8990,8991,8992,4885, # 8752
8993,8994,8995,8996,8997,8998,8999,9000,9001,9002,9003,9004,9005,9006,9007,9008, # 8768
9009,9010,9011,9012,9013,9014,9015,9016,9017,9018,9019,9020,9021,4182,9022,9023, # 8784
9024,9025,9026,9027,9028,9029,9030,9031,9032,9033,9034,9035,9036,9037,9038,9039, # 8800
9040,9041,9042,9043,9044,9045,9046,9047,9048,9049,9050,9051,9052,9053,9054,9055, # 8816
9056,9057,9058,9059,9060,9061,9062,9063,4886,9064,9065,9066,9067,9068,9069,4887, # 8832
9070,9071,9072,9073,9074,9075,9076,9077,9078,9079,9080,9081,9082,9083,9084,9085, # 8848
9086,9087,9088,9089,9090,9091,9092,9093,9094,9095,9096,9097,9098,9099,9100,9101, # 8864
9102,9103,9104,9105,9106,9107,9108,9109,9110,9111,9112,9113,9114,9115,9116,9117, # 8880
9118,9119,9120,9121,9122,9123,9124,9125,9126,9127,9128,9129,9130,9131,9132,9133, # 8896
9134,9135,9136,9137,9138,9139,9140,9141,3958,9142,9143,9144,9145,9146,9147,9148, # 8912
9149,9150,9151,4888,9152,9153,9154,9155,9156,9157,9158,9159,9160,9161,9162,9163, # 8928
9164,9165,9166,9167,9168,9169,9170,9171,9172,9173,9174,9175,4889,9176,9177,9178, # 8944
9179,9180,9181,9182,9183,9184,9185,9186,9187,9188,9189,9190,9191,9192,9193,9194, # 8960
9195,9196,9197,9198,9199,9200,9201,9202,9203,4890,9204,9205,9206,9207,9208,9209, # 8976
9210,9211,9212,9213,9214,9215,9216,9217,9218,9219,9220,9221,9222,4466,9223,9224, # 8992
9225,9226,9227,9228,9229,9230,9231,9232,9233,9234,9235,9236,9237,9238,9239,9240, # 9008
9241,9242,9243,9244,9245,4891,9246,9247,9248,9249,9250,9251,9252,9253,9254,9255, # 9024
9256,9257,4892,9258,9259,9260,9261,4893,4894,9262,9263,9264,9265,9266,9267,9268, # 9040
9269,9270,9271,9272,9273,4467,9274,9275,9276,9277,9278,9279,9280,9281,9282,9283, # 9056
9284,9285,3673,9286,9287,9288,9289,9290,9291,9292,9293,9294,9295,9296,9297,9298, # 9072
9299,9300,9301,9302,9303,9304,9305,9306,9307,9308,9309,9310,9311,9312,9313,9314, # 9088
9315,9316,9317,9318,9319,9320,9321,9322,4895,9323,9324,9325,9326,9327,9328,9329, # 9104
9330,9331,9332,9333,9334,9335,9336,9337,9338,9339,9340,9341,9342,9343,9344,9345, # 9120
9346,9347,4468,9348,9349,9350,9351,9352,9353,9354,9355,9356,9357,9358,9359,9360, # 9136
9361,9362,9363,9364,9365,9366,9367,9368,9369,9370,9371,9372,9373,4896,9374,4469, # 9152
9375,9376,9377,9378,9379,4897,9380,9381,9382,9383,9384,9385,9386,9387,9388,9389, # 9168
9390,9391,9392,9393,9394,9395,9396,9397,9398,9399,9400,9401,9402,9403,9404,9405, # 9184
9406,4470,9407,2751,9408,9409,3674,3552,9410,9411,9412,9413,9414,9415,9416,9417, # 9200
9418,9419,9420,9421,4898,9422,9423,9424,9425,9426,9427,9428,9429,3959,9430,9431, # 9216
9432,9433,9434,9435,9436,4471,9437,9438,9439,9440,9441,9442,9443,9444,9445,9446, # 9232
9447,9448,9449,9450,3348,9451,9452,9453,9454,9455,9456,9457,9458,9459,9460,9461, # 9248
9462,9463,9464,9465,9466,9467,9468,9469,9470,9471,9472,4899,9473,9474,9475,9476, # 9264
9477,4900,9478,9479,9480,9481,9482,9483,9484,9485,9486,9487,9488,3349,9489,9490, # 9280
9491,9492,9493,9494,9495,9496,9497,9498,9499,9500,9501,9502,9503,9504,9505,9506, # 9296
9507,9508,9509,9510,9511,9512,9513,9514,9515,9516,9517,9518,9519,9520,4901,9521, # 9312
9522,9523,9524,9525,9526,4902,9527,9528,9529,9530,9531,9532,9533,9534,9535,9536, # 9328
9537,9538,9539,9540,9541,9542,9543,9544,9545,9546,9547,9548,9549,9550,9551,9552, # 9344
9553,9554,9555,9556,9557,9558,9559,9560,9561,9562,9563,9564,9565,9566,9567,9568, # 9360
9569,9570,9571,9572,9573,9574,9575,9576,9577,9578,9579,9580,9581,9582,9583,9584, # 9376
3805,9585,9586,9587,9588,9589,9590,9591,9592,9593,9594,9595,9596,9597,9598,9599, # 9392
9600,9601,9602,4903,9603,9604,9605,9606,9607,4904,9608,9609,9610,9611,9612,9613, # 9408
9614,4905,9615,9616,9617,9618,9619,9620,9621,9622,9623,9624,9625,9626,9627,9628, # 9424
9629,9630,9631,9632,4906,9633,9634,9635,9636,9637,9638,9639,9640,9641,9642,9643, # 9440
4907,9644,9645,9646,9647,9648,9649,9650,9651,9652,9653,9654,9655,9656,9657,9658, # 9456
9659,9660,9661,9662,9663,9664,9665,9666,9667,9668,9669,9670,9671,9672,4183,9673, # 9472
9674,9675,9676,9677,4908,9678,9679,9680,9681,4909,9682,9683,9684,9685,9686,9687, # 9488
9688,9689,9690,4910,9691,9692,9693,3675,9694,9695,9696,2945,9697,9698,9699,9700, # 9504
9701,9702,9703,9704,9705,4911,9706,9707,9708,9709,9710,9711,9712,9713,9714,9715, # 9520
9716,9717,9718,9719,9720,9721,9722,9723,9724,9725,9726,9727,9728,9729,9730,9731, # 9536
9732,9733,9734,9735,4912,9736,9737,9738,9739,9740,4913,9741,9742,9743,9744,9745, # 9552
9746,9747,9748,9749,9750,9751,9752,9753,9754,9755,9756,9757,9758,4914,9759,9760, # 9568
9761,9762,9763,9764,9765,9766,9767,9768,9769,9770,9771,9772,9773,9774,9775,9776, # 9584
9777,9778,9779,9780,9781,9782,4915,9783,9784,9785,9786,9787,9788,9789,9790,9791, # 9600
9792,9793,4916,9794,9795,9796,9797,9798,9799,9800,9801,9802,9803,9804,9805,9806, # 9616
9807,9808,9809,9810,9811,9812,9813,9814,9815,9816,9817,9818,9819,9820,9821,9822, # 9632
9823,9824,9825,9826,9827,9828,9829,9830,9831,9832,9833,9834,9835,9836,9837,9838, # 9648
9839,9840,9841,9842,9843,9844,9845,9846,9847,9848,9849,9850,9851,9852,9853,9854, # 9664
9855,9856,9857,9858,9859,9860,9861,9862,9863,9864,9865,9866,9867,9868,4917,9869, # 9680
9870,9871,9872,9873,9874,9875,9876,9877,9878,9879,9880,9881,9882,9883,9884,9885, # 9696
9886,9887,9888,9889,9890,9891,9892,4472,9893,9894,9895,9896,9897,3806,9898,9899, # 9712
9900,9901,9902,9903,9904,9905,9906,9907,9908,9909,9910,9911,9912,9913,9914,4918, # 9728
9915,9916,9917,4919,9918,9919,9920,9921,4184,9922,9923,9924,9925,9926,9927,9928, # 9744
9929,9930,9931,9932,9933,9934,9935,9936,9937,9938,9939,9940,9941,9942,9943,9944, # 9760
9945,9946,4920,9947,9948,9949,9950,9951,9952,9953,9954,9955,4185,9956,9957,9958, # 9776
9959,9960,9961,9962,9963,9964,9965,4921,9966,9967,9968,4473,9969,9970,9971,9972, # 9792
9973,9974,9975,9976,9977,4474,9978,9979,9980,9981,9982,9983,9984,9985,9986,9987, # 9808
9988,9989,9990,9991,9992,9993,9994,9995,9996,9997,9998,9999,10000,10001,10002,10003, # 9824
10004,10005,10006,10007,10008,10009,10010,10011,10012,10013,10014,10015,10016,10017,10018,10019, # 9840
10020,10021,4922,10022,4923,10023,10024,10025,10026,10027,10028,10029,10030,10031,10032,10033, # 9856
10034,10035,10036,10037,10038,10039,10040,10041,10042,10043,10044,10045,10046,10047,10048,4924, # 9872
10049,10050,10051,10052,10053,10054,10055,10056,10057,10058,10059,10060,10061,10062,10063,10064, # 9888
10065,10066,10067,10068,10069,10070,10071,10072,10073,10074,10075,10076,10077,10078,10079,10080, # 9904
10081,10082,10083,10084,10085,10086,10087,4475,10088,10089,10090,10091,10092,10093,10094,10095, # 9920
10096,10097,4476,10098,10099,10100,10101,10102,10103,10104,10105,10106,10107,10108,10109,10110, # 9936
10111,2174,10112,10113,10114,10115,10116,10117,10118,10119,10120,10121,10122,10123,10124,10125, # 9952
10126,10127,10128,10129,10130,10131,10132,10133,10134,10135,10136,10137,10138,10139,10140,3807, # 9968
4186,4925,10141,10142,10143,10144,10145,10146,10147,4477,4187,10148,10149,10150,10151,10152, # 9984
10153,4188,10154,10155,10156,10157,10158,10159,10160,10161,4926,10162,10163,10164,10165,10166, #10000
10167,10168,10169,10170,10171,10172,10173,10174,10175,10176,10177,10178,10179,10180,10181,10182, #10016
10183,10184,10185,10186,10187,10188,10189,10190,10191,10192,3203,10193,10194,10195,10196,10197, #10032
10198,10199,10200,4478,10201,10202,10203,10204,4479,10205,10206,10207,10208,10209,10210,10211, #10048
10212,10213,10214,10215,10216,10217,10218,10219,10220,10221,10222,10223,10224,10225,10226,10227, #10064
10228,10229,10230,10231,10232,10233,10234,4927,10235,10236,10237,10238,10239,10240,10241,10242, #10080
10243,10244,10245,10246,10247,10248,10249,10250,10251,10252,10253,10254,10255,10256,10257,10258, #10096
10259,10260,10261,10262,10263,10264,10265,10266,10267,10268,10269,10270,10271,10272,10273,4480, #10112
4928,4929,10274,10275,10276,10277,10278,10279,10280,10281,10282,10283,10284,10285,10286,10287, #10128
10288,10289,10290,10291,10292,10293,10294,10295,10296,10297,10298,10299,10300,10301,10302,10303, #10144
10304,10305,10306,10307,10308,10309,10310,10311,10312,10313,10314,10315,10316,10317,10318,10319, #10160
10320,10321,10322,10323,10324,10325,10326,10327,10328,10329,10330,10331,10332,10333,10334,4930, #10176
10335,10336,10337,10338,10339,10340,10341,10342,4931,10343,10344,10345,10346,10347,10348,10349, #10192
10350,10351,10352,10353,10354,10355,3088,10356,2786,10357,10358,10359,10360,4189,10361,10362, #10208
10363,10364,10365,10366,10367,10368,10369,10370,10371,10372,10373,10374,10375,4932,10376,10377, #10224
10378,10379,10380,10381,10382,10383,10384,10385,10386,10387,10388,10389,10390,10391,10392,4933, #10240
10393,10394,10395,4934,10396,10397,10398,10399,10400,10401,10402,10403,10404,10405,10406,10407, #10256
10408,10409,10410,10411,10412,3446,10413,10414,10415,10416,10417,10418,10419,10420,10421,10422, #10272
10423,4935,10424,10425,10426,10427,10428,10429,10430,4936,10431,10432,10433,10434,10435,10436, #10288
10437,10438,10439,10440,10441,10442,10443,4937,10444,10445,10446,10447,4481,10448,10449,10450, #10304
10451,10452,10453,10454,10455,10456,10457,10458,10459,10460,10461,10462,10463,10464,10465,10466, #10320
10467,10468,10469,10470,10471,10472,10473,10474,10475,10476,10477,10478,10479,10480,10481,10482, #10336
10483,10484,10485,10486,10487,10488,10489,10490,10491,10492,10493,10494,10495,10496,10497,10498, #10352
10499,10500,10501,10502,10503,10504,10505,4938,10506,10507,10508,10509,10510,2552,10511,10512, #10368
10513,10514,10515,10516,3447,10517,10518,10519,10520,10521,10522,10523,10524,10525,10526,10527, #10384
10528,10529,10530,10531,10532,10533,10534,10535,10536,10537,10538,10539,10540,10541,10542,10543, #10400
4482,10544,4939,10545,10546,10547,10548,10549,10550,10551,10552,10553,10554,10555,10556,10557, #10416
10558,10559,10560,10561,10562,10563,10564,10565,10566,10567,3676,4483,10568,10569,10570,10571, #10432
10572,3448,10573,10574,10575,10576,10577,10578,10579,10580,10581,10582,10583,10584,10585,10586, #10448
10587,10588,10589,10590,10591,10592,10593,10594,10595,10596,10597,10598,10599,10600,10601,10602, #10464
10603,10604,10605,10606,10607,10608,10609,10610,10611,10612,10613,10614,10615,10616,10617,10618, #10480
10619,10620,10621,10622,10623,10624,10625,10626,10627,4484,10628,10629,10630,10631,10632,4940, #10496
10633,10634,10635,10636,10637,10638,10639,10640,10641,10642,10643,10644,10645,10646,10647,10648, #10512
10649,10650,10651,10652,10653,10654,10655,10656,4941,10657,10658,10659,2599,10660,10661,10662, #10528
10663,10664,10665,10666,3089,10667,10668,10669,10670,10671,10672,10673,10674,10675,10676,10677, #10544
10678,10679,10680,4942,10681,10682,10683,10684,10685,10686,10687,10688,10689,10690,10691,10692, #10560
10693,10694,10695,10696,10697,4485,10698,10699,10700,10701,10702,10703,10704,4943,10705,3677, #10576
10706,10707,10708,10709,10710,10711,10712,4944,10713,10714,10715,10716,10717,10718,10719,10720, #10592
10721,10722,10723,10724,10725,10726,10727,10728,4945,10729,10730,10731,10732,10733,10734,10735, #10608
10736,10737,10738,10739,10740,10741,10742,10743,10744,10745,10746,10747,10748,10749,10750,10751, #10624
10752,10753,10754,10755,10756,10757,10758,10759,10760,10761,4946,10762,10763,10764,10765,10766, #10640
10767,4947,4948,10768,10769,10770,10771,10772,10773,10774,10775,10776,10777,10778,10779,10780, #10656
10781,10782,10783,10784,10785,10786,10787,10788,10789,10790,10791,10792,10793,10794,10795,10796, #10672
10797,10798,10799,10800,10801,10802,10803,10804,10805,10806,10807,10808,10809,10810,10811,10812, #10688
10813,10814,10815,10816,10817,10818,10819,10820,10821,10822,10823,10824,10825,10826,10827,10828, #10704
10829,10830,10831,10832,10833,10834,10835,10836,10837,10838,10839,10840,10841,10842,10843,10844, #10720
10845,10846,10847,10848,10849,10850,10851,10852,10853,10854,10855,10856,10857,10858,10859,10860, #10736
10861,10862,10863,10864,10865,10866,10867,10868,10869,10870,10871,10872,10873,10874,10875,10876, #10752
10877,10878,4486,10879,10880,10881,10882,10883,10884,10885,4949,10886,10887,10888,10889,10890, #10768
10891,10892,10893,10894,10895,10896,10897,10898,10899,10900,10901,10902,10903,10904,10905,10906, #10784
10907,10908,10909,10910,10911,10912,10913,10914,10915,10916,10917,10918,10919,4487,10920,10921, #10800
10922,10923,10924,10925,10926,10927,10928,10929,10930,10931,10932,4950,10933,10934,10935,10936, #10816
10937,10938,10939,10940,10941,10942,10943,10944,10945,10946,10947,10948,10949,4488,10950,10951, #10832
10952,10953,10954,10955,10956,10957,10958,10959,4190,10960,10961,10962,10963,10964,10965,10966, #10848
10967,10968,10969,10970,10971,10972,10973,10974,10975,10976,10977,10978,10979,10980,10981,10982, #10864
10983,10984,10985,10986,10987,10988,10989,10990,10991,10992,10993,10994,10995,10996,10997,10998, #10880
10999,11000,11001,11002,11003,11004,11005,11006,3960,11007,11008,11009,11010,11011,11012,11013, #10896
11014,11015,11016,11017,11018,11019,11020,11021,11022,11023,11024,11025,11026,11027,11028,11029, #10912
11030,11031,11032,4951,11033,11034,11035,11036,11037,11038,11039,11040,11041,11042,11043,11044, #10928
11045,11046,11047,4489,11048,11049,11050,11051,4952,11052,11053,11054,11055,11056,11057,11058, #10944
4953,11059,11060,11061,11062,11063,11064,11065,11066,11067,11068,11069,11070,11071,4954,11072, #10960
11073,11074,11075,11076,11077,11078,11079,11080,11081,11082,11083,11084,11085,11086,11087,11088, #10976
11089,11090,11091,11092,11093,11094,11095,11096,11097,11098,11099,11100,11101,11102,11103,11104, #10992
11105,11106,11107,11108,11109,11110,11111,11112,11113,11114,11115,3808,11116,11117,11118,11119, #11008
11120,11121,11122,11123,11124,11125,11126,11127,11128,11129,11130,11131,11132,11133,11134,4955, #11024
11135,11136,11137,11138,11139,11140,11141,11142,11143,11144,11145,11146,11147,11148,11149,11150, #11040
11151,11152,11153,11154,11155,11156,11157,11158,11159,11160,11161,4956,11162,11163,11164,11165, #11056
11166,11167,11168,11169,11170,11171,11172,11173,11174,11175,11176,11177,11178,11179,11180,4957, #11072
11181,11182,11183,11184,11185,11186,4958,11187,11188,11189,11190,11191,11192,11193,11194,11195, #11088
11196,11197,11198,11199,11200,3678,11201,11202,11203,11204,11205,11206,4191,11207,11208,11209, #11104
11210,11211,11212,11213,11214,11215,11216,11217,11218,11219,11220,11221,11222,11223,11224,11225, #11120
11226,11227,11228,11229,11230,11231,11232,11233,11234,11235,11236,11237,11238,11239,11240,11241, #11136
11242,11243,11244,11245,11246,11247,11248,11249,11250,11251,4959,11252,11253,11254,11255,11256, #11152
11257,11258,11259,11260,11261,11262,11263,11264,11265,11266,11267,11268,11269,11270,11271,11272, #11168
11273,11274,11275,11276,11277,11278,11279,11280,11281,11282,11283,11284,11285,11286,11287,11288, #11184
11289,11290,11291,11292,11293,11294,11295,11296,11297,11298,11299,11300,11301,11302,11303,11304, #11200
11305,11306,11307,11308,11309,11310,11311,11312,11313,11314,3679,11315,11316,11317,11318,4490, #11216
11319,11320,11321,11322,11323,11324,11325,11326,11327,11328,11329,11330,11331,11332,11333,11334, #11232
11335,11336,11337,11338,11339,11340,11341,11342,11343,11344,11345,11346,11347,4960,11348,11349, #11248
11350,11351,11352,11353,11354,11355,11356,11357,11358,11359,11360,11361,11362,11363,11364,11365, #11264
11366,11367,11368,11369,11370,11371,11372,11373,11374,11375,11376,11377,3961,4961,11378,11379, #11280
11380,11381,11382,11383,11384,11385,11386,11387,11388,11389,11390,11391,11392,11393,11394,11395, #11296
11396,11397,4192,11398,11399,11400,11401,11402,11403,11404,11405,11406,11407,11408,11409,11410, #11312
11411,4962,11412,11413,11414,11415,11416,11417,11418,11419,11420,11421,11422,11423,11424,11425, #11328
11426,11427,11428,11429,11430,11431,11432,11433,11434,11435,11436,11437,11438,11439,11440,11441, #11344
11442,11443,11444,11445,11446,11447,11448,11449,11450,11451,11452,11453,11454,11455,11456,11457, #11360
11458,11459,11460,11461,11462,11463,11464,11465,11466,11467,11468,11469,4963,11470,11471,4491, #11376
11472,11473,11474,11475,4964,11476,11477,11478,11479,11480,11481,11482,11483,11484,11485,11486, #11392
11487,11488,11489,11490,11491,11492,4965,11493,11494,11495,11496,11497,11498,11499,11500,11501, #11408
11502,11503,11504,11505,11506,11507,11508,11509,11510,11511,11512,11513,11514,11515,11516,11517, #11424
11518,11519,11520,11521,11522,11523,11524,11525,11526,11527,11528,11529,3962,11530,11531,11532, #11440
11533,11534,11535,11536,11537,11538,11539,11540,11541,11542,11543,11544,11545,11546,11547,11548, #11456
11549,11550,11551,11552,11553,11554,11555,11556,11557,11558,11559,11560,11561,11562,11563,11564, #11472
4193,4194,11565,11566,11567,11568,11569,11570,11571,11572,11573,11574,11575,11576,11577,11578, #11488
11579,11580,11581,11582,11583,11584,11585,11586,11587,11588,11589,11590,11591,4966,4195,11592, #11504
11593,11594,11595,11596,11597,11598,11599,11600,11601,11602,11603,11604,3090,11605,11606,11607, #11520
11608,11609,11610,4967,11611,11612,11613,11614,11615,11616,11617,11618,11619,11620,11621,11622, #11536
11623,11624,11625,11626,11627,11628,11629,11630,11631,11632,11633,11634,11635,11636,11637,11638, #11552
11639,11640,11641,11642,11643,11644,11645,11646,11647,11648,11649,11650,11651,11652,11653,11654, #11568
11655,11656,11657,11658,11659,11660,11661,11662,11663,11664,11665,11666,11667,11668,11669,11670, #11584
11671,11672,11673,11674,4968,11675,11676,11677,11678,11679,11680,11681,11682,11683,11684,11685, #11600
11686,11687,11688,11689,11690,11691,11692,11693,3809,11694,11695,11696,11697,11698,11699,11700, #11616
11701,11702,11703,11704,11705,11706,11707,11708,11709,11710,11711,11712,11713,11714,11715,11716, #11632
11717,11718,3553,11719,11720,11721,11722,11723,11724,11725,11726,11727,11728,11729,11730,4969, #11648
11731,11732,11733,11734,11735,11736,11737,11738,11739,11740,4492,11741,11742,11743,11744,11745, #11664
11746,11747,11748,11749,11750,11751,11752,4970,11753,11754,11755,11756,11757,11758,11759,11760, #11680
11761,11762,11763,11764,11765,11766,11767,11768,11769,11770,11771,11772,11773,11774,11775,11776, #11696
11777,11778,11779,11780,11781,11782,11783,11784,11785,11786,11787,11788,11789,11790,4971,11791, #11712
11792,11793,11794,11795,11796,11797,4972,11798,11799,11800,11801,11802,11803,11804,11805,11806, #11728
11807,11808,11809,11810,4973,11811,11812,11813,11814,11815,11816,11817,11818,11819,11820,11821, #11744
11822,11823,11824,11825,11826,11827,11828,11829,11830,11831,11832,11833,11834,3680,3810,11835, #11760
11836,4974,11837,11838,11839,11840,11841,11842,11843,11844,11845,11846,11847,11848,11849,11850, #11776
11851,11852,11853,11854,11855,11856,11857,11858,11859,11860,11861,11862,11863,11864,11865,11866, #11792
11867,11868,11869,11870,11871,11872,11873,11874,11875,11876,11877,11878,11879,11880,11881,11882, #11808
11883,11884,4493,11885,11886,11887,11888,11889,11890,11891,11892,11893,11894,11895,11896,11897, #11824
11898,11899,11900,11901,11902,11903,11904,11905,11906,11907,11908,11909,11910,11911,11912,11913, #11840
11914,11915,4975,11916,11917,11918,11919,11920,11921,11922,11923,11924,11925,11926,11927,11928, #11856
11929,11930,11931,11932,11933,11934,11935,11936,11937,11938,11939,11940,11941,11942,11943,11944, #11872
11945,11946,11947,11948,11949,4976,11950,11951,11952,11953,11954,11955,11956,11957,11958,11959, #11888
11960,11961,11962,11963,11964,11965,11966,11967,11968,11969,11970,11971,11972,11973,11974,11975, #11904
11976,11977,11978,11979,11980,11981,11982,11983,11984,11985,11986,11987,4196,11988,11989,11990, #11920
11991,11992,4977,11993,11994,11995,11996,11997,11998,11999,12000,12001,12002,12003,12004,12005, #11936
12006,12007,12008,12009,12010,12011,12012,12013,12014,12015,12016,12017,12018,12019,12020,12021, #11952
12022,12023,12024,12025,12026,12027,12028,12029,12030,12031,12032,12033,12034,12035,12036,12037, #11968
12038,12039,12040,12041,12042,12043,12044,12045,12046,12047,12048,12049,12050,12051,12052,12053, #11984
12054,12055,12056,12057,12058,12059,12060,12061,4978,12062,12063,12064,12065,12066,12067,12068, #12000
12069,12070,12071,12072,12073,12074,12075,12076,12077,12078,12079,12080,12081,12082,12083,12084, #12016
12085,12086,12087,12088,12089,12090,12091,12092,12093,12094,12095,12096,12097,12098,12099,12100, #12032
12101,12102,12103,12104,12105,12106,12107,12108,12109,12110,12111,12112,12113,12114,12115,12116, #12048
12117,12118,12119,12120,12121,12122,12123,4979,12124,12125,12126,12127,12128,4197,12129,12130, #12064
12131,12132,12133,12134,12135,12136,12137,12138,12139,12140,12141,12142,12143,12144,12145,12146, #12080
12147,12148,12149,12150,12151,12152,12153,12154,4980,12155,12156,12157,12158,12159,12160,4494, #12096
12161,12162,12163,12164,3811,12165,12166,12167,12168,12169,4495,12170,12171,4496,12172,12173, #12112
12174,12175,12176,3812,12177,12178,12179,12180,12181,12182,12183,12184,12185,12186,12187,12188, #12128
12189,12190,12191,12192,12193,12194,12195,12196,12197,12198,12199,12200,12201,12202,12203,12204, #12144
12205,12206,12207,12208,12209,12210,12211,12212,12213,12214,12215,12216,12217,12218,12219,12220, #12160
12221,4981,12222,12223,12224,12225,12226,12227,12228,12229,12230,12231,12232,12233,12234,12235, #12176
4982,12236,12237,12238,12239,12240,12241,12242,12243,12244,12245,4983,12246,12247,12248,12249, #12192
4984,12250,12251,12252,12253,12254,12255,12256,12257,12258,12259,12260,12261,12262,12263,12264, #12208
4985,12265,4497,12266,12267,12268,12269,12270,12271,12272,12273,12274,12275,12276,12277,12278, #12224
12279,12280,12281,12282,12283,12284,12285,12286,12287,4986,12288,12289,12290,12291,12292,12293, #12240
12294,12295,12296,2473,12297,12298,12299,12300,12301,12302,12303,12304,12305,12306,12307,12308, #12256
12309,12310,12311,12312,12313,12314,12315,12316,12317,12318,12319,3963,12320,12321,12322,12323, #12272
12324,12325,12326,12327,12328,12329,12330,12331,12332,4987,12333,12334,12335,12336,12337,12338, #12288
12339,12340,12341,12342,12343,12344,12345,12346,12347,12348,12349,12350,12351,12352,12353,12354, #12304
12355,12356,12357,12358,12359,3964,12360,12361,12362,12363,12364,12365,12366,12367,12368,12369, #12320
12370,3965,12371,12372,12373,12374,12375,12376,12377,12378,12379,12380,12381,12382,12383,12384, #12336
12385,12386,12387,12388,12389,12390,12391,12392,12393,12394,12395,12396,12397,12398,12399,12400, #12352
12401,12402,12403,12404,12405,12406,12407,12408,4988,12409,12410,12411,12412,12413,12414,12415, #12368
12416,12417,12418,12419,12420,12421,12422,12423,12424,12425,12426,12427,12428,12429,12430,12431, #12384
12432,12433,12434,12435,12436,12437,12438,3554,12439,12440,12441,12442,12443,12444,12445,12446, #12400
12447,12448,12449,12450,12451,12452,12453,12454,12455,12456,12457,12458,12459,12460,12461,12462, #12416
12463,12464,4989,12465,12466,12467,12468,12469,12470,12471,12472,12473,12474,12475,12476,12477, #12432
12478,12479,12480,4990,12481,12482,12483,12484,12485,12486,12487,12488,12489,4498,12490,12491, #12448
12492,12493,12494,12495,12496,12497,12498,12499,12500,12501,12502,12503,12504,12505,12506,12507, #12464
12508,12509,12510,12511,12512,12513,12514,12515,12516,12517,12518,12519,12520,12521,12522,12523, #12480
12524,12525,12526,12527,12528,12529,12530,12531,12532,12533,12534,12535,12536,12537,12538,12539, #12496
12540,12541,12542,12543,12544,12545,12546,12547,12548,12549,12550,12551,4991,12552,12553,12554, #12512
12555,12556,12557,12558,12559,12560,12561,12562,12563,12564,12565,12566,12567,12568,12569,12570, #12528
12571,12572,12573,12574,12575,12576,12577,12578,3036,12579,12580,12581,12582,12583,3966,12584, #12544
12585,12586,12587,12588,12589,12590,12591,12592,12593,12594,12595,12596,12597,12598,12599,12600, #12560
12601,12602,12603,12604,12605,12606,12607,12608,12609,12610,12611,12612,12613,12614,12615,12616, #12576
12617,12618,12619,12620,12621,12622,12623,12624,12625,12626,12627,12628,12629,12630,12631,12632, #12592
12633,12634,12635,12636,12637,12638,12639,12640,12641,12642,12643,12644,12645,12646,4499,12647, #12608
12648,12649,12650,12651,12652,12653,12654,12655,12656,12657,12658,12659,12660,12661,12662,12663, #12624
12664,12665,12666,12667,12668,12669,12670,12671,12672,12673,12674,12675,12676,12677,12678,12679, #12640
12680,12681,12682,12683,12684,12685,12686,12687,12688,12689,12690,12691,12692,12693,12694,12695, #12656
12696,12697,12698,4992,12699,12700,12701,12702,12703,12704,12705,12706,12707,12708,12709,12710, #12672
12711,12712,12713,12714,12715,12716,12717,12718,12719,12720,12721,12722,12723,12724,12725,12726, #12688
12727,12728,12729,12730,12731,12732,12733,12734,12735,12736,12737,12738,12739,12740,12741,12742, #12704
12743,12744,12745,12746,12747,12748,12749,12750,12751,12752,12753,12754,12755,12756,12757,12758, #12720
12759,12760,12761,12762,12763,12764,12765,12766,12767,12768,12769,12770,12771,12772,12773,12774, #12736
12775,12776,12777,12778,4993,2175,12779,12780,12781,12782,12783,12784,12785,12786,4500,12787, #12752
12788,12789,12790,12791,12792,12793,12794,12795,12796,12797,12798,12799,12800,12801,12802,12803, #12768
12804,12805,12806,12807,12808,12809,12810,12811,12812,12813,12814,12815,12816,12817,12818,12819, #12784
12820,12821,12822,12823,12824,12825,12826,4198,3967,12827,12828,12829,12830,12831,12832,12833, #12800
12834,12835,12836,12837,12838,12839,12840,12841,12842,12843,12844,12845,12846,12847,12848,12849, #12816
12850,12851,12852,12853,12854,12855,12856,12857,12858,12859,12860,12861,4199,12862,12863,12864, #12832
12865,12866,12867,12868,12869,12870,12871,12872,12873,12874,12875,12876,12877,12878,12879,12880, #12848
12881,12882,12883,12884,12885,12886,12887,4501,12888,12889,12890,12891,12892,12893,12894,12895, #12864
12896,12897,12898,12899,12900,12901,12902,12903,12904,12905,12906,12907,12908,12909,12910,12911, #12880
12912,4994,12913,12914,12915,12916,12917,12918,12919,12920,12921,12922,12923,12924,12925,12926, #12896
12927,12928,12929,12930,12931,12932,12933,12934,12935,12936,12937,12938,12939,12940,12941,12942, #12912
12943,12944,12945,12946,12947,12948,12949,12950,12951,12952,12953,12954,12955,12956,1772,12957, #12928
12958,12959,12960,12961,12962,12963,12964,12965,12966,12967,12968,12969,12970,12971,12972,12973, #12944
12974,12975,12976,12977,12978,12979,12980,12981,12982,12983,12984,12985,12986,12987,12988,12989, #12960
12990,12991,12992,12993,12994,12995,12996,12997,4502,12998,4503,12999,13000,13001,13002,13003, #12976
4504,13004,13005,13006,13007,13008,13009,13010,13011,13012,13013,13014,13015,13016,13017,13018, #12992
13019,13020,13021,13022,13023,13024,13025,13026,13027,13028,13029,3449,13030,13031,13032,13033, #13008
13034,13035,13036,13037,13038,13039,13040,13041,13042,13043,13044,13045,13046,13047,13048,13049, #13024
13050,13051,13052,13053,13054,13055,13056,13057,13058,13059,13060,13061,13062,13063,13064,13065, #13040
13066,13067,13068,13069,13070,13071,13072,13073,13074,13075,13076,13077,13078,13079,13080,13081, #13056
13082,13083,13084,13085,13086,13087,13088,13089,13090,13091,13092,13093,13094,13095,13096,13097, #13072
13098,13099,13100,13101,13102,13103,13104,13105,13106,13107,13108,13109,13110,13111,13112,13113, #13088
13114,13115,13116,13117,13118,3968,13119,4995,13120,13121,13122,13123,13124,13125,13126,13127, #13104
4505,13128,13129,13130,13131,13132,13133,13134,4996,4506,13135,13136,13137,13138,13139,4997, #13120
13140,13141,13142,13143,13144,13145,13146,13147,13148,13149,13150,13151,13152,13153,13154,13155, #13136
13156,13157,13158,13159,4998,13160,13161,13162,13163,13164,13165,13166,13167,13168,13169,13170, #13152
13171,13172,13173,13174,13175,13176,4999,13177,13178,13179,13180,13181,13182,13183,13184,13185, #13168
13186,13187,13188,13189,13190,13191,13192,13193,13194,13195,13196,13197,13198,13199,13200,13201, #13184
13202,13203,13204,13205,13206,5000,13207,13208,13209,13210,13211,13212,13213,13214,13215,13216, #13200
13217,13218,13219,13220,13221,13222,13223,13224,13225,13226,13227,4200,5001,13228,13229,13230, #13216
13231,13232,13233,13234,13235,13236,13237,13238,13239,13240,3969,13241,13242,13243,13244,3970, #13232
13245,13246,13247,13248,13249,13250,13251,13252,13253,13254,13255,13256,13257,13258,13259,13260, #13248
13261,13262,13263,13264,13265,13266,13267,13268,3450,13269,13270,13271,13272,13273,13274,13275, #13264
13276,5002,13277,13278,13279,13280,13281,13282,13283,13284,13285,13286,13287,13288,13289,13290, #13280
13291,13292,13293,13294,13295,13296,13297,13298,13299,13300,13301,13302,3813,13303,13304,13305, #13296
13306,13307,13308,13309,13310,13311,13312,13313,13314,13315,13316,13317,13318,13319,13320,13321, #13312
13322,13323,13324,13325,13326,13327,13328,4507,13329,13330,13331,13332,13333,13334,13335,13336, #13328
13337,13338,13339,13340,13341,5003,13342,13343,13344,13345,13346,13347,13348,13349,13350,13351, #13344
13352,13353,13354,13355,13356,13357,13358,13359,13360,13361,13362,13363,13364,13365,13366,13367, #13360
5004,13368,13369,13370,13371,13372,13373,13374,13375,13376,13377,13378,13379,13380,13381,13382, #13376
13383,13384,13385,13386,13387,13388,13389,13390,13391,13392,13393,13394,13395,13396,13397,13398, #13392
13399,13400,13401,13402,13403,13404,13405,13406,13407,13408,13409,13410,13411,13412,13413,13414, #13408
13415,13416,13417,13418,13419,13420,13421,13422,13423,13424,13425,13426,13427,13428,13429,13430, #13424
13431,13432,4508,13433,13434,13435,4201,13436,13437,13438,13439,13440,13441,13442,13443,13444, #13440
13445,13446,13447,13448,13449,13450,13451,13452,13453,13454,13455,13456,13457,5005,13458,13459, #13456
13460,13461,13462,13463,13464,13465,13466,13467,13468,13469,13470,4509,13471,13472,13473,13474, #13472
13475,13476,13477,13478,13479,13480,13481,13482,13483,13484,13485,13486,13487,13488,13489,13490, #13488
13491,13492,13493,13494,13495,13496,13497,13498,13499,13500,13501,13502,13503,13504,13505,13506, #13504
13507,13508,13509,13510,13511,13512,13513,13514,13515,13516,13517,13518,13519,13520,13521,13522, #13520
13523,13524,13525,13526,13527,13528,13529,13530,13531,13532,13533,13534,13535,13536,13537,13538, #13536
13539,13540,13541,13542,13543,13544,13545,13546,13547,13548,13549,13550,13551,13552,13553,13554, #13552
13555,13556,13557,13558,13559,13560,13561,13562,13563,13564,13565,13566,13567,13568,13569,13570, #13568
13571,13572,13573,13574,13575,13576,13577,13578,13579,13580,13581,13582,13583,13584,13585,13586, #13584
13587,13588,13589,13590,13591,13592,13593,13594,13595,13596,13597,13598,13599,13600,13601,13602, #13600
13603,13604,13605,13606,13607,13608,13609,13610,13611,13612,13613,13614,13615,13616,13617,13618, #13616
13619,13620,13621,13622,13623,13624,13625,13626,13627,13628,13629,13630,13631,13632,13633,13634, #13632
13635,13636,13637,13638,13639,13640,13641,13642,5006,13643,13644,13645,13646,13647,13648,13649, #13648
13650,13651,5007,13652,13653,13654,13655,13656,13657,13658,13659,13660,13661,13662,13663,13664, #13664
13665,13666,13667,13668,13669,13670,13671,13672,13673,13674,13675,13676,13677,13678,13679,13680, #13680
13681,13682,13683,13684,13685,13686,13687,13688,13689,13690,13691,13692,13693,13694,13695,13696, #13696
13697,13698,13699,13700,13701,13702,13703,13704,13705,13706,13707,13708,13709,13710,13711,13712, #13712
13713,13714,13715,13716,13717,13718,13719,13720,13721,13722,13723,13724,13725,13726,13727,13728, #13728
13729,13730,13731,13732,13733,13734,13735,13736,13737,13738,13739,13740,13741,13742,13743,13744, #13744
13745,13746,13747,13748,13749,13750,13751,13752,13753,13754,13755,13756,13757,13758,13759,13760, #13760
13761,13762,13763,13764,13765,13766,13767,13768,13769,13770,13771,13772,13773,13774,3273,13775, #13776
13776,13777,13778,13779,13780,13781,13782,13783,13784,13785,13786,13787,13788,13789,13790,13791, #13792
13792,13793,13794,13795,13796,13797,13798,13799,13800,13801,13802,13803,13804,13805,13806,13807, #13808
13808,13809,13810,13811,13812,13813,13814,13815,13816,13817,13818,13819,13820,13821,13822,13823, #13824
13824,13825,13826,13827,13828,13829,13830,13831,13832,13833,13834,13835,13836,13837,13838,13839, #13840
13840,13841,13842,13843,13844,13845,13846,13847,13848,13849,13850,13851,13852,13853,13854,13855, #13856
13856,13857,13858,13859,13860,13861,13862,13863,13864,13865,13866,13867,13868,13869,13870,13871, #13872
13872,13873,13874,13875,13876,13877,13878,13879,13880,13881,13882,13883,13884,13885,13886,13887, #13888
13888,13889,13890,13891,13892,13893,13894,13895,13896,13897,13898,13899,13900,13901,13902,13903, #13904
13904,13905,13906,13907,13908,13909,13910,13911,13912,13913,13914,13915,13916,13917,13918,13919, #13920
13920,13921,13922,13923,13924,13925,13926,13927,13928,13929,13930,13931,13932,13933,13934,13935, #13936
13936,13937,13938,13939,13940,13941,13942,13943,13944,13945,13946,13947,13948,13949,13950,13951, #13952
13952,13953,13954,13955,13956,13957,13958,13959,13960,13961,13962,13963,13964,13965,13966,13967, #13968
13968,13969,13970,13971,13972) #13973
# flake8: noqa
| apache-2.0 | -1,274,368,888,638,577,000 | 3,766,945,904,748,904,000 | 88.290811 | 103 | 0.770903 | false |
upliftaero/MissionPlanner | Lib/lib2to3/fixes/fix_exec.py | 61 | 1042 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for exec.
This converts usages of the exec statement into calls to a built-in
exec() function.
exec code in ns1, ns2 -> exec(code, ns1, ns2)
"""
# Local imports
from .. import pytree
from .. import fixer_base
from ..fixer_util import Comma, Name, Call
class FixExec(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
exec_stmt< 'exec' a=any 'in' b=any [',' c=any] >
|
exec_stmt< 'exec' (not atom<'(' [any] ')'>) a=any >
"""
def transform(self, node, results):
assert results
syms = self.syms
a = results["a"]
b = results.get("b")
c = results.get("c")
args = [a.clone()]
args[0].prefix = ""
if b is not None:
args.extend([Comma(), b.clone()])
if c is not None:
args.extend([Comma(), c.clone()])
return Call(Name(u"exec"), args, prefix=node.prefix)
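# Illustrative use of this fixer (editor's sketch, not part of the original
# file; the sample source string is invented, RefactoringTool is the real
# lib2to3 driver API):
#
#   from lib2to3.refactor import RefactoringTool
#   tool = RefactoringTool(["lib2to3.fixes.fix_exec"])
#   print(tool.refactor_string("exec code in ns1, ns2\n", "<example>"))
#   # -> exec(code, ns1, ns2)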
| gpl-3.0 | 7,758,853,958,036,030,000 | 7,853,760,355,158,253,000 | 24.05 | 67 | 0.556622 | false |
pforret/python-for-android | python-modules/twisted/twisted/internet/epollreactor.py | 56 | 8121 | # Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An epoll() based implementation of the twisted main loop.
To install the event loop (and you should do this before any connections,
listeners or connectors are added)::
from twisted.internet import epollreactor
epollreactor.install()
"""
import sys, errno
from zope.interface import implements
from twisted.internet.interfaces import IReactorFDSet
from twisted.python import _epoll
from twisted.python import log
from twisted.internet import posixbase, error
from twisted.internet.main import CONNECTION_DONE, CONNECTION_LOST
_POLL_DISCONNECTED = (_epoll.HUP | _epoll.ERR)
class EPollReactor(posixbase.PosixReactorBase):
"""
A reactor that uses epoll(4).
@ivar _poller: A L{poll} which will be used to check for I/O
readiness.
@ivar _selectables: A dictionary mapping integer file descriptors to
instances of L{FileDescriptor} which have been registered with the
reactor. All L{FileDescriptors} which are currently receiving read or
write readiness notifications will be present as values in this
dictionary.
@ivar _reads: A dictionary mapping integer file descriptors to arbitrary
values (this is essentially a set). Keys in this dictionary will be
registered with C{_poller} for read readiness notifications which will
be dispatched to the corresponding L{FileDescriptor} instances in
C{_selectables}.
@ivar _writes: A dictionary mapping integer file descriptors to arbitrary
values (this is essentially a set). Keys in this dictionary will be
registered with C{_poller} for write readiness notifications which will
be dispatched to the corresponding L{FileDescriptor} instances in
C{_selectables}.
"""
implements(IReactorFDSet)
def __init__(self):
"""
Initialize epoll object, file descriptor tracking dictionaries, and the
base class.
"""
# Create the poller we're going to use. The 1024 here is just a hint
# to the kernel, it is not a hard maximum.
self._poller = _epoll.epoll(1024)
self._reads = {}
self._writes = {}
self._selectables = {}
posixbase.PosixReactorBase.__init__(self)
def _add(self, xer, primary, other, selectables, event, antievent):
"""
        Private method for adding a descriptor to the event loop.
It takes care of adding it if new or modifying it if already added
for another state (read -> read/write for example).
"""
fd = xer.fileno()
if fd not in primary:
cmd = _epoll.CTL_ADD
flags = event
if fd in other:
flags |= antievent
cmd = _epoll.CTL_MOD
# epoll_ctl can raise all kinds of IOErrors, and every one
# indicates a bug either in the reactor or application-code.
# Let them all through so someone sees a traceback and fixes
# something. We'll do the same thing for every other call to
# this method in this file.
self._poller._control(cmd, fd, flags)
# Update our own tracking state *only* after the epoll call has
# succeeded. Otherwise we may get out of sync.
primary[fd] = 1
selectables[fd] = xer
def addReader(self, reader):
"""
Add a FileDescriptor for notification of data available to read.
"""
self._add(reader, self._reads, self._writes, self._selectables, _epoll.IN, _epoll.OUT)
def addWriter(self, writer):
"""
Add a FileDescriptor for notification of data available to write.
"""
self._add(writer, self._writes, self._reads, self._selectables, _epoll.OUT, _epoll.IN)
def _remove(self, xer, primary, other, selectables, event, antievent):
"""
Private method for removing a descriptor from the event loop.
        It does the inverse job of _add, and also adds a check in case the fd
        has gone away.
"""
fd = xer.fileno()
if fd == -1:
for fd, fdes in selectables.items():
if xer is fdes:
break
else:
return
if fd in primary:
cmd = _epoll.CTL_DEL
flags = event
if fd in other:
flags = antievent
cmd = _epoll.CTL_MOD
else:
del selectables[fd]
del primary[fd]
# See comment above _control call in _add.
self._poller._control(cmd, fd, flags)
def removeReader(self, reader):
"""
Remove a Selectable for notification of data available to read.
"""
self._remove(reader, self._reads, self._writes, self._selectables, _epoll.IN, _epoll.OUT)
def removeWriter(self, writer):
"""
Remove a Selectable for notification of data available to write.
"""
self._remove(writer, self._writes, self._reads, self._selectables, _epoll.OUT, _epoll.IN)
def removeAll(self):
"""
Remove all selectables, and return a list of them.
"""
return self._removeAll(
[self._selectables[fd] for fd in self._reads],
[self._selectables[fd] for fd in self._writes])
def getReaders(self):
return [self._selectables[fd] for fd in self._reads]
def getWriters(self):
return [self._selectables[fd] for fd in self._writes]
def doPoll(self, timeout):
"""
Poll the poller for new events.
"""
if timeout is None:
timeout = 1
timeout = int(timeout * 1000) # convert seconds to milliseconds
try:
# Limit the number of events to the number of io objects we're
# currently tracking (because that's maybe a good heuristic) and
# the amount of time we block to the value specified by our
# caller.
l = self._poller.wait(len(self._selectables), timeout)
except IOError, err:
if err.errno == errno.EINTR:
return
# See epoll_wait(2) for documentation on the other conditions
# under which this can fail. They can only be due to a serious
# programming error on our part, so let's just announce them
# loudly.
raise
_drdw = self._doReadOrWrite
for fd, event in l:
try:
selectable = self._selectables[fd]
except KeyError:
pass
else:
log.callWithLogger(selectable, _drdw, selectable, fd, event)
doIteration = doPoll
def _doReadOrWrite(self, selectable, fd, event):
"""
fd is available for read or write, make the work and raise errors
if necessary.
"""
why = None
inRead = False
if event & _POLL_DISCONNECTED and not (event & _epoll.IN):
if fd in self._reads:
inRead = True
why = CONNECTION_DONE
else:
why = CONNECTION_LOST
else:
try:
if event & _epoll.IN:
why = selectable.doRead()
inRead = True
if not why and event & _epoll.OUT:
why = selectable.doWrite()
inRead = False
if selectable.fileno() != fd:
why = error.ConnectionFdescWentAway(
'Filedescriptor went away')
inRead = False
except:
log.err()
why = sys.exc_info()[1]
if why:
self._disconnectSelectable(selectable, why, inRead)
def install():
"""
Install the epoll() reactor.
"""
p = EPollReactor()
from twisted.internet.main import installReactor
installReactor(p)
__all__ = ["EPollReactor", "install"]
| apache-2.0 | -6,208,712,831,011,388,000 | 5,703,907,464,347,179,000 | 32.557851 | 97 | 0.585273 | false |
chand3040/sree_odoo | openerp/addons/account/company.py | 384 | 2814 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_company(osv.osv):
_inherit = "res.company"
_columns = {
'expects_chart_of_accounts': fields.boolean('Expects a Chart of Accounts'),
'tax_calculation_rounding_method': fields.selection([
('round_per_line', 'Round per Line'),
('round_globally', 'Round Globally'),
], 'Tax Calculation Rounding Method',
help="If you select 'Round per Line' : for each tax, the tax amount will first be computed and rounded for each PO/SO/invoice line and then these rounded amounts will be summed, leading to the total amount for that tax. If you select 'Round Globally': for each tax, the tax amount will be computed for each PO/SO/invoice line, then these amounts will be summed and eventually this total tax amount will be rounded. If you sell with tax included, you should choose 'Round per line' because you certainly want the sum of your tax-included line subtotals to be equal to the total amount with taxes."),
'paypal_account': fields.char("Paypal Account", size=128, help="Paypal username (usually email) for receiving online payments."),
'overdue_msg': fields.text('Overdue Payments Message', translate=True),
}
_defaults = {
'expects_chart_of_accounts': True,
'tax_calculation_rounding_method': 'round_per_line',
'overdue_msg': '''Dear Sir/Madam,
Our records indicate that some payments on your account are still due. Please find details below.
If the amount has already been paid, please disregard this notice. Otherwise, please forward us the total amount stated below.
If you have any queries regarding your account, please contact us.
Thank you in advance for your cooperation.
Best Regards,'''
}
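# Editorial worked example of the two rounding options (invented figures,
# half-up rounding to 2 decimals): three invoice lines of 4.50 each taxed
# at 15%, i.e. 0.675 of tax per line.
#   Round per Line : round(0.675) * 3 = 0.68 * 3 = 2.04
#   Round Globally : round(0.675 * 3) = round(2.025) = 2.03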
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 3,484,536,606,652,720,000 | 2,761,927,527,431,824,400 | 54.176471 | 610 | 0.673063 | false |
ThePletch/ansible | lib/ansible/modules/cloud/amazon/ec2_win_password.py | 23 | 5690 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ec2_win_password
short_description: gets the default administrator password for ec2 windows instances
description:
- Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. i-XXXXXXX). This module has a dependency on python-boto.
version_added: "2.0"
author: "Rick Mendes (@rickmendes)"
options:
instance_id:
description:
- The instance id to get the password data from.
required: true
key_file:
description:
- Path to the file containing the key pair used on the instance.
required: true
key_passphrase:
version_added: "2.0"
description:
- The passphrase for the instance key pair. The key must use DES or 3DES encryption for this module to decrypt it. You can use openssl to convert your password protected keys if they do not use DES or 3DES. ex) openssl rsa -in current_key -out new_key -des3.
required: false
default: null
wait:
version_added: "2.0"
description:
- Whether or not to wait for the password to be available before returning.
required: false
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
version_added: "2.0"
description:
- Number of seconds to wait before giving up.
required: false
default: 120
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Example of getting a password
tasks:
- name: get the Administrator password
ec2_win_password:
profile: my-boto-profile
instance_id: i-XXXXXX
region: us-east-1
key_file: "~/aws-creds/my_test_key.pem"
# Example of getting a password with a password protected key
tasks:
- name: get the Administrator password
ec2_win_password:
profile: my-boto-profile
instance_id: i-XXXXXX
region: us-east-1
key_file: "~/aws-creds/my_protected_test_key.pem"
key_passphrase: "secret"
# Example of waiting for a password
tasks:
- name: get the Administrator password
ec2_win_password:
profile: my-boto-profile
instance_id: i-XXXXXX
region: us-east-1
key_file: "~/aws-creds/my_test_key.pem"
wait: yes
wait_timeout: 45
'''
from base64 import b64decode
from os.path import expanduser
from Crypto.Cipher import PKCS1_v1_5
from Crypto.PublicKey import RSA
import datetime
import time
try:
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
instance_id = dict(required=True),
key_file = dict(required=True),
key_passphrase = dict(no_log=True, default=None, required=False),
wait = dict(type='bool', default=False, required=False),
wait_timeout = dict(default=120, required=False),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='Boto required for this module.')
instance_id = module.params.get('instance_id')
key_file = expanduser(module.params.get('key_file'))
key_passphrase = module.params.get('key_passphrase')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
ec2 = ec2_connect(module)
if wait:
start = datetime.datetime.now()
end = start + datetime.timedelta(seconds=wait_timeout)
while datetime.datetime.now() < end:
data = ec2.get_password_data(instance_id)
decoded = b64decode(data)
if wait and not decoded:
time.sleep(5)
else:
break
else:
data = ec2.get_password_data(instance_id)
decoded = b64decode(data)
if wait and datetime.datetime.now() >= end:
module.fail_json(msg = "wait for password timeout after %d seconds" % wait_timeout)
try:
f = open(key_file, 'r')
except IOError as e:
module.fail_json(msg = "I/O error (%d) opening key file: %s" % (e.errno, e.strerror))
else:
try:
with f:
key = RSA.importKey(f.read(), key_passphrase)
except (ValueError, IndexError, TypeError) as e:
module.fail_json(msg = "unable to parse key file")
cipher = PKCS1_v1_5.new(key)
sentinel = 'password decryption failed!!!'
try:
decrypted = cipher.decrypt(decoded, sentinel)
except ValueError as e:
decrypted = None
    if decrypted is None:
module.exit_json(win_password='', changed=False)
else:
if wait:
elapsed = datetime.datetime.now() - start
module.exit_json(win_password=decrypted, changed=True, elapsed=elapsed.seconds)
else:
module.exit_json(win_password=decrypted, changed=True)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 | 1,812,431,572,659,451,000 | -2,460,186,445,876,385,000 | 30.611111 | 265 | 0.657821 | false |
dbones/linux | tools/perf/python/twatch.py | 1565 | 1316 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <[email protected]>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
| gpl-2.0 | -4,612,025,150,409,063,000 | 5,008,937,875,037,986,000 | 31.097561 | 75 | 0.668693 | false |
grzes/djangae | djangae/fields/charfields.py | 2 | 1862 | from django.core.exceptions import ImproperlyConfigured
from django.db import models
from djangae.core import validators
from google.appengine.api.datastore_types import _MAX_STRING_LENGTH
class CharOrNoneField(models.CharField):
""" A field that stores only non-empty strings or None (it won't store empty strings).
This is useful if you want values to be unique but also want to allow empty values.
"""
empty_strings_allowed = False
def __init__(self, *args, **kwargs):
# Don't allow null=False because that would be insane.
if not kwargs.get('null', True):
raise ImproperlyConfigured("You can't set null=False on a CharOrNoneField.")
# Set blank=True as the default, but allow it to be overridden, as it's theoretically
# possible that you might want to prevent emptiness only in a form
defaults = dict(null=True, blank=True, default=None)
defaults.update(**kwargs)
super(CharOrNoneField, self).__init__(*args, **defaults)
def pre_save(self, model_instance, add):
value = super(CharOrNoneField, self).pre_save(model_instance, add)
# Change empty strings to None
if not value:
return None
return value
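# Illustrative usage (editor's sketch; the model below is hypothetical):
# empty submissions are stored as NULL instead of "", so several rows can
# leave the field unset while non-empty values stay unique.
#
#   class Profile(models.Model):
#       nickname = CharOrNoneField(max_length=50, unique=True)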
class CharField(models.CharField):
def __init__(self, max_length=_MAX_STRING_LENGTH, *args, **kwargs):
assert max_length <= _MAX_STRING_LENGTH, \
"%ss max_length must not be grater than %d bytes." % (self.__class__.__name__, _MAX_STRING_LENGTH)
super(CharField, self).__init__(max_length=max_length, *args, **kwargs)
# Append the MaxBytesValidator if it's not been included already
self.validators = [
x for x in self.validators if not isinstance(x, validators.MaxBytesValidator)
] + [validators.MaxBytesValidator(limit_value=max_length)]
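# Editorial note: the datastore limit enforced above is a byte length, not a
# character count (hence MaxBytesValidator). Hypothetical usage:
#
#   class Article(models.Model):
#       title = CharField()              # defaults to the datastore maximum
#       slug = CharField(max_length=500)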
| bsd-3-clause | -7,902,577,568,315,349,000 | -5,738,393,728,855,404,000 | 41.318182 | 110 | 0.670784 | false |
wolfe-pack/moro | public/javascripts/brat/tools/norm_db_lookup.py | 3 | 4451 | #!/usr/bin/env python
# Test script for lookup in a normalization SQL DB, intended for
# DB testing.
# TODO: duplicates parts of primary norm DB implementation, dedup.
import sys
import os.path
import sqlite3 as sqlite
TYPE_TABLES = ["names", "attributes", "infos"]
NON_EMPTY_TABLES = set(["names"])
def argparser():
import argparse
ap=argparse.ArgumentParser(description="Print results of lookup in normalization SQL DB for keys read from STDIN.")
ap.add_argument("-v", "--verbose", default=False, action="store_true", help="Verbose output.")
ap.add_argument("-np", "--no-prompt", default=False, action="store_true", help="No prompt.")
ap.add_argument("database", metavar="DATABASE", help="Name of database to read")
return ap
def string_norm_form(s):
return s.lower().strip().replace('-', ' ')
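# e.g. string_norm_form("  Foo-Bar ") -> "foo bar"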
def datas_by_ids(cursor, ids):
# select separately from names, attributes and infos
responses = {}
for table in TYPE_TABLES:
command = '''
SELECT E.uid, L.text, N.value
FROM entities E
JOIN %s N
ON E.id = N.entity_id
JOIN labels L
ON L.id = N.label_id
WHERE E.uid IN (%s)''' % (table, ','.join(['?' for i in ids]))
cursor.execute(command, list(ids))
response = cursor.fetchall()
# group by ID first
for id_, label, value in response:
if id_ not in responses:
responses[id_] = {}
if table not in responses[id_]:
responses[id_][table] = []
responses[id_][table].append([label, value])
# short-circuit on missing or incomplete entry
if (table in NON_EMPTY_TABLES and
            len([i for i in responses if len(responses[i][table]) == 0]) != 0):
return None
# empty or incomplete?
for id_ in responses:
for t in NON_EMPTY_TABLES:
            if len(responses[id_].get(t, [])) == 0:
return None
# has expected content, format and return
datas = {}
for id_ in responses:
datas[id_] = []
for t in TYPE_TABLES:
datas[id_].append(responses[id_].get(t,[]))
return datas
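# Shape of the value returned above (IDs and labels are made-up examples):
#   { 'ID1': [ [['name', 'aspirin']],       # pairs from the "names" table
#              [['type', 'drug']],          # pairs from "attributes"
#              [['definition', '...']] ] }  # pairs from "infos"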
def ids_by_name(cursor, name, exactmatch=False, return_match=False):
return ids_by_names(cursor, [name], exactmatch, return_match)
def ids_by_names(cursor, names, exactmatch=False, return_match=False):
if not return_match:
command = 'SELECT E.uid'
else:
command = 'SELECT E.uid, N.value'
command += '''
FROM entities E
JOIN names N
ON E.id = N.entity_id
'''
if exactmatch:
command += 'WHERE N.value IN (%s)' % ','.join(['?' for n in names])
else:
command += 'WHERE N.normvalue IN (%s)' % ','.join(['?' for n in names])
names = [string_norm_form(n) for n in names]
cursor.execute(command, names)
responses = cursor.fetchall()
if not return_match:
return [r[0] for r in responses]
else:
return [(r[0],r[1]) for r in responses]
def main(argv):
arg = argparser().parse_args(argv[1:])
# try a couple of alternative locations based on the given DB
# name: name as path, name as filename in work dir, and name as
# filename without suffix in work dir
dbn = arg.database
dbpaths = [dbn, os.path.join('work', dbn), os.path.join('work', dbn)+'.db']
dbfn = None
for p in dbpaths:
if os.path.exists(p):
dbfn = p
break
if dbfn is None:
print >> sys.stderr, "Error: %s: no such file" % dbfn
return 1
try:
connection = sqlite.connect(dbfn)
except sqlite.OperationalError, e:
print >> sys.stderr, "Error connecting to DB %s:" % dbfn, e
return 1
cursor = connection.cursor()
while True:
if not arg.no_prompt:
print ">>> ",
l = sys.stdin.readline()
if not l:
break
l = l.rstrip()
try:
r = ids_by_name(cursor, l)
if len(r) != 0:
d = datas_by_ids(cursor, r)
for i in d:
print i+'\t', '\t'.join([' '.join(["%s:%s" % (k,v) for k,v in a]) for a in d[i]])
elif l == '':
print "(Use Ctrl-D to exit)"
else:
print "(no record found for '%s')" % l
except Exception, e:
print >> sys.stderr, "Unexpected error", e
return 1
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
| bsd-2-clause | -5,372,956,697,015,947,000 | 6,638,653,463,522,596,000 | 29.074324 | 119 | 0.567288 | false |
goodwinnk/intellij-community | plugins/hg4idea/testData/bin/hgext/convert/bzr.py | 94 | 11295 | # bzr.py - bzr support for the convert extension
#
# Copyright 2008, 2009 Marek Kubica <[email protected]> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
# This module is for handling 'bzr', that was formerly known as Bazaar-NG;
# it cannot access 'bar' repositories, but they were never used very much
import os
from mercurial import demandimport
# these do not work with demandimport, blacklist
demandimport.ignore.extend([
'bzrlib.transactions',
'bzrlib.urlutils',
'ElementPath',
])
from mercurial.i18n import _
from mercurial import util
from common import NoRepo, commit, converter_source
try:
# bazaar imports
from bzrlib import bzrdir, revision, errors
from bzrlib.revisionspec import RevisionSpec
except ImportError:
pass
supportedkinds = ('file', 'symlink')
class bzr_source(converter_source):
"""Reads Bazaar repositories by using the Bazaar Python libraries"""
def __init__(self, ui, path, rev=None):
super(bzr_source, self).__init__(ui, path, rev=rev)
if not os.path.exists(os.path.join(path, '.bzr')):
raise NoRepo(_('%s does not look like a Bazaar repository')
% path)
try:
# access bzrlib stuff
bzrdir
except NameError:
raise NoRepo(_('Bazaar modules could not be loaded'))
path = os.path.abspath(path)
self._checkrepotype(path)
try:
self.sourcerepo = bzrdir.BzrDir.open(path).open_repository()
except errors.NoRepositoryPresent:
raise NoRepo(_('%s does not look like a Bazaar repository')
% path)
self._parentids = {}
def _checkrepotype(self, path):
# Lightweight checkouts detection is informational but probably
# fragile at API level. It should not terminate the conversion.
try:
from bzrlib import bzrdir
dir = bzrdir.BzrDir.open_containing(path)[0]
try:
tree = dir.open_workingtree(recommend_upgrade=False)
branch = tree.branch
except (errors.NoWorkingTree, errors.NotLocalUrl):
tree = None
branch = dir.open_branch()
if (tree is not None and tree.bzrdir.root_transport.base !=
branch.bzrdir.root_transport.base):
self.ui.warn(_('warning: lightweight checkouts may cause '
'conversion failures, try with a regular '
'branch instead.\n'))
except Exception:
self.ui.note(_('bzr source type could not be determined\n'))
def before(self):
"""Before the conversion begins, acquire a read lock
for all the operations that might need it. Fortunately
read locks don't block other reads or writes to the
repository, so this shouldn't have any impact on the usage of
the source repository.
The alternative would be locking on every operation that
needs locks (there are currently two: getting the file and
getting the parent map) and releasing immediately after,
        but this approach can take as much as 40% longer."""
self.sourcerepo.lock_read()
def after(self):
self.sourcerepo.unlock()
def _bzrbranches(self):
return self.sourcerepo.find_branches(using=True)
def getheads(self):
if not self.rev:
# Set using=True to avoid nested repositories (see issue3254)
heads = sorted([b.last_revision() for b in self._bzrbranches()])
else:
revid = None
for branch in self._bzrbranches():
try:
r = RevisionSpec.from_string(self.rev)
info = r.in_history(branch)
                except errors.BzrError:
                    # this branch cannot resolve the spec; try the next one
                    continue
                revid = info.rev_id
if revid is None:
raise util.Abort(_('%s is not a valid revision') % self.rev)
heads = [revid]
# Empty repositories return 'null:', which cannot be retrieved
heads = [h for h in heads if h != 'null:']
return heads
def getfile(self, name, rev):
revtree = self.sourcerepo.revision_tree(rev)
fileid = revtree.path2id(name.decode(self.encoding or 'utf-8'))
kind = None
if fileid is not None:
kind = revtree.kind(fileid)
if kind not in supportedkinds:
# the file is not available anymore - was deleted
raise IOError(_('%s is not available in %s anymore') %
(name, rev))
mode = self._modecache[(name, rev)]
if kind == 'symlink':
target = revtree.get_symlink_target(fileid)
if target is None:
raise util.Abort(_('%s.%s symlink has no target')
% (name, rev))
return target, mode
else:
sio = revtree.get_file(fileid)
return sio.read(), mode
def getchanges(self, version):
# set up caches: modecache and revtree
self._modecache = {}
self._revtree = self.sourcerepo.revision_tree(version)
# get the parentids from the cache
parentids = self._parentids.pop(version)
# only diff against first parent id
prevtree = self.sourcerepo.revision_tree(parentids[0])
return self._gettreechanges(self._revtree, prevtree)
def getcommit(self, version):
rev = self.sourcerepo.get_revision(version)
# populate parent id cache
if not rev.parent_ids:
parents = []
self._parentids[version] = (revision.NULL_REVISION,)
else:
parents = self._filterghosts(rev.parent_ids)
self._parentids[version] = parents
branch = self.recode(rev.properties.get('branch-nick', u'default'))
if branch == 'trunk':
branch = 'default'
return commit(parents=parents,
date='%d %d' % (rev.timestamp, -rev.timezone),
author=self.recode(rev.committer),
desc=self.recode(rev.message),
branch=branch,
rev=version)
def gettags(self):
bytetags = {}
for branch in self._bzrbranches():
if not branch.supports_tags():
return {}
tagdict = branch.tags.get_tag_dict()
for name, rev in tagdict.iteritems():
bytetags[self.recode(name)] = rev
return bytetags
def getchangedfiles(self, rev, i):
self._modecache = {}
curtree = self.sourcerepo.revision_tree(rev)
if i is not None:
parentid = self._parentids[rev][i]
else:
# no parent id, get the empty revision
parentid = revision.NULL_REVISION
prevtree = self.sourcerepo.revision_tree(parentid)
changes = [e[0] for e in self._gettreechanges(curtree, prevtree)[0]]
return changes
def _gettreechanges(self, current, origin):
revid = current._revision_id
changes = []
renames = {}
seen = set()
# Process the entries by reverse lexicographic name order to
# handle nested renames correctly, most specific first.
curchanges = sorted(current.iter_changes(origin),
key=lambda c: c[1][0] or c[1][1],
reverse=True)
for (fileid, paths, changed_content, versioned, parent, name,
kind, executable) in curchanges:
if paths[0] == u'' or paths[1] == u'':
# ignore changes to tree root
continue
# bazaar tracks directories, mercurial does not, so
# we have to rename the directory contents
if kind[1] == 'directory':
if kind[0] not in (None, 'directory'):
# Replacing 'something' with a directory, record it
# so it can be removed.
changes.append((self.recode(paths[0]), revid))
if kind[0] == 'directory' and None not in paths:
renaming = paths[0] != paths[1]
# neither an add nor an delete - a move
# rename all directory contents manually
subdir = origin.inventory.path2id(paths[0])
# get all child-entries of the directory
for name, entry in origin.inventory.iter_entries(subdir):
# hg does not track directory renames
if entry.kind == 'directory':
continue
frompath = self.recode(paths[0] + '/' + name)
if frompath in seen:
# Already handled by a more specific change entry
# This is important when you have:
# a => b
# a/c => a/c
# Here a/c must not be renamed into b/c
continue
seen.add(frompath)
if not renaming:
continue
topath = self.recode(paths[1] + '/' + name)
# register the files as changed
changes.append((frompath, revid))
changes.append((topath, revid))
# add to mode cache
mode = ((entry.executable and 'x')
or (entry.kind == 'symlink' and 's')
or '')
self._modecache[(topath, revid)] = mode
# register the change as move
renames[topath] = frompath
# no further changes, go to the next change
continue
# we got unicode paths, need to convert them
path, topath = paths
if path is not None:
path = self.recode(path)
if topath is not None:
topath = self.recode(topath)
seen.add(path or topath)
if topath is None:
# file deleted
changes.append((path, revid))
continue
# renamed
if path and path != topath:
renames[topath] = path
changes.append((path, revid))
# populate the mode cache
kind, executable = [e[1] for e in (kind, executable)]
mode = ((executable and 'x') or (kind == 'symlink' and 'l')
or '')
self._modecache[(topath, revid)] = mode
changes.append((topath, revid))
return changes, renames
def _filterghosts(self, ids):
"""Filters out ghost revisions which hg does not support, see
<http://bazaar-vcs.org/GhostRevision>
"""
parentmap = self.sourcerepo.get_parent_map(ids)
parents = tuple([parent for parent in ids if parent in parentmap])
return parents
| apache-2.0 | -9,218,364,765,642,955,000 | -2,003,782,386,176,890,600 | 38.631579 | 77 | 0.544489 | false |
glaubitz/fs-uae-debian | launcher/arcade/arcade_main.py | 2 | 4015 | from arcade.Application import Application
from arcade.glui.imageloader import ImageLoader
from arcade.ui.arcade_window import (
ArcadeWindow,
check_argument,
fullscreen,
maximized,
)
from fsbc.settings import Settings
from fsbc.system import macosx
from .gnome3 import running_in_gnome_3, handle_gnome_extensions
import launcher.version
from fsbc.init import initialize_application
K_UI_MODE_ALL_HIDDEN = 3
K_UI_OPTION_AUTO_SHOW_MENU_BAR = 1 << 0
def os_x_set_system_ui_mode(mode, option):
# noinspection PyUnresolvedReferences
import objc
# noinspection PyUnresolvedReferences
from Foundation import NSBundle
bundle = NSBundle.bundleWithPath_(
"/System/Library/Frameworks/Carbon.framework"
)
objc.loadBundleFunctions(
bundle, globals(), (("SetSystemUIMode", b"III", ""),)
)
# noinspection PyUnresolvedReferences
SetSystemUIMode(mode, option)
def main():
application = Application()
initialize_application("fs-uae-arcade", version=launcher.version.VERSION)
# fs_width, fs_height = fsui.get_screen_size()
# cursor_position = None
# use_window = False
# use_window_decorations = True
# use_fullscreen = True
# use_fullscreen_window = False
# use_top_clock = check_argument("top_clock") != "0"
# use_top_logo = check_argument("top_logo") != "0"
if macosx:
if fullscreen() or maximized():
if check_argument("system_autohide") == "1":
os_x_set_system_ui_mode(
K_UI_MODE_ALL_HIDDEN, K_UI_OPTION_AUTO_SHOW_MENU_BAR
)
elif running_in_gnome_3():
if fullscreen() or maximized():
# use_fullscreen = False
# use_window_decorations = False
# use_window = "maximized"
if check_argument("system_autohide") == "1":
handle_gnome_extensions()
# cursor_position = fs_width - 1, fs_height - 1
# use_top_clock = False
# use_top_logo = False
# app.settings["fs-uae:fullscreen-mode::default"] = "window"
else:
# We want a normal window.
pass
Settings.instance().set("__arcade", "1")
# if windows:
# pass
# elif macosx:
# # use_fullscreen_window = True
# # Settings.instance().set("__fullscreen_mode", "window")
# pass
# else:
# # app.settings["fs-uae:fullscreen-mode::default"] = "window"
# pass
# if check_argument("fullscreen"):
# use_fullscreen = check_argument("fullscreen") == "1"
#
# if "--fullscreen-mode=fullscreen" in sys.argv:
# use_fullscreen_window = False
# elif "--fullscreen-mode=window" in sys.argv:
# use_fullscreen_window = True
#
# if "--maximize" in sys.argv:
# use_window = "maximized"
# use_fullscreen = False
#
# if "--no-window-decorations" in sys.argv:
# use_window_decorations = False
# app.settings["game-center:fullscreen"] = \
# "1" if use_fullscreen else "0"
# if use_fullscreen_window:
# app.settings["game-center:fullscreen-mode"] = "window"
# else:
# app.settings["game-center:fullscreen-mode"] = ""
# app.settings["game-center:window-decorations"] = \
# "1" if use_window_decorations else "0"
# app.settings["game-center:maximize"] = \
# "1" if use_window == "maximized" else "0"
# app.settings["game-center:top-clock"] = "1" if use_top_clock else "0"
# app.settings["game-center:top-logo"] = "1" if use_top_logo else "0"
ArcadeWindow().show_auto()
# if cursor_position is not None:
# os.environ["FSGS_RETURN_CURSOR_TO"] = "{0},{1}".format(
# cursor_position[0], cursor_position[1])
application.run()
print("application.run returned")
application.stop()
ImageLoader.get().stop()
application.wait()
print(" --- arcade.arcade_main.main is done ---")
return
| gpl-2.0 | 3,741,795,106,100,212,700 | -7,173,011,385,604,123,000 | 30.865079 | 77 | 0.604483 | false |
PfarrCh/openrsa | test/solovaystrassen_testcase.py | 1 | 1957 | # -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2017 Christian Pfarr
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import unittest
from solovaystrassen import SolovayStrassen
class SolovayStrassenTestCase(unittest.TestCase):
def setUp(self):
self.a1 = 17
self.a2 = 29
self.a3 = 23
self.maybe = 91
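    # Editor's note: 91 = 7 * 13, so `maybe` is composite. Solovay-Strassen
    # flags n composite when a**((n-1)//2) mod n disagrees with the Jacobi
    # symbol (a/n) mod n; for n = 91 the bases 17 and 29 are Euler liars
    # (they pass), while 23 disagrees and exposes the composition.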
def tearDown(self):
del self.a1
del self.a2
del self.a3
del self.maybe
def test_composition(self):
self.assertTrue(not SolovayStrassen.is_composite(self.a1, self.maybe),
"SolovayStrassen detects a composition, but it could be prime")
self.assertTrue(not SolovayStrassen.is_composite(self.a2, self.maybe),
"SolovayStrassen detects a composition, but it could be prime")
self.assertTrue(SolovayStrassen.is_composite(self.a3, self.maybe),
"SolovayStrassen detects no composition, but it is one")
| mit | -5,712,498,380,342,278,000 | 6,746,465,156,154,131,000 | 38.14 | 87 | 0.722024 | false |
Ichimonji10/robottelo | tests/foreman/performance/test_candlepin_concurrent_subscription_attach.py | 2 | 3812 | """Test class for concurrent subscription by register and attach
@Requirement: Candlepin concurrent subscription attach
@CaseAutomation: Automated
@CaseLevel: Acceptance
@CaseComponent: OTHER
@TestType: Functional
@CaseImportance: High
@Upstream: No
"""
from robottelo.performance.constants import (
ATTACH_ENV,
RAW_ATT_FILE_NAME,
RAW_REG_FILE_NAME,
STAT_ATT_FILE_NAME,
STAT_REG_FILE_NAME,
)
from robottelo.test import ConcurrentTestCase
class ConcurrentSubAttachTestCase(ConcurrentTestCase):
"""Concurrent Subscribe to Red Hat Satellite 6 Server by attach tests"""
@classmethod
def setUpClass(cls):
super(ConcurrentSubAttachTestCase, cls).setUpClass()
# parameters for concurrent register and attach test
# note: may need to change savepoint name
cls._set_testcase_parameters(
'enabled_repos',
RAW_ATT_FILE_NAME,
STAT_ATT_FILE_NAME,
raw_reg=RAW_REG_FILE_NAME,
stat_reg=STAT_REG_FILE_NAME,
)
# parameters for attach step
cls.environment = ATTACH_ENV
def setUp(self):
super(ConcurrentSubAttachTestCase, self).setUp()
# Get subscription id
(self.sub_id, sub_name) = self._get_subscription_id()
self.logger.debug(
'subscription {0} id is: {1}'.format(sub_name, self.sub_id))
def test_subscribe_ak_sequential(self):
"""Subscribe system sequentially using 1 virtual machine
@id: 41d80f4f-60df-4a49-967c-929604ca156e
@Steps:
1. create result dictionary
        2. run sequentially on one thread;
           that thread performs the total number of iterations
3. produce result of timing
        @Assert: Restoring from database without any registered systems.
"""
self.kick_off_ak_test(self.num_threads[0], 5000)
def test_register_attach_2_clients(self):
"""Subscribe system concurrently using 2 virtual machines
@id: 9849c556-c2a7-4ae3-a7b7-5291bdf158fd
@Steps:
1. create result dictionary
2. concurrent run by multiple threads;
each thread iterates a limited number of times
3. produce result of timing
@Assert: Restoring from database without any registered systems.
"""
self.kick_off_att_test(self.num_threads[1], 5000)
def test_register_attach_4_clients(self):
"""Subscribe system concurrently using 4 virtual machines
@id: dfc7da77-6127-42ee-bbaa-4e3b48c86c9d
@Assert: Restoring from database without any registered systems.
"""
self.kick_off_att_test(self.num_threads[2], 5000)
def test_register_attach_6_clients(self):
"""Subscribe system concurrently using 6 virtual machines
@id: 1a03261a-2756-4ea2-a718-86b5cfa9bd87
@Assert: Restoring from database without any registered systems.
"""
self.kick_off_att_test(self.num_threads[3], 6000)
def test_register_attach_8_clients(self):
"""Subscribe system concurrently using 8 virtual machines
@id: fc5049b1-93ba-4cba-854f-bb763d137832
@Assert: Restoring from database without any registered systems.
"""
self.kick_off_att_test(self.num_threads[4], 5000)
def test_register_attach_10_clients(self):
"""Subscribe system concurrently using 10 virtual machines
@id: a7ce9e04-b9cc-4c2b-b9e8-22ea8ceb1fab
@Steps:
1. create result dictionary
2. concurrent run by multiple threads;
and each thread iterates a limited number of times
3. produce result of timing
@Assert: Restoring from database without any registered systems.
"""
self.kick_off_att_test(self.num_threads[5], 5000)
| gpl-3.0 | -1,950,675,690,087,150,800 | 2,581,508,777,786,944,000 | 27.447761 | 76 | 0.658709 | false |
bjodah/symengine.py | symengine/tests/test_sets.py | 2 | 3799 | from symengine.utilities import raises
from symengine.lib.symengine_wrapper import (Interval, EmptySet, UniversalSet,
FiniteSet, Union, Complement, ImageSet, ConditionSet, Reals, Integers,
And, Or, oo, Symbol, true, Ge, Eq, Gt)
def test_Interval():
assert Interval(0, oo) == Interval(0, oo, False, True)
assert Interval(-oo, 0) == Interval(-oo, 0, True, False)
assert Interval(oo, -oo) == EmptySet()
assert Interval(oo, oo) == EmptySet()
assert Interval(-oo, -oo) == EmptySet()
assert isinstance(Interval(1, 1), FiniteSet)
assert Interval(1, 0) == EmptySet()
assert Interval(1, 1, False, True) == EmptySet()
assert Interval(1, 1, True, False) == EmptySet()
assert Interval(1, 1, True, True) == EmptySet()
assert Interval(1, 2).union(Interval(2, 3)) == Interval(1, 3)
def test_EmptySet():
E = EmptySet()
assert E.intersection(UniversalSet()) == E
def test_UniversalSet():
U = UniversalSet()
x = Symbol("x")
assert U.union(Interval(2, 4)) == U
assert U.intersection(Interval(2, 4)) == Interval(2, 4)
assert U.contains(0) == true
def test_Reals():
R = Reals()
assert R.union(Interval(2, 4)) == R
assert R.contains(0) == true
def test_Integers():
Z = Integers()
assert Z.union(FiniteSet(2, 4)) == Z
assert Z.contains(0) == true
def test_FiniteSet():
x = Symbol("x")
A = FiniteSet(1, 2, 3)
B = FiniteSet(3, 4, 5)
AorB = Union(A, B)
AandB = A.intersection(B)
assert AandB == FiniteSet(3)
assert FiniteSet(EmptySet()) != EmptySet()
assert FiniteSet(FiniteSet(1, 2, 3)) != FiniteSet(1, 2, 3)
def test_Union():
assert Union(Interval(1, 2), Interval(2, 3)) == Interval(1, 3)
assert Union(Interval(1, 2), Interval(2, 3, True)) == Interval(1, 3)
assert Union(Interval(1, 3), Interval(2, 4)) == Interval(1, 4)
assert Union(Interval(1, 2), Interval(1, 3)) == Interval(1, 3)
assert Union(Interval(1, 3), Interval(1, 2)) == Interval(1, 3)
assert Union(Interval(1, 3, False, True), Interval(1, 2)) == \
Interval(1, 3, False, True)
assert Union(Interval(1, 3), Interval(1, 2, False, True)) == Interval(1, 3)
assert Union(Interval(1, 2, True), Interval(1, 3)) == Interval(1, 3)
assert Union(Interval(1, 2, True), Interval(1, 3, True)) == \
Interval(1, 3, True)
assert Union(Interval(1, 2, True), Interval(1, 3, True, True)) == \
Interval(1, 3, True, True)
assert Union(Interval(1, 2, True, True), Interval(1, 3, True)) == \
Interval(1, 3, True)
assert Union(Interval(1, 3), Interval(2, 3)) == Interval(1, 3)
assert Union(Interval(1, 3, False, True), Interval(2, 3)) == \
Interval(1, 3)
assert Union(Interval(1, 2, False, True), Interval(2, 3, True)) != \
Interval(1, 3)
assert Union(Interval(1, 2), EmptySet()) == Interval(1, 2)
assert Union(EmptySet()) == EmptySet()
def test_Complement():
assert Complement(Interval(1, 3), Interval(1, 2)) == Interval(2, 3, True)
assert Complement(FiniteSet(1, 3, 4), FiniteSet(3, 4)) == FiniteSet(1)
assert Complement(Union(Interval(0, 2),
FiniteSet(2, 3, 4)), Interval(1, 3)) == \
Union(Interval(0, 1, False, True), FiniteSet(4))
def test_ConditionSet():
x = Symbol("x")
i1 = Interval(-oo, oo)
f1 = FiniteSet(0, 1, 2, 4)
cond1 = Ge(x**2, 9)
assert ConditionSet(x, And(Eq(0, 1), i1.contains(x))) == EmptySet()
assert ConditionSet(x, And(Gt(1, 0), i1.contains(x))) == i1
assert ConditionSet(x, And(cond1, f1.contains(x))) == FiniteSet(4)
def test_ImageSet():
x = Symbol("x")
i1 = Interval(0, 1)
assert ImageSet(x, x**2, EmptySet()) == EmptySet()
assert ImageSet(x, 1, i1) == FiniteSet(1)
assert ImageSet(x, x, i1) == i1
| mit | -5,361,911,500,748,104,000 | 4,104,237,634,754,970,000 | 34.504673 | 79 | 0.603843 | false |
simartin/servo | tests/wpt/web-platform-tests/tools/third_party/h2/examples/fragments/client_upgrade_fragment.py | 14 | 3726 | # -*- coding: utf-8 -*-
"""
Client Plaintext Upgrade
~~~~~~~~~~~~~~~~~~~~~~~~
This example code fragment demonstrates how to set up a HTTP/2 client that uses
the plaintext HTTP Upgrade mechanism to negotiate HTTP/2 connectivity. For
maximum explanatory value it uses the synchronous socket API that comes with
the Python standard library. In product code you will want to use an actual
HTTP/1.1 client if possible.
This code requires Python 3.5 or later.
"""
import h2.connection
import socket
def establish_tcp_connection():
"""
This function establishes a client-side TCP connection. How it works isn't
very important to this example. For the purpose of this example we connect
to localhost.
"""
return socket.create_connection(('localhost', 80))
def send_initial_request(connection, settings):
"""
For the sake of this upgrade demonstration, we're going to issue a GET
request against the root of the site. In principle the best request to
issue for an upgrade is actually ``OPTIONS *``, but this is remarkably
poorly supported and can break in weird ways.
"""
# Craft our initial request per RFC 7540 Section 3.2. This requires two
    # special header fields: the Upgrade header and the HTTP2-Settings header.
# The value of the HTTP2-Settings header field comes from h2.
request = (
b"GET / HTTP/1.1\r\n" +
b"Host: localhost\r\n" +
b"Upgrade: h2c\r\n" +
b"HTTP2-Settings: " + settings + b"\r\n" +
b"\r\n"
)
connection.sendall(request)
def get_upgrade_response(connection):
"""
This function reads from the socket until the HTTP/1.1 end-of-headers
sequence (CRLFCRLF) is received. It then checks what the status code of the
response is.
This is not a substitute for proper HTTP/1.1 parsing, but it's good enough
for example purposes.
"""
data = b''
while b'\r\n\r\n' not in data:
data += connection.recv(8192)
headers, rest = data.split(b'\r\n\r\n', 1)
# An upgrade response begins HTTP/1.1 101 Switching Protocols. Look for the
# code. In production code you should also check that the upgrade is to
# h2c, but here we know we only offered one upgrade so there's only one
# possible upgrade in use.
split_headers = headers.split()
if split_headers[1] != b'101':
raise RuntimeError("Not upgrading!")
# We don't care about the HTTP/1.1 data anymore, but we do care about
# any other data we read from the socket: this is going to be HTTP/2 data
# that must be passed to the H2Connection.
return rest
def main():
"""
The client upgrade flow.
"""
    # Step 1: Establish the TCP connection.
connection = establish_tcp_connection()
# Step 2: Create H2 Connection object, put it in upgrade mode, and get the
# value of the HTTP2-Settings header we want to use.
h2_connection = h2.connection.H2Connection()
settings_header_value = h2_connection.initiate_upgrade_connection()
# Step 3: Send the initial HTTP/1.1 request with the upgrade fields.
send_initial_request(connection, settings_header_value)
# Step 4: Read the HTTP/1.1 response, look for 101 response.
extra_data = get_upgrade_response(connection)
# Step 5: Immediately send the pending HTTP/2 data.
connection.sendall(h2_connection.data_to_send())
# Step 6: Feed the body data to the connection.
    events = h2_connection.receive_data(extra_data)
# Now you can enter your main loop, beginning by processing the first set
# of events above. These events may include ResponseReceived, which will
# contain the response to the request we made in Step 3.
main_loop(events)
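# A minimal main_loop sketch (illustrative only; the helper below and its
# signature are hypothetical -- a real main() would need to pass the socket
# and the H2Connection along as well):
#
# import h2.events
#
# def main_loop(sock, h2_conn, events):
#     while True:
#         for event in events:
#             if isinstance(event, h2.events.StreamEnded):
#                 return
#         sock.sendall(h2_conn.data_to_send())
#         events = h2_conn.receive_data(sock.recv(65535))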
| mpl-2.0 | 6,828,279,949,292,134,000 | -8,920,447,986,015,363,000 | 35.174757 | 79 | 0.688943 | false |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/contrib/learn/python/learn/utils/inspect_checkpoint.py | 123 | 2686 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple script for inspect checkpoint files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.python.platform import app
FLAGS = None
def print_tensors_in_checkpoint_file(file_name, tensor_name):
"""Prints tensors in a checkpoint file.
If no `tensor_name` is provided, prints the tensor names and shapes
in the checkpoint file.
If `tensor_name` is provided, prints the content of the tensor.
Args:
file_name: Name of the checkpoint file.
tensor_name: Name of the tensor in the checkpoint file to print.
"""
try:
if not tensor_name:
variables = checkpoint_utils.list_variables(file_name)
for name, shape in variables:
print("%s\t%s" % (name, str(shape)))
else:
print("tensor_name: ", tensor_name)
print(checkpoint_utils.load_variable(file_name, tensor_name))
except Exception as e: # pylint: disable=broad-except
print(str(e))
if "corrupted compressed block contents" in str(e):
print("It's likely that your checkpoint file has been compressed "
"with SNAPPY.")
def main(unused_argv):
if not FLAGS.file_name:
print("Usage: inspect_checkpoint --file_name=<checkpoint_file_name "
"or directory> [--tensor_name=tensor_to_print]")
sys.exit(1)
else:
print_tensors_in_checkpoint_file(FLAGS.file_name, FLAGS.tensor_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--file_name",
type=str,
default="",
help="Checkpoint filename"
)
parser.add_argument(
"--tensor_name",
type=str,
default="",
help="Name of the tensor to inspect"
)
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
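# Example invocations (illustrative; the checkpoint path and tensor name are
# hypothetical):
#   python inspect_checkpoint.py --file_name=model.ckpt
#   python inspect_checkpoint.py --file_name=model.ckpt --tensor_name=w1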
| bsd-2-clause | 5,637,685,460,979,745,000 | 807,677,324,143,029,600 | 31.756098 | 80 | 0.674237 | false |
thienluong/SAFplus-Availability-Scalability-Platform | src/ide/genshi/build/lib.linux-x86_64-2.7/genshi/template/tests/base.py | 25 | 1473 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import doctest
import unittest
from genshi.template.base import Template, Context
class ContextTestCase(unittest.TestCase):
def test_copy(self):
# create a non-trivial context with some dummy
# frames, match templates and py:choice stacks.
orig_ctxt = Context(a=5, b=6)
orig_ctxt.push({'c': 7})
orig_ctxt._match_templates.append(object())
orig_ctxt._choice_stack.append(object())
ctxt = orig_ctxt.copy()
self.assertNotEqual(id(orig_ctxt), id(ctxt))
self.assertEqual(repr(orig_ctxt), repr(ctxt))
self.assertEqual(orig_ctxt._match_templates, ctxt._match_templates)
self.assertEqual(orig_ctxt._choice_stack, ctxt._choice_stack)
def suite():
suite = unittest.TestSuite()
suite.addTest(doctest.DocTestSuite(Template.__module__))
suite.addTest(unittest.makeSuite(ContextTestCase, 'test'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| gpl-2.0 | -1,583,609,276,247,912,200 | 5,003,546,765,167,786,000 | 34.071429 | 75 | 0.699253 | false |
ahmed-mahran/hue | desktop/core/ext-py/South-1.0.2/south/utils/__init__.py | 119 | 1945 | """
Generally helpful utility functions.
"""
def _ask_for_it_by_name(name):
"Returns an object referenced by absolute path."
bits = str(name).split(".")
## what if there is no absolute reference?
if len(bits) > 1:
modulename = ".".join(bits[:-1])
else:
modulename = bits[0]
module = __import__(modulename, {}, {}, bits[-1])
if len(bits) == 1:
return module
else:
return getattr(module, bits[-1])
def ask_for_it_by_name(name):
"Returns an object referenced by absolute path. (Memoised outer wrapper)"
if name not in ask_for_it_by_name.cache:
ask_for_it_by_name.cache[name] = _ask_for_it_by_name(name)
return ask_for_it_by_name.cache[name]
ask_for_it_by_name.cache = {}
def get_attribute(item, attribute):
"""
Like getattr, but recursive (i.e. you can ask for 'foo.bar.yay'.)
"""
value = item
for part in attribute.split("."):
value = getattr(value, part)
return value
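# Illustrative lookups (a sketch; the dotted paths below are hypothetical):
#
# cls = ask_for_it_by_name("myapp.models.Article")  # resolved once, then cached
# title = get_attribute(article_instance, "author.profile.name")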
def auto_through(field):
"Returns if the M2M class passed in has an autogenerated through table or not."
return (
# Django 1.0/1.1
(not field.rel.through)
or
# Django 1.2+
getattr(getattr(field.rel.through, "_meta", None), "auto_created", False)
)
def auto_model(model):
"Returns if the given model was automatically generated."
return getattr(model._meta, "auto_created", False)
def memoize(function):
"Standard memoization decorator."
name = function.__name__
_name = '_' + name
def method(self):
if not hasattr(self, _name):
value = function(self)
setattr(self, _name, value)
return getattr(self, _name)
def invalidate():
if hasattr(method, _name):
delattr(method, _name)
method.__name__ = function.__name__
method.__doc__ = function.__doc__
method._invalidate = invalidate
return method
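# Illustrative use of @memoize (a sketch; the class and the expensive call are
# hypothetical):
#
# class Inspector(object):
#     @memoize
#     def models(self):
#         return expensive_model_scan()  # computed once, cached as self._models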
| apache-2.0 | 3,648,588,516,844,751,400 | 992,213,877,068,737,200 | 25.643836 | 83 | 0.603085 | false |
lakshayg/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/distribution_test.py | 96 | 8140 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import distributions
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
ds = distributions
class DistributionTest(test.TestCase):
def testParamShapesAndFromParams(self):
classes = [
ds.Normal,
ds.Bernoulli,
ds.Beta,
ds.Chi2,
ds.Exponential,
ds.Gamma,
ds.InverseGamma,
ds.Laplace,
ds.StudentT,
ds.Uniform,
]
sample_shapes = [(), (10,), (10, 20, 30)]
with self.test_session():
for cls in classes:
for sample_shape in sample_shapes:
param_shapes = cls.param_shapes(sample_shape)
params = dict([(name, random_ops.random_normal(shape))
for name, shape in param_shapes.items()])
dist = cls(**params)
self.assertAllEqual(sample_shape,
array_ops.shape(dist.sample()).eval())
dist_copy = dist.copy()
self.assertAllEqual(sample_shape,
array_ops.shape(dist_copy.sample()).eval())
self.assertEqual(dist.parameters, dist_copy.parameters)
def testCopyExtraArgs(self):
with self.test_session():
# Note: we cannot easily test all distributions since each requires
# different initialization arguments. We therefore spot test a few.
normal = ds.Normal(loc=1., scale=2., validate_args=True)
self.assertEqual(normal.parameters, normal.copy().parameters)
wishart = ds.WishartFull(df=2, scale=[[1., 2], [2, 5]],
validate_args=True)
self.assertEqual(wishart.parameters, wishart.copy().parameters)
def testCopyOverride(self):
with self.test_session():
normal = ds.Normal(loc=1., scale=2., validate_args=True)
unused_normal_copy = normal.copy(validate_args=False)
base_params = normal.parameters.copy()
copy_params = normal.copy(validate_args=False).parameters.copy()
self.assertNotEqual(
base_params.pop("validate_args"), copy_params.pop("validate_args"))
self.assertEqual(base_params, copy_params)
def testIsScalar(self):
with self.test_session():
mu = 1.
sigma = 2.
normal = ds.Normal(mu, sigma, validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event()))
self.assertTrue(tensor_util.constant_value(normal.is_scalar_batch()))
normal = ds.Normal([mu], [sigma], validate_args=True)
self.assertTrue(tensor_util.constant_value(normal.is_scalar_event()))
self.assertFalse(tensor_util.constant_value(normal.is_scalar_batch()))
mvn = ds.MultivariateNormalDiag([mu], [sigma], validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event()))
self.assertTrue(tensor_util.constant_value(mvn.is_scalar_batch()))
mvn = ds.MultivariateNormalDiag([[mu]], [[sigma]], validate_args=True)
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_event()))
self.assertFalse(tensor_util.constant_value(mvn.is_scalar_batch()))
# We now test every codepath within the underlying is_scalar_helper
# function.
# Test case 1, 2.
x = array_ops.placeholder(dtype=dtypes.int32, shape=[])
# None would fire an exception were it actually executed.
self.assertTrue(normal._is_scalar_helper(x.get_shape(), lambda: None))
self.assertTrue(
normal._is_scalar_helper(tensor_shape.TensorShape(None),
lambda: array_ops.shape(x)))
x = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
# None would fire an exception were it actually executed.
self.assertFalse(normal._is_scalar_helper(x.get_shape(), lambda: None))
self.assertFalse(
normal._is_scalar_helper(tensor_shape.TensorShape(None),
lambda: array_ops.shape(x)))
# Test case 3.
x = array_ops.placeholder(dtype=dtypes.int32)
is_scalar = normal._is_scalar_helper(x.get_shape(),
lambda: array_ops.shape(x))
self.assertTrue(is_scalar.eval(feed_dict={x: 1}))
self.assertFalse(is_scalar.eval(feed_dict={x: [1]}))
def _GetFakeDistribution(self):
class FakeDistribution(ds.Distribution):
"""Fake Distribution for testing _set_sample_static_shape."""
def __init__(self, batch_shape=None, event_shape=None):
self._static_batch_shape = tensor_shape.TensorShape(batch_shape)
self._static_event_shape = tensor_shape.TensorShape(event_shape)
super(FakeDistribution, self).__init__(
dtype=dtypes.float32,
reparameterization_type=distributions.NOT_REPARAMETERIZED,
validate_args=True,
allow_nan_stats=True,
name="DummyDistribution")
def _batch_shape(self):
return self._static_batch_shape
def _event_shape(self):
return self._static_event_shape
return FakeDistribution
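  # Shape composition sketch (illustrative): sampling with sample_shape [6, 7]
  # from a distribution with batch_shape [2, 3] and event_shape [5] yields a
  # tensor of shape [6, 7, 2, 3, 5], which the assertions below verify.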
def testSampleShapeHints(self):
fake_distribution = self._GetFakeDistribution()
with self.test_session():
# Make a new session since we're playing with static shapes. [And below.]
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[2, 3], event_shape=[5])
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
# We use as_list since TensorShape comparison does not work correctly for
# unknown values, ie, Dimension(None).
self.assertAllEqual([6, 7, 2, 3, 5], y.get_shape().as_list())
with self.test_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[None, 3], event_shape=[5])
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertAllEqual([6, 7, None, 3, 5], y.get_shape().as_list())
with self.test_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[None, 3], event_shape=[None])
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertAllEqual([6, 7, None, 3, None], y.get_shape().as_list())
with self.test_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=None, event_shape=None)
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertTrue(y.get_shape().ndims is None)
with self.test_session():
x = array_ops.placeholder(dtype=dtypes.float32)
dist = fake_distribution(batch_shape=[None, 3], event_shape=None)
sample_shape = ops.convert_to_tensor([6, 7], dtype=dtypes.int32)
y = dist._set_sample_static_shape(x, sample_shape)
self.assertTrue(y.get_shape().ndims is None)
if __name__ == "__main__":
test.main()
| apache-2.0 | -2,866,307,709,390,289,000 | 9,015,472,784,551,158,000 | 41.176166 | 80 | 0.652457 | false |
azlanismail/prismgames | examples/games/car/networkx/algorithms/isomorphism/vf2userfunc.py | 1 | 7715 | """
Module to simplify the specification of user-defined equality functions for
node and edge attributes during isomorphism checks.
During the construction of an isomorphism, the algorithm considers two
candidate nodes n1 in G1 and n2 in G2. The graphs G1 and G2 are then
compared with respect to properties involving n1 and n2, and if the outcome
is good, then the candidate nodes are considered isomorphic. NetworkX
provides a simple mechanism for users to extend the comparisons to include
node and edge attributes.
Node attributes are handled by the node_match keyword. When considering
n1 and n2, the algorithm passes their node attribute dictionaries to
node_match, and if it returns False, then n1 and n2 cannot be
considered to be isomorphic.
Edge attributes are handled by the edge_match keyword. When considering
n1 and n2, the algorithm must verify that outgoing edges from n1 are
commensurate with the outgoing edges for n2. If the graph is directed,
then a similar check is also performed for incoming edges.
Focusing only on outgoing edges, we consider pairs of nodes (n1, v1) from
G1 and (n2, v2) from G2. For graphs and digraphs, there is only one edge
between (n1, v1) and only one edge between (n2, v2). Those edge attribute
dictionaries are passed to edge_match, and if it returns False, then
n1 and n2 cannot be considered isomorphic. For multigraphs and
multidigraphs, there can be multiple edges between (n1, v1) and also
multiple edges between (n2, v2). Now, there must exist an isomorphism
from "all the edges between (n1, v1)" to "all the edges between (n2, v2)".
So, all of the edge attribute dictionaries are passed to edge_match, and
it must determine if there is an isomorphism between the two sets of edges.
"""
import networkx as nx
from . import isomorphvf2 as vf2
__all__ = ['GraphMatcher',
'DiGraphMatcher',
'MultiGraphMatcher',
'MultiDiGraphMatcher',
]
def _semantic_feasibility(self, G1_node, G2_node):
"""Returns True if mapping G1_node to G2_node is semantically feasible.
"""
# Make sure the nodes match
if self.node_match is not None:
nm = self.node_match(self.G1.node[G1_node], self.G2.node[G2_node])
if not nm:
return False
# Make sure the edges match
if self.edge_match is not None:
# Cached lookups
G1_adj = self.G1_adj
G2_adj = self.G2_adj
core_1 = self.core_1
edge_match = self.edge_match
for neighbor in G1_adj[G1_node]:
# G1_node is not in core_1, so we must handle R_self separately
if neighbor == G1_node:
if not edge_match(G1_adj[G1_node][G1_node],
G2_adj[G2_node][G2_node]):
return False
elif neighbor in core_1:
if not edge_match(G1_adj[G1_node][neighbor],
G2_adj[G2_node][core_1[neighbor]]):
return False
# syntactic check has already verified that neighbors are symmetric
return True
class GraphMatcher(vf2.GraphMatcher):
"""VF2 isomorphism checker for undirected graphs.
"""
def __init__(self, G1, G2, node_match=None, edge_match=None):
"""Initialize graph matcher.
Parameters
----------
G1, G2: graph
The graphs to be tested.
node_match: callable
A function that returns True iff node n1 in G1 and n2 in G2
should be considered equal during the isomorphism test. The
function will be called like::
node_match(G1.node[n1], G2.node[n2])
That is, the function will receive the node attribute dictionaries
of the nodes under consideration. If None, then no attributes are
considered when testing for an isomorphism.
edge_match: callable
A function that returns True iff the edge attribute dictionary for
the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should be
considered equal during the isomorphism test. The function will be
called like::
edge_match(G1[u1][v1], G2[u2][v2])
That is, the function will receive the edge attribute dictionaries
of the edges under consideration. If None, then no attributes are
considered when testing for an isomorphism.
"""
vf2.GraphMatcher.__init__(self, G1, G2)
self.node_match = node_match
self.edge_match = edge_match
# These will be modified during checks to minimize code repeat.
self.G1_adj = self.G1.adj
self.G2_adj = self.G2.adj
semantic_feasibility = _semantic_feasibility
class DiGraphMatcher(vf2.DiGraphMatcher):
"""VF2 isomorphism checker for directed graphs.
"""
def __init__(self, G1, G2, node_match=None, edge_match=None):
"""Initialize graph matcher.
Parameters
----------
G1, G2 : graph
The graphs to be tested.
node_match : callable
A function that returns True iff node n1 in G1 and n2 in G2
should be considered equal during the isomorphism test. The
function will be called like::
node_match(G1.node[n1], G2.node[n2])
That is, the function will receive the node attribute dictionaries
of the nodes under consideration. If None, then no attributes are
considered when testing for an isomorphism.
edge_match : callable
A function that returns True iff the edge attribute dictionary for
the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should be
considered equal during the isomorphism test. The function will be
called like::
edge_match(G1[u1][v1], G2[u2][v2])
That is, the function will receive the edge attribute dictionaries
of the edges under consideration. If None, then no attributes are
considered when testing for an isomorphism.
"""
vf2.DiGraphMatcher.__init__(self, G1, G2)
self.node_match = node_match
self.edge_match = edge_match
# These will be modified during checks to minimize code repeat.
self.G1_adj = self.G1.adj
self.G2_adj = self.G2.adj
def semantic_feasibility(self, G1_node, G2_node):
"""Returns True if mapping G1_node to G2_node is semantically feasible."""
# Test node_match and also test edge_match on successors
feasible = _semantic_feasibility(self, G1_node, G2_node)
if not feasible:
return False
# Test edge_match on predecessors
self.G1_adj = self.G1.pred
self.G2_adj = self.G2.pred
feasible = _semantic_feasibility(self, G1_node, G2_node)
self.G1_adj = self.G1.adj
self.G2_adj = self.G2.adj
return feasible
## The "semantics" of edge_match are different for multi(di)graphs, but
## the implementation is the same. So, technically we do not need to
## provide "multi" versions, but we do so to match NetworkX's base classes.
class MultiGraphMatcher(GraphMatcher):
"""VF2 isomorphism checker for undirected multigraphs. """
pass
class MultiDiGraphMatcher(DiGraphMatcher):
"""VF2 isomorphism checker for directed multigraphs. """
pass
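# Illustrative usage (a sketch, not part of the upstream module; the 'weight'
# attribute name is hypothetical):
#
# import networkx as nx
# G1 = nx.Graph([(0, 1, {'weight': 3})])
# G2 = nx.Graph([(0, 1, {'weight': 3})])
# em = lambda d1, d2: d1.get('weight') == d2.get('weight')
# print(GraphMatcher(G1, G2, edge_match=em).is_isomorphic())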
| gpl-2.0 | 2,907,974,477,302,273,500 | 3,341,074,629,566,073,300 | 36.964646 | 82 | 0.625275 | false |
Donnerbart/hazelcast-simulator | dist/src/main/dist/bin/benchmark-report.py | 1 | 32024 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# todo:
# - writing html
# - pruning of dstats to match running time
# - when comparing benchmarks; use 1 color for all plots from 1 benchmark
# - if no latency info is found; print warning
# - when there are not a lot of data points, there are time issues in gnuplot (use WORKER_PERFORMANCE_MONITOR_INTERVAL_SECONDS=default)
# - timeseries: avg
# - gnuplot y axis formatting; long numbers are unreadable because they lack dot or comma separators
# - throughput per member in main output directory
# - latency distribution doesn't show the percentiles; doesn't load xlabels.csv
#
# done:
# - better commandline help
# - throughput per worker in a single plot
# - default gnuplot colors stink; often they are not distinguishable
#
# backlog
# - google chart option
# - svg option
# - latency per worker
# - option to plot with real time.
# - dstats merging for members?
# - cpu usage merging needs to be divided by number of agents.
# - option not to make low part of graph shrink
# - option to show real time
import argparse
import csv
import os
import re
import tempfile
parser = argparse.ArgumentParser(description='Creating a benchmark report from one or more benchmarks.')
parser.add_argument('benchmarks', metavar='B', nargs='+',
help='a benchmark to be used in the comparison')
# parser.add_argument('-r', '--realtime', default='report', help='print the real time of the datapoints.')
parser.add_argument('-o', '--output', nargs=1,
help='The output directory for the report. By default a report directory in the working directory is created.')
x = parser.parse_args()
args = x.benchmarks
simulator_home = os.environ['SIMULATOR_HOME']
if not x.output:
report_dir = "report"
else:
report_dir = x.output[0]
print("output directory '" + report_dir + "'")
# ================ utils ========================
def dump(obj):
for attr in dir(obj):
print "obj.%s = %s" % (attr, getattr(obj, attr))
def ensure_dir(file_path):
if not os.path.exists(file_path):
os.makedirs(file_path)
# ================ plotting =========================
class Gnuplot:
image_width = 1280
image_height = 1024
filepath = None
ylabel = None
is_bytes = None
def __init__(self, directory, title, basefilename=None):
self.tmp = tempfile.NamedTemporaryFile(delete=False)
self.title = title
self.directory = directory
self.ts_list = []
self.titles = {}
self.basefilename = basefilename
def _complete(self):
self.tmp.flush()
from os import system
system('gnuplot ' + self.tmp.name)
def _write(self, line):
self.tmp.write(line + '\n')
def add(self, ts, title=None):
self.ts_list.append(ts)
self.titles[ts] = title
return self
    # returns a color for the time series. We are using some hard-coded colors to make sure
    # the colors are predictable and clearly distinguishable. If there are too many time series
    # then we just rely on the default mechanism
def _color(self, ts):
if (len(self.ts_list)) > 8:
return None
# for list of colors: http://www.ss.scphys.kyoto-u.ac.jp/person/yonezawa/contents/program/gnuplot/colorname_list.html
if ts == self.ts_list[0]:
return "red"
elif ts == self.ts_list[1]:
return "blue"
elif ts == self.ts_list[2]:
return "forest-green"
elif ts == self.ts_list[3]:
return "gold"
elif ts == self.ts_list[4]:
return "grey"
elif ts == self.ts_list[5]:
return "brown"
elif ts == self.ts_list[6]:
return "violet"
else:
return "orchid"
def plot(self):
empty = True
for ts in self.ts_list:
if not ts.is_empty():
empty = False
break
if empty:
# print("Skipping plot of " + self.title + "; timeseries are empty")
return
ts_first = self.ts_list[0]
self.ylabel = ts_first.ylabel
if self.basefilename:
self.filepath = os.path.join(self.directory, self.basefilename + ".png")
else:
self.filepath = os.path.join(self.directory, ts_first.name + ".png")
self.is_bytes = ts_first.is_bytes
ensure_dir(self.directory)
self._plot()
print(self.filepath)
def _plot(self):
raise NotImplementedError("Please Implement this method")
class TimeseriesGnuplot(Gnuplot):
def __init__(self, directory, title, basefilename=None):
Gnuplot.__init__(self, directory, title, basefilename)
def _plot(self):
# self._write("unset autoscale y")
self._write("set title '" + self.title + "' noenhanced")
self._write("set style data lines")
self._write('set datafile separator ","')
self._write("set terminal png size " + str(self.image_width) + "," + str(self.image_height))
self._write("set grid")
self._write("set key below")
self._write("set xdata time")
self._write("set timefmt \"%s\"")
self._write("offset = 0")
self._write("t0(x)=(offset=($0==0) ? x : offset, x - offset)")
self._write("set xlabel 'Time minutes:seconds'")
self._write("set ylabel '" + self.ylabel + "'")
if self.is_bytes:
# the problem here is that this is 1000 based; not 1024
self._write("set format y '%.1s%cB'")
# else:
# self._write("set format y '%.0f'")
self._write("set output '" + self.filepath + "'")
self._write("plot \\")
tmp_files = []
for ts in self.ts_list:
ts_file = ts.to_tmp_file()
tmp_files.append(ts_file)
if len(self.ts_list) > 1:
title = self.titles[ts]
if not title:
title = ts.name
title_str = "title \"" + title + "\" noenhanced"
else:
title_str = "title \"\""
color = self._color(ts)
lt = ""
if color:
lt = "lt rgb \"" + color + "\""
self._write(" \'" + ts_file.name + "\' using (t0(timecolumn(1))):2 " + title_str + " " + lt + ", \\")
self._complete()
for tmp_file in tmp_files:
tmp_file.close()
class LatencyDistributionGnuplot(Gnuplot):
def __init__(self, directory, title):
Gnuplot.__init__(self, directory, title)
def _plot(self):
self._write("set datafile separator \",\"")
self._write("set title '" + self.title + "' noenhanced")
self._write("set terminal png size " + str(self.image_width) + "," + str(self.image_height))
self._write("set grid")
self._write("unset xtics")
self._write("set ylabel 'Latency (μs)'")
self._write("set logscale x")
self._write('set key top left')
self._write("set style line 1 lt 1 lw 3 pt 3 linecolor rgb \"red\"")
self._write("set output '" + self.filepath + "'")
self._write("plot '"+simulator_home+"/bin/xlabels.csv' notitle with labels center offset 0, 1.5 point,\\")
tmp_files = []
for ts in self.ts_list:
ts_file = ts.to_tmp_file()
tmp_files.append(ts_file)
if len(self.ts_list) > 1:
title = self.titles[ts]
if not title:
title = ts.name
title_str = "title \"" + title + "\" noenhanced"
else:
title_str = "title \"\""
color = self._color(ts)
lt = ""
if color:
lt = "lt rgb \"" + color + "\""
self._write(" \"" + ts_file.name + "\" using 1:2 " + title_str + " " + lt + " with lines, \\")
self._complete()
for tmp_file in tmp_files:
tmp_file.close()
print(self.tmp.name)
class GoogleCharts:
def __init__(self, ts, directory, title):
self.title = title
self.ts = ts
self.directory = directory
with open('chart_template.html', 'r') as f:
self.chart_template = f.read()
def plot(self):
filepath = os.path.join(self.directory, self.ts.name + ".html")
        # this plot holds a single series; skip it if there are no data points
        if self.ts.is_empty():
            print("Skipping plot of " + filepath + "; timeseries is empty")
            return
rows = ""
        first = True
        for item in self.ts.items:
            if not first:
                rows += ",\n"
            rows += "[" + str(item.time) + "," + str(item.value) + "]"
            first = False
chart = self.chart_template.replace("$rows", rows)
ensure_dir(self.directory)
with open(filepath, 'w') as f:
f.write(chart)
print filepath
# a series is effectively a list of key/values. It could be a time series where the key is the time and the value
# is the measured value e.g. cpu usage.
class Series:
name = None
def __init__(self, name, ylabel, is_bytes, ts_list=None, items=None, ):
if ts_list is None:
ts_list = []
self.is_bytes = is_bytes
self.name = name
self.ylabel = ylabel
if not items:
self.items = []
else:
self.items = items
self.attributes = {}
for source_ts in ts_list:
if source_ts.is_empty():
continue
# add all items in the source_ts, to the result_ts
for index in range(0, source_ts.length()):
source_item = source_ts.items[index]
if self.length() > index:
result_item = self.items[index]
result_item.value += source_item.value
else:
self.add(source_item.time, source_item.value)
def add(self, time, value):
self.items.append(KeyValue(time, value))
def start_time(self):
if not self.items:
return None
else:
return self.items[0].time
def end_time(self):
if not self.items:
return None
else:
return self.items[len(self.items) - 1].time
def to_tmp_file(self):
temp = tempfile.NamedTemporaryFile(delete=False)
for item in self.items:
temp.write(str(item.time) + ',' + str(item.value) + '\n')
temp.close()
return temp
def length(self):
return len(self.items)
def is_empty(self):
return self.length() == 0
def min(self):
result = None
for item in self.items:
if not result or item.value < result:
result = item.value
return result
def max(self):
result = None
for item in self.items:
if not result or item.value > result:
result = item.value
return result
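# Merging sketch (illustrative): Series("sum", "ops/s", False, ts_list=[a, b])
# folds b's values into a's index by index, so merged inputs are assumed to be
# sampled on the same schedule.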
# A key/value in a series
class KeyValue:
time = None
value = None
def __init__(self, time, value):
self.time = time
self.value = float(value)
# A handle to a series. With a handle you can refer to a series, without needing to pull it into memory. Since we could have
# a lot of measured data, we want to prevent getting it all in memory.
class SeriesHandle:
def __init__(self, src, name, title, ylabel, load_method, args=None, is_bytes=False):
if not args:
args = []
self.src = src
self.name = name
self.title = title
self.ylabel = ylabel
self.load_method = load_method
self.args = args
self.is_bytes = is_bytes
def load(self):
items = self.load_method(*self.args)
return Series(self.name, self.ylabel, self.is_bytes, items=items)
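# Illustrative handle usage (a sketch; the loader below is hypothetical):
#
# handle = SeriesHandle("throughput", "tp", "Throughput", "Operations/sec",
#                       load_throughput_csv)
# series = handle.load()  # the CSV is only parsed here, not at construction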
class Worker:
name = ""
directory = ""
performance_csv = None
def __init__(self, name, directory):
self.name = name
self.directory = directory
refs = []
self.ts_references = refs
refs.append(SeriesHandle("throughput", "throughput_" + name, "Throughput", "Operations/second",
self.__load_throughput))
refs.append(SeriesHandle("dstat", "memory_used", "Memory Used", "Memory used",
self.__load_dstat, args=[1], is_bytes=True))
refs.append(SeriesHandle("dstat", "memory_buffered", "Memory Buffered", "Memory Buffered",
self.__load_dstat, args=[2], is_bytes=True))
refs.append(SeriesHandle("dstat", "memory_cached", "Memory Cached", "Memory Cached",
self.__load_dstat, args=[3], is_bytes=True))
refs.append(SeriesHandle("dstat", "memory_free", "Memory Free", "Memory Free",
self.__load_dstat, args=[4], is_bytes=True))
refs.append(SeriesHandle("dstat", "cpu_user", "CPU User", "CPU User %",
self.__load_dstat, args=[5]))
refs.append(SeriesHandle("dstat", "cpu_system", "CPU System", "CPU System %",
self.__load_dstat, args=[6]))
refs.append(SeriesHandle("dstat", "cpu_idle", "CPU Idle", "CPU Idle %",
self.__load_dstat, args=[7]))
refs.append(SeriesHandle("dstat", "cpu_wait", "CPU Wait", "CPU Wait %",
self.__load_dstat, args=[8]))
refs.append(SeriesHandle("dstat", "cpu_total", "CPU Total", "CPU Total %",
self.__load_dstat_cpu_total_ts))
refs.append(SeriesHandle("dstat", "cpu_hardware_interrupts", "CPU Hardware Interrupts", "CPU Hardware Interrupts/sec",
self.__load_dstat, args=[9]))
refs.append(SeriesHandle("dstat", "cpu_software_interrupts", "CPU Software Interrupts", "CPU Software Interrupts/sec",
self.__load_dstat, args=[10]))
refs.append(SeriesHandle("dstat", "disk_read", "Disk Reads", "Disk Reads/sec",
self.__load_dstat, args=[11], is_bytes=True))
refs.append(SeriesHandle("dstat", "disk_write", "Disk Writes", "Disk writes/sec",
self.__load_dstat, args=[12], is_bytes=True))
refs.append(SeriesHandle("dstat", "net_receive", "Net Receive", "Receiving/second",
self.__load_dstat, args=[13], is_bytes=True))
refs.append(SeriesHandle("dstat", "net_send", "Net Send", "Sending/second",
self.__load_dstat, args=[14], is_bytes=True))
refs.append(SeriesHandle("dstat", "page_in", "Page in", "todo",
self.__load_dstat, args=[15]))
refs.append(SeriesHandle("dstat", "page_out", "Page out", "todo",
self.__load_dstat, args=[16]))
refs.append(SeriesHandle("dstat", "system_interrupts", "System Interrupts", "System Interrupts/sec",
self.__load_dstat, args=[17]))
refs.append(SeriesHandle("dstat", "system_context_switches", "System Context Switches", "System Context Switches/sec",
self.__load_dstat, args=[18]))
refs.append(SeriesHandle("dstat", "load_average_1m", "Load Average 1 Minute", "Load",
self.__load_dstat, args=[19]))
refs.append(SeriesHandle("dstat", "load_average_5m", "Load Average 5 Minutes", "Load",
self.__load_dstat, args=[20]))
refs.append(SeriesHandle("dstat", "load_average_15m", "Load Average 15 Minute", "Load",
self.__load_dstat, args=[21]))
refs.append(SeriesHandle("gc", "pause_time", "Pause time", "seconds",
self.__load_gc, args=[1, True]))
refs.append(SeriesHandle("gc", "young_size_before_gc", "Young size before gc", "Size",
self.__load_gc, args=[5, True], is_bytes=True))
refs.append(SeriesHandle("gc", "young_size_after_gc", "Young size after gc", "Size",
self.__load_gc, args=[6, True], is_bytes=True))
refs.append(SeriesHandle("gc", "young_size_max", "Young size max", "Size",
self.__load_gc, args=[7, True], is_bytes=True))
refs.append(SeriesHandle("gc", "young_collected", "Young collected", "Collected",
self.__load_gc, args=[8, True], is_bytes=True))
refs.append(SeriesHandle("gc", "young_collected_rate", "Young collection rate", "Collected/second",
self.__load_gc, args=[9, True], is_bytes=True))
refs.append(SeriesHandle("gc", "young_allocated", "Young allocated", "Allocation",
self.__load_gc, args=[10, True], is_bytes=True))
refs.append(SeriesHandle("gc", "allocation_rate", "Allocation rate", "Allocated/second",
self.__load_gc, args=[11, True], is_bytes=True))
refs.append(SeriesHandle("gc", "heap_size_before_gc", "Heap size before gc", "Size",
self.__load_gc, args=[12, False], is_bytes=True))
refs.append(SeriesHandle("gc", "heap_size_after_gc", "Heap size after gc", "Size",
self.__load_gc, args=[13, False], is_bytes=True))
refs.append(SeriesHandle("gc", "heap_size_max", "Heap size max", "Size",
self.__load_gc, args=[14, False], is_bytes=True))
refs.append(SeriesHandle("gc", "heap_collected", "Heap collected", "Size",
self.__load_gc, args=[15, False], is_bytes=True))
refs.append(SeriesHandle("gc", "heap_collected_rate", "Heap collected rate", "Collected/second",
self.__load_gc, args=[16, False], is_bytes=True))
refs.append(SeriesHandle("gc", "promotion", "Promoted", "Size",
self.__load_gc, args=[17, False], is_bytes=True))
refs.append(SeriesHandle("gc", "promotion_rate", "Promotion rate", "Promoted/second",
self.__load_gc, args=[18, True], is_bytes=True))
refs.append(SeriesHandle("gc", "old_size_before_gc", "Tenured size before gc", "Size",
self.__load_gc, args=[19, True], is_bytes=True))
refs.append(SeriesHandle("gc", "old_size_after_gc", "Tenured size after gc", "Size",
self.__load_gc, args=[20, True], is_bytes=True))
refs.append(SeriesHandle("gc", "old_total", "Tenured size total", "Size",
self.__load_gc, args=[21, True], is_bytes=True))
refs.append(SeriesHandle("gc", "meta_size_before_gc", "Meta/Perm size before gc", "Size",
self.__load_gc, args=[22, True], is_bytes=True))
refs.append(SeriesHandle("gc", "meta_size_after_gc", "Meta/Perm size after gc", "Size",
self.__load_gc, args=[23, True], is_bytes=True))
refs.append(SeriesHandle("gc", "meta_total", "Meta/Perm size total", "Size",
self.__load_gc, args=[24, True], is_bytes=True))
# Returns the name of the agent this worker belongs to
def agent(self):
index = self.name.index("_", 3)
return self.name[0:index]
    def is_driver(self):
        return os.path.exists(os.path.join(self.directory, "performance.csv"))
def __load_throughput(self):
performance_csv = os.path.join(self.directory, "performance.csv")
result = []
if os.path.exists(performance_csv):
with open(performance_csv, 'rb') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',', quotechar='|')
# skip the first line
next(csvreader)
for row in csvreader:
result.append(KeyValue(row[0], row[4]))
return result
def __load_dstat(self, column):
dstat_csv = os.path.join(self.directory, "dstat.csv")
result = []
if os.path.exists(dstat_csv):
with open(dstat_csv, 'rb') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',', quotechar='|')
                # we need to skip the first 8 header lines
for x in range(0, 8):
next(csvreader)
for row in csvreader:
if column < len(row): # protection if column doesn't exist
result.append(KeyValue(row[0], row[column]))
return result
def __load_gc(self, column, filter_minus_one):
gc_csv = os.path.join(self.directory, "gc.csv")
result = []
if os.path.exists(gc_csv):
with open(gc_csv, 'rb') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',', quotechar='|')
# we need to skip the first line
next(csvreader)
for row in csvreader:
key = row[0]
value = row[column]
if value != "-1" or not filter_minus_one:
result.append(KeyValue(key, value))
return result
# total cpu usage isn't explicitly provided by dstat, so we just sum the user+system
def __load_dstat_cpu_total_ts(self):
dstat_csv = os.path.join(self.directory, "dstat.csv")
result = []
if os.path.exists(dstat_csv):
with open(dstat_csv, 'rb') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',', quotechar='|')
                # we need to skip the first 8 header lines
for x in range(0, 8):
next(csvreader)
for row in csvreader:
if len(row) > 6: # protection if column doesn't exist
result.append(KeyValue(row[0], float(row[5]) + float(row[6])))
return result
class Benchmark:
# the directory where the original files can be found
src_dir = ""
workers = None
name = ""
def __init__(self, src_dir, name):
self.src_dir = src_dir
self.name = name
# load all workers
self.workers = []
for subdir_name in os.listdir(src_dir):
subdir = os.path.join(src_dir, subdir_name)
if not os.path.isdir(subdir):
continue
if not subdir_name.startswith("C_A"):
continue
self.workers.append(Worker(subdir_name, subdir))
# making sure there are workers; otherwise it is an invalid benchmark
if len(self.workers) == 0:
print("Invalid Benchmark " + self.name + " from directory [" + self.src_dir + "]; no workers found")
exit(1)
# look for all latency info
refs = []
self.ts_references = refs
refs.append(SeriesHandle("throughput", "throughput", "Throughput", "Operations/sec", self.aggregated_throughput))
for file_name in os.listdir(self.src_dir):
if not file_name.endswith(".hgrm"):
continue
file_name = os.path.splitext(file_name)[0]
file_path = os.path.join(self.src_dir, file_name)
print(file_path)
name = file_name.split('-')[1]
refs.append(SeriesHandle("latency", "latency_interval_25_" + name, "Interval 25%", "Latency (μs)",
self.load_latency_ts, args=[file_path, 3]))
refs.append(SeriesHandle("latency", "latency_interval_50_" + name, "Interval 50%", "Latency (μs)",
self.load_latency_ts, args=[file_path, 4]))
refs.append(SeriesHandle("latency", "latency_interval_75_" + name, "Interval 75%", "Latency (μs)",
self.load_latency_ts, args=[file_path, 5]))
refs.append(SeriesHandle("latency", "latency_interval_90_" + name, "Interval 90%", "Latency (μs)",
self.load_latency_ts, args=[file_path, 6]))
refs.append(SeriesHandle("latency", "latency_interval_99_" + name, "Interval 99%", "Latency (μs)",
self.load_latency_ts, args=[file_path, 7]))
refs.append(SeriesHandle("latency", "latency_interval_999_" + name, "Interval 99.9%", "Latency (μs)",
self.load_latency_ts, args=[file_path, 8]))
refs.append(SeriesHandle("latency", "latency_interval_9999_" + name, "Interval 99.99%", "Latency (μs)",
self.load_latency_ts, args=[file_path, 9]))
refs.append(SeriesHandle("latency", "latency_interval_99999_" + name, "Interval 99.999%", "Latency (μs)",
self.load_latency_ts, args=[file_path, 10]))
refs.append(SeriesHandle("latency", "latency_interval_min_" + name, "Interval Min", "Latency (μs)",
self.load_latency_ts, args=[file_path, 11]))
refs.append(SeriesHandle("latency", "latency_interval_max_" + name, "Interval Max", "Latency (μs)",
self.load_latency_ts, args=[file_path, 12]))
refs.append(SeriesHandle("latency", "latency_interval_mean_" + name, "Interval Mean", "Latency (μs)",
self.load_latency_ts, args=[file_path, 13]))
refs.append(
SeriesHandle("latency", "latency_interval_std_deviation_" + name, "Interval Standard Deviation", "Latency (μs)",
self.load_latency_ts, args=[file_path, 14]))
hgrm_path = os.path.join(src_dir, file_name + ".hgrm")
refs.append(
SeriesHandle("latency-distribution", "latency_distribution_" + name, "Latency distribution", "Latency (μs)",
self.load_latency_distribution_ts, args=[hgrm_path]))
agents = {}
for worker in self.workers:
agent = worker.agent()
if not agents.get(agent):
agents[agent] = worker
for agent, worker in agents.iteritems():
for ref in worker.ts_references:
if ref.src == "dstat":
refs.append(SeriesHandle("dstat", ref.name + "_" + agent, ref.title, ref.ylabel, self.x, args=[ref],
is_bytes=ref.is_bytes))
    # re-loads a referenced series and returns its items (used when re-wrapping
    # the per-agent dstat handles above)
    def x(self, ref):
        return ref.load().items
def aggregated_throughput(self):
list = []
for worker in self.workers:
for ref in worker.ts_references:
if ref.src == "throughput":
list.append(ref.load())
return Series("", "", False, ts_list=list).items
def load_latency_ts(self, path, column):
result = []
with open(path, 'rb') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',', quotechar='|')
            # we need to skip the first 3 header lines
for x in range(0, 3):
next(csvreader)
for row in csvreader:
result.append(KeyValue(row[0], row[column]))
return result
def load_latency_distribution_ts(self, path):
result = []
line_index = 0
with open(path) as f:
for line in f:
line = line.rstrip()
line_index += 1
if line_index < 4 or line.startswith("#"):
continue
row = re.split(" +", line)
if len(row) < 5:
continue
result.append(KeyValue(row[4], row[1]))
print path
return result
class Comparison:
def __init__(self):
benchmark_dirs = []
benchmark_names = {}
last_benchmark = None
print("Loading benchmarks")
# collect all benchmark directories and the names for the benchmarks
for arg in args:
if arg.startswith("[") and arg.endswith("]"):
if not last_benchmark:
print("Benchmark name " + arg + " must be preceded with a benchmark directory.")
exit()
benchmark_names[last_benchmark] = arg[1:len(arg) - 1]
last_benchmark = None
else:
benchmark_dir = arg
if not os.path.exists(benchmark_dir):
print("benchmark directory '" + benchmark_dir + "' does not exist!")
exit(1)
last_benchmark = arg
benchmark_dirs.append(benchmark_dir)
name = os.path.basename(os.path.normpath(benchmark_dir))
benchmark_names[benchmark_dir] = name
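        # Illustrative invocation (a sketch; directory names and labels are
        # hypothetical): benchmark-report.py run1 [baseline] run2 [patched]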
# Make the benchmarks
self.benchmarks = []
for benchmark_dir in benchmark_dirs:
self.benchmarks.append(Benchmark(benchmark_dir, benchmark_names[benchmark_dir]))
def output_dir(self, name):
output_dir = os.path.join(report_dir, name)
ensure_dir(output_dir)
return output_dir
def compare(self):
plots = {}
for benchmark in self.benchmarks:
if len(benchmark.ts_references) == 0:
print(" benchmark [" + benchmark.name + "] benchmark.dir [" + benchmark.src_dir + "] has no data")
exit(1)
for ref in benchmark.ts_references:
plot = plots.get(ref.name)
if not plot:
if ref.src == "latency-distribution":
plot = LatencyDistributionGnuplot(self.output_dir("latency"), ref.title)
else:
plot = TimeseriesGnuplot(self.output_dir(ref.src), ref.title)
plots[ref.name] = plot
plot.add(ref.load(), title=benchmark.name)
for benchmark in self.benchmarks:
for worker in benchmark.workers:
for ref in worker.ts_references:
if ref.src == "throughput":
plot = plots.get("throughput_per_worker")
if not plot:
plot = TimeseriesGnuplot(self.output_dir(ref.src),
"Throughput per member",
basefilename="throughput_per_worker")
plots["throughput_per_worker"] = plot
if len(self.benchmarks) > 1:
plot.add(ref.load(), benchmark.name + "_" + worker.name)
else:
plot.add(ref.load(), worker.name)
# make all plots for each individual worker
for benchmark in self.benchmarks:
for worker in benchmark.workers:
for ref in worker.ts_references:
if ref.src == "dstat":
continue # dstat is already plotted
name = ref.name+"_"+worker.name
plot = plots.get(name)
if not plot:
plot = TimeseriesGnuplot(self.output_dir(ref.src), worker.name + " " + ref.title, basefilename=name)
plots[name] = plot
plot.add(ref.load(), benchmark.name)
for plot in plots.values():
plot.plot()
print("Done writing report [" + report_dir + "]")
for benchmark in self.benchmarks:
print(" benchmark [" + benchmark.name + "] benchmark.dir [" + benchmark.src_dir + "]")
comparison = Comparison()
comparison.compare()
| apache-2.0 | -8,841,456,741,350,808,000 | 4,377,375,627,872,012,000 | 39.0125 | 131 | 0.531147 | false |
smsolivier/VEF | code/hlimit.py | 1 | 2247 | #!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import ld as LD
import dd as DD
from hidespines import *
import sys
''' compares difference between Sn and moment equations as cell width --> 0 '''
if (len(sys.argv) > 1):
outfile = sys.argv[1]
else:
outfile = None
def getDiff(sol, tol=1e-6):
diff = np.zeros(len(sol))
for i in range(len(sol)):
x, phi, it = sol[i].sourceIteration(tol)
diff[i] = np.linalg.norm(phi - sol[i].phi_SN, 2)/np.linalg.norm(sol[i].phi_SN, 2)
return diff
N = 100
n = 8
xb = 1
Sigmaa = lambda x: .1
Sigmat = lambda x: 1
q = lambda x, mu: 1
tol = 1e-10
N = np.logspace(1, 3, 5)
N = np.array([int(x) for x in N])
ed00 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=0, GAUSS=0) for x in N]
ed01 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=0, GAUSS=1) for x in N]
ed10 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=1, GAUSS=0) for x in N]
ed11 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=1, GAUSS=1) for x in N]
ed20 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=2, GAUSS=0) for x in N]
ed21 = [LD.Eddington(np.linspace(0, xb, x+1), n, Sigmaa,
Sigmat, q, OPT=2, GAUSS=1) for x in N]
diff00 = getDiff(ed00, tol)
diff01 = getDiff(ed01, tol)
diff10 = getDiff(ed10, tol)
diff11 = getDiff(ed11, tol)
diff20 = getDiff(ed20, tol)
diff21 = getDiff(ed21, tol)
fontsize=16
plt.loglog(xb/N, diff00, '-o', clip_on=False, label='MHFEM Edges, No Gauss')
plt.loglog(xb/N, diff01, '-o', clip_on=False, label='Maintain Slopes, No Gauss')
plt.loglog(xb/N, diff10, '-o', clip_on=False, label='MHFEM Edges, Gauss')
plt.loglog(xb/N, diff11, '-o', clip_on=False, label='Maintain Slopes, Gauss')
plt.loglog(xb/N, diff20, '-o', clip_on=False, label='vanLeer, No Gauss')
plt.loglog(xb/N, diff21, '-o', clip_on=False, label='vanLeer, Gauss')
plt.xlabel(r'$h$', fontsize=fontsize)
plt.ylabel('SN/MHFEM Convergence', fontsize=fontsize)
plt.legend(loc='best', frameon=False)
hidespines(plt.gca())
if (outfile != None):
plt.savefig(outfile, transparent=True)
else:
plt.show()
| mit | -6,124,445,888,122,934,000 | 4,629,353,418,110,265,000 | 24.827586 | 85 | 0.653316 | false |
instana/python-sensor | instana/instrumentation/urllib3.py | 1 | 3690 | # (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2017
from __future__ import absolute_import
import opentracing
import opentracing.ext.tags as ext
import wrapt
from ..log import logger
from ..singletons import agent
from ..util.traceutils import get_active_tracer
from ..util.secrets import strip_secrets_from_query
try:
import urllib3
def collect(instance, args, kwargs):
""" Build and return a fully qualified URL for this request """
kvs = dict()
try:
kvs['host'] = instance.host
kvs['port'] = instance.port
if args is not None and len(args) == 2:
kvs['method'] = args[0]
kvs['path'] = args[1]
else:
kvs['method'] = kwargs.get('method')
kvs['path'] = kwargs.get('path')
if kvs['path'] is None:
kvs['path'] = kwargs.get('url')
# Strip any secrets from potential query params
if kvs.get('path') is not None and ('?' in kvs['path']):
parts = kvs['path'].split('?')
kvs['path'] = parts[0]
if len(parts) == 2:
kvs['query'] = strip_secrets_from_query(parts[1], agent.options.secrets_matcher, agent.options.secrets_list)
if type(instance) is urllib3.connectionpool.HTTPSConnectionPool:
kvs['url'] = 'https://%s:%d%s' % (kvs['host'], kvs['port'], kvs['path'])
else:
kvs['url'] = 'http://%s:%d%s' % (kvs['host'], kvs['port'], kvs['path'])
except Exception:
logger.debug("urllib3 collect error", exc_info=True)
return kvs
else:
return kvs
def collect_response(scope, response):
try:
scope.span.set_tag(ext.HTTP_STATUS_CODE, response.status)
if agent.options.extra_http_headers is not None:
for custom_header in agent.options.extra_http_headers:
if custom_header in response.headers:
scope.span.set_tag("http.header.%s" % custom_header, response.headers[custom_header])
if 500 <= response.status <= 599:
scope.span.mark_as_errored()
except Exception:
logger.debug("collect_response", exc_info=True)
@wrapt.patch_function_wrapper('urllib3', 'HTTPConnectionPool.urlopen')
def urlopen_with_instana(wrapped, instance, args, kwargs):
active_tracer = get_active_tracer()
# If we're not tracing, just return; boto3 has it's own visibility
if active_tracer is None or active_tracer.active_span.operation_name == 'boto3':
return wrapped(*args, **kwargs)
with active_tracer.start_active_span("urllib3", child_of=active_tracer.active_span) as scope:
try:
kvs = collect(instance, args, kwargs)
if 'url' in kvs:
scope.span.set_tag(ext.HTTP_URL, kvs['url'])
if 'query' in kvs:
scope.span.set_tag("http.params", kvs['query'])
if 'method' in kvs:
scope.span.set_tag(ext.HTTP_METHOD, kvs['method'])
if 'headers' in kwargs:
active_tracer.inject(scope.span.context, opentracing.Format.HTTP_HEADERS, kwargs['headers'])
response = wrapped(*args, **kwargs)
collect_response(scope, response)
return response
except Exception as e:
scope.span.mark_as_errored({'message': e})
raise
logger.debug("Instrumenting urllib3")
except ImportError:
pass
| mit | 4,741,157,442,957,616,000 | 1,004,827,133,781,527,600 | 37.041237 | 128 | 0.557995 | false |
pulsar-chem/Pulsar-Core | test/system/TestBasisSet.py | 1 | 2740 | import pulsar as psr
def run_test():
tester = psr.PyTester("Testing the BasisSet and BasisSetShell")
cGTO = psr.ShellType.CartesianGaussian
sGTO = psr.ShellType.SphericalGaussian
alpha=[3.42525091, 0.62391373, 0.16885540]
c=[0.15432897, 0.53532814, 0.44463454]
FakeD=psr.BasisShellInfo(cGTO,2,3,1,alpha,c)
FakeD2=psr.BasisShellInfo(sGTO,2,3,1,alpha,c)
carts=[0.0,0.0,0.0]
H=psr.create_atom(carts,1)
BI=psr.BasisInfo()
BI.shells=[FakeD,FakeD2]
H.basis_sets={"PRIMARY" :BI }
GhH=psr.make_ghost_atom(H)
Atoms=psr.AtomSetUniverse([H,GhH])
Mol=psr.System(Atoms,True)
BS=Mol.get_basis_set("PRIMARY")
BS2=psr.BasisSet(BS)
tester.test_equal("Copy constructors work",BS,BS2)
BS3=psr.BasisSet(1,3,3,3)
tester.test_return("Inequality works",True,True,BS3.__ne__,BS2)
tester.test_return("Get types works",True,{cGTO,sGTO},BS.get_types)
tester.test_return("Get n shells",True,4,BS2.n_shell)
tester.test_return("Get n unique shells",True,2,BS.n_unique_shell)
tester.test_return("Get n primitives",True,12,BS2.n_primitives)
tester.test_return("Get n coeficients",True,12,BS2.n_coefficients)
tester.test_return("Get number of functions",True,22,BS.n_functions)
tester.test_return("Maximum number of primitivs",True,3,BS2.max_n_primitives)
tester.test_return("Max angular momentum",True,2,BS2.max_am)
tester.test_return("All angular momentum",True,{2},BS.all_am)
tester.test_return("Max n functions in a shell",True,6,BS2.max_n_functions)
tester.test_return("Shell start",True,6,BS2.shell_start,1)
tester.test_call("Invalid shell start",False,BS2.shell_start,99)
Si,Sj=BS.shell(3),BS.shell(2)
tester.test_return("Shell has right coordinates",True,carts,Si.get_coords)
tester.test_return("Shell has right coordinate",True,carts[1],Si.get_coord,1)
tester.test_call("Get invalid shell",False,BS.shell,99)
tester.test_return("Get unique shell",True,Si,BS.unique_shell,1)
tester.test_call("Get invalid unique shell",False,BS.unique_shell,99)
i=0
for Sk in BS2:
tester.test_equal("Iterator "+str(i),Sk,Si if i%2==1 else Sj)
i=i+1
tester.test_return("Get valid shell info",True,FakeD,BS2.shell_info,0)
tester.test_call("Get invalid shell info",False,FakeD,BS.shell_info,99)
BS4=psr.BasisSet(1,3,3,3)
tester.test_return("Add shell that fits",True,None,BS4.add_shell,FakeD,carts)
BS3.add_shell(FakeD,carts)
tester.test_return("Shrink to fit",True,BS3,BS4.shrink_fit)
tester.test_call("Add shell no fit",False,BS4.add_shell,FakeD2,carts)
tester.test_return("Hash BS",True,BS.my_hash(),BS2.my_hash)
tester.print_results()
return tester.nfailed()
| bsd-3-clause | 3,566,754,463,415,835,600 | 805,483,804,003,860,500 | 43.193548 | 81 | 0.69854 | false |
jimberlage/servo | components/script/dom/bindings/codegen/parser/tests/test_attr_sequence_type.py | 276 | 1626 | def WebIDLTest(parser, harness):
threw = False
try:
parser.parse("""
interface AttrSequenceType {
attribute sequence<object> foo;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Attribute type must not be a sequence type")
parser.reset()
threw = False
try:
parser.parse("""
interface AttrUnionWithSequenceType {
attribute (sequence<object> or DOMString) foo;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Attribute type must not be a union with a sequence member type")
parser.reset()
threw = False
try:
parser.parse("""
interface AttrNullableUnionWithSequenceType {
attribute (sequence<object>? or DOMString) foo;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Attribute type must not be a union with a nullable sequence "
"member type")
parser.reset()
threw = False
try:
parser.parse("""
interface AttrUnionWithUnionWithSequenceType {
attribute ((sequence<object> or DOMString) or AttrUnionWithUnionWithSequenceType) foo;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Attribute type must not be a union type with a union member "
"type that has a sequence member type")
| mpl-2.0 | 3,328,737,822,950,967,300 | 3,087,464,604,245,165,600 | 23.268657 | 100 | 0.54551 | false |
WillianPaiva/1flow | oneflow/core/migrations/0097_auto__add_field_twitterfeed_backfill_completed.py | 2 | 54712 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'TwitterFeed.backfill_completed'
db.add_column(u'core_twitterfeed', 'backfill_completed',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'TwitterFeed.backfill_completed'
db.delete_column(u'core_twitterfeed', 'backfill_completed')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'base.user': {
'Meta': {'object_name': 'User'},
'address_book': ('json_field.fields.JSONField', [], {'default': '[]', 'blank': 'True'}),
'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'avatar_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}),
'email_announcements': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
'hash_codes': ('jsonfield.fields.JSONField', [], {'default': "{'unsubscribe': '1e102a7faa5a4d499ad1ac93b04bf0fa'}", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'register_data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'sent_emails': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.article': {
'Meta': {'object_name': 'Article', '_ormbases': ['core.BaseItem']},
u'baseitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseItem']", 'unique': 'True', 'primary_key': 'True'}),
'comments_feed_url': ('django.db.models.fields.URLField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'is_orphaned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publishers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'publications'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['base.User']"}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '512'}),
'url_absolute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'word_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'core.author': {
'Meta': {'unique_together': "(('origin_name', 'website'),)", 'object_name': 'Author'},
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Author']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_unsure': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '7168', 'null': 'True', 'blank': 'True'}),
'origin_name': ('django.db.models.fields.CharField', [], {'max_length': '7168', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'authors'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['base.User']"}),
'website': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.WebSite']", 'null': 'True', 'blank': 'True'})
},
'core.baseaccount': {
'Meta': {'object_name': 'BaseAccount'},
'conn_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_last_conn': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_usable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.baseaccount_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'accounts'", 'to': u"orm['base.User']"})
},
'core.basefeed': {
'Meta': {'object_name': 'BaseFeed'},
'closed_reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_last_fetch': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.BaseFeed']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'errors': ('json_field.fields.JSONField', [], {'default': '[]', 'blank': 'True'}),
'fetch_interval': ('django.db.models.fields.IntegerField', [], {'default': '43200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_good': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_internal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'feeds'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.BaseItem']"}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'options': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.basefeed_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'thumbnail_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'feeds'", 'null': 'True', 'to': u"orm['base.User']"})
},
'core.baseitem': {
'Meta': {'object_name': 'BaseItem'},
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'authored_items'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Author']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'date_published': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'default_rating': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.BaseItem']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'origin': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.baseitem_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'sources': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'sources_rel_+'", 'null': 'True', 'to': "orm['core.BaseItem']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'text_direction': ('django.db.models.fields.CharField', [], {'default': "u'ltr'", 'max_length': '3', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'blank': 'True'})
},
'core.combinedfeed': {
'Meta': {'object_name': 'CombinedFeed'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"})
},
'core.combinedfeedrule': {
'Meta': {'ordering': "('position',)", 'object_name': 'CombinedFeedRule'},
'check_error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'clone_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailFeedRule']", 'null': 'True', 'blank': 'True'}),
'combinedfeed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.CombinedFeed']"}),
'feeds': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.BaseFeed']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'core.folder': {
'Meta': {'unique_together': "(('name', 'user', 'parent'),)", 'object_name': 'Folder'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.Folder']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'folders'", 'to': u"orm['base.User']"})
},
'core.helpcontent': {
'Meta': {'ordering': "['ordering', 'id']", 'object_name': 'HelpContent'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_en': ('django.db.models.fields.TextField', [], {}),
'content_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'name_nt': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'core.helpwizards': {
'Meta': {'object_name': 'HelpWizards'},
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'wizards'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'show_all': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'welcome_beta_shown': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.historyentry': {
'Meta': {'object_name': 'HistoryEntry'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.historyentry_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"})
},
'core.homepreferences': {
'Meta': {'object_name': 'HomePreferences'},
'experimental_features': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'home'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'read_shows': ('django.db.models.fields.IntegerField', [], {'default': '2', 'blank': 'True'}),
'show_advanced_preferences': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'style': ('django.db.models.fields.CharField', [], {'default': "u'RL'", 'max_length': '2', 'blank': 'True'})
},
'core.language': {
'Meta': {'object_name': 'Language'},
'dj_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '16'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iso639_1': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'iso639_2': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'iso639_3': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.Language']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'core.mailaccount': {
'Meta': {'object_name': 'MailAccount', '_ormbases': ['core.BaseAccount']},
u'baseaccount_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseAccount']", 'unique': 'True', 'primary_key': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'password': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'use_ssl': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'core.mailfeed': {
'Meta': {'object_name': 'MailFeed', '_ormbases': ['core.BaseFeed']},
'account': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'mail_feeds'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.MailAccount']"}),
u'basefeed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseFeed']", 'unique': 'True', 'primary_key': 'True'}),
'finish_action': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'match_action': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'rules_operation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'scrape_blacklist': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'scrape_whitelist': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'})
},
'core.mailfeedrule': {
'Meta': {'ordering': "('group', 'position')", 'object_name': 'MailFeedRule'},
'check_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'clone_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailFeedRule']", 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'group_operation': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'header_field': ('django.db.models.fields.IntegerField', [], {'default': '4', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mailfeed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rules'", 'to': "orm['core.MailFeed']"}),
'match_case': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'match_type': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'match_value': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'other_header': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'core.nodepermissions': {
'Meta': {'object_name': 'NodePermissions'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.SyncNode']", 'null': 'True', 'blank': 'True'}),
'permission': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'5e39177b7d51400ab75eda55d353bae8'", 'max_length': '32', 'blank': 'True'})
},
'core.originaldata': {
'Meta': {'object_name': 'OriginalData'},
'feedparser': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feedparser_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'google_reader': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'google_reader_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'item': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'original_data'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.BaseItem']"}),
'raw_email': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'raw_email_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'twitter': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'twitter_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.preferences': {
'Meta': {'object_name': 'Preferences'},
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['base.User']", 'unique': 'True', 'primary_key': 'True'})
},
'core.read': {
'Meta': {'unique_together': "(('user', 'item'),)", 'object_name': 'Read'},
'bookmark_type': ('django.db.models.fields.CharField', [], {'default': "u'U'", 'max_length': '2'}),
'check_set_subscriptions_131004_done': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'date_analysis': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_archived': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_auto_read': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_bookmarked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'date_fact': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_fun': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_knowhow': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_knowledge': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_number': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_prospective': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_quote': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_read': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_rules': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_starred': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_analysis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_auto_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_bookmarked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_fact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_fun': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_good': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_knowhow': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_knowledge': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_number': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_prospective': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_quote': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_read': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_rules': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_starred': ('django.db.models.fields.NullBooleanField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reads'", 'to': "orm['core.BaseItem']"}),
'knowledge_type': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'rating': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'senders': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'reads_sent'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['base.User']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'all_reads'", 'to': u"orm['base.User']"})
},
'core.readpreferences': {
'Meta': {'object_name': 'ReadPreferences'},
'auto_mark_read_delay': ('django.db.models.fields.IntegerField', [], {'default': '4500', 'blank': 'True'}),
'bookmarked_marks_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'bookmarked_marks_unread': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'read'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'read_switches_to_fullscreen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'reading_speed': ('django.db.models.fields.IntegerField', [], {'default': '200', 'blank': 'True'}),
'show_bottom_navbar': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'starred_marks_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'starred_marks_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'starred_removes_bookmarked': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'watch_attributes_mark_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.rssatomfeed': {
'Meta': {'object_name': 'RssAtomFeed', '_ormbases': ['core.BaseFeed']},
u'basefeed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseFeed']", 'unique': 'True', 'primary_key': 'True'}),
'last_etag': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '512'}),
'website': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'feeds'", 'null': 'True', 'to': "orm['core.WebSite']"})
},
'core.selectorpreferences': {
'Meta': {'object_name': 'SelectorPreferences'},
'extended_folders_depth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'folders_show_unread_count': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'lists_show_unread_count': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'selector'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'show_closed_streams': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subscriptions_in_multiple_folders': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'titles_show_unread_count': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.sharepreferences': {
'Meta': {'object_name': 'SharePreferences'},
'default_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'share'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"})
},
'core.simpletag': {
'Meta': {'unique_together': "(('name', 'language'),)", 'object_name': 'SimpleTag'},
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'origin_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'origin_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.SimpleTag']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'core.snappreferences': {
'Meta': {'object_name': 'SnapPreferences'},
'default_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'snap'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'select_paragraph': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.staffpreferences': {
'Meta': {'object_name': 'StaffPreferences'},
'no_home_redirect': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'staff'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'reading_lists_show_bad_articles': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'selector_shows_admin_links': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'super_powers_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'core.subscription': {
'Meta': {'unique_together': "(('feed', 'user'),)", 'object_name': 'Subscription'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscriptions'", 'blank': 'True', 'to': "orm['core.BaseFeed']"}),
'folders': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Folder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reads': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Read']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'thumbnail_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'all_subscriptions'", 'blank': 'True', 'to': u"orm['base.User']"})
},
'core.syncnode': {
'Meta': {'object_name': 'SyncNode'},
'broadcast': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_last_seen': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_local_instance': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'local_token': ('django.db.models.fields.CharField', [], {'default': "'f5031ed8ca344beb96c4544be478c632'", 'max_length': '32', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'permission': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'remote_token': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '-1', 'blank': 'True'}),
'strategy': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'sync_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'uri': ('django.db.models.fields.CharField', [], {'max_length': '384', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'core.tweet': {
'Meta': {'object_name': 'Tweet', '_ormbases': ['core.BaseItem']},
u'baseitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseItem']", 'unique': 'True', 'primary_key': 'True'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tweet_id': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'unique': 'True', 'blank': 'True'})
},
'core.twitteraccount': {
'Meta': {'object_name': 'TwitterAccount', '_ormbases': ['core.BaseAccount']},
u'baseaccount_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseAccount']", 'unique': 'True', 'primary_key': 'True'}),
'fetch_owned_lists': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'fetch_subscribed_lists': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'social_auth': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'twitter_account'", 'unique': 'True', 'to': u"orm['default.UserSocialAuth']"}),
'timeline': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'twitter_account'", 'unique': 'True', 'null': 'True', 'to': "orm['core.TwitterFeed']"})
},
'core.twitterfeed': {
'Meta': {'object_name': 'TwitterFeed', '_ormbases': ['core.BaseFeed']},
'account': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'twitter_feeds'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.TwitterAccount']"}),
'backfill_completed': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'basefeed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseFeed']", 'unique': 'True', 'primary_key': 'True'}),
'finish_action': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'is_backfilled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_timeline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'match_action': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'rules_operation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'scrape_blacklist': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'scrape_whitelist': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'track_locations': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'track_terms': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'uri': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'core.twitterfeedrule': {
'Meta': {'ordering': "('group', 'position')", 'object_name': 'TwitterFeedRule'},
'check_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'clone_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.TwitterFeedRule']", 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'group_operation': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'match_case': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'match_field': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'match_type': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'match_value': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'other_field': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'twitterfeed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rules'", 'to': "orm['core.TwitterFeed']"})
},
'core.usercounters': {
'Meta': {'object_name': 'UserCounters'},
'placeholder': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user_counters'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['base.User']"})
},
'core.userfeeds': {
'Meta': {'object_name': 'UserFeeds'},
'blogs': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.BaseFeed']", 'null': 'True', 'blank': 'True'}),
'imported_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'imported_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"}),
'received_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'received_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"}),
'sent_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'sent_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user_feeds'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['base.User']"}),
'written_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'written_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"})
},
'core.userimport': {
'Meta': {'object_name': 'UserImport', '_ormbases': ['core.HistoryEntry']},
'date_finished': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'historyentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.HistoryEntry']", 'unique': 'True', 'primary_key': 'True'}),
'lines': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'results': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'urls': ('django.db.models.fields.TextField', [], {})
},
'core.usersubscriptions': {
'Meta': {'object_name': 'UserSubscriptions'},
'blogs': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'blogs'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Subscription']"}),
'imported_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'imported_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"}),
'received_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'received_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"}),
'sent_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'sent_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user_subscriptions'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['base.User']"}),
'written_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'written_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"})
},
'core.website': {
'Meta': {'object_name': 'WebSite'},
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.WebSite']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'fetch_limit_nr': ('django.db.models.fields.IntegerField', [], {'default': '16', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'mail_warned': ('json_field.fields.JSONField', [], {'default': '[]', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.WebSite']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200', 'blank': 'True'})
},
u'default.usersocialauth': {
'Meta': {'unique_together': "(('provider', 'uid'),)", 'object_name': 'UserSocialAuth', 'db_table': "'social_auth_usersocialauth'"},
'extra_data': ('social.apps.django_app.default.fields.JSONField', [], {'default': "'{}'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'social_auth'", 'to': u"orm['base.User']"})
}
}
complete_apps = ['core'] | agpl-3.0 | -3,451,325,340,316,823,600 | -8,288,977,866,609,419,000 | 96.527629 | 226 | 0.556423 | false |
geekboxzone/lollipop_external_chromium_org_third_party_WebKit | Tools/Scripts/webkitpy/common/find_files.py | 181 | 3872 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This module is used to find files used by run-webkit-tests and
perftestrunner. It exposes one public function - find() - which takes
an optional list of paths, optional set of skipped directories and optional
filter callback.
If a list is passed in, the returned list of files is constrained to those
found under the paths passed in; i.e., calling find(["LayoutTests/fast"])
will only return files under that directory.
If a set of skipped directories is passed in, the function will filter out
the files lying in these directories; i.e., find(["LayoutTests"], set(["fast"]))
will return everything except files in the fast subfolder.
If a callback is passed in, it will be called for each file, and the file
will be included in the result if the callback returns True.
The callback has to take three arguments: filesystem, dirname and filename."""
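# A minimal usage sketch (the FileSystem import path and the directories below
# are assumptions for illustration, not guaranteed by this module):
#
#   from webkitpy.common.system.filesystem import FileSystem
#   fs = FileSystem()
#   tests = find(fs, 'LayoutTests', paths=['fast'],
#                skipped_directories=set(['resources', '.svn']),
#                file_filter=lambda fs, dirname, name: name.endswith('.html'))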
import itertools
def find(filesystem, base_dir, paths=None, skipped_directories=None, file_filter=None, directory_sort_key=None):
"""Finds the set of tests under a given list of sub-paths.
Args:
paths: a list of path expressions relative to base_dir
to search. Glob patterns are ok, as are path expressions with
forward slashes on Windows. If paths is empty, we look at
everything under the base_dir.
"""
paths = paths or ['*']
skipped_directories = skipped_directories or set(['.svn', '_svn'])
return _normalized_find(filesystem, _normalize(filesystem, base_dir, paths), skipped_directories, file_filter, directory_sort_key)
def _normalize(filesystem, base_dir, paths):
return [filesystem.normpath(filesystem.join(base_dir, path)) for path in paths]
def _normalized_find(filesystem, paths, skipped_directories, file_filter, directory_sort_key):
"""Finds the set of tests under the list of paths.
Args:
paths: a list of absolute path expressions to search.
Glob patterns are ok.
"""
paths_to_walk = itertools.chain(*(filesystem.glob(path) for path in paths))
def sort_by_directory_key(files_list):
if directory_sort_key:
files_list.sort(key=directory_sort_key)
return files_list
all_files = itertools.chain(*(sort_by_directory_key(filesystem.files_under(path, skipped_directories, file_filter)) for path in paths_to_walk))
return all_files
| bsd-3-clause | 8,995,232,974,955,576,000 | -3,374,413,898,144,083,000 | 45.095238 | 147 | 0.746643 | false |
Yannig/ansible | test/units/module_utils/facts/test_facts.py | 80 | 22585 | # This file is part of Ansible
# -*- coding: utf-8 -*-
#
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import os
import pytest
# for testing
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock, patch
from ansible.module_utils import facts
from ansible.module_utils.facts import hardware
from ansible.module_utils.facts import network
from ansible.module_utils.facts import virtual
class BaseTestFactsPlatform(unittest.TestCase):
platform_id = 'Generic'
fact_class = hardware.base.Hardware
collector_class = None
"""Verify that the automagic in Hardware.__new__ selects the right subclass."""
@patch('platform.system')
def test_new(self, mock_platform):
if not self.fact_class:
pytest.skip('This platform (%s) does not have a fact_class.' % self.platform_id)
mock_platform.return_value = self.platform_id
inst = self.fact_class(module=Mock(), load_on_init=False)
self.assertIsInstance(inst, self.fact_class)
self.assertEqual(inst.platform, self.platform_id)
def test_subclass(self):
if not self.fact_class:
pytest.skip('This platform (%s) does not have a fact_class.' % self.platform_id)
# 'Generic' will try to map to platform.system() that we are not mocking here
if self.platform_id == 'Generic':
return
inst = self.fact_class(module=Mock(), load_on_init=False)
self.assertIsInstance(inst, self.fact_class)
self.assertEqual(inst.platform, self.platform_id)
def test_collector(self):
if not self.collector_class:
pytest.skip('This test class needs to be updated to specify collector_class')
inst = self.collector_class()
self.assertIsInstance(inst, self.collector_class)
self.assertEqual(inst._platform, self.platform_id)
class TestLinuxFactsPlatform(BaseTestFactsPlatform):
platform_id = 'Linux'
fact_class = hardware.linux.LinuxHardware
collector_class = hardware.linux.LinuxHardwareCollector
class TestHurdFactsPlatform(BaseTestFactsPlatform):
platform_id = 'GNU'
fact_class = hardware.hurd.HurdHardware
collector_class = hardware.hurd.HurdHardwareCollector
class TestSunOSHardware(BaseTestFactsPlatform):
platform_id = 'SunOS'
fact_class = hardware.sunos.SunOSHardware
collector_class = hardware.sunos.SunOSHardwareCollector
class TestOpenBSDHardware(BaseTestFactsPlatform):
platform_id = 'OpenBSD'
fact_class = hardware.openbsd.OpenBSDHardware
collector_class = hardware.openbsd.OpenBSDHardwareCollector
class TestFreeBSDHardware(BaseTestFactsPlatform):
platform_id = 'FreeBSD'
fact_class = hardware.freebsd.FreeBSDHardware
collector_class = hardware.freebsd.FreeBSDHardwareCollector
class TestDragonFlyHardware(BaseTestFactsPlatform):
platform_id = 'DragonFly'
fact_class = None
collector_class = hardware.dragonfly.DragonFlyHardwareCollector
class TestNetBSDHardware(BaseTestFactsPlatform):
platform_id = 'NetBSD'
fact_class = hardware.netbsd.NetBSDHardware
collector_class = hardware.netbsd.NetBSDHardwareCollector
class TestAIXHardware(BaseTestFactsPlatform):
platform_id = 'AIX'
fact_class = hardware.aix.AIXHardware
collector_class = hardware.aix.AIXHardwareCollector
class TestHPUXHardware(BaseTestFactsPlatform):
platform_id = 'HP-UX'
fact_class = hardware.hpux.HPUXHardware
collector_class = hardware.hpux.HPUXHardwareCollector
class TestDarwinHardware(BaseTestFactsPlatform):
platform_id = 'Darwin'
fact_class = hardware.darwin.DarwinHardware
collector_class = hardware.darwin.DarwinHardwareCollector
class TestGenericNetwork(BaseTestFactsPlatform):
platform_id = 'Generic'
fact_class = network.base.Network
class TestHurdPfinetNetwork(BaseTestFactsPlatform):
platform_id = 'GNU'
fact_class = network.hurd.HurdPfinetNetwork
collector_class = network.hurd.HurdNetworkCollector
class TestLinuxNetwork(BaseTestFactsPlatform):
platform_id = 'Linux'
fact_class = network.linux.LinuxNetwork
collector_class = network.linux.LinuxNetworkCollector
class TestGenericBsdIfconfigNetwork(BaseTestFactsPlatform):
platform_id = 'Generic_BSD_Ifconfig'
fact_class = network.generic_bsd.GenericBsdIfconfigNetwork
collector_class = None
class TestHPUXNetwork(BaseTestFactsPlatform):
platform_id = 'HP-UX'
fact_class = network.hpux.HPUXNetwork
collector_class = network.hpux.HPUXNetworkCollector
class TestDarwinNetwork(BaseTestFactsPlatform):
platform_id = 'Darwin'
fact_class = network.darwin.DarwinNetwork
collector_class = network.darwin.DarwinNetworkCollector
class TestFreeBSDNetwork(BaseTestFactsPlatform):
platform_id = 'FreeBSD'
fact_class = network.freebsd.FreeBSDNetwork
collector_class = network.freebsd.FreeBSDNetworkCollector
class TestDragonFlyNetwork(BaseTestFactsPlatform):
platform_id = 'DragonFly'
fact_class = network.dragonfly.DragonFlyNetwork
collector_class = network.dragonfly.DragonFlyNetworkCollector
class TestAIXNetwork(BaseTestFactsPlatform):
platform_id = 'AIX'
fact_class = network.aix.AIXNetwork
collector_class = network.aix.AIXNetworkCollector
class TestNetBSDNetwork(BaseTestFactsPlatform):
platform_id = 'NetBSD'
fact_class = network.netbsd.NetBSDNetwork
collector_class = network.netbsd.NetBSDNetworkCollector
class TestOpenBSDNetwork(BaseTestFactsPlatform):
platform_id = 'OpenBSD'
fact_class = network.openbsd.OpenBSDNetwork
collector_class = network.openbsd.OpenBSDNetworkCollector
class TestSunOSNetwork(BaseTestFactsPlatform):
platform_id = 'SunOS'
fact_class = network.sunos.SunOSNetwork
collector_class = network.sunos.SunOSNetworkCollector
class TestLinuxVirtual(BaseTestFactsPlatform):
platform_id = 'Linux'
fact_class = virtual.linux.LinuxVirtual
collector_class = virtual.linux.LinuxVirtualCollector
class TestFreeBSDVirtual(BaseTestFactsPlatform):
platform_id = 'FreeBSD'
fact_class = virtual.freebsd.FreeBSDVirtual
collector_class = virtual.freebsd.FreeBSDVirtualCollector
class TestNetBSDVirtual(BaseTestFactsPlatform):
platform_id = 'NetBSD'
fact_class = virtual.netbsd.NetBSDVirtual
collector_class = virtual.netbsd.NetBSDVirtualCollector
class TestOpenBSDVirtual(BaseTestFactsPlatform):
platform_id = 'OpenBSD'
fact_class = virtual.openbsd.OpenBSDVirtual
collector_class = virtual.openbsd.OpenBSDVirtualCollector
class TestHPUXVirtual(BaseTestFactsPlatform):
platform_id = 'HP-UX'
fact_class = virtual.hpux.HPUXVirtual
collector_class = virtual.hpux.HPUXVirtualCollector
class TestSunOSVirtual(BaseTestFactsPlatform):
platform_id = 'SunOS'
fact_class = virtual.sunos.SunOSVirtual
collector_class = virtual.sunos.SunOSVirtualCollector
LSBLK_OUTPUT = b"""
/dev/sda
/dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0
/dev/sda2 66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK
/dev/mapper/fedora_dhcp129--186-swap eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d
/dev/mapper/fedora_dhcp129--186-root d34cf5e3-3449-4a6c-8179-a1feb2bca6ce
/dev/mapper/fedora_dhcp129--186-home 2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d
/dev/sr0
/dev/loop0 0f031512-ab15-497d-9abd-3a512b4a9390
/dev/loop1 7c1b0f30-cf34-459f-9a70-2612f82b870a
/dev/loop9 0f031512-ab15-497d-9abd-3a512b4a9390
/dev/loop9 7c1b4444-cf34-459f-9a70-2612f82b870a
/dev/mapper/docker-253:1-1050967-pool
/dev/loop2
/dev/mapper/docker-253:1-1050967-pool
"""
LSBLK_OUTPUT_2 = b"""
/dev/sda
/dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0
/dev/sda2 66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK
/dev/mapper/fedora_dhcp129--186-swap eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d
/dev/mapper/fedora_dhcp129--186-root d34cf5e3-3449-4a6c-8179-a1feb2bca6ce
/dev/mapper/fedora_dhcp129--186-home 2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d
/dev/mapper/an-example-mapper with a space in the name 84639acb-013f-4d2f-9392-526a572b4373
/dev/sr0
/dev/loop0 0f031512-ab15-497d-9abd-3a512b4a9390
"""
LSBLK_UUIDS = {'/dev/sda1': '66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK'}
MTAB = """
sysfs /sys sysfs rw,seclabel,nosuid,nodev,noexec,relatime 0 0
proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
devtmpfs /dev devtmpfs rw,seclabel,nosuid,size=8044400k,nr_inodes=2011100,mode=755 0 0
securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
tmpfs /dev/shm tmpfs rw,seclabel,nosuid,nodev 0 0
devpts /dev/pts devpts rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
tmpfs /run tmpfs rw,seclabel,nosuid,nodev,mode=755 0 0
tmpfs /sys/fs/cgroup tmpfs ro,seclabel,nosuid,nodev,noexec,mode=755 0 0
cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 0 0
pstore /sys/fs/pstore pstore rw,seclabel,nosuid,nodev,noexec,relatime 0 0
cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0
cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0
cgroup /sys/fs/cgroup/pids cgroup rw,nosuid,nodev,noexec,relatime,pids 0 0
cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0
cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0
cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0
cgroup /sys/fs/cgroup/hugetlb cgroup rw,nosuid,nodev,noexec,relatime,hugetlb 0 0
cgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0
cgroup /sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio 0 0
configfs /sys/kernel/config configfs rw,relatime 0 0
/dev/mapper/fedora_dhcp129--186-root / ext4 rw,seclabel,relatime,data=ordered 0 0
selinuxfs /sys/fs/selinux selinuxfs rw,relatime 0 0
systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct 0 0
debugfs /sys/kernel/debug debugfs rw,seclabel,relatime 0 0
hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0
tmpfs /tmp tmpfs rw,seclabel 0 0
mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0
/dev/loop0 /var/lib/machines btrfs rw,seclabel,relatime,space_cache,subvolid=5,subvol=/ 0 0
/dev/sda1 /boot ext4 rw,seclabel,relatime,data=ordered 0 0
/dev/mapper/fedora_dhcp129--186-home /home ext4 rw,seclabel,relatime,data=ordered 0 0
tmpfs /run/user/1000 tmpfs rw,seclabel,nosuid,nodev,relatime,size=1611044k,mode=700,uid=1000,gid=1000 0 0
gvfsd-fuse /run/user/1000/gvfs fuse.gvfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
fusectl /sys/fs/fuse/connections fusectl rw,relatime 0 0
grimlock.g.a: /home/adrian/sshfs-grimlock fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
grimlock.g.a:test_path/path_with'single_quotes /home/adrian/sshfs-grimlock-single-quote fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
grimlock.g.a:path_with'single_quotes /home/adrian/sshfs-grimlock-single-quote-2 fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
grimlock.g.a:/mnt/data/foto's /home/adrian/fotos fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0
"""
MTAB_ENTRIES = [
[
'sysfs',
'/sys',
'sysfs',
'rw,seclabel,nosuid,nodev,noexec,relatime',
'0',
'0'
],
['proc', '/proc', 'proc', 'rw,nosuid,nodev,noexec,relatime', '0', '0'],
[
'devtmpfs',
'/dev',
'devtmpfs',
'rw,seclabel,nosuid,size=8044400k,nr_inodes=2011100,mode=755',
'0',
'0'
],
[
'securityfs',
'/sys/kernel/security',
'securityfs',
'rw,nosuid,nodev,noexec,relatime',
'0',
'0'
],
['tmpfs', '/dev/shm', 'tmpfs', 'rw,seclabel,nosuid,nodev', '0', '0'],
[
'devpts',
'/dev/pts',
'devpts',
'rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000',
'0',
'0'
],
['tmpfs', '/run', 'tmpfs', 'rw,seclabel,nosuid,nodev,mode=755', '0', '0'],
[
'tmpfs',
'/sys/fs/cgroup',
'tmpfs',
'ro,seclabel,nosuid,nodev,noexec,mode=755',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/systemd',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd',
'0',
'0'
],
[
'pstore',
'/sys/fs/pstore',
'pstore',
'rw,seclabel,nosuid,nodev,noexec,relatime',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/devices',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,devices',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/freezer',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,freezer',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/memory',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,memory',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/pids',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,pids',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/blkio',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,blkio',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/cpuset',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,cpuset',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/cpu,cpuacct',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,cpu,cpuacct',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/hugetlb',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,hugetlb',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/perf_event',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,perf_event',
'0',
'0'
],
[
'cgroup',
'/sys/fs/cgroup/net_cls,net_prio',
'cgroup',
'rw,nosuid,nodev,noexec,relatime,net_cls,net_prio',
'0',
'0'
],
['configfs', '/sys/kernel/config', 'configfs', 'rw,relatime', '0', '0'],
[
'/dev/mapper/fedora_dhcp129--186-root',
'/',
'ext4',
'rw,seclabel,relatime,data=ordered',
'0',
'0'
],
['selinuxfs', '/sys/fs/selinux', 'selinuxfs', 'rw,relatime', '0', '0'],
[
'systemd-1',
'/proc/sys/fs/binfmt_misc',
'autofs',
'rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct',
'0',
'0'
],
['debugfs', '/sys/kernel/debug', 'debugfs', 'rw,seclabel,relatime', '0', '0'],
[
'hugetlbfs',
'/dev/hugepages',
'hugetlbfs',
'rw,seclabel,relatime',
'0',
'0'
],
['tmpfs', '/tmp', 'tmpfs', 'rw,seclabel', '0', '0'],
['mqueue', '/dev/mqueue', 'mqueue', 'rw,seclabel,relatime', '0', '0'],
[
'/dev/loop0',
'/var/lib/machines',
'btrfs',
'rw,seclabel,relatime,space_cache,subvolid=5,subvol=/',
'0',
'0'
],
['/dev/sda1', '/boot', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'],
# A 'none' fstype
['/dev/sdz3', '/not/a/real/device', 'none', 'rw,seclabel,relatime,data=ordered', '0', '0'],
    # let's assume this is a bind mount
['/dev/sdz4', '/not/a/real/bind_mount', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'],
[
'/dev/mapper/fedora_dhcp129--186-home',
'/home',
'ext4',
'rw,seclabel,relatime,data=ordered',
'0',
'0'
],
[
'tmpfs',
'/run/user/1000',
'tmpfs',
'rw,seclabel,nosuid,nodev,relatime,size=1611044k,mode=700,uid=1000,gid=1000',
'0',
'0'
],
[
'gvfsd-fuse',
'/run/user/1000/gvfs',
'fuse.gvfsd-fuse',
'rw,nosuid,nodev,relatime,user_id=1000,group_id=1000',
'0',
'0'
],
['fusectl', '/sys/fs/fuse/connections', 'fusectl', 'rw,relatime', '0', '0']]
BIND_MOUNTS = ['/not/a/real/bind_mount']
with open(os.path.join(os.path.dirname(__file__), 'fixtures/findmount_output.txt')) as f:
FINDMNT_OUTPUT = f.read()
class TestFactsLinuxHardwareGetMountFacts(unittest.TestCase):
# FIXME: mock.patch instead
def setUp(self):
        # The @timeout decorator tracebacks if GATHER_TIMEOUT is None (the default until get_all_facts sets it via a global)
facts.GATHER_TIMEOUT = 10
def tearDown(self):
facts.GATHER_TIMEOUT = None
    # The Hardware subclasses freak out if instantiated directly, so
    # mock platform.system and instantiate Hardware() so we get a
    # LinuxHardware() we can test.
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._mtab_entries', return_value=MTAB_ENTRIES)
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._find_bind_mounts', return_value=BIND_MOUNTS)
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._lsblk_uuid', return_value=LSBLK_UUIDS)
def test_get_mount_facts(self,
mock_lsblk_uuid,
mock_find_bind_mounts,
mock_mtab_entries):
module = Mock()
# Returns a LinuxHardware-ish
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
        # get_mount_facts() returns a dict of facts about the mounted filesystems
mount_facts = lh.get_mount_facts()
self.assertIsInstance(mount_facts, dict)
self.assertIn('mounts', mount_facts)
self.assertIsInstance(mount_facts['mounts'], list)
self.assertIsInstance(mount_facts['mounts'][0], dict)
@patch('ansible.module_utils.facts.hardware.linux.get_file_content', return_value=MTAB)
def test_get_mtab_entries(self, mock_get_file_content):
module = Mock()
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
mtab_entries = lh._mtab_entries()
self.assertIsInstance(mtab_entries, list)
self.assertIsInstance(mtab_entries[0], list)
self.assertEqual(len(mtab_entries), 38)
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_findmnt', return_value=(0, FINDMNT_OUTPUT, ''))
def test_find_bind_mounts(self, mock_run_findmnt):
module = Mock()
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
bind_mounts = lh._find_bind_mounts()
# If bind_mounts becomes another seq type, feel free to change
self.assertIsInstance(bind_mounts, set)
self.assertEqual(len(bind_mounts), 1)
self.assertIn('/not/a/real/bind_mount', bind_mounts)
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_findmnt', return_value=(37, '', ''))
def test_find_bind_mounts_non_zero(self, mock_run_findmnt):
module = Mock()
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
bind_mounts = lh._find_bind_mounts()
self.assertIsInstance(bind_mounts, set)
self.assertEqual(len(bind_mounts), 0)
def test_find_bind_mounts_no_findmnts(self):
module = Mock()
module.get_bin_path = Mock(return_value=None)
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
bind_mounts = lh._find_bind_mounts()
self.assertIsInstance(bind_mounts, set)
self.assertEqual(len(bind_mounts), 0)
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(0, LSBLK_OUTPUT, ''))
def test_lsblk_uuid(self, mock_run_lsblk):
module = Mock()
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
lsblk_uuids = lh._lsblk_uuid()
self.assertIsInstance(lsblk_uuids, dict)
self.assertIn(b'/dev/loop9', lsblk_uuids)
self.assertIn(b'/dev/sda1', lsblk_uuids)
self.assertEqual(lsblk_uuids[b'/dev/sda1'], b'32caaec3-ef40-4691-a3b6-438c3f9bc1c0')
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(37, LSBLK_OUTPUT, ''))
def test_lsblk_uuid_non_zero(self, mock_run_lsblk):
module = Mock()
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
lsblk_uuids = lh._lsblk_uuid()
self.assertIsInstance(lsblk_uuids, dict)
self.assertEqual(len(lsblk_uuids), 0)
def test_lsblk_uuid_no_lsblk(self):
module = Mock()
module.get_bin_path = Mock(return_value=None)
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
lsblk_uuids = lh._lsblk_uuid()
self.assertIsInstance(lsblk_uuids, dict)
self.assertEqual(len(lsblk_uuids), 0)
@patch('ansible.module_utils.facts.hardware.linux.LinuxHardware._run_lsblk', return_value=(0, LSBLK_OUTPUT_2, ''))
def test_lsblk_uuid_dev_with_space_in_name(self, mock_run_lsblk):
module = Mock()
lh = hardware.linux.LinuxHardware(module=module, load_on_init=False)
lsblk_uuids = lh._lsblk_uuid()
self.assertIsInstance(lsblk_uuids, dict)
self.assertIn(b'/dev/loop0', lsblk_uuids)
self.assertIn(b'/dev/sda1', lsblk_uuids)
self.assertEqual(lsblk_uuids[b'/dev/mapper/an-example-mapper with a space in the name'], b'84639acb-013f-4d2f-9392-526a572b4373')
self.assertEqual(lsblk_uuids[b'/dev/sda1'], b'32caaec3-ef40-4691-a3b6-438c3f9bc1c0')
| gpl-3.0 | 4,369,065,662,000,601,000 | -2,139,435,377,337,007,600 | 34.792393 | 154 | 0.666549 | false |
undp-aprc/undp-alm-old | modules/contrib/proj4js/lib/proj4js/build/build.py | 129 | 3416 | #!/usr/bin/env python
import sys
sys.path.append("../tools")
import mergejs
import optparse
def build(config_file = None, output_file = None, options = None):
have_compressor = []
try:
import jsmin
have_compressor.append("jsmin")
except ImportError:
print "No jsmin"
try:
import closure
have_compressor.append("closure")
except Exception, E:
print "No closure (%s)" % E
try:
import closure_ws
have_compressor.append("closure_ws")
except ImportError:
print "No closure_ws"
try:
import minimize
have_compressor.append("minimize")
except ImportError:
print "No minimize"
use_compressor = None
if options.compressor and options.compressor in have_compressor:
use_compressor = options.compressor
sourceDirectory = "../lib"
configFilename = "library.cfg"
filename = "proj4js-compressed.js"
outputFilename = "../lib/" + filename
if config_file:
configFilename = config_file
extension = configFilename[-4:]
if extension != ".cfg":
configFilename = config_file + ".cfg"
if output_file:
outputFilename = output_file
print "Merging libraries."
merged = mergejs.run(sourceDirectory, None, configFilename)
print "Setting the filename to "+filename
merged = merged.replace('scriptName: "proj4js.js",','scriptName: "'+filename+'",');
print "Compressing using %s" % use_compressor
if use_compressor == "jsmin":
minimized = jsmin.jsmin(merged)
elif use_compressor == "minimize":
minimized = minimize.minimize(merged)
elif use_compressor == "closure_ws":
if len(merged) > 1000000: # The maximum file size for this web service is 1000 KB.
print "\nPre-compressing using jsmin"
merged = jsmin.jsmin(merged)
        print "\nCompressing using the Closure Compiler web service."
try:
minimized = closure_ws.minimize(merged)
except Exception, E:
print "\nAbnormal termination."
sys.exit("ERROR: Closure Compilation using Web service failed!\n%s" % E)
if len(minimized) <= 2:
print "\nAbnormal termination due to compilation errors."
sys.exit("ERROR: Closure Compilation using Web service failed!")
else:
print '\nClosure Compilation using Web service has completed successfully.'
elif use_compressor == "closure":
minimized = closure.minimize(merged)
else: # fallback
minimized = merged
print "Adding license file."
minimized = file("license.txt").read() + minimized
print "Writing to %s." % outputFilename
file(outputFilename, "w").write(minimized)
print "Done."
if __name__ == '__main__':
    opt = optparse.OptionParser(usage="%s [options] [config_file] [output_file]\n Default config_file is 'library.cfg', Default output_file is '../lib/proj4js-compressed.js'")
opt.add_option("-c", "--compressor", dest="compressor", help="compression method: one of 'jsmin', 'minimize', 'closure_ws', 'closure', or 'none'", default="jsmin")
(options, args) = opt.parse_args()
if not len(args):
build(options=options)
elif len(args) == 1:
build(args[0], options=options)
elif len(args) == 2:
build(args[0], args[1], options=options)
else:
print "Wrong number of arguments"
| gpl-2.0 | 2,882,859,465,772,763,600 | 2,042,974,829,307,126,000 | 33.857143 | 165 | 0.633197 | false |
jhaux/tensorflow | tensorflow/contrib/signal/python/kernel_tests/shape_ops_test.py | 23 | 2282 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for shape_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.signal.python.ops import shape_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class FramesTest(test.TestCase):
def test_mapping_of_indices_without_padding(self):
with self.test_session():
tensor = constant_op.constant(np.arange(9152), dtypes.int32)
tensor = array_ops.expand_dims(tensor, 0)
result = shape_ops.frames(tensor, 512, 180)
result = result.eval()
expected = np.tile(np.arange(512), (49, 1))
expected += np.tile(np.arange(49) * 180, (512, 1)).T
expected = np.expand_dims(expected, axis=0)
expected = np.array(expected, dtype=np.int32)
self.assertAllEqual(expected, result)
def test_mapping_of_indices_with_padding(self):
with self.test_session():
tensor = constant_op.constant(np.arange(10000), dtypes.int32)
tensor = array_ops.expand_dims(tensor, 0)
result = shape_ops.frames(tensor, 512, 192)
result = result.eval()
expected = np.tile(np.arange(512), (51, 1))
expected += np.tile(np.arange(51) * 192, (512, 1)).T
expected[expected >= 10000] = 0
expected = np.expand_dims(expected, axis=0)
expected = np.array(expected, dtype=np.int32)
self.assertAllEqual(expected, result)
if __name__ == "__main__":
test.main()
| apache-2.0 | 3,459,324,623,871,604,700 | -900,552,052,469,401,600 | 32.558824 | 80 | 0.678352 | false |
tongwang01/tensorflow | tensorflow/contrib/slim/python/slim/data/parallel_reader.py | 12 | 10476 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements a parallel data reader with queues and optional shuffling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import summary
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import gfile
from tensorflow.python.training import input as tf_input
from tensorflow.python.training import queue_runner
class ParallelReader(io_ops.ReaderBase):
"""Reader class that uses multiple readers in parallel to improve speed.
See ReaderBase for supported methods.
"""
def __init__(self,
reader_class,
common_queue,
num_readers=4,
reader_kwargs=None):
"""ParallelReader creates num_readers instances of the reader_class.
Each instance is created by calling the `reader_class` function passing
the arguments specified in `reader_kwargs` as in:
      reader_class(**reader_kwargs)
When you read from a ParallelReader, with its `read()` method,
you just dequeue examples from the `common_queue`.
The readers will read different files in parallel, asynchronously enqueueing
their output into `common_queue`. The `common_queue.dtypes` must be
[tf.string, tf.string]
Because each reader can read from a different file, the examples in the
`common_queue` could be from different files. Due to the asynchronous
reading there is no guarantee that all the readers will read the same
number of examples.
If the `common_queue` is a shuffling queue, then the examples are shuffled.
Usage:
common_queue = tf.RandomShuffleQueue(
capacity=256,
min_after_dequeue=128,
dtypes=[tf.string, tf.string])
p_reader = ParallelReader(tf.TFRecordReader, common_queue)
common_queue = tf.FIFOQueue(
capacity=256,
dtypes=[tf.string, tf.string])
p_reader = ParallelReader(readers, common_queue, num_readers=2)
Args:
reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader
common_queue: a Queue to hold (key, value pairs) with `dtypes` equal to
[tf.string, tf.string]. Must be one of the data_flow_ops.Queues
instances, ex. `tf.FIFOQueue()`, `tf.RandomShuffleQueue()`, ...
      num_readers: an integer, the number of instances of reader_class to create.
reader_kwargs: an optional dict of kwargs to create the readers.
Raises:
TypeError: if `common_queue.dtypes` is not [tf.string, tf.string].
"""
if len(common_queue.dtypes) != 2:
raise TypeError('common_queue.dtypes must be [tf.string, tf.string]')
for dtype in common_queue.dtypes:
if not dtype.is_compatible_with(tf_dtypes.string):
raise TypeError('common_queue.dtypes must be [tf.string, tf.string]')
reader_kwargs = reader_kwargs or {}
self._readers = [reader_class(**reader_kwargs) for _ in range(num_readers)]
self._common_queue = common_queue
@property
def num_readers(self):
return len(self._readers)
@property
def common_queue(self):
return self._common_queue
def read(self, queue, name=None):
"""Returns the next record (key, value pair) produced by the reader.
The multiple reader instances are all configured to `read()` from the
filenames listed in `queue` and enqueue their output into the `common_queue`
passed to the constructor, and this method returns the next record dequeued
from that `common_queue`.
Readers dequeue a work unit from `queue` if necessary (e.g. when a
reader needs to start reading from a new file since it has finished with
the previous file).
    A queue runner for enqueuing into the `common_queue` is automatically added to
the TF QueueRunners collection.
Args:
queue: A Queue or a mutable string Tensor representing a handle
to a Queue, with string work items.
name: A name for the operation (optional).
Returns:
The next record (i.e. (key, value pair)) from the common_queue.
"""
enqueue_ops = []
for reader in self._readers:
enqueue_ops.append(self._common_queue.enqueue(reader.read(queue)))
queue_runner.add_queue_runner(queue_runner.QueueRunner(
self._common_queue, enqueue_ops))
return self._common_queue.dequeue(name=name)
def num_records_produced(self, name=None):
"""Returns the number of records this reader has produced.
Args:
name: A name for the operation (optional).
Returns:
An int64 Tensor.
"""
num_records = [r.num_records_produced() for r in self._readers]
return math_ops.add_n(num_records, name=name)
def num_work_units_completed(self, name=None):
"""Returns the number of work units this reader has finished processing.
Args:
name: A name for the operation (optional).
Returns:
An int64 Tensor.
"""
num_work_units = [r.num_work_units_completed() for r in self._readers]
return math_ops.add_n(num_work_units, name=name)
def parallel_read(data_sources,
reader_class,
num_epochs=None,
num_readers=4,
reader_kwargs=None,
shuffle=True,
dtypes=None,
capacity=256,
min_after_dequeue=128,
seed=None):
"""Reads multiple records in parallel from data_sources using n readers.
It uses a ParallelReader to read from multiple files in parallel using
  multiple readers created using `reader_class` with `reader_kwargs`.
If shuffle is True the common_queue would be a RandomShuffleQueue otherwise
it would be a FIFOQueue.
Usage:
data_sources = ['path_to/train*']
key, value = parallel_read(data_sources, tf.CSVReader, num_readers=4)
Args:
data_sources: a list/tuple of files or the location of the data, i.e.
/path/to/train@128, /path/to/train* or /tmp/.../train*
reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader
num_epochs: The number of times each data source is read. If left as None,
the data will be cycled through indefinitely.
    num_readers: an integer, the number of Readers to create.
reader_kwargs: an optional dict, of kwargs for the reader.
    shuffle: boolean, whether to shuffle the files and the records by using
RandomShuffleQueue as common_queue.
dtypes: A list of types. The length of dtypes must equal the number
of elements in each record. If it is None it will default to
[tf.string, tf.string] for (key, value).
capacity: integer, capacity of the common_queue.
min_after_dequeue: integer, minimum number of records in the common_queue
after dequeue. Needed for a good shuffle.
seed: A seed for RandomShuffleQueue.
Returns:
key, value: a tuple of keys and values from the data_source.
"""
data_files = get_data_files(data_sources)
with ops.name_scope('parallel_read'):
filename_queue = tf_input.string_input_producer(
data_files, num_epochs=num_epochs, shuffle=shuffle)
dtypes = dtypes or [tf_dtypes.string, tf_dtypes.string]
if shuffle:
common_queue = data_flow_ops.RandomShuffleQueue(
capacity=capacity,
min_after_dequeue=min_after_dequeue,
dtypes=dtypes,
seed=seed)
else:
common_queue = data_flow_ops.FIFOQueue(capacity=capacity, dtypes=dtypes)
summary.scalar('queue/%s/fraction_of_%d_full' %
(common_queue.name, capacity),
math_ops.to_float(common_queue.size()) * (1. / capacity))
return ParallelReader(reader_class,
common_queue,
num_readers=num_readers,
reader_kwargs=reader_kwargs).read(filename_queue)
def single_pass_read(data_sources,
reader_class,
reader_kwargs=None):
"""Reads sequentially the data_sources using the reader, doing a single pass.
Args:
data_sources: a list/tuple of files or the location of the data, i.e.
/path/to/train@128, /path/to/train* or /tmp/.../train*
reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader.
reader_kwargs: an optional dict, of kwargs for the reader.
Returns:
key, value: a tuple of keys and values from the data_source.
"""
data_files = get_data_files(data_sources)
with ops.name_scope('single_pass_read'):
filename_queue = tf_input.string_input_producer(data_files,
num_epochs=1,
shuffle=False,
capacity=1)
reader_kwargs = reader_kwargs or {}
return reader_class(**reader_kwargs).read(filename_queue)
def get_data_files(data_sources):
"""Get data_files from data_sources.
Args:
data_sources: a list/tuple of files or the location of the data, i.e.
/path/to/train@128, /path/to/train* or /tmp/.../train*
Returns:
a list of data_files.
Raises:
    ValueError: if no data files are found
"""
if isinstance(data_sources, (list, tuple)):
data_files = []
for source in data_sources:
data_files += get_data_files(source)
else:
if '*' in data_sources or '?' in data_sources or '[' in data_sources:
data_files = gfile.Glob(data_sources)
else:
data_files = [data_sources]
if not data_files:
    raise ValueError('No data files found in %s' % data_sources)
return data_files
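# Hedged illustration (not part of the original module): get_data_files
# expands glob patterns and flattens nested lists, e.g.
#
#   get_data_files('/tmp/data/train*')
#       -> ['/tmp/data/train-00000', '/tmp/data/train-00001', ...]
#   get_data_files(['/tmp/a/train*', '/tmp/b/eval.tfrecord'])
#       -> the glob matches from /tmp/a plus '/tmp/b/eval.tfrecord'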
| apache-2.0 | -8,240,341,596,367,252,000 | 3,296,164,661,021,121,000 | 36.281139 | 80 | 0.663135 | false |
bintoro/schematics | tests/test_dict_type.py | 12 | 3771 | from schematics.models import Model
from schematics.types import IntType, StringType
from schematics.types.serializable import serializable
from schematics.types.compound import ModelType, DictType
try:
long
except NameError:
long = int
def test_basic_type():
class PlayerInfo(Model):
categories = DictType(StringType)
info = PlayerInfo(dict(categories={
"math": "math",
"batman": "batman",
}))
assert info.categories["math"] == "math"
d = info.serialize()
assert d == {
"categories": {
"math": "math",
"batman": "batman",
}
}
def test_dict_type_with_model_type():
class CategoryStats(Model):
category_slug = StringType()
total_wins = IntType()
class PlayerInfo(Model):
categories = DictType(ModelType(CategoryStats))
# TODO: Maybe it would be cleaner to have
# DictType(CategoryStats) and implicitly convert to ModelType(CategoryStats)
info = PlayerInfo(dict(categories={
"math": {
"category_slug": "math",
"total_wins": 1
},
"batman": {
"category_slug": "batman",
"total_wins": 3
}
}))
math_stats = CategoryStats({"category_slug": "math", "total_wins": 1})
assert info.categories["math"] == math_stats
d = info.serialize()
assert d == {
"categories": {
"math": {
"category_slug": "math",
"total_wins": 1
},
"batman": {
"category_slug": "batman",
"total_wins": 3
}
}
}
def test_dict_type_with_model_type_init_with_instance():
class ExperienceLevel(Model):
level = IntType()
class CategoryStats(Model):
category_slug = StringType()
total_wins = IntType()
@serializable(type=ModelType(ExperienceLevel))
def xp_level(self):
return ExperienceLevel(dict(level=self.total_wins))
class PlayerInfo(Model):
id = IntType()
categories = DictType(ModelType(CategoryStats))
# TODO: Maybe it would be cleaner to have
# DictType(CategoryStats) and implicitly convert to ModelType(CategoryStats)
math_stats = CategoryStats({
"category_slug": "math",
"total_wins": 1
})
info = PlayerInfo(dict(id=1, categories={
"math": math_stats,
}))
assert info.categories["math"] == math_stats
d = info.serialize()
assert d == {
"id": 1,
"categories": {
"math": {
"category_slug": "math",
"total_wins": 1,
"xp_level": {
"level": 1
}
},
}
}
def test_with_empty():
class CategoryStatsInfo(Model):
slug = StringType()
class PlayerInfo(Model):
categories = DictType(
ModelType(CategoryStatsInfo),
default=lambda: {},
serialize_when_none=True,
)
info = PlayerInfo()
assert info.categories == {}
d = info.serialize()
assert d == {
"categories": {},
}
def test_key_type():
def player_id(value):
return long(value)
class CategoryStatsInfo(Model):
slug = StringType()
class PlayerInfo(Model):
categories = DictType(ModelType(CategoryStatsInfo), coerce_key=player_id)
stats = CategoryStatsInfo({
"slug": "math",
})
info = PlayerInfo({
"categories": {
1: {"slug": "math"}
},
})
assert info.categories == {1: stats}
d = info.serialize()
assert d == {
"categories": {1: {"slug": "math"}}
}
| bsd-3-clause | -7,362,566,174,388,654,000 | -3,851,663,341,355,008,500 | 22.42236 | 90 | 0.537523 | false |
jakevdp/pelican-plugins | sub_parts/sub_parts.py | 59 | 2671 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from pelican import signals
import logging
logger = logging.getLogger(__name__)
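# Hedged note (inferred from the slug handling below, not in the original
# plugin): an article whose slug contains '--', e.g. 'my-series--part-2', is
# treated as a sub-part of the article with slug 'my-series'. The parent
# gains a ``subparts`` list, and the sub-part is removed from the main
# article, date and category listings before being written separately.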
def patch_subparts(generator):
generator.subparts = []
slugs = {}
for article in generator.articles:
slugs[article.slug] = article
if '--' in article.slug:
generator.subparts.append(article)
for article in generator.subparts:
logger.info('sub_part: Detected %s', article.slug)
(pslug, _) = article.slug.rsplit('--', 1)
if pslug in slugs:
parent = slugs[pslug]
if not hasattr(parent, 'subparts'):
parent.subparts = []
parent.subparts.append(article)
article.subpart_of = parent
article.subtitle = article.title
article.title = article.title + ", " + parent.title
generator.dates.remove(article)
generator.articles.remove(article)
if article.category:
for cat, arts in generator.categories:
if cat.name == article.category.name:
arts.remove(article)
break
else:
logger.error(
'sub_part: Cannot remove sub-part from category %s',
article.category)
if (hasattr(article, 'subphotos') or
hasattr(article, 'photo_gallery')):
parent.subphotos = (
getattr(parent, 'subphotos',
len(getattr(parent, 'photo_gallery', []))) +
getattr(article, 'subphotos', 0) +
len(getattr(article, 'photo_gallery', [])))
else:
logger.error('sub_part: No parent for %s', pslug)
generator._update_context(('articles', 'dates', 'subparts'))
def write_subparts(generator, writer):
for article in generator.subparts:
signals.article_generator_write_article.send(generator,
content=article)
writer.write_file(
article.save_as, generator.get_template(article.template),
generator.context, article=article, category=article.category,
override_output=hasattr(article, 'override_save_as'),
relative_urls=generator.settings['RELATIVE_URLS'])
if len(generator.subparts) > 0:
print('sub_part: processed {} sub-parts.'.format(
len(generator.subparts)))
def register():
signals.article_generator_finalized.connect(patch_subparts)
signals.article_writer_finalized.connect(write_subparts)
| agpl-3.0 | 2,437,918,848,599,915,500 | 3,044,148,219,851,633,000 | 39.469697 | 76 | 0.563834 | false |
otherness-space/myProject002 | my_project_002/lib/python2.7/site-packages/django/utils/translation/__init__.py | 110 | 4690 | """
Internationalization support.
"""
from __future__ import unicode_literals
from django.utils.encoding import force_text
from django.utils.functional import lazy
from django.utils import six
__all__ = [
'activate', 'deactivate', 'override', 'deactivate_all',
'get_language', 'get_language_from_request',
'get_language_info', 'get_language_bidi',
'check_for_language', 'to_locale', 'templatize', 'string_concat',
'gettext', 'gettext_lazy', 'gettext_noop',
'ugettext', 'ugettext_lazy', 'ugettext_noop',
'ngettext', 'ngettext_lazy',
'ungettext', 'ungettext_lazy',
'pgettext', 'pgettext_lazy',
'npgettext', 'npgettext_lazy',
]
# Here be dragons, so a short explanation of the logic won't hurt:
# We are trying to solve two problems: (1) access settings, in particular
# settings.USE_I18N, as late as possible, so that modules can be imported
# without having to first configure Django, and (2) if some other code creates
# a reference to one of these functions, don't break that reference when we
# replace the functions with their real counterparts (once we do access the
# settings).
class Trans(object):
"""
The purpose of this class is to store the actual translation function upon
receiving the first call to that function. After this is done, changes to
    USE_I18N will have no effect on which function is served upon request. If
your tests rely on changing USE_I18N, you can delete all the functions
from _trans.__dict__.
Note that storing the function with setattr will have a noticeable
performance effect, as access to the function goes the normal path,
instead of using __getattr__.
"""
def __getattr__(self, real_name):
from django.conf import settings
if settings.USE_I18N:
from django.utils.translation import trans_real as trans
else:
from django.utils.translation import trans_null as trans
setattr(self, real_name, getattr(trans, real_name))
return getattr(trans, real_name)
_trans = Trans()
# The Trans class is no longer needed, so remove it from the namespace.
del Trans
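# Hedged illustration (not part of Django): the first attribute access on
# ``_trans`` resolves and caches the real implementation, e.g.
#
#   from django.utils import translation
#   translation.activate('de')     # first call triggers Trans.__getattr__ and
#                                  # binds trans_real.activate (USE_I18N=True)
#   translation.ugettext('Hello')  # later calls hit the cached attribute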
def gettext_noop(message):
return _trans.gettext_noop(message)
ugettext_noop = gettext_noop
def gettext(message):
return _trans.gettext(message)
def ngettext(singular, plural, number):
return _trans.ngettext(singular, plural, number)
def ugettext(message):
return _trans.ugettext(message)
def ungettext(singular, plural, number):
return _trans.ungettext(singular, plural, number)
def pgettext(context, message):
return _trans.pgettext(context, message)
def npgettext(context, singular, plural, number):
return _trans.npgettext(context, singular, plural, number)
gettext_lazy = lazy(gettext, str)
ngettext_lazy = lazy(ngettext, str)
ugettext_lazy = lazy(ugettext, six.text_type)
ungettext_lazy = lazy(ungettext, six.text_type)
pgettext_lazy = lazy(pgettext, six.text_type)
npgettext_lazy = lazy(npgettext, six.text_type)
def activate(language):
return _trans.activate(language)
def deactivate():
return _trans.deactivate()
class override(object):
def __init__(self, language, deactivate=False):
self.language = language
self.deactivate = deactivate
self.old_language = get_language()
def __enter__(self):
if self.language is not None:
activate(self.language)
else:
deactivate_all()
def __exit__(self, exc_type, exc_value, traceback):
if self.deactivate:
deactivate()
else:
activate(self.old_language)
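# Hedged usage sketch (not part of Django):
#
#   with override('fr'):
#       greeting = ugettext('Hello')  # looked up in the 'fr' catalog
#   # on exit the previously active language is restored, or translation is
#   # deactivated entirely if deactivate=True was passed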
def get_language():
return _trans.get_language()
def get_language_bidi():
return _trans.get_language_bidi()
def check_for_language(lang_code):
return _trans.check_for_language(lang_code)
def to_locale(language):
return _trans.to_locale(language)
def get_language_from_request(request, check_path=False):
return _trans.get_language_from_request(request, check_path)
def get_language_from_path(path):
return _trans.get_language_from_path(path)
def templatize(src, origin=None):
return _trans.templatize(src, origin)
def deactivate_all():
return _trans.deactivate_all()
def _string_concat(*strings):
"""
Lazy variant of string concatenation, needed for translations that are
constructed from multiple parts.
"""
return ''.join([force_text(s) for s in strings])
string_concat = lazy(_string_concat, six.text_type)
def get_language_info(lang_code):
from django.conf.locale import LANG_INFO
try:
return LANG_INFO[lang_code]
except KeyError:
raise KeyError("Unknown language code %r." % lang_code)
| mit | -5,119,789,797,637,577,000 | 333,242,343,111,679,940 | 30.266667 | 78 | 0.697015 | false |
biotrump/xbmc | tools/Fake Episode Maker/main.py | 169 | 2669 | # -*- coding: utf-8 -*-
# Copyright (C) 2008-2013 Team XBMC
# http://xbmc.org
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, see
# <http://www.gnu.org/licenses/>.
#
import urllib
import os
import openAnything
from xml.dom import minidom
def parseShow(seriesID, show_name):
safe_show_name = show_name.replace(":", "")
details_url = "http://thetvdb.com/api/EB49E8B9E78EBEE1/series/"+seriesID+"/all/en.xml"
details = openAnything.fetch(details_url)
details_xml = minidom.parseString(details['data'])
seasons = details_xml.getElementsByTagName("SeasonNumber")
episodes = details_xml.getElementsByTagName("EpisodeNumber")
# check to see if parent show path needs to be made
if not os.access(safe_show_name, os.F_OK):
os.makedirs(safe_show_name)
i = 0
for item in episodes:
season = seasons[i].firstChild.data
episode = item.firstChild.data
filename = safe_show_name+" S"+season+"E"+episode+".avi"
        # see if the season path exists, and make it if not
if os.access(safe_show_name + "\\Season " + season, os.F_OK):
# just go ahead and create the file
file = open(safe_show_name + "\\Season " + season + "\\" + filename, "w")
file.close()
else:
os.makedirs(safe_show_name + "\\Season " + season)
file = open(safe_show_name + "\\Season " + season + "\\" + filename, "w")
file.close()
print "Creating %s" % filename
i = i + 1
show_file = open("shows.txt")
shows = show_file.read().split("\n")
show_file.close()
for item in shows:
show_url = "http://thetvdb.com/api/GetSeries.php?"+urllib.urlencode({"seriesname":item})
print "Building "+item+"..."
show_xml = openAnything.fetch(show_url)
xmldoc = minidom.parseString(show_xml['data'])
node = xmldoc.getElementsByTagName("seriesid")
    if node:
        seriesID = node[0].firstChild.data
        parseShow(seriesID, item)
    else:
        print "Could not find any data for "+item+" on TVDB.\nURL: "+show_url
| gpl-2.0 | 5,702,744,523,779,671,000 | 6,359,982,179,956,817,000 | 39.439394 | 92 | 0.646684 | false |
nguyentu1602/numpy | numpy/testing/tests/test_decorators.py | 66 | 4157 | from __future__ import division, absolute_import, print_function
from numpy.testing import dec, assert_, assert_raises, run_module_suite
from numpy.testing.noseclasses import KnownFailureTest
import nose
def test_slow():
@dec.slow
def slow_func(x, y, z):
pass
assert_(slow_func.slow)
def test_setastest():
@dec.setastest()
def f_default(a):
pass
@dec.setastest(True)
def f_istest(a):
pass
@dec.setastest(False)
def f_isnottest(a):
pass
assert_(f_default.__test__)
assert_(f_istest.__test__)
assert_(not f_isnottest.__test__)
class DidntSkipException(Exception):
pass
def test_skip_functions_hardcoded():
@dec.skipif(True)
def f1(x):
raise DidntSkipException
try:
f1('a')
except DidntSkipException:
raise Exception('Failed to skip')
except nose.SkipTest:
pass
@dec.skipif(False)
def f2(x):
raise DidntSkipException
try:
f2('a')
except DidntSkipException:
pass
except nose.SkipTest:
raise Exception('Skipped when not expected to')
def test_skip_functions_callable():
def skip_tester():
return skip_flag == 'skip me!'
@dec.skipif(skip_tester)
def f1(x):
raise DidntSkipException
try:
skip_flag = 'skip me!'
f1('a')
except DidntSkipException:
raise Exception('Failed to skip')
except nose.SkipTest:
pass
@dec.skipif(skip_tester)
def f2(x):
raise DidntSkipException
try:
skip_flag = 'five is right out!'
f2('a')
except DidntSkipException:
pass
except nose.SkipTest:
raise Exception('Skipped when not expected to')
def test_skip_generators_hardcoded():
@dec.knownfailureif(True, "This test is known to fail")
def g1(x):
for i in range(x):
yield i
try:
for j in g1(10):
pass
except KnownFailureTest:
pass
else:
raise Exception('Failed to mark as known failure')
@dec.knownfailureif(False, "This test is NOT known to fail")
def g2(x):
for i in range(x):
yield i
raise DidntSkipException('FAIL')
try:
for j in g2(10):
pass
except KnownFailureTest:
        raise Exception('Marked incorrectly as known failure')
except DidntSkipException:
pass
def test_skip_generators_callable():
def skip_tester():
return skip_flag == 'skip me!'
@dec.knownfailureif(skip_tester, "This test is known to fail")
def g1(x):
for i in range(x):
yield i
try:
skip_flag = 'skip me!'
for j in g1(10):
pass
except KnownFailureTest:
pass
else:
raise Exception('Failed to mark as known failure')
@dec.knownfailureif(skip_tester, "This test is NOT known to fail")
def g2(x):
for i in range(x):
yield i
raise DidntSkipException('FAIL')
try:
skip_flag = 'do not skip'
for j in g2(10):
pass
except KnownFailureTest:
        raise Exception('Marked incorrectly as known failure')
except DidntSkipException:
pass
def test_deprecated():
@dec.deprecated(True)
def non_deprecated_func():
pass
@dec.deprecated()
def deprecated_func():
import warnings
warnings.warn("TEST: deprecated func", DeprecationWarning)
@dec.deprecated()
def deprecated_func2():
import warnings
warnings.warn("AHHHH")
raise ValueError
@dec.deprecated()
def deprecated_func3():
import warnings
warnings.warn("AHHHH")
# marked as deprecated, but does not raise DeprecationWarning
assert_raises(AssertionError, non_deprecated_func)
# should be silent
deprecated_func()
# fails if deprecated decorator just disables test. See #1453.
assert_raises(ValueError, deprecated_func2)
# first warnings is not a DeprecationWarning
assert_raises(AssertionError, deprecated_func3)
if __name__ == '__main__':
run_module_suite()
| bsd-3-clause | -4,160,371,393,482,290,700 | 7,174,935,412,066,380,000 | 21.840659 | 71 | 0.606928 | false |
tuhangdi/django | tests/model_validation/tests.py | 292 | 2117 | from django.core import management
from django.core.checks import Error, run_checks
from django.db.models.signals import post_init
from django.test import SimpleTestCase
from django.test.utils import override_settings
from django.utils import six
class OnPostInit(object):
def __call__(self, **kwargs):
pass
def on_post_init(**kwargs):
pass
@override_settings(
INSTALLED_APPS=['django.contrib.auth', 'django.contrib.contenttypes'],
SILENCED_SYSTEM_CHECKS=['fields.W342'], # ForeignKey(unique=True)
)
class ModelValidationTest(SimpleTestCase):
def test_models_validate(self):
# All our models should validate properly
# Validation Tests:
# * choices= Iterable of Iterables
# See: https://code.djangoproject.com/ticket/20430
# * related_name='+' doesn't clash with another '+'
# See: https://code.djangoproject.com/ticket/21375
management.call_command("check", stdout=six.StringIO())
def test_model_signal(self):
unresolved_references = post_init.unresolved_references.copy()
post_init.connect(on_post_init, sender='missing-app.Model')
post_init.connect(OnPostInit(), sender='missing-app.Model')
errors = run_checks()
expected = [
Error(
"The 'on_post_init' function was connected to the 'post_init' "
"signal with a lazy reference to the 'missing-app.Model' "
"sender, which has not been installed.",
hint=None,
obj='model_validation.tests',
id='signals.E001',
),
Error(
"An instance of the 'OnPostInit' class was connected to "
"the 'post_init' signal with a lazy reference to the "
"'missing-app.Model' sender, which has not been installed.",
hint=None,
obj='model_validation.tests',
id='signals.E001',
)
]
self.assertEqual(errors, expected)
post_init.unresolved_references = unresolved_references
| bsd-3-clause | -7,387,698,925,826,223,000 | -2,858,868,890,462,791,000 | 35.5 | 79 | 0.613604 | false |
jphilipsen05/zulip | zproject/local_settings.py | 2 | 5562 | # This file is the Zulip local_settings.py configuration for the
# zulip.com installation of Zulip. It shouldn't be used in other
# environments, but you may find it to be a helpful reference when
# setting up your own Zulip installation to see how Zulip can be
# configured.
#
# On a normal Zulip production server, zproject/local_settings.py is a
# symlink to /etc/zulip/settings.py (based off prod_settings_template.py).
import platform
import six.moves.configparser
from base64 import b64decode
from typing import Set
config_file = six.moves.configparser.RawConfigParser() # type: ignore # https://github.com/python/typeshed/pull/206
config_file.read("/etc/zulip/zulip.conf")
# Whether we're running in a production environment. Note that PRODUCTION does
# **not** mean hosted on Zulip.com; customer sites are PRODUCTION and VOYAGER
# and as such should not assume they are the main Zulip site.
PRODUCTION = config_file.has_option('machine', 'deploy_type')
# The following flags are left over from the various configurations of
# Zulip run by Zulip, Inc. We will eventually be able to get rid of
# them and just have the PRODUCTION flag, but we need them for now.
ZULIP_COM_STAGING = PRODUCTION and config_file.get('machine', 'deploy_type') == 'zulip.com-staging'
ZULIP_COM = ((PRODUCTION and config_file.get('machine', 'deploy_type') == 'zulip.com-prod') or
ZULIP_COM_STAGING)
if not ZULIP_COM:
raise Exception("You should create your own local settings from prod_settings_template.")
ZULIP_FRIENDS_LIST_ID = '84b2f3da6b'
SHARE_THE_LOVE = True
SHOW_OSS_ANNOUNCEMENT = True
REGISTER_LINK_DISABLED = True
CUSTOM_LOGO_URL = "/static/images/logo/zulip-dropbox.png"
VERBOSE_SUPPORT_OFFERS = True
# This can be filled in automatically from the database, maybe
DEPLOYMENT_ROLE_NAME = 'zulip.com'
# XXX: replace me
CAMO_URI = 'https://external-content.zulipcdn.net/'
# Leave EMAIL_HOST unset or empty if you do not wish for emails to be sent
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = '[email protected]'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = "Zulip <[email protected]>"
# The noreply address to be used as Reply-To for certain generated emails.
NOREPLY_EMAIL_ADDRESS = "Zulip <[email protected]>"
WELCOME_EMAIL_SENDER = {'email': '[email protected]', 'name': 'Waseem Daher'}
SESSION_SERIALIZER = "django.contrib.sessions.serializers.PickleSerializer"
REMOTE_POSTGRES_HOST = "postgres.zulip.net"
STATSD_HOST = 'stats.zulip.net'
if ZULIP_COM_STAGING:
EXTERNAL_HOST = 'staging.zulip.com'
STATSD_PREFIX = 'staging'
STAGING_ERROR_NOTIFICATIONS = True
SAVE_FRONTEND_STACKTRACES = True
else:
EXTERNAL_HOST = 'zulip.com'
EXTERNAL_API_PATH = 'api.zulip.com'
STATSD_PREFIX = 'app'
# Terms of Service
TERMS_OF_SERVICE = 'corporate/terms.md'
# Major version number (the stuff before the first '.') has to be an integer.
# Users will be asked to re-sign the TOS only when the major version number increases.
# A TOS_VERSION of None has a major version number of -1.
# TOS_VERSION = '1.0'
# FIRST_TIME_TOS_TEMPLATE = 'zulipchat_migration_tos.html'
# Buckets used for Amazon S3 integration for storing files and user avatars.
S3_AUTH_UPLOADS_BUCKET = "zulip-user-uploads"
S3_AVATAR_BUCKET = "humbug-user-avatars"
APNS_SANDBOX = False
APNS_FEEDBACK = "feedback_production"
APNS_CERT_FILE = "/etc/ssl/django-private/apns-dist.pem"
DBX_APNS_CERT_FILE = "/etc/ssl/django-private/dbx-apns-dist.pem"
GOOGLE_OAUTH2_CLIENT_ID = '835904834568-ag4p18v0sd9a0tero14r3gekn6shoen3.apps.googleusercontent.com'
# The email address pattern to use for auto-generated stream emails
# The %s will be replaced with a unique token.
if ZULIP_COM_STAGING:
EMAIL_GATEWAY_PATTERN = "%[email protected]"
else:
EMAIL_GATEWAY_PATTERN = "%[email protected]"
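# Hedged example (not in the original settings): with a stream token such as
# 'abcd1234', the production pattern above yields the address
# '[email protected]'.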
EMAIL_GATEWAY_EXTRA_PATTERN_HACK = r'@[\w-]*\.zulip\.net'
# Email mirror configuration
# The email of the Zulip bot that the email gateway should post as.
EMAIL_GATEWAY_BOT = "[email protected]"
SSO_APPEND_DOMAIN = None # type: str
AUTHENTICATION_BACKENDS = ('zproject.backends.EmailAuthBackend',
'zproject.backends.GoogleMobileOauth2Backend')
# ALLOWED_HOSTS is used by django to determine which addresses
# Zulip can serve. This is a security measure.
# The following are the zulip.com hosts
ALLOWED_HOSTS = ['localhost', '.humbughq.com', '54.214.48.144', '54.213.44.54',
'54.213.41.54', '54.213.44.58', '54.213.44.73',
'54.200.19.65', '54.201.95.104', '54.201.95.206',
'54.201.186.29', '54.200.111.22',
'54.245.120.64', '54.213.44.83', '.zulip.com', '.zulip.net',
'54.244.50.66', '54.244.50.67', '54.244.50.68', '54.244.50.69', '54.244.50.70',
'54.244.50.64', '54.244.50.65', '54.244.50.74',
'chat.dropboxer.net']
NOTIFICATION_BOT = "[email protected]"
ERROR_BOT = "[email protected]"
NEW_USER_BOT = "[email protected]"
NAGIOS_SEND_BOT = '[email protected]'
NAGIOS_RECEIVE_BOT = '[email protected]'
# Our internal deployment has nagios checks for both staging and prod
NAGIOS_STAGING_SEND_BOT = '[email protected]'
NAGIOS_STAGING_RECEIVE_BOT = '[email protected]'
# Also used for support email in emails templates
ZULIP_ADMINISTRATOR = '[email protected]'
ADMINS = (
('Zulip Error Reports', '[email protected]'),
)
EXTRA_INSTALLED_APPS = [
'analytics',
'zilencer',
'corporate',
]
EVENT_LOGS_ENABLED = True
SYSTEM_ONLY_REALMS = set() # type: Set[str]
| apache-2.0 | 5,148,005,324,754,460,000 | -8,805,705,792,836,062,000 | 37.625 | 116 | 0.714491 | false |
LaoZhongGu/kbengine | kbe/src/lib/python/Tools/scripts/mailerdaemon.py | 97 | 8039 | #!/usr/bin/env python3
"""Classes to parse mailer-daemon messages."""
import calendar
import email.message
import email.utils
import io
import re
import os
import sys
class Unparseable(Exception):
pass
class ErrorMessage(email.message.Message):
def __init__(self):
email.message.Message.__init__(self)
self.sub = ''
def is_warning(self):
sub = self.get('Subject')
if not sub:
return 0
sub = sub.lower()
if sub.startswith('waiting mail'):
return 1
if 'warning' in sub:
return 1
self.sub = sub
return 0
    def get_errors(self):
        # email.message.Message has no rewindbody()/fp (those were rfc822
        # APIs), so rebuild a file-like object from the message body instead.
        body = io.StringIO(self.get_payload())
        for p in EMPARSERS:
            body.seek(0)
            try:
                return p(body, self.sub)
            except Unparseable:
                pass
        raise Unparseable
# List of re's or tuples of re's.
# If a re, it should contain at least a group (?P<email>...) which
# should refer to the email address. The re can also contain a group
# (?P<reason>...) which should refer to the reason (error message).
# If no reason is present, the emparse_list_reason list is used to
# find a reason.
# If a tuple, the tuple should contain 2 re's. The first re finds a
# location, the second re is repeated one or more times to find
# multiple email addresses. The second re is matched (not searched)
# where the previous match ended.
# The re's are compiled using the re module.
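#
# Illustrative example: the banner/recipient tuple below
#     ('----- The following addresses had permanent fatal errors -----\n',
#      '(?P<email>[^ \n].*)\n( .*\n)?')
# first locates the banner line, then captures one bounced address per
# following line until the repeated pattern stops matching.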
emparse_list_list = [
'error: (?P<reason>unresolvable): (?P<email>.+)',
('----- The following addresses had permanent fatal errors -----\n',
'(?P<email>[^ \n].*)\n( .*\n)?'),
'remote execution.*\n.*rmail (?P<email>.+)',
('The following recipients did not receive your message:\n\n',
' +(?P<email>.*)\n(The following recipients did not receive your message:\n\n)?'),
'------- Failure Reasons --------\n\n(?P<reason>.*)\n(?P<email>.*)',
'^<(?P<email>.*)>:\n(?P<reason>.*)',
'^(?P<reason>User mailbox exceeds allowed size): (?P<email>.+)',
'^5\\d{2} <(?P<email>[^\n>]+)>\\.\\.\\. (?P<reason>.+)',
'^Original-Recipient: rfc822;(?P<email>.*)',
'^did not reach the following recipient\\(s\\):\n\n(?P<email>.*) on .*\n +(?P<reason>.*)',
'^ <(?P<email>[^\n>]+)> \\.\\.\\. (?P<reason>.*)',
'^Report on your message to: (?P<email>.*)\nReason: (?P<reason>.*)',
'^Your message was not delivered to +(?P<email>.*)\n +for the following reason:\n +(?P<reason>.*)',
'^ was not +(?P<email>[^ \n].*?) *\n.*\n.*\n.*\n because:.*\n +(?P<reason>[^ \n].*?) *\n',
]
# compile the re's in the list and store them in-place.
for i in range(len(emparse_list_list)):
x = emparse_list_list[i]
if type(x) is type(''):
x = re.compile(x, re.MULTILINE)
else:
xl = []
for x in x:
xl.append(re.compile(x, re.MULTILINE))
x = tuple(xl)
del xl
emparse_list_list[i] = x
del x
del i
# list of re's used to find reasons (error messages).
# if a string, "<>" is replaced by a copy of the email address.
# The expressions are searched for in order. After the first match,
# no more expressions are searched for. So, order is important.
emparse_list_reason = [
r'^5\d{2} <>\.\.\. (?P<reason>.*)',
    r'<>\.\.\. (?P<reason>.*)',
re.compile(r'^<<< 5\d{2} (?P<reason>.*)', re.MULTILINE),
re.compile('===== stderr was =====\nrmail: (?P<reason>.*)'),
re.compile('^Diagnostic-Code: (?P<reason>.*)', re.MULTILINE),
]
emparse_list_from = re.compile('^From:', re.IGNORECASE|re.MULTILINE)
def emparse_list(fp, sub):
data = fp.read()
res = emparse_list_from.search(data)
if res is None:
from_index = len(data)
else:
from_index = res.start(0)
errors = []
emails = []
reason = None
for regexp in emparse_list_list:
if type(regexp) is type(()):
res = regexp[0].search(data, 0, from_index)
if res is not None:
try:
reason = res.group('reason')
except IndexError:
pass
while 1:
res = regexp[1].match(data, res.end(0), from_index)
if res is None:
break
emails.append(res.group('email'))
break
else:
res = regexp.search(data, 0, from_index)
if res is not None:
emails.append(res.group('email'))
try:
reason = res.group('reason')
except IndexError:
pass
break
if not emails:
raise Unparseable
if not reason:
reason = sub
if reason[:15] == 'returned mail: ':
reason = reason[15:]
for regexp in emparse_list_reason:
if type(regexp) is type(''):
for i in range(len(emails)-1,-1,-1):
email = emails[i]
exp = re.compile(re.escape(email).join(regexp.split('<>')), re.MULTILINE)
res = exp.search(data)
if res is not None:
errors.append(' '.join((email.strip()+': '+res.group('reason')).split()))
del emails[i]
continue
res = regexp.search(data)
if res is not None:
reason = res.group('reason')
break
for email in emails:
errors.append(' '.join((email.strip()+': '+reason).split()))
return errors
EMPARSERS = [emparse_list]
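

def _demo_emparse():
    # Illustrative sketch (added example, not part of the original script):
    # run a minimal, fabricated bounce body through emparse_list() and print
    # the extracted "address: reason" strings.
    body = ("----- The following addresses had permanent fatal errors -----\n"
            "[email protected]\n"
            "\n"
            "From: [email protected]\n")
    print(emparse_list(io.StringIO(body), "returned mail: user unknown"))
    # -> ['[email protected]: user unknown']
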
def sort_numeric(a, b):
    # Old-style cmp function, kept for reference; Python 3's list.sort()
    # only accepts a key function, so parsedir() sorts with key=int instead.
    a = int(a)
    b = int(b)
    if a < b:
        return -1
    elif a > b:
        return 1
    else:
        return 0
def parsedir(dir, modify):
os.chdir(dir)
pat = re.compile('^[0-9]*$')
errordict = {}
errorfirst = {}
errorlast = {}
nok = nwarn = nbad = 0
# find all numeric file names and sort them
files = list(filter(lambda fn, pat=pat: pat.match(fn) is not None, os.listdir('.')))
    files.sort(key=int)  # list.sort() in Python 3 takes a key, not a cmp function
for fn in files:
# Lets try to parse the file.
fp = open(fn)
m = email.message_from_file(fp, _class=ErrorMessage)
        sender = email.utils.parseaddr(m.get('From', ''))  # Message has no getaddr()
print('%s\t%-40s\t'%(fn, sender[1]), end=' ')
if m.is_warning():
fp.close()
print('warning only')
nwarn = nwarn + 1
if modify:
os.rename(fn, ','+fn)
## os.unlink(fn)
continue
try:
errors = m.get_errors()
except Unparseable:
print('** Not parseable')
nbad = nbad + 1
fp.close()
continue
print(len(errors), 'errors')
# Remember them
for e in errors:
try:
                mm, dd = email.utils.parsedate(m['date'])[1:3]
date = '%s %02d' % (calendar.month_abbr[mm], dd)
except:
date = '??????'
if e not in errordict:
errordict[e] = 1
errorfirst[e] = '%s (%s)' % (fn, date)
else:
errordict[e] = errordict[e] + 1
errorlast[e] = '%s (%s)' % (fn, date)
fp.close()
nok = nok + 1
if modify:
os.rename(fn, ','+fn)
## os.unlink(fn)
print('--------------')
print(nok, 'files parsed,',nwarn,'files warning-only,', end=' ')
print(nbad,'files unparseable')
print('--------------')
    # use a name other than 'list' so the built-in (called earlier in this
    # function) is not shadowed
    errlist = []
    for e in errordict.keys():
        errlist.append((errordict[e], errorfirst[e], errorlast[e], e))
    errlist.sort()
    for num, first, last, e in errlist:
        print('%d %s - %s\t%s' % (num, first, last, e))
def main():
modify = 0
if len(sys.argv) > 1 and sys.argv[1] == '-d':
modify = 1
del sys.argv[1]
if len(sys.argv) > 1:
for folder in sys.argv[1:]:
parsedir(folder, modify)
else:
parsedir('/ufs/jack/Mail/errorsinbox', modify)
if __name__ == '__main__' or sys.argv[0] == __name__:
main()
| lgpl-3.0 | 315,492,472,956,174,660 | 4,321,658,693,408,564,700 | 31.678862 | 103 | 0.509765 | false |
lidabing/xgyp | Python27/Tools/Scripts/checkappend.py | 100 | 4658 | #! /usr/bin/env python
# Released to the public domain, by Tim Peters, 28 February 2000.
"""checkappend.py -- search for multi-argument .append() calls.
Usage: specify one or more file or directory paths:
checkappend [-v] file_or_dir [file_or_dir] ...
Each file_or_dir is checked for multi-argument .append() calls. When
a directory, all .py files in the directory, and recursively in its
subdirectories, are checked.
Use -v for status msgs. Use -vv for more status msgs.
In the absence of -v, the only output is pairs of the form
filename(linenumber):
line containing the suspicious append
Note that this finds multi-argument append calls regardless of whether
they're attached to list objects. If a module defines a class with an
append method that takes more than one argument, calls to that method
will be listed.
Note that this will not find multi-argument list.append calls made via a
bound method object. For example, this is not caught:
somelist = []
push = somelist.append
push(1, 2, 3)
"""
__version__ = 1, 0, 0
import os
import sys
import getopt
import tokenize
verbose = 0
def errprint(*args):
msg = ' '.join(args)
sys.stderr.write(msg)
sys.stderr.write("\n")
def main():
args = sys.argv[1:]
global verbose
try:
opts, args = getopt.getopt(sys.argv[1:], "v")
except getopt.error, msg:
errprint(str(msg) + "\n\n" + __doc__)
return
for opt, optarg in opts:
if opt == '-v':
verbose = verbose + 1
if not args:
errprint(__doc__)
return
for arg in args:
check(arg)
def check(file):
if os.path.isdir(file) and not os.path.islink(file):
if verbose:
print "%r: listing directory" % (file,)
names = os.listdir(file)
for name in names:
fullname = os.path.join(file, name)
if ((os.path.isdir(fullname) and
not os.path.islink(fullname))
or os.path.normcase(name[-3:]) == ".py"):
check(fullname)
return
try:
f = open(file)
except IOError, msg:
errprint("%r: I/O Error: %s" % (file, msg))
return
if verbose > 1:
print "checking %r ..." % (file,)
ok = AppendChecker(file, f).run()
if verbose and ok:
print "%r: Clean bill of health." % (file,)
[FIND_DOT,
FIND_APPEND,
FIND_LPAREN,
FIND_COMMA,
FIND_STMT] = range(5)
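
# Illustrative walk-through (added note): for "x.append(1, 2)" the checker
# moves FIND_DOT -> FIND_APPEND on ".", then -> FIND_LPAREN on "append",
# -> FIND_COMMA on "(", and a "," seen at parenthesis depth 1 reports the
# call and switches to FIND_STMT until the next NEWLINE.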
class AppendChecker:
def __init__(self, fname, file):
self.fname = fname
self.file = file
self.state = FIND_DOT
self.nerrors = 0
def run(self):
try:
tokenize.tokenize(self.file.readline, self.tokeneater)
except tokenize.TokenError, msg:
errprint("%r: Token Error: %s" % (self.fname, msg))
self.nerrors = self.nerrors + 1
return self.nerrors == 0
def tokeneater(self, type, token, start, end, line,
NEWLINE=tokenize.NEWLINE,
JUNK=(tokenize.COMMENT, tokenize.NL),
OP=tokenize.OP,
NAME=tokenize.NAME):
state = self.state
if type in JUNK:
pass
elif state is FIND_DOT:
if type is OP and token == ".":
state = FIND_APPEND
elif state is FIND_APPEND:
if type is NAME and token == "append":
self.line = line
self.lineno = start[0]
state = FIND_LPAREN
else:
state = FIND_DOT
elif state is FIND_LPAREN:
if type is OP and token == "(":
self.level = 1
state = FIND_COMMA
else:
state = FIND_DOT
elif state is FIND_COMMA:
if type is OP:
if token in ("(", "{", "["):
self.level = self.level + 1
elif token in (")", "}", "]"):
self.level = self.level - 1
if self.level == 0:
state = FIND_DOT
elif token == "," and self.level == 1:
self.nerrors = self.nerrors + 1
print "%s(%d):\n%s" % (self.fname, self.lineno,
self.line)
# don't gripe about this stmt again
state = FIND_STMT
elif state is FIND_STMT:
if type is NEWLINE:
state = FIND_DOT
else:
            raise SystemError("unknown internal state %r" % (state,))
self.state = state
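

def _demo():
    # Illustrative sketch (added example, not part of the original tool):
    # run the checker over an in-memory module containing a two-argument
    # .append() call; the offending line is printed by the checker itself.
    import StringIO
    source = StringIO.StringIO("somelist = []\nsomelist.append(1, 2)\n")
    ok = AppendChecker("<demo>", source).run()
    print "clean" if ok else "multi-argument append reported above"
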
if __name__ == '__main__':
main()
| bsd-3-clause | 6,728,976,105,621,241,000 | 4,104,810,391,488,346,000 | 26.892216 | 72 | 0.536067 | false |
ubuntu/ubuntu-make | tests/small/test_ui.py | 15 | 6281 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Canonical
#
# Authors:
# Didier Roche
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Tests for the generic ui module"""
from concurrent import futures
from gi.repository import GLib
from time import time
from unittest.mock import Mock, patch
from ..tools import LoggedTestCase
import threading
from umake.tools import MainLoop, Singleton
from umake.ui import UI
class TestUI(LoggedTestCase):
"""This will test the UI generic module"""
def setUp(self):
super().setUp()
self.mockUIPlug = Mock()
self.mockUIPlug._display.side_effect = self.display_UIPlug
self.contentType = Mock()
self.ui = UI(self.mockUIPlug)
self.mainloop_object = MainLoop()
self.mainloop_thread = None
self.function_thread = None
self.display_thread = None
self.time_display_call = 0
def tearDown(self):
Singleton._instances = {}
super().tearDown()
# function that will complete once the mainloop is started
def wait_for_mainloop_function(self):
timeout_time = time() + 5
while not self.mainloop_object.mainloop.is_running():
if time() > timeout_time:
                raise BaseException("Mainloop not started in 5 seconds")
def wait_for_mainloop_shutdown(self):
timeout_time = time() + 5
while self.mainloop_object.mainloop.is_running():
if time() > timeout_time:
                raise BaseException("Mainloop not stopped in 5 seconds")
def get_mainloop_thread(self):
self.mainloop_thread = threading.current_thread().ident
def start_glib_mainloop(self):
        # quit after 5 seconds if nothing else has made the mainloop end
GLib.timeout_add_seconds(5, self.mainloop_object.mainloop.quit)
GLib.idle_add(self.get_mainloop_thread)
self.mainloop_object.run()
def display_UIPlug(self, contentType):
"""handler to mock _display and save the current thread"""
self.time_display_call = time()
self.assertEqual(self.contentType, contentType)
self.display_thread = threading.current_thread().ident
self.mainloop_object.quit(raise_exception=False)
def test_singleton(self):
"""Ensure we are delivering a singleton for UI"""
other = UI(self.mockUIPlug)
self.assertEqual(self.ui, other)
def test_return_to_mainscreen(self):
"""We call the return to main screen on the UIPlug"""
UI.return_main_screen()
self.assertTrue(self.mockUIPlug._return_main_screen.called)
@patch("umake.tools.sys")
def test_call_display(self, mocksys):
"""We call the display method from the UIPlug"""
UI.display(self.contentType)
self.start_glib_mainloop()
self.wait_for_mainloop_shutdown()
self.assertTrue(self.mockUIPlug._display.called)
self.assertIsNotNone(self.mainloop_thread)
self.assertIsNotNone(self.display_thread)
self.assertEqual(self.mainloop_thread, self.display_thread)
@patch("umake.tools.sys")
def test_call_display_other_thread(self, mocksys):
"""We call the display method on UIPlug in the main thread from another thread"""
def run_display(future):
self.function_thread = threading.current_thread().ident
UI.display(self.contentType)
executor = futures.ThreadPoolExecutor(max_workers=1)
future = executor.submit(self.wait_for_mainloop_function)
future.add_done_callback(run_display)
self.start_glib_mainloop()
self.wait_for_mainloop_shutdown()
self.assertTrue(self.mockUIPlug._display.called)
self.assertIsNotNone(self.mainloop_thread)
self.assertIsNotNone(self.function_thread)
self.assertIsNotNone(self.display_thread)
self.assertNotEqual(self.mainloop_thread, self.function_thread)
self.assertEqual(self.mainloop_thread, self.display_thread)
@patch("umake.tools.sys")
def test_call_delayed_display(self, mocksys):
"""We call the display method from the UIPlug in delayed_display with 50ms waiting"""
UI.delayed_display(self.contentType)
now = time()
self.start_glib_mainloop()
self.wait_for_mainloop_shutdown()
self.assertTrue(self.mockUIPlug._display.called)
self.assertIsNotNone(self.mainloop_thread)
self.assertIsNotNone(self.display_thread)
self.assertEqual(self.mainloop_thread, self.display_thread)
self.assertTrue(self.time_display_call - now > 0.05)
@patch("umake.tools.sys")
def test_call_delayed_display_from_other_thread(self, mocksys):
"""We call the display method from the UIPlug in delayed_display with 50ms waiting, even on other thread"""
now = 0
def run_display(future):
nonlocal now
self.function_thread = threading.current_thread().ident
now = time()
UI.delayed_display(self.contentType)
executor = futures.ThreadPoolExecutor(max_workers=1)
future = executor.submit(self.wait_for_mainloop_function)
future.add_done_callback(run_display)
self.start_glib_mainloop()
self.wait_for_mainloop_shutdown()
self.assertTrue(self.mockUIPlug._display.called)
self.assertIsNotNone(self.mainloop_thread)
self.assertIsNotNone(self.function_thread)
self.assertIsNotNone(self.display_thread)
self.assertNotEqual(self.mainloop_thread, self.function_thread)
self.assertEqual(self.mainloop_thread, self.display_thread)
self.assertTrue(self.time_display_call - now > 0.05)
| gpl-3.0 | 75,099,059,666,230,530 | -940,996,455,842,165,000 | 38.503145 | 115 | 0.678714 | false |
thorwhalen/ut | pdict/special.py | 1 | 5569 | """Special dicts"""
__author__ = 'thor'
from collections import defaultdict, UserDict
from ut.pdict.get import set_value_in_nested_key_path
val_unlikely_to_be_value_of_dict = (1987654321, 8239080923)
class keydefaultdict(defaultdict):
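    """defaultdict variant whose default_factory is called with the missing
    key (instead of with no arguments).

    Illustrative example:

    >>> d = keydefaultdict(lambda k: k.upper())
    >>> d['abc']
    'ABC'
    """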
def __missing__(self, key):
ret = self[key] = self.default_factory(key)
return ret
class DictDefaultDict(dict):
"""
Acts similarly to collections.defaultdict, except
(1) the defaults depend on the key (given by a dict of key-->default_val at construction)
(2) it is not a function that is called to create the default value (so careful with referenced variables)
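
    Illustrative example:

    >>> d = DictDefaultDict({'a': 0})
    >>> d['a']  # falls back to the default for 'a'
    0
    >>> d['a'] = 7
    >>> d['a']
    7
    >>> d['b']  # no default registered for 'b'
    Traceback (most recent call last):
        ...
    KeyError: 'b'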
"""
def __init__(self, default_dict):
super(DictDefaultDict, self).__init__()
self.default_dict = default_dict
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
return self.default_dict[item]
class KeyPathDict(dict):
"""
NOTE: Might want to check out key_path.py (in https://github.com/i2mint/py2mint/) instead.
A dict where you can get and set values from key_paths (i.e. dot-separated strings or lists of nested keys).
Use with care.
    Some functionality that would be expected from such a dict subclass isn't implemented yet, or is only
    partially implemented.
    Further, operating with a KeyPathDict is slower: one test showed that getting a value was 80 times slower.
    But, to be fair, that was microseconds instead of nanoseconds, so this class can still be useful for
    convenience when it is not in a bottleneck of a process.
>>> input_dict = {
... 'a': {
... 'b': 1,
... 'c': 'val of a.c',
... 'd': [1, 2]
... },
... 'b': {
... 'A': 'val of b.A',
... 'B': {
... 'AA': 'val of b.B.AA'
... }
... },
... 10: 'val for 10',
... '10': 10
... }
>>>
>>> d = KeyPathDict(input_dict)
>>> d
{'a': {'b': 1, 'c': 'val of a.c', 'd': [1, 2]}, 'b': {'A': 'val of b.A', 'B': {'AA': 'val of b.B.AA'}}, 10: 'val for 10', '10': 10}
>>> d.get('a.c')
'val of a.c'
>>> d.get(['a', 'c']) == d['a.c']
True
>>> d[['a', 'c']] == d['a.c']
True
>>> d.get('non.existent.key', 'default')
'default'
>>> d['b.B.AA']
'val of b.B.AA'
>>> d['b.B.AA'] = 3 # assigning another value to EXISTING key path
>>> d['b.B.AA']
3
>>> d['10'] = 0 # assigning another value to EXISTING key path
>>> d['10']
0
>>> d['new_key'] = 7 # assigning another value to new SINGLE key
>>> d['new_key']
7
>>> d['new.key.path'] = 8 # assigning a value to new key path
>>> d['new.key']
{'path': 8}
>>> d['new.key.old.path'] = 9 # assigning a value to new key path, intersecting with another
>>> d['new.key']
{'path': 8, 'old': {'path': 9}}
>>> d['new.key'] = 'something new' # assigning a value to a key (sub-)path that already exists
>>> d['new.key']
'something new'
"""
def get(self, key_path, d=None):
# return get_value_in_key_path(dict(KeyPathDict), key_path, d)
if isinstance(key_path, str):
key_path = key_path.split('.')
if isinstance(key_path, list):
k_length = len(key_path)
            if k_length == 1:
return super(KeyPathDict, self).get(key_path[0], d)
else:
val_so_far = super(KeyPathDict, self).get(key_path[0], d)
for key in key_path[1:]:
if isinstance(val_so_far, dict):
val_so_far = val_so_far.get(key, val_unlikely_to_be_value_of_dict)
if val_so_far == val_unlikely_to_be_value_of_dict:
return d
else:
return d
return val_so_far
else:
return super(KeyPathDict, self).get(key_path, d)
def __getitem__(self, val):
return self.get(val, None)
def __setitem__(self, key_path, val):
"""
Only works with EXISTING key_paths or SINGLE keys
:param key_path:
:param val:
:return:
"""
if isinstance(key_path, str):
key_path = key_path.split('.')
if isinstance(key_path, list):
first_key = key_path[0]
if len(key_path) == 1:
super(KeyPathDict, self).__setitem__(first_key, val)
# self[first_key] = val
else:
                # create the intermediate dict only when missing, then recurse
                if first_key not in self:
                    self[first_key] = {}
                set_value_in_nested_key_path(self[first_key], key_path[1:], val)
else:
super(KeyPathDict, self).__setitem__(key_path, val)
def __contains__(self, key_path):
if isinstance(key_path, str):
key_path = key_path.split('.')
if isinstance(key_path, list):
if len(key_path) == 1:
return super(KeyPathDict, self).__contains__(key_path[0])
else:
                if not super(KeyPathDict, self).__contains__(key_path[0]):
                    # a missing first key would otherwise raise KeyError here
                    return False
                tmp = super(KeyPathDict, self).__getitem__(key_path[0])
for k in key_path[1:]:
if not isinstance(tmp, dict) or k not in tmp:
return False
tmp = tmp[k]
return True
else:
return super(KeyPathDict, self).__contains__(key_path)
| mit | -318,725,029,955,865,100 | 6,469,690,871,460,692,000 | 34.929032 | 135 | 0.514455 | false |
MOA-2011/enigma2.pli4.0 | lib/python/Components/Sources/CurrentService.py | 72 | 1404 | from Components.PerServiceDisplay import PerServiceBase
from enigma import iPlayableService
from Source import Source
from Components.Element import cached
import NavigationInstance
class CurrentService(PerServiceBase, Source):
def __init__(self, navcore):
Source.__init__(self)
PerServiceBase.__init__(self, navcore,
{
iPlayableService.evStart: self.serviceEvent,
iPlayableService.evEnd: self.serviceEvent,
# FIXME: we should check 'interesting_events'
# which is not always provided.
iPlayableService.evUpdatedInfo: self.serviceEvent,
iPlayableService.evUpdatedEventInfo: self.serviceEvent,
iPlayableService.evNewProgramInfo: self.serviceEvent,
iPlayableService.evCuesheetChanged: self.serviceEvent,
iPlayableService.evVideoSizeChanged: self.serviceEvent,
iPlayableService.evHBBTVInfo: self.serviceEvent
}, with_event=True)
self.navcore = navcore
def serviceEvent(self, event):
self.changed((self.CHANGED_SPECIFIC, event))
@cached
def getCurrentService(self):
return self.navcore.getCurrentService()
service = property(getCurrentService)
@cached
def getCurrentServiceRef(self):
if NavigationInstance.instance is not None:
return NavigationInstance.instance.getCurrentlyPlayingServiceOrGroup()
return None
serviceref = property(getCurrentServiceRef)
def destroy(self):
PerServiceBase.destroy(self)
Source.destroy(self)
| gpl-2.0 | -5,608,869,061,453,647,000 | -3,180,817,591,058,310,700 | 30.2 | 73 | 0.788462 | false |
Yelp/elastalert | tests/loaders_test.py | 1 | 18593 | # -*- coding: utf-8 -*-
import copy
import datetime
import os
import mock
import pytest
import elastalert.alerts
import elastalert.ruletypes
from elastalert.config import load_conf
from elastalert.loaders import FileRulesLoader
from elastalert.util import EAException
test_config = {'rules_folder': 'test_folder',
'run_every': {'minutes': 10},
'buffer_time': {'minutes': 10},
'es_host': 'elasticsearch.test',
'es_port': 12345,
'writeback_index': 'test_index',
'writeback_alias': 'test_alias'}
test_rule = {'es_host': 'test_host',
'es_port': 12345,
'name': 'testrule',
'type': 'spike',
'spike_height': 2,
'spike_type': 'up',
'timeframe': {'minutes': 10},
'index': 'test_index',
'query_key': 'testkey',
'compare_key': 'comparekey',
'filter': [{'term': {'key': 'value'}}],
'alert': 'email',
'use_count_query': True,
'doc_type': 'blsh',
'email': '[email protected]',
'aggregation': {'hours': 2},
'include': ['comparekey', '@timestamp']}
test_args = mock.Mock()
test_args.config = 'test_config'
test_args.rule = None
test_args.debug = False
test_args.es_debug_trace = None
def test_import_rules():
rules_loader = FileRulesLoader(test_config)
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy['type'] = 'testing.test.RuleType'
with mock.patch.object(rules_loader, 'load_yaml') as mock_open:
mock_open.return_value = test_rule_copy
# Test that type is imported
with mock.patch('builtins.__import__') as mock_import:
mock_import.return_value = elastalert.ruletypes
rules_loader.load_configuration('test_config', test_config)
assert mock_import.call_args_list[0][0][0] == 'testing.test'
assert mock_import.call_args_list[0][0][3] == ['RuleType']
# Test that alerts are imported
test_rule_copy = copy.deepcopy(test_rule)
mock_open.return_value = test_rule_copy
test_rule_copy['alert'] = 'testing2.test2.Alerter'
with mock.patch('builtins.__import__') as mock_import:
mock_import.return_value = elastalert.alerts
rules_loader.load_configuration('test_config', test_config)
assert mock_import.call_args_list[0][0][0] == 'testing2.test2'
assert mock_import.call_args_list[0][0][3] == ['Alerter']
def test_import_import():
rules_loader = FileRulesLoader(test_config)
import_rule = copy.deepcopy(test_rule)
del(import_rule['es_host'])
del(import_rule['es_port'])
import_rule['import'] = 'importme.ymlt'
import_me = {
'es_host': 'imported_host',
'es_port': 12349,
'email': 'ignored@email', # overwritten by the email in import_rule
}
with mock.patch.object(rules_loader, 'get_yaml') as mock_open:
mock_open.side_effect = [import_rule, import_me]
rules = rules_loader.load_configuration('blah.yaml', test_config)
assert mock_open.call_args_list[0][0] == ('blah.yaml',)
assert mock_open.call_args_list[1][0] == ('importme.ymlt',)
assert len(mock_open.call_args_list) == 2
assert rules['es_port'] == 12349
assert rules['es_host'] == 'imported_host'
assert rules['email'] == ['[email protected]']
assert rules['filter'] == import_rule['filter']
# check global import_rule dependency
assert rules_loader.import_rules == {'blah.yaml': ['importme.ymlt']}
def test_import_absolute_import():
rules_loader = FileRulesLoader(test_config)
import_rule = copy.deepcopy(test_rule)
del(import_rule['es_host'])
del(import_rule['es_port'])
import_rule['import'] = '/importme.ymlt'
import_me = {
'es_host': 'imported_host',
'es_port': 12349,
'email': 'ignored@email', # overwritten by the email in import_rule
}
with mock.patch.object(rules_loader, 'get_yaml') as mock_open:
mock_open.side_effect = [import_rule, import_me]
rules = rules_loader.load_configuration('blah.yaml', test_config)
assert mock_open.call_args_list[0][0] == ('blah.yaml',)
assert mock_open.call_args_list[1][0] == ('/importme.ymlt',)
assert len(mock_open.call_args_list) == 2
assert rules['es_port'] == 12349
assert rules['es_host'] == 'imported_host'
assert rules['email'] == ['[email protected]']
assert rules['filter'] == import_rule['filter']
def test_import_filter():
# Check that if a filter is specified the rules are merged:
rules_loader = FileRulesLoader(test_config)
import_rule = copy.deepcopy(test_rule)
del(import_rule['es_host'])
del(import_rule['es_port'])
import_rule['import'] = 'importme.ymlt'
import_me = {
'es_host': 'imported_host',
'es_port': 12349,
'filter': [{'term': {'ratchet': 'clank'}}],
}
with mock.patch.object(rules_loader, 'get_yaml') as mock_open:
mock_open.side_effect = [import_rule, import_me]
rules = rules_loader.load_configuration('blah.yaml', test_config)
assert rules['filter'] == [{'term': {'ratchet': 'clank'}}, {'term': {'key': 'value'}}]
def test_load_inline_alert_rule():
rules_loader = FileRulesLoader(test_config)
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy['alert'] = [
{
'email': {
'email': '[email protected]'
}
},
{
'email': {
'email': '[email protected]'
}
}
]
test_config_copy = copy.deepcopy(test_config)
with mock.patch.object(rules_loader, 'get_yaml') as mock_open:
mock_open.side_effect = [test_config_copy, test_rule_copy]
rules_loader.load_modules(test_rule_copy)
assert isinstance(test_rule_copy['alert'][0], elastalert.alerts.EmailAlerter)
assert isinstance(test_rule_copy['alert'][1], elastalert.alerts.EmailAlerter)
assert '[email protected]' in test_rule_copy['alert'][0].rule['email']
assert '[email protected]' in test_rule_copy['alert'][1].rule['email']
def test_file_rules_loader_get_names_recursive():
conf = {'scan_subdirectories': True, 'rules_folder': 'root'}
rules_loader = FileRulesLoader(conf)
walk_paths = (('root', ('folder_a', 'folder_b'), ('rule.yaml',)),
('root/folder_a', (), ('a.yaml', 'ab.yaml')),
('root/folder_b', (), ('b.yaml',)))
with mock.patch('os.walk') as mock_walk:
mock_walk.return_value = walk_paths
paths = rules_loader.get_names(conf)
paths = [p.replace(os.path.sep, '/') for p in paths]
assert 'root/rule.yaml' in paths
assert 'root/folder_a/a.yaml' in paths
assert 'root/folder_a/ab.yaml' in paths
assert 'root/folder_b/b.yaml' in paths
assert len(paths) == 4
def test_file_rules_loader_get_names():
# Check for no subdirectory
conf = {'scan_subdirectories': False, 'rules_folder': 'root'}
rules_loader = FileRulesLoader(conf)
files = ['badfile', 'a.yaml', 'b.yaml']
with mock.patch('os.listdir') as mock_list:
with mock.patch('os.path.isfile') as mock_path:
mock_path.return_value = True
mock_list.return_value = files
paths = rules_loader.get_names(conf)
paths = [p.replace(os.path.sep, '/') for p in paths]
assert 'root/a.yaml' in paths
assert 'root/b.yaml' in paths
assert len(paths) == 2
def test_load_rules():
test_rule_copy = copy.deepcopy(test_rule)
test_config_copy = copy.deepcopy(test_config)
with mock.patch('elastalert.config.yaml_loader') as mock_conf_open:
mock_conf_open.return_value = test_config_copy
with mock.patch('elastalert.loaders.yaml_loader') as mock_rule_open:
mock_rule_open.return_value = test_rule_copy
with mock.patch('os.walk') as mock_ls:
mock_ls.return_value = [('', [], ['testrule.yaml'])]
rules = load_conf(test_args)
rules['rules'] = rules['rules_loader'].load(rules)
assert isinstance(rules['rules'][0]['type'], elastalert.ruletypes.RuleType)
assert isinstance(rules['rules'][0]['alert'][0], elastalert.alerts.Alerter)
assert isinstance(rules['rules'][0]['timeframe'], datetime.timedelta)
assert isinstance(rules['run_every'], datetime.timedelta)
for included_key in ['comparekey', 'testkey', '@timestamp']:
assert included_key in rules['rules'][0]['include']
# Assert include doesn't contain duplicates
assert rules['rules'][0]['include'].count('@timestamp') == 1
assert rules['rules'][0]['include'].count('comparekey') == 1
def test_load_default_host_port():
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy.pop('es_host')
test_rule_copy.pop('es_port')
test_config_copy = copy.deepcopy(test_config)
with mock.patch('elastalert.config.yaml_loader') as mock_conf_open:
mock_conf_open.return_value = test_config_copy
with mock.patch('elastalert.loaders.yaml_loader') as mock_rule_open:
mock_rule_open.return_value = test_rule_copy
with mock.patch('os.walk') as mock_ls:
mock_ls.return_value = [('', [], ['testrule.yaml'])]
rules = load_conf(test_args)
rules['rules'] = rules['rules_loader'].load(rules)
# Assert include doesn't contain duplicates
assert rules['es_port'] == 12345
assert rules['es_host'] == 'elasticsearch.test'
def test_load_ssl_env_false():
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy.pop('es_host')
test_rule_copy.pop('es_port')
test_config_copy = copy.deepcopy(test_config)
with mock.patch('elastalert.config.yaml_loader') as mock_conf_open:
mock_conf_open.return_value = test_config_copy
with mock.patch('elastalert.loaders.yaml_loader') as mock_rule_open:
mock_rule_open.return_value = test_rule_copy
with mock.patch('os.listdir') as mock_ls:
with mock.patch.dict(os.environ, {'ES_USE_SSL': 'false'}):
mock_ls.return_value = ['testrule.yaml']
rules = load_conf(test_args)
rules['rules'] = rules['rules_loader'].load(rules)
assert rules['use_ssl'] is False
def test_load_ssl_env_true():
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy.pop('es_host')
test_rule_copy.pop('es_port')
test_config_copy = copy.deepcopy(test_config)
with mock.patch('elastalert.config.yaml_loader') as mock_conf_open:
mock_conf_open.return_value = test_config_copy
with mock.patch('elastalert.loaders.yaml_loader') as mock_rule_open:
mock_rule_open.return_value = test_rule_copy
with mock.patch('os.listdir') as mock_ls:
with mock.patch.dict(os.environ, {'ES_USE_SSL': 'true'}):
mock_ls.return_value = ['testrule.yaml']
rules = load_conf(test_args)
rules['rules'] = rules['rules_loader'].load(rules)
assert rules['use_ssl'] is True
def test_load_url_prefix_env():
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy.pop('es_host')
test_rule_copy.pop('es_port')
test_config_copy = copy.deepcopy(test_config)
with mock.patch('elastalert.config.yaml_loader') as mock_conf_open:
mock_conf_open.return_value = test_config_copy
with mock.patch('elastalert.loaders.yaml_loader') as mock_rule_open:
mock_rule_open.return_value = test_rule_copy
with mock.patch('os.listdir') as mock_ls:
with mock.patch.dict(os.environ, {'ES_URL_PREFIX': 'es/'}):
mock_ls.return_value = ['testrule.yaml']
rules = load_conf(test_args)
rules['rules'] = rules['rules_loader'].load(rules)
assert rules['es_url_prefix'] == 'es/'
def test_load_disabled_rules():
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy['is_enabled'] = False
test_config_copy = copy.deepcopy(test_config)
with mock.patch('elastalert.config.yaml_loader') as mock_conf_open:
mock_conf_open.return_value = test_config_copy
with mock.patch('elastalert.loaders.yaml_loader') as mock_rule_open:
mock_rule_open.return_value = test_rule_copy
with mock.patch('os.listdir') as mock_ls:
mock_ls.return_value = ['testrule.yaml']
rules = load_conf(test_args)
rules['rules'] = rules['rules_loader'].load(rules)
# The rule is not loaded for it has "is_enabled=False"
assert len(rules['rules']) == 0
def test_raises_on_missing_config():
optional_keys = ('aggregation', 'use_count_query', 'query_key', 'compare_key', 'filter', 'include', 'es_host', 'es_port', 'name')
test_rule_copy = copy.deepcopy(test_rule)
for key in list(test_rule_copy.keys()):
test_rule_copy = copy.deepcopy(test_rule)
test_config_copy = copy.deepcopy(test_config)
test_rule_copy.pop(key)
# Non required keys
if key in optional_keys:
continue
with mock.patch('elastalert.config.yaml_loader') as mock_conf_open:
mock_conf_open.return_value = test_config_copy
with mock.patch('elastalert.loaders.yaml_loader') as mock_rule_open:
mock_rule_open.return_value = test_rule_copy
with mock.patch('os.walk') as mock_walk:
mock_walk.return_value = [('', [], ['testrule.yaml'])]
with pytest.raises(EAException, message='key %s should be required' % key):
rules = load_conf(test_args)
rules['rules'] = rules['rules_loader'].load(rules)
def test_compound_query_key():
test_config_copy = copy.deepcopy(test_config)
rules_loader = FileRulesLoader(test_config_copy)
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy.pop('use_count_query')
test_rule_copy['query_key'] = ['field1', 'field2']
rules_loader.load_options(test_rule_copy, test_config, 'filename.yaml')
assert 'field1' in test_rule_copy['include']
assert 'field2' in test_rule_copy['include']
assert test_rule_copy['query_key'] == 'field1,field2'
assert test_rule_copy['compound_query_key'] == ['field1', 'field2']
def test_query_key_with_single_value():
test_config_copy = copy.deepcopy(test_config)
rules_loader = FileRulesLoader(test_config_copy)
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy.pop('use_count_query')
test_rule_copy['query_key'] = ['field1']
rules_loader.load_options(test_rule_copy, test_config, 'filename.yaml')
assert 'field1' in test_rule_copy['include']
assert test_rule_copy['query_key'] == 'field1'
assert 'compound_query_key' not in test_rule_copy
def test_query_key_with_no_values():
test_config_copy = copy.deepcopy(test_config)
rules_loader = FileRulesLoader(test_config_copy)
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy.pop('use_count_query')
test_rule_copy['query_key'] = []
rules_loader.load_options(test_rule_copy, test_config, 'filename.yaml')
assert 'query_key' not in test_rule_copy
assert 'compound_query_key' not in test_rule_copy
def test_name_inference():
test_config_copy = copy.deepcopy(test_config)
rules_loader = FileRulesLoader(test_config_copy)
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy.pop('name')
rules_loader.load_options(test_rule_copy, test_config, 'msmerc woz ere.yaml')
assert test_rule_copy['name'] == 'msmerc woz ere'
def test_raises_on_bad_generate_kibana_filters():
test_rule['generate_kibana_link'] = True
bad_filters = [[{'not': {'terms': {'blah': 'blah'}}}],
[{'terms': {'blah': 'blah'}}],
[{'query': {'not_querystring': 'this:that'}}],
[{'query': {'wildcard': 'this*that'}}],
[{'blah': 'blah'}]]
good_filters = [[{'term': {'field': 'value'}}],
[{'not': {'term': {'this': 'that'}}}],
[{'not': {'query': {'query_string': {'query': 'this:that'}}}}],
[{'query': {'query_string': {'query': 'this:that'}}}],
[{'range': {'blah': {'from': 'a', 'to': 'b'}}}],
[{'not': {'range': {'blah': {'from': 'a', 'to': 'b'}}}}]]
# Test that all the good filters work, but fail with a bad filter added
for good in good_filters:
test_config_copy = copy.deepcopy(test_config)
rules_loader = FileRulesLoader(test_config_copy)
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy['filter'] = good
with mock.patch.object(rules_loader, 'get_yaml') as mock_open:
mock_open.return_value = test_rule_copy
rules_loader.load_configuration('blah', test_config)
for bad in bad_filters:
test_rule_copy['filter'] = good + bad
with pytest.raises(EAException):
rules_loader.load_configuration('blah', test_config)
def test_kibana_discover_from_timedelta():
test_config_copy = copy.deepcopy(test_config)
rules_loader = FileRulesLoader(test_config_copy)
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy['kibana_discover_from_timedelta'] = {'minutes': 2}
rules_loader.load_options(test_rule_copy, test_config, 'filename.yaml')
assert isinstance(test_rule_copy['kibana_discover_from_timedelta'], datetime.timedelta)
assert test_rule_copy['kibana_discover_from_timedelta'] == datetime.timedelta(minutes=2)
def test_kibana_discover_to_timedelta():
test_config_copy = copy.deepcopy(test_config)
rules_loader = FileRulesLoader(test_config_copy)
test_rule_copy = copy.deepcopy(test_rule)
test_rule_copy['kibana_discover_to_timedelta'] = {'minutes': 2}
rules_loader.load_options(test_rule_copy, test_config, 'filename.yaml')
assert isinstance(test_rule_copy['kibana_discover_to_timedelta'], datetime.timedelta)
assert test_rule_copy['kibana_discover_to_timedelta'] == datetime.timedelta(minutes=2)
| apache-2.0 | -8,357,822,607,501,854,000 | -3,346,323,918,417,699,000 | 41.256818 | 133 | 0.60313 | false |
nelmiux/CarnotKE | jyhton/lib-python/2.7/test/test_datetime.py | 72 | 134923 | """Test date/time type.
See http://www.zope.org/Members/fdrake/DateTimeWiki/TestCases
"""
from __future__ import division
import sys
import pickle
import cPickle
import unittest
from test import test_support
from datetime import MINYEAR, MAXYEAR
from datetime import timedelta
from datetime import tzinfo
from datetime import time
from datetime import date, datetime
pickle_choices = [(pickler, unpickler, proto)
for pickler in pickle, cPickle
for unpickler in pickle, cPickle
for proto in range(3)]
assert len(pickle_choices) == 2*2*3
# An arbitrary collection of objects of non-datetime types, for testing
# mixed-type comparisons.
OTHERSTUFF = (10, 10L, 34.5, "abc", {}, [], ())
#############################################################################
# module tests
class TestModule(unittest.TestCase):
def test_constants(self):
import datetime
self.assertEqual(datetime.MINYEAR, 1)
self.assertEqual(datetime.MAXYEAR, 9999)
#############################################################################
# tzinfo tests
class FixedOffset(tzinfo):
def __init__(self, offset, name, dstoffset=42):
if isinstance(offset, int):
offset = timedelta(minutes=offset)
if isinstance(dstoffset, int):
dstoffset = timedelta(minutes=dstoffset)
self.__offset = offset
self.__name = name
self.__dstoffset = dstoffset
def __repr__(self):
return self.__name.lower()
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return self.__dstoffset
class PicklableFixedOffset(FixedOffset):
def __init__(self, offset=None, name=None, dstoffset=None):
FixedOffset.__init__(self, offset, name, dstoffset)
class TestTZInfo(unittest.TestCase):
def test_non_abstractness(self):
# In order to allow subclasses to get pickled, the C implementation
# wasn't able to get away with having __init__ raise
# NotImplementedError.
useless = tzinfo()
dt = datetime.max
self.assertRaises(NotImplementedError, useless.tzname, dt)
self.assertRaises(NotImplementedError, useless.utcoffset, dt)
self.assertRaises(NotImplementedError, useless.dst, dt)
def test_subclass_must_override(self):
class NotEnough(tzinfo):
def __init__(self, offset, name):
self.__offset = offset
self.__name = name
self.assertTrue(issubclass(NotEnough, tzinfo))
ne = NotEnough(3, "NotByALongShot")
self.assertIsInstance(ne, tzinfo)
dt = datetime.now()
self.assertRaises(NotImplementedError, ne.tzname, dt)
self.assertRaises(NotImplementedError, ne.utcoffset, dt)
self.assertRaises(NotImplementedError, ne.dst, dt)
def test_normal(self):
fo = FixedOffset(3, "Three")
self.assertIsInstance(fo, tzinfo)
for dt in datetime.now(), None:
self.assertEqual(fo.utcoffset(dt), timedelta(minutes=3))
self.assertEqual(fo.tzname(dt), "Three")
self.assertEqual(fo.dst(dt), timedelta(minutes=42))
def test_pickling_base(self):
# There's no point to pickling tzinfo objects on their own (they
# carry no data), but they need to be picklable anyway else
# concrete subclasses can't be pickled.
orig = tzinfo.__new__(tzinfo)
self.assertTrue(type(orig) is tzinfo)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertTrue(type(derived) is tzinfo)
def test_pickling_subclass(self):
# Make sure we can pickle/unpickle an instance of a subclass.
offset = timedelta(minutes=-300)
orig = PicklableFixedOffset(offset, 'cookie')
self.assertIsInstance(orig, tzinfo)
self.assertTrue(type(orig) is PicklableFixedOffset)
self.assertEqual(orig.utcoffset(None), offset)
self.assertEqual(orig.tzname(None), 'cookie')
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertIsInstance(derived, tzinfo)
self.assertTrue(type(derived) is PicklableFixedOffset)
self.assertEqual(derived.utcoffset(None), offset)
self.assertEqual(derived.tzname(None), 'cookie')
#############################################################################
# Base class for testing a particular aspect of timedelta, time, date and
# datetime comparisons.
class HarmlessMixedComparison:
# Test that __eq__ and __ne__ don't complain for mixed-type comparisons.
# Subclasses must define 'theclass', and theclass(1, 1, 1) must be a
# legit constructor.
def test_harmless_mixed_comparison(self):
me = self.theclass(1, 1, 1)
self.assertFalse(me == ())
self.assertTrue(me != ())
self.assertFalse(() == me)
self.assertTrue(() != me)
self.assertIn(me, [1, 20L, [], me])
self.assertIn([], [me, 1, 20L, []])
def test_harmful_mixed_comparison(self):
me = self.theclass(1, 1, 1)
self.assertRaises(TypeError, lambda: me < ())
self.assertRaises(TypeError, lambda: me <= ())
self.assertRaises(TypeError, lambda: me > ())
self.assertRaises(TypeError, lambda: me >= ())
self.assertRaises(TypeError, lambda: () < me)
self.assertRaises(TypeError, lambda: () <= me)
self.assertRaises(TypeError, lambda: () > me)
self.assertRaises(TypeError, lambda: () >= me)
self.assertRaises(TypeError, cmp, (), me)
self.assertRaises(TypeError, cmp, me, ())
#############################################################################
# timedelta tests
class TestTimeDelta(HarmlessMixedComparison, unittest.TestCase):
theclass = timedelta
def test_constructor(self):
eq = self.assertEqual
td = timedelta
# Check keyword args to constructor
eq(td(), td(weeks=0, days=0, hours=0, minutes=0, seconds=0,
milliseconds=0, microseconds=0))
eq(td(1), td(days=1))
eq(td(0, 1), td(seconds=1))
eq(td(0, 0, 1), td(microseconds=1))
eq(td(weeks=1), td(days=7))
eq(td(days=1), td(hours=24))
eq(td(hours=1), td(minutes=60))
eq(td(minutes=1), td(seconds=60))
eq(td(seconds=1), td(milliseconds=1000))
eq(td(milliseconds=1), td(microseconds=1000))
# Check float args to constructor
eq(td(weeks=1.0/7), td(days=1))
eq(td(days=1.0/24), td(hours=1))
eq(td(hours=1.0/60), td(minutes=1))
eq(td(minutes=1.0/60), td(seconds=1))
eq(td(seconds=0.001), td(milliseconds=1))
eq(td(milliseconds=0.001), td(microseconds=1))
def test_computations(self):
eq = self.assertEqual
td = timedelta
a = td(7) # One week
b = td(0, 60) # One minute
c = td(0, 0, 1000) # One millisecond
eq(a+b+c, td(7, 60, 1000))
eq(a-b, td(6, 24*3600 - 60))
eq(-a, td(-7))
eq(+a, td(7))
eq(-b, td(-1, 24*3600 - 60))
eq(-c, td(-1, 24*3600 - 1, 999000))
eq(abs(a), a)
eq(abs(-a), a)
eq(td(6, 24*3600), a)
eq(td(0, 0, 60*1000000), b)
eq(a*10, td(70))
eq(a*10, 10*a)
eq(a*10L, 10*a)
eq(b*10, td(0, 600))
eq(10*b, td(0, 600))
eq(b*10L, td(0, 600))
eq(c*10, td(0, 0, 10000))
eq(10*c, td(0, 0, 10000))
eq(c*10L, td(0, 0, 10000))
eq(a*-1, -a)
eq(b*-2, -b-b)
eq(c*-2, -c+-c)
eq(b*(60*24), (b*60)*24)
eq(b*(60*24), (60*b)*24)
eq(c*1000, td(0, 1))
eq(1000*c, td(0, 1))
eq(a//7, td(1))
eq(b//10, td(0, 6))
eq(c//1000, td(0, 0, 1))
eq(a//10, td(0, 7*24*360))
eq(a//3600000, td(0, 0, 7*24*1000))
# Issue #11576
eq(td(999999999, 86399, 999999) - td(999999999, 86399, 999998),
td(0, 0, 1))
eq(td(999999999, 1, 1) - td(999999999, 1, 0),
td(0, 0, 1))
def test_disallowed_computations(self):
a = timedelta(42)
# Add/sub ints, longs, floats should be illegal
for i in 1, 1L, 1.0:
self.assertRaises(TypeError, lambda: a+i)
self.assertRaises(TypeError, lambda: a-i)
self.assertRaises(TypeError, lambda: i+a)
self.assertRaises(TypeError, lambda: i-a)
# Mul/div by float isn't supported.
x = 2.3
self.assertRaises(TypeError, lambda: a*x)
self.assertRaises(TypeError, lambda: x*a)
self.assertRaises(TypeError, lambda: a/x)
self.assertRaises(TypeError, lambda: x/a)
self.assertRaises(TypeError, lambda: a // x)
self.assertRaises(TypeError, lambda: x // a)
# Division of int by timedelta doesn't make sense.
# Division by zero doesn't make sense.
for zero in 0, 0L:
self.assertRaises(TypeError, lambda: zero // a)
self.assertRaises(ZeroDivisionError, lambda: a // zero)
def test_basic_attributes(self):
days, seconds, us = 1, 7, 31
td = timedelta(days, seconds, us)
self.assertEqual(td.days, days)
self.assertEqual(td.seconds, seconds)
self.assertEqual(td.microseconds, us)
def test_total_seconds(self):
td = timedelta(days=365)
self.assertEqual(td.total_seconds(), 31536000.0)
for total_seconds in [123456.789012, -123456.789012, 0.123456, 0, 1e6]:
td = timedelta(seconds=total_seconds)
self.assertEqual(td.total_seconds(), total_seconds)
# Issue8644: Test that td.total_seconds() has the same
# accuracy as td / timedelta(seconds=1).
for ms in [-1, -2, -123]:
td = timedelta(microseconds=ms)
self.assertEqual(td.total_seconds(),
((24*3600*td.days + td.seconds)*10**6
+ td.microseconds)/10**6)
def test_carries(self):
t1 = timedelta(days=100,
weeks=-7,
hours=-24*(100-49),
minutes=-3,
seconds=12,
microseconds=(3*60 - 12) * 1e6 + 1)
t2 = timedelta(microseconds=1)
self.assertEqual(t1, t2)
def test_hash_equality(self):
t1 = timedelta(days=100,
weeks=-7,
hours=-24*(100-49),
minutes=-3,
seconds=12,
microseconds=(3*60 - 12) * 1000000)
t2 = timedelta()
self.assertEqual(hash(t1), hash(t2))
t1 += timedelta(weeks=7)
t2 += timedelta(days=7*7)
self.assertEqual(t1, t2)
self.assertEqual(hash(t1), hash(t2))
d = {t1: 1}
d[t2] = 2
self.assertEqual(len(d), 1)
self.assertEqual(d[t1], 2)
def test_pickling(self):
args = 12, 34, 56
orig = timedelta(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_compare(self):
t1 = timedelta(2, 3, 4)
t2 = timedelta(2, 3, 4)
self.assertTrue(t1 == t2)
self.assertTrue(t1 <= t2)
self.assertTrue(t1 >= t2)
self.assertTrue(not t1 != t2)
self.assertTrue(not t1 < t2)
self.assertTrue(not t1 > t2)
self.assertEqual(cmp(t1, t2), 0)
self.assertEqual(cmp(t2, t1), 0)
for args in (3, 3, 3), (2, 4, 4), (2, 3, 5):
t2 = timedelta(*args) # this is larger than t1
self.assertTrue(t1 < t2)
self.assertTrue(t2 > t1)
self.assertTrue(t1 <= t2)
self.assertTrue(t2 >= t1)
self.assertTrue(t1 != t2)
self.assertTrue(t2 != t1)
self.assertTrue(not t1 == t2)
self.assertTrue(not t2 == t1)
self.assertTrue(not t1 > t2)
self.assertTrue(not t2 < t1)
self.assertTrue(not t1 >= t2)
self.assertTrue(not t2 <= t1)
self.assertEqual(cmp(t1, t2), -1)
self.assertEqual(cmp(t2, t1), 1)
for badarg in OTHERSTUFF:
self.assertEqual(t1 == badarg, False)
self.assertEqual(t1 != badarg, True)
self.assertEqual(badarg == t1, False)
self.assertEqual(badarg != t1, True)
self.assertRaises(TypeError, lambda: t1 <= badarg)
self.assertRaises(TypeError, lambda: t1 < badarg)
self.assertRaises(TypeError, lambda: t1 > badarg)
self.assertRaises(TypeError, lambda: t1 >= badarg)
self.assertRaises(TypeError, lambda: badarg <= t1)
self.assertRaises(TypeError, lambda: badarg < t1)
self.assertRaises(TypeError, lambda: badarg > t1)
self.assertRaises(TypeError, lambda: badarg >= t1)
def test_str(self):
td = timedelta
eq = self.assertEqual
eq(str(td(1)), "1 day, 0:00:00")
eq(str(td(-1)), "-1 day, 0:00:00")
eq(str(td(2)), "2 days, 0:00:00")
eq(str(td(-2)), "-2 days, 0:00:00")
eq(str(td(hours=12, minutes=58, seconds=59)), "12:58:59")
eq(str(td(hours=2, minutes=3, seconds=4)), "2:03:04")
eq(str(td(weeks=-30, hours=23, minutes=12, seconds=34)),
"-210 days, 23:12:34")
eq(str(td(milliseconds=1)), "0:00:00.001000")
eq(str(td(microseconds=3)), "0:00:00.000003")
eq(str(td(days=999999999, hours=23, minutes=59, seconds=59,
microseconds=999999)),
"999999999 days, 23:59:59.999999")
def test_roundtrip(self):
for td in (timedelta(days=999999999, hours=23, minutes=59,
seconds=59, microseconds=999999),
timedelta(days=-999999999),
timedelta(days=1, seconds=2, microseconds=3)):
# Verify td -> string -> td identity.
s = repr(td)
self.assertTrue(s.startswith('datetime.'))
s = s[9:]
td2 = eval(s)
self.assertEqual(td, td2)
# Verify identity via reconstructing from pieces.
td2 = timedelta(td.days, td.seconds, td.microseconds)
self.assertEqual(td, td2)
def test_resolution_info(self):
self.assertIsInstance(timedelta.min, timedelta)
self.assertIsInstance(timedelta.max, timedelta)
self.assertIsInstance(timedelta.resolution, timedelta)
self.assertTrue(timedelta.max > timedelta.min)
self.assertEqual(timedelta.min, timedelta(-999999999))
self.assertEqual(timedelta.max, timedelta(999999999, 24*3600-1, 1e6-1))
self.assertEqual(timedelta.resolution, timedelta(0, 0, 1))
def test_overflow(self):
tiny = timedelta.resolution
td = timedelta.min + tiny
td -= tiny # no problem
self.assertRaises(OverflowError, td.__sub__, tiny)
self.assertRaises(OverflowError, td.__add__, -tiny)
td = timedelta.max - tiny
td += tiny # no problem
self.assertRaises(OverflowError, td.__add__, tiny)
self.assertRaises(OverflowError, td.__sub__, -tiny)
self.assertRaises(OverflowError, lambda: -timedelta.max)
def test_microsecond_rounding(self):
td = timedelta
eq = self.assertEqual
# Single-field rounding.
eq(td(milliseconds=0.4/1000), td(0)) # rounds to 0
eq(td(milliseconds=-0.4/1000), td(0)) # rounds to 0
eq(td(milliseconds=0.6/1000), td(microseconds=1))
eq(td(milliseconds=-0.6/1000), td(microseconds=-1))
# Rounding due to contributions from more than one field.
us_per_hour = 3600e6
us_per_day = us_per_hour * 24
eq(td(days=.4/us_per_day), td(0))
eq(td(hours=.2/us_per_hour), td(0))
eq(td(days=.4/us_per_day, hours=.2/us_per_hour), td(microseconds=1))
eq(td(days=-.4/us_per_day), td(0))
eq(td(hours=-.2/us_per_hour), td(0))
eq(td(days=-.4/us_per_day, hours=-.2/us_per_hour), td(microseconds=-1))
def test_massive_normalization(self):
td = timedelta(microseconds=-1)
self.assertEqual((td.days, td.seconds, td.microseconds),
(-1, 24*3600-1, 999999))
def test_bool(self):
self.assertTrue(timedelta(1))
self.assertTrue(timedelta(0, 1))
self.assertTrue(timedelta(0, 0, 1))
self.assertTrue(timedelta(microseconds=1))
self.assertTrue(not timedelta(0))
def test_subclass_timedelta(self):
class T(timedelta):
@staticmethod
def from_td(td):
return T(td.days, td.seconds, td.microseconds)
def as_hours(self):
sum = (self.days * 24 +
self.seconds / 3600.0 +
self.microseconds / 3600e6)
return round(sum)
t1 = T(days=1)
self.assertTrue(type(t1) is T)
self.assertEqual(t1.as_hours(), 24)
t2 = T(days=-1, seconds=-3600)
self.assertTrue(type(t2) is T)
self.assertEqual(t2.as_hours(), -25)
t3 = t1 + t2
self.assertTrue(type(t3) is timedelta)
t4 = T.from_td(t3)
self.assertTrue(type(t4) is T)
self.assertEqual(t3.days, t4.days)
self.assertEqual(t3.seconds, t4.seconds)
self.assertEqual(t3.microseconds, t4.microseconds)
self.assertEqual(str(t3), str(t4))
self.assertEqual(t4.as_hours(), -1)
#############################################################################
# date tests
class TestDateOnly(unittest.TestCase):
# Tests here won't pass if also run on datetime objects, so don't
# subclass this to test datetimes too.
def test_delta_non_days_ignored(self):
dt = date(2000, 1, 2)
delta = timedelta(days=1, hours=2, minutes=3, seconds=4,
microseconds=5)
days = timedelta(delta.days)
self.assertEqual(days, timedelta(1))
dt2 = dt + delta
self.assertEqual(dt2, dt + days)
dt2 = delta + dt
self.assertEqual(dt2, dt + days)
dt2 = dt - delta
self.assertEqual(dt2, dt - days)
delta = -delta
days = timedelta(delta.days)
self.assertEqual(days, timedelta(-2))
dt2 = dt + delta
self.assertEqual(dt2, dt + days)
dt2 = delta + dt
self.assertEqual(dt2, dt + days)
dt2 = dt - delta
self.assertEqual(dt2, dt - days)
class SubclassDate(date):
sub_var = 1
class TestDate(HarmlessMixedComparison, unittest.TestCase):
# Tests here should pass for both dates and datetimes, except for a
# few tests that TestDateTime overrides.
theclass = date
def test_basic_attributes(self):
dt = self.theclass(2002, 3, 1)
self.assertEqual(dt.year, 2002)
self.assertEqual(dt.month, 3)
self.assertEqual(dt.day, 1)
def test_roundtrip(self):
for dt in (self.theclass(1, 2, 3),
self.theclass.today()):
# Verify dt -> string -> date identity.
s = repr(dt)
self.assertTrue(s.startswith('datetime.'))
s = s[9:]
dt2 = eval(s)
self.assertEqual(dt, dt2)
# Verify identity via reconstructing from pieces.
dt2 = self.theclass(dt.year, dt.month, dt.day)
self.assertEqual(dt, dt2)
def test_ordinal_conversions(self):
# Check some fixed values.
for y, m, d, n in [(1, 1, 1, 1), # calendar origin
(1, 12, 31, 365),
(2, 1, 1, 366),
# first example from "Calendrical Calculations"
(1945, 11, 12, 710347)]:
d = self.theclass(y, m, d)
self.assertEqual(n, d.toordinal())
fromord = self.theclass.fromordinal(n)
self.assertEqual(d, fromord)
if hasattr(fromord, "hour"):
# if we're checking something fancier than a date, verify
# the extra fields have been zeroed out
self.assertEqual(fromord.hour, 0)
self.assertEqual(fromord.minute, 0)
self.assertEqual(fromord.second, 0)
self.assertEqual(fromord.microsecond, 0)
# Check first and last days of year spottily across the whole
# range of years supported.
for year in xrange(MINYEAR, MAXYEAR+1, 7):
# Verify (year, 1, 1) -> ordinal -> y, m, d is identity.
d = self.theclass(year, 1, 1)
n = d.toordinal()
d2 = self.theclass.fromordinal(n)
self.assertEqual(d, d2)
# Verify that moving back a day gets to the end of year-1.
if year > 1:
d = self.theclass.fromordinal(n-1)
d2 = self.theclass(year-1, 12, 31)
self.assertEqual(d, d2)
self.assertEqual(d2.toordinal(), n-1)
# Test every day in a leap-year and a non-leap year.
dim = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
for year, isleap in (2000, True), (2002, False):
n = self.theclass(year, 1, 1).toordinal()
for month, maxday in zip(range(1, 13), dim):
if month == 2 and isleap:
maxday += 1
for day in range(1, maxday+1):
d = self.theclass(year, month, day)
self.assertEqual(d.toordinal(), n)
self.assertEqual(d, self.theclass.fromordinal(n))
n += 1
def test_extreme_ordinals(self):
a = self.theclass.min
a = self.theclass(a.year, a.month, a.day) # get rid of time parts
aord = a.toordinal()
b = a.fromordinal(aord)
self.assertEqual(a, b)
self.assertRaises(ValueError, lambda: a.fromordinal(aord - 1))
b = a + timedelta(days=1)
self.assertEqual(b.toordinal(), aord + 1)
self.assertEqual(b, self.theclass.fromordinal(aord + 1))
a = self.theclass.max
a = self.theclass(a.year, a.month, a.day) # get rid of time parts
aord = a.toordinal()
b = a.fromordinal(aord)
self.assertEqual(a, b)
self.assertRaises(ValueError, lambda: a.fromordinal(aord + 1))
b = a - timedelta(days=1)
self.assertEqual(b.toordinal(), aord - 1)
self.assertEqual(b, self.theclass.fromordinal(aord - 1))
def test_bad_constructor_arguments(self):
# bad years
self.theclass(MINYEAR, 1, 1) # no exception
self.theclass(MAXYEAR, 1, 1) # no exception
self.assertRaises(ValueError, self.theclass, MINYEAR-1, 1, 1)
self.assertRaises(ValueError, self.theclass, MAXYEAR+1, 1, 1)
# bad months
self.theclass(2000, 1, 1) # no exception
self.theclass(2000, 12, 1) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 0, 1)
self.assertRaises(ValueError, self.theclass, 2000, 13, 1)
# bad days
self.theclass(2000, 2, 29) # no exception
self.theclass(2004, 2, 29) # no exception
self.theclass(2400, 2, 29) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 2, 30)
self.assertRaises(ValueError, self.theclass, 2001, 2, 29)
self.assertRaises(ValueError, self.theclass, 2100, 2, 29)
self.assertRaises(ValueError, self.theclass, 1900, 2, 29)
self.assertRaises(ValueError, self.theclass, 2000, 1, 0)
self.assertRaises(ValueError, self.theclass, 2000, 1, 32)
def test_hash_equality(self):
d = self.theclass(2000, 12, 31)
# same thing
e = self.theclass(2000, 12, 31)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
d = self.theclass(2001, 1, 1)
# same thing
e = self.theclass(2001, 1, 1)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
def test_computations(self):
a = self.theclass(2002, 1, 31)
b = self.theclass(1956, 1, 31)
diff = a-b
self.assertEqual(diff.days, 46*365 + len(range(1956, 2002, 4)))
self.assertEqual(diff.seconds, 0)
self.assertEqual(diff.microseconds, 0)
day = timedelta(1)
week = timedelta(7)
a = self.theclass(2002, 3, 2)
self.assertEqual(a + day, self.theclass(2002, 3, 3))
self.assertEqual(day + a, self.theclass(2002, 3, 3))
self.assertEqual(a - day, self.theclass(2002, 3, 1))
self.assertEqual(-day + a, self.theclass(2002, 3, 1))
self.assertEqual(a + week, self.theclass(2002, 3, 9))
self.assertEqual(a - week, self.theclass(2002, 2, 23))
self.assertEqual(a + 52*week, self.theclass(2003, 3, 1))
self.assertEqual(a - 52*week, self.theclass(2001, 3, 3))
self.assertEqual((a + week) - a, week)
self.assertEqual((a + day) - a, day)
self.assertEqual((a - week) - a, -week)
self.assertEqual((a - day) - a, -day)
self.assertEqual(a - (a + week), -week)
self.assertEqual(a - (a + day), -day)
self.assertEqual(a - (a - week), week)
self.assertEqual(a - (a - day), day)
# Add/sub ints, longs, floats should be illegal
for i in 1, 1L, 1.0:
self.assertRaises(TypeError, lambda: a+i)
self.assertRaises(TypeError, lambda: a-i)
self.assertRaises(TypeError, lambda: i+a)
self.assertRaises(TypeError, lambda: i-a)
# delta - date is senseless.
self.assertRaises(TypeError, lambda: day - a)
# mixing date and (delta or date) via * or // is senseless
self.assertRaises(TypeError, lambda: day * a)
self.assertRaises(TypeError, lambda: a * day)
self.assertRaises(TypeError, lambda: day // a)
self.assertRaises(TypeError, lambda: a // day)
self.assertRaises(TypeError, lambda: a * a)
self.assertRaises(TypeError, lambda: a // a)
# date + date is senseless
self.assertRaises(TypeError, lambda: a + a)
def test_overflow(self):
tiny = self.theclass.resolution
for delta in [tiny, timedelta(1), timedelta(2)]:
dt = self.theclass.min + delta
dt -= delta # no problem
self.assertRaises(OverflowError, dt.__sub__, delta)
self.assertRaises(OverflowError, dt.__add__, -delta)
dt = self.theclass.max - delta
dt += delta # no problem
self.assertRaises(OverflowError, dt.__add__, delta)
self.assertRaises(OverflowError, dt.__sub__, -delta)
def test_fromtimestamp(self):
import time
# Try an arbitrary fixed value.
year, month, day = 1999, 9, 19
ts = time.mktime((year, month, day, 0, 0, 0, 0, 0, -1))
d = self.theclass.fromtimestamp(ts)
self.assertEqual(d.year, year)
self.assertEqual(d.month, month)
self.assertEqual(d.day, day)
def test_insane_fromtimestamp(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for insane in -1e200, 1e200:
self.assertRaises(ValueError, self.theclass.fromtimestamp,
insane)
def test_today(self):
import time
# We claim that today() is like fromtimestamp(time.time()), so
# prove it.
for dummy in range(3):
today = self.theclass.today()
ts = time.time()
todayagain = self.theclass.fromtimestamp(ts)
if today == todayagain:
break
# There are several legit reasons that could fail:
# 1. It recently became midnight, between the today() and the
# time() calls.
# 2. The platform time() has such fine resolution that we'll
# never get the same value twice.
# 3. The platform time() has poor resolution, and we just
# happened to call today() right before a resolution quantum
# boundary.
# 4. The system clock got fiddled between calls.
# In any case, wait a little while and try again.
time.sleep(0.1)
# It worked or it didn't. If it didn't, assume it's reason #2, and
# let the test pass if they're within half a second of each other.
self.assertTrue(today == todayagain or
abs(todayagain - today) < timedelta(seconds=0.5))
def test_weekday(self):
for i in range(7):
# March 4, 2002 is a Monday
self.assertEqual(self.theclass(2002, 3, 4+i).weekday(), i)
self.assertEqual(self.theclass(2002, 3, 4+i).isoweekday(), i+1)
# January 2, 1956 is a Monday
self.assertEqual(self.theclass(1956, 1, 2+i).weekday(), i)
self.assertEqual(self.theclass(1956, 1, 2+i).isoweekday(), i+1)
def test_isocalendar(self):
# Check examples from
# http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
for i in range(7):
d = self.theclass(2003, 12, 22+i)
self.assertEqual(d.isocalendar(), (2003, 52, i+1))
d = self.theclass(2003, 12, 29) + timedelta(i)
self.assertEqual(d.isocalendar(), (2004, 1, i+1))
d = self.theclass(2004, 1, 5+i)
self.assertEqual(d.isocalendar(), (2004, 2, i+1))
d = self.theclass(2009, 12, 21+i)
self.assertEqual(d.isocalendar(), (2009, 52, i+1))
d = self.theclass(2009, 12, 28) + timedelta(i)
self.assertEqual(d.isocalendar(), (2009, 53, i+1))
d = self.theclass(2010, 1, 4+i)
self.assertEqual(d.isocalendar(), (2010, 1, i+1))
def test_iso_long_years(self):
# Calculate long ISO years and compare to table from
# http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
ISO_LONG_YEARS_TABLE = """
4 32 60 88
9 37 65 93
15 43 71 99
20 48 76
26 54 82
105 133 161 189
111 139 167 195
116 144 172
122 150 178
128 156 184
201 229 257 285
207 235 263 291
212 240 268 296
218 246 274
224 252 280
303 331 359 387
308 336 364 392
314 342 370 398
320 348 376
325 353 381
"""
iso_long_years = map(int, ISO_LONG_YEARS_TABLE.split())
iso_long_years.sort()
L = []
for i in range(400):
d = self.theclass(2000+i, 12, 31)
d1 = self.theclass(1600+i, 12, 31)
self.assertEqual(d.isocalendar()[1:], d1.isocalendar()[1:])
if d.isocalendar()[1] == 53:
L.append(i)
self.assertEqual(L, iso_long_years)
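        # (An ISO year is "long" -- has 53 weeks -- exactly when January 1
        # falls on a Thursday, or on a Wednesday in a leap year; the
        # pattern repeats with the calendar's 400-year cycle, which is
        # what the 1600+i vs. 2000+i comparison above verifies.)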
def test_isoformat(self):
t = self.theclass(2, 3, 2)
self.assertEqual(t.isoformat(), "0002-03-02")
def test_ctime(self):
t = self.theclass(2002, 3, 2)
self.assertEqual(t.ctime(), "Sat Mar 2 00:00:00 2002")
def test_strftime(self):
t = self.theclass(2005, 3, 2)
self.assertEqual(t.strftime("m:%m d:%d y:%y"), "m:03 d:02 y:05")
self.assertEqual(t.strftime(""), "") # SF bug #761337
self.assertEqual(t.strftime('x'*1000), 'x'*1000) # SF bug #1556784
self.assertRaises(TypeError, t.strftime) # needs an arg
self.assertRaises(TypeError, t.strftime, "one", "two") # too many args
self.assertRaises(TypeError, t.strftime, 42) # arg wrong type
# test that unicode input is allowed (issue 2782)
self.assertEqual(t.strftime(u"%m"), "03")
# A naive object replaces %z and %Z w/ empty strings.
self.assertEqual(t.strftime("'%z' '%Z'"), "'' ''")
        # make sure that invalid format specifiers are handled correctly
        # self.assertRaises(ValueError, t.strftime, "%e")
        # self.assertRaises(ValueError, t.strftime, "%")
        # self.assertRaises(ValueError, t.strftime, "%#")
        # oh well, some systems just ignore those invalid ones.
        # at least, exercise them to make sure that no crashes
        # are generated
for f in ["%e", "%", "%#"]:
try:
t.strftime(f)
except ValueError:
pass
#check that this standard extension works
t.strftime("%f")
def test_format(self):
dt = self.theclass(2007, 9, 10)
self.assertEqual(dt.__format__(''), str(dt))
# check that a derived class's __str__() gets called
class A(self.theclass):
def __str__(self):
return 'A'
a = A(2007, 9, 10)
self.assertEqual(a.__format__(''), 'A')
# check that a derived class's strftime gets called
class B(self.theclass):
def strftime(self, format_spec):
return 'B'
b = B(2007, 9, 10)
self.assertEqual(b.__format__(''), str(dt))
for fmt in ["m:%m d:%d y:%y",
"m:%m d:%d y:%y H:%H M:%M S:%S",
"%z %Z",
]:
self.assertEqual(dt.__format__(fmt), dt.strftime(fmt))
self.assertEqual(a.__format__(fmt), dt.strftime(fmt))
self.assertEqual(b.__format__(fmt), 'B')
def test_resolution_info(self):
self.assertIsInstance(self.theclass.min, self.theclass)
self.assertIsInstance(self.theclass.max, self.theclass)
self.assertIsInstance(self.theclass.resolution, timedelta)
self.assertTrue(self.theclass.max > self.theclass.min)
def test_extreme_timedelta(self):
big = self.theclass.max - self.theclass.min
        # For the datetime subclass: 3652058 days, 23 hours, 59 minutes,
        # 59 seconds, 999999 microseconds (a plain date's extreme span is
        # exactly 3652058 days).
        n = (big.days*24*3600 + big.seconds)*1000000 + big.microseconds
        # For datetime, n == 315537897599999999 ~= 2**58.13
justasbig = timedelta(0, 0, n)
self.assertEqual(big, justasbig)
self.assertEqual(self.theclass.min + big, self.theclass.max)
self.assertEqual(self.theclass.max - big, self.theclass.min)
def test_timetuple(self):
for i in range(7):
# January 2, 1956 is a Monday (0)
d = self.theclass(1956, 1, 2+i)
t = d.timetuple()
self.assertEqual(t, (1956, 1, 2+i, 0, 0, 0, i, 2+i, -1))
# February 1, 1956 is a Wednesday (2)
d = self.theclass(1956, 2, 1+i)
t = d.timetuple()
self.assertEqual(t, (1956, 2, 1+i, 0, 0, 0, (2+i)%7, 32+i, -1))
# March 1, 1956 is a Thursday (3), and is the 31+29+1 = 61st day
# of the year.
d = self.theclass(1956, 3, 1+i)
t = d.timetuple()
self.assertEqual(t, (1956, 3, 1+i, 0, 0, 0, (3+i)%7, 61+i, -1))
self.assertEqual(t.tm_year, 1956)
self.assertEqual(t.tm_mon, 3)
self.assertEqual(t.tm_mday, 1+i)
self.assertEqual(t.tm_hour, 0)
self.assertEqual(t.tm_min, 0)
self.assertEqual(t.tm_sec, 0)
self.assertEqual(t.tm_wday, (3+i)%7)
self.assertEqual(t.tm_yday, 61+i)
self.assertEqual(t.tm_isdst, -1)
def test_pickling(self):
args = 6, 7, 23
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_compare(self):
t1 = self.theclass(2, 3, 4)
t2 = self.theclass(2, 3, 4)
self.assertTrue(t1 == t2)
self.assertTrue(t1 <= t2)
self.assertTrue(t1 >= t2)
self.assertTrue(not t1 != t2)
self.assertTrue(not t1 < t2)
self.assertTrue(not t1 > t2)
self.assertEqual(cmp(t1, t2), 0)
self.assertEqual(cmp(t2, t1), 0)
for args in (3, 3, 3), (2, 4, 4), (2, 3, 5):
t2 = self.theclass(*args) # this is larger than t1
self.assertTrue(t1 < t2)
self.assertTrue(t2 > t1)
self.assertTrue(t1 <= t2)
self.assertTrue(t2 >= t1)
self.assertTrue(t1 != t2)
self.assertTrue(t2 != t1)
self.assertTrue(not t1 == t2)
self.assertTrue(not t2 == t1)
self.assertTrue(not t1 > t2)
self.assertTrue(not t2 < t1)
self.assertTrue(not t1 >= t2)
self.assertTrue(not t2 <= t1)
self.assertEqual(cmp(t1, t2), -1)
self.assertEqual(cmp(t2, t1), 1)
for badarg in OTHERSTUFF:
self.assertEqual(t1 == badarg, False)
self.assertEqual(t1 != badarg, True)
self.assertEqual(badarg == t1, False)
self.assertEqual(badarg != t1, True)
self.assertRaises(TypeError, lambda: t1 < badarg)
self.assertRaises(TypeError, lambda: t1 > badarg)
self.assertRaises(TypeError, lambda: t1 >= badarg)
self.assertRaises(TypeError, lambda: badarg <= t1)
self.assertRaises(TypeError, lambda: badarg < t1)
self.assertRaises(TypeError, lambda: badarg > t1)
self.assertRaises(TypeError, lambda: badarg >= t1)
def test_mixed_compare(self):
our = self.theclass(2000, 4, 5)
self.assertRaises(TypeError, cmp, our, 1)
self.assertRaises(TypeError, cmp, 1, our)
class AnotherDateTimeClass(object):
def __cmp__(self, other):
# Return "equal" so calling this can't be confused with
# compare-by-address (which never says "equal" for distinct
# objects).
return 0
__hash__ = None # Silence Py3k warning
# This still errors, because date and datetime comparison raise
# TypeError instead of NotImplemented when they don't know what to
# do, in order to stop comparison from falling back to the default
# compare-by-address.
their = AnotherDateTimeClass()
self.assertRaises(TypeError, cmp, our, their)
# Oops: The next stab raises TypeError in the C implementation,
# but not in the Python implementation of datetime. The difference
# is due to that the Python implementation defines __cmp__ but
# the C implementation defines tp_richcompare. This is more pain
# to fix than it's worth, so commenting out the test.
# self.assertEqual(cmp(their, our), 0)
# But date and datetime comparison return NotImplemented instead if the
# other object has a timetuple attr. This gives the other object a
# chance to do the comparison.
class Comparable(AnotherDateTimeClass):
def timetuple(self):
return ()
their = Comparable()
self.assertEqual(cmp(our, their), 0)
self.assertEqual(cmp(their, our), 0)
self.assertTrue(our == their)
self.assertTrue(their == our)
def test_bool(self):
# All dates are considered true.
self.assertTrue(self.theclass.min)
self.assertTrue(self.theclass.max)
def test_strftime_out_of_range(self):
# For nasty technical reasons, we can't handle years before 1900.
cls = self.theclass
self.assertEqual(cls(1900, 1, 1).strftime("%Y"), "1900")
for y in 1, 49, 51, 99, 100, 1000, 1899:
self.assertRaises(ValueError, cls(y, 1, 1).strftime, "%Y")
def test_replace(self):
cls = self.theclass
args = [1, 2, 3]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("year", 2),
("month", 3),
("day", 4)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Out of bounds.
base = cls(2000, 2, 29)
self.assertRaises(ValueError, base.replace, year=2001)
def test_subclass_date(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.year + self.month
args = 2003, 4, 14
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.toordinal(), dt2.toordinal())
self.assertEqual(dt2.newmeth(-7), dt1.year + dt1.month - 7)
def test_pickling_subclass_date(self):
args = 6, 7, 23
orig = SubclassDate(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_backdoor_resistance(self):
# For fast unpickling, the constructor accepts a pickle string.
# This is a low-overhead backdoor. A user can (by intent or
# mistake) pass a string directly, which (if it's the right length)
# will get treated like a pickle, and bypass the normal sanity
# checks in the constructor. This can create insane objects.
# The constructor doesn't want to burn the time to validate all
# fields, but does check the month field. This stops, e.g.,
# datetime.datetime('1995-03-25') from yielding an insane object.
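        # (For reference, an assumption about the Python-2-era pickle
        # layout these checks rely on: a date's payload is 4 bytes --
        # year-high, year-low, month, day -- so '\x07\xd0\x02\x1d' would
        # decode as date(2000, 2, 29), since 0x07 * 256 + 0xd0 == 2000.)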
base = '1995-03-25'
if not issubclass(self.theclass, datetime):
base = base[:4]
for month_byte in '9', chr(0), chr(13), '\xff':
self.assertRaises(TypeError, self.theclass,
base[:2] + month_byte + base[3:])
for ord_byte in range(1, 13):
# This shouldn't blow up because of the month byte alone. If
# the implementation changes to do more-careful checking, it may
# blow up because other fields are insane.
self.theclass(base[:2] + chr(ord_byte) + base[3:])
#############################################################################
# datetime tests
class SubclassDatetime(datetime):
sub_var = 1
class TestDateTime(TestDate):
theclass = datetime
def test_basic_attributes(self):
dt = self.theclass(2002, 3, 1, 12, 0)
self.assertEqual(dt.year, 2002)
self.assertEqual(dt.month, 3)
self.assertEqual(dt.day, 1)
self.assertEqual(dt.hour, 12)
self.assertEqual(dt.minute, 0)
self.assertEqual(dt.second, 0)
self.assertEqual(dt.microsecond, 0)
def test_basic_attributes_nonzero(self):
# Make sure all attributes are non-zero so bugs in
# bit-shifting access show up.
dt = self.theclass(2002, 3, 1, 12, 59, 59, 8000)
self.assertEqual(dt.year, 2002)
self.assertEqual(dt.month, 3)
self.assertEqual(dt.day, 1)
self.assertEqual(dt.hour, 12)
self.assertEqual(dt.minute, 59)
self.assertEqual(dt.second, 59)
self.assertEqual(dt.microsecond, 8000)
def test_roundtrip(self):
for dt in (self.theclass(1, 2, 3, 4, 5, 6, 7),
self.theclass.now()):
# Verify dt -> string -> datetime identity.
s = repr(dt)
self.assertTrue(s.startswith('datetime.'))
s = s[9:]
dt2 = eval(s)
self.assertEqual(dt, dt2)
# Verify identity via reconstructing from pieces.
dt2 = self.theclass(dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.microsecond)
self.assertEqual(dt, dt2)
def test_isoformat(self):
t = self.theclass(2, 3, 2, 4, 5, 1, 123)
self.assertEqual(t.isoformat(), "0002-03-02T04:05:01.000123")
self.assertEqual(t.isoformat('T'), "0002-03-02T04:05:01.000123")
self.assertEqual(t.isoformat(' '), "0002-03-02 04:05:01.000123")
self.assertEqual(t.isoformat('\x00'), "0002-03-02\x0004:05:01.000123")
# str is ISO format with the separator forced to a blank.
self.assertEqual(str(t), "0002-03-02 04:05:01.000123")
t = self.theclass(2, 3, 2)
self.assertEqual(t.isoformat(), "0002-03-02T00:00:00")
self.assertEqual(t.isoformat('T'), "0002-03-02T00:00:00")
self.assertEqual(t.isoformat(' '), "0002-03-02 00:00:00")
# str is ISO format with the separator forced to a blank.
self.assertEqual(str(t), "0002-03-02 00:00:00")
def test_format(self):
dt = self.theclass(2007, 9, 10, 4, 5, 1, 123)
self.assertEqual(dt.__format__(''), str(dt))
# check that a derived class's __str__() gets called
class A(self.theclass):
def __str__(self):
return 'A'
a = A(2007, 9, 10, 4, 5, 1, 123)
self.assertEqual(a.__format__(''), 'A')
# check that a derived class's strftime gets called
class B(self.theclass):
def strftime(self, format_spec):
return 'B'
b = B(2007, 9, 10, 4, 5, 1, 123)
self.assertEqual(b.__format__(''), str(dt))
for fmt in ["m:%m d:%d y:%y",
"m:%m d:%d y:%y H:%H M:%M S:%S",
"%z %Z",
]:
self.assertEqual(dt.__format__(fmt), dt.strftime(fmt))
self.assertEqual(a.__format__(fmt), dt.strftime(fmt))
self.assertEqual(b.__format__(fmt), 'B')
def test_more_ctime(self):
# Test fields that TestDate doesn't touch.
import time
t = self.theclass(2002, 3, 2, 18, 3, 5, 123)
        self.assertEqual(t.ctime(), "Sat Mar  2 18:03:05 2002")
# Oops! The next line fails on Win2K under MSVC 6, so it's commented
# out. The difference is that t.ctime() produces " 2" for the day,
# but platform ctime() produces "02" for the day. According to
# C99, t.ctime() is correct here.
# self.assertEqual(t.ctime(), time.ctime(time.mktime(t.timetuple())))
# So test a case where that difference doesn't matter.
t = self.theclass(2002, 3, 22, 18, 3, 5, 123)
self.assertEqual(t.ctime(), time.ctime(time.mktime(t.timetuple())))
def test_tz_independent_comparing(self):
dt1 = self.theclass(2002, 3, 1, 9, 0, 0)
dt2 = self.theclass(2002, 3, 1, 10, 0, 0)
dt3 = self.theclass(2002, 3, 1, 9, 0, 0)
self.assertEqual(dt1, dt3)
self.assertTrue(dt2 > dt3)
# Make sure comparison doesn't forget microseconds, and isn't done
# via comparing a float timestamp (an IEEE double doesn't have enough
# precision to span microsecond resolution across years 1 thru 9999,
# so comparing via timestamp necessarily calls some distinct values
# equal).
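        # (Back-of-the-envelope: years 1 through 9999 span about 3652059
        # days ~= 3.16e17 microseconds ~= 2**58.1 distinct values, while
        # an IEEE-754 double carries only a 53-bit significand.)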
dt1 = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999998)
us = timedelta(microseconds=1)
dt2 = dt1 + us
self.assertEqual(dt2 - dt1, us)
self.assertTrue(dt1 < dt2)
def test_strftime_with_bad_tzname_replace(self):
# verify ok if tzinfo.tzname().replace() returns a non-string
class MyTzInfo(FixedOffset):
def tzname(self, dt):
class MyStr(str):
def replace(self, *args):
return None
return MyStr('name')
t = self.theclass(2005, 3, 2, 0, 0, 0, 0, MyTzInfo(3, 'name'))
self.assertRaises(TypeError, t.strftime, '%Z')
def test_bad_constructor_arguments(self):
# bad years
self.theclass(MINYEAR, 1, 1) # no exception
self.theclass(MAXYEAR, 1, 1) # no exception
self.assertRaises(ValueError, self.theclass, MINYEAR-1, 1, 1)
self.assertRaises(ValueError, self.theclass, MAXYEAR+1, 1, 1)
# bad months
self.theclass(2000, 1, 1) # no exception
self.theclass(2000, 12, 1) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 0, 1)
self.assertRaises(ValueError, self.theclass, 2000, 13, 1)
# bad days
self.theclass(2000, 2, 29) # no exception
self.theclass(2004, 2, 29) # no exception
self.theclass(2400, 2, 29) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 2, 30)
self.assertRaises(ValueError, self.theclass, 2001, 2, 29)
self.assertRaises(ValueError, self.theclass, 2100, 2, 29)
self.assertRaises(ValueError, self.theclass, 1900, 2, 29)
self.assertRaises(ValueError, self.theclass, 2000, 1, 0)
self.assertRaises(ValueError, self.theclass, 2000, 1, 32)
# bad hours
self.theclass(2000, 1, 31, 0) # no exception
self.theclass(2000, 1, 31, 23) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, -1)
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 24)
# bad minutes
self.theclass(2000, 1, 31, 23, 0) # no exception
self.theclass(2000, 1, 31, 23, 59) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, -1)
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 60)
# bad seconds
self.theclass(2000, 1, 31, 23, 59, 0) # no exception
self.theclass(2000, 1, 31, 23, 59, 59) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 59, -1)
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 59, 60)
# bad microseconds
self.theclass(2000, 1, 31, 23, 59, 59, 0) # no exception
self.theclass(2000, 1, 31, 23, 59, 59, 999999) # no exception
self.assertRaises(ValueError, self.theclass,
2000, 1, 31, 23, 59, 59, -1)
self.assertRaises(ValueError, self.theclass,
2000, 1, 31, 23, 59, 59,
1000000)
def test_hash_equality(self):
d = self.theclass(2000, 12, 31, 23, 30, 17)
e = self.theclass(2000, 12, 31, 23, 30, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
d = self.theclass(2001, 1, 1, 0, 5, 17)
e = self.theclass(2001, 1, 1, 0, 5, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
def test_computations(self):
a = self.theclass(2002, 1, 31)
b = self.theclass(1956, 1, 31)
diff = a-b
self.assertEqual(diff.days, 46*365 + len(range(1956, 2002, 4)))
self.assertEqual(diff.seconds, 0)
self.assertEqual(diff.microseconds, 0)
a = self.theclass(2002, 3, 2, 17, 6)
millisec = timedelta(0, 0, 1000)
hour = timedelta(0, 3600)
day = timedelta(1)
week = timedelta(7)
self.assertEqual(a + hour, self.theclass(2002, 3, 2, 18, 6))
self.assertEqual(hour + a, self.theclass(2002, 3, 2, 18, 6))
self.assertEqual(a + 10*hour, self.theclass(2002, 3, 3, 3, 6))
self.assertEqual(a - hour, self.theclass(2002, 3, 2, 16, 6))
self.assertEqual(-hour + a, self.theclass(2002, 3, 2, 16, 6))
self.assertEqual(a - hour, a + -hour)
self.assertEqual(a - 20*hour, self.theclass(2002, 3, 1, 21, 6))
self.assertEqual(a + day, self.theclass(2002, 3, 3, 17, 6))
self.assertEqual(a - day, self.theclass(2002, 3, 1, 17, 6))
self.assertEqual(a + week, self.theclass(2002, 3, 9, 17, 6))
self.assertEqual(a - week, self.theclass(2002, 2, 23, 17, 6))
self.assertEqual(a + 52*week, self.theclass(2003, 3, 1, 17, 6))
self.assertEqual(a - 52*week, self.theclass(2001, 3, 3, 17, 6))
self.assertEqual((a + week) - a, week)
self.assertEqual((a + day) - a, day)
self.assertEqual((a + hour) - a, hour)
self.assertEqual((a + millisec) - a, millisec)
self.assertEqual((a - week) - a, -week)
self.assertEqual((a - day) - a, -day)
self.assertEqual((a - hour) - a, -hour)
self.assertEqual((a - millisec) - a, -millisec)
self.assertEqual(a - (a + week), -week)
self.assertEqual(a - (a + day), -day)
self.assertEqual(a - (a + hour), -hour)
self.assertEqual(a - (a + millisec), -millisec)
self.assertEqual(a - (a - week), week)
self.assertEqual(a - (a - day), day)
self.assertEqual(a - (a - hour), hour)
self.assertEqual(a - (a - millisec), millisec)
self.assertEqual(a + (week + day + hour + millisec),
self.theclass(2002, 3, 10, 18, 6, 0, 1000))
self.assertEqual(a + (week + day + hour + millisec),
(((a + week) + day) + hour) + millisec)
self.assertEqual(a - (week + day + hour + millisec),
self.theclass(2002, 2, 22, 16, 5, 59, 999000))
self.assertEqual(a - (week + day + hour + millisec),
(((a - week) - day) - hour) - millisec)
# Add/sub ints, longs, floats should be illegal
for i in 1, 1L, 1.0:
self.assertRaises(TypeError, lambda: a+i)
self.assertRaises(TypeError, lambda: a-i)
self.assertRaises(TypeError, lambda: i+a)
self.assertRaises(TypeError, lambda: i-a)
# delta - datetime is senseless.
self.assertRaises(TypeError, lambda: day - a)
# mixing datetime and (delta or datetime) via * or // is senseless
self.assertRaises(TypeError, lambda: day * a)
self.assertRaises(TypeError, lambda: a * day)
self.assertRaises(TypeError, lambda: day // a)
self.assertRaises(TypeError, lambda: a // day)
self.assertRaises(TypeError, lambda: a * a)
self.assertRaises(TypeError, lambda: a // a)
# datetime + datetime is senseless
self.assertRaises(TypeError, lambda: a + a)
def test_pickling(self):
args = 6, 7, 23, 20, 59, 1, 64**2
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_more_pickling(self):
a = self.theclass(2003, 2, 7, 16, 48, 37, 444116)
s = pickle.dumps(a)
b = pickle.loads(s)
self.assertEqual(b.year, 2003)
self.assertEqual(b.month, 2)
self.assertEqual(b.day, 7)
def test_pickling_subclass_datetime(self):
args = 6, 7, 23, 20, 59, 1, 64**2
orig = SubclassDatetime(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_more_compare(self):
# The test_compare() inherited from TestDate covers the error cases.
# We just want to test lexicographic ordering on the members datetime
# has that date lacks.
args = [2000, 11, 29, 20, 58, 16, 999998]
t1 = self.theclass(*args)
t2 = self.theclass(*args)
self.assertTrue(t1 == t2)
self.assertTrue(t1 <= t2)
self.assertTrue(t1 >= t2)
self.assertTrue(not t1 != t2)
self.assertTrue(not t1 < t2)
self.assertTrue(not t1 > t2)
self.assertEqual(cmp(t1, t2), 0)
self.assertEqual(cmp(t2, t1), 0)
for i in range(len(args)):
newargs = args[:]
newargs[i] = args[i] + 1
t2 = self.theclass(*newargs) # this is larger than t1
self.assertTrue(t1 < t2)
self.assertTrue(t2 > t1)
self.assertTrue(t1 <= t2)
self.assertTrue(t2 >= t1)
self.assertTrue(t1 != t2)
self.assertTrue(t2 != t1)
self.assertTrue(not t1 == t2)
self.assertTrue(not t2 == t1)
self.assertTrue(not t1 > t2)
self.assertTrue(not t2 < t1)
self.assertTrue(not t1 >= t2)
self.assertTrue(not t2 <= t1)
self.assertEqual(cmp(t1, t2), -1)
self.assertEqual(cmp(t2, t1), 1)
# A helper for timestamp constructor tests.
def verify_field_equality(self, expected, got):
self.assertEqual(expected.tm_year, got.year)
self.assertEqual(expected.tm_mon, got.month)
self.assertEqual(expected.tm_mday, got.day)
self.assertEqual(expected.tm_hour, got.hour)
self.assertEqual(expected.tm_min, got.minute)
self.assertEqual(expected.tm_sec, got.second)
def test_fromtimestamp(self):
import time
ts = time.time()
expected = time.localtime(ts)
got = self.theclass.fromtimestamp(ts)
self.verify_field_equality(expected, got)
def test_utcfromtimestamp(self):
import time
ts = time.time()
expected = time.gmtime(ts)
got = self.theclass.utcfromtimestamp(ts)
self.verify_field_equality(expected, got)
def test_microsecond_rounding(self):
# Test whether fromtimestamp "rounds up" floats that are less
# than one microsecond smaller than an integer.
self.assertEqual(self.theclass.fromtimestamp(0.9999999),
self.theclass.fromtimestamp(1))
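        # (0.9999999 seconds is 999999.9 microseconds; assuming the
        # conversion rounds to the nearest microsecond, that becomes
        # 1000000 us and carries over into a whole second.)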
def test_insane_fromtimestamp(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for insane in -1e200, 1e200:
self.assertRaises(ValueError, self.theclass.fromtimestamp,
insane)
def test_insane_utcfromtimestamp(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for insane in -1e200, 1e200:
self.assertRaises(ValueError, self.theclass.utcfromtimestamp,
insane)
@unittest.skipIf(sys.platform == "win32", "Windows doesn't accept negative timestamps")
def test_negative_float_fromtimestamp(self):
# The result is tz-dependent; at least test that this doesn't
# fail (like it did before bug 1646728 was fixed).
self.theclass.fromtimestamp(-1.05)
@unittest.skipIf(sys.platform == "win32", "Windows doesn't accept negative timestamps")
def test_negative_float_utcfromtimestamp(self):
d = self.theclass.utcfromtimestamp(-1.05)
self.assertEqual(d, self.theclass(1969, 12, 31, 23, 59, 58, 950000))
def test_utcnow(self):
import time
# Call it a success if utcnow() and utcfromtimestamp() are within
# a second of each other.
tolerance = timedelta(seconds=1)
for dummy in range(3):
from_now = self.theclass.utcnow()
from_timestamp = self.theclass.utcfromtimestamp(time.time())
if abs(from_timestamp - from_now) <= tolerance:
break
# Else try again a few times.
self.assertTrue(abs(from_timestamp - from_now) <= tolerance)
def test_strptime(self):
import _strptime
string = '2004-12-01 13:02:47.197'
format = '%Y-%m-%d %H:%M:%S.%f'
result, frac = _strptime._strptime(string, format)
expected = self.theclass(*(result[0:6]+(frac,)))
got = self.theclass.strptime(string, format)
self.assertEqual(expected, got)
def test_more_timetuple(self):
        # This tests fields beyond those tested by TestDate.test_timetuple.
t = self.theclass(2004, 12, 31, 6, 22, 33)
self.assertEqual(t.timetuple(), (2004, 12, 31, 6, 22, 33, 4, 366, -1))
self.assertEqual(t.timetuple(),
(t.year, t.month, t.day,
t.hour, t.minute, t.second,
t.weekday(),
t.toordinal() - date(t.year, 1, 1).toordinal() + 1,
-1))
tt = t.timetuple()
self.assertEqual(tt.tm_year, t.year)
self.assertEqual(tt.tm_mon, t.month)
self.assertEqual(tt.tm_mday, t.day)
self.assertEqual(tt.tm_hour, t.hour)
self.assertEqual(tt.tm_min, t.minute)
self.assertEqual(tt.tm_sec, t.second)
self.assertEqual(tt.tm_wday, t.weekday())
self.assertEqual(tt.tm_yday, t.toordinal() -
date(t.year, 1, 1).toordinal() + 1)
self.assertEqual(tt.tm_isdst, -1)
def test_more_strftime(self):
        # This tests fields beyond those tested by TestDate.test_strftime.
t = self.theclass(2004, 12, 31, 6, 22, 33, 47)
self.assertEqual(t.strftime("%m %d %y %f %S %M %H %j"),
"12 31 04 000047 33 22 06 366")
def test_extract(self):
dt = self.theclass(2002, 3, 4, 18, 45, 3, 1234)
self.assertEqual(dt.date(), date(2002, 3, 4))
self.assertEqual(dt.time(), time(18, 45, 3, 1234))
def test_combine(self):
d = date(2002, 3, 4)
t = time(18, 45, 3, 1234)
expected = self.theclass(2002, 3, 4, 18, 45, 3, 1234)
combine = self.theclass.combine
dt = combine(d, t)
self.assertEqual(dt, expected)
dt = combine(time=t, date=d)
self.assertEqual(dt, expected)
self.assertEqual(d, dt.date())
self.assertEqual(t, dt.time())
self.assertEqual(dt, combine(dt.date(), dt.time()))
self.assertRaises(TypeError, combine) # need an arg
self.assertRaises(TypeError, combine, d) # need two args
self.assertRaises(TypeError, combine, t, d) # args reversed
self.assertRaises(TypeError, combine, d, t, 1) # too many args
self.assertRaises(TypeError, combine, "date", "time") # wrong types
def test_replace(self):
cls = self.theclass
args = [1, 2, 3, 4, 5, 6, 7]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("year", 2),
("month", 3),
("day", 4),
("hour", 5),
("minute", 6),
("second", 7),
("microsecond", 8)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Out of bounds.
base = cls(2000, 2, 29)
self.assertRaises(ValueError, base.replace, year=2001)
def test_astimezone(self):
# Pretty boring! The TZ test is more interesting here. astimezone()
# simply can't be applied to a naive object.
dt = self.theclass.now()
f = FixedOffset(44, "")
self.assertRaises(TypeError, dt.astimezone) # not enough args
self.assertRaises(TypeError, dt.astimezone, f, f) # too many args
self.assertRaises(TypeError, dt.astimezone, dt) # arg wrong type
self.assertRaises(ValueError, dt.astimezone, f) # naive
self.assertRaises(ValueError, dt.astimezone, tz=f) # naive
class Bogus(tzinfo):
def utcoffset(self, dt): return None
def dst(self, dt): return timedelta(0)
bog = Bogus()
self.assertRaises(ValueError, dt.astimezone, bog) # naive
class AlsoBogus(tzinfo):
def utcoffset(self, dt): return timedelta(0)
def dst(self, dt): return None
alsobog = AlsoBogus()
self.assertRaises(ValueError, dt.astimezone, alsobog) # also naive
def test_subclass_datetime(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.year + self.month + self.second
args = 2003, 4, 14, 12, 13, 41
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.toordinal(), dt2.toordinal())
self.assertEqual(dt2.newmeth(-7), dt1.year + dt1.month +
dt1.second - 7)
class SubclassTime(time):
sub_var = 1
class TestTime(HarmlessMixedComparison, unittest.TestCase):
theclass = time
def test_basic_attributes(self):
t = self.theclass(12, 0)
self.assertEqual(t.hour, 12)
self.assertEqual(t.minute, 0)
self.assertEqual(t.second, 0)
self.assertEqual(t.microsecond, 0)
def test_basic_attributes_nonzero(self):
# Make sure all attributes are non-zero so bugs in
# bit-shifting access show up.
t = self.theclass(12, 59, 59, 8000)
self.assertEqual(t.hour, 12)
self.assertEqual(t.minute, 59)
self.assertEqual(t.second, 59)
self.assertEqual(t.microsecond, 8000)
def test_roundtrip(self):
t = self.theclass(1, 2, 3, 4)
# Verify t -> string -> time identity.
s = repr(t)
self.assertTrue(s.startswith('datetime.'))
s = s[9:]
t2 = eval(s)
self.assertEqual(t, t2)
# Verify identity via reconstructing from pieces.
t2 = self.theclass(t.hour, t.minute, t.second,
t.microsecond)
self.assertEqual(t, t2)
def test_comparing(self):
args = [1, 2, 3, 4]
t1 = self.theclass(*args)
t2 = self.theclass(*args)
self.assertTrue(t1 == t2)
self.assertTrue(t1 <= t2)
self.assertTrue(t1 >= t2)
self.assertTrue(not t1 != t2)
self.assertTrue(not t1 < t2)
self.assertTrue(not t1 > t2)
self.assertEqual(cmp(t1, t2), 0)
self.assertEqual(cmp(t2, t1), 0)
for i in range(len(args)):
newargs = args[:]
newargs[i] = args[i] + 1
t2 = self.theclass(*newargs) # this is larger than t1
self.assertTrue(t1 < t2)
self.assertTrue(t2 > t1)
self.assertTrue(t1 <= t2)
self.assertTrue(t2 >= t1)
self.assertTrue(t1 != t2)
self.assertTrue(t2 != t1)
self.assertTrue(not t1 == t2)
self.assertTrue(not t2 == t1)
self.assertTrue(not t1 > t2)
self.assertTrue(not t2 < t1)
self.assertTrue(not t1 >= t2)
self.assertTrue(not t2 <= t1)
self.assertEqual(cmp(t1, t2), -1)
self.assertEqual(cmp(t2, t1), 1)
for badarg in OTHERSTUFF:
self.assertEqual(t1 == badarg, False)
self.assertEqual(t1 != badarg, True)
self.assertEqual(badarg == t1, False)
self.assertEqual(badarg != t1, True)
self.assertRaises(TypeError, lambda: t1 <= badarg)
self.assertRaises(TypeError, lambda: t1 < badarg)
self.assertRaises(TypeError, lambda: t1 > badarg)
self.assertRaises(TypeError, lambda: t1 >= badarg)
self.assertRaises(TypeError, lambda: badarg <= t1)
self.assertRaises(TypeError, lambda: badarg < t1)
self.assertRaises(TypeError, lambda: badarg > t1)
self.assertRaises(TypeError, lambda: badarg >= t1)
def test_bad_constructor_arguments(self):
# bad hours
self.theclass(0, 0) # no exception
self.theclass(23, 0) # no exception
self.assertRaises(ValueError, self.theclass, -1, 0)
self.assertRaises(ValueError, self.theclass, 24, 0)
# bad minutes
self.theclass(23, 0) # no exception
self.theclass(23, 59) # no exception
self.assertRaises(ValueError, self.theclass, 23, -1)
self.assertRaises(ValueError, self.theclass, 23, 60)
# bad seconds
self.theclass(23, 59, 0) # no exception
self.theclass(23, 59, 59) # no exception
self.assertRaises(ValueError, self.theclass, 23, 59, -1)
self.assertRaises(ValueError, self.theclass, 23, 59, 60)
# bad microseconds
self.theclass(23, 59, 59, 0) # no exception
self.theclass(23, 59, 59, 999999) # no exception
self.assertRaises(ValueError, self.theclass, 23, 59, 59, -1)
self.assertRaises(ValueError, self.theclass, 23, 59, 59, 1000000)
def test_hash_equality(self):
d = self.theclass(23, 30, 17)
e = self.theclass(23, 30, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
d = self.theclass(0, 5, 17)
e = self.theclass(0, 5, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
def test_isoformat(self):
t = self.theclass(4, 5, 1, 123)
self.assertEqual(t.isoformat(), "04:05:01.000123")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass()
self.assertEqual(t.isoformat(), "00:00:00")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=1)
self.assertEqual(t.isoformat(), "00:00:00.000001")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=10)
self.assertEqual(t.isoformat(), "00:00:00.000010")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=100)
self.assertEqual(t.isoformat(), "00:00:00.000100")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=1000)
self.assertEqual(t.isoformat(), "00:00:00.001000")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=10000)
self.assertEqual(t.isoformat(), "00:00:00.010000")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=100000)
self.assertEqual(t.isoformat(), "00:00:00.100000")
self.assertEqual(t.isoformat(), str(t))
def test_1653736(self):
# verify it doesn't accept extra keyword arguments
t = self.theclass(second=1)
self.assertRaises(TypeError, t.isoformat, foo=3)
def test_strftime(self):
t = self.theclass(1, 2, 3, 4)
self.assertEqual(t.strftime('%H %M %S %f'), "01 02 03 000004")
# A naive object replaces %z and %Z with empty strings.
self.assertEqual(t.strftime("'%z' '%Z'"), "'' ''")
def test_format(self):
t = self.theclass(1, 2, 3, 4)
self.assertEqual(t.__format__(''), str(t))
# check that a derived class's __str__() gets called
class A(self.theclass):
def __str__(self):
return 'A'
a = A(1, 2, 3, 4)
self.assertEqual(a.__format__(''), 'A')
# check that a derived class's strftime gets called
class B(self.theclass):
def strftime(self, format_spec):
return 'B'
b = B(1, 2, 3, 4)
self.assertEqual(b.__format__(''), str(t))
for fmt in ['%H %M %S',
]:
self.assertEqual(t.__format__(fmt), t.strftime(fmt))
self.assertEqual(a.__format__(fmt), t.strftime(fmt))
self.assertEqual(b.__format__(fmt), 'B')
def test_str(self):
self.assertEqual(str(self.theclass(1, 2, 3, 4)), "01:02:03.000004")
self.assertEqual(str(self.theclass(10, 2, 3, 4000)), "10:02:03.004000")
self.assertEqual(str(self.theclass(0, 2, 3, 400000)), "00:02:03.400000")
self.assertEqual(str(self.theclass(12, 2, 3, 0)), "12:02:03")
self.assertEqual(str(self.theclass(23, 15, 0, 0)), "23:15:00")
def test_repr(self):
name = 'datetime.' + self.theclass.__name__
self.assertEqual(repr(self.theclass(1, 2, 3, 4)),
"%s(1, 2, 3, 4)" % name)
self.assertEqual(repr(self.theclass(10, 2, 3, 4000)),
"%s(10, 2, 3, 4000)" % name)
self.assertEqual(repr(self.theclass(0, 2, 3, 400000)),
"%s(0, 2, 3, 400000)" % name)
self.assertEqual(repr(self.theclass(12, 2, 3, 0)),
"%s(12, 2, 3)" % name)
self.assertEqual(repr(self.theclass(23, 15, 0, 0)),
"%s(23, 15)" % name)
def test_resolution_info(self):
self.assertIsInstance(self.theclass.min, self.theclass)
self.assertIsInstance(self.theclass.max, self.theclass)
self.assertIsInstance(self.theclass.resolution, timedelta)
self.assertTrue(self.theclass.max > self.theclass.min)
def test_pickling(self):
args = 20, 59, 16, 64**2
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_pickling_subclass_time(self):
args = 20, 59, 16, 64**2
orig = SubclassTime(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_bool(self):
cls = self.theclass
self.assertTrue(cls(1))
self.assertTrue(cls(0, 1))
self.assertTrue(cls(0, 0, 1))
self.assertTrue(cls(0, 0, 0, 1))
self.assertTrue(not cls(0))
self.assertTrue(not cls())
def test_replace(self):
cls = self.theclass
args = [1, 2, 3, 4]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("hour", 5),
("minute", 6),
("second", 7),
("microsecond", 8)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Out of bounds.
base = cls(1)
self.assertRaises(ValueError, base.replace, hour=24)
self.assertRaises(ValueError, base.replace, minute=-1)
self.assertRaises(ValueError, base.replace, second=100)
self.assertRaises(ValueError, base.replace, microsecond=1000000)
def test_subclass_time(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.hour + self.second
args = 4, 5, 6
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.isoformat(), dt2.isoformat())
self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.second - 7)
def test_backdoor_resistance(self):
# see TestDate.test_backdoor_resistance().
base = '2:59.0'
for hour_byte in ' ', '9', chr(24), '\xff':
self.assertRaises(TypeError, self.theclass,
hour_byte + base[1:])
# A mixin for classes with a tzinfo= argument. Subclasses must define
# theclass as a class attribute, and theclass(1, 1, 1, tzinfo=whatever)
# must be legit (which is true for time and datetime).
class TZInfoBase:
def test_argument_passing(self):
cls = self.theclass
# A datetime passes itself on, a time passes None.
class introspective(tzinfo):
def tzname(self, dt): return dt and "real" or "none"
def utcoffset(self, dt):
                return timedelta(minutes=dt and 42 or -42)
dst = utcoffset
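            # (The "dt and X or Y" form is the pre-2.5 conditional idiom:
            # it yields X when a datetime passes itself through, and Y
            # when the time type passes None.)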
obj = cls(1, 2, 3, tzinfo=introspective())
expected = cls is time and "none" or "real"
self.assertEqual(obj.tzname(), expected)
expected = timedelta(minutes=(cls is time and -42 or 42))
self.assertEqual(obj.utcoffset(), expected)
self.assertEqual(obj.dst(), expected)
def test_bad_tzinfo_classes(self):
cls = self.theclass
self.assertRaises(TypeError, cls, 1, 1, 1, tzinfo=12)
class NiceTry(object):
def __init__(self): pass
def utcoffset(self, dt): pass
self.assertRaises(TypeError, cls, 1, 1, 1, tzinfo=NiceTry)
class BetterTry(tzinfo):
def __init__(self): pass
def utcoffset(self, dt): pass
b = BetterTry()
t = cls(1, 1, 1, tzinfo=b)
self.assertTrue(t.tzinfo is b)
def test_utc_offset_out_of_bounds(self):
class Edgy(tzinfo):
def __init__(self, offset):
self.offset = timedelta(minutes=offset)
def utcoffset(self, dt):
return self.offset
cls = self.theclass
for offset, legit in ((-1440, False),
(-1439, True),
(1439, True),
(1440, False)):
if cls is time:
t = cls(1, 2, 3, tzinfo=Edgy(offset))
elif cls is datetime:
t = cls(6, 6, 6, 1, 2, 3, tzinfo=Edgy(offset))
else:
assert 0, "impossible"
if legit:
aofs = abs(offset)
h, m = divmod(aofs, 60)
tag = "%c%02d:%02d" % (offset < 0 and '-' or '+', h, m)
if isinstance(t, datetime):
t = t.timetz()
self.assertEqual(str(t), "01:02:03" + tag)
else:
self.assertRaises(ValueError, str, t)
def test_tzinfo_classes(self):
cls = self.theclass
class C1(tzinfo):
def utcoffset(self, dt): return None
def dst(self, dt): return None
def tzname(self, dt): return None
for t in (cls(1, 1, 1),
cls(1, 1, 1, tzinfo=None),
cls(1, 1, 1, tzinfo=C1())):
self.assertTrue(t.utcoffset() is None)
self.assertTrue(t.dst() is None)
self.assertTrue(t.tzname() is None)
class C3(tzinfo):
def utcoffset(self, dt): return timedelta(minutes=-1439)
def dst(self, dt): return timedelta(minutes=1439)
def tzname(self, dt): return "aname"
t = cls(1, 1, 1, tzinfo=C3())
self.assertEqual(t.utcoffset(), timedelta(minutes=-1439))
self.assertEqual(t.dst(), timedelta(minutes=1439))
self.assertEqual(t.tzname(), "aname")
# Wrong types.
class C4(tzinfo):
def utcoffset(self, dt): return "aname"
def dst(self, dt): return 7
def tzname(self, dt): return 0
t = cls(1, 1, 1, tzinfo=C4())
self.assertRaises(TypeError, t.utcoffset)
self.assertRaises(TypeError, t.dst)
self.assertRaises(TypeError, t.tzname)
# Offset out of range.
class C6(tzinfo):
def utcoffset(self, dt): return timedelta(hours=-24)
def dst(self, dt): return timedelta(hours=24)
t = cls(1, 1, 1, tzinfo=C6())
self.assertRaises(ValueError, t.utcoffset)
self.assertRaises(ValueError, t.dst)
# Not a whole number of minutes.
class C7(tzinfo):
def utcoffset(self, dt): return timedelta(seconds=61)
def dst(self, dt): return timedelta(microseconds=-81)
t = cls(1, 1, 1, tzinfo=C7())
self.assertRaises(ValueError, t.utcoffset)
self.assertRaises(ValueError, t.dst)
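        # (Summarizing the contract exercised above: utcoffset() and dst()
        # must return None or a timedelta that is a whole number of minutes
        # strictly between -24 and +24 hours, and tzname() must return
        # None or a string.)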
def test_aware_compare(self):
cls = self.theclass
# Ensure that utcoffset() gets ignored if the comparands have
# the same tzinfo member.
class OperandDependentOffset(tzinfo):
def utcoffset(self, t):
if t.minute < 10:
# d0 and d1 equal after adjustment
return timedelta(minutes=t.minute)
else:
# d2 off in the weeds
return timedelta(minutes=59)
base = cls(8, 9, 10, tzinfo=OperandDependentOffset())
d0 = base.replace(minute=3)
d1 = base.replace(minute=9)
d2 = base.replace(minute=11)
for x in d0, d1, d2:
for y in d0, d1, d2:
got = cmp(x, y)
expected = cmp(x.minute, y.minute)
self.assertEqual(got, expected)
        # However, if they're different members, utcoffset is not ignored.
        # Note that a time can't actually have an operand-dependent offset,
        # though (and time.utcoffset() passes None to tzinfo.utcoffset()),
        # so skip this test for time.
if cls is not time:
d0 = base.replace(minute=3, tzinfo=OperandDependentOffset())
d1 = base.replace(minute=9, tzinfo=OperandDependentOffset())
d2 = base.replace(minute=11, tzinfo=OperandDependentOffset())
for x in d0, d1, d2:
for y in d0, d1, d2:
got = cmp(x, y)
if (x is d0 or x is d1) and (y is d0 or y is d1):
expected = 0
elif x is y is d2:
expected = 0
elif x is d2:
expected = -1
else:
assert y is d2
expected = 1
self.assertEqual(got, expected)
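        # (The rule exercised above: operands sharing the *same* tzinfo
        # object are compared on their raw fields, with utcoffset() never
        # consulted; otherwise each operand is first normalized by
        # subtracting its own utcoffset().)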
# Testing time objects with a non-None tzinfo.
class TestTimeTZ(TestTime, TZInfoBase, unittest.TestCase):
theclass = time
def test_empty(self):
t = self.theclass()
self.assertEqual(t.hour, 0)
self.assertEqual(t.minute, 0)
self.assertEqual(t.second, 0)
self.assertEqual(t.microsecond, 0)
self.assertTrue(t.tzinfo is None)
def test_zones(self):
est = FixedOffset(-300, "EST", 1)
utc = FixedOffset(0, "UTC", -2)
met = FixedOffset(60, "MET", 3)
t1 = time( 7, 47, tzinfo=est)
t2 = time(12, 47, tzinfo=utc)
t3 = time(13, 47, tzinfo=met)
t4 = time(microsecond=40)
t5 = time(microsecond=40, tzinfo=utc)
self.assertEqual(t1.tzinfo, est)
self.assertEqual(t2.tzinfo, utc)
self.assertEqual(t3.tzinfo, met)
self.assertTrue(t4.tzinfo is None)
self.assertEqual(t5.tzinfo, utc)
self.assertEqual(t1.utcoffset(), timedelta(minutes=-300))
self.assertEqual(t2.utcoffset(), timedelta(minutes=0))
self.assertEqual(t3.utcoffset(), timedelta(minutes=60))
self.assertTrue(t4.utcoffset() is None)
self.assertRaises(TypeError, t1.utcoffset, "no args")
self.assertEqual(t1.tzname(), "EST")
self.assertEqual(t2.tzname(), "UTC")
self.assertEqual(t3.tzname(), "MET")
self.assertTrue(t4.tzname() is None)
self.assertRaises(TypeError, t1.tzname, "no args")
self.assertEqual(t1.dst(), timedelta(minutes=1))
self.assertEqual(t2.dst(), timedelta(minutes=-2))
self.assertEqual(t3.dst(), timedelta(minutes=3))
self.assertTrue(t4.dst() is None)
self.assertRaises(TypeError, t1.dst, "no args")
self.assertEqual(hash(t1), hash(t2))
self.assertEqual(hash(t1), hash(t3))
self.assertEqual(hash(t2), hash(t3))
self.assertEqual(t1, t2)
self.assertEqual(t1, t3)
self.assertEqual(t2, t3)
self.assertRaises(TypeError, lambda: t4 == t5) # mixed tz-aware & naive
self.assertRaises(TypeError, lambda: t4 < t5) # mixed tz-aware & naive
self.assertRaises(TypeError, lambda: t5 < t4) # mixed tz-aware & naive
self.assertEqual(str(t1), "07:47:00-05:00")
self.assertEqual(str(t2), "12:47:00+00:00")
self.assertEqual(str(t3), "13:47:00+01:00")
self.assertEqual(str(t4), "00:00:00.000040")
self.assertEqual(str(t5), "00:00:00.000040+00:00")
self.assertEqual(t1.isoformat(), "07:47:00-05:00")
self.assertEqual(t2.isoformat(), "12:47:00+00:00")
self.assertEqual(t3.isoformat(), "13:47:00+01:00")
self.assertEqual(t4.isoformat(), "00:00:00.000040")
self.assertEqual(t5.isoformat(), "00:00:00.000040+00:00")
d = 'datetime.time'
self.assertEqual(repr(t1), d + "(7, 47, tzinfo=est)")
self.assertEqual(repr(t2), d + "(12, 47, tzinfo=utc)")
self.assertEqual(repr(t3), d + "(13, 47, tzinfo=met)")
self.assertEqual(repr(t4), d + "(0, 0, 0, 40)")
self.assertEqual(repr(t5), d + "(0, 0, 0, 40, tzinfo=utc)")
self.assertEqual(t1.strftime("%H:%M:%S %%Z=%Z %%z=%z"),
"07:47:00 %Z=EST %z=-0500")
self.assertEqual(t2.strftime("%H:%M:%S %Z %z"), "12:47:00 UTC +0000")
self.assertEqual(t3.strftime("%H:%M:%S %Z %z"), "13:47:00 MET +0100")
yuck = FixedOffset(-1439, "%z %Z %%z%%Z")
t1 = time(23, 59, tzinfo=yuck)
self.assertEqual(t1.strftime("%H:%M %%Z='%Z' %%z='%z'"),
"23:59 %Z='%z %Z %%z%%Z' %z='-2359'")
# Check that an invalid tzname result raises an exception.
class Badtzname(tzinfo):
def tzname(self, dt): return 42
t = time(2, 3, 4, tzinfo=Badtzname())
self.assertEqual(t.strftime("%H:%M:%S"), "02:03:04")
self.assertRaises(TypeError, t.strftime, "%Z")
def test_hash_edge_cases(self):
# Offsets that overflow a basic time.
t1 = self.theclass(0, 1, 2, 3, tzinfo=FixedOffset(1439, ""))
t2 = self.theclass(0, 0, 2, 3, tzinfo=FixedOffset(1438, ""))
self.assertEqual(hash(t1), hash(t2))
t1 = self.theclass(23, 58, 6, 100, tzinfo=FixedOffset(-1000, ""))
t2 = self.theclass(23, 48, 6, 100, tzinfo=FixedOffset(-1010, ""))
self.assertEqual(hash(t1), hash(t2))
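        # (Each pair denotes the same moment after UTC adjustment modulo
        # 24 hours -- e.g. 00:01 at +23:59 and 00:00 at +23:58 both
        # normalize to 00:02 -- so their hashes must agree.)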
def test_pickling(self):
# Try one without a tzinfo.
args = 20, 59, 16, 64**2
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
# Try one with a tzinfo.
tinfo = PicklableFixedOffset(-300, 'cookie')
orig = self.theclass(5, 6, 7, tzinfo=tinfo)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
self.assertIsInstance(derived.tzinfo, PicklableFixedOffset)
self.assertEqual(derived.utcoffset(), timedelta(minutes=-300))
self.assertEqual(derived.tzname(), 'cookie')
def test_more_bool(self):
# Test cases with non-None tzinfo.
cls = self.theclass
t = cls(0, tzinfo=FixedOffset(-300, ""))
self.assertTrue(t)
t = cls(5, tzinfo=FixedOffset(-300, ""))
self.assertTrue(t)
t = cls(5, tzinfo=FixedOffset(300, ""))
self.assertTrue(not t)
t = cls(23, 59, tzinfo=FixedOffset(23*60 + 59, ""))
self.assertTrue(not t)
# Mostly ensuring this doesn't overflow internally.
t = cls(0, tzinfo=FixedOffset(23*60 + 59, ""))
self.assertTrue(t)
        # But this should yield a ValueError -- the utcoffset is bogus.
t = cls(0, tzinfo=FixedOffset(24*60, ""))
self.assertRaises(ValueError, lambda: bool(t))
# Likewise.
t = cls(0, tzinfo=FixedOffset(-24*60, ""))
self.assertRaises(ValueError, lambda: bool(t))
def test_replace(self):
cls = self.theclass
z100 = FixedOffset(100, "+100")
zm200 = FixedOffset(timedelta(minutes=-200), "-200")
args = [1, 2, 3, 4, z100]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("hour", 5),
("minute", 6),
("second", 7),
("microsecond", 8),
("tzinfo", zm200)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Ensure we can get rid of a tzinfo.
self.assertEqual(base.tzname(), "+100")
base2 = base.replace(tzinfo=None)
self.assertTrue(base2.tzinfo is None)
self.assertTrue(base2.tzname() is None)
# Ensure we can add one.
base3 = base2.replace(tzinfo=z100)
self.assertEqual(base, base3)
self.assertTrue(base.tzinfo is base3.tzinfo)
# Out of bounds.
base = cls(1)
self.assertRaises(ValueError, base.replace, hour=24)
self.assertRaises(ValueError, base.replace, minute=-1)
self.assertRaises(ValueError, base.replace, second=100)
self.assertRaises(ValueError, base.replace, microsecond=1000000)
def test_mixed_compare(self):
t1 = time(1, 2, 3)
t2 = time(1, 2, 3)
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=None)
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=FixedOffset(None, ""))
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=FixedOffset(0, ""))
self.assertRaises(TypeError, lambda: t1 == t2)
# In time w/ identical tzinfo objects, utcoffset is ignored.
class Varies(tzinfo):
def __init__(self):
self.offset = timedelta(minutes=22)
def utcoffset(self, t):
self.offset += timedelta(minutes=1)
return self.offset
v = Varies()
t1 = t2.replace(tzinfo=v)
t2 = t2.replace(tzinfo=v)
self.assertEqual(t1.utcoffset(), timedelta(minutes=23))
self.assertEqual(t2.utcoffset(), timedelta(minutes=24))
self.assertEqual(t1, t2)
# But if they're not identical, it isn't ignored.
t2 = t2.replace(tzinfo=Varies())
self.assertTrue(t1 < t2) # t1's offset counter still going up
def test_subclass_timetz(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.hour + self.second
args = 4, 5, 6, 500, FixedOffset(-300, "EST", 1)
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.utcoffset(), dt2.utcoffset())
self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.second - 7)
# Testing datetime objects with a non-None tzinfo.
class TestDateTimeTZ(TestDateTime, TZInfoBase, unittest.TestCase):
theclass = datetime
def test_trivial(self):
dt = self.theclass(1, 2, 3, 4, 5, 6, 7)
self.assertEqual(dt.year, 1)
self.assertEqual(dt.month, 2)
self.assertEqual(dt.day, 3)
self.assertEqual(dt.hour, 4)
self.assertEqual(dt.minute, 5)
self.assertEqual(dt.second, 6)
self.assertEqual(dt.microsecond, 7)
self.assertEqual(dt.tzinfo, None)
def test_even_more_compare(self):
# The test_compare() and test_more_compare() inherited from TestDate
# and TestDateTime covered non-tzinfo cases.
# Smallest possible after UTC adjustment.
t1 = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, ""))
# Largest possible after UTC adjustment.
t2 = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999,
tzinfo=FixedOffset(-1439, ""))
# Make sure those compare correctly, and w/o overflow.
self.assertTrue(t1 < t2)
self.assertTrue(t1 != t2)
self.assertTrue(t2 > t1)
self.assertTrue(t1 == t1)
self.assertTrue(t2 == t2)
        # Equal after adjustment.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""))
t2 = self.theclass(2, 1, 1, 3, 13, tzinfo=FixedOffset(3*60+13+2, ""))
self.assertEqual(t1, t2)
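        # (Worked out: t1 is 23:59 at UTC+00:01, i.e. 23:58 UTC; t2 is
        # 03:13 at UTC+03:15, i.e. 23:58 UTC on the previous day -- the
        # same moment.)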
# Change t1 not to subtract a minute, and t1 should be larger.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(0, ""))
self.assertTrue(t1 > t2)
# Change t1 to subtract 2 minutes, and t1 should be smaller.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(2, ""))
self.assertTrue(t1 < t2)
# Back to the original t1, but make seconds resolve it.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""),
second=1)
self.assertTrue(t1 > t2)
# Likewise, but make microseconds resolve it.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""),
microsecond=1)
self.assertTrue(t1 > t2)
# Make t2 naive and it should fail.
t2 = self.theclass.min
self.assertRaises(TypeError, lambda: t1 == t2)
self.assertEqual(t2, t2)
# It's also naive if it has tzinfo but tzinfo.utcoffset() is None.
class Naive(tzinfo):
def utcoffset(self, dt): return None
t2 = self.theclass(5, 6, 7, tzinfo=Naive())
self.assertRaises(TypeError, lambda: t1 == t2)
self.assertEqual(t2, t2)
# OTOH, it's OK to compare two of these mixing the two ways of being
# naive.
t1 = self.theclass(5, 6, 7)
self.assertEqual(t1, t2)
        # Try a bogus utcoffset.
class Bogus(tzinfo):
def utcoffset(self, dt):
return timedelta(minutes=1440) # out of bounds
t1 = self.theclass(2, 2, 2, tzinfo=Bogus())
t2 = self.theclass(2, 2, 2, tzinfo=FixedOffset(0, ""))
self.assertRaises(ValueError, lambda: t1 == t2)
def test_pickling(self):
# Try one without a tzinfo.
args = 6, 7, 23, 20, 59, 1, 64**2
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
# Try one with a tzinfo.
tinfo = PicklableFixedOffset(-300, 'cookie')
orig = self.theclass(*args, **{'tzinfo': tinfo})
derived = self.theclass(1, 1, 1, tzinfo=FixedOffset(0, "", 0))
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
self.assertIsInstance(derived.tzinfo, PicklableFixedOffset)
self.assertEqual(derived.utcoffset(), timedelta(minutes=-300))
self.assertEqual(derived.tzname(), 'cookie')
def test_extreme_hashes(self):
# If an attempt is made to hash these via subtracting the offset
# then hashing a datetime object, OverflowError results. The
# Python implementation used to blow up here.
t = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, ""))
hash(t)
t = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999,
tzinfo=FixedOffset(-1439, ""))
hash(t)
# OTOH, an OOB offset should blow up.
t = self.theclass(5, 5, 5, tzinfo=FixedOffset(-1440, ""))
self.assertRaises(ValueError, hash, t)
def test_zones(self):
est = FixedOffset(-300, "EST")
utc = FixedOffset(0, "UTC")
met = FixedOffset(60, "MET")
t1 = datetime(2002, 3, 19, 7, 47, tzinfo=est)
t2 = datetime(2002, 3, 19, 12, 47, tzinfo=utc)
t3 = datetime(2002, 3, 19, 13, 47, tzinfo=met)
self.assertEqual(t1.tzinfo, est)
self.assertEqual(t2.tzinfo, utc)
self.assertEqual(t3.tzinfo, met)
self.assertEqual(t1.utcoffset(), timedelta(minutes=-300))
self.assertEqual(t2.utcoffset(), timedelta(minutes=0))
self.assertEqual(t3.utcoffset(), timedelta(minutes=60))
self.assertEqual(t1.tzname(), "EST")
self.assertEqual(t2.tzname(), "UTC")
self.assertEqual(t3.tzname(), "MET")
self.assertEqual(hash(t1), hash(t2))
self.assertEqual(hash(t1), hash(t3))
self.assertEqual(hash(t2), hash(t3))
self.assertEqual(t1, t2)
self.assertEqual(t1, t3)
self.assertEqual(t2, t3)
self.assertEqual(str(t1), "2002-03-19 07:47:00-05:00")
self.assertEqual(str(t2), "2002-03-19 12:47:00+00:00")
self.assertEqual(str(t3), "2002-03-19 13:47:00+01:00")
d = 'datetime.datetime(2002, 3, 19, '
self.assertEqual(repr(t1), d + "7, 47, tzinfo=est)")
self.assertEqual(repr(t2), d + "12, 47, tzinfo=utc)")
self.assertEqual(repr(t3), d + "13, 47, tzinfo=met)")
def test_combine(self):
met = FixedOffset(60, "MET")
d = date(2002, 3, 4)
tz = time(18, 45, 3, 1234, tzinfo=met)
dt = datetime.combine(d, tz)
self.assertEqual(dt, datetime(2002, 3, 4, 18, 45, 3, 1234,
tzinfo=met))
def test_extract(self):
met = FixedOffset(60, "MET")
dt = self.theclass(2002, 3, 4, 18, 45, 3, 1234, tzinfo=met)
self.assertEqual(dt.date(), date(2002, 3, 4))
self.assertEqual(dt.time(), time(18, 45, 3, 1234))
self.assertEqual(dt.timetz(), time(18, 45, 3, 1234, tzinfo=met))
def test_tz_aware_arithmetic(self):
import random
now = self.theclass.now()
tz55 = FixedOffset(-330, "west 5:30")
timeaware = now.time().replace(tzinfo=tz55)
nowaware = self.theclass.combine(now.date(), timeaware)
self.assertTrue(nowaware.tzinfo is tz55)
self.assertEqual(nowaware.timetz(), timeaware)
# Can't mix aware and non-aware.
self.assertRaises(TypeError, lambda: now - nowaware)
self.assertRaises(TypeError, lambda: nowaware - now)
# And adding datetime's doesn't make sense, aware or not.
self.assertRaises(TypeError, lambda: now + nowaware)
self.assertRaises(TypeError, lambda: nowaware + now)
self.assertRaises(TypeError, lambda: nowaware + nowaware)
# Subtracting should yield 0.
self.assertEqual(now - now, timedelta(0))
self.assertEqual(nowaware - nowaware, timedelta(0))
# Adding a delta should preserve tzinfo.
delta = timedelta(weeks=1, minutes=12, microseconds=5678)
nowawareplus = nowaware + delta
self.assertTrue(nowaware.tzinfo is tz55)
nowawareplus2 = delta + nowaware
self.assertTrue(nowawareplus2.tzinfo is tz55)
self.assertEqual(nowawareplus, nowawareplus2)
# that - delta should be what we started with, and that - what we
# started with should be delta.
diff = nowawareplus - delta
self.assertTrue(diff.tzinfo is tz55)
self.assertEqual(nowaware, diff)
self.assertRaises(TypeError, lambda: delta - nowawareplus)
self.assertEqual(nowawareplus - nowaware, delta)
# Make up a random timezone.
tzr = FixedOffset(random.randrange(-1439, 1440), "randomtimezone")
# Attach it to nowawareplus.
nowawareplus = nowawareplus.replace(tzinfo=tzr)
self.assertTrue(nowawareplus.tzinfo is tzr)
# Make sure the difference takes the timezone adjustments into account.
got = nowaware - nowawareplus
# Expected: (nowaware base - nowaware offset) -
# (nowawareplus base - nowawareplus offset) =
# (nowaware base - nowawareplus base) +
# (nowawareplus offset - nowaware offset) =
# -delta + nowawareplus offset - nowaware offset
expected = nowawareplus.utcoffset() - nowaware.utcoffset() - delta
self.assertEqual(got, expected)
# Try max possible difference.
min = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, "min"))
max = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999,
tzinfo=FixedOffset(-1439, "max"))
maxdiff = max - min
self.assertEqual(maxdiff, self.theclass.max - self.theclass.min +
timedelta(minutes=2*1439))
def test_tzinfo_now(self):
meth = self.theclass.now
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth()
# Try with and without naming the keyword.
off42 = FixedOffset(42, "42")
another = meth(off42)
again = meth(tz=off42)
self.assertTrue(another.tzinfo is again.tzinfo)
self.assertEqual(another.utcoffset(), timedelta(minutes=42))
# Bad argument with and w/o naming the keyword.
self.assertRaises(TypeError, meth, 16)
self.assertRaises(TypeError, meth, tzinfo=16)
# Bad keyword name.
self.assertRaises(TypeError, meth, tinfo=off42)
# Too many args.
self.assertRaises(TypeError, meth, off42, off42)
# We don't know which time zone we're in, and don't have a tzinfo
# class to represent it, so seeing whether a tz argument actually
# does a conversion is tricky.
weirdtz = FixedOffset(timedelta(hours=15, minutes=58), "weirdtz", 0)
utc = FixedOffset(0, "utc", 0)
for dummy in range(3):
now = datetime.now(weirdtz)
self.assertTrue(now.tzinfo is weirdtz)
utcnow = datetime.utcnow().replace(tzinfo=utc)
now2 = utcnow.astimezone(weirdtz)
if abs(now - now2) < timedelta(seconds=30):
break
# Else the code is broken, or more than 30 seconds passed between
# calls; assuming the latter, just try again.
else:
# Three strikes and we're out.
self.fail("utcnow(), now(tz), or astimezone() may be broken")
def test_tzinfo_fromtimestamp(self):
import time
meth = self.theclass.fromtimestamp
ts = time.time()
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth(ts)
# Try with and without naming the keyword.
off42 = FixedOffset(42, "42")
another = meth(ts, off42)
again = meth(ts, tz=off42)
self.assertTrue(another.tzinfo is again.tzinfo)
self.assertEqual(another.utcoffset(), timedelta(minutes=42))
# Bad argument with and w/o naming the keyword.
self.assertRaises(TypeError, meth, ts, 16)
self.assertRaises(TypeError, meth, ts, tzinfo=16)
# Bad keyword name.
self.assertRaises(TypeError, meth, ts, tinfo=off42)
# Too many args.
self.assertRaises(TypeError, meth, ts, off42, off42)
# Too few args.
self.assertRaises(TypeError, meth)
# Try to make sure tz= actually does some conversion.
timestamp = 1000000000
utcdatetime = datetime.utcfromtimestamp(timestamp)
# In POSIX (epoch 1970), that's 2001-09-09 01:46:40 UTC, give or take.
# But on some flavor of Mac, it's nowhere near that. So we can't have
# any idea here what time that actually is, we can only test that
# relative changes match.
utcoffset = timedelta(hours=-15, minutes=39) # arbitrary, but not zero
tz = FixedOffset(utcoffset, "tz", 0)
expected = utcdatetime + utcoffset
got = datetime.fromtimestamp(timestamp, tz)
self.assertEqual(expected, got.replace(tzinfo=None))
def test_tzinfo_utcnow(self):
meth = self.theclass.utcnow
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth()
# Try with and without naming the keyword; for whatever reason,
# utcnow() doesn't accept a tzinfo argument.
off42 = FixedOffset(42, "42")
self.assertRaises(TypeError, meth, off42)
self.assertRaises(TypeError, meth, tzinfo=off42)
def test_tzinfo_utcfromtimestamp(self):
import time
meth = self.theclass.utcfromtimestamp
ts = time.time()
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth(ts)
# Try with and without naming the keyword; for whatever reason,
# utcfromtimestamp() doesn't accept a tzinfo argument.
off42 = FixedOffset(42, "42")
self.assertRaises(TypeError, meth, ts, off42)
self.assertRaises(TypeError, meth, ts, tzinfo=off42)
def test_tzinfo_timetuple(self):
# TestDateTime tested most of this. datetime adds a twist to the
# DST flag.
class DST(tzinfo):
def __init__(self, dstvalue):
if isinstance(dstvalue, int):
dstvalue = timedelta(minutes=dstvalue)
self.dstvalue = dstvalue
def dst(self, dt):
return self.dstvalue
cls = self.theclass
for dstvalue, flag in (-33, 1), (33, 1), (0, 0), (None, -1):
d = cls(1, 1, 1, 10, 20, 30, 40, tzinfo=DST(dstvalue))
t = d.timetuple()
self.assertEqual(1, t.tm_year)
self.assertEqual(1, t.tm_mon)
self.assertEqual(1, t.tm_mday)
self.assertEqual(10, t.tm_hour)
self.assertEqual(20, t.tm_min)
self.assertEqual(30, t.tm_sec)
self.assertEqual(0, t.tm_wday)
self.assertEqual(1, t.tm_yday)
self.assertEqual(flag, t.tm_isdst)
# dst() returns wrong type.
self.assertRaises(TypeError, cls(1, 1, 1, tzinfo=DST("x")).timetuple)
# dst() at the edge.
self.assertEqual(cls(1,1,1, tzinfo=DST(1439)).timetuple().tm_isdst, 1)
self.assertEqual(cls(1,1,1, tzinfo=DST(-1439)).timetuple().tm_isdst, 1)
# dst() out of range.
self.assertRaises(ValueError, cls(1,1,1, tzinfo=DST(1440)).timetuple)
self.assertRaises(ValueError, cls(1,1,1, tzinfo=DST(-1440)).timetuple)
def test_utctimetuple(self):
class DST(tzinfo):
def __init__(self, dstvalue):
if isinstance(dstvalue, int):
dstvalue = timedelta(minutes=dstvalue)
self.dstvalue = dstvalue
def dst(self, dt):
return self.dstvalue
cls = self.theclass
# This can't work: DST didn't implement utcoffset.
self.assertRaises(NotImplementedError,
cls(1, 1, 1, tzinfo=DST(0)).utcoffset)
class UOFS(DST):
def __init__(self, uofs, dofs=None):
DST.__init__(self, dofs)
self.uofs = timedelta(minutes=uofs)
def utcoffset(self, dt):
return self.uofs
# Ensure tm_isdst is 0 regardless of what dst() says: DST is never
# in effect for a UTC time.
for dstvalue in -33, 33, 0, None:
d = cls(1, 2, 3, 10, 20, 30, 40, tzinfo=UOFS(-53, dstvalue))
t = d.utctimetuple()
self.assertEqual(d.year, t.tm_year)
self.assertEqual(d.month, t.tm_mon)
self.assertEqual(d.day, t.tm_mday)
            self.assertEqual(11, t.tm_hour) # 20 min + 53 min = 1 h + 13 min
self.assertEqual(13, t.tm_min)
self.assertEqual(d.second, t.tm_sec)
self.assertEqual(d.weekday(), t.tm_wday)
self.assertEqual(d.toordinal() - date(1, 1, 1).toordinal() + 1,
t.tm_yday)
self.assertEqual(0, t.tm_isdst)
# At the edges, UTC adjustment can normalize into years out-of-range
# for a datetime object. Ensure that a correct timetuple is
# created anyway.
tiny = cls(MINYEAR, 1, 1, 0, 0, 37, tzinfo=UOFS(1439))
# That goes back 1 minute less than a full day.
t = tiny.utctimetuple()
self.assertEqual(t.tm_year, MINYEAR-1)
self.assertEqual(t.tm_mon, 12)
self.assertEqual(t.tm_mday, 31)
self.assertEqual(t.tm_hour, 0)
self.assertEqual(t.tm_min, 1)
self.assertEqual(t.tm_sec, 37)
self.assertEqual(t.tm_yday, 366) # "year 0" is a leap year
self.assertEqual(t.tm_isdst, 0)
huge = cls(MAXYEAR, 12, 31, 23, 59, 37, 999999, tzinfo=UOFS(-1439))
# That goes forward 1 minute less than a full day.
t = huge.utctimetuple()
self.assertEqual(t.tm_year, MAXYEAR+1)
self.assertEqual(t.tm_mon, 1)
self.assertEqual(t.tm_mday, 1)
self.assertEqual(t.tm_hour, 23)
self.assertEqual(t.tm_min, 58)
self.assertEqual(t.tm_sec, 37)
self.assertEqual(t.tm_yday, 1)
self.assertEqual(t.tm_isdst, 0)
def test_tzinfo_isoformat(self):
zero = FixedOffset(0, "+00:00")
plus = FixedOffset(220, "+03:40")
minus = FixedOffset(-231, "-03:51")
unknown = FixedOffset(None, "")
cls = self.theclass
datestr = '0001-02-03'
for ofs in None, zero, plus, minus, unknown:
for us in 0, 987001:
d = cls(1, 2, 3, 4, 5, 59, us, tzinfo=ofs)
timestr = '04:05:59' + (us and '.987001' or '')
ofsstr = ofs is not None and d.tzname() or ''
tailstr = timestr + ofsstr
iso = d.isoformat()
self.assertEqual(iso, datestr + 'T' + tailstr)
self.assertEqual(iso, d.isoformat('T'))
self.assertEqual(d.isoformat('k'), datestr + 'k' + tailstr)
self.assertEqual(str(d), datestr + ' ' + tailstr)
def test_replace(self):
cls = self.theclass
z100 = FixedOffset(100, "+100")
zm200 = FixedOffset(timedelta(minutes=-200), "-200")
args = [1, 2, 3, 4, 5, 6, 7, z100]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("year", 2),
("month", 3),
("day", 4),
("hour", 5),
("minute", 6),
("second", 7),
("microsecond", 8),
("tzinfo", zm200)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Ensure we can get rid of a tzinfo.
self.assertEqual(base.tzname(), "+100")
base2 = base.replace(tzinfo=None)
self.assertTrue(base2.tzinfo is None)
self.assertTrue(base2.tzname() is None)
# Ensure we can add one.
base3 = base2.replace(tzinfo=z100)
self.assertEqual(base, base3)
self.assertTrue(base.tzinfo is base3.tzinfo)
# Out of bounds.
base = cls(2000, 2, 29)
self.assertRaises(ValueError, base.replace, year=2001)
def test_more_astimezone(self):
# The inherited test_astimezone covered some trivial and error cases.
fnone = FixedOffset(None, "None")
f44m = FixedOffset(44, "44")
fm5h = FixedOffset(-timedelta(hours=5), "m300")
dt = self.theclass.now(tz=f44m)
self.assertTrue(dt.tzinfo is f44m)
# Replacing with degenerate tzinfo raises an exception.
self.assertRaises(ValueError, dt.astimezone, fnone)
# Ditto with None tz.
self.assertRaises(TypeError, dt.astimezone, None)
# Replacing with same tzinfo makes no change.
x = dt.astimezone(dt.tzinfo)
self.assertTrue(x.tzinfo is f44m)
self.assertEqual(x.date(), dt.date())
self.assertEqual(x.time(), dt.time())
# Replacing with different tzinfo does adjust.
got = dt.astimezone(fm5h)
self.assertTrue(got.tzinfo is fm5h)
self.assertEqual(got.utcoffset(), timedelta(hours=-5))
expected = dt - dt.utcoffset() # in effect, convert to UTC
expected += fm5h.utcoffset(dt) # and from there to local time
expected = expected.replace(tzinfo=fm5h) # and attach new tzinfo
self.assertEqual(got.date(), expected.date())
self.assertEqual(got.time(), expected.time())
self.assertEqual(got.timetz(), expected.timetz())
self.assertTrue(got.tzinfo is expected.tzinfo)
self.assertEqual(got, expected)
def test_aware_subtract(self):
cls = self.theclass
# Ensure that utcoffset() is ignored when the operands have the
# same tzinfo member.
class OperandDependentOffset(tzinfo):
def utcoffset(self, t):
if t.minute < 10:
# d0 and d1 equal after adjustment
return timedelta(minutes=t.minute)
else:
# d2 off in the weeds
return timedelta(minutes=59)
base = cls(8, 9, 10, 11, 12, 13, 14, tzinfo=OperandDependentOffset())
d0 = base.replace(minute=3)
d1 = base.replace(minute=9)
d2 = base.replace(minute=11)
for x in d0, d1, d2:
for y in d0, d1, d2:
got = x - y
expected = timedelta(minutes=x.minute - y.minute)
self.assertEqual(got, expected)
# OTOH, if the tzinfo members are distinct, utcoffsets aren't
# ignored.
base = cls(8, 9, 10, 11, 12, 13, 14)
d0 = base.replace(minute=3, tzinfo=OperandDependentOffset())
d1 = base.replace(minute=9, tzinfo=OperandDependentOffset())
d2 = base.replace(minute=11, tzinfo=OperandDependentOffset())
for x in d0, d1, d2:
for y in d0, d1, d2:
got = x - y
if (x is d0 or x is d1) and (y is d0 or y is d1):
expected = timedelta(0)
elif x is y is d2:
expected = timedelta(0)
elif x is d2:
expected = timedelta(minutes=(11-59)-0)
else:
assert y is d2
expected = timedelta(minutes=0-(11-59))
self.assertEqual(got, expected)
def test_mixed_compare(self):
t1 = datetime(1, 2, 3, 4, 5, 6, 7)
t2 = datetime(1, 2, 3, 4, 5, 6, 7)
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=None)
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=FixedOffset(None, ""))
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=FixedOffset(0, ""))
self.assertRaises(TypeError, lambda: t1 == t2)
# In datetime w/ identical tzinfo objects, utcoffset is ignored.
class Varies(tzinfo):
def __init__(self):
self.offset = timedelta(minutes=22)
def utcoffset(self, t):
self.offset += timedelta(minutes=1)
return self.offset
v = Varies()
t1 = t2.replace(tzinfo=v)
t2 = t2.replace(tzinfo=v)
self.assertEqual(t1.utcoffset(), timedelta(minutes=23))
self.assertEqual(t2.utcoffset(), timedelta(minutes=24))
self.assertEqual(t1, t2)
# But if they're not identical, it isn't ignored.
t2 = t2.replace(tzinfo=Varies())
self.assertTrue(t1 < t2) # t1's offset counter still going up
def test_subclass_datetimetz(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.hour + self.year
args = 2002, 12, 31, 4, 5, 6, 500, FixedOffset(-300, "EST", 1)
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.utcoffset(), dt2.utcoffset())
self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.year - 7)
# Pain to set up DST-aware tzinfo classes.
def first_sunday_on_or_after(dt):
days_to_go = 6 - dt.weekday()
if days_to_go:
dt += timedelta(days_to_go)
return dt
ZERO = timedelta(0)
HOUR = timedelta(hours=1)
DAY = timedelta(days=1)
# In the US, DST starts at 2am (standard time) on the first Sunday in April.
DSTSTART = datetime(1, 4, 1, 2)
# and ends at 2am (DST time; 1am standard time) on the last Sunday of Oct,
# which is the first Sunday on or after Oct 25. Because we view 1:MM as
# being standard time on that day, there is no spelling in local time of
# the last hour of DST (that's 1:MM DST, but 1:MM is taken as standard time).
DSTEND = datetime(1, 10, 25, 1)
class USTimeZone(tzinfo):
def __init__(self, hours, reprname, stdname, dstname):
self.stdoffset = timedelta(hours=hours)
self.reprname = reprname
self.stdname = stdname
self.dstname = dstname
def __repr__(self):
return self.reprname
def tzname(self, dt):
if self.dst(dt):
return self.dstname
else:
return self.stdname
def utcoffset(self, dt):
return self.stdoffset + self.dst(dt)
def dst(self, dt):
if dt is None or dt.tzinfo is None:
# An exception instead may be sensible here, in one or more of
# the cases.
return ZERO
assert dt.tzinfo is self
# Find first Sunday in April.
start = first_sunday_on_or_after(DSTSTART.replace(year=dt.year))
assert start.weekday() == 6 and start.month == 4 and start.day <= 7
# Find last Sunday in October.
end = first_sunday_on_or_after(DSTEND.replace(year=dt.year))
assert end.weekday() == 6 and end.month == 10 and end.day >= 25
# Can't compare naive to aware objects, so strip the timezone from
# dt first.
if start <= dt.replace(tzinfo=None) < end:
return HOUR
else:
return ZERO
Eastern = USTimeZone(-5, "Eastern", "EST", "EDT")
Central = USTimeZone(-6, "Central", "CST", "CDT")
Mountain = USTimeZone(-7, "Mountain", "MST", "MDT")
Pacific = USTimeZone(-8, "Pacific", "PST", "PDT")
utc_real = FixedOffset(0, "UTC", 0)
# For better test coverage, we want another flavor of UTC that's west of
# the Eastern and Pacific timezones.
utc_fake = FixedOffset(-12*60, "UTCfake", 0)
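# Illustrative sketch (comments only, not executed) of how USTimeZone resolves
# the 2002 transitions exercised below -- the sample datetimes are our own:
#
#     aware = datetime(2002, 6, 1, 12, tzinfo=Eastern)   # inside the DST window
#     aware.dst()        # -> HOUR (first Sun in Apr, 2:00 <= wall < last Sun in Oct, 1:00)
#     aware.utcoffset()  # -> timedelta(hours=-4), i.e. stdoffset + dst()
#     aware.tzname()     # -> "EDT"
#
#     aware = datetime(2002, 1, 1, 12, tzinfo=Eastern)   # outside the window
#     aware.dst()        # -> ZERO
#     aware.utcoffset()  # -> timedelta(hours=-5)
#     aware.tzname()     # -> "EST"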
class TestTimezoneConversions(unittest.TestCase):
# The DST switch times for 2002, in std time.
dston = datetime(2002, 4, 7, 2)
dstoff = datetime(2002, 10, 27, 1)
theclass = datetime
# Check a time that's inside DST.
def checkinside(self, dt, tz, utc, dston, dstoff):
self.assertEqual(dt.dst(), HOUR)
# Conversion to our own timezone is always an identity.
self.assertEqual(dt.astimezone(tz), dt)
asutc = dt.astimezone(utc)
there_and_back = asutc.astimezone(tz)
# Conversion to UTC and back isn't always an identity here,
# because there are redundant spellings (in local time) of
# UTC time when DST begins: the clock jumps from 1:59:59
# to 3:00:00, and a local time of 2:MM:SS doesn't really
# make sense then. The classes above treat 2:MM:SS as
# daylight time then (it's "after 2am"), really an alias
# for 1:MM:SS standard time. The latter form is what
# conversion back from UTC produces.
if dt.date() == dston.date() and dt.hour == 2:
# We're in the redundant hour, and coming back from
# UTC gives the 1:MM:SS standard-time spelling.
self.assertEqual(there_and_back + HOUR, dt)
            # Although dt was considered to be in daylight
            # time, there_and_back is not.
self.assertEqual(there_and_back.dst(), ZERO)
# They're the same times in UTC.
self.assertEqual(there_and_back.astimezone(utc),
dt.astimezone(utc))
else:
# We're not in the redundant hour.
self.assertEqual(dt, there_and_back)
# Because we have a redundant spelling when DST begins, there is
# (unfortunately) an hour when DST ends that can't be spelled at all in
# local time. When DST ends, the clock jumps from 1:59 back to 1:00
# again. The hour 1:MM DST has no spelling then: 1:MM is taken to be
# standard time. 1:MM DST == 0:MM EST, but 0:MM is taken to be
# daylight time. The hour 1:MM daylight == 0:MM standard can't be
# expressed in local time. Nevertheless, we want conversion back
# from UTC to mimic the local clock's "repeat an hour" behavior.
nexthour_utc = asutc + HOUR
nexthour_tz = nexthour_utc.astimezone(tz)
if dt.date() == dstoff.date() and dt.hour == 0:
# We're in the hour before the last DST hour. The last DST hour
# is ineffable. We want the conversion back to repeat 1:MM.
self.assertEqual(nexthour_tz, dt.replace(hour=1))
nexthour_utc += HOUR
nexthour_tz = nexthour_utc.astimezone(tz)
self.assertEqual(nexthour_tz, dt.replace(hour=1))
else:
self.assertEqual(nexthour_tz - dt, HOUR)
# Check a time that's outside DST.
def checkoutside(self, dt, tz, utc):
self.assertEqual(dt.dst(), ZERO)
# Conversion to our own timezone is always an identity.
self.assertEqual(dt.astimezone(tz), dt)
# Converting to UTC and back is an identity too.
asutc = dt.astimezone(utc)
there_and_back = asutc.astimezone(tz)
self.assertEqual(dt, there_and_back)
def convert_between_tz_and_utc(self, tz, utc):
dston = self.dston.replace(tzinfo=tz)
# Because 1:MM on the day DST ends is taken as being standard time,
# there is no spelling in tz for the last hour of daylight time.
# For purposes of the test, the last hour of DST is 0:MM, which is
# taken as being daylight time (and 1:MM is taken as being standard
# time).
dstoff = self.dstoff.replace(tzinfo=tz)
for delta in (timedelta(weeks=13),
DAY,
HOUR,
timedelta(minutes=1),
timedelta(microseconds=1)):
self.checkinside(dston, tz, utc, dston, dstoff)
for during in dston + delta, dstoff - delta:
self.checkinside(during, tz, utc, dston, dstoff)
self.checkoutside(dstoff, tz, utc)
for outside in dston - delta, dstoff + delta:
self.checkoutside(outside, tz, utc)
def test_easy(self):
# Despite the name of this test, the endcases are excruciating.
self.convert_between_tz_and_utc(Eastern, utc_real)
self.convert_between_tz_and_utc(Pacific, utc_real)
self.convert_between_tz_and_utc(Eastern, utc_fake)
self.convert_between_tz_and_utc(Pacific, utc_fake)
# The next is really dancing near the edge. It works because
# Pacific and Eastern are far enough apart that their "problem
# hours" don't overlap.
self.convert_between_tz_and_utc(Eastern, Pacific)
self.convert_between_tz_and_utc(Pacific, Eastern)
# OTOH, these fail! Don't enable them. The difficulty is that
# the edge case tests assume that every hour is representable in
# the "utc" class. This is always true for a fixed-offset tzinfo
        # class (like utc_real and utc_fake), but not for Eastern or Central.
# For these adjacent DST-aware time zones, the range of time offsets
# tested ends up creating hours in the one that aren't representable
# in the other. For the same reason, we would see failures in the
# Eastern vs Pacific tests too if we added 3*HOUR to the list of
# offset deltas in convert_between_tz_and_utc().
#
# self.convert_between_tz_and_utc(Eastern, Central) # can't work
# self.convert_between_tz_and_utc(Central, Eastern) # can't work
def test_tricky(self):
# 22:00 on day before daylight starts.
fourback = self.dston - timedelta(hours=4)
ninewest = FixedOffset(-9*60, "-0900", 0)
fourback = fourback.replace(tzinfo=ninewest)
# 22:00-0900 is 7:00 UTC == 2:00 EST == 3:00 DST. Since it's "after
# 2", we should get the 3 spelling.
# If we plug 22:00 the day before into Eastern, it "looks like std
# time", so its offset is returned as -5, and -5 - -9 = 4. Adding 4
# to 22:00 lands on 2:00, which makes no sense in local time (the
# local clock jumps from 1 to 3). The point here is to make sure we
# get the 3 spelling.
expected = self.dston.replace(hour=3)
got = fourback.astimezone(Eastern).replace(tzinfo=None)
self.assertEqual(expected, got)
# Similar, but map to 6:00 UTC == 1:00 EST == 2:00 DST. In that
# case we want the 1:00 spelling.
sixutc = self.dston.replace(hour=6, tzinfo=utc_real)
# Now 6:00 "looks like daylight", so the offset wrt Eastern is -4,
# and adding -4-0 == -4 gives the 2:00 spelling. We want the 1:00 EST
# spelling.
expected = self.dston.replace(hour=1)
got = sixutc.astimezone(Eastern).replace(tzinfo=None)
self.assertEqual(expected, got)
# Now on the day DST ends, we want "repeat an hour" behavior.
# UTC 4:MM 5:MM 6:MM 7:MM checking these
# EST 23:MM 0:MM 1:MM 2:MM
# EDT 0:MM 1:MM 2:MM 3:MM
# wall 0:MM 1:MM 1:MM 2:MM against these
for utc in utc_real, utc_fake:
for tz in Eastern, Pacific:
first_std_hour = self.dstoff - timedelta(hours=2) # 23:MM
# Convert that to UTC.
first_std_hour -= tz.utcoffset(None)
# Adjust for possibly fake UTC.
asutc = first_std_hour + utc.utcoffset(None)
# First UTC hour to convert; this is 4:00 when utc=utc_real &
# tz=Eastern.
asutcbase = asutc.replace(tzinfo=utc)
for tzhour in (0, 1, 1, 2):
expectedbase = self.dstoff.replace(hour=tzhour)
for minute in 0, 30, 59:
expected = expectedbase.replace(minute=minute)
asutc = asutcbase.replace(minute=minute)
astz = asutc.astimezone(tz)
self.assertEqual(astz.replace(tzinfo=None), expected)
asutcbase += HOUR
def test_bogus_dst(self):
class ok(tzinfo):
def utcoffset(self, dt): return HOUR
def dst(self, dt): return HOUR
now = self.theclass.now().replace(tzinfo=utc_real)
# Doesn't blow up.
now.astimezone(ok())
# Does blow up.
class notok(ok):
def dst(self, dt): return None
self.assertRaises(ValueError, now.astimezone, notok())
def test_fromutc(self):
self.assertRaises(TypeError, Eastern.fromutc) # not enough args
now = datetime.utcnow().replace(tzinfo=utc_real)
self.assertRaises(ValueError, Eastern.fromutc, now) # wrong tzinfo
now = now.replace(tzinfo=Eastern) # insert correct tzinfo
enow = Eastern.fromutc(now) # doesn't blow up
self.assertEqual(enow.tzinfo, Eastern) # has right tzinfo member
self.assertRaises(TypeError, Eastern.fromutc, now, now) # too many args
self.assertRaises(TypeError, Eastern.fromutc, date.today()) # wrong type
# Always converts UTC to standard time.
class FauxUSTimeZone(USTimeZone):
def fromutc(self, dt):
return dt + self.stdoffset
FEastern = FauxUSTimeZone(-5, "FEastern", "FEST", "FEDT")
# UTC 4:MM 5:MM 6:MM 7:MM 8:MM 9:MM
# EST 23:MM 0:MM 1:MM 2:MM 3:MM 4:MM
# EDT 0:MM 1:MM 2:MM 3:MM 4:MM 5:MM
# Check around DST start.
start = self.dston.replace(hour=4, tzinfo=Eastern)
fstart = start.replace(tzinfo=FEastern)
for wall in 23, 0, 1, 3, 4, 5:
expected = start.replace(hour=wall)
if wall == 23:
expected -= timedelta(days=1)
got = Eastern.fromutc(start)
self.assertEqual(expected, got)
expected = fstart + FEastern.stdoffset
got = FEastern.fromutc(fstart)
self.assertEqual(expected, got)
# Ensure astimezone() calls fromutc() too.
got = fstart.replace(tzinfo=utc_real).astimezone(FEastern)
self.assertEqual(expected, got)
start += HOUR
fstart += HOUR
# Check around DST end.
start = self.dstoff.replace(hour=4, tzinfo=Eastern)
fstart = start.replace(tzinfo=FEastern)
for wall in 0, 1, 1, 2, 3, 4:
expected = start.replace(hour=wall)
got = Eastern.fromutc(start)
self.assertEqual(expected, got)
expected = fstart + FEastern.stdoffset
got = FEastern.fromutc(fstart)
self.assertEqual(expected, got)
# Ensure astimezone() calls fromutc() too.
got = fstart.replace(tzinfo=utc_real).astimezone(FEastern)
self.assertEqual(expected, got)
start += HOUR
fstart += HOUR
#############################################################################
# oddballs
class Oddballs(unittest.TestCase):
def test_bug_1028306(self):
# Trying to compare a date to a datetime should act like a mixed-
# type comparison, despite that datetime is a subclass of date.
as_date = date.today()
as_datetime = datetime.combine(as_date, time())
self.assertTrue(as_date != as_datetime)
self.assertTrue(as_datetime != as_date)
self.assertTrue(not as_date == as_datetime)
self.assertTrue(not as_datetime == as_date)
self.assertRaises(TypeError, lambda: as_date < as_datetime)
self.assertRaises(TypeError, lambda: as_datetime < as_date)
self.assertRaises(TypeError, lambda: as_date <= as_datetime)
self.assertRaises(TypeError, lambda: as_datetime <= as_date)
self.assertRaises(TypeError, lambda: as_date > as_datetime)
self.assertRaises(TypeError, lambda: as_datetime > as_date)
self.assertRaises(TypeError, lambda: as_date >= as_datetime)
self.assertRaises(TypeError, lambda: as_datetime >= as_date)
        # Nevertheless, comparison should work with the base-class (date)
# projection if use of a date method is forced.
self.assertTrue(as_date.__eq__(as_datetime))
different_day = (as_date.day + 1) % 20 + 1
self.assertTrue(not as_date.__eq__(as_datetime.replace(day=
different_day)))
# And date should compare with other subclasses of date. If a
# subclass wants to stop this, it's up to the subclass to do so.
date_sc = SubclassDate(as_date.year, as_date.month, as_date.day)
self.assertEqual(as_date, date_sc)
self.assertEqual(date_sc, as_date)
# Ditto for datetimes.
datetime_sc = SubclassDatetime(as_datetime.year, as_datetime.month,
as_date.day, 0, 0, 0)
self.assertEqual(as_datetime, datetime_sc)
self.assertEqual(datetime_sc, as_datetime)
def test_main():
test_support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
| apache-2.0 | -1,640,229,078,352,298,800 | 6,358,851,695,176,387,000 | 39.072171 | 91 | 0.566212 | false |
makinacorpus/django | tests/custom_managers/models.py | 9 | 2042 | """
23. Giving models a custom manager
You can use a custom ``Manager`` in a particular model by extending the base
``Manager`` class and instantiating your custom ``Manager`` in your model.
There are two reasons you might want to customize a ``Manager``: to add extra
``Manager`` methods, and/or to modify the initial ``QuerySet`` the ``Manager``
returns.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# An example of a custom manager called "objects".
class PersonManager(models.Manager):
def get_fun_people(self):
return self.filter(fun=True)
@python_2_unicode_compatible
class Person(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
fun = models.BooleanField()
objects = PersonManager()
def __str__(self):
return "%s %s" % (self.first_name, self.last_name)
# An example of a custom manager that sets get_queryset().
class PublishedBookManager(models.Manager):
def get_queryset(self):
return super(PublishedBookManager, self).get_queryset().filter(is_published=True)
@python_2_unicode_compatible
class Book(models.Model):
title = models.CharField(max_length=50)
author = models.CharField(max_length=30)
is_published = models.BooleanField()
published_objects = PublishedBookManager()
authors = models.ManyToManyField(Person, related_name='books')
def __str__(self):
return self.title
# An example of providing multiple custom managers.
class FastCarManager(models.Manager):
def get_queryset(self):
return super(FastCarManager, self).get_queryset().filter(top_speed__gt=150)
@python_2_unicode_compatible
class Car(models.Model):
name = models.CharField(max_length=10)
mileage = models.IntegerField()
top_speed = models.IntegerField(help_text="In miles per hour.")
cars = models.Manager()
fast_cars = FastCarManager()
def __str__(self):
return self.name
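# Illustrative usage of the managers above (a sketch only -- it assumes a
# configured Django project with this app migrated, so it is not runnable
# from this module as-is):
#
#     Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True)
#     Person.objects.get_fun_people()      # only rows with fun=True
#
#     Book.published_objects.all()         # only rows with is_published=True
#
#     Car.cars.all()                       # default manager: every car
#     Car.fast_cars.all()                  # only rows with top_speed > 150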
| bsd-3-clause | 8,421,285,002,832,024,000 | 2,465,335,578,377,561,000 | 30.415385 | 89 | 0.713026 | false |
phobson/wqio | wqio/tests/test_datacollections.py | 2 | 28761 | from distutils.version import LooseVersion
from textwrap import dedent
from io import StringIO
import numpy
import scipy
from scipy import stats
import pandas
from unittest import mock
import pytest
import pandas.testing as pdtest
from wqio.tests import helpers
from wqio.features import Location, Dataset
from wqio.datacollections import DataCollection, _dist_compare
OLD_SCIPY = LooseVersion(scipy.version.version) < LooseVersion("0.19")
def check_stat(expected_csv, result, comp=False):
    """Compare a computed DataCollection statistic against an expected CSV.

    Pass ``comp=True`` for pairwise-comparison results, which carry a
    two-level row index and need their last column level stacked.
    """
index_col = [0]
if comp:
index_col += [1]
file_obj = StringIO(dedent(expected_csv))
expected = pandas.read_csv(file_obj, header=[0, 1], index_col=index_col)
if comp:
expected = expected.stack(level=-1)
pdtest.assert_frame_equal(
expected.sort_index(axis="columns"),
result.sort_index(axis="columns").round(6),
atol=1e-5,
)
def remove_g_and_h(group):
    # Used as the DataCollection filter function: keep only groups whose
    # parameter (second element of the group name) is not "G" or "H".
    return group.name[1] not in ["G", "H"]
@pytest.fixture
def dc():
df = helpers.make_dc_data_complex()
dc = DataCollection(
df,
rescol="res",
qualcol="qual",
stationcol="loc",
paramcol="param",
ndval="<",
othergroups=None,
pairgroups=["state", "bmp"],
useros=True,
filterfxn=remove_g_and_h,
bsiter=10000,
)
return dc
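# A rough sketch of how a DataCollection like the fixture above is consumed
# (illustrative only; column and group names mirror make_dc_data_complex):
#
#     dc.median                # bootstrapped medians per (loc, param) cell
#     dc.mann_whitney          # pairwise location comparisons per parameter
#     dc.selectLocations(param="A", loc=["Inflow"], squeeze=True)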
@pytest.fixture
def dc_noNDs():
df = helpers.make_dc_data_complex()
dc = DataCollection(
df,
rescol="res",
qualcol="qual",
stationcol="loc",
paramcol="param",
ndval="junk",
othergroups=None,
pairgroups=["state", "bmp"],
useros=True,
filterfxn=remove_g_and_h,
bsiter=10000,
)
return dc
def test_basic_attr(dc):
assert dc._raw_rescol == "res"
assert isinstance(dc.data, pandas.DataFrame)
assert dc.roscol == "ros_res"
assert dc.rescol == "ros_res"
assert dc.qualcol == "qual"
assert dc.stationcol == "loc"
assert dc.paramcol == "param"
assert dc.ndval == ["<"]
assert dc.bsiter == 10000
assert dc.groupcols == ["loc", "param"]
assert dc.tidy_columns == ["loc", "param", "res", "__censorship"]
assert hasattr(dc, "filterfxn")
def test_data(dc):
assert isinstance(dc.data, pandas.DataFrame)
assert dc.data.shape == (519, 8)
assert "G" in dc.data["param"].unique()
assert "H" in dc.data["param"].unique()
@pytest.mark.parametrize("useros", [True, False])
def test_tidy(dc, useros):
assert isinstance(dc.tidy, pandas.DataFrame)
assert dc.tidy.shape == (388, 5)
assert "G" not in dc.tidy["param"].unique()
assert "H" not in dc.tidy["param"].unique()
collist = ["loc", "param", "res", "__censorship", "ros_res"]
assert dc.tidy.columns.tolist() == collist
def test_paired(dc):
assert isinstance(dc.paired, pandas.DataFrame)
assert dc.paired.shape == (164, 6)
assert "G" not in dc.paired.index.get_level_values("param").unique()
assert "H" not in dc.paired.index.get_level_values("param").unique()
dc.paired.columns.tolist() == [
("res", "Inflow"),
("res", "Outflow"),
("res", "Reference"),
("__censorship", "Inflow"),
("__censorship", "Outflow"),
("__censorship", "Reference"),
]
def test_count(dc):
known_csv = """\
station,Inflow,Outflow,Reference
result,Count,Count,Count
param,,,
A,21,22,20
B,24,22,19
C,24,24,25
D,24,25,21
E,19,16,20
F,21,24,17
"""
check_stat(known_csv, dc.count)
def test_n_unique(dc):
known_csv = """\
loc,Inflow,Outflow,Reference
result,bmp,bmp,bmp
param,,,
A,7,7,7
B,7,7,7
C,7,7,7
D,7,7,7
E,7,7,7
F,7,7,7
G,7,7,7
H,7,7,7
"""
check_stat(known_csv, dc.n_unique("bmp"))
@helpers.seed
def test_median(dc):
known_csv = """\
station,Inflow,Inflow,Inflow,Outflow,Outflow,Outflow,Reference,Reference,Reference
result,lower,median,upper,lower,median,upper,lower,median,upper
param,,,,,,,,,
A,0.334506,1.197251,2.013994,0.860493,2.231058,2.626023,1.073386,1.639472,1.717293
B,1.366948,2.773989,3.297147,0.23201,1.546499,2.579206,0.204164,1.565076,2.196367
C,0.17351,0.525957,0.68024,0.247769,0.396984,0.540742,0.136462,0.412693,0.559458
D,0.374122,1.201892,2.098846,0.516989,1.362759,1.827087,0.314655,0.882695,1.24545
E,0.276095,1.070858,1.152887,0.287914,0.516746,1.456859,0.366824,0.80716,2.040739
F,0.05667,0.832488,1.310575,0.425237,1.510942,2.193997,0.162327,0.745993,1.992513
"""
check_stat(known_csv, dc.median)
@helpers.seed
def test_mean(dc):
known_csv = """\
station,Inflow,Inflow,Inflow,Outflow,Outflow,Outflow,Reference,Reference,Reference
result,lower,mean,upper,lower,mean,upper,lower,mean,upper
param,,,,,,,,,
A,1.231607,2.646682,4.204054,1.930601,5.249281,9.081952,1.540167,3.777974,6.389439
B,2.99031,7.647175,12.810844,1.545539,6.863835,12.705913,1.010374,4.504255,9.592572
C,0.37496,0.513248,0.65948,0.411501,1.004637,1.706317,0.35779,0.541962,0.734751
D,1.29141,3.021235,4.987855,1.285899,2.318808,3.451824,1.008364,1.945828,2.924812
E,0.818641,1.914696,3.049554,0.584826,1.098241,1.640807,1.113589,2.283292,3.581946
F,0.8379,9.825404,25.289933,1.497825,3.450184,5.61929,0.939917,2.491708,4.094258
"""
check_stat(known_csv, dc.mean)
@helpers.seed
def test_std_dev(dc):
known_csv = """\
station,Inflow,Outflow,Reference
result,std. dev.,std. dev.,std. dev.
param,,,
A,3.58649,8.719371,5.527633
B,12.360099,13.60243,10.759285
C,0.353755,1.691208,0.493325
D,4.811938,2.849393,2.248178
E,2.55038,1.096698,2.789238
F,34.447565,5.361033,3.398367
"""
check_stat(known_csv, dc.std_dev)
@helpers.seed
def test_percentile_25(dc):
known_csv = """\
station,Inflow,Outflow,Reference
result,pctl 25,pctl 25,pctl 25
param,,,
A,0.522601,0.906029,1.094721
B,1.472541,0.251126,0.314226
C,0.164015,0.267521,0.136462
D,0.35688,0.516989,0.383895
E,0.364748,0.311508,0.394658
F,0.120068,0.406132,0.224429
"""
check_stat(known_csv, dc.percentile(25))
@helpers.seed
def test_percentile_75(dc):
known_csv = """\
station,Inflow,Outflow,Reference
result,pctl 75,pctl 75,pctl 75
param,,,
A,2.563541,3.838021,2.650648
B,4.728871,2.849948,2.261847
C,0.776388,0.853535,0.792612
D,3.04268,2.79341,3.611793
E,1.532775,1.59183,3.201534
F,1.792985,2.80979,2.742249
"""
check_stat(known_csv, dc.percentile(75))
@helpers.seed
def test_logmean(dc):
known_csv = """\
station,Inflow,Inflow,Inflow,Outflow,Outflow,Outflow,Reference,Reference,Reference
result,Log-mean,lower,upper,Log-mean,lower,upper,Log-mean,lower,upper
param,,,,,,,,,
A,0.140559,-0.55112,0.644202,0.733004,0.047053,1.22099,0.545205,-0.057683,1.029948
B,1.026473,0.368659,1.541241,0.105106,-0.939789,0.860244,0.068638,-0.932357,0.661203
C,-0.963004,-1.304115,-0.638446,-0.83221,-1.464092,-0.414379,-1.088377,-1.556795,-0.720706
D,0.062317,-0.663241,0.58349,0.185757,-0.325074,0.598432,-0.063507,-0.670456,0.434214
E,-0.103655,-0.751075,0.385909,-0.456202,-1.08692,0.029967,-0.068135,-0.787007,0.51226
F,-0.442721,-1.874677,0.344704,0.211658,-0.504166,0.734283,-0.253352,-1.175917,0.467231
"""
check_stat(known_csv, dc.logmean)
@helpers.seed
def test_logstd_dev(dc):
known_csv = """\
station,Inflow,Outflow,Reference
result,Log-std. dev.,Log-std. dev.,Log-std. dev.
param,,,
A,1.374026,1.343662,1.225352
B,1.430381,2.07646,1.662001
C,0.818504,1.263631,1.057177
D,1.530871,1.187246,1.277927
E,1.264403,1.121038,1.474431
F,2.324063,1.516331,1.701596
"""
check_stat(known_csv, dc.logstd_dev)
@helpers.seed
def test_geomean(dc):
known_csv = """\
station,Inflow,Inflow,Inflow,Outflow,Outflow,Outflow,Reference,Reference,Reference
Geo-mean,Log-mean,lower,upper,Log-mean,lower,upper,Log-mean,lower,upper
param,,,,,,,,,
A,1.150917,0.576304,1.904467,2.081323,1.048178,3.390543,1.724962,0.943949,2.800919
B,2.791205,1.445795,4.670381,1.110829,0.39071,2.363737,1.071049,0.393625,1.937121
C,0.381744,0.271413,0.528113,0.435087,0.231288,0.66075,0.336763,0.210811,0.486409
D,1.064299,0.515179,1.792283,1.204129,0.722474,1.819264,0.938467,0.511475,1.543749
E,0.901536,0.471859,1.470951,0.633686,0.337254,1.03042,0.934134,0.455205,1.66906
F,0.642286,0.153405,1.411572,1.235726,0.604009,2.083988,0.776195,0.308536,1.595571
"""
check_stat(known_csv, dc.geomean)
@helpers.seed
def test_geostd_dev(dc):
known_csv = """\
station,Inflow,Outflow,Reference
Geo-std. dev.,Log-std. dev.,Log-std. dev.,Log-std. dev.
param,,,
A,3.951225,3.833055,3.405365
B,4.180294,7.976181,5.269843
C,2.267105,3.538244,2.878234
D,4.622199,3.278041,3.589191
E,3.540977,3.068036,4.368548
F,10.217099,4.55548,5.48269
"""
check_stat(known_csv, dc.geostd_dev)
@helpers.seed
def test_shapiro(dc):
known_csv = """\
station,Inflow,Inflow,Outflow,Outflow,Reference,Reference
result,pvalue,statistic,pvalue,statistic,pvalue,statistic
param,,,,,,
A,1.8e-05,0.685783,1e-06,0.576069,4e-06,0.61735
B,1e-06,0.594411,0.0,0.530962,0.0,0.41471
C,0.028774,0.905906,0.0,0.546626,0.00279,0.860373
D,1e-06,0.622915,1.5e-05,0.722374,0.000202,0.76518
E,1.7e-05,0.654137,0.004896,0.818813,0.000165,0.74917
F,0.0,0.292916,2e-06,0.634671,0.000167,0.713968
"""
check_stat(known_csv, dc.shapiro)
@helpers.seed
def test_shapiro_log(dc):
known_csv = """\
station,Inflow,Inflow,Outflow,Outflow,Reference,Reference
result,statistic,pvalue,statistic,pvalue,statistic,pvalue
param,,,,,,
A,0.983521938,0.96662426,0.979861856,0.913820148,0.939460814,0.234214202
B,0.957531095,0.390856266,0.97048676,0.722278714,0.967978418,0.735424638
C,0.906479359,0.029602444,0.974698305,0.78197974,0.967106879,0.572929323
D,0.989704251,0.995502174,0.990663111,0.997093379,0.964812279,0.617747009
E,0.955088913,0.479993254,0.95211035,0.523841977,0.963425279,0.61430341
F,0.97542423,0.847370088,0.982230783,0.933124721,0.966197193,0.749036908
"""
check_stat(known_csv, dc.shapiro_log)
@helpers.seed
def test_lilliefors(dc):
known_csv = """\
station,Inflow,Inflow,Outflow,Outflow,Reference,Reference
result,lilliefors,pvalue,lilliefors,pvalue,lilliefors,pvalue
param,,,,,,
A,0.308131,1.4e-05,0.340594,0.0,0.364453,0.0
B,0.36764,0.0,0.420343,0.0,0.417165,0.0
C,0.166799,0.082737,0.324733,0.0,0.161753,0.090455
D,0.273012,6.7e-05,0.240311,0.000665,0.296919,3.7e-05
E,0.341398,3e-06,0.239314,0.014862,0.233773,0.005474
F,0.419545,0.0,0.331315,0.0,0.284249,0.000741
"""
check_stat(known_csv, dc.lilliefors)
@helpers.seed
def test_lilliefors_log(dc):
known_csv = """\
station,Inflow,Inflow,Outflow,Outflow,Reference,Reference
result,log-lilliefors,pvalue,log-lilliefors,pvalue,log-lilliefors,pvalue
param,,,,,,
A,0.08548109,0.95458004,0.15443943,0.19715747,0.20141389,0.03268737
B,0.16162839,0.10505016,0.12447902,0.49697902,0.15934334,0.22969362
C,0.16957278,0.07248915,0.12388174,0.44379732,0.11746642,0.48915671
D,0.06885549,0.99,0.06067356,0.99,0.13401954,0.41967483
E,0.13506577,0.47186822,0.14552341,0.47797919,0.09164876,0.92860794
F,0.14420794,0.30694533,0.08463267,0.92741885,0.08586933,0.9800294
"""
check_stat(known_csv, dc.lilliefors_log)
@helpers.seed
def test_anderson_darling(dc):
with helpers.raises(NotImplementedError):
_ = dc.anderson_darling
@helpers.seed
def test_anderson_darling_log(dc):
with helpers.raises(NotImplementedError):
_ = dc.anderson_darling_log
@helpers.seed
def test_mann_whitney(dc):
known_csv = """\
,,mann_whitney,mann_whitney,mann_whitney,pvalue,pvalue,pvalue
loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference
param,loc_1,,,,,,
A,Inflow,,180.0,179.0,,0.2198330905,0.4263216587
A,Outflow,282.0,,248.0,0.2198330905,,0.488580368
A,Reference,241.0,192.0,,0.4263216587,0.488580368,
B,Inflow,,345.0,317.0,,0.0766949991,0.0304383994
B,Outflow,183.0,,216.0,0.0766949991,,0.8650586835
B,Reference,139.0,202.0,,0.0304383994,0.8650586835,
C,Inflow,,282.0,323.0,,0.9097070273,0.6527104406
C,Outflow,294.0,,323.0,0.9097070273,,0.6527104406
C,Reference,277.0,277.0,,0.6527104406,0.6527104406,
D,Inflow,,285.0,263.0,,0.7718162376,0.8111960975
D,Outflow,315.0,,293.0,0.7718162376,,0.5082395211
D,Reference,241.0,232.0,,0.8111960975,0.5082395211,
E,Inflow,,164.0,188.0,,0.7033493939,0.9663820218
E,Outflow,140.0,,132.0,0.7033493939,,0.3813114322
E,Reference,192.0,188.0,,0.9663820218,0.3813114322,
F,Inflow,,201.0,172.0,,0.2505911218,0.8601783903
F,Outflow,303.0,,236.0,0.2505911218,,0.4045186043
F,Reference,185.0,172.0,,0.8601783903,0.4045186043
"""
check_stat(known_csv, dc.mann_whitney, comp=True)
@helpers.seed
def test_t_test(dc):
known_csv = """\
,,pvalue,pvalue,pvalue,t_test,t_test,t_test
loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference
param,loc_1,,,,,,
A,Inflow,,0.2178424157,0.4563196599,,-1.2604458127,-0.7539785777
A,Outflow,0.2178424157,,0.5240147979,1.2604458127,,0.643450194
A,Reference,0.4563196599,0.5240147979,,0.7539785777,-0.643450194,
B,Inflow,,0.8430007638,0.3898358794,,0.1992705833,0.869235357
B,Outflow,0.8430007638,,0.5491097882,-0.1992705833,,0.6043850808
B,Reference,0.3898358794,0.5491097882,,-0.869235357,-0.6043850808,
C,Inflow,,0.1847386316,0.8191392537,,-1.3639360123,-0.2300373632
C,Outflow,0.1847386316,,0.2179907667,1.3639360123,,1.2615982727
C,Reference,0.8191392537,0.2179907667,,0.2300373632,-1.2615982727,
D,Inflow,,0.5484265023,0.344783812,,0.6056706932,0.9582600001
D,Outflow,0.5484265023,,0.6299742693,-0.6056706932,,0.4851636024
D,Reference,0.344783812,0.6299742693,,-0.9582600001,-0.4851636024,
E,Inflow,,0.2304569921,0.6770414622,,1.2287029977,-0.4198288251
E,Outflow,0.2304569921,,0.1023435465,-1.2287029977,,-1.6935358498
E,Reference,0.6770414622,0.1023435465,,0.4198288251,1.6935358498,
F,Inflow,,0.422008391,0.3549979666,,0.8190789273,0.9463539528
F,Outflow,0.422008391,,0.4988994144,-0.8190789273,,0.6826435968
F,Reference,0.3549979666,0.4988994144,,-0.9463539528,-0.6826435968
"""
check_stat(known_csv, dc.t_test, comp=True)
@helpers.seed
def test_levene(dc):
known_csv = """\
,,levene,levene,levene,pvalue,pvalue,pvalue
loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference
param,loc_1,,,,,,
A,Inflow,,1.176282059,0.293152155,,0.284450688,0.591287419
A,Outflow,1.176282059,,0.397705309,0.284450688,,0.531863542
A,Reference,0.293152155,0.397705309,,0.591287419,0.531863542,
B,Inflow,,0.003559637,0.402002411,,0.952694449,0.529578712
B,Outflow,0.003559637,,0.408938588,0.952694449,,0.526247443
B,Reference,0.402002411,0.408938588,,0.529578712,0.526247443,
C,Inflow,,1.965613561,0.679535532,,0.167626459,0.413910674
C,Outflow,1.965613561,,1.462364363,0.167626459,,0.232602352
C,Reference,0.679535532,1.462364363,,0.413910674,0.232602352,
D,Inflow,,0.643364813,0.983777911,,0.426532092,0.32681669
D,Outflow,0.643364813,,0.116830634,0.426532092,,0.734124856
D,Reference,0.983777911,0.116830634,,0.32681669,0.734124856,
E,Inflow,,0.961616536,0.410491665,,0.333914902,0.525668596
E,Outflow,0.961616536,,2.726351564,0.333914902,,0.107912818
E,Reference,0.410491665,2.726351564,,0.525668596,0.107912818,
F,Inflow,,0.841984453,0.734809611,,0.363948105,0.396999375
F,Outflow,0.841984453,,0.25881357,0.363948105,,0.613802541
F,Reference,0.734809611,0.25881357,,0.396999375,0.613802541,
"""
check_stat(known_csv, dc.levene, comp=True)
@helpers.seed
def test_wilcoxon(dc):
known_csv = """\
,,wilcoxon,wilcoxon,wilcoxon,pvalue,pvalue,pvalue
loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference
param,loc_1,,,,,,
A,Inflow,,32.0,59.0,,0.03479,0.430679
A,Outflow,32.0,,46.0,0.03479,,0.274445
A,Reference,59.0,46.0,,0.430679,0.274445,
B,Inflow,,38.0,22.0,,0.600179,0.182338
B,Outflow,38.0,,31.0,0.600179,,0.858863
B,Reference,22.0,31.0,,0.182338,0.858863,
C,Inflow,,75.0,120.0,,0.167807,0.601046
C,Outflow,75.0,,113.0,0.167807,,0.463381
C,Reference,120.0,113.0,,0.601046,0.463381,
D,Inflow,,44.0,31.0,,0.593618,0.530285
D,Outflow,44.0,,45.0,0.593618,,0.972125
D,Reference,31.0,45.0,,0.530285,0.972125,
E,Inflow,,21.0,19.0,,0.910156,0.386271
E,Outflow,21.0,,16.0,0.910156,,0.077148
E,Reference,19.0,16.0,,0.386271,0.077148,
F,Inflow,,62.0,22.0,,0.492459,0.952765
F,Outflow,62.0,,28.0,0.492459,,0.656642
F,Reference,22.0,28.0,,0.952765,0.656642,
"""
with pytest.warns(UserWarning):
check_stat(known_csv, dc.wilcoxon, comp=True)
@helpers.seed
def test_ranksums(dc):
known_csv = """\
,,pvalue,pvalue,pvalue,rank_sums,rank_sums,rank_sums
loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference
param,loc_1,,,,,,
A,Inflow,,0.2153009,0.4187782,,-1.2391203,-0.8085428
A,Outflow,0.2153009,,0.4807102,1.2391203,,0.7051607
A,Reference,0.4187782,0.4807102,,0.8085428,-0.7051607,
B,Inflow,,0.0748817,0.029513,,1.781188,2.1765661
B,Outflow,0.0748817,,0.8547898,-1.781188,,0.1830104
B,Reference,0.029513,0.8547898,,-2.1765661,-0.1830104,
C,Inflow,,0.9015386,0.6455162,,-0.1237179,0.46
C,Outflow,0.9015386,,0.6455162,0.1237179,,0.46
C,Reference,0.6455162,0.6455162,,-0.46,-0.46,
D,Inflow,,0.7641772,0.8023873,,-0.3,0.2502587
D,Outflow,0.7641772,,0.5011969,0.3,,0.6726078
D,Reference,0.8023873,0.5011969,,-0.2502587,-0.6726078,
E,Inflow,,0.6911022,0.9551863,,0.3973597,-0.0561951
E,Outflow,0.6911022,,0.3727144,-0.3973597,,-0.8914004
E,Reference,0.9551863,0.3727144,,0.0561951,0.8914004,
F,Inflow,,0.2459307,0.8486619,,-1.1602902,-0.190826
F,Outflow,0.2459307,,0.3971011,1.1602902,,0.8468098
F,Reference,0.8486619,0.3971011,,0.190826,-0.8468098,
"""
check_stat(known_csv, dc.ranksums, comp=True)
@helpers.seed
@pytest.mark.xfail(OLD_SCIPY, reason="Scipy < 0.19")
def test_kendall(dc):
known_csv = """\
,,kendalltau,kendalltau,kendalltau,pvalue,pvalue,pvalue
loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference
param,loc_1,,,,,,
A,Inflow,,-0.051661,-0.00738,,0.772893,0.967114
A,Outflow,-0.051661,,-0.083333,0.772893,,0.690095
A,Reference,-0.00738,-0.083333,,0.967114,0.690095,
B,Inflow,,0.441351,0.298246,,0.015267,0.119265
B,Outflow,0.441351,,0.559855,0.015267,,0.004202
B,Reference,0.298246,0.559855,,0.119265,0.004202,
C,Inflow,,0.280223,0.084006,,0.078682,0.578003
C,Outflow,0.280223,,-0.1417,0.078682,,0.352394
C,Reference,0.084006,-0.1417,,0.578003,0.352394,
D,Inflow,,0.403469,0.095299,,0.020143,0.634826
D,Outflow,0.403469,,0.318337,0.020143,,0.094723
D,Reference,0.095299,0.318337,,0.634826,0.094723,
E,Inflow,,0.114286,0.640703,,0.673337,0.004476
E,Outflow,0.114286,,0.167944,0.673337,,0.449603
E,Reference,0.640703,0.167944,,0.004476,0.449603,
F,Inflow,,0.0,0.07231,,1.0,0.763851
F,Outflow,0.0,,0.388889,1.0,,0.063
F,Reference,0.07231,0.388889,,0.763851,0.063,
"""
check_stat(known_csv, dc.kendall, comp=True)
@helpers.seed
def test_spearman(dc):
known_csv = """\
,,pvalue,pvalue,pvalue,spearmanrho,spearmanrho,spearmanrho
loc_2,,Inflow,Outflow,Reference,Inflow,Outflow,Reference
param,loc_1,,,,,,
A,Inflow,,0.7574884491,0.9627447553,,-0.0809319588,0.012262418
A,Outflow,0.7574884491,,0.7617330788,-0.0809319588,,-0.0823529412
A,Reference,0.9627447553,0.7617330788,,0.012262418,-0.0823529412,
B,Inflow,,0.0110829791,0.0775159774,,0.5831305575,0.4537313433
B,Outflow,0.0110829791,,0.0024069317,0.5831305575,,0.6850916941
B,Reference,0.0775159774,0.0024069317,,0.4537313433,0.6850916941,
C,Inflow,,0.1330504059,0.6063501968,,0.3387640122,0.1134228342
C,Outflow,0.1330504059,,0.3431640379,0.3387640122,,-0.2070506455
C,Reference,0.6063501968,0.3431640379,,0.1134228342,-0.2070506455,
D,Inflow,,0.0195715066,0.4751861062,,0.4935814032,0.1858231711
D,Outflow,0.0195715066,,0.1263974782,0.4935814032,,0.363209462
D,Reference,0.4751861062,0.1263974782,,0.1858231711,0.363209462,
E,Inflow,,0.9828818202,0.0013596162,,0.0084033613,0.8112988341
E,Outflow,0.9828818202,,0.3413722947,0.0084033613,,0.3012263814
E,Reference,0.0013596162,0.3413722947,,0.8112988341,0.3012263814,
F,Inflow,,0.9645303744,0.6759971848,,-0.0106277141,0.1348767061
F,Outflow,0.9645303744,,0.0560590794,-0.0106277141,,0.5028571429
F,Reference,0.6759971848,0.0560590794,,0.1348767061,0.5028571429
"""
check_stat(known_csv, dc.spearman, comp=True)
@helpers.seed
def test_theilslopes(dc):
with helpers.raises(NotImplementedError):
_ = dc.theilslopes
def test_inventory(dc):
known_csv = StringIO(
dedent(
"""\
loc,param,Count,Non-Detect
Inflow,A,21,3
Inflow,B,24,6
Inflow,C,24,0
Inflow,D,24,11
Inflow,E,19,4
Inflow,F,21,8
Outflow,A,22,1
Outflow,B,22,9
Outflow,C,24,4
Outflow,D,25,12
Outflow,E,16,2
Outflow,F,24,8
Reference,A,20,2
Reference,B,19,6
Reference,C,25,4
Reference,D,21,12
Reference,E,20,3
Reference,F,17,7
"""
)
)
expected = pandas.read_csv(known_csv, index_col=[0, 1]).astype(int)
pdtest.assert_frame_equal(expected, dc.inventory.astype(int), check_names=False)
def test_inventory_noNDs(dc_noNDs):
known_csv = StringIO(
dedent(
"""\
loc,param,Count,Non-Detect
Inflow,A,21,0
Inflow,B,24,0
Inflow,C,24,0
Inflow,D,24,0
Inflow,E,19,0
Inflow,F,21,0
Outflow,A,22,0
Outflow,B,22,0
Outflow,C,24,0
Outflow,D,25,0
Outflow,E,16,0
Outflow,F,24,0
Reference,A,20,0
Reference,B,19,0
Reference,C,25,0
Reference,D,21,0
Reference,E,20,0
Reference,F,17,0
"""
)
)
expected = pandas.read_csv(known_csv, index_col=[0, 1]).astype(int)
pdtest.assert_frame_equal(
expected, dc_noNDs.inventory.astype(int), check_names=False,
)
@helpers.seed
def test_stat_summary(dc):
known_csv = StringIO(
dedent(
"""\
ros_res,loc,A,B,C,D,E,F
Count,Inflow,21,24,24,24,19,21
Count,Outflow,22,22,24,25,16,24
Count,Reference,20,19,25,21,20,17
Non-Detect,Inflow,3.0,6.0,0.0,11.0,4.0,8.0
Non-Detect,Outflow,1.0,9.0,4.0,12.0,2.0,8.0
Non-Detect,Reference,2.0,6.0,4.0,12.0,3.0,7.0
mean,Inflow,2.64668,7.64717,0.51325,3.02124,1.9147,9.8254
mean,Outflow,5.24928,6.86384,1.00464,2.31881,1.09824,3.45018
mean,Reference,3.77797,4.50425,0.54196,1.94583,2.28329,2.49171
std,Inflow,3.67506,12.62594,0.36136,4.91543,2.62027,35.29825
std,Outflow,8.92456,13.92253,1.72758,2.90815,1.13267,5.47634
std,Reference,5.67123,11.05411,0.5035,2.3037,2.8617,3.50296
min,Inflow,0.0756,0.17404,0.10213,0.05365,0.08312,0.00803
min,Outflow,0.11177,0.02106,0.03578,0.11678,0.07425,0.06377
min,Reference,0.15575,0.04909,0.04046,0.08437,0.05237,0.03445
10%,Inflow,0.1772,0.45233,0.13467,0.15495,0.1763,0.03548
10%,Outflow,0.44852,0.08297,0.08222,0.26949,0.19903,0.18008
10%,Reference,0.38448,0.13467,0.08241,0.19355,0.12777,0.09457
25%,Inflow,0.5226,1.47254,0.16401,0.35688,0.36475,0.12007
25%,Outflow,0.90603,0.25113,0.26752,0.51699,0.31151,0.40613
25%,Reference,1.09472,0.31423,0.13646,0.3839,0.39466,0.22443
50%,Inflow,1.19725,2.77399,0.52596,1.20189,1.07086,0.83249
50%,Outflow,2.23106,1.5465,0.39698,1.36276,0.51675,1.51094
50%,Reference,1.63947,1.56508,0.41269,0.8827,0.80716,0.74599
75%,Inflow,2.56354,4.72887,0.77639,3.04268,1.53278,1.79299
75%,Outflow,3.83802,2.84995,0.85354,2.79341,1.59183,2.80979
75%,Reference,2.65065,2.26185,0.79261,3.61179,3.20153,2.74225
90%,Inflow,6.02835,24.40655,0.99293,8.00691,6.28345,8.51706
90%,Outflow,12.43052,23.90022,2.43829,5.66731,2.30348,10.32829
90%,Reference,12.58278,6.67125,1.2205,4.78255,7.72012,8.57303
max,Inflow,13.87664,45.97893,1.26657,21.75505,8.88365,163.01001
max,Outflow,36.58941,47.49381,8.04948,12.39894,4.19118,23.29367
max,Reference,21.22363,48.23615,1.94442,7.67751,8.75609,10.5095
"""
)
)
expected = pandas.read_csv(known_csv, index_col=[0, 1]).T
pdtest.assert_frame_equal(
expected.round(5),
dc.stat_summary().round(5),
check_names=False,
check_dtype=False,
rtol=1e-4,
)
def test_locations(dc):
for loc in dc.locations:
assert isinstance(loc, Location)
assert len(dc.locations) == 18
assert dc.locations[0].definition == {"loc": "Inflow", "param": "A"}
assert dc.locations[1].definition == {"loc": "Inflow", "param": "B"}
def test_datasets(dc):
_ds = []
for d in dc.datasets("Inflow", "Outflow"):
assert isinstance(d, Dataset)
_ds.append(d)
assert len(_ds) == 6
assert _ds[0].definition == {"param": "A"}
assert _ds[1].definition == {"param": "B"}
# this sufficiently tests dc._filter_collection
def test_selectLocations(dc):
locs = dc.selectLocations(param="A", loc=["Inflow", "Outflow"])
assert len(locs) == 2
for n, (loc, loctype) in enumerate(zip(locs, ["Inflow", "Outflow"])):
assert isinstance(loc, Location)
assert loc.definition["param"] == "A"
assert loc.definition["loc"] == loctype
def test_selectLocations_squeeze_False(dc):
locs = dc.selectLocations(param="A", loc=["Inflow"], squeeze=False)
assert len(locs) == 1
for n, loc in enumerate(locs):
assert isinstance(loc, Location)
assert loc.definition["param"] == "A"
assert loc.definition["loc"] == "Inflow"
def test_selectLocations_squeeze_True(dc):
loc = dc.selectLocations(param="A", loc=["Inflow"], squeeze=True)
assert isinstance(loc, Location)
assert loc.definition["param"] == "A"
assert loc.definition["loc"] == "Inflow"
def test_selectLocations_squeeze_True_None(dc):
loc = dc.selectLocations(param="A", loc=["Junk"], squeeze=True)
assert loc is None
# since the test_selectLocations* tests stress _filter_collection
# enough, we'll mock it out for datasets:
def test_selectDatasets(dc):
with mock.patch.object(dc, "_filter_collection") as _fc:
with mock.patch.object(dc, "datasets", return_value=["A", "B"]) as _ds:
dc.selectDatasets("Inflow", "Reference", foo="A", bar="C")
_ds.assert_called_once_with("Inflow", "Reference")
_fc.assert_called_once_with(["A", "B"], foo="A", bar="C", squeeze=False)


@pytest.mark.parametrize("func", [stats.mannwhitneyu, stats.wilcoxon])
@pytest.mark.parametrize(
    ("x", "all_same"), [([5, 5, 5, 5, 5], True), ([5, 6, 7, 7, 8], False)]
)
def test_dist_compare_wrapper(x, all_same, func):
    y = [5, 5, 5, 5, 5]
    with mock.patch.object(stats, func.__name__) as _test:
        result = _dist_compare(x, y, _test)

    if all_same:
        assert numpy.isnan(result.stat)
        assert numpy.isnan(result.pvalue)
        assert _test.call_count == 0
    else:
        _test.assert_called_once_with(x, y, alternative="two-sided")
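

# For orientation, the expectations above imply a wrapper shaped roughly like
# the hypothetical sketch below -- the real helper lives in the library under
# test and may differ in names and details ("CompareResult" is a stand-in):
#
#     def _dist_compare(x, y, stat_func, **kwargs):
#         # Degenerate case: both samples are the same constant value, so
#         # there is nothing to test and the stat/p-value are undefined.
#         if numpy.unique(numpy.concatenate([x, y])).size == 1:
#             return CompareResult(stat=numpy.nan, pvalue=numpy.nan)
#         return stat_func(x, y, alternative="two-sided", **kwargs)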
| bsd-3-clause | 8,349,028,156,294,009,000 | -3,173,278,006,722,406,000 | 36.110968 | 98 | 0.643441 | false |
radicalbit/ambari | ambari-server/src/main/resources/common-services/STORM/1.0.1.3.0/package/scripts/nimbus.py | 1 | 3638 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from resource_management.libraries.functions import check_process_status
from resource_management.libraries.script import Script
from resource_management.libraries.functions import format
from resource_management.libraries.functions import stack_select
from resource_management.core.resources.system import Execute
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions import StackFeature
from storm import storm
from service import service
from resource_management.libraries.functions.security_commons import build_expectations, \
cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
FILE_TYPE_JAAS_CONF
from setup_ranger_storm import setup_ranger_storm
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
from resource_management.core.resources.service import Service
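
# Base class shared by the OS-specific implementations below; the
# @OsFamilyImpl decorators register which subclass handles the current
# host's OS family at runtime.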
class Nimbus(Script):
  def install(self, env):
    self.install_packages(env)
    self.configure(env)

  def configure(self, env):
    import params
    env.set_params(params)
    storm("nimbus")
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class NimbusDefault(Nimbus):
  def pre_upgrade_restart(self, env, upgrade_type=None):
    import params
    env.set_params(params)
    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
      stack_select.select_packages(params.version)

  def start(self, env, upgrade_type=None):
    import params
    env.set_params(params)
    self.configure(env)
    setup_ranger_storm(upgrade_type=upgrade_type)
    service("nimbus", action="start")

  def stop(self, env, upgrade_type=None):
    import params
    env.set_params(params)
    service("nimbus", action="stop")

  def status(self, env):
    import status_params
    env.set_params(status_params)
    check_process_status(status_params.pid_nimbus)

  def get_log_folder(self):
    import params
    return params.log_dir

  def get_user(self):
    import params
    return params.storm_user

  def get_pid_files(self):
    import status_params
    return [status_params.pid_nimbus]
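
# Windows variant: Nimbus runs as a registered Windows service, so start,
# stop, and status go through the Windows service manager instead of pids.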
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class NimbusWindows(Nimbus):
  def start(self, env):
    import status_params
    env.set_params(status_params)
    Service(status_params.nimbus_win_service_name, action="start")

  def stop(self, env):
    import status_params
    env.set_params(status_params)
    Service(status_params.nimbus_win_service_name, action="stop")

  def status(self, env):
    import status_params
    from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
    env.set_params(status_params)
    check_windows_service_status(status_params.nimbus_win_service_name)


if __name__ == "__main__":
  Nimbus().execute()
| apache-2.0 | -1,992,194,608,170,381,600 | -8,405,352,722,690,119,000 | 32.072727 | 106 | 0.765531 | false |
emilk/sproxel | distro/common/lib/lib-tk/test/test_tkinter/test_text.py | 7 | 1172 | import unittest
import Tkinter
from test.test_support import requires, run_unittest
from ttk import setup_master
requires('gui')
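
# requires('gui') skips this module when no display is available
# (e.g. on headless buildbots).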

class TextTest(unittest.TestCase):

    def setUp(self):
        self.root = setup_master()
        self.text = Tkinter.Text(self.root)

    def tearDown(self):
        self.text.destroy()

    def test_search(self):
        text = self.text

        # pattern and index are obligatory arguments.
        self.assertRaises(Tkinter.TclError, text.search, None, '1.0')
        self.assertRaises(Tkinter.TclError, text.search, 'a', None)
        self.assertRaises(Tkinter.TclError, text.search, None, None)

        # Invalid text index.
        self.assertRaises(Tkinter.TclError, text.search, '', 0)

        # Check if we are getting the indices as strings -- you are likely
        # to get Tcl_Obj under Tk 8.5 if Tkinter doesn't convert it.
        text.insert('1.0', 'hi-test')
        self.assertEqual(text.search('-test', '1.0', 'end'), '1.2')
        self.assertEqual(text.search('test', '1.0', 'end'), '1.3')


tests_gui = (TextTest, )

if __name__ == "__main__":
    run_unittest(*tests_gui)
| bsd-3-clause | -5,283,198,189,824,871,000 | 6,800,869,829,391,916,000 | 28.051282 | 74 | 0.612628 | false |