repo_name | path | copies | size | content | license
---|---|---|---|---|---|
vizual54/MissionPlanner | Lib/site-packages/scipy/cluster/tests/test_hierarchy.py | 51 | 56990 |
#!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
#
# Author: Damian Eads
# Date: April 17, 2008
#
# Copyright (C) 2008 Damian Eads
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os.path
import numpy as np
from numpy.testing import TestCase, run_module_suite
from scipy.cluster.hierarchy import linkage, from_mlab_linkage, to_mlab_linkage,\
num_obs_linkage, inconsistent, cophenet, fclusterdata, fcluster, \
is_isomorphic, single, complete, weighted, centroid, leaders, \
correspond, is_monotonic, maxdists, maxinconsts, maxRstat, \
is_valid_linkage, is_valid_im, to_tree, leaves_list
from scipy.spatial.distance import squareform, pdist
_tdist = np.array([[0, 662, 877, 255, 412, 996],
[662, 0, 295, 468, 268, 400],
[877, 295, 0, 754, 564, 138],
[255, 468, 754, 0, 219, 869],
[412, 268, 564, 219, 0, 669],
[996, 400, 138, 869, 669, 0 ]], dtype='double')
_ytdist = squareform(_tdist)
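# Note: squareform() collapses the symmetric 6x6 distance matrix above into
# the condensed 1-D form (15 pairwise distances) that pdist() produces and
# that linkage() accepts as input.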
eo = {}
_filenames = ["iris.txt",
"Q-X.txt",
"fclusterdata-maxclusts-2.txt",
"fclusterdata-maxclusts-3.txt",
"fclusterdata-maxclusts-4.txt",
"linkage-single-tdist.txt",
"linkage-complete-tdist.txt",
"linkage-average-tdist.txt",
"linkage-weighted-tdist.txt",
"inconsistent-Q-single-1.txt",
"inconsistent-Q-single-2.txt",
"inconsistent-Q-single-3.txt",
"inconsistent-Q-single-4.txt",
"inconsistent-Q-single-5.txt",
"inconsistent-Q-single-6.txt",
"inconsistent-complete-tdist-depth-1.txt",
"inconsistent-complete-tdist-depth-2.txt",
"inconsistent-complete-tdist-depth-3.txt",
"inconsistent-complete-tdist-depth-4.txt",
"inconsistent-single-tdist-depth-0.txt",
"inconsistent-single-tdist-depth-1.txt",
"inconsistent-single-tdist-depth-2.txt",
"inconsistent-single-tdist-depth-3.txt",
"inconsistent-single-tdist-depth-4.txt",
"inconsistent-single-tdist-depth-5.txt",
"inconsistent-single-tdist.txt",
"inconsistent-weighted-tdist-depth-1.txt",
"inconsistent-weighted-tdist-depth-2.txt",
"inconsistent-weighted-tdist-depth-3.txt",
"inconsistent-weighted-tdist-depth-4.txt",
"linkage-Q-average.txt",
"linkage-Q-complete.txt",
"linkage-Q-single.txt",
"linkage-Q-weighted.txt",
"linkage-Q-centroid.txt",
"linkage-Q-median.txt",
"linkage-Q-ward.txt"
]
def load_testing_files():
for fn in _filenames:
name = fn.replace(".txt", "").replace("-ml", "")
fqfn = os.path.join(os.path.dirname(__file__), fn)
eo[name] = np.loadtxt(open(fqfn))
#print "%s: %s %s" % (name, str(eo[name].shape), str(eo[name].dtype))
#eo['pdist-boolean-inp'] = np.bool_(eo['pdist-boolean-inp'])
load_testing_files()
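# Orientation note: most tests below compare SciPy's output against reference
# matrices exported from MATLAB.  A MATLAB linkage row is [left, right, dist]
# with 1-based cluster indices; from_mlab_linkage() converts it to SciPy's
# 4-column, 0-based form by decrementing the indices and appending the
# cluster size, e.g. (see TestFromMLabLinkage below):
#
#   >>> from_mlab_linkage([[1, 2, 3]])
#   array([[ 0.,  1.,  3.,  2.]])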
class TestLinkage(TestCase):
def test_linkage_empty_distance_matrix(self):
"Tests linkage(Y) where Y is a 0x4 linkage matrix. Exception expected."
y = np.zeros((0,))
self.assertRaises(ValueError, linkage, y)
################### linkage
def test_linkage_single_tdist(self):
"Tests linkage(Y, 'single') on the tdist data set."
Z = linkage(_ytdist, 'single')
Zmlab = eo['linkage-single-tdist']
eps = 1e-10
expectedZ = from_mlab_linkage(Zmlab)
self.assertTrue(within_tol(Z, expectedZ, eps))
def test_linkage_complete_tdist(self):
"Tests linkage(Y, 'complete') on the tdist data set."
Z = linkage(_ytdist, 'complete')
Zmlab = eo['linkage-complete-tdist']
eps = 1e-10
expectedZ = from_mlab_linkage(Zmlab)
self.assertTrue(within_tol(Z, expectedZ, eps))
def test_linkage_average_tdist(self):
"Tests linkage(Y, 'average') on the tdist data set."
Z = linkage(_ytdist, 'average')
Zmlab = eo['linkage-average-tdist']
eps = 1e-05
expectedZ = from_mlab_linkage(Zmlab)
#print Z, expectedZ, np.abs(Z - expectedZ).max()
self.assertTrue(within_tol(Z, expectedZ, eps))
def test_linkage_weighted_tdist(self):
"Tests linkage(Y, 'weighted') on the tdist data set."
Z = linkage(_ytdist, 'weighted')
Zmlab = eo['linkage-weighted-tdist']
eps = 1e-10
expectedZ = from_mlab_linkage(Zmlab)
#print Z, expectedZ, np.abs(Z - expectedZ).max()
self.assertTrue(within_tol(Z, expectedZ, eps))
################### linkage on Q
def test_linkage_single_q(self):
"Tests linkage(Y, 'single') on the Q data set."
X = eo['Q-X']
Z = single(X)
Zmlab = eo['linkage-Q-single']
eps = 1e-06
expectedZ = from_mlab_linkage(Zmlab)
#print abs(Z-expectedZ).max()
self.assertTrue(within_tol(Z, expectedZ, eps))
def test_linkage_complete_q(self):
"Tests linkage(Y, 'complete') on the Q data set."
X = eo['Q-X']
Z = complete(X)
Zmlab = eo['linkage-Q-complete']
eps = 1e-07
expectedZ = from_mlab_linkage(Zmlab)
#print abs(Z-expectedZ).max()
self.assertTrue(within_tol(Z, expectedZ, eps))
def test_linkage_centroid_q(self):
"Tests linkage(Y, 'centroid') on the Q data set."
X = eo['Q-X']
Z = centroid(X)
Zmlab = eo['linkage-Q-centroid']
eps = 1e-07
expectedZ = from_mlab_linkage(Zmlab)
#print abs(Z-expectedZ).max()
self.assertTrue(within_tol(Z, expectedZ, eps))
def test_linkage_weighted_q(self):
"Tests linkage(Y, 'weighted') on the Q data set."
X = eo['Q-X']
Z = weighted(X)
Zmlab = eo['linkage-Q-weighted']
eps = 1e-07
expectedZ = from_mlab_linkage(Zmlab)
#print abs(Z-expectedZ).max()
self.assertTrue(within_tol(Z, expectedZ, eps))
class TestInconsistent(TestCase):
def test_single_inconsistent_tdist_1(self):
"Tests inconsistency matrix calculation (depth=1) on a single linkage."
Y = squareform(_tdist)
Z = linkage(Y, 'single')
R = inconsistent(Z, 1)
Rright = eo['inconsistent-single-tdist-depth-1']
eps = 1e-15
#print np.abs(R - Rright).max()
self.assertTrue(within_tol(R, Rright, eps))
def test_single_inconsistent_tdist_2(self):
"Tests inconsistency matrix calculation (depth=2) on a single linkage."
Y = squareform(_tdist)
Z = linkage(Y, 'single')
R = inconsistent(Z, 2)
Rright = eo['inconsistent-single-tdist-depth-2']
eps = 1e-05
#print np.abs(R - Rright).max()
self.assertTrue(within_tol(R, Rright, eps))
def test_single_inconsistent_tdist_3(self):
"Tests inconsistency matrix calculation (depth=3) on a single linkage."
Y = squareform(_tdist)
Z = linkage(Y, 'single')
R = inconsistent(Z, 3)
Rright = eo['inconsistent-single-tdist-depth-3']
eps = 1e-05
#print np.abs(R - Rright).max()
self.assertTrue(within_tol(R, Rright, eps))
def test_single_inconsistent_tdist_4(self):
"Tests inconsistency matrix calculation (depth=4) on a single linkage."
Y = squareform(_tdist)
Z = linkage(Y, 'single')
R = inconsistent(Z, 4)
Rright = eo['inconsistent-single-tdist-depth-4']
eps = 1e-05
#print np.abs(R - Rright).max()
self.assertTrue(within_tol(R, Rright, eps))
# with complete linkage...
def test_complete_inconsistent_tdist_1(self):
"Tests inconsistency matrix calculation (depth=1) on a complete linkage."
Y = squareform(_tdist)
Z = linkage(Y, 'complete')
R = inconsistent(Z, 1)
Rright = eo['inconsistent-complete-tdist-depth-1']
eps = 1e-15
#print np.abs(R - Rright).max()
self.assertTrue(within_tol(R, Rright, eps))
def test_complete_inconsistent_tdist_2(self):
"Tests inconsistency matrix calculation (depth=2) on a complete linkage."
Y = squareform(_tdist)
Z = linkage(Y, 'complete')
R = inconsistent(Z, 2)
Rright = eo['inconsistent-complete-tdist-depth-2']
eps = 1e-05
#print np.abs(R - Rright).max()
self.assertTrue(within_tol(R, Rright, eps))
def test_complete_inconsistent_tdist_3(self):
"Tests inconsistency matrix calculation (depth=3) on a complete linkage."
Y = squareform(_tdist)
Z = linkage(Y, 'complete')
R = inconsistent(Z, 3)
Rright = eo['inconsistent-complete-tdist-depth-3']
eps = 1e-05
#print np.abs(R - Rright).max()
self.assertTrue(within_tol(R, Rright, eps))
def test_complete_inconsistent_tdist_4(self):
"Tests inconsistency matrix calculation (depth=4) on a complete linkage."
Y = squareform(_tdist)
Z = linkage(Y, 'complete')
R = inconsistent(Z, 4)
Rright = eo['inconsistent-complete-tdist-depth-4']
eps = 1e-05
#print np.abs(R - Rright).max()
self.assertTrue(within_tol(R, Rright, eps))
# with single linkage and Q data set
def test_single_inconsistent_Q_1(self):
"Tests inconsistency matrix calculation (depth=1, dataset=Q) with single linkage."
X = eo['Q-X']
Z = linkage(X, 'single', 'euclidean')
R = inconsistent(Z, 1)
Rright = eo['inconsistent-Q-single-1']
eps = 1e-06
#print np.abs(R - Rright).max()
self.assertTrue(within_tol(R, Rright, eps))
def test_single_inconsistent_Q_2(self):
"Tests inconsistency matrix calculation (depth=2, dataset=Q) with single linkage."
X = eo['Q-X']
Z = linkage(X, 'single', 'euclidean')
R = inconsistent(Z, 2)
Rright = eo['inconsistent-Q-single-2']
eps = 1e-06
#print np.abs(R - Rright).max()
self.assertTrue(within_tol(R, Rright, eps))
def test_single_inconsistent_Q_3(self):
"Tests inconsistency matrix calculation (depth=3, dataset=Q) with single linkage."
X = eo['Q-X']
Z = linkage(X, 'single', 'euclidean')
R = inconsistent(Z, 3)
Rright = eo['inconsistent-Q-single-3']
eps = 1e-05
#print np.abs(R - Rright).max()
self.assertTrue(within_tol(R, Rright, eps))
def test_single_inconsistent_Q_4(self):
"Tests inconsistency matrix calculation (depth=4, dataset=Q) with single linkage."
X = eo['Q-X']
Z = linkage(X, 'single', 'euclidean')
R = inconsistent(Z, 4)
Rright = eo['inconsistent-Q-single-4']
eps = 1e-05
#print np.abs(R - Rright).max()
self.assertTrue(within_tol(R, Rright, eps))
class TestCopheneticDistance(TestCase):
def test_linkage_cophenet_tdist_Z(self):
"Tests cophenet(Z) on tdist data set."
expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295, 295, 138, 219, 295, 295]);
Z = linkage(_ytdist, 'single')
M = cophenet(Z)
eps = 1e-10
self.assertTrue(within_tol(M, expectedM, eps))
def test_linkage_cophenet_tdist_Z_Y(self):
"Tests cophenet(Z, Y) on tdist data set."
Z = linkage(_ytdist, 'single')
(c, M) = cophenet(Z, _ytdist)
expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295, 295, 138, 219, 295, 295]);
expectedc = 0.639931296433393415057366837573
eps = 1e-10
self.assertTrue(np.abs(c - expectedc) <= eps)
self.assertTrue(within_tol(M, expectedM, eps))
class TestFromMLabLinkage(TestCase):
def test_from_mlab_linkage_empty(self):
"Tests from_mlab_linkage on empty linkage array."
X = np.asarray([])
R = from_mlab_linkage([])
self.assertTrue((R == X).all())
def test_from_mlab_linkage_single_row(self):
"Tests from_mlab_linkage on linkage array with single row."
expectedZP = np.asarray([[ 0., 1., 3., 2.]])
Z = [[1,2,3]]
ZP = from_mlab_linkage(Z)
return self.assertTrue((ZP == expectedZP).all())
def test_from_mlab_linkage_multiple_rows(self):
"Tests from_mlab_linkage on linkage array with multiple rows."
Z = np.asarray([[3, 6, 138], [4, 5, 219],
[1, 8, 255], [2, 9, 268], [7, 10, 295]])
expectedZS = np.array([[ 2., 5., 138., 2.],
[ 3., 4., 219., 2.],
[ 0., 7., 255., 3.],
[ 1., 8., 268., 4.],
[ 6., 9., 295., 6.]],
dtype=np.double)
ZS = from_mlab_linkage(Z)
#print expectedZS, ZS
self.assertTrue((expectedZS == ZS).all())
class TestToMLabLinkage(TestCase):
def test_to_mlab_linkage_empty(self):
"Tests to_mlab_linkage on empty linkage array."
X = np.asarray([])
R = to_mlab_linkage([])
self.assertTrue((R == X).all())
def test_to_mlab_linkage_single_row(self):
"Tests to_mlab_linkage on linkage array with single row."
Z = np.asarray([[ 0., 1., 3., 2.]])
expectedZP = np.asarray([[1,2,3]])
ZP = to_mlab_linkage(Z)
return self.assertTrue((ZP == expectedZP).all())
def test_to_mlab_linkage_multiple_rows(self):
"Tests to_mlab_linkage on linkage array with multiple rows."
expectedZM = np.asarray([[3, 6, 138], [4, 5, 219],
[1, 8, 255], [2, 9, 268], [7, 10, 295]])
Z = np.array([[ 2., 5., 138., 2.],
[ 3., 4., 219., 2.],
[ 0., 7., 255., 3.],
[ 1., 8., 268., 4.],
[ 6., 9., 295., 6.]],
dtype=np.double)
ZM = to_mlab_linkage(Z)
#print expectedZM, ZM
self.assertTrue((expectedZM == ZM).all())
class TestFcluster(TestCase):
def test_fclusterdata_maxclusts_2(self):
"Tests fclusterdata(X, criterion='maxclust', t=2) on a random 3-cluster data set."
expectedT = np.int_(eo['fclusterdata-maxclusts-2'])
X = eo['Q-X']
T = fclusterdata(X, criterion='maxclust', t=2)
self.assertTrue(is_isomorphic(T, expectedT))
def test_fclusterdata_maxclusts_3(self):
"Tests fclusterdata(X, criterion='maxclust', t=3) on a random 3-cluster data set."
expectedT = np.int_(eo['fclusterdata-maxclusts-3'])
X = eo['Q-X']
T = fclusterdata(X, criterion='maxclust', t=3)
self.assertTrue(is_isomorphic(T, expectedT))
def test_fclusterdata_maxclusts_4(self):
"Tests fclusterdata(X, criterion='maxclust', t=4) on a random 3-cluster data set."
expectedT = np.int_(eo['fclusterdata-maxclusts-4'])
X = eo['Q-X']
T = fclusterdata(X, criterion='maxclust', t=4)
self.assertTrue(is_isomorphic(T, expectedT))
def test_fcluster_maxclusts_2(self):
"Tests fcluster(Z, criterion='maxclust', t=2) on a random 3-cluster data set."
expectedT = np.int_(eo['fclusterdata-maxclusts-2'])
X = eo['Q-X']
Y = pdist(X)
Z = linkage(Y)
T = fcluster(Z, criterion='maxclust', t=2)
self.assertTrue(is_isomorphic(T, expectedT))
def test_fcluster_maxclusts_3(self):
"Tests fcluster(Z, criterion='maxclust', t=3) on a random 3-cluster data set."
expectedT = np.int_(eo['fclusterdata-maxclusts-3'])
X = eo['Q-X']
Y = pdist(X)
Z = linkage(Y)
T = fcluster(Z, criterion='maxclust', t=3)
self.assertTrue(is_isomorphic(T, expectedT))
def test_fcluster_maxclusts_4(self):
"Tests fcluster(Z, criterion='maxclust', t=4) on a random 3-cluster data set."
expectedT = np.int_(eo['fclusterdata-maxclusts-4'])
X = eo['Q-X']
Y = pdist(X)
Z = linkage(Y)
T = fcluster(Z, criterion='maxclust', t=4)
self.assertTrue(is_isomorphic(T, expectedT))
class TestLeaders(TestCase):
def test_leaders_single(self):
"Tests leaders using a flat clustering generated by single linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(Y)
T = fcluster(Z, criterion='maxclust', t=3)
Lright = (np.array([53, 55, 56]), np.array([2, 3, 1]))
L = leaders(Z, T)
#print L, Lright, T
self.assertTrue((L[0] == Lright[0]).all() and (L[1] == Lright[1]).all())
class TestIsIsomorphic(TestCase):
def test_is_isomorphic_1(self):
"Tests is_isomorphic on test case #1 (one flat cluster, different labellings)"
a = [1, 1, 1]
b = [2, 2, 2]
self.assertTrue(is_isomorphic(a, b) == True)
self.assertTrue(is_isomorphic(b, a) == True)
def test_is_isomorphic_2(self):
"Tests is_isomorphic on test case #2 (two flat clusters, different labelings)"
a = [1, 7, 1]
b = [2, 3, 2]
self.assertTrue(is_isomorphic(a, b) == True)
self.assertTrue(is_isomorphic(b, a) == True)
def test_is_isomorphic_3(self):
"Tests is_isomorphic on test case #3 (no flat clusters)"
a = []
b = []
self.assertTrue(is_isomorphic(a, b) == True)
def test_is_isomorphic_4A(self):
"Tests is_isomorphic on test case #4A (3 flat clusters, different labelings, isomorphic)"
a = [1, 2, 3]
b = [1, 3, 2]
self.assertTrue(is_isomorphic(a, b) == True)
self.assertTrue(is_isomorphic(b, a) == True)
def test_is_isomorphic_4B(self):
"Tests is_isomorphic on test case #4B (3 flat clusters, different labelings, nonisomorphic)"
a = [1, 2, 3, 3]
b = [1, 3, 2, 3]
self.assertTrue(is_isomorphic(a, b) == False)
self.assertTrue(is_isomorphic(b, a) == False)
def test_is_isomorphic_4C(self):
"Tests is_isomorphic on test case #4C (3 flat clusters, different labelings, isomorphic)"
a = [7, 2, 3]
b = [6, 3, 2]
self.assertTrue(is_isomorphic(a, b) == True)
self.assertTrue(is_isomorphic(b, a) == True)
def test_is_isomorphic_5A(self):
"Tests is_isomorphic on test case #5A (1000 observations, 2 random clusters, random permutation of the labeling). Run 3 times."
for k in xrange(0, 3):
self.help_is_isomorphic_randperm(1000, 2)
def test_is_isomorphic_5B(self):
"Tests is_isomorphic on test case #5B (1000 observations, 3 random clusters, random permutation of the labeling). Run 3 times."
for k in xrange(0, 3):
self.help_is_isomorphic_randperm(1000, 3)
def test_is_isomorphic_5C(self):
"Tests is_isomorphic on test case #5C (1000 observations, 5 random clusters, random permutation of the labeling). Run 3 times."
for k in xrange(0, 3):
self.help_is_isomorphic_randperm(1000, 5)
def test_is_isomorphic_6A(self):
"Tests is_isomorphic on test case #5A (1000 observations, 2 random clusters, random permutation of the labeling, slightly nonisomorphic.) Run 3 times."
for k in xrange(0, 3):
self.help_is_isomorphic_randperm(1000, 2, True, 5)
def test_is_isomorphic_6B(self):
"Tests is_isomorphic on test case #5B (1000 observations, 3 random clusters, random permutation of the labeling, slightly nonisomorphic.) Run 3 times."
for k in xrange(0, 3):
self.help_is_isomorphic_randperm(1000, 3, True, 5)
def test_is_isomorphic_6C(self):
"Tests is_isomorphic on test case #5C (1000 observations, 5 random clusters, random permutation of the labeling, slightly non-isomorphic.) Run 3 times."
for k in xrange(0, 3):
self.help_is_isomorphic_randperm(1000, 5, True, 5)
def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0):
a = np.int_(np.random.rand(nobs) * nclusters)
b = np.zeros(a.size, dtype=np.int_)
q = {}
P = np.random.permutation(nclusters)
for i in xrange(0, a.shape[0]):
b[i] = P[a[i]]
if noniso:
Q = np.random.permutation(nobs)
b[Q[0:nerrors]] += 1
b[Q[0:nerrors]] %= nclusters
self.assertTrue(is_isomorphic(a, b) == (not noniso))
self.assertTrue(is_isomorphic(b, a) == (not noniso))
class TestIsValidLinkage(TestCase):
def test_is_valid_linkage_int_type(self):
"Tests is_valid_linkage(Z) with integer type."
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.int)
self.assertTrue(is_valid_linkage(Z) == False)
self.assertRaises(TypeError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_5_columns(self):
"Tests is_valid_linkage(Z) with 5 columns."
Z = np.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=np.double)
self.assertTrue(is_valid_linkage(Z) == False)
self.assertRaises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_3_columns(self):
"Tests is_valid_linkage(Z) with 3 columns."
Z = np.asarray([[0, 1, 3.0],
[3, 2, 4.0]], dtype=np.double)
self.assertTrue(is_valid_linkage(Z) == False)
self.assertRaises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_empty(self):
"Tests is_valid_linkage(Z) with empty linkage."
Z = np.zeros((0, 4), dtype=np.double)
self.assertTrue(is_valid_linkage(Z) == False)
self.assertRaises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_1x4(self):
"Tests is_valid_linkage(Z) on linkage over 2 observations."
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
self.assertTrue(is_valid_linkage(Z) == True)
def test_is_valid_linkage_2x4(self):
"Tests is_valid_linkage(Z) on linkage over 3 observations."
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
self.assertTrue(is_valid_linkage(Z) == True)
def test_is_valid_linkage_4_and_up(self):
"Tests is_valid_linkage(Z) on linkage on observation sets between sizes 4 and 15 (step size 3)."
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)/2)
Z = linkage(y)
self.assertTrue(is_valid_linkage(Z) == True)
def test_is_valid_linkage_4_and_up_neg_index_left(self):
"Tests is_valid_linkage(Z) on linkage on observation sets between sizes 4 and 15 (step size 3) with negative indices (left)."
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)/2)
Z = linkage(y)
Z[int(i/2),0] = -2
self.assertTrue(is_valid_linkage(Z) == False)
self.assertRaises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_index_right(self):
"Tests is_valid_linkage(Z) on linkage on observation sets between sizes 4 and 15 (step size 3) with negative indices (right)."
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)/2)
Z = linkage(y)
Z[int(i/2),1] = -2
self.assertTrue(is_valid_linkage(Z) == False)
self.assertRaises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_dist(self):
"Tests is_valid_linkage(Z) on linkage on observation sets between sizes 4 and 15 (step size 3) with negative distances."
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)/2)
Z = linkage(y)
Z[int(i/2),2] = -0.5
self.assertTrue(is_valid_linkage(Z) == False)
self.assertRaises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_counts(self):
"Tests is_valid_linkage(Z) on linkage on observation sets between sizes 4 and 15 (step size 3) with negative counts."
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)/2)
Z = linkage(y)
Z[int(i/2),3] = -2
self.assertTrue(is_valid_linkage(Z) == False)
self.assertRaises(ValueError, is_valid_linkage, Z, throw=True)
class TestIsValidInconsistent(TestCase):
def test_is_valid_im_int_type(self):
"Tests is_valid_im(R) with integer type."
R = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.int)
self.assertTrue(is_valid_im(R) == False)
self.assertRaises(TypeError, is_valid_im, R, throw=True)
def test_is_valid_im_5_columns(self):
"Tests is_valid_im(R) with 5 columns."
R = np.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=np.double)
self.assertTrue(is_valid_im(R) == False)
self.assertRaises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_3_columns(self):
"Tests is_valid_im(R) with 3 columns."
R = np.asarray([[0, 1, 3.0],
[3, 2, 4.0]], dtype=np.double)
self.assertTrue(is_valid_im(R) == False)
self.assertRaises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_empty(self):
"Tests is_valid_im(R) with empty inconsistency matrix."
R = np.zeros((0, 4), dtype=np.double)
self.assertTrue(is_valid_im(R) == False)
self.assertRaises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_1x4(self):
"Tests is_valid_im(R) on im over 2 observations."
R = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
self.assertTrue(is_valid_im(R) == True)
def test_is_valid_im_2x4(self):
"Tests is_valid_im(R) on im over 3 observations."
R = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
self.assertTrue(is_valid_im(R) == True)
def test_is_valid_im_4_and_up(self):
"Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 (step size 3)."
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)/2)
Z = linkage(y)
R = inconsistent(Z)
self.assertTrue(is_valid_im(R) == True)
def test_is_valid_im_4_and_up_neg_index_left(self):
"Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 (step size 3) with negative link height means."
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)/2)
Z = linkage(y)
R = inconsistent(Z)
R[int(i/2),0] = -2.0
self.assertTrue(is_valid_im(R) == False)
self.assertRaises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up_neg_index_right(self):
"Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 (step size 3) with negative link height standard deviations."
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)/2)
Z = linkage(y)
R = inconsistent(Z)
R[int(i/2),1] = -2.0
self.assertTrue(is_valid_im(R) == False)
self.assertRaises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up_neg_dist(self):
"Tests is_valid_im(R) on im on observation sets between sizes 4 and 15 (step size 3) with negative link counts."
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)/2)
Z = linkage(y)
R = inconsistent(Z)
R[int(i/2),2] = -0.5
self.assertTrue(is_valid_im(R) == False)
self.assertRaises(ValueError, is_valid_im, R, throw=True)
class TestNumObsLinkage(TestCase):
def test_num_obs_linkage_empty(self):
"Tests num_obs_linkage(Z) with empty linkage."
Z = np.zeros((0, 4), dtype=np.double)
self.assertRaises(ValueError, num_obs_linkage, Z)
def test_num_obs_linkage_1x4(self):
"Tests num_obs_linkage(Z) on linkage over 2 observations."
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
self.assertTrue(num_obs_linkage(Z) == 2)
def test_num_obs_linkage_2x4(self):
"Tests num_obs_linkage(Z) on linkage over 3 observations."
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
self.assertTrue(num_obs_linkage(Z) == 3)
def test_num_obs_linkage_4_and_up(self):
"Tests num_obs_linkage(Z) on linkage on observation sets between sizes 4 and 15 (step size 3)."
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)/2)
Z = linkage(y)
self.assertTrue(num_obs_linkage(Z) == i)
class TestLeavesList(TestCase):
def test_leaves_list_1x4(self):
"Tests leaves_list(Z) on a 1x4 linkage."
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
node = to_tree(Z)
self.assertTrue((leaves_list(Z) == [0, 1]).all())
def test_leaves_list_2x4(self):
"Tests leaves_list(Z) on a 2x4 linkage."
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
node = to_tree(Z)
self.assertTrue((leaves_list(Z) == [0, 1, 2]).all())
def test_leaves_list_iris_single(self):
"Tests leaves_list(Z) on the Iris data set using single linkage."
X = eo['iris']
Y = pdist(X)
Z = linkage(X, 'single')
node = to_tree(Z)
self.assertTrue((node.pre_order() == leaves_list(Z)).all())
def test_leaves_list_iris_complete(self):
"Tests leaves_list(Z) on the Iris data set using complete linkage."
X = eo['iris']
Y = pdist(X)
Z = linkage(X, 'complete')
node = to_tree(Z)
self.assertTrue((node.pre_order() == leaves_list(Z)).all())
def test_leaves_list_iris_centroid(self):
"Tests leaves_list(Z) on the Iris data set using centroid linkage."
X = eo['iris']
Y = pdist(X)
Z = linkage(X, 'centroid')
node = to_tree(Z)
self.assertTrue((node.pre_order() == leaves_list(Z)).all())
def test_leaves_list_iris_median(self):
"Tests leaves_list(Z) on the Iris data set using median linkage."
X = eo['iris']
Y = pdist(X)
Z = linkage(X, 'median')
node = to_tree(Z)
self.assertTrue((node.pre_order() == leaves_list(Z)).all())
def test_leaves_list_iris_ward(self):
"Tests leaves_list(Z) on the Iris data set using ward linkage."
X = eo['iris']
Y = pdist(X)
Z = linkage(X, 'ward')
node = to_tree(Z)
self.assertTrue((node.pre_order() == leaves_list(Z)).all())
def test_leaves_list_iris_average(self):
"Tests leaves_list(Z) on the Iris data set using average linkage."
X = eo['iris']
Y = pdist(X)
Z = linkage(X, 'average')
node = to_tree(Z)
self.assertTrue((node.pre_order() == leaves_list(Z)).all())
class TestCorrespond(TestCase):
def test_correspond_empty(self):
"Tests correspond(Z, y) with empty linkage and condensed distance matrix."
y = np.zeros((0,))
Z = np.zeros((0,4))
self.assertRaises(ValueError, correspond, Z, y)
def test_correspond_2_and_up(self):
"Tests correspond(Z, y) on linkage and CDMs over observation sets of different sizes."
for i in xrange(2, 4):
y = np.random.rand(i*(i-1)/2)
Z = linkage(y)
self.assertTrue(correspond(Z, y))
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)/2)
Z = linkage(y)
self.assertTrue(correspond(Z, y))
def test_correspond_4_and_up(self):
"Tests correspond(Z, y) on linkage and CDMs over observation sets of different sizes. Correspondance should be false."
for (i, j) in zip(range(2, 4), range(3, 5)) + zip(range(3, 5), range(2, 4)):
y = np.random.rand(i*(i-1)/2)
y2 = np.random.rand(j*(j-1)/2)
Z = linkage(y)
Z2 = linkage(y2)
self.assertTrue(correspond(Z, y2) == False)
self.assertTrue(correspond(Z2, y) == False)
def test_correspond_4_and_up_2(self):
"Tests correspond(Z, y) on linkage and CDMs over observation sets of different sizes. Correspondance should be false."
for (i, j) in zip(range(2, 7), range(16, 21)) + zip(range(2, 7), range(16, 21)):
y = np.random.rand(i*(i-1)/2)
y2 = np.random.rand(j*(j-1)/2)
Z = linkage(y)
Z2 = linkage(y2)
self.assertTrue(correspond(Z, y2) == False)
self.assertTrue(correspond(Z2, y) == False)
def test_num_obs_linkage_multi_matrix(self):
"Tests num_obs_linkage with observation matrices of multiple sizes."
for n in xrange(2, 10):
X = np.random.rand(n, 4)
Y = pdist(X)
Z = linkage(Y)
#print Z
#print A.shape, Y.shape, Yr.shape
self.assertTrue(num_obs_linkage(Z) == n)
class TestIsMonotonic(TestCase):
def test_is_monotonic_empty(self):
"Tests is_monotonic(Z) on an empty linkage."
Z = np.zeros((0, 4))
self.assertRaises(ValueError, is_monotonic, Z)
def test_is_monotonic_1x4(self):
"Tests is_monotonic(Z) on 1x4 linkage. Expecting True."
Z = np.asarray([[0, 1, 0.3, 2]], dtype=np.double);
self.assertTrue(is_monotonic(Z) == True)
def test_is_monotonic_2x4_T(self):
"Tests is_monotonic(Z) on 2x4 linkage. Expecting True."
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 3]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == True)
def test_is_monotonic_2x4_F(self):
"Tests is_monotonic(Z) on 2x4 linkage. Expecting False."
Z = np.asarray([[0, 1, 0.4, 2],
[2, 3, 0.3, 3]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == False)
def test_is_monotonic_3x4_T(self):
"Tests is_monotonic(Z) on 3x4 linkage. Expecting True."
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == True)
def test_is_monotonic_3x4_F1(self):
"Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False."
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.2, 2],
[4, 5, 0.6, 4]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == False)
def test_is_monotonic_3x4_F2(self):
"Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False."
Z = np.asarray([[0, 1, 0.8, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == False)
def test_is_monotonic_3x4_F3(self):
"Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False"
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.2, 4]], dtype=np.double)
self.assertTrue(is_monotonic(Z) == False)
def test_is_monotonic_tdist_linkage1(self):
"Tests is_monotonic(Z) on clustering generated by single linkage on tdist data set. Expecting True."
Z = linkage(_ytdist, 'single')
self.assertTrue(is_monotonic(Z) == True)
def test_is_monotonic_tdist_linkage2(self):
"Tests is_monotonic(Z) on clustering generated by single linkage on tdist data set. Perturbing. Expecting False."
Z = linkage(_ytdist, 'single')
Z[2,2]=0.0
self.assertTrue(is_monotonic(Z) == False)
def test_is_monotonic_iris_linkage(self):
"Tests is_monotonic(Z) on clustering generated by single linkage on Iris data set. Expecting True."
X = eo['iris']
Y = pdist(X)
Z = linkage(X, 'single')
self.assertTrue(is_monotonic(Z) == True)
class TestMaxDists(TestCase):
def test_maxdists_empty_linkage(self):
"Tests maxdists(Z) on empty linkage. Expecting exception."
Z = np.zeros((0, 4), dtype=np.double)
self.assertRaises(ValueError, maxdists, Z)
def test_maxdists_one_cluster_linkage(self):
"Tests maxdists(Z) on linkage with one cluster."
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
MD = maxdists(Z)
eps = 1e-15
expectedMD = calculate_maximum_distances(Z)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxdists_Q_linkage_single(self):
"Tests maxdists(Z) on the Q data set using single linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'single')
MD = maxdists(Z)
eps = 1e-15
expectedMD = calculate_maximum_distances(Z)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxdists_Q_linkage_complete(self):
"Tests maxdists(Z) on the Q data set using complete linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'complete')
MD = maxdists(Z)
eps = 1e-15
expectedMD = calculate_maximum_distances(Z)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxdists_Q_linkage_ward(self):
"Tests maxdists(Z) on the Q data set using Ward linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'ward')
MD = maxdists(Z)
eps = 1e-15
expectedMD = calculate_maximum_distances(Z)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxdists_Q_linkage_centroid(self):
"Tests maxdists(Z) on the Q data set using centroid linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'centroid')
MD = maxdists(Z)
eps = 1e-15
expectedMD = calculate_maximum_distances(Z)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxdists_Q_linkage_median(self):
"Tests maxdists(Z) on the Q data set using median linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'median')
MD = maxdists(Z)
eps = 1e-15
expectedMD = calculate_maximum_distances(Z)
self.assertTrue(within_tol(MD, expectedMD, eps))
class TestMaxInconsts(TestCase):
def test_maxinconsts_empty_linkage(self):
"Tests maxinconsts(Z, R) on empty linkage. Expecting exception."
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
self.assertRaises(ValueError, maxinconsts, Z, R)
def test_maxinconsts_difrow_linkage(self):
"Tests maxinconsts(Z, R) on linkage and inconsistency matrices with different numbers of clusters. Expecting exception."
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
self.assertRaises(ValueError, maxinconsts, Z, R)
def test_maxinconsts_one_cluster_linkage(self):
"Tests maxinconsts(Z, R) on linkage with one cluster."
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
MD = maxinconsts(Z, R)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxinconsts_Q_linkage_single(self):
"Tests maxinconsts(Z, R) on the Q data set using single linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'single')
R = inconsistent(Z)
MD = maxinconsts(Z, R)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxinconsts_Q_linkage_complete(self):
"Tests maxinconsts(Z, R) on the Q data set using complete linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'complete')
R = inconsistent(Z)
MD = maxinconsts(Z, R)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxinconsts_Q_linkage_ward(self):
"Tests maxinconsts(Z, R) on the Q data set using Ward linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'ward')
R = inconsistent(Z)
MD = maxinconsts(Z, R)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxinconsts_Q_linkage_centroid(self):
"Tests maxinconsts(Z, R) on the Q data set using centroid linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'centroid')
R = inconsistent(Z)
MD = maxinconsts(Z, R)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxinconsts_Q_linkage_median(self):
"Tests maxinconsts(Z, R) on the Q data set using median linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'median')
R = inconsistent(Z)
MD = maxinconsts(Z, R)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R)
self.assertTrue(within_tol(MD, expectedMD, eps))
class TestMaxRStat(TestCase):
def test_maxRstat_float_index(self):
"Tests maxRstat(Z, R, 3.3). Expecting exception."
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
self.assertRaises(TypeError, maxRstat, Z, R, 3.3)
def test_maxRstat_neg_index(self):
"Tests maxRstat(Z, R, -1). Expecting exception."
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
self.assertRaises(ValueError, maxRstat, Z, R, -1)
def test_maxRstat_oob_pos_index(self):
"Tests maxRstat(Z, R, 4). Expecting exception."
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
self.assertRaises(ValueError, maxRstat, Z, R, 4)
def test_maxRstat_0_empty_linkage(self):
"Tests maxRstat(Z, R, 0) on empty linkage. Expecting exception."
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
self.assertRaises(ValueError, maxRstat, Z, R, 0)
def test_maxRstat_0_difrow_linkage(self):
"Tests maxRstat(Z, R, 0) on linkage and inconsistency matrices with different numbers of clusters. Expecting exception."
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
self.assertRaises(ValueError, maxRstat, Z, R, 0)
def test_maxRstat_0_one_cluster_linkage(self):
"Tests maxRstat(Z, R, 0) on linkage with one cluster."
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
MD = maxRstat(Z, R, 0)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R, 0)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxRstat_0_Q_linkage_single(self):
"Tests maxRstat(Z, R, 0) on the Q data set using single linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'single')
R = inconsistent(Z)
MD = maxRstat(Z, R, 0)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R, 0)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxRstat_0_Q_linkage_complete(self):
"Tests maxRstat(Z, R, 0) on the Q data set using complete linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'complete')
R = inconsistent(Z)
MD = maxRstat(Z, R, 0)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R, 0)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxRstat_0_Q_linkage_ward(self):
"Tests maxRstat(Z, R, 0) on the Q data set using Ward linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'ward')
R = inconsistent(Z)
MD = maxRstat(Z, R, 0)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R, 0)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxRstat_0_Q_linkage_centroid(self):
"Tests maxRstat(Z, R, 0) on the Q data set using centroid linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'centroid')
R = inconsistent(Z)
MD = maxRstat(Z, R, 0)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R, 0)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxRstat_0_Q_linkage_median(self):
"Tests maxRstat(Z, R, 0) on the Q data set using median linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'median')
R = inconsistent(Z)
MD = maxRstat(Z, R, 0)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R, 0)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxRstat_1_empty_linkage(self):
"Tests maxRstat(Z, R, 1) on empty linkage. Expecting exception."
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
self.assertRaises(ValueError, maxRstat, Z, R, 0)
def test_maxRstat_1_difrow_linkage(self):
"Tests maxRstat(Z, R, 1) on linkage and inconsistency matrices with different numbers of clusters. Expecting exception."
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
self.assertRaises(ValueError, maxRstat, Z, R, 0)
def test_maxRstat_1_one_cluster_linkage(self):
"Tests maxRstat(Z, R, 1) on linkage with one cluster."
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
MD = maxRstat(Z, R, 1)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R, 1)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxRstat_1_Q_linkage_single(self):
"Tests maxRstat(Z, R, 1) on the Q data set using single linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'single')
R = inconsistent(Z)
MD = maxRstat(Z, R, 1)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R, 1)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxRstat_1_Q_linkage_complete(self):
"Tests maxRstat(Z, R, 1) on the Q data set using complete linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'complete')
R = inconsistent(Z)
MD = maxRstat(Z, R, 1)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R, 1)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxRstat_1_Q_linkage_ward(self):
"Tests maxRstat(Z, R, 1) on the Q data set using Ward linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'ward')
R = inconsistent(Z)
MD = maxRstat(Z, R, 1)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R, 1)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxRstat_1_Q_linkage_centroid(self):
"Tests maxRstat(Z, R, 1) on the Q data set using centroid linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'centroid')
R = inconsistent(Z)
MD = maxRstat(Z, R, 1)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R, 1)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxRstat_1_Q_linkage_median(self):
"Tests maxRstat(Z, R, 1) on the Q data set using median linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'median')
R = inconsistent(Z)
MD = maxRstat(Z, R, 1)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R, 1)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxRstat_2_empty_linkage(self):
"Tests maxRstat(Z, R, 2) on empty linkage. Expecting exception."
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
self.assertRaises(ValueError, maxRstat, Z, R, 2)
def test_maxRstat_2_difrow_linkage(self):
"Tests maxRstat(Z, R, 2) on linkage and inconsistency matrices with different numbers of clusters. Expecting exception."
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
self.assertRaises(ValueError, maxRstat, Z, R, 2)
def test_maxRstat_2_one_cluster_linkage(self):
"Tests maxRstat(Z, R, 2) on linkage with one cluster."
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
MD = maxRstat(Z, R, 2)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R, 2)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxRstat_2_Q_linkage_single(self):
"Tests maxRstat(Z, R, 2) on the Q data set using single linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'single')
R = inconsistent(Z)
MD = maxRstat(Z, R, 2)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R, 2)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxRstat_2_Q_linkage_complete(self):
"Tests maxRstat(Z, R, 2) on the Q data set using complete linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'complete')
R = inconsistent(Z)
MD = maxRstat(Z, R, 2)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R, 2)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxRstat_2_Q_linkage_ward(self):
"Tests maxRstat(Z, R, 2) on the Q data set using Ward linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'ward')
R = inconsistent(Z)
MD = maxRstat(Z, R, 2)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R, 2)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxRstat_2_Q_linkage_centroid(self):
"Tests maxRstat(Z, R, 2) on the Q data set using centroid linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'centroid')
R = inconsistent(Z)
MD = maxRstat(Z, R, 2)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R, 2)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxRstat_2_Q_linkage_median(self):
"Tests maxRstat(Z, R, 2) on the Q data set using median linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'median')
R = inconsistent(Z)
MD = maxRstat(Z, R, 2)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R, 2)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxRstat_3_empty_linkage(self):
"Tests maxRstat(Z, R, 3) on empty linkage. Expecting exception."
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
self.assertRaises(ValueError, maxRstat, Z, R, 3)
def test_maxRstat_3_difrow_linkage(self):
"Tests maxRstat(Z, R, 3) on linkage and inconsistency matrices with different numbers of clusters. Expecting exception."
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
self.assertRaises(ValueError, maxRstat, Z, R, 3)
def test_maxRstat_3_one_cluster_linkage(self):
"Tests maxRstat(Z, R, 3) on linkage with one cluster."
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
MD = maxRstat(Z, R, 3)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R, 3)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxRstat_3_Q_linkage_single(self):
"Tests maxRstat(Z, R, 3) on the Q data set using single linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'single')
R = inconsistent(Z)
MD = maxRstat(Z, R, 3)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R, 3)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxRstat_3_Q_linkage_complete(self):
"Tests maxRstat(Z, R, 3) on the Q data set using complete linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'complete')
R = inconsistent(Z)
MD = maxRstat(Z, R, 3)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R, 3)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxRstat_3_Q_linkage_ward(self):
"Tests maxRstat(Z, R, 3) on the Q data set using Ward linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'ward')
R = inconsistent(Z)
MD = maxRstat(Z, R, 3)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R, 3)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxRstat_3_Q_linkage_centroid(self):
"Tests maxRstat(Z, R, 3) on the Q data set using centroid linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'centroid')
R = inconsistent(Z)
MD = maxRstat(Z, R, 3)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R, 3)
self.assertTrue(within_tol(MD, expectedMD, eps))
def test_maxRstat_3_Q_linkage_median(self):
"Tests maxRstat(Z, R, 3) on the Q data set using median linkage."
X = eo['Q-X']
Y = pdist(X)
Z = linkage(X, 'median')
R = inconsistent(Z)
MD = maxRstat(Z, R, 3)
eps = 1e-15
expectedMD = calculate_maximum_inconsistencies(Z, R, 3)
self.assertTrue(within_tol(MD, expectedMD, eps))
def calculate_maximum_distances(Z):
"Used for testing correctness of maxdists. Very slow."
n = Z.shape[0] + 1
B = np.zeros((n-1,))
q = np.zeros((3,))
for i in xrange(0, n - 1):
q[:] = 0.0
left = Z[i, 0]
right = Z[i, 1]
if left >= n:
q[0] = B[left - n]
if right >= n:
q[1] = B[right - n]
q[2] = Z[i, 2]
B[i] = q.max()
return B
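# In other words, B[i] above is the largest merge distance anywhere in the
# subtree rooted at cluster n+i: the recurrence keeps the larger of the child
# clusters' maxima (q[0], q[1]) and the distance of merge i itself (q[2]),
# which is exactly what the maxdists(Z) results are checked against above.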
def calculate_maximum_inconsistencies(Z, R, k=3):
"Used for testing correctness of maxinconsts. Very slow."
n = Z.shape[0] + 1
B = np.zeros((n-1,))
q = np.zeros((3,))
#print R.shape
for i in xrange(0, n - 1):
q[:] = 0.0
left = Z[i, 0]
right = Z[i, 1]
if left >= n:
q[0] = B[left - n]
if right >= n:
q[1] = B[right - n]
q[2] = R[i, k]
B[i] = q.max()
return B
def within_tol(a, b, tol):
return np.abs(a - b).max() < tol
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 |
mitar/pychecker | pychecker2/utest/ops.py | 11 | 1735 |
from pychecker2 import TestSupport
from pychecker2 import OpChecks
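# Inferred from the calls below (an inference from usage, not a documented
# contract): self.warning(source, line, check, *args) asserts that checking
# `source` triggers the given warning at the given line, while
# self.silent(source) asserts that no warning is emitted.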
class OpTests(TestSupport.WarningTester):
def testOperator(self):
for op in ['--', '++']:
self.warning('def f(x):\n'
' return %sx' % op,
2, OpChecks.OpCheck.operator, op)
def testOperatorPlus(self):
self.warning('def f(x):\n'
' return +x', 2, OpChecks.OpCheck.operatorPlus)
def testEqualNone(self):
self.silent('def f(x):\n'
' return 1 <= x <= None\n')
self.warning('def f(x):\n'
' return x == None\n', 2, OpChecks.CompareCheck.useIs)
self.warning('def f(x):\n'
' return 1 == x == None\n', 2, OpChecks.CompareCheck.useIs)
self.warning('def f(x):\n'
' return None == x == 1\n', 2, OpChecks.CompareCheck.useIs)
self.warning('def f(x):\n'
' return None == x\n', 2, OpChecks.CompareCheck.useIs)
def testExcept(self):
self.warning('try:\n'
' pass\n'
'except:\n'
' pass\n', 4, OpChecks.ExceptCheck.emptyExcept)
self.warning('try:\n'
' pass\n'
'except AssertionError:\n'\
' pass\n'
'except:\n'
' pass\n', 6, OpChecks.ExceptCheck.emptyExcept)
self.warning('try:\n'
' pass\n'
'except:\n'
' pass\n'
'except AssertionError:\n'\
' pass\n', 4, OpChecks.ExceptCheck.emptyExcept)
| bsd-3-clause |
percona/tokudb-engine | mysql-test/suite/tokudb/replace-ignore-gen.py | 56 | 1868 |
def sqlgen_setup():
print "--disable_warnings"
print "drop table if exists t;"
print "--enable_warnings"
def sqlgen_fill_table(n):
print "insert into t values"
for i in range(n):
print " (%s, %s, %s)," % (i, i, 10*i)
print " (%s, %s, %s);" % (n, n, 10*n)
def sqlgen_create_table(fields, pk, keys):
print "create table t ("
print " %s, " % fields
print " primary key (%s), " % pk
print " %s" % keys
print ") engine = tokudb;"
def sqlgen_explain_and_do(query):
print "explain %s" % query
print query
def sqlgen_drop_table():
print "drop table t;"
print "# Tokutek"
print "# Test that replace into and insert ignore insertions "
print "# work under various index schemas. "
print "#"
print "# this test is interesting because tokudb can do blind "
print "# (searchless) insertions into dictionaries when keys are"
print "# a subset of the primary key, but not otherwise."
print ""
sqlgen_setup()
print ""
num_rows = 50;
pk = "a, b"
fields = "a int, b int, c int"
for query in ["insert ignore", "replace into"]:
print "# testing query type \"%s\"" % query
for keys in ["key (b)", "key (b), key(c)"]:
print ""
print "# testing primary key %s" % pk
sqlgen_create_table(fields, pk, keys)
sqlgen_fill_table(num_rows);
print ""
for k in ["8", "15"]:
print "%s t values (%s, %s, -1);" % (query, k, k)
s = "select * from t where a = %s;" % k
sqlgen_explain_and_do(s)
s = "select * from t force index (b) where b = %s;" % k
sqlgen_explain_and_do(s)
n = int(k) * 10
s = "select * from t where c = %s;" % n
sqlgen_explain_and_do(s)
s = "select * from t where c = -1;"
sqlgen_explain_and_do(s)
sqlgen_drop_table()
| gpl-2.0 |
AsherBond/MondocosmOS | grass_trunk/scripts/r.reclass.area/r.reclass.area.py | 2 | 3579 |
#!/usr/bin/env python
############################################################################
#
# MODULE: r.reclass.area
# AUTHOR(S): NRCS
# Converted to Python by Glynn Clements
# PURPOSE: Reclasses a raster map greater or less than user specified area size (in hectares)
# COPYRIGHT: (C) 1999,2008 by the GRASS Development Team
#
# This program is free software under the GNU General Public
# License (>=v2). Read the file COPYING that comes with GRASS
# for details.
#
#############################################################################
# 3/2007: added label support MN
# 3/2004: added parser support MN
# 11/2001 added mapset support markus
# 2/2001 fixes markus
# 2000: updated to GRASS 5
# 1998 from NRCS, slightly modified for GRASS 4.2.1
#%module
#% description: Reclasses a raster map greater or less than user specified area size (in hectares).
#% keywords: raster
#% keywords: statistics
#% keywords: aggregation
#%end
#%option G_OPT_R_INPUT
#%end
#%option G_OPT_R_OUTPUT
#%end
#%option
#% key: lesser
#% type: double
#% description: Lesser value option that sets the <= area size limit [hectares]
#%end
#%option
#% key: greater
#% type: double
#% description: Greater value option that sets the >= area size limit [hectares]
#%end
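# Example invocation (map names are hypothetical), keeping only areas of at
# least 10 hectares:
#   r.reclass.area input=lakes output=lakes_large greater=10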
import sys
import os
import grass.script as grass
def main():
infile = options['input']
lesser = options['lesser']
greater = options['greater']
outfile = options['output']
s = grass.read_command("g.region", flags = 'p')
kv = grass.parse_key_val(s, sep = ':')
s = kv['projection'].strip().split()
# kv['projection'] typically looks like "0 (x,y)" or "99 (Lambert ...)", so
# compare the numeric code rather than the whole list returned by split().
if s[0] == '0':
grass.fatal(_("xy-locations are not supported. Need projected data with grids in meters"))
if not lesser and not greater:
grass.fatal(_("You have to specify either lesser= or greater="))
if lesser and greater:
grass.fatal(_("lesser= and greater= are mutually exclusive"))
if lesser:
limit = float(lesser)
if greater:
limit = float(greater)
if not grass.find_file(infile)['name']:
grass.fatal(_("Raster map <%s> not found") % infile)
clumpfile = "%s.clump.%s" % (infile.split('@')[0], outfile)
if not grass.overwrite():
if grass.find_file(clumpfile)['name']:
grass.fatal(_("Temporary raster map <%s> exists") % clumpfile)
grass.message(_("Generating a clumped raster file ..."))
grass.run_command('r.clump', input = infile, output = clumpfile)
if lesser:
grass.message(_("Generating a reclass map with area size less than or equal to %f hectares...") % limit)
else:
grass.message(_("Generating a reclass map with area size greater than or equal to %f hectares...") % limit)
recfile = outfile + '.recl'
p1 = grass.pipe_command('r.stats', flags = 'aln', input = (clumpfile, infile), fs = '|')
p2 = grass.feed_command('r.reclass', input = clumpfile, output = recfile, rules = '-')
for line in p1.stdout:
f = line.rstrip('\r\n').split('|')
if len(f) < 5:
continue
hectares = float(f[4]) * 0.0001
if lesser:
test = hectares <= limit
else:
test = hectares >= limit
if test:
p2.stdin.write("%s = %s %s\n" % (f[0], f[2], f[3]))
p1.wait()
p2.stdin.close()
p2.wait()
grass.message(_("Generating output raster map <%s>...") % outfile)
grass.mapcalc("$outfile = $recfile", outfile = outfile, recfile = recfile)
grass.run_command('g.remove', rast = [recfile, clumpfile], quiet = True)
if __name__ == "__main__":
options, flags = grass.parser()
main()
| agpl-3.0 |
RohitDas/cubeproject | lib/django/db/backends/postgresql_psycopg2/operations.py | 80 | 10124 |
from __future__ import unicode_literals
from psycopg2.extras import Inet
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
class DatabaseOperations(BaseDatabaseOperations):
def unification_cast_sql(self, output_field):
internal_type = output_field.get_internal_type()
if internal_type in ("GenericIPAddressField", "IPAddressField", "TimeField", "UUIDField"):
# PostgreSQL will resolve a union as type 'text' if input types are
# 'unknown'.
# http://www.postgresql.org/docs/9.4/static/typeconv-union-case.html
# These fields cannot be implicitly cast back in the default
# PostgreSQL configuration so we need to explicitly cast them.
# We must also remove components of the type within brackets:
# varchar(255) -> varchar.
return 'CAST(%%s AS %s)' % output_field.db_type(self.connection).split('(')[0]
return '%s'
def date_extract_sql(self, lookup_type, field_name):
# http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
if lookup_type == 'week_day':
# For consistency across backends, we return Sunday=1, Saturday=7.
return "EXTRACT('dow' FROM %s) + 1" % field_name
else:
return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)
def date_trunc_sql(self, lookup_type, field_name):
# http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
def datetime_extract_sql(self, lookup_type, field_name, tzname):
if settings.USE_TZ:
field_name = "%s AT TIME ZONE %%s" % field_name
params = [tzname]
else:
params = []
# http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
if lookup_type == 'week_day':
# For consistency across backends, we return Sunday=1, Saturday=7.
sql = "EXTRACT('dow' FROM %s) + 1" % field_name
else:
sql = "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)
return sql, params
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
if settings.USE_TZ:
field_name = "%s AT TIME ZONE %%s" % field_name
params = [tzname]
else:
params = []
# http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
sql = "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
return sql, params
def deferrable_sql(self):
return " DEFERRABLE INITIALLY DEFERRED"
def lookup_cast(self, lookup_type, internal_type=None):
lookup = '%s'
# Cast text lookups to text to allow things like filter(x__contains=4)
if lookup_type in ('iexact', 'contains', 'icontains', 'startswith',
'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'):
if internal_type in ('IPAddressField', 'GenericIPAddressField'):
lookup = "HOST(%s)"
else:
lookup = "%s::text"
# Use UPPER(x) for case-insensitive lookups; it's faster.
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
lookup = 'UPPER(%s)' % lookup
return lookup
def last_insert_id(self, cursor, table_name, pk_name):
# Use pg_get_serial_sequence to get the underlying sequence name
# from the table name and column name (available since PostgreSQL 8)
cursor.execute("SELECT CURRVAL(pg_get_serial_sequence('%s','%s'))" % (
self.quote_name(table_name), pk_name))
return cursor.fetchone()[0]
def no_limit_value(self):
return None
def prepare_sql_script(self, sql, _allow_fallback=False):
return [sql]
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def set_time_zone_sql(self):
return "SET TIME ZONE %s"
def sql_flush(self, style, tables, sequences, allow_cascade=False):
if tables:
# Perform a single SQL 'TRUNCATE x, y, z...;' statement. It allows
# us to truncate tables referenced by a foreign key in any other
# table.
tables_sql = ', '.join(
style.SQL_FIELD(self.quote_name(table)) for table in tables)
if allow_cascade:
sql = ['%s %s %s;' % (
style.SQL_KEYWORD('TRUNCATE'),
tables_sql,
style.SQL_KEYWORD('CASCADE'),
)]
else:
sql = ['%s %s;' % (
style.SQL_KEYWORD('TRUNCATE'),
tables_sql,
)]
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
else:
return []
def sequence_reset_by_name_sql(self, style, sequences):
# 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements
# to reset sequence indices
sql = []
for sequence_info in sequences:
table_name = sequence_info['table']
column_name = sequence_info['column']
if not (column_name and len(column_name) > 0):
# This will be the case if it's an m2m using an autogenerated
# intermediate table (see BaseDatabaseIntrospection.sequence_list)
column_name = 'id'
sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" %
(style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(self.quote_name(table_name)),
style.SQL_FIELD(column_name))
)
return sql
def tablespace_sql(self, tablespace, inline=False):
if inline:
return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
else:
return "TABLESPACE %s" % self.quote_name(tablespace)
def sequence_reset_sql(self, style, model_list):
from django.db import models
output = []
qn = self.quote_name
for model in model_list:
# Use `coalesce` to set the sequence for each model to the max pk value if there are records,
# or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true
# if there are records (as the max pk value is already in use), otherwise set it to false.
# Use pg_get_serial_sequence to get the underlying sequence name from the table name
# and column name (available since PostgreSQL 8)
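# Added illustration (hypothetical table/column names, not in the original
# source): the statement assembled below comes out roughly as
# SELECT setval(pg_get_serial_sequence('"myapp_book"','id'),
# coalesce(max("id"), 1), max("id") IS NOT null) FROM "myapp_book";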
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
output.append(
"%s setval(pg_get_serial_sequence('%s','%s'), "
"coalesce(max(%s), 1), max(%s) %s null) %s %s;" % (
style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(qn(model._meta.db_table)),
style.SQL_FIELD(f.column),
style.SQL_FIELD(qn(f.column)),
style.SQL_FIELD(qn(f.column)),
style.SQL_KEYWORD('IS NOT'),
style.SQL_KEYWORD('FROM'),
style.SQL_TABLE(qn(model._meta.db_table)),
)
)
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.many_to_many:
if not f.rel.through:
output.append(
"%s setval(pg_get_serial_sequence('%s','%s'), "
"coalesce(max(%s), 1), max(%s) %s null) %s %s;" % (
style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(qn(f.m2m_db_table())),
style.SQL_FIELD('id'),
style.SQL_FIELD(qn('id')),
style.SQL_FIELD(qn('id')),
style.SQL_KEYWORD('IS NOT'),
style.SQL_KEYWORD('FROM'),
style.SQL_TABLE(qn(f.m2m_db_table()))
)
)
return output
def prep_for_iexact_query(self, x):
return x
def max_name_length(self):
"""
Returns the maximum length of an identifier.
Note that the maximum length of an identifier is 63 by default, but can
be changed by recompiling PostgreSQL after editing the NAMEDATALEN
macro in src/include/pg_config_manual.h .
This implementation simply returns 63, but can easily be overridden by a
custom database backend that inherits most of its behavior from this one.
"""
return 63
def distinct_sql(self, fields):
if fields:
return 'DISTINCT ON (%s)' % ', '.join(fields)
else:
return 'DISTINCT'
def last_executed_query(self, cursor, sql, params):
# http://initd.org/psycopg/docs/cursor.html#cursor.query
# The query attribute is a Psycopg extension to the DB API 2.0.
if cursor.query is not None:
return cursor.query.decode('utf-8')
return None
def return_insert_id(self):
return "RETURNING %s", ()
def bulk_insert_sql(self, fields, num_values):
items_sql = "(%s)" % ", ".join(["%s"] * len(fields))
return "VALUES " + ", ".join([items_sql] * num_values)
def value_to_db_date(self, value):
return value
def value_to_db_datetime(self, value):
return value
def value_to_db_time(self, value):
return value
def value_to_db_ipaddress(self, value):
if value:
return Inet(value)
return None
|
bsd-3-clause
|
zaffra/Donate
|
django/core/validators.py
|
158
|
6583
|
import re
import urlparse
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode
# These values, if given to validate(), will trigger the self.required check.
EMPTY_VALUES = (None, '', [], (), {})
try:
from django.conf import settings
URL_VALIDATOR_USER_AGENT = settings.URL_VALIDATOR_USER_AGENT
except ImportError:
# It's OK if Django settings aren't configured.
URL_VALIDATOR_USER_AGENT = 'Django (http://www.djangoproject.com/)'
class RegexValidator(object):
regex = ''
message = _(u'Enter a valid value.')
code = 'invalid'
def __init__(self, regex=None, message=None, code=None):
if regex is not None:
self.regex = regex
if message is not None:
self.message = message
if code is not None:
self.code = code
if isinstance(self.regex, basestring):
self.regex = re.compile(regex)
def __call__(self, value):
"""
Validates that the input matches the regular expression.
"""
if not self.regex.search(smart_unicode(value)):
raise ValidationError(self.message, code=self.code)
class URLValidator(RegexValidator):
regex = re.compile(
r'^https?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
def __init__(self, verify_exists=False, validator_user_agent=URL_VALIDATOR_USER_AGENT):
super(URLValidator, self).__init__()
self.verify_exists = verify_exists
self.user_agent = validator_user_agent
def __call__(self, value):
try:
super(URLValidator, self).__call__(value)
except ValidationError, e:
# Trivial case failed. Try for possible IDN domain
if value:
value = smart_unicode(value)
scheme, netloc, path, query, fragment = urlparse.urlsplit(value)
try:
netloc = netloc.encode('idna') # IDN -> ACE
except UnicodeError: # invalid domain part
raise e
url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
super(URLValidator, self).__call__(url)
else:
raise
else:
url = value
if self.verify_exists:
import urllib2
headers = {
"Accept": "text/xml,application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5",
"Accept-Language": "en-us,en;q=0.5",
"Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7",
"Connection": "close",
"User-Agent": self.user_agent,
}
try:
req = urllib2.Request(url, None, headers)
u = urllib2.urlopen(req)
except ValueError:
raise ValidationError(_(u'Enter a valid URL.'), code='invalid')
except: # urllib2.URLError, httplib.InvalidURL, etc.
raise ValidationError(_(u'This URL appears to be a broken link.'), code='invalid_link')
def validate_integer(value):
try:
int(value)
except (ValueError, TypeError), e:
raise ValidationError('')
class EmailValidator(RegexValidator):
def __call__(self, value):
try:
super(EmailValidator, self).__call__(value)
except ValidationError, e:
# Trivial case failed. Try for possible IDN domain-part
if value and u'@' in value:
parts = value.split(u'@')
domain_part = parts[-1]
try:
parts[-1] = parts[-1].encode('idna')
except UnicodeError:
raise e
super(EmailValidator, self).__call__(u'@'.join(parts))
else:
raise
email_re = re.compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016-\177])*"' # quoted-string
r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?$', re.IGNORECASE) # domain
validate_email = EmailValidator(email_re, _(u'Enter a valid e-mail address.'), 'invalid')
slug_re = re.compile(r'^[-\w]+$')
validate_slug = RegexValidator(slug_re, _(u"Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."), 'invalid')
ipv4_re = re.compile(r'^(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}$')
validate_ipv4_address = RegexValidator(ipv4_re, _(u'Enter a valid IPv4 address.'), 'invalid')
comma_separated_int_list_re = re.compile('^[\d,]+$')
validate_comma_separated_integer_list = RegexValidator(comma_separated_int_list_re, _(u'Enter only digits separated by commas.'), 'invalid')
class BaseValidator(object):
compare = lambda self, a, b: a is not b
clean = lambda self, x: x
message = _(u'Ensure this value is %(limit_value)s (it is %(show_value)s).')
code = 'limit_value'
def __init__(self, limit_value):
self.limit_value = limit_value
def __call__(self, value):
cleaned = self.clean(value)
params = {'limit_value': self.limit_value, 'show_value': cleaned}
if self.compare(cleaned, self.limit_value):
raise ValidationError(
self.message % params,
code=self.code,
params=params,
)
class MaxValueValidator(BaseValidator):
compare = lambda self, a, b: a > b
message = _(u'Ensure this value is less than or equal to %(limit_value)s.')
code = 'max_value'
class MinValueValidator(BaseValidator):
compare = lambda self, a, b: a < b
message = _(u'Ensure this value is greater than or equal to %(limit_value)s.')
code = 'min_value'
class MinLengthValidator(BaseValidator):
compare = lambda self, a, b: a < b
clean = lambda self, x: len(x)
message = _(u'Ensure this value has at least %(limit_value)d characters (it has %(show_value)d).')
code = 'min_length'
class MaxLengthValidator(BaseValidator):
compare = lambda self, a, b: a > b
clean = lambda self, x: len(x)
message = _(u'Ensure this value has at most %(limit_value)d characters (it has %(show_value)d).')
code = 'max_length'
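# Added usage sketch (illustrative, not part of the original module):
# MaxLengthValidator(5)('abcdef') raises ValidationError because clean() maps
# the value to len('abcdef') == 6 and compare(6, 5) is True, while
# MinValueValidator(0)(3) returns silently since compare(3, 0) is False.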
|
bsd-3-clause
|
danielharbor/openerp
|
addons/point_of_sale/wizard/pos_box.py
|
381
|
2211
|
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.addons.account.wizard.pos_box import CashBox
class PosBox(CashBox):
_register = False
def run(self, cr, uid, ids, context=None):
if not context:
context = dict()
active_model = context.get('active_model', False) or False
active_ids = context.get('active_ids', []) or []
if active_model == 'pos.session':
records = self.pool[active_model].browse(cr, uid, active_ids, context=context)
bank_statements = [record.cash_register_id for record in records if record.cash_register_id]
if not bank_statements:
raise osv.except_osv(_('Error!'),
_("There is no cash register for this PoS Session"))
return self._run(cr, uid, ids, bank_statements, context=context)
else:
return super(PosBox, self).run(cr, uid, ids, context=context)
class PosBoxIn(PosBox):
_inherit = 'cash.box.in'
def _compute_values_for_statement_line(self, cr, uid, box, record, context=None):
if context is None:
context = {}
values = super(PosBoxIn, self)._compute_values_for_statement_line(cr, uid, box, record, context=context)
active_model = context.get('active_model', False) or False
active_ids = context.get('active_ids', []) or []
if active_model == 'pos.session':
session = self.pool[active_model].browse(cr, uid, active_ids, context=context)[0]
values['ref'] = session.name
return values
class PosBoxOut(PosBox):
_inherit = 'cash.box.out'
def _compute_values_for_statement_line(self, cr, uid, box, record, context=None):
values = super(PosBoxOut, self)._compute_values_for_statement_line(cr, uid, box, record, context=context)
active_model = context.get('active_model', False) or False
active_ids = context.get('active_ids', []) or []
if active_model == 'pos.session':
session = self.pool[active_model].browse(cr, uid, active_ids, context=context)[0]
values['ref'] = session.name
return values
|
agpl-3.0
|
lukeiwanski/tensorflow-opencl
|
tensorflow/contrib/training/python/training/feeder_test.py
|
43
|
10781
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.training.feeder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import portpicker
from tensorflow.contrib.training.python.training import feeder as feeder_lib
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import server_lib
_PORTS = set()
def _pick_unused_port():
"""For some reason portpicker returns the same port sometimes."""
while True:
p = portpicker.pick_unused_port()
if p not in _PORTS:
break
_PORTS.add(p)
return p
class FeederThread(object):
# Helper class, wrapping a feeder and making sure it's located on the proper
# device
def __init__(self, test_case, coord, servers, job, task_num, prefix=''):
self.graph = ops.Graph()
self.coord = coord
self.server = servers[job][task_num]
self.remote_devices = []
# Just because we do tf.session(X) doesn't mean ops will be located
# on the X task; wrapping all feeder creation/interaction in an
# extra tf.device(X) ensures that any ops that don't provide
# their own tf.device() wrapper will be placed on the correct "local"
# feeder task. A session can and does put ops that have no device
# assignment onto any of the tasks it knows about, not just the
# task passed as its target= argument!
self.device = '/job:%s/task:%d' % (job, task_num)
self.prefix = prefix
self.thread = test_case.checkedThread(target=self._feed_thread)
with self.graph.as_default(), ops.device(self.device):
self.feeder = feeder_lib.Feeder(
[dtypes_lib.string, dtypes_lib.string], [[], []], capacity=1)
self.feeder.set_many_fed_tensors(self._get_feed_values())
def _get_feed_values(self):
# Return some feeding strings, possibly prefixed.
return [
constant_op.constant(
['%s%s' % (self.prefix, x) for x in ['a0', 'a1', 'a2']]),
constant_op.constant(
['%s%s' % (self.prefix, x) for x in ['b0', 'b1', 'b2']])
]
def add_remote_device(self, dev):
with self.graph.as_default(), ops.device(self.device):
self.feeder.add_remote_device(dev)
def start(self):
self.thread.start()
self.feeder.wait_until_feeding() # wait until it's up & feeding
if self.coord.should_stop():
self.coord.join() # rethrows errors encountered in run_feeding_forever
def join(self):
self.thread.join()
def _session(self):
return session_lib.Session(target=self.server.target)
def _feed_thread(self):
with self.coord.stop_on_exception():
with self.graph.as_default(), ops.device(self.device):
self.feeder.run_feeding_forever(self._session, self.coord)
class FeederTest(test.TestCase):
# Tests for Feeder
def _create_local_cluster(self, **kargs):
"""Creates a local cluster."""
cluster_dict = {}
for (k, v) in kargs.items():
cluster_dict[k] = [
'localhost:%d' % _pick_unused_port() for _ in range(v)
]
# Launch servers:
servers = {}
for (k, v) in kargs.items():
servers[k] = [
server_lib.Server(
cluster_dict, job_name=k, task_index=idx, start=True)
for idx in range(v)
]
return servers
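# Added note (illustrative, hypothetical port numbers): a call such as
# self._create_local_cluster(worker=1, feeder=2) builds a cluster_dict like
# {'worker': ['localhost:12345'], 'feeder': ['localhost:12346', 'localhost:12347']}
# and returns the started server_lib.Server objects grouped by job name.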
def testFeederActsLikeQueue(self):
# Tests that a feeder acts like a queue
feeder = feeder_lib.Feeder(
dtypes=[dtypes_lib.string, dtypes_lib.string],
shapes=[[], []],
capacity=10)
feeder.set_many_fed_tensors([
constant_op.constant(['a0', 'a1', 'a2']),
constant_op.constant(['b0', 'b1', 'b2'])
])
out_a, out_b = feeder.get_fed_tensors()
with self.test_session() as session:
coord = coordinator.Coordinator()
queue_runner_impl.start_queue_runners(session, coord=coord)
a, b = session.run([out_a, out_b])
self.assertEquals(b'a0', a)
self.assertEquals(b'b0', b)
a = session.run(out_a) # Omit b!
self.assertEquals(b'a1', a)
a, b = session.run([out_a, out_b])
self.assertEquals(b'a2', a)
self.assertEquals(b'b2', b) # queued together
a, b = session.run([out_a, out_b]) # loops around
self.assertEquals(b'a0', a)
self.assertEquals(b'b0', b) # queued together
coord.request_stop()
coord.join()
def testFeederSeparateThread(self):
# Start a feeder on a separate thread, but with a shared local queue
servers = self._create_local_cluster(worker=1)
coord = coordinator.Coordinator()
feed_thread = FeederThread(self, coord, servers, 'worker', 0)
feed_thread.start()
with ops.Graph().as_default():
with ops.device('/job:worker/task:0'):
feeder = feeder_lib.Feeder(
dtypes=[dtypes_lib.string, dtypes_lib.string],
shapes=[[], []],
capacity=1)
out_a, out_b = feeder.get_fed_tensors()
with session_lib.Session(servers['worker'][0].target) as session:
a, b = session.run([out_a, out_b])
self.assertEquals(b'a0', a)
self.assertEquals(b'b0', b)
a = session.run(out_a) # Omit b!
self.assertEquals(b'a1', a)
coord.request_stop()
coord.join()
feed_thread.join()
def testOneEachFeeding(self):
# One feeder, one consumer
servers = self._create_local_cluster(consumer=1, feeder=1)
coord = coordinator.Coordinator()
feeder_thread = FeederThread(self, coord, servers, 'feeder', 0)
feeder_thread.add_remote_device('/job:consumer/task:0')
feeder_thread.start()
with ops.Graph().as_default():
with ops.device('/job:consumer/task:0'):
feeder = feeder_lib.Feeder(
dtypes=[dtypes_lib.string, dtypes_lib.string],
shapes=[[], []],
capacity=1)
out_a, out_b = feeder.get_fed_tensors()
with session_lib.Session(servers['consumer'][0].target) as session:
a, b = session.run([out_a, out_b])
self.assertEquals(b'a0', a)
self.assertEquals(b'b0', b)
a = session.run(out_a) # Omit b!
self.assertEquals(b'a1', a)
coord.request_stop()
coord.join()
feeder_thread.join()
def testMultipleProducersAndConsumers(self):
# Three feeders, three consumers.
servers = self._create_local_cluster(consumer=3, feeder=3)
coord = coordinator.Coordinator()
# Start the three feeders:
f0 = FeederThread(self, coord, servers, 'feeder', 0, prefix='feed0_')
f0.add_remote_device('/job:consumer/task:0')
f0.add_remote_device('/job:consumer/task:1')
f0.start()
f1 = FeederThread(self, coord, servers, 'feeder', 1, prefix='feed1_')
f1.add_remote_device('/job:consumer/task:2')
f1.add_remote_device('/job:consumer/task:0')
f1.start()
f2 = FeederThread(self, coord, servers, 'feeder', 2, prefix='feed2_')
f2.add_remote_device('/job:consumer/task:1')
f2.add_remote_device('/job:consumer/task:2')
f2.start()
# Three consumers.
def _run_consumer(task, expected_keys):
server = servers['consumer'][task]
# Runs until everything in expected_keys has been seen at least once;
# fails if any prefix not in expected_keys shows up
with ops.Graph().as_default(), ops.device('/job:consumer/task:%d' % task):
feeder = feeder_lib.Feeder(
dtypes=[dtypes_lib.string, dtypes_lib.string],
shapes=[[], []],
capacity=1)
out_a, out_b = feeder.get_fed_tensors()
counts = collections.Counter()
with session_lib.Session(server.target) as sess:
while True:
a, b = sess.run([out_a, out_b])
counts[a[:-1]] += 1
counts[b[:-1]] += 1
self.assertTrue(a[:-1] in expected_keys)
self.assertTrue(b[:-1] in expected_keys)
if all(counts[k] > 0 for k in expected_keys):
return
_run_consumer(0, [b'feed0_a', b'feed0_b', b'feed1_a', b'feed1_b'])
_run_consumer(1, [b'feed0_a', b'feed0_b', b'feed2_a', b'feed2_b'])
_run_consumer(2, [b'feed1_a', b'feed1_b', b'feed2_a', b'feed2_b'])
coord.request_stop()
coord.join()
f0.join()
f1.join()
f2.join()
def testAddRemoteReplicas(self):
with ops.Graph().as_default():
for idx in range(3):
with ops.name_scope('replica_%d' % idx):
feeder = feeder_lib.Feeder(
dtypes=[dtypes_lib.string, dtypes_lib.string],
shapes=[[], []],
capacity=10)
feeder.add_remote_replicas(
'consumer',
replica_count=3,
feeder_task_num=idx,
replicas_per_feeder=2,
base_device_spec='/device:cpu:0')
# Examine ops...
op_types_by_scope_and_device = collections.defaultdict(
lambda: collections.defaultdict(collections.Counter))
for op in ops.get_default_graph().get_operations():
scope = '/'.join(op.name.split('/')[:-1])
dev = op.device
op_types_by_scope_and_device[scope][dev][op.type] += 1
expected_ops = collections.Counter(
{'QueueEnqueueV2': 1, 'FIFOQueueV2': 1})
expected_enq_devices = [('replica_0', [
'/job:consumer/replica:0/device:cpu:0',
'/job:consumer/replica:1/device:cpu:0',
]), ('replica_1', [
'/job:consumer/replica:2/device:cpu:0',
'/job:consumer/replica:0/device:cpu:0',
]), ('replica_2', [
'/job:consumer/replica:1/device:cpu:0',
'/job:consumer/replica:2/device:cpu:0',
])]
for scope, devs in expected_enq_devices:
for dev in devs:
self.assertEqual(expected_ops,
op_types_by_scope_and_device[scope][dev])
if __name__ == '__main__':
test.main()
|
apache-2.0
|
FrancescAlted/blaze
|
blaze/compute/ops/from_dynd.py
|
4
|
1344
|
"""
Helper functions which constructs blaze functions from dynd kernels.
"""
from __future__ import absolute_import, division, print_function
from dynd import _lowlevel
import datashape
from ..function import ElementwiseBlazeFunc
def _make_sig(kern):
dslist = [datashape.dshape(str(x)) for x in kern.types]
return datashape.Function(*(dslist[1:] + [dslist[0]]))
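# Added clarifying note: the code treats the first entry of kern.types as the
# return type, so dslist[1:] + [dslist[0]] assembles the Function datashape as
# (argument types..., return type).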
def blazefunc_from_dynd_property(tplist, propname, modname, name):
"""Converts a dynd property access into a Blaze ufunc.
Parameters
----------
tplist : list of dynd types
A list of the types to use.
propname : str
The name of the property to access on the type.
modname : str
The module name to report in the ufunc's name
name : str
The ufunc's name.
"""
# Get the list of type signatures
kernlist = [_lowlevel.make_ckernel_deferred_from_property(tp, propname,
'expr', 'default')
for tp in tplist]
siglist = [_make_sig(kern) for kern in kernlist]
# Create the empty blaze function to start
bf = ElementwiseBlazeFunc('blaze', name)
# TODO: specify elementwise
#bf.add_metadata({'elementwise': True})
for (sig, kern) in zip(siglist, kernlist):
bf.add_overload(sig, kern)
return bf
|
bsd-3-clause
|
NMGRL/pychron
|
pychron/dashboard/tasks/server/plugin.py
|
2
|
2722
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from envisage.ui.tasks.task_factory import TaskFactory
from pyface.timer.do_later import do_after
from traits.api import Instance, on_trait_change
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.dashboard.server import DashboardServer
from pychron.dashboard.tasks.server.preferences import DashboardServerPreferencesPane
from pychron.dashboard.tasks.server.task import DashboardServerTask
from pychron.envisage.tasks.base_task_plugin import BaseTaskPlugin
class DashboardServerPlugin(BaseTaskPlugin):
dashboard_server = Instance(DashboardServer)
def _tasks_default(self):
return [TaskFactory(id='pychron.dashboard.server',
name='Dashboard Server',
accelerator='Ctrl+4',
factory=self._factory)]
def _factory(self):
f = DashboardServerTask(server=self.dashboard_server)
return f
def start(self):
app = self.application
elm = app.get_service('pychron.extraction_line.extraction_line_manager.ExtractionLineManager')
labspy = app.get_service('pychron.labspy.client.LabspyClient')
self.dashboard_server = DashboardServer(application=app,
labspy_client=labspy,
extraction_line_manager=elm)
self.dashboard_server.bind_preferences()
def _preferences_panes_default(self):
return [DashboardServerPreferencesPane]
def stop(self):
self.dashboard_server.deactivate()
@on_trait_change('application:started')
def start_server(self):
do_after(5000, self.dashboard_server.activate)
# self.dashboard_server.activate()
# ============= EOF =============================================
|
apache-2.0
|
blacktear23/django
|
tests/regressiontests/generic_views/models.py
|
52
|
1031
|
from django.db import models
class Artist(models.Model):
name = models.CharField(max_length=100)
class Meta:
ordering = ['name']
verbose_name = 'professional artist'
verbose_name_plural = 'professional artists'
def __unicode__(self):
return self.name
@models.permalink
def get_absolute_url(self):
return ('artist_detail', (), {'pk': self.id})
class Author(models.Model):
name = models.CharField(max_length=100)
slug = models.SlugField()
class Meta:
ordering = ['name']
def __unicode__(self):
return self.name
class Book(models.Model):
name = models.CharField(max_length=300)
slug = models.SlugField()
pages = models.IntegerField()
authors = models.ManyToManyField(Author)
pubdate = models.DateField()
class Meta:
ordering = ['-pubdate']
def __unicode__(self):
return self.name
class Page(models.Model):
content = models.TextField()
template = models.CharField(max_length=300)
|
bsd-3-clause
|
FujitsuEnablingSoftwareTechnologyGmbH/tempest
|
tempest/api/compute/admin/test_simple_tenant_usage.py
|
4
|
2798
|
# Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import time
from tempest.api.compute import base
from tempest import test
class TenantUsagesTestJSON(base.BaseV2ComputeAdminTest):
@classmethod
def setup_clients(cls):
super(TenantUsagesTestJSON, cls).setup_clients()
cls.adm_client = cls.os_adm.tenant_usages_client
cls.client = cls.os.tenant_usages_client
@classmethod
def resource_setup(cls):
super(TenantUsagesTestJSON, cls).resource_setup()
cls.tenant_id = cls.client.tenant_id
# Create a server in the demo tenant
cls.create_test_server(wait_until='ACTIVE')
time.sleep(2)
now = datetime.datetime.now()
cls.start = cls._parse_strtime(now - datetime.timedelta(days=1))
cls.end = cls._parse_strtime(now + datetime.timedelta(days=1))
@classmethod
def _parse_strtime(cls, at):
# Returns formatted datetime
return at.strftime('%Y-%m-%dT%H:%M:%S.%f')
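# Added illustration: datetime.datetime(2013, 1, 1), for example, is formatted
# by the pattern above as '2013-01-01T00:00:00.000000'.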
@test.attr(type='gate')
@test.idempotent_id('062c8ae9-9912-4249-8b51-e38d664e926e')
def test_list_usage_all_tenants(self):
# Get usage for all tenants
params = {'start': self.start,
'end': self.end,
'detailed': int(bool(True))}
tenant_usage = self.adm_client.list_tenant_usages(params)
self.assertEqual(len(tenant_usage), 8)
@test.attr(type='gate')
@test.idempotent_id('94135049-a4c5-4934-ad39-08fa7da4f22e')
def test_get_usage_tenant(self):
# Get usage for a specific tenant
params = {'start': self.start,
'end': self.end}
tenant_usage = self.adm_client.get_tenant_usage(
self.tenant_id, params)
self.assertEqual(len(tenant_usage), 8)
@test.attr(type='gate')
@test.idempotent_id('9d00a412-b40e-4fd9-8eba-97b496316116')
def test_get_usage_tenant_with_non_admin_user(self):
# Get usage for a specific tenant with non admin user
params = {'start': self.start,
'end': self.end}
tenant_usage = self.client.get_tenant_usage(
self.tenant_id, params)
self.assertEqual(len(tenant_usage), 8)
|
apache-2.0
|
lethargi/paparazzi
|
sw/tools/calibration/calibrate.py
|
18
|
6985
|
#! /usr/bin/env python
# Copyright (C) 2010 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from __future__ import print_function
import sys
import os
from optparse import OptionParser
import scipy
from scipy import optimize
import calibration_utils
def main():
usage = "usage: %prog [options] log_filename.data" + "\n" + "Run %prog --help to list the options."
parser = OptionParser(usage)
parser.add_option("-i", "--id", dest="ac_id",
action="store",
help="aircraft id to use")
parser.add_option("-s", "--sensor", dest="sensor",
type="choice", choices=["ACCEL", "MAG"],
help="sensor to calibrate (ACCEL, MAG)",
action="store", default="ACCEL")
parser.add_option("-p", "--plot",
help="Show resulting plots",
action="store_true", dest="plot")
parser.add_option("--noise_threshold",
help="specify noise threshold instead of automatically determining it",
action="store", dest="noise_threshold", default=0)
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
else:
if os.path.isfile(args[0]):
filename = args[0]
else:
print(args[0] + " not found")
sys.exit(1)
if not filename.endswith(".data"):
parser.error("Please specify a *.data log file")
if os.path.getsize(filename) == 0:
print("File specified has no data.")
sys.exit(1)
ac_ids = calibration_utils.get_ids_in_log(filename)
if options.ac_id is None:
if len(ac_ids) == 1:
options.ac_id = ac_ids[0]
else:
parser.error("More than one aircraft id found in log file. Specify the id to use.")
if options.verbose:
print("Using aircraft id "+options.ac_id)
if options.sensor == "ACCEL":
sensor_ref = 9.81
sensor_res = 10
noise_window = 20
noise_threshold = options.noise_threshold
elif options.sensor == "MAG":
sensor_ref = 1.
sensor_res = 11
noise_window = 10
noise_threshold = options.noise_threshold
if options.verbose:
print("reading file "+filename+" for aircraft "+options.ac_id+" and sensor "+options.sensor)
# read raw measurements from log file
measurements = calibration_utils.read_log(options.ac_id, filename, options.sensor)
if len(measurements) == 0:
print("Error: found zero IMU_"+options.sensor+"_RAW measurements for aircraft with id "+options.ac_id+" in log file!")
sys.exit(1)
if options.verbose:
print("found "+str(len(measurements))+" records")
# check that values are not all zero
if not measurements.any():
print("Error: all IMU_"+options.sensor+"_RAW measurements are zero!")
sys.exit(1)
# estimate the noise threshold if not explicitly given
if noise_threshold <= 0:
# mean over all measurements (flattened array) as approx neutral value
neutral = scipy.mean(measurements)
# find the median of measurement vector length after subtracting approximate neutral
meas_median = scipy.median(scipy.array([scipy.linalg.norm(v - neutral) for v in measurements]))
if options.sensor == "ACCEL":
# set noise threshold to be below 10% of that for accelerometers
noise_threshold = meas_median * 0.1
elif options.sensor == "MAG":
# set noise threshold to be below 60% of that for magnetometers
noise_threshold = meas_median * 0.6
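# Added illustration (hypothetical value): if meas_median were 1000 raw units,
# the threshold would become 100 for ACCEL and 600 for MAG.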
if options.verbose:
print("Using noise threshold of", noise_threshold, "for filtering.")
# filter out noisy measurements
flt_meas, flt_idx = calibration_utils.filter_meas(measurements, noise_window, noise_threshold)
if options.verbose:
print("remaining "+str(len(flt_meas))+" after filtering")
if len(flt_meas) == 0:
print("Error: found zero IMU_" + options.sensor + "_RAW measurements for aircraft with id " + options.ac_id +
" in log file after filtering with noise threshold of " + noise_threshold +
"!\nMaybe try specifying manually with the --noise_threshold option.")
if options.plot:
calibration_utils.plot_measurements(options.sensor, measurements)
sys.exit(1)
# get an initial min/max guess
p0 = calibration_utils.get_min_max_guess(flt_meas, sensor_ref)
cp0, np0 = calibration_utils.scale_measurements(flt_meas, p0)
print("initial guess : avg "+str(np0.mean())+" std "+str(np0.std()))
# print p0
def err_func(p, meas, y):
cp, np = calibration_utils.scale_measurements(meas, p)
err = y*scipy.ones(len(meas)) - np
return err
p1, cov, info, msg, success = optimize.leastsq(err_func, p0[:], args=(flt_meas, sensor_ref), full_output=1)
optimize_failed = success not in [1, 2, 3, 4]
if optimize_failed:
print("Optimization error: ", msg)
print("Please try to provide a clean logfile with proper distribution of measurements.")
#sys.exit(1)
cp1, np1 = calibration_utils.scale_measurements(flt_meas, p1)
if optimize_failed:
print("last iteration of failed optimized guess : avg "+str(np1.mean())+" std "+str(np1.std()))
else:
print("optimized guess : avg "+str(np1.mean())+" std "+str(np1.std()))
if not optimize_failed:
calibration_utils.print_xml(p1, options.sensor, sensor_res)
if options.plot:
# if we are calibrating a mag, just draw first plot (non-blocking), then show the second
if options.sensor == "MAG":
calibration_utils.plot_results(options.sensor, measurements, flt_idx, flt_meas, cp0, np0, cp1, np1, sensor_ref, blocking=False)
calibration_utils.plot_mag_3d(flt_meas, cp1, p1)
# otherwise show the first plot (blocking)
else:
calibration_utils.plot_results(options.sensor, measurements, flt_idx, flt_meas, cp0, np0, cp1, np1, sensor_ref)
if __name__ == "__main__":
main()
|
gpl-2.0
|
birocorneliu/youtube-dl
|
youtube_dl/extractor/tinypic.py
|
126
|
1893
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import ExtractorError
class TinyPicIE(InfoExtractor):
IE_NAME = 'tinypic'
IE_DESC = 'tinypic.com videos'
_VALID_URL = r'http://(?:.+?\.)?tinypic\.com/player\.php\?v=(?P<id>[^&]+)&s=\d+'
_TESTS = [
{
'url': 'http://tinypic.com/player.php?v=6xw7tc%3E&s=5#.UtqZmbRFCM8',
'md5': '609b74432465364e72727ebc6203f044',
'info_dict': {
'id': '6xw7tc',
'ext': 'flv',
'title': 'shadow phenomenon weird',
},
},
{
'url': 'http://de.tinypic.com/player.php?v=dy90yh&s=8',
'only_matching': True,
}
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id, 'Downloading page')
mobj = re.search(r'(?m)fo\.addVariable\("file",\s"(?P<fileid>[\da-z]+)"\);\n'
'\s+fo\.addVariable\("s",\s"(?P<serverid>\d+)"\);', webpage)
if mobj is None:
raise ExtractorError('Video %s does not exist' % video_id, expected=True)
file_id = mobj.group('fileid')
server_id = mobj.group('serverid')
KEYWORDS_SUFFIX = ', Video, images, photos, videos, myspace, ebay, video hosting, photo hosting'
keywords = self._html_search_meta('keywords', webpage, 'title')
title = keywords[:-len(KEYWORDS_SUFFIX)] if keywords.endswith(KEYWORDS_SUFFIX) else ''
video_url = 'http://v%s.tinypic.com/%s.flv' % (server_id, file_id)
thumbnail = 'http://v%s.tinypic.com/%s_th.jpg' % (server_id, file_id)
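# Added illustration (hypothetical ids): with server_id '8' and file_id 'dy90yh'
# the URLs above become 'http://v8.tinypic.com/dy90yh.flv' and
# 'http://v8.tinypic.com/dy90yh_th.jpg'.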
return {
'id': file_id,
'url': video_url,
'thumbnail': thumbnail,
'title': title
}
|
unlicense
|
johnpbatty/python-neutronclient
|
neutronclient/tests/unit/lb/test_cli20_member.py
|
7
|
4933
|
# Copyright 2013 Mirantis Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
from neutronclient.neutron.v2_0.lb import member
from neutronclient.tests.unit import test_cli20
class CLITestV20LbMemberJSON(test_cli20.CLITestV20Base):
def setUp(self):
super(CLITestV20LbMemberJSON, self).setUp(plurals={'tags': 'tag'})
def test_create_member(self):
"""lb-member-create with mandatory params only."""
resource = 'member'
cmd = member.CreateMember(test_cli20.MyApp(sys.stdout), None)
address = '10.0.0.1'
port = '8080'
tenant_id = 'my-tenant'
my_id = 'my-id'
pool_id = 'pool-id'
args = ['--address', address, '--protocol-port', port,
'--tenant-id', tenant_id, pool_id]
position_names = ['address', 'protocol_port', 'tenant_id', 'pool_id',
'admin_state_up']
position_values = [address, port, tenant_id, pool_id, True]
self._test_create_resource(resource, cmd, None, my_id, args,
position_names, position_values,
admin_state_up=None)
def test_create_member_all_params(self):
"""lb-member-create with all available params."""
resource = 'member'
cmd = member.CreateMember(test_cli20.MyApp(sys.stdout), None)
address = '10.0.0.1'
admin_state_up = False
port = '8080'
weight = '1'
tenant_id = 'my-tenant'
my_id = 'my-id'
pool_id = 'pool-id'
args = ['--address', address, '--admin-state-down',
'--protocol-port', port, '--weight', weight,
'--tenant-id', tenant_id, pool_id]
position_names = [
'address', 'admin_state_up', 'protocol_port', 'weight',
'tenant_id', 'pool_id'
]
position_values = [address, admin_state_up, port, weight,
tenant_id, pool_id]
self._test_create_resource(resource, cmd, None, my_id, args,
position_names, position_values,
admin_state_up=None)
def test_list_members(self):
"""lb-member-list."""
resources = "members"
cmd = member.ListMember(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, True)
def test_list_members_pagination(self):
"""lb-member-list."""
resources = "members"
cmd = member.ListMember(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources_with_pagination(resources, cmd)
def test_list_members_sort(self):
"""lb-member-list --sort-key name --sort-key id --sort-key asc
--sort-key desc
"""
resources = "members"
cmd = member.ListMember(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_members_limit(self):
"""lb-member-list -P."""
resources = "members"
cmd = member.ListMember(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, page_size=1000)
def test_show_member_id(self):
"""lb-member-show test_id."""
resource = 'member'
cmd = member.ShowMember(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args, ['id'])
def test_update_member(self):
"""lb-member-update myid --name myname --tags a b."""
resource = 'member'
cmd = member.UpdateMember(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'myname',
'--tags', 'a', 'b'],
{'name': 'myname', 'tags': ['a', 'b'], })
def test_delete_member(self):
"""lb-member-delete my-id."""
resource = 'member'
cmd = member.DeleteMember(test_cli20.MyApp(sys.stdout), None)
my_id = 'my-id'
args = [my_id]
self._test_delete_resource(resource, cmd, my_id, args)
class CLITestV20LbMemberXML(CLITestV20LbMemberJSON):
format = 'xml'
|
apache-2.0
|
glneo/gnuradio-davisaf
|
gr-digital/examples/ofdm/receive_path.py
|
13
|
3477
|
#
# Copyright 2005,2006,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import eng_notation
from gnuradio import digital
import copy
import sys
# /////////////////////////////////////////////////////////////////////////////
# receive path
# /////////////////////////////////////////////////////////////////////////////
class receive_path(gr.hier_block2):
def __init__(self, rx_callback, options):
gr.hier_block2.__init__(self, "receive_path",
gr.io_signature(1, 1, gr.sizeof_gr_complex),
gr.io_signature(0, 0, 0))
options = copy.copy(options) # make a copy so we can destructively modify
self._verbose = options.verbose
self._log = options.log
self._rx_callback = rx_callback # this callback is fired when there's a packet available
# receiver
self.ofdm_rx = digital.ofdm_demod(options,
callback=self._rx_callback)
# Carrier Sensing Blocks
alpha = 0.001
thresh = 30 # in dB, will have to adjust
self.probe = gr.probe_avg_mag_sqrd_c(thresh,alpha)
self.connect(self, self.ofdm_rx)
self.connect(self.ofdm_rx, self.probe)
# Display some information about the setup
if self._verbose:
self._print_verbage()
def carrier_sensed(self):
"""
Return True if we think carrier is present.
"""
#return self.probe.level() > X
return self.probe.unmuted()
def carrier_threshold(self):
"""
Return current setting in dB.
"""
return self.probe.threshold()
def set_carrier_threshold(self, threshold_in_db):
"""
Set carrier threshold.
@param threshold_in_db: set detection threshold
@type threshold_in_db: float (dB)
"""
self.probe.set_threshold(threshold_in_db)
def add_options(normal, expert):
"""
Adds receiver-specific options to the Options Parser
"""
normal.add_option("-W", "--bandwidth", type="eng_float",
default=500e3,
help="set symbol bandwidth [default=%default]")
normal.add_option("-v", "--verbose", action="store_true", default=False)
expert.add_option("", "--log", action="store_true", default=False,
help="Log all parts of flow graph to files (CAUTION: lots of data)")
# Make a static method to call before instantiation
add_options = staticmethod(add_options)
def _print_verbage(self):
"""
Prints information about the receive path
"""
pass
|
gpl-3.0
|
yeyanchao/calibre
|
src/calibre/gui2/store/basic_config_widget_ui.py
|
1
|
1447
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/yc/code/calibre/calibre/src/calibre/gui2/store/basic_config_widget.ui'
#
# Created: Thu Oct 25 16:54:55 2012
# by: PyQt4 UI code generator 4.8.5
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(460, 69)
Form.setWindowTitle(_("Form"))
self.gridLayout = QtGui.QGridLayout(Form)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label = QtGui.QLabel(Form)
self.label.setText(_("Added Tags:"))
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout.addWidget(self.label, 1, 0, 1, 1)
self.tags = QtGui.QLineEdit(Form)
self.tags.setObjectName(_fromUtf8("tags"))
self.gridLayout.addWidget(self.tags, 1, 1, 1, 1)
self.open_external = QtGui.QCheckBox(Form)
self.open_external.setText(_("Open store in external web browser"))
self.open_external.setObjectName(_fromUtf8("open_external"))
self.gridLayout.addWidget(self.open_external, 0, 0, 1, 2)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
pass
|
gpl-3.0
|
karban/field
|
resources/python/rope/base/evaluate.py
|
12
|
12131
|
import rope.base.builtins
import rope.base.pynames
import rope.base.pyobjects
from rope.base import ast, astutils, exceptions, pyobjects, arguments, worder
BadIdentifierError = exceptions.BadIdentifierError
def eval_location(pymodule, offset):
"""Find the pyname at the offset"""
return eval_location2(pymodule, offset)[1]
def eval_location2(pymodule, offset):
"""Find the primary and pyname at offset"""
pyname_finder = ScopeNameFinder(pymodule)
return pyname_finder.get_primary_and_pyname_at(offset)
def eval_node(scope, node):
"""Evaluate a `ast.AST` node and return a PyName
Return `None` if the expression cannot be evaluated.
"""
return eval_node2(scope, node)[1]
def eval_node2(scope, node):
evaluator = StatementEvaluator(scope)
ast.walk(node, evaluator)
return evaluator.old_result, evaluator.result
def eval_str(holding_scope, name):
return eval_str2(holding_scope, name)[1]
def eval_str2(holding_scope, name):
try:
# parenthesizing for handling cases like 'a_var.\nattr'
node = ast.parse('(%s)' % name)
except SyntaxError:
raise BadIdentifierError('Not a resolvable python identifier selected.')
return eval_node2(holding_scope, node)
class ScopeNameFinder(object):
def __init__(self, pymodule):
self.module_scope = pymodule.get_scope()
self.lines = pymodule.lines
self.worder = worder.Worder(pymodule.source_code, True)
def _is_defined_in_class_body(self, holding_scope, offset, lineno):
if lineno == holding_scope.get_start() and \
holding_scope.parent is not None and \
holding_scope.parent.get_kind() == 'Class' and \
self.worder.is_a_class_or_function_name_in_header(offset):
return True
if lineno != holding_scope.get_start() and \
holding_scope.get_kind() == 'Class' and \
self.worder.is_name_assigned_in_class_body(offset):
return True
return False
def _is_function_name_in_function_header(self, scope, offset, lineno):
if scope.get_start() <= lineno <= scope.get_body_start() and \
scope.get_kind() == 'Function' and \
self.worder.is_a_class_or_function_name_in_header(offset):
return True
return False
def get_pyname_at(self, offset):
return self.get_primary_and_pyname_at(offset)[1]
def get_primary_and_pyname_at(self, offset):
lineno = self.lines.get_line_number(offset)
holding_scope = self.module_scope.get_inner_scope_for_line(lineno)
# function keyword parameter
if self.worder.is_function_keyword_parameter(offset):
keyword_name = self.worder.get_word_at(offset)
pyobject = self.get_enclosing_function(offset)
if isinstance(pyobject, pyobjects.PyFunction):
return (None, pyobject.get_parameters().get(keyword_name, None))
# class body
if self._is_defined_in_class_body(holding_scope, offset, lineno):
class_scope = holding_scope
if lineno == holding_scope.get_start():
class_scope = holding_scope.parent
name = self.worder.get_primary_at(offset).strip()
try:
return (None, class_scope.pyobject[name])
except rope.base.exceptions.AttributeNotFoundError:
return (None, None)
# function header
if self._is_function_name_in_function_header(holding_scope, offset, lineno):
name = self.worder.get_primary_at(offset).strip()
return (None, holding_scope.parent[name])
# from statement module
if self.worder.is_from_statement_module(offset):
module = self.worder.get_primary_at(offset)
module_pyname = self._find_module(module)
return (None, module_pyname)
if self.worder.is_from_aliased(offset):
name = self.worder.get_from_aliased(offset)
else:
name = self.worder.get_primary_at(offset)
return eval_str2(holding_scope, name)
def get_enclosing_function(self, offset):
function_parens = self.worder.find_parens_start_from_inside(offset)
try:
function_pyname = self.get_pyname_at(function_parens - 1)
except BadIdentifierError:
function_pyname = None
if function_pyname is not None:
pyobject = function_pyname.get_object()
if isinstance(pyobject, pyobjects.AbstractFunction):
return pyobject
elif isinstance(pyobject, pyobjects.AbstractClass) and \
'__init__' in pyobject:
return pyobject['__init__'].get_object()
elif '__call__' in pyobject:
return pyobject['__call__'].get_object()
return None
def _find_module(self, module_name):
dots = 0
while module_name[dots] == '.':
dots += 1
return rope.base.pynames.ImportedModule(
self.module_scope.pyobject, module_name[dots:], dots)
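# Added illustrative note: for a relative module name such as '..pkg.mod' the
# loop above counts dots == 2, so the ImportedModule is created for 'pkg.mod'
# with level 2.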
class StatementEvaluator(object):
def __init__(self, scope):
self.scope = scope
self.result = None
self.old_result = None
def _Name(self, node):
self.result = self.scope.lookup(node.id)
def _Attribute(self, node):
pyname = eval_node(self.scope, node.value)
if pyname is None:
pyname = rope.base.pynames.UnboundName()
self.old_result = pyname
if pyname.get_object() != rope.base.pyobjects.get_unknown():
try:
self.result = pyname.get_object()[node.attr]
except exceptions.AttributeNotFoundError:
self.result = None
def _Call(self, node):
primary, pyobject = self._get_primary_and_object_for_node(node.func)
if pyobject is None:
return
def _get_returned(pyobject):
args = arguments.create_arguments(primary, pyobject,
node, self.scope)
return pyobject.get_returned_object(args)
if isinstance(pyobject, rope.base.pyobjects.AbstractClass):
result = None
if '__new__' in pyobject:
new_function = pyobject['__new__'].get_object()
result = _get_returned(new_function)
if result is None or \
result == rope.base.pyobjects.get_unknown():
result = rope.base.pyobjects.PyObject(pyobject)
self.result = rope.base.pynames.UnboundName(pyobject=result)
return
pyfunction = None
if isinstance(pyobject, rope.base.pyobjects.AbstractFunction):
pyfunction = pyobject
elif '__call__' in pyobject:
pyfunction = pyobject['__call__'].get_object()
if pyfunction is not None:
self.result = rope.base.pynames.UnboundName(
pyobject=_get_returned(pyfunction))
def _Str(self, node):
self.result = rope.base.pynames.UnboundName(
pyobject=rope.base.builtins.get_str())
def _Num(self, node):
type_name = type(node.n).__name__
self.result = self._get_builtin_name(type_name)
def _get_builtin_name(self, type_name):
pytype = rope.base.builtins.builtins[type_name].get_object()
return rope.base.pynames.UnboundName(
rope.base.pyobjects.PyObject(pytype))
def _BinOp(self, node):
self.result = rope.base.pynames.UnboundName(
self._get_object_for_node(node.left))
def _BoolOp(self, node):
self.result = rope.base.pynames.UnboundName(
self._get_object_for_node(node.values[0]))
def _Repr(self, node):
self.result = self._get_builtin_name('str')
def _UnaryOp(self, node):
self.result = rope.base.pynames.UnboundName(
self._get_object_for_node(node.operand))
def _Compare(self, node):
self.result = self._get_builtin_name('bool')
def _Dict(self, node):
keys = None
values = None
if node.keys:
keys = self._get_object_for_node(node.keys[0])
values = self._get_object_for_node(node.values[0])
self.result = rope.base.pynames.UnboundName(
pyobject=rope.base.builtins.get_dict(keys, values))
def _List(self, node):
holding = None
if node.elts:
holding = self._get_object_for_node(node.elts[0])
self.result = rope.base.pynames.UnboundName(
pyobject=rope.base.builtins.get_list(holding))
def _ListComp(self, node):
pyobject = self._what_does_comprehension_hold(node)
self.result = rope.base.pynames.UnboundName(
pyobject=rope.base.builtins.get_list(pyobject))
def _GeneratorExp(self, node):
pyobject = self._what_does_comprehension_hold(node)
self.result = rope.base.pynames.UnboundName(
pyobject=rope.base.builtins.get_iterator(pyobject))
def _what_does_comprehension_hold(self, node):
scope = self._make_comprehension_scope(node)
pyname = eval_node(scope, node.elt)
return pyname.get_object() if pyname is not None else None
def _make_comprehension_scope(self, node):
scope = self.scope
module = scope.pyobject.get_module()
names = {}
for comp in node.generators:
new_names = _get_evaluated_names(comp.target, comp.iter, module,
'.__iter__().next()', node.lineno)
names.update(new_names)
return rope.base.pyscopes.TemporaryScope(scope.pycore, scope, names)
def _Tuple(self, node):
objects = []
if len(node.elts) < 4:
for stmt in node.elts:
pyobject = self._get_object_for_node(stmt)
objects.append(pyobject)
else:
objects.append(self._get_object_for_node(node.elts[0]))
self.result = rope.base.pynames.UnboundName(
pyobject=rope.base.builtins.get_tuple(*objects))
def _get_object_for_node(self, stmt):
pyname = eval_node(self.scope, stmt)
pyobject = None
if pyname is not None:
pyobject = pyname.get_object()
return pyobject
def _get_primary_and_object_for_node(self, stmt):
primary, pyname = eval_node2(self.scope, stmt)
pyobject = None
if pyname is not None:
pyobject = pyname.get_object()
return primary, pyobject
def _Subscript(self, node):
if isinstance(node.slice, ast.Index):
self._call_function(node.value, '__getitem__',
[node.slice.value])
elif isinstance(node.slice, ast.Slice):
self._call_function(node.value, '__getslice__')
def _call_function(self, node, function_name, other_args=None):
pyname = eval_node(self.scope, node)
if pyname is not None:
pyobject = pyname.get_object()
else:
return
if function_name in pyobject:
call_function = pyobject[function_name].get_object()
args = [node]
if other_args:
args += other_args
arguments_ = arguments.Arguments(args, self.scope)
self.result = rope.base.pynames.UnboundName(
pyobject=call_function.get_returned_object(arguments_))
def _Lambda(self, node):
self.result = rope.base.pynames.UnboundName(
pyobject=rope.base.builtins.Lambda(node, self.scope))
def _get_evaluated_names(targets, assigned, module, evaluation, lineno):
result = {}
for name, levels in astutils.get_name_levels(targets):
assignment = rope.base.pynames.AssignmentValue(assigned, levels,
evaluation)
# XXX: this module should not access `rope.base.pynamesdef`!
pyname = rope.base.pynamesdef.AssignedName(lineno, module)
pyname.assignments.append(assignment)
result[name] = pyname
return result
|
gpl-2.0
|
Acidburn0zzz/servo
|
tests/wpt/webgl/tests/deqp/functional/gles3/framebufferblit/frambufferblit_test_generator.py
|
51
|
3604
|
#!/usr/bin/env python
# Copyright (c) 2016 The Khronos Group Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and/or associated documentation files (the
# "Materials"), to deal in the Materials without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Materials, and to
# permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
#
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
"""
Generator for framebufferblit* tests.
This file needs to be run in its folder.
"""
import sys
_DO_NOT_EDIT_WARNING = """<!--
This file is auto-generated from framebufferblit_test_generator.py
DO NOT EDIT!
-->
"""
_HTML_TEMPLATE = """<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>WebGL Framebuffer Blit Conformance Tests</title>
<link rel="stylesheet" href="../../../../resources/js-test-style.css"/>
<script src="../../../../js/js-test-pre.js"></script>
<script src="../../../../js/webgl-test-utils.js"></script>
<script src="../../../../closure-library/closure/goog/base.js"></script>
<script src="../../../deqp-deps.js"></script>
<script>goog.require('functional.gles3.es3fFramebufferBlitTests');</script>
</head>
<body>
<div id="description"></div>
<div id="console"></div>
<canvas id="canvas" width="200" height="200"> </canvas>
<script>
var wtu = WebGLTestUtils;
var gl = wtu.create3DContext('canvas', null, 2);
functional.gles3.es3fFramebufferBlitTests.run(gl, [%(start)s, %(end)s]);
</script>
</body>
</html>
"""
_GROUPS = [
'rect',
'conversion',
'depth_stencil',
'default_framebuffer',
]
_GROUP_TEST_COUNTS = [
7,
35,
1,
7
]
def GenerateFilename(group, count, index):
"""Generate test filename."""
filename = group
assert index >= 0 and index < count
if count > 1:
index_str = str(index)
if index < 10:
index_str = "0" + index_str
filename += "_" + index_str
filename += ".html"
return filename
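# Added illustration: GenerateFilename('conversion', 35, 3) returns
# 'conversion_03.html', while GenerateFilename('depth_stencil', 1, 0) returns
# 'depth_stencil.html' because single-test groups omit the index suffix.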
def WriteTest(filename, start, end):
"""Write one test."""
file = open(filename, "wb")
file.write(_DO_NOT_EDIT_WARNING)
file.write(_HTML_TEMPLATE % {
'start': start,
'end': end
})
  file.close()
def GenerateTests():
"""Generate all tests."""
assert len(_GROUPS) == len(_GROUP_TEST_COUNTS)
test_index = 0
filelist = []
for ii in range(len(_GROUPS)):
group = _GROUPS[ii]
count = _GROUP_TEST_COUNTS[ii]
for index in range(count):
filename = GenerateFilename(group, count, index)
filelist.append(filename)
WriteTest(filename, test_index, test_index + 1)
test_index += 1
return filelist
def GenerateTestList(filelist):
file = open("00_test_list.txt", "wb")
file.write('\n'.join(filelist))
  file.close()
def main(argv):
"""This is the main function."""
filelist = GenerateTests()
GenerateTestList(filelist)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
mpl-2.0
|
jahrome/viper
|
modules/rats/clientmesh.py
|
6
|
1536
|
# Originally written by Kevin Breen (@KevTheHermit):
# https://github.com/kevthehermit/RATDecoders/blob/master/ClientMesh.py
import re
import string
from base64 import b64decode
def stringPrintable(line):
return filter(lambda x: x in string.printable, line)
def first_split(data):
splits = data.split('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7e')
if len(splits) == 2:
return splits[1]
def base64_decode(b64_string):
return b64decode(b64_string)
def conf_extract(coded_config):
conf_list = []
    decoded_conf = base64_decode(coded_config)
split_list = decoded_conf.split('``')
for conf in split_list:
conf_list.append(conf)
return conf_list
def process_config(raw_config):
conf_dict = {}
conf_dict['Domain'] = raw_config[0]
conf_dict['Port'] = raw_config[1]
conf_dict['Password'] = raw_config[2]
conf_dict['CampaignID'] = raw_config[3]
conf_dict['MsgBoxFlag'] = raw_config[4]
conf_dict['MsgBoxTitle'] = raw_config[5]
conf_dict['MsgBoxText'] = raw_config[6]
conf_dict['Startup'] = raw_config[7]
conf_dict['RegistryKey'] = raw_config[8]
conf_dict['RegistryPersistance'] = raw_config[9]
conf_dict['LocalKeyLogger'] = raw_config[10]
conf_dict['VisibleFlag'] = raw_config[11]
conf_dict['Unknown'] = raw_config[12]
return conf_dict
def config(data):
coded_config = first_split(data)
raw_config = conf_extract(coded_config)
final_config = process_config(raw_config)
return final_config
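# Rough usage sketch (added note; the sample filename is hypothetical):
#   with open('clientmesh_sample.bin', 'rb') as fh:
#       cfg = config(fh.read())
#   # cfg is a dict with the keys set in process_config() above, e.g.
#   # {'Domain': ..., 'Port': ..., 'Password': ..., 'CampaignID': ..., ...}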
|
bsd-3-clause
|
minhtuancn/odoo
|
addons/mail/tests/__init__.py
|
261
|
1173
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_mail_group, test_mail_message, test_mail_features, test_mail_gateway, test_message_read, test_invite
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
hanwen/artisjokke
|
stepmake/bin/packagepython.py
|
1
|
3487
|
#!/usr/bin/python
#ugh. junkme.
# packagepython.py -- implement general StepMake-wide python stuff
#
# source file of the GNU LilyPond music typesetter
#
# (c) 1997--1998 Han-Wen Nienhuys <[email protected]>
# Jan Nieuwenhuizen <[email protected]>
import re
import string
import sys
import os
import getopt
make_assign_re = re.compile ('^([A-Z_]*)=(.*)$')
def read_makefile (fn):
file = open (fn)
lines = file.readlines()
mi = pa = mj = 0
mp = ''
make_dict = {}
for l in lines:
m = make_assign_re.search (l)
if m:
nm = m.group (1)
val = m.group (2)
make_dict[nm] = val
return make_dict
class Package:
def __init__ (self, dirname):
dict = read_makefile (dirname + '/VERSION')
version_list = []
for x in [ 'MAJOR_VERSION', 'MINOR_VERSION', 'PATCH_LEVEL']:
version_list.append (string.atoi (dict[x]))
version_list.append (dict['MY_PATCH_LEVEL'])
self.topdir = dirname
self.groupdir = self.topdir + '/..'
self.patch_dir = self.groupdir + '/patches/'
self.release_dir = self.groupdir + '/releases/'
self.test_dir = self.groupdir + '/test/'
self.version = tuple(version_list)
self.Name = dict['PACKAGE_NAME']
self.name = string.lower (self.Name)
if self.name == 'lilypond':
self.nickname = 'lelie'
else:
self.nickname = self.name
self.NAME = string.upper (self.Name)
class Packager:
def __init__ (self):
try:
m= os.environ['MAILADDRESS']
except KeyError:
m= '(address unknown)'
self.mail= m
try:
m= os.environ['WEBMASTER']
except KeyError:
m= self.mail
self.webmaster= m
def full_version_tup(tup):
t = [0,0,0,'']
for i in range (4):
try:
t[i] = tup[i]
except IndexError:
break
return tuple(t)
def split_my_patchlevel (str):
m = re.match ('(.*?)([0-9]*)$', str)
return (m.group (1), string.atoi (m.group (2)))
def next_version(tup):
l = list(full_version_tup (tup))
t3name=t3num=''
if l[3]:
(t3name,t3num)= split_my_patchlevel (l[3])
if t3num:
t3num = '%d' % (t3num + 1)
else:
t3num = t3name =''
else:
l[2] = l[2] +1
return tuple(l[0:3] + [t3name + t3num])
def prev_version(tup):
l = list(full_version_tup (tup))
t3name=t3num=''
if l[3]:
(t3name, t3num) = split_my_patchlevel (l[3])
if t3num and t3num - 1 > 0:
t3num = '%d' %(t3num - 1)
else:
t3num = t3name =''
else:
l[2] = l[2] -1
return tuple(l[0:3] + [t3name + t3num])
def version_tuple_to_str(tup):
tup = full_version_tup (tup)
if tup[3]:
my = '.' + tup[3]
else:
my = ''
return ('%d.%d.%d' % tup[0:3]) + my
def version_str_to_tuple(str):
t = string.split(str, '.')
mypatch = ''
if len (t) >= 4:
mypatch = string.join (t[3:], '.')
return (string.atoi(t[0]), string.atoi(t[1]), string.atoi(t[2]), mypatch)
def version_compare (ltup, rtup):
    ltup = full_version_tup (ltup)
rtup = full_version_tup (rtup)
for i in (0,1,2):
if ltup[i] - rtup[i]: return ltup[i] - rtup[i]
if ltup[3] and rtup[3]:
        (lname, lnum) = split_my_patchlevel (ltup[3])
(rname, rnum) = split_my_patchlevel (rtup[3])
if lname != rname:
raise 'ambiguous'
        return lnum - rnum
if ltup[3]:
return 1
else:
return -1
if __name__ == '__main__':
p = Package ('.')
v= p.version
print v, prev_version(v), next_version(v)
pv=(0,1,1,'jcn4')
print version_tuple_to_str(pv), prev_version(pv), next_version(pv)
print version_tuple_to_str((0,1,1,''))
print full_version_tup ((0,1))
def dump_file(f, s):
i = open(f, 'w')
i.write(s)
i.close ()
|
mit
|
kxliugang/edx-platform
|
common/test/acceptance/tests/studio/test_import_export.py
|
47
|
13412
|
"""
Acceptance tests for the Import and Export pages
"""
from datetime import datetime
from abc import abstractmethod
from bok_choy.promise import EmptyPromise
from .base_studio_test import StudioLibraryTest, StudioCourseTest
from ...fixtures.course import XBlockFixtureDesc
from ...pages.studio.import_export import ExportLibraryPage, ExportCoursePage, ImportLibraryPage, ImportCoursePage
from ...pages.studio.library import LibraryEditPage
from ...pages.studio.container import ContainerPage
from ...pages.studio.overview import CourseOutlinePage
class ExportTestMixin(object):
"""
Tests to run both for course and library export pages.
"""
def test_export(self):
"""
Scenario: I am able to export a course or library
Given that I have a course or library
And I click the download button
The download will succeed
And the file will be of the right MIME type.
"""
good_status, is_tarball_mimetype = self.export_page.download_tarball()
self.assertTrue(good_status)
self.assertTrue(is_tarball_mimetype)
class TestCourseExport(ExportTestMixin, StudioCourseTest):
"""
Export tests for courses.
"""
def setUp(self): # pylint: disable=arguments-differ
super(TestCourseExport, self).setUp()
self.export_page = ExportCoursePage(
self.browser,
self.course_info['org'], self.course_info['number'], self.course_info['run'],
)
self.export_page.visit()
def test_header(self):
"""
Scenario: I should see the correct text when exporting a course.
Given that I have a course to export from
When I visit the export page
The correct header should be shown
"""
self.assertEqual(self.export_page.header_text, 'Course Export')
class TestLibraryExport(ExportTestMixin, StudioLibraryTest):
"""
Export tests for libraries.
"""
def setUp(self): # pylint: disable=arguments-differ
"""
Ensure a library exists and navigate to the library edit page.
"""
super(TestLibraryExport, self).setUp()
self.export_page = ExportLibraryPage(self.browser, self.library_key)
self.export_page.visit()
def test_header(self):
"""
Scenario: I should see the correct text when exporting a library.
Given that I have a library to export from
When I visit the export page
The correct header should be shown
"""
self.assertEqual(self.export_page.header_text, 'Library Export')
class BadExportMixin(object):
"""
Test mixin for bad exports.
"""
def test_bad_export(self):
"""
Scenario: I should receive an error when attempting to export a broken course or library.
Given that I have a course or library
No error modal should be showing
When I click the export button
An error modal should be shown
When I click the modal's action button
I should arrive at the edit page for the broken component
"""
# No error should be there to start.
self.assertFalse(self.export_page.is_error_modal_showing())
self.export_page.click_export()
self.export_page.wait_for_error_modal()
self.export_page.click_modal_button()
        EmptyPromise(
            lambda: self.edit_page.is_browser_on_page(),
            'Arrived at component edit page',
            timeout=30
        ).fulfill()
class TestLibraryBadExport(BadExportMixin, StudioLibraryTest):
"""
Verify exporting a bad library causes an error.
"""
def setUp(self):
"""
Set up the pages and start the tests.
"""
super(TestLibraryBadExport, self).setUp()
self.export_page = ExportLibraryPage(self.browser, self.library_key)
self.edit_page = LibraryEditPage(self.browser, self.library_key)
self.export_page.visit()
def populate_library_fixture(self, library_fixture):
"""
Create a library with a bad component.
"""
library_fixture.add_children(
XBlockFixtureDesc("problem", "Bad Problem", data='<'),
)
class TestCourseBadExport(BadExportMixin, StudioCourseTest):
"""
Verify exporting a bad course causes an error.
"""
ready_method = 'wait_for_component_menu'
def setUp(self): # pylint: disable=arguments-differ
super(TestCourseBadExport, self).setUp()
self.export_page = ExportCoursePage(
self.browser,
self.course_info['org'], self.course_info['number'], self.course_info['run'],
)
self.edit_page = ContainerPage(self.browser, self.unit.locator)
self.export_page.visit()
def populate_course_fixture(self, course_fixture):
"""
Populate the course with a unit that has a bad problem.
"""
self.unit = XBlockFixtureDesc('vertical', 'Unit')
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Main Section').add_children(
XBlockFixtureDesc('sequential', 'Subsection').add_children(
self.unit.add_children(
XBlockFixtureDesc("problem", "Bad Problem", data='<')
)
)
)
)
class ImportTestMixin(object):
"""
Tests to run for both course and library import pages.
"""
def setUp(self):
super(ImportTestMixin, self).setUp()
self.import_page = self.import_page_class(*self.page_args())
self.landing_page = self.landing_page_class(*self.page_args())
self.import_page.visit()
@abstractmethod
def page_args(self):
"""
Generates the args for initializing a page object.
"""
return []
def test_upload(self):
"""
Scenario: I want to upload a course or library for import.
Given that I have a library or course to import into
And I have a valid .tar.gz file containing data to replace it with
I can select the file and upload it
And the page will give me confirmation that it uploaded successfully
"""
self.import_page.upload_tarball(self.tarball_name)
self.import_page.wait_for_upload()
def test_import_timestamp(self):
"""
Scenario: I perform a course / library import
On import success, the page displays a UTC timestamp previously not visible
And if I refresh the page, the timestamp is still displayed
"""
self.assertFalse(self.import_page.is_timestamp_visible())
# Get the time when the import has started.
# import_page timestamp is in (MM/DD/YYYY at HH:mm) so replacing (second, microsecond) to
# keep the comparison consistent
upload_start_time = datetime.utcnow().replace(microsecond=0, second=0)
self.import_page.upload_tarball(self.tarball_name)
self.import_page.wait_for_upload()
# Get the time when the import has finished.
# import_page timestamp is in (MM/DD/YYYY at HH:mm) so replacing (second, microsecond) to
# keep the comparison consistent
upload_finish_time = datetime.utcnow().replace(microsecond=0, second=0)
import_timestamp = self.import_page.parsed_timestamp
self.import_page.wait_for_timestamp_visible()
# Verify that 'import_timestamp' is between start and finish upload time
self.assertLessEqual(
upload_start_time,
import_timestamp,
"Course import timestamp should be upload_start_time <= import_timestamp <= upload_end_time"
)
self.assertGreaterEqual(
upload_finish_time,
import_timestamp,
"Course import timestamp should be upload_start_time <= import_timestamp <= upload_end_time"
)
self.import_page.visit()
self.import_page.wait_for_tasks(completed=True)
self.import_page.wait_for_timestamp_visible()
def test_landing_url(self):
"""
Scenario: When uploading a library or course, a link appears for me to view the changes.
Given that I upload a library or course
A button will appear that contains the URL to the library or course's main page
"""
self.import_page.upload_tarball(self.tarball_name)
self.assertEqual(self.import_page.finished_target_url(), self.landing_page.url)
def test_bad_filename_error(self):
"""
Scenario: I should be reprimanded for trying to upload something that isn't a .tar.gz file.
Given that I select a file that is an .mp4 for upload
An error message will appear
"""
self.import_page.upload_tarball('funny_cat_video.mp4')
self.import_page.wait_for_filename_error()
def test_task_list(self):
"""
Scenario: I should see feedback checkpoints when uploading a course or library
Given that I am on an import page
No task checkpoint list should be showing
When I upload a valid tarball
Each task in the checklist should be marked confirmed
And the task list should be visible
"""
# The task list shouldn't be visible to start.
self.assertFalse(self.import_page.is_task_list_showing(), "Task list shown too early.")
self.import_page.wait_for_tasks()
self.import_page.upload_tarball(self.tarball_name)
self.import_page.wait_for_tasks(completed=True)
self.assertTrue(self.import_page.is_task_list_showing(), "Task list did not display.")
def test_bad_import(self):
"""
Scenario: I should see a failed checklist when uploading an invalid course or library
Given that I am on an import page
And I upload a tarball with a broken XML file
The tasks should be confirmed up until the 'Updating' task
And the 'Updating' task should be marked failed
And the remaining tasks should not be marked as started
"""
self.import_page.upload_tarball(self.bad_tarball_name)
self.import_page.wait_for_tasks(fail_on='Updating')
class TestCourseImport(ImportTestMixin, StudioCourseTest):
"""
Tests the Course import page
"""
tarball_name = '2015.lzdwNM.tar.gz'
bad_tarball_name = 'bad_course.tar.gz'
import_page_class = ImportCoursePage
landing_page_class = CourseOutlinePage
def page_args(self):
return [self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']]
def test_course_updated(self):
"""
Given that I visit an empty course before import
I should not see a section named 'Section'
When I visit the import page
And I upload a course that has a section named 'Section'
And I visit the course outline page again
The section named 'Section' should now be available
"""
self.landing_page.visit()
# Should not exist yet.
self.assertRaises(IndexError, self.landing_page.section, "Section")
self.import_page.visit()
self.import_page.upload_tarball(self.tarball_name)
self.import_page.wait_for_upload()
self.landing_page.visit()
# There's a section named 'Section' in the tarball.
self.landing_page.section("Section")
def test_header(self):
"""
Scenario: I should see the correct text when importing a course.
Given that I have a course to import to
When I visit the import page
The correct header should be shown
"""
self.assertEqual(self.import_page.header_text, 'Course Import')
class TestLibraryImport(ImportTestMixin, StudioLibraryTest):
"""
Tests the Library import page
"""
tarball_name = 'library.HhJfPD.tar.gz'
bad_tarball_name = 'bad_library.tar.gz'
import_page_class = ImportLibraryPage
landing_page_class = LibraryEditPage
def page_args(self):
return [self.browser, self.library_key]
def test_library_updated(self):
"""
Given that I visit an empty library
No XBlocks should be shown
When I visit the import page
And I upload a library that contains three XBlocks
And I visit the library page
Three XBlocks should be shown
"""
self.landing_page.visit()
self.landing_page.wait_until_ready()
# No items should be in the library to start.
self.assertEqual(len(self.landing_page.xblocks), 0)
self.import_page.visit()
self.import_page.upload_tarball(self.tarball_name)
self.import_page.wait_for_upload()
self.landing_page.visit()
self.landing_page.wait_until_ready()
# There are three blocks in the tarball.
self.assertEqual(len(self.landing_page.xblocks), 3)
def test_header(self):
"""
Scenario: I should see the correct text when importing a library.
Given that I have a library to import to
When I visit the import page
The correct header should be shown
"""
self.assertEqual(self.import_page.header_text, 'Library Import')
|
agpl-3.0
|
sfprime/pattern
|
pattern/text/en/wordnet/__init__.py
|
21
|
16924
|
#### PATTERN | WORDNET #############################################################################
# -*- coding: utf-8 -*-
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <[email protected]>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
# WordNet is a lexical database for English.
# It disambiguates word senses, e.g., "tree" in the sense of a plant or in the sense of a graph.
# It groups similar word senses into sets of synonyms called synsets,
# with a short description and semantic relations to other synsets:
# - synonym = a word that is similar in meaning,
# - hypernym = a word with a broader meaning, (tree => plant)
# - hyponym = a word with a more specific meaning, (tree => oak)
# - holonym = a word that is the whole of parts, (tree => forest)
# - meronym = a word that is a part of the whole, (tree => trunk)
# - antonym = a word that is opposite in meaning.
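# Quick usage sketch (added, illustrative only; the module path is assumed from the file layout):
#   from pattern.text.en import wordnet
#   s = wordnet.synsets("tree")[0]  # first sense of "tree"
#   s.hypernym                      # broader synset (kind-of relation)
#   s.hyponyms()                    # more specific synsets
#   s.gloss                         # short description of the sense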
import os
import sys
import glob
from math import log
try:
MODULE = os.path.dirname(os.path.realpath(__file__))
except:
MODULE = ""
# Path to WordNet /dict folder.
CORPUS = ""
os.environ["WNHOME"] = os.path.join(MODULE, CORPUS)
os.environ["WNSEARCHDIR"] = os.path.join(MODULE, CORPUS, "dict")
from pywordnet import wordnet as wn
from pywordnet import wntools
# The bundled version of PyWordNet has custom fixes.
# - line 365: check if lexnames exist.
# - line 765: check if lexnames exist + use os.path.join().
# - line 674: add HYPONYM and HYPERNYM to the pointer table.
# - line 916: implement "x in Dictionary" instead of Dictionary.has_key(x)
# - line 804: Dictionary.dataFile now stores a list of (file, size)-tuples.
# - line 1134: _dataFilePath() returns a list (i.e., data.noun can be split into data.noun1 + data.noun2).
# - line 1186: _lineAt() seeks in second datafile if offset > EOF first datafile.
VERSION = ""
s = open(os.path.join(MODULE, CORPUS, "dict", "index.noun")).read(2048)
if "WordNet 2.1" in s: VERSION = "2.1"
if "WordNet 3.0" in s: VERSION = "3.0"
del s
#---------------------------------------------------------------------------------------------------
DIACRITICS = {
"a": ("á","ä","â","à","å"),
"e": ("é","ë","ê","è"),
"i": ("í","ï","î","ì"),
"o": ("ó","ö","ô","ò","ō","ø"),
"u": ("ú","ü","û","ù","ů"),
"y": ("ý","ÿ","ý"),
"s": ("š",),
"c": ("ç","č"),
"n": ("ñ",),
"z": ("ž",)
}
def normalize(word):
""" Normalizes the word for synsets() or Sentiwordnet[] by removing diacritics
(PyWordNet does not take unicode).
"""
if not isinstance(word, basestring):
word = str(word)
if not isinstance(word, str):
try: word = word.encode("utf-8", "ignore")
except:
pass
for k, v in DIACRITICS.items():
for v in v:
word = word.replace(v, k)
return word
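# Example (added): normalize(u"café") is expected to return "cafe",
# since "é" is mapped back to "e" through the DIACRITICS table above.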
### SYNSET #########################################################################################
NOUNS, VERBS, ADJECTIVES, ADVERBS = \
wn.N, wn.V, wn.ADJ, wn.ADV
NOUN, VERB, ADJECTIVE, ADVERB = \
NN, VB, JJ, RB = \
"NN", "VB", "JJ", "RB"
def synsets(word, pos=NOUN):
""" Returns a list of Synset objects, one for each word sense.
Each word can be understood in different "senses",
each of which is part of a set of synonyms (= Synset).
"""
word, pos = normalize(word), pos.lower()
try:
if pos.startswith(NOUN.lower()): # "NNS" or "nn" will also pass.
w = wn.N[word]
elif pos.startswith(VERB.lower()):
w = wn.V[word]
elif pos.startswith(ADJECTIVE.lower()):
w = wn.ADJ[word]
elif pos.startswith(ADVERB.lower()):
w = wn.ADV[word]
else:
raise TypeError("part of speech must be NOUN, VERB, ADJECTIVE or ADVERB, not %s" % repr(pos))
return [Synset(s.synset) for i, s in enumerate(w)]
except KeyError:
return []
class Synset(object):
def __init__(self, synset=None, pos=NOUN):
""" A set of synonyms that share a common meaning.
"""
if isinstance(synset, int):
synset = wn.getSynset({NN: "n", VB: "v", JJ: "adj", RB: "adv"}[pos], synset)
if isinstance(synset, basestring):
synset = synsets(synset, pos)[0]._synset
self._synset = synset
def __iter__(self):
for s in self._synset.getSenses(): yield unicode(s.form)
def __len__(self):
return len(self._synset.getSenses())
def __getitem__(self, i):
return unicode(self._synset.getSenses()[i].form)
def __eq__(self, synset):
return isinstance(synset, Synset) and self.id == synset.id
def __ne__(self, synset):
return not self.__eq__(synset)
def __repr__(self):
return "Synset(%s)" % repr(self[0])
@property
def id(self):
return self._synset.offset
@property
def pos(self):
""" Yields the part-of-speech tag (NOUN, VERB, ADJECTIVE or ADVERB).
"""
pos = self._synset.pos
if pos == "noun":
return NOUN
if pos == "verb":
return VERB
if pos == "adjective":
return ADJECTIVE
if pos == "adverb":
return ADVERB
part_of_speech = tag = pos
@property
def synonyms(self):
""" Yields a list of word forms (i.e. synonyms), for example:
synsets("TV")[0].synonyms => ["television", "telecasting", "TV", "video"]
"""
return [unicode(s.form) for s in self._synset.getSenses()]
senses = synonyms # Backwards compatibility; senses = list of Synsets for a word.
@property
def gloss(self):
""" Yields a descriptive string, for example:
synsets("glass")[0].gloss => "a brittle transparent solid with irregular atomic structure".
"""
return unicode(self._synset.gloss)
@property
def lexname(self):
""" Yields a category, e.g., noun.animal.
"""
return self._synset.lexname and unicode(self._synset.lexname) or None
@property
def antonym(self):
""" Yields the semantically opposite synset, for example:
synsets("death")[0].antonym => Synset("birth").
"""
p = self._synset.getPointers(wn.ANTONYM)
return len(p) > 0 and Synset(p[0].getTarget()) or None
def meronyms(self):
""" Yields a list of synsets that are semantic members/parts of this synset, for example:
synsets("house")[0].meronyms() =>
[Synset("library"),
Synset("loft"),
Synset("porch")
]
"""
p = self._synset.getPointers(wn.MEMBER_HOLONYM)
p+= self._synset.getPointers(wn.PART_HOLONYM)
return [Synset(p.getTarget()) for p in p]
def holonyms(self):
""" Yields a list of synsets of which this synset is a member/part, for example:
synsets("tree")[0].holonyms() => Synset("forest").
"""
p = self._synset.getPointers(wn.MEMBER_MERONYM)
p+= self._synset.getPointers(wn.PART_MERONYM)
return [Synset(p.getTarget()) for p in p]
def hyponyms(self, recursive=False, depth=None):
""" Yields a list of semantically more specific synsets, for example:
synsets("train")[0].hyponyms() =>
[Synset("boat train"),
Synset("car train"),
Synset("freight train"),
Synset("hospital train"),
Synset("mail train"),
Synset("passenger train"),
Synset("streamliner"),
Synset("subway train")
]
"""
p = [Synset(p.getTarget()) for p in self._synset.getPointers(wn.HYPONYM)]
if depth is None and recursive is False:
return p
if depth == 0:
return []
if depth is not None:
depth -= 1
if depth is None or depth > 0:
[p.extend(s.hyponyms(True, depth)) for s in list(p)]
return p
def hypernyms(self, recursive=False, depth=None):
""" Yields a list of semantically broader synsets.
"""
p = [Synset(p.getTarget()) for p in self._synset.getPointers(wn.HYPERNYM)]
if depth is None and recursive is False:
return p
if depth == 0:
return []
if depth is not None:
depth -= 1
if depth is None or depth > 0:
[p.extend(s.hypernyms(True, depth)) for s in list(p)]
return p
@property
def hypernym(self):
""" Yields the synset that is the semantic parent, for example:
synsets("train")[0].hypernym => Synset("public transport").
"""
p = self._synset.getPointers(wn.HYPERNYM)
return len(p) > 0 and Synset(p[0].getTarget()) or None
def similar(self):
""" Returns a list of similar synsets for adjectives and adverbs, for example:
synsets("almigthy",JJ)[0].similar() => Synset("powerful").
"""
# ALSO_SEE returns wn.Sense instead of wn.Synset in some cases:
s = lambda x: isinstance(x, wn.Sense) and x.synset or x
p = [Synset(s(p.getTarget())) for p in self._synset.getPointers(wn.SIMILAR)]
p+= [Synset(s(p.getTarget())) for p in self._synset.getPointers(wn.ALSO_SEE)]
return p
def similarity(self, synset):
""" Returns the semantic similarity of the given synsets (0.0-1.0).
synsets("cat")[0].similarity(synsets("dog")[0]) => 0.86.
synsets("cat")[0].similarity(synsets("box")[0]) => 0.17.
"""
if self == synset:
return 1.0
try: # Lin semantic distance measure.
lin = 2.0 * log(lcs(self, synset).ic) / (log(self.ic * synset.ic) or 1)
except OverflowError:
lin = 0.0
except ValueError: # / log(0)
lin = 0.0
return abs(lin)
@property
def ic(self):
return information_content(self)
@property
def weight(self):
return sentiwordnet is not None \
and sentiwordnet.synset(self.id, self.pos)[:2] \
or None
def similarity(synset1, synset2):
""" Returns the semantic similarity of the given synsets.
"""
return synset1.similarity(synset2)
def ancestor(synset1, synset2):
""" Returns the common ancestor of both synsets.
For example synsets("cat")[0].ancestor(synsets("dog")[0]) => Synset("carnivore")
"""
h1, h2 = synset1.hypernyms(recursive=True), synset2.hypernyms(recursive=True)
for s in h1:
if s in h2:
return s
least_common_subsumer = lcs = ancestor
### INFORMATION CONTENT ############################################################################
# Information Content (IC) is used to calculate semantic similarity in Synset.similarity().
# Information Content values for each synset are derived from word frequency in a given corpus.
# The idea is that less frequent words convey more information.
# Semantic similarity depends on the amount of information two concepts (synsets) have in common,
# given by the Most Specific Common Abstraction (MSCA), i.e. the shared ancestor in the taxonomy.
# http://www.d.umn.edu/~tpederse/Pubs/AAAI04PedersenT.pdf
# http://afflatus.ucd.ie/papers/ecai2004b.pdf
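# The Synset.similarity() method above uses these IC values in the Lin measure (informal sketch):
#   lin(s1, s2) = 2 * log(IC(lcs(s1, s2))) / log(IC(s1) * IC(s2))
# where lcs() is the least common subsumer defined above; the implementation also guards
# against log(0) and returns the absolute value.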
IC = {} # Switch data file according to WordNet version:
IC_CORPUS = os.path.join(MODULE, "resnik-ic" + VERSION[0] + ".txt")
IC_MAX = 0
def information_content(synset):
""" Returns the IC value for the given Synset (trained on the Brown corpus).
"""
global IC_MAX
if not IC:
IC[NOUN] = {}
IC[VERB] = {}
for s in open(IC_CORPUS).readlines()[1:]: # Skip the header.
s = s.split()
id, w, pos = (
int(s[0][:-1]),
float(s[1]),
s[0][-1] == "n" and NOUN or VERB)
if len(s) == 3 and s[2] == "ROOT":
IC[pos][0] = IC[pos].get(0,0) + w
if w != 0:
IC[pos][id] = w
if w > IC_MAX:
IC_MAX = w
return IC.get(synset.pos, {}).get(synset.id, 0.0) / IC_MAX
### WORDNET3 TO WORDNET2 ###########################################################################
# Map WordNet3 synset id's to WordNet2 synset id's.
_map32_pos1 = {NN: "n", VB: "v", JJ: "a", RB: "r"}
_map32_pos2 = {"n": NN, "v": VB, "a": JJ, "r": RB}
_map32_cache = None
def map32(id, pos=NOUN):
""" Returns an (id, pos)-tuple with the WordNet2 synset id for the given WordNet3 synset id.
Returns None if no id was found.
"""
global _map32_cache
if not _map32_cache:
_map32_cache = open(os.path.join(MODULE, "dict", "index.32")).readlines()
_map32_cache = (x for x in _map32_cache if x[0] != ";") # comments
_map32_cache = dict(x.strip().split(" ") for x in _map32_cache)
k = pos in _map32_pos2 and pos or _map32_pos1.get(pos, "x")
k+= str(id).lstrip("0")
k = _map32_cache.get(k, None)
if k is not None:
return int(k[1:]), _map32_pos2[k[0]]
return None
#### SENTIWORDNET ##################################################################################
# http://nmis.isti.cnr.it/sebastiani/Publications/LREC06.pdf
# http://nmis.isti.cnr.it/sebastiani/Publications/LREC10.pdf
sys.path.insert(0, os.path.join(MODULE, "..", ".."))
try:
from pattern.text import Sentiment
except:
class Sentiment(object):
PLACEHOLDER = True
sys.path.pop(0)
class SentiWordNet(Sentiment):
def __init__(self, path="SentiWordNet*.txt", language="en"):
""" A sentiment lexicon with scores from SentiWordNet.
The value for each word is a tuple with values for
polarity (-1.0-1.0), subjectivity (0.0-1.0) and intensity (0.5-2.0).
"""
Sentiment.__init__(self, path=path, language=language)
def load(self):
# Backwards compatibility: look for SentiWordNet*.txt in:
# given path, pattern/text/en/ or pattern/text/en/wordnet/
try: f = (
glob.glob(os.path.join(self.path)) + \
glob.glob(os.path.join(MODULE, self.path)) + \
glob.glob(os.path.join(MODULE, "..", self.path)))[0]
except IndexError:
raise ImportError("can't find SentiWordnet data file")
        # Map synset id: "a-00193480" => (193480, JJ).
# Map synset id's to WordNet2 if VERSION == 2:
if int(float(VERSION)) == 3:
m = lambda id, pos: (int(id.lstrip("0")), _map32_pos2[pos])
if int(float(VERSION)) == 2:
m = map32
for s in open(f):
if not s.startswith(("#", "\t")):
pos, id, p, n, senses, gloss = s.split("\t")
w = senses.split()
k = m(id, pos)
v = (float(p) - float(n),
float(p) + float(n)
)
# Apply the score to the first synonym in the synset.
# Several WordNet3 entries may point to the same WordNet2 entry.
if k is not None:
k = "%s-%s" % (pos, str(k[0]).zfill(8)) # "a-00193480"
if k not in self._synsets or w[0].endswith("#1"):
self._synsets[k] = v
for w in w:
if w.endswith("#1"):
dict.__setitem__(self, w[:-2].replace("_", " "), v)
# Words are stored without diacritics,
# use wordnet.normalize(word).
def __getitem__(self, k):
return Sentiment.__getitem__(self, normalize(k))
def get(self, k, *args, **kwargs):
return Sentiment.get(self, normalize(k), *args, **kwargs)
def assessments(self, words=[], negation=True):
raise NotImplementedError
def __call__(self, s, negation=True):
raise NotImplementedError
if not hasattr(Sentiment, "PLACEHOLDER"):
sentiwordnet = SentiWordNet()
else:
sentiwordnet = None
# Backwards compatibility.
# Older code may be using pattern.en.wordnet.sentiment[w],
# which yields a (positive, negative, neutral)-tuple.
class sentiment(object):
def load(self, **kwargs):
sentiwordnet.load(**kwargs)
def __getitem__(self, w):
p, s = sentiwordnet.get(w, (0.0, 0.0))
return p < 0 and (0.0, -p, 1.0-s) or (p, 0.0, 1.0-s)
def __contains__(self, w):
return w in sentiwordnet
sentiment = sentiment()
#print sentiwordnet["industry"] # (0.0, 0.0)
#print sentiwordnet["horrible"] # (-0.625, 0.625)
#print sentiwordnet.synset(synsets("horrible", pos="JJ")[0].id, pos="JJ")
#print synsets("horrible", pos="JJ")[0].weight
|
bsd-3-clause
|
wdurhamh/statsmodels
|
statsmodels/discrete/discrete_margins.py
|
19
|
25467
|
#Splitting out marginal effects to see if they can be generalized
from statsmodels.compat.python import lzip, callable, range
import numpy as np
from scipy.stats import norm
from statsmodels.tools.decorators import cache_readonly, resettable_cache
#### margeff helper functions ####
#NOTE: todo marginal effects for group 2
# group 2 oprobit, ologit, gologit, mlogit, biprobit
def _check_margeff_args(at, method):
"""
Checks valid options for margeff
"""
if at not in ['overall','mean','median','zero','all']:
raise ValueError("%s not a valid option for `at`." % at)
if method not in ['dydx','eyex','dyex','eydx']:
raise ValueError("method is not understood. Got %s" % method)
def _check_discrete_args(at, method):
"""
Checks the arguments for margeff if the exogenous variables are discrete.
"""
if method in ['dyex','eyex']:
raise ValueError("%s not allowed for discrete variables" % method)
if at in ['median', 'zero']:
raise ValueError("%s not allowed for discrete variables" % at)
def _get_const_index(exog):
"""
    Returns a boolean array of non-constant column indices in exog and
    a scalar array of where the constant is, or None
"""
effects_idx = exog.var(0) != 0
if np.any(~effects_idx):
const_idx = np.where(~effects_idx)[0]
else:
const_idx = None
return effects_idx, const_idx
def _isdummy(X):
"""
Given an array X, returns the column indices for the dummy variables.
Parameters
----------
X : array-like
A 1d or 2d array of numbers
Examples
--------
>>> X = np.random.randint(0, 2, size=(15,5)).astype(float)
>>> X[:,1:3] = np.random.randn(15,2)
>>> ind = _isdummy(X)
>>> ind
array([ True, False, False, True, True], dtype=bool)
"""
X = np.asarray(X)
if X.ndim > 1:
ind = np.zeros(X.shape[1]).astype(bool)
max = (np.max(X, axis=0) == 1)
min = (np.min(X, axis=0) == 0)
remainder = np.all(X % 1. == 0, axis=0)
ind = min & max & remainder
if X.ndim == 1:
ind = np.asarray([ind])
return np.where(ind)[0]
def _get_dummy_index(X, const_idx):
dummy_ind = _isdummy(X)
dummy = True
# adjust back for a constant because effects doesn't have one
if const_idx is not None:
dummy_ind[dummy_ind > const_idx] -= 1
if dummy_ind.size == 0: # don't waste your time
dummy = False
dummy_ind = None # this gets passed to stand err func
return dummy_ind, dummy
def _iscount(X):
"""
Given an array X, returns the column indices for count variables.
Parameters
----------
X : array-like
A 1d or 2d array of numbers
Examples
--------
>>> X = np.random.randint(0, 10, size=(15,5)).astype(float)
>>> X[:,1:3] = np.random.randn(15,2)
>>> ind = _iscount(X)
>>> ind
array([ True, False, False, True, True], dtype=bool)
"""
X = np.asarray(X)
remainder = np.logical_and(np.logical_and(np.all(X % 1. == 0, axis = 0),
X.var(0) != 0), np.all(X >= 0, axis=0))
dummy = _isdummy(X)
remainder = np.where(remainder)[0].tolist()
for idx in dummy:
remainder.remove(idx)
return np.array(remainder)
def _get_count_index(X, const_idx):
count_ind = _iscount(X)
count = True
# adjust back for a constant because effects doesn't have one
if const_idx is not None:
count_ind[count_ind > const_idx] -= 1
if count_ind.size == 0: # don't waste your time
count = False
count_ind = None # for stand err func
return count_ind, count
def _get_margeff_exog(exog, at, atexog, ind):
if atexog is not None: # user supplied
if isinstance(atexog, dict):
# assumes values are singular or of len(exog)
for key in atexog:
exog[:,key] = atexog[key]
elif isinstance(atexog, np.ndarray): #TODO: handle DataFrames
if atexog.ndim == 1:
k_vars = len(atexog)
else:
k_vars = atexog.shape[1]
try:
assert k_vars == exog.shape[1]
except:
raise ValueError("atexog does not have the same number "
"of variables as exog")
exog = atexog
#NOTE: we should fill in atexog after we process at
if at == 'mean':
exog = np.atleast_2d(exog.mean(0))
elif at == 'median':
exog = np.atleast_2d(np.median(exog, axis=0))
elif at == 'zero':
exog = np.zeros((1,exog.shape[1]))
exog[0,~ind] = 1
return exog
def _get_count_effects(effects, exog, count_ind, method, model, params):
"""
If there's a count variable, the predicted difference is taken by
subtracting one and adding one to exog then averaging the difference
"""
# this is the index for the effect and the index for count col in exog
for i in count_ind:
exog0 = exog.copy()
exog0[:, i] -= 1
effect0 = model.predict(params, exog0)
exog0[:, i] += 2
effect1 = model.predict(params, exog0)
#NOTE: done by analogy with dummy effects but untested bc
# stata doesn't handle both count and eydx anywhere
if 'ey' in method:
effect0 = np.log(effect0)
effect1 = np.log(effect1)
effects[:, i] = ((effect1 - effect0)/2)
return effects
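# Added note: for a count regressor x_i the loop above computes the centered difference
# (F(x_i + 1) - F(x_i - 1)) / 2, i.e. the average change in the prediction per unit
# change in the count variable.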
def _get_dummy_effects(effects, exog, dummy_ind, method, model, params):
"""
If there's a dummy variable, the predicted difference is taken at
0 and 1
"""
# this is the index for the effect and the index for dummy col in exog
for i in dummy_ind:
exog0 = exog.copy() # only copy once, can we avoid a copy?
exog0[:,i] = 0
effect0 = model.predict(params, exog0)
#fittedvalues0 = np.dot(exog0,params)
exog0[:,i] = 1
effect1 = model.predict(params, exog0)
if 'ey' in method:
effect0 = np.log(effect0)
effect1 = np.log(effect1)
effects[:, i] = (effect1 - effect0)
return effects
def _effects_at(effects, at):
if at == 'all':
effects = effects
elif at == 'overall':
effects = effects.mean(0)
else:
effects = effects[0,:]
return effects
def _margeff_cov_params_dummy(model, cov_margins, params, exog, dummy_ind,
method, J):
"""
Returns the Jacobian for discrete regressors for use in margeff_cov_params.
For discrete regressors the marginal effect is
\Delta F = F(XB) | d = 1 - F(XB) | d = 0
The row of the Jacobian for this variable is given by
f(XB)*X | d = 1 - f(XB)*X | d = 0
Where F is the default prediction of the model.
"""
for i in dummy_ind:
exog0 = exog.copy()
exog1 = exog.copy()
exog0[:,i] = 0
exog1[:,i] = 1
dfdb0 = model._derivative_predict(params, exog0, method)
dfdb1 = model._derivative_predict(params, exog1, method)
dfdb = (dfdb1 - dfdb0)
if dfdb.ndim >= 2: # for overall
dfdb = dfdb.mean(0)
if J > 1:
K = dfdb.shape[1] // (J-1)
cov_margins[i::K, :] = dfdb
else:
cov_margins[i, :] = dfdb # how each F changes with change in B
return cov_margins
def _margeff_cov_params_count(model, cov_margins, params, exog, count_ind,
method, J):
"""
Returns the Jacobian for discrete regressors for use in margeff_cov_params.
For discrete regressors the marginal effect is
\Delta F = F(XB) | d += 1 - F(XB) | d -= 1
The row of the Jacobian for this variable is given by
(f(XB)*X | d += 1 - f(XB)*X | d -= 1) / 2
where F is the default prediction for the model.
"""
for i in count_ind:
exog0 = exog.copy()
exog0[:,i] -= 1
dfdb0 = model._derivative_predict(params, exog0, method)
exog0[:,i] += 2
dfdb1 = model._derivative_predict(params, exog0, method)
dfdb = (dfdb1 - dfdb0)
if dfdb.ndim >= 2: # for overall
dfdb = dfdb.mean(0) / 2
if J > 1:
            K = dfdb.shape[1] // (J-1)
cov_margins[i::K, :] = dfdb
else:
cov_margins[i, :] = dfdb # how each F changes with change in B
return cov_margins
def margeff_cov_params(model, params, exog, cov_params, at, derivative,
dummy_ind, count_ind, method, J):
"""
Computes the variance-covariance of marginal effects by the delta method.
Parameters
----------
model : model instance
The model that returned the fitted results. Its pdf method is used
for computing the Jacobian of discrete variables in dummy_ind and
count_ind
params : array-like
estimated model parameters
exog : array-like
exogenous variables at which to calculate the derivative
cov_params : array-like
The variance-covariance of the parameters
at : str
Options are:
- 'overall', The average of the marginal effects at each
observation.
- 'mean', The marginal effects at the mean of each regressor.
- 'median', The marginal effects at the median of each regressor.
- 'zero', The marginal effects at zero for each regressor.
- 'all', The marginal effects at each observation.
        Only overall has any effect here.
derivative : function or array-like
If a function, it returns the marginal effects of the model with
respect to the exogenous variables evaluated at exog. Expected to be
called derivative(params, exog). This will be numerically
differentiated. Otherwise, it can be the Jacobian of the marginal
effects with respect to the parameters.
dummy_ind : array-like
Indices of the columns of exog that contain dummy variables
count_ind : array-like
Indices of the columns of exog that contain count variables
Notes
-----
For continuous regressors, the variance-covariance is given by
Asy. Var[MargEff] = [d margeff / d params] V [d margeff / d params]'
where V is the parameter variance-covariance.
The outer Jacobians are computed via numerical differentiation if
derivative is a function.
"""
if callable(derivative):
from statsmodels.tools.numdiff import approx_fprime_cs
params = params.ravel('F') # for Multinomial
try:
jacobian_mat = approx_fprime_cs(params, derivative,
args=(exog,method))
except TypeError: # norm.cdf doesn't take complex values
from statsmodels.tools.numdiff import approx_fprime
jacobian_mat = approx_fprime(params, derivative,
args=(exog,method))
if at == 'overall':
jacobian_mat = np.mean(jacobian_mat, axis=1)
else:
jacobian_mat = jacobian_mat.squeeze() # exog was 2d row vector
if dummy_ind is not None:
jacobian_mat = _margeff_cov_params_dummy(model, jacobian_mat,
params, exog, dummy_ind, method, J)
if count_ind is not None:
jacobian_mat = _margeff_cov_params_count(model, jacobian_mat,
params, exog, count_ind, method, J)
else:
jacobian_mat = derivative
#NOTE: this won't go through for at == 'all'
return np.dot(np.dot(jacobian_mat, cov_params), jacobian_mat.T)
def margeff_cov_with_se(model, params, exog, cov_params, at, derivative,
dummy_ind, count_ind, method, J):
"""
See margeff_cov_params.
Same function but returns both the covariance of the marginal effects
and their standard errors.
"""
cov_me = margeff_cov_params(model, params, exog, cov_params, at,
derivative, dummy_ind,
count_ind, method, J)
return cov_me, np.sqrt(np.diag(cov_me))
def margeff():
pass
def _check_at_is_all(method):
if method['at'] == 'all':
raise NotImplementedError("Only margeff are available when `at` is "
"all. Please input specific points if you would like to "
"do inference.")
_transform_names = dict(dydx='dy/dx',
eyex='d(lny)/d(lnx)',
dyex='dy/d(lnx)',
eydx='d(lny)/dx')
class Margins(object):
"""
Mostly a do nothing class. Lays out the methods expected of a sub-class.
This is just a sketch of what we may want out of a general margins class.
I (SS) need to look at details of other models.
"""
def __init__(self, results, get_margeff, derivative, dist=None,
margeff_args=()):
self._cache = resettable_cache()
self.results = results
self.dist = dist
self.get_margeff(margeff_args)
def _reset(self):
self._cache = resettable_cache()
def get_margeff(self, *args, **kwargs):
self._reset()
self.margeff = self.get_margeff(*args)
@cache_readonly
def tvalues(self):
raise NotImplementedError
@cache_readonly
def cov_margins(self):
raise NotImplementedError
@cache_readonly
def margins_se(self):
raise NotImplementedError
def summary_frame(self):
raise NotImplementedError
@cache_readonly
def pvalues(self):
raise NotImplementedError
def conf_int(self, alpha=.05):
raise NotImplementedError
def summary(self, alpha=.05):
raise NotImplementedError
#class DiscreteMargins(Margins):
class DiscreteMargins(object):
"""Get marginal effects of a Discrete Choice model.
Parameters
----------
results : DiscreteResults instance
The results instance of a fitted discrete choice model
args : tuple
Args are passed to `get_margeff`. This is the same as
results.get_margeff. See there for more information.
kwargs : dict
Keyword args are passed to `get_margeff`. This is the same as
results.get_margeff. See there for more information.
"""
def __init__(self, results, args, kwargs={}):
self._cache = resettable_cache()
self.results = results
self.get_margeff(*args, **kwargs)
def _reset(self):
self._cache = resettable_cache()
@cache_readonly
def tvalues(self):
_check_at_is_all(self.margeff_options)
return self.margeff / self.margeff_se
def summary_frame(self, alpha=.05):
"""
Returns a DataFrame summarizing the marginal effects.
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
frame : DataFrames
A DataFrame summarizing the marginal effects.
"""
_check_at_is_all(self.margeff_options)
from pandas import DataFrame
names = [_transform_names[self.margeff_options['method']],
'Std. Err.', 'z', 'Pr(>|z|)',
                 'Conf. Int. Low', 'Conf. Int. Hi.']
ind = self.results.model.exog.var(0) != 0 # True if not a constant
exog_names = self.results.model.exog_names
var_names = [name for i,name in enumerate(exog_names) if ind[i]]
table = np.column_stack((self.margeff, self.margeff_se, self.tvalues,
self.pvalues, self.conf_int(alpha)))
return DataFrame(table, columns=names, index=var_names)
@cache_readonly
def pvalues(self):
_check_at_is_all(self.margeff_options)
return norm.sf(np.abs(self.tvalues)) * 2
def conf_int(self, alpha=.05):
"""
Returns the confidence intervals of the marginal effects
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
conf_int : ndarray
An array with lower, upper confidence intervals for the marginal
effects.
"""
_check_at_is_all(self.margeff_options)
me_se = self.margeff_se
q = norm.ppf(1 - alpha / 2)
lower = self.margeff - q * me_se
upper = self.margeff + q * me_se
return np.asarray(lzip(lower, upper))
def summary(self, alpha=.05):
"""
Returns a summary table for marginal effects
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
Summary : SummaryTable
A SummaryTable instance
"""
_check_at_is_all(self.margeff_options)
results = self.results
model = results.model
title = model.__class__.__name__ + " Marginal Effects"
method = self.margeff_options['method']
top_left = [('Dep. Variable:', [model.endog_names]),
('Method:', [method]),
('At:', [self.margeff_options['at']]),]
from statsmodels.iolib.summary import (Summary, summary_params,
table_extend)
exog_names = model.exog_names[:] # copy
smry = Summary()
# sigh, we really need to hold on to this in _data...
_, const_idx = _get_const_index(model.exog)
if const_idx is not None:
exog_names.pop(const_idx)
J = int(getattr(model, "J", 1))
if J > 1:
yname, yname_list = results._get_endog_name(model.endog_names,
None, all=True)
else:
yname = model.endog_names
yname_list = [yname]
smry.add_table_2cols(self, gleft=top_left, gright=[],
yname=yname, xname=exog_names, title=title)
#NOTE: add_table_params is not general enough yet for margeff
# could use a refactor with getattr instead of hard-coded params
# tvalues etc.
table = []
conf_int = self.conf_int(alpha)
margeff = self.margeff
margeff_se = self.margeff_se
tvalues = self.tvalues
pvalues = self.pvalues
if J > 1:
for eq in range(J):
restup = (results, margeff[:,eq], margeff_se[:,eq],
tvalues[:,eq], pvalues[:,eq], conf_int[:,:,eq])
tble = summary_params(restup, yname=yname_list[eq],
xname=exog_names, alpha=alpha, use_t=False,
skip_header=True)
tble.title = yname_list[eq]
# overwrite coef with method name
header = ['', _transform_names[method], 'std err', 'z',
'P>|z|', '[%3.1f%% Conf. Int.]' % (100-alpha*100)]
tble.insert_header_row(0, header)
#from IPython.core.debugger import Pdb; Pdb().set_trace()
table.append(tble)
table = table_extend(table, keep_headers=True)
else:
restup = (results, margeff, margeff_se, tvalues, pvalues, conf_int)
table = summary_params(restup, yname=yname, xname=exog_names,
alpha=alpha, use_t=False, skip_header=True)
header = ['', _transform_names[method], 'std err', 'z',
'P>|z|', '[%3.1f%% Conf. Int.]' % (100-alpha*100)]
table.insert_header_row(0, header)
smry.tables.append(table)
return smry
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Parameters
----------
at : str, optional
Options are:
- 'overall', The average of the marginal effects at each
observation.
- 'mean', The marginal effects at the mean of each regressor.
- 'median', The marginal effects at the median of each regressor.
- 'zero', The marginal effects at zero for each regressor.
- 'all', The marginal effects at each observation. If `at` is all
only margeff will be available.
Note that if `exog` is specified, then marginal effects for all
variables not specified by `exog` are calculated using the `at`
option.
method : str, optional
Options are:
- 'dydx' - dy/dx - No transformation is made and marginal effects
are returned. This is the default.
- 'eyex' - estimate elasticities of variables in `exog` --
d(lny)/d(lnx)
- 'dyex' - estimate semielasticity -- dy/d(lnx)
            - 'eydx' - estimate semielasticity -- d(lny)/dx
            Note that transformations are done after each observation is
calculated. Semi-elasticities for binary variables are computed
using the midpoint method. 'dyex' and 'eyex' do not make sense
for discrete variables.
atexog : array-like, optional
Optionally, you can provide the exogenous variables over which to
            get the marginal effects. This should be a dictionary with the
            zero-indexed column number as the key and, as the value, the
            value(s) to substitute into that column of exog (a scalar or of len(exog)).
Default is None for all independent variables less the constant.
dummy : bool, optional
If False, treats binary variables (if present) as continuous. This
is the default. Else if True, treats binary variables as
changing from 0 to 1. Note that any variable that is either 0 or 1
is treated as binary. Each binary variable is treated separately
for now.
count : bool, optional
If False, treats count variables (if present) as continuous. This
is the default. Else if True, the marginal effect is the
change in probabilities when each observation is increased by one.
Returns
-------
effects : ndarray
the marginal effect corresponding to the input options
Notes
-----
When using after Poisson, returns the expected number of events
per period, assuming that the model is loglinear.
"""
self._reset() # always reset the cache when this is called
#TODO: if at is not all or overall, we can also put atexog values
# in summary table head
method = method.lower()
at = at.lower()
_check_margeff_args(at, method)
self.margeff_options = dict(method=method, at=at)
results = self.results
model = results.model
params = results.params
exog = model.exog.copy() # copy because values are changed
effects_idx, const_idx = _get_const_index(exog)
if dummy:
_check_discrete_args(at, method)
dummy_idx, dummy = _get_dummy_index(exog, const_idx)
else:
dummy_idx = None
if count:
_check_discrete_args(at, method)
count_idx, count = _get_count_index(exog, const_idx)
else:
count_idx = None
# get the exogenous variables
exog = _get_margeff_exog(exog, at, atexog, effects_idx)
# get base marginal effects, handled by sub-classes
effects = model._derivative_exog(params, exog, method,
dummy_idx, count_idx)
J = getattr(model, 'J', 1)
effects_idx = np.tile(effects_idx, J) # adjust for multi-equation.
effects = _effects_at(effects, at)
if at == 'all':
if J > 1:
K = model.K - np.any(~effects_idx) # subtract constant
self.margeff = effects[:, effects_idx].reshape(-1, K, J,
order='F')
else:
self.margeff = effects[:, effects_idx]
else:
# Set standard error of the marginal effects by Delta method.
margeff_cov, margeff_se = margeff_cov_with_se(model, params, exog,
results.cov_params(), at,
model._derivative_exog,
dummy_idx, count_idx,
method, J)
# reshape for multi-equation
if J > 1:
K = model.K - np.any(~effects_idx) # subtract constant
self.margeff = effects[effects_idx].reshape(K, J, order='F')
self.margeff_se = margeff_se[effects_idx].reshape(K, J,
order='F')
self.margeff_cov = margeff_cov[effects_idx][:, effects_idx]
else:
# don't care about at constant
self.margeff_cov = margeff_cov[effects_idx][:, effects_idx]
self.margeff_se = margeff_se[effects_idx]
self.margeff = effects[effects_idx]
|
bsd-3-clause
|
RoAbreu/AulaJavaScripts
|
PROJETO/backend/appengine/routes/livroNovos/admin/home.py
|
1
|
1170
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from config.template_middleware import TemplateResponse
from tekton import router
from gaecookie.decorator import no_csrf
from livroNovo_app import facade
from routes.livroNovos.admin import new, edit
def delete(_handler, livro_novo_id):
facade.delete_livro_novo_cmd(livro_novo_id)()
_handler.redirect(router.to_path(index))
@no_csrf
def index():
cmd = facade.list_livro_novos_cmd()
livro_novos = cmd()
edit_path = router.to_path(edit)
delete_path = router.to_path(delete)
short_form = facade.livro_novo_short_form()
def short_livro_novo_dict(livro_novo):
livro_novo_dct = short_form.fill_with_model(livro_novo)
livro_novo_dct['edit_path'] = router.to_path(edit_path, livro_novo_dct['id'])
livro_novo_dct['delete_path'] = router.to_path(delete_path, livro_novo_dct['id'])
return livro_novo_dct
short_livro_novos = [short_livro_novo_dict(livro_novo) for livro_novo in livro_novos]
context = {'livro_novos': short_livro_novos,
'new_path': router.to_path(new)}
return TemplateResponse(context)
|
mit
|
sjlehtin/django
|
django/contrib/admin/migrations/0001_initial.py
|
95
|
1893
|
import django.contrib.admin.models
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '__first__'),
]
operations = [
migrations.CreateModel(
name='LogEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('action_time', models.DateTimeField(auto_now=True, verbose_name='action time')),
('object_id', models.TextField(null=True, verbose_name='object id', blank=True)),
('object_repr', models.CharField(max_length=200, verbose_name='object repr')),
('action_flag', models.PositiveSmallIntegerField(verbose_name='action flag')),
('change_message', models.TextField(verbose_name='change message', blank=True)),
('content_type', models.ForeignKey(
to_field='id',
on_delete=models.SET_NULL,
blank=True, null=True,
to='contenttypes.ContentType',
verbose_name='content type',
)),
('user', models.ForeignKey(
to=settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
verbose_name='user',
)),
],
options={
'ordering': ('-action_time',),
'db_table': 'django_admin_log',
'verbose_name': 'log entry',
'verbose_name_plural': 'log entries',
},
bases=(models.Model,),
managers=[
('objects', django.contrib.admin.models.LogEntryManager()),
],
),
]
|
bsd-3-clause
|
CVML/cvxpy
|
cvxpy/atoms/atom.py
|
4
|
7417
|
"""
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from .. import settings as s
from .. import utilities as u
from .. import interface as intf
from ..expressions.constants import Constant, CallbackParam
from ..expressions.variables import Variable
from ..expressions.expression import Expression
import abc
import sys
if sys.version_info >= (3, 0):
from functools import reduce
class Atom(Expression):
""" Abstract base class for atoms. """
__metaclass__ = abc.ABCMeta
# args are the expressions passed into the Atom constructor.
def __init__(self, *args):
# Throws error if args is empty.
if len(args) == 0:
raise TypeError(
"No arguments given to %s." % self.__class__.__name__
)
# Convert raw values to Constants.
self.args = [Atom.cast_to_const(arg) for arg in args]
self.validate_arguments()
self.init_dcp_attr()
# Returns the string representation of the function call.
def name(self):
return "%s(%s)" % (self.__class__.__name__,
", ".join([arg.name() for arg in self.args]))
def init_dcp_attr(self):
"""Determines the curvature, sign, and shape from the arguments.
"""
# Initialize _shape. Raises an error for invalid argument sizes.
shape = self.shape_from_args()
sign = self.sign_from_args()
curvature = Atom.dcp_curvature(self.func_curvature(),
self.args,
self.monotonicity())
self._dcp_attr = u.DCPAttr(sign, curvature, shape)
# Returns argument curvatures as a list.
def argument_curvatures(self):
return [arg.curvature for arg in self.args]
# Raises an error if the arguments are invalid.
def validate_arguments(self):
pass
# The curvature of the atom if all arguments conformed to DCP.
# Alternatively, the curvature of the atom's function.
@abc.abstractmethod
def func_curvature(self):
return NotImplemented
# Returns a list with the monotonicity in each argument.
# monotonicity can depend on the sign of the argument.
@abc.abstractmethod
def monotonicity(self):
return NotImplemented
# Applies DCP composition rules to determine curvature in each argument.
# The overall curvature is the sum of the argument curvatures.
@staticmethod
def dcp_curvature(curvature, args, monotonicities):
if len(args) != len(monotonicities):
            raise Exception('The number of args must be'
                            ' equal to the number of monotonicities.')
arg_curvatures = []
for arg, monotonicity in zip(args, monotonicities):
arg_curv = u.monotonicity.dcp_curvature(monotonicity, curvature,
arg._dcp_attr.sign,
arg._dcp_attr.curvature)
arg_curvatures.append(arg_curv)
return reduce(lambda x,y: x+y, arg_curvatures)
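    # Note (illustrative, not a complete statement of the rule): under this
    # composition rule a convex atom that is nondecreasing in a convex argument
    # stays convex, an affine argument never changes the atom's curvature, and
    # other pairings may yield an unknown curvature.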
# Represent the atom as an affine objective and affine/basic SOC constraints.
def canonicalize(self):
# Constant atoms are treated as a leaf.
if self.is_constant():
# Parameterized expressions are evaluated later.
if self.parameters():
rows, cols = self.size
param = CallbackParam(lambda: self.value, rows, cols)
return param.canonical_form
# Non-parameterized expressions are evaluated immediately.
else:
return Constant(self.value).canonical_form
else:
arg_objs = []
constraints = []
for arg in self.args:
obj, constr = arg.canonical_form
arg_objs.append(obj)
constraints += constr
# Special info required by the graph implementation.
data = self.get_data()
graph_obj, graph_constr = self.graph_implementation(arg_objs,
self.size,
data)
return (graph_obj, constraints + graph_constr)
@abc.abstractmethod
def graph_implementation(self, arg_objs, size, data=None):
"""Reduces the atom to an affine expression and list of constraints.
Parameters
----------
arg_objs : list
LinExpr for each argument.
size : tuple
The size of the resulting expression.
data :
Additional data required by the atom.
Returns
-------
tuple
(LinOp for objective, list of constraints)
"""
return NotImplemented
def variables(self):
"""Returns all the variables present in the arguments.
"""
var_list = []
for arg in self.args:
var_list += arg.variables()
# Remove duplicates.
return list(set(var_list))
def parameters(self):
"""Returns all the parameters present in the arguments.
"""
param_list = []
for arg in self.args:
param_list += arg.parameters()
# Remove duplicates.
return list(set(param_list))
@property
def value(self):
# Catch the case when the expression is known to be
# zero through DCP analysis.
if self.is_zero():
result = intf.DEFAULT_INTF.zeros(*self.size)
else:
arg_values = []
for arg in self.args:
                # An argument without a value makes all higher level
# values None.
# But if the atom is constant with non-constant
# arguments it doesn't depend on its arguments,
# so it isn't None.
if arg.value is None and not self.is_constant():
return None
else:
arg_values.append(arg.value)
result = self.numeric(arg_values)
# Reduce to a scalar if possible.
if intf.size(result) == (1, 1):
return intf.scalar_value(result)
else:
return result
# Wraps an atom's numeric function that requires numpy ndarrays as input.
# Ensures both inputs and outputs are the correct matrix types.
@staticmethod
def numpy_numeric(numeric_func):
def new_numeric(self, values):
interface = intf.DEFAULT_INTF
values = [interface.const_to_matrix(v, convert_scalars=True)
for v in values]
result = numeric_func(self, values)
return intf.DEFAULT_INTF.const_to_matrix(result)
return new_numeric
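    # Illustrative usage (hypothetical subclass, offered only as a sketch): a
    # concrete atom would typically decorate its numeric implementation, e.g.
    #   @Atom.numpy_numeric
    #   def numeric(self, values):
    #       return np.square(values[0])
    # so that arguments arrive as matrices and the result is converted back.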
|
gpl-3.0
|
noba3/KoTos
|
addons/plugin.image.picasa/gdata/tlslite/utils/xmltools.py
|
101
|
7380
|
"""Helper functions for XML.
This module has misc. helper functions for working with XML DOM nodes."""
from compat import *
import os
import re
if os.name == "java":
# Only for Jython
from javax.xml.parsers import *
import java
builder = DocumentBuilderFactory.newInstance().newDocumentBuilder()
def parseDocument(s):
stream = java.io.ByteArrayInputStream(java.lang.String(s).getBytes())
return builder.parse(stream)
else:
from xml.dom import minidom
from xml.sax import saxutils
def parseDocument(s):
return minidom.parseString(s)
def parseAndStripWhitespace(s):
try:
element = parseDocument(s).documentElement
except BaseException, e:
raise SyntaxError(str(e))
stripWhitespace(element)
return element
#Goes through a DOM tree and removes whitespace besides child elements,
#as long as this whitespace is correctly tab-ified
def stripWhitespace(element, tab=0):
element.normalize()
lastSpacer = "\n" + ("\t"*tab)
spacer = lastSpacer + "\t"
#Zero children aren't allowed (i.e. <empty/>)
#This makes writing output simpler, and matches Canonical XML
if element.childNodes.length==0: #DON'T DO len(element.childNodes) - doesn't work in Jython
raise SyntaxError("Empty XML elements not allowed")
    #If there's a single child, it must be text content
if element.childNodes.length==1:
if element.firstChild.nodeType == element.firstChild.TEXT_NODE:
#If it's an empty element, remove
if element.firstChild.data == lastSpacer:
element.removeChild(element.firstChild)
return
#If not text content, give an error
elif element.firstChild.nodeType == element.firstChild.ELEMENT_NODE:
raise SyntaxError("Bad whitespace under '%s'" % element.tagName)
else:
raise SyntaxError("Unexpected node type in XML document")
    #Otherwise there are multiple child elements
child = element.firstChild
while child:
if child.nodeType == child.ELEMENT_NODE:
stripWhitespace(child, tab+1)
child = child.nextSibling
elif child.nodeType == child.TEXT_NODE:
if child == element.lastChild:
if child.data != lastSpacer:
raise SyntaxError("Bad whitespace under '%s'" % element.tagName)
elif child.data != spacer:
raise SyntaxError("Bad whitespace under '%s'" % element.tagName)
next = child.nextSibling
element.removeChild(child)
child = next
else:
raise SyntaxError("Unexpected node type in XML document")
def checkName(element, name):
if element.nodeType != element.ELEMENT_NODE:
raise SyntaxError("Missing element: '%s'" % name)
if name == None:
return
if element.tagName != name:
raise SyntaxError("Wrong element name: should be '%s', is '%s'" % (name, element.tagName))
def getChild(element, index, name=None):
if element.nodeType != element.ELEMENT_NODE:
raise SyntaxError("Wrong node type in getChild()")
child = element.childNodes.item(index)
if child == None:
raise SyntaxError("Missing child: '%s'" % name)
checkName(child, name)
return child
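#Returns a forward iterator over an element's children starting at index;
#its checkEnd() raises if any children remain unconsumed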
def getChildIter(element, index):
class ChildIter:
def __init__(self, element, index):
self.element = element
self.index = index
def next(self):
if self.index < len(self.element.childNodes):
retVal = self.element.childNodes.item(self.index)
self.index += 1
else:
retVal = None
return retVal
def checkEnd(self):
if self.index != len(self.element.childNodes):
raise SyntaxError("Too many elements under: '%s'" % self.element.tagName)
return ChildIter(element, index)
def getChildOrNone(element, index):
if element.nodeType != element.ELEMENT_NODE:
raise SyntaxError("Wrong node type in getChild()")
child = element.childNodes.item(index)
return child
def getLastChild(element, index, name=None):
if element.nodeType != element.ELEMENT_NODE:
raise SyntaxError("Wrong node type in getLastChild()")
child = element.childNodes.item(index)
if child == None:
raise SyntaxError("Missing child: '%s'" % name)
if child != element.lastChild:
raise SyntaxError("Too many elements under: '%s'" % element.tagName)
checkName(child, name)
return child
#Regular expressions for syntax-checking attribute and element content
nsRegEx = "http://trevp.net/cryptoID\Z"
cryptoIDRegEx = "([a-km-z3-9]{5}\.){3}[a-km-z3-9]{5}\Z"
urlRegEx = "http(s)?://.{1,100}\Z"
sha1Base64RegEx = "[A-Za-z0-9+/]{27}=\Z"
base64RegEx = "[A-Za-z0-9+/]+={0,4}\Z"
certsListRegEx = "(0)?(1)?(2)?(3)?(4)?(5)?(6)?(7)?(8)?(9)?\Z"
keyRegEx = "[A-Z]\Z"
keysListRegEx = "(A)?(B)?(C)?(D)?(E)?(F)?(G)?(H)?(I)?(J)?(K)?(L)?(M)?(N)?(O)?(P)?(Q)?(R)?(S)?(T)?(U)?(V)?(W)?(X)?(Y)?(Z)?\Z"
dateTimeRegEx = "\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ\Z"
shortStringRegEx = ".{1,100}\Z"
exprRegEx = "[a-zA-Z0-9 ,()]{1,200}\Z"
notAfterDeltaRegEx = "0|([1-9][0-9]{0,8})\Z" #A number from 0 to (1 billion)-1
booleanRegEx = "(true)|(false)"
def getReqAttribute(element, attrName, regEx=""):
if element.nodeType != element.ELEMENT_NODE:
raise SyntaxError("Wrong node type in getReqAttribute()")
value = element.getAttribute(attrName)
if not value:
raise SyntaxError("Missing Attribute: " + attrName)
if not re.match(regEx, value):
raise SyntaxError("Bad Attribute Value for '%s': '%s' " % (attrName, value))
element.removeAttribute(attrName)
return str(value) #de-unicode it; this is needed for bsddb, for example
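#Example (illustrative): getReqAttribute(element, "notAfterDelta", notAfterDeltaRegEx)
#accepts a value such as "86400" but rejects "-1" or "foo"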
def getAttribute(element, attrName, regEx=""):
if element.nodeType != element.ELEMENT_NODE:
raise SyntaxError("Wrong node type in getAttribute()")
value = element.getAttribute(attrName)
if value:
if not re.match(regEx, value):
raise SyntaxError("Bad Attribute Value for '%s': '%s' " % (attrName, value))
element.removeAttribute(attrName)
return str(value) #de-unicode it; this is needed for bsddb, for example
def checkNoMoreAttributes(element):
if element.nodeType != element.ELEMENT_NODE:
raise SyntaxError("Wrong node type in checkNoMoreAttributes()")
if element.attributes.length!=0:
raise SyntaxError("Extra attributes on '%s'" % element.tagName)
def getText(element, regEx=""):
textNode = element.firstChild
if textNode == None:
raise SyntaxError("Empty element '%s'" % element.tagName)
if textNode.nodeType != textNode.TEXT_NODE:
raise SyntaxError("Non-text node: '%s'" % element.tagName)
if not re.match(regEx, textNode.data):
raise SyntaxError("Bad Text Value for '%s': '%s' " % (element.tagName, textNode.data))
return str(textNode.data) #de-unicode it; this is needed for bsddb, for example
#Function for adding tabs to a string
def indent(s, steps, ch="\t"):
tabs = ch*steps
if s[-1] != "\n":
s = tabs + s.replace("\n", "\n"+tabs)
else:
s = tabs + s.replace("\n", "\n"+tabs)
s = s[ : -len(tabs)]
return s
def escape(s):
return saxutils.escape(s)
|
gpl-2.0
|
IgnitedAndExploded/pyfire
|
bin/server.py
|
1
|
1928
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Sample Server
This module starts the main TCP listener for XMPP clients
:copyright: 2011 by the pyfire Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
import os.path
# Add pyfire to namespace
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(path)
import errno
import functools
import contextlib
import socket
import thread
from zmq.eventloop import ioloop
from pyfire import configuration as config
from pyfire import zmq_forwarder, stanza_processor
from pyfire.auth.backends import DummyTrueValidator
from pyfire.server import XMPPServer, XMPPConnection
from pyfire.singletons import get_validation_registry, get_publisher
def start_client_listener():
publisher = get_publisher()
validation_registry = get_validation_registry()
validator = DummyTrueValidator()
validation_registry.register('dummy', validator)
io_loop = ioloop.IOLoop.instance()
server = XMPPServer(io_loop)
server.bind(config.get('listeners', 'clientport'),
config.get('listeners', 'ip'))
server.start()
try:
io_loop.start()
except (KeyboardInterrupt, SystemExit):
io_loop.stop()
print "exited cleanly"
def fire_up():
import pyfire.storage
import pyfire.contact
pyfire.storage.Base.metadata.create_all(pyfire.storage.engine)
    # create a forwarder/router for internal communication
fwd = zmq_forwarder.ZMQForwarder(config.get('ipc', 'forwarder'))
thread.start_new_thread(fwd.start, ())
    # create a stanza processor for local domains
stanza_proc = stanza_processor.StanzaProcessor(config.getlist('listeners', 'domains'))
thread.start_new_thread(stanza_proc.start, ())
    # start listener for incoming connections
start_client_listener()
if __name__ == '__main__':
fire_up()
|
bsd-3-clause
|
cneill/designate
|
designate/storage/impl_sqlalchemy/migrate_repo/versions/039_support_soa_records.py
|
8
|
6035
|
# Copyright (c) 2014 Rackspace Hosting
#
# Author: Betsy Luzader <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
from sqlalchemy import MetaData, Table, Enum
from sqlalchemy.sql import select
from migrate.changeset.constraint import UniqueConstraint
from oslo_db import exception
from designate import utils
meta = MetaData()
def _build_soa_record(zone, servers):
return "%s %s. %d %d %d %d %d" % (servers[0]['name'],
zone['email'].replace("@", "."),
zone['serial'],
zone['refresh'],
zone['retry'],
zone['expire'],
zone['minimum'])
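# Illustrative example (hypothetical values): when servers[0]['name'] is
# 'ns1.example.org.' and the zone has email '[email protected]', serial 1,
# refresh 3600, retry 600, expire 86400 and minimum 300, this returns
# 'ns1.example.org. admin.example.org. 1 3600 600 86400 300'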
def _build_hash(recordset_id, data):
md5 = hashlib.md5()
md5.update("%s:%s:%s" % (recordset_id, data, None))
return md5.hexdigest()
def upgrade(migrate_engine):
meta.bind = migrate_engine
# Get associated database tables
servers_table = Table('servers', meta, autoload=True)
zones_table = Table('domains', meta, autoload=True)
records_table = Table('records', meta, autoload=True)
dialect = migrate_engine.url.get_dialect().name
RECORD_TYPES = ['A', 'AAAA', 'CNAME', 'MX', 'SRV', 'TXT', 'SPF', 'NS',
'PTR', 'SSHFP', 'SOA']
recordsets_table = Table('recordsets', meta, autoload=True)
dialect = migrate_engine.url.get_dialect().name
if dialect.startswith("postgresql"):
with migrate_engine.connect() as conn:
conn.execution_options(isolation_level="AUTOCOMMIT")
conn.execute(
"ALTER TYPE recordset_types ADD VALUE 'SOA' "
"AFTER 'SSHFP'")
conn.close()
recordsets_table.c.type.alter(type=Enum(name='recordset_types',
*RECORD_TYPES))
# Re-add constraint for sqlite
if dialect.startswith('sqlite'):
constraint = UniqueConstraint('domain_id', 'name', 'type',
name='unique_recordset',
table=recordsets_table)
constraint.create()
# Get the server names which are used to create NS & SOA records
servers = select(
columns=[
servers_table.c.name
]
).execute().fetchall()
# Get all the zones
zones = select(
columns=[
zones_table.c.id,
zones_table.c.created_at,
zones_table.c.tenant_id,
zones_table.c.name,
zones_table.c.email,
zones_table.c.serial,
zones_table.c.refresh,
zones_table.c.retry,
zones_table.c.expire,
zones_table.c.minimum
]
).execute().fetchall()
# NOTE(per kiall): Since we need a unique UUID for each recordset etc, and
# need to maintain cross DB compatibility, we're stuck doing
# this in code
for zone in zones:
# Create the SOA Recordset, returning the UUID primary key to be used
# in creating the associated SOA Record
soa_pk = recordsets_table.insert().execute(
id=utils.generate_uuid().replace('-', ''),
created_at=zone.created_at,
domain_id=zone.id,
tenant_id=zone.tenant_id,
name=zone.name,
type='SOA',
version=1
).inserted_primary_key[0]
# Create the SOA Record
soa_data = _build_soa_record(zone, servers)
records_table.insert().execute(
id=utils.generate_uuid().replace('-', ''),
created_at=zone.created_at,
domain_id=zone.id,
tenant_id=zone.tenant_id,
recordset_id=soa_pk,
data=soa_data,
hash=_build_hash(soa_pk, soa_data),
managed=True,
version=1
)
        # Create the NS Recordset, returning the UUID primary key to be used
# in creating the associated NS record
# NS records could already exist, so check for duplicates
try:
ns_pk = recordsets_table.insert().execute(
id=utils.generate_uuid().replace('-', ''),
created_at=zone.created_at,
domain_id=zone.id,
tenant_id=zone.tenant_id,
name=zone.name,
type='NS',
version=1
).inserted_primary_key[0]
except exception.DBDuplicateEntry:
# If there's already an NS recordset, retrieve it
ns_pk = select([recordsets_table.c.id])\
.where(recordsets_table.c.domain_id == zone.id)\
.where(recordsets_table.c.tenant_id == zone.tenant_id)\
.where(recordsets_table.c.name == zone.name)\
.where(recordsets_table.c.type == 'NS')\
.execute().scalar()
# Create the NS records, one for each server
for server in servers:
records_table.insert().execute(
id=utils.generate_uuid().replace('-', ''),
created_at=zone.created_at,
domain_id=zone.id,
tenant_id=zone.tenant_id,
recordset_id=ns_pk,
data=server.name,
hash=_build_hash(ns_pk, server.name),
managed=True,
version=1
)
def downgrade(migrate_engine):
pass
|
apache-2.0
|
wanam/Adam-Kernel-GS5-LTE
|
tools/perf/scripts/python/sched-migration.py
|
11215
|
11670
|
#!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
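	# Build the snapshot after a sched_switch: drop prev if it went to
	# sleep, keep (or add) it while it is still runnable, and make sure
	# next is on the runqueue.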
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
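	# Binary search over the ordered slices for the one whose [start, end]
	# interval contains ts; returns its index, or -1 if none covers ts.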
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
|
gpl-2.0
|
etkirsch/scikit-learn
|
sklearn/metrics/cluster/supervised.py
|
207
|
27395
|
"""Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
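    # Worked example (consistent with the 0.57... doctest above): for
    # labels_true=[0, 0, 1, 2] and labels_pred=[0, 0, 1, 1] the contingency
    # matrix is [[2, 0], [0, 1], [0, 1]], so sum_comb_c=1, sum_comb_k=2,
    # sum_comb=1, prod_comb=2/6 and mean_comb=1.5, giving
    # ARI = (1 - 1/3) / (1.5 - 1/3) = 0.571...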
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
    the clustering labeling to evaluate given the knowledge of the Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all classes members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If classes members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
    classes are homogeneous but unnecessary splits harm completeness
    and thus penalize the V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
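    # Each nonzero term below equals P(i, j) * log(P(i, j) / (P(i) * P(j))):
    # with N = contingency_sum it expands to
    # (n_ij / N) * (log(n_ij) - log(N)) + (n_ij / N) * (2 * log(N) - log(n_i * n_j)).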
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
    ami: float (upper-bounded by 1.0)
The AMI returns a value of 1 when the two partitions are identical
(ie perfectly matched). Random partitions (independent labellings) have
an expected AMI around 0 on average hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
    mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
    # Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
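    # e.g. entropy([0, 0, 1, 1]) == log(2) ~= 0.693 (natural logarithm)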
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
|
bsd-3-clause
|
MRigal/django
|
django/contrib/gis/serializers/geojson.py
|
275
|
2672
|
from __future__ import unicode_literals
from django.contrib.gis.gdal import HAS_GDAL
from django.core.serializers.base import (
SerializationError, SerializerDoesNotExist,
)
from django.core.serializers.json import Serializer as JSONSerializer
if HAS_GDAL:
from django.contrib.gis.gdal import CoordTransform, SpatialReference
class Serializer(JSONSerializer):
"""
Convert a queryset to GeoJSON, http://geojson.org/
"""
def _init_options(self):
super(Serializer, self)._init_options()
self.geometry_field = self.json_kwargs.pop('geometry_field', None)
self.srid = self.json_kwargs.pop('srid', 4326)
def start_serialization(self):
self._init_options()
self._cts = {} # cache of CoordTransform's
self.stream.write(
'{"type": "FeatureCollection", "crs": {"type": "name", "properties": {"name": "EPSG:%d"}},'
' "features": [' % self.srid)
def end_serialization(self):
self.stream.write(']}')
def start_object(self, obj):
super(Serializer, self).start_object(obj)
self._geometry = None
if self.geometry_field is None:
# Find the first declared geometry field
for field in obj._meta.fields:
if hasattr(field, 'geom_type'):
self.geometry_field = field.name
break
def get_dump_object(self, obj):
data = {
"type": "Feature",
"properties": self._current,
}
if self._geometry:
if self._geometry.srid != self.srid:
                # If needed, transform the geometry into the SRID used for the whole GeoJSON output
if not HAS_GDAL:
raise SerializationError(
'Unable to convert geometry to SRID %s when GDAL is not installed.' % self.srid
)
if self._geometry.srid not in self._cts:
srs = SpatialReference(self.srid)
self._cts[self._geometry.srid] = CoordTransform(self._geometry.srs, srs)
self._geometry.transform(self._cts[self._geometry.srid])
data["geometry"] = eval(self._geometry.geojson)
else:
data["geometry"] = None
return data
def handle_field(self, obj, field):
if field.name == self.geometry_field:
self._geometry = field.value_from_object(obj)
else:
super(Serializer, self).handle_field(obj, field)
class Deserializer(object):
def __init__(self, *args, **kwargs):
raise SerializerDoesNotExist("geojson is a serialization-only serializer")
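# Usage sketch (illustrative; the model and field names are hypothetical):
#   from django.core import serializers
#   geojson = serializers.serialize('geojson', City.objects.all(),
#                                   geometry_field='point', srid=4326)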
|
bsd-3-clause
|
tailorian/Sick-Beard
|
lib/hachoir_metadata/filter.py
|
90
|
1668
|
from lib.hachoir_metadata.timezone import UTC
from datetime import date, datetime
# Year in 1850..2030
MIN_YEAR = 1850
MAX_YEAR = 2030
class Filter:
def __init__(self, valid_types, min=None, max=None):
self.types = valid_types
self.min = min
self.max = max
def __call__(self, value):
if not isinstance(value, self.types):
return True
if self.min is not None and value < self.min:
return False
if self.max is not None and self.max < value:
return False
return True
class NumberFilter(Filter):
def __init__(self, min=None, max=None):
Filter.__init__(self, (int, long, float), min, max)
class DatetimeFilter(Filter):
def __init__(self, min=None, max=None):
Filter.__init__(self, (date, datetime),
datetime(MIN_YEAR, 1, 1),
datetime(MAX_YEAR, 12, 31))
self.min_date = date(MIN_YEAR, 1, 1)
self.max_date = date(MAX_YEAR, 12, 31)
self.min_tz = datetime(MIN_YEAR, 1, 1, tzinfo=UTC)
self.max_tz = datetime(MAX_YEAR, 12, 31, tzinfo=UTC)
def __call__(self, value):
"""
Use different min/max values depending on value type
(datetime with timezone, datetime or date).
"""
if not isinstance(value, self.types):
return True
if hasattr(value, "tzinfo") and value.tzinfo:
return (self.min_tz <= value <= self.max_tz)
elif isinstance(value, datetime):
return (self.min <= value <= self.max)
else:
return (self.min_date <= value <= self.max_date)
DATETIME_FILTER = DatetimeFilter()
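# e.g. DATETIME_FILTER(date(1800, 1, 1)) -> False, DATETIME_FILTER(date(1980, 1, 1)) -> True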
|
gpl-3.0
|
cdjones32/vertx-web
|
src/test/sockjs-protocol/venv/lib/python2.7/site-packages/setuptools/command/bdist_egg.py
|
286
|
18718
|
"""setuptools.command.bdist_egg
Build .egg distributions"""
# This module should be kept compatible with Python 2.3
import sys, os, marshal
from setuptools import Command
from distutils.dir_util import remove_tree, mkpath
try:
# Python 2.7 or >=3.2
from sysconfig import get_path, get_python_version
def _get_purelib():
return get_path("purelib")
except ImportError:
from distutils.sysconfig import get_python_lib, get_python_version
def _get_purelib():
return get_python_lib(False)
from distutils import log
from distutils.errors import DistutilsSetupError
from pkg_resources import get_build_platform, Distribution, ensure_directory
from pkg_resources import EntryPoint
from types import CodeType
from setuptools.compat import basestring, next
from setuptools.extension import Library
def strip_module(filename):
if '.' in filename:
filename = os.path.splitext(filename)[0]
if filename.endswith('module'):
filename = filename[:-6]
return filename
def write_stub(resource, pyfile):
f = open(pyfile,'w')
f.write('\n'.join([
"def __bootstrap__():",
" global __bootstrap__, __loader__, __file__",
" import sys, pkg_resources, imp",
" __file__ = pkg_resources.resource_filename(__name__,%r)"
% resource,
" __loader__ = None; del __bootstrap__, __loader__",
" imp.load_dynamic(__name__,__file__)",
"__bootstrap__()",
"" # terminal \n
]))
f.close()
# stub __init__.py for packages distributed without one
NS_PKG_STUB = '__import__("pkg_resources").declare_namespace(__name__)'
class bdist_egg(Command):
description = "create an \"egg\" distribution"
user_options = [
('bdist-dir=', 'b',
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_build_platform()),
('exclude-source-files', None,
"remove all .py files from the generated egg"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
]
boolean_options = [
'keep-temp', 'skip-build', 'exclude-source-files'
]
def initialize_options (self):
self.bdist_dir = None
self.plat_name = None
self.keep_temp = 0
self.dist_dir = None
self.skip_build = 0
self.egg_output = None
self.exclude_source_files = None
def finalize_options(self):
ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
self.egg_info = ei_cmd.egg_info
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'egg')
if self.plat_name is None:
self.plat_name = get_build_platform()
self.set_undefined_options('bdist',('dist_dir', 'dist_dir'))
if self.egg_output is None:
# Compute filename of the output egg
basename = Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version,
get_python_version(),
self.distribution.has_ext_modules() and self.plat_name
).egg_name()
self.egg_output = os.path.join(self.dist_dir, basename+'.egg')
def do_install_data(self):
# Hack for packages that install data to install's --install-lib
self.get_finalized_command('install').install_lib = self.bdist_dir
site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
old, self.distribution.data_files = self.distribution.data_files,[]
for item in old:
if isinstance(item,tuple) and len(item)==2:
if os.path.isabs(item[0]):
realpath = os.path.realpath(item[0])
normalized = os.path.normcase(realpath)
if normalized==site_packages or normalized.startswith(
site_packages+os.sep
):
item = realpath[len(site_packages)+1:], item[1]
# XXX else: raise ???
self.distribution.data_files.append(item)
try:
log.info("installing package data to %s" % self.bdist_dir)
self.call_command('install_data', force=0, root=None)
finally:
self.distribution.data_files = old
def get_outputs(self):
return [self.egg_output]
def call_command(self,cmdname,**kw):
"""Invoke reinitialized command `cmdname` with keyword args"""
for dirname in INSTALL_DIRECTORY_ATTRS:
kw.setdefault(dirname,self.bdist_dir)
kw.setdefault('skip_build',self.skip_build)
kw.setdefault('dry_run', self.dry_run)
cmd = self.reinitialize_command(cmdname, **kw)
self.run_command(cmdname)
return cmd
def run(self):
# Generate metadata first
self.run_command("egg_info")
# We run install_lib before install_data, because some data hacks
# pull their data path from the install_lib command.
log.info("installing library code to %s" % self.bdist_dir)
instcmd = self.get_finalized_command('install')
old_root = instcmd.root; instcmd.root = None
if self.distribution.has_c_libraries() and not self.skip_build:
self.run_command('build_clib')
cmd = self.call_command('install_lib', warn_dir=0)
instcmd.root = old_root
all_outputs, ext_outputs = self.get_ext_outputs()
self.stubs = []
to_compile = []
for (p,ext_name) in enumerate(ext_outputs):
filename,ext = os.path.splitext(ext_name)
pyfile = os.path.join(self.bdist_dir, strip_module(filename)+'.py')
self.stubs.append(pyfile)
log.info("creating stub loader for %s" % ext_name)
if not self.dry_run:
write_stub(os.path.basename(ext_name), pyfile)
to_compile.append(pyfile)
ext_outputs[p] = ext_name.replace(os.sep,'/')
to_compile.extend(self.make_init_files())
if to_compile:
cmd.byte_compile(to_compile)
if self.distribution.data_files:
self.do_install_data()
# Make the EGG-INFO directory
archive_root = self.bdist_dir
egg_info = os.path.join(archive_root,'EGG-INFO')
self.mkpath(egg_info)
if self.distribution.scripts:
script_dir = os.path.join(egg_info, 'scripts')
log.info("installing scripts to %s" % script_dir)
self.call_command('install_scripts',install_dir=script_dir,no_ep=1)
self.copy_metadata_to(egg_info)
native_libs = os.path.join(egg_info, "native_libs.txt")
if all_outputs:
log.info("writing %s" % native_libs)
if not self.dry_run:
ensure_directory(native_libs)
libs_file = open(native_libs, 'wt')
libs_file.write('\n'.join(all_outputs))
libs_file.write('\n')
libs_file.close()
elif os.path.isfile(native_libs):
log.info("removing %s" % native_libs)
if not self.dry_run:
os.unlink(native_libs)
write_safety_flag(
os.path.join(archive_root,'EGG-INFO'), self.zip_safe()
)
if os.path.exists(os.path.join(self.egg_info,'depends.txt')):
log.warn(
"WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
"Use the install_requires/extras_require setup() args instead."
)
if self.exclude_source_files:
self.zap_pyfiles()
# Make the archive
make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
dry_run=self.dry_run, mode=self.gen_header())
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
# Add to 'Distribution.dist_files' so that the "upload" command works
getattr(self.distribution,'dist_files',[]).append(
('bdist_egg',get_python_version(),self.egg_output))
def zap_pyfiles(self):
log.info("Removing .py files from temporary directory")
for base,dirs,files in walk_egg(self.bdist_dir):
for name in files:
if name.endswith('.py'):
path = os.path.join(base,name)
log.debug("Deleting %s", path)
os.unlink(path)
def zip_safe(self):
safe = getattr(self.distribution,'zip_safe',None)
if safe is not None:
return safe
log.warn("zip_safe flag not set; analyzing archive contents...")
return analyze_egg(self.bdist_dir, self.stubs)
def make_init_files(self):
"""Create missing package __init__ files"""
init_files = []
for base,dirs,files in walk_egg(self.bdist_dir):
if base==self.bdist_dir:
# don't put an __init__ in the root
continue
for name in files:
if name.endswith('.py'):
if '__init__.py' not in files:
pkg = base[len(self.bdist_dir)+1:].replace(os.sep,'.')
if self.distribution.has_contents_for(pkg):
log.warn("Creating missing __init__.py for %s",pkg)
filename = os.path.join(base,'__init__.py')
if not self.dry_run:
f = open(filename,'w'); f.write(NS_PKG_STUB)
f.close()
init_files.append(filename)
break
else:
# not a package, don't traverse to subdirectories
dirs[:] = []
return init_files
def gen_header(self):
epm = EntryPoint.parse_map(self.distribution.entry_points or '')
ep = epm.get('setuptools.installation',{}).get('eggsecutable')
if ep is None:
return 'w' # not an eggsecutable, do it the usual way.
if not ep.attrs or ep.extras:
raise DistutilsSetupError(
"eggsecutable entry point (%r) cannot have 'extras' "
"or refer to a module" % (ep,)
)
pyver = sys.version[:3]
pkg = ep.module_name
full = '.'.join(ep.attrs)
base = ep.attrs[0]
basename = os.path.basename(self.egg_output)
header = (
"#!/bin/sh\n"
'if [ `basename $0` = "%(basename)s" ]\n'
'then exec python%(pyver)s -c "'
"import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
"from %(pkg)s import %(base)s; sys.exit(%(full)s())"
'" "$@"\n'
'else\n'
' echo $0 is not the correct name for this egg file.\n'
' echo Please rename it back to %(basename)s and try again.\n'
' exec false\n'
'fi\n'
) % locals()
if not self.dry_run:
mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
f = open(self.egg_output, 'w')
f.write(header)
f.close()
return 'a'
def copy_metadata_to(self, target_dir):
"Copy metadata (egg info) to the target_dir"
# normalize the path (so that a forward-slash in egg_info will
# match using startswith below)
norm_egg_info = os.path.normpath(self.egg_info)
prefix = os.path.join(norm_egg_info,'')
for path in self.ei_cmd.filelist.files:
if path.startswith(prefix):
target = os.path.join(target_dir, path[len(prefix):])
ensure_directory(target)
self.copy_file(path, target)
def get_ext_outputs(self):
"""Get a list of relative paths to C extensions in the output distro"""
all_outputs = []
ext_outputs = []
paths = {self.bdist_dir:''}
for base, dirs, files in os.walk(self.bdist_dir):
for filename in files:
if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
all_outputs.append(paths[base]+filename)
for filename in dirs:
paths[os.path.join(base,filename)] = paths[base]+filename+'/'
if self.distribution.has_ext_modules():
build_cmd = self.get_finalized_command('build_ext')
for ext in build_cmd.extensions:
if isinstance(ext,Library):
continue
fullname = build_cmd.get_ext_fullname(ext.name)
filename = build_cmd.get_ext_filename(fullname)
if not os.path.basename(filename).startswith('dl-'):
if os.path.exists(os.path.join(self.bdist_dir,filename)):
ext_outputs.append(filename)
return all_outputs, ext_outputs
NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
def walk_egg(egg_dir):
"""Walk an unpacked egg's contents, skipping the metadata directory"""
walker = os.walk(egg_dir)
base,dirs,files = next(walker)
if 'EGG-INFO' in dirs:
dirs.remove('EGG-INFO')
yield base,dirs,files
for bdf in walker:
yield bdf
def analyze_egg(egg_dir, stubs):
# check for existing flag in EGG-INFO
for flag,fn in safety_flags.items():
if os.path.exists(os.path.join(egg_dir,'EGG-INFO',fn)):
return flag
if not can_scan(): return False
safe = True
for base, dirs, files in walk_egg(egg_dir):
for name in files:
if name.endswith('.py') or name.endswith('.pyw'):
continue
elif name.endswith('.pyc') or name.endswith('.pyo'):
# always scan, even if we already know we're not safe
safe = scan_module(egg_dir, base, name, stubs) and safe
return safe
def write_safety_flag(egg_dir, safe):
# Write or remove zip safety flag file(s)
for flag,fn in safety_flags.items():
fn = os.path.join(egg_dir, fn)
if os.path.exists(fn):
if safe is None or bool(safe) != flag:
os.unlink(fn)
elif safe is not None and bool(safe)==flag:
f=open(fn,'wt'); f.write('\n'); f.close()
safety_flags = {
True: 'zip-safe',
False: 'not-zip-safe',
}
def scan_module(egg_dir, base, name, stubs):
"""Check whether module possibly uses unsafe-for-zipfile stuff"""
filename = os.path.join(base,name)
if filename[:-1] in stubs:
return True # Extension module
pkg = base[len(egg_dir)+1:].replace(os.sep,'.')
module = pkg+(pkg and '.' or '')+os.path.splitext(name)[0]
if sys.version_info < (3, 3):
skip = 8 # skip magic & date
else:
skip = 12 # skip magic & date & file size
f = open(filename,'rb'); f.read(skip)
code = marshal.load(f); f.close()
safe = True
symbols = dict.fromkeys(iter_symbols(code))
for bad in ['__file__', '__path__']:
if bad in symbols:
log.warn("%s: module references %s", module, bad)
safe = False
if 'inspect' in symbols:
for bad in [
'getsource', 'getabsfile', 'getsourcefile', 'getfile',
'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
'getinnerframes', 'getouterframes', 'stack', 'trace'
]:
if bad in symbols:
log.warn("%s: module MAY be using inspect.%s", module, bad)
safe = False
if '__name__' in symbols and '__main__' in symbols and '.' not in module:
if sys.version[:3]=="2.4": # -m works w/zipfiles in 2.5
log.warn("%s: top-level module may be 'python -m' script", module)
safe = False
return safe
def iter_symbols(code):
"""Yield names and strings used by `code` and its nested code objects"""
for name in code.co_names: yield name
for const in code.co_consts:
if isinstance(const,basestring):
yield const
elif isinstance(const,CodeType):
for name in iter_symbols(const):
yield name
def can_scan():
if not sys.platform.startswith('java') and sys.platform != 'cli':
# CPython, PyPy, etc.
return True
log.warn("Unable to analyze compiled code on this platform.")
log.warn("Please ask the author to include a 'zip_safe'"
" setting (either True or False) in the package's setup.py")
# Attribute names of options for commands that might need to be convinced to
# install to the egg build directory
INSTALL_DIRECTORY_ATTRS = [
'install_lib', 'install_dir', 'install_data', 'install_base'
]
def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=None,
mode='w'
):
"""Create a zip file from all the files under 'base_dir'. The output
zip file will be named 'base_dir' + ".zip". Uses either the "zipfile"
Python module (if available) or the InfoZIP "zip" utility (if installed
and found on the default search path). If neither tool is available,
raises DistutilsExecError. Returns the name of the output zip file.
"""
import zipfile
mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
def visit(z, dirname, names):
for name in names:
path = os.path.normpath(os.path.join(dirname, name))
if os.path.isfile(path):
p = path[len(base_dir)+1:]
if not dry_run:
z.write(path, p)
log.debug("adding '%s'" % p)
if compress is None:
compress = (sys.version>="2.4") # avoid 2.3 zipimport bug when 64 bits
compression = [zipfile.ZIP_STORED, zipfile.ZIP_DEFLATED][bool(compress)]
if not dry_run:
z = zipfile.ZipFile(zip_filename, mode, compression=compression)
for dirname, dirs, files in os.walk(base_dir):
visit(z, dirname, files)
z.close()
else:
for dirname, dirs, files in os.walk(base_dir):
visit(None, dirname, files)
return zip_filename
#
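# Illustrative sketch (added for context, not part of the original module):
# make_zipfile() can be exercised on its own to archive a directory tree.
# The temporary paths below are hypothetical; the demo only runs when the
# module is executed directly with its dependencies importable.
if __name__ == '__main__':
    import tempfile
    src_dir = tempfile.mkdtemp()
    open(os.path.join(src_dir, 'dummy.txt'), 'w').close()
    out_dir = tempfile.mkdtemp()
    # Builds a compressed archive of src_dir and returns its path.
    print(make_zipfile(os.path.join(out_dir, 'example.egg'), src_dir))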
|
apache-2.0
|
blaggacao/odoo
|
openerp/addons/test_converter/tests/test_html.py
|
61
|
13714
|
# -*- encoding: utf-8 -*-
import json
import os
import datetime
from lxml import etree
from openerp.tests import common
from openerp.tools import html_escape as e
from openerp.addons.base.ir import ir_qweb
directory = os.path.dirname(__file__)
class TestExport(common.TransactionCase):
_model = None
def setUp(self):
super(TestExport, self).setUp()
self.Model = self.registry(self._model)
self.columns = self.Model._all_columns
def get_column(self, name):
return self.Model._all_columns[name].column
def get_converter(self, name, type=None):
column = self.get_column(name)
for postfix in type, column._type, '':
fs = ['ir', 'qweb', 'field']
if postfix is None: continue
if postfix: fs.append(postfix)
try:
model = self.registry('.'.join(fs))
break
except KeyError: pass
return lambda value, options=None, context=None: e(model.value_to_html(
self.cr, self.uid, value, column, options=options, context=context))
class TestBasicExport(TestExport):
_model = 'test_converter.test_model'
class TestCharExport(TestBasicExport):
def test_char(self):
converter = self.get_converter('char')
value = converter('foo')
self.assertEqual(value, 'foo')
value = converter("foo<bar>")
self.assertEqual(value, "foo<bar>")
class TestIntegerExport(TestBasicExport):
def test_integer(self):
converter = self.get_converter('integer')
value = converter(42)
self.assertEqual(value, "42")
class TestFloatExport(TestBasicExport):
def setUp(self):
super(TestFloatExport, self).setUp()
self.registry('res.lang').write(self.cr, self.uid, [1], {
'grouping': '[3,0]'
})
def test_float(self):
converter = self.get_converter('float')
value = converter(42.0)
self.assertEqual(value, "42.0")
value = converter(42.0100)
self.assertEqual(value, "42.01")
value = converter(42.01234)
self.assertEqual(value, "42.01234")
value = converter(1234567.89)
self.assertEqual(value, '1,234,567.89')
def test_numeric(self):
converter = self.get_converter('numeric')
value = converter(42.0)
self.assertEqual(value, '42.00')
value = converter(42.01234)
self.assertEqual(value, '42.01')
class TestCurrencyExport(TestExport):
_model = 'test_converter.monetary'
def setUp(self):
super(TestCurrencyExport, self).setUp()
self.Currency = self.registry('res.currency')
self.base = self.create(self.Currency, name="Source", symbol=u'source')
def create(self, model, context=None, **values):
return model.browse(
self.cr, self.uid,
model.create(self.cr, self.uid, values, context=context),
context=context)
def convert(self, obj, dest):
converter = self.registry('ir.qweb.field.monetary')
options = {
'widget': 'monetary',
'display_currency': 'c2'
}
context = dict(inherit_branding=True)
converted = converter.to_html(
self.cr, self.uid, 'value', obj, options,
etree.Element('span'),
{'field': 'obj.value', 'field-options': json.dumps(options)},
'', ir_qweb.QWebContext(self.cr, self.uid, {'obj': obj, 'c2': dest, }),
context=context,
)
return converted
def test_currency_post(self):
currency = self.create(self.Currency, name="Test", symbol=u"test")
obj = self.create(self.Model, value=0.12)
converted = self.convert(obj, dest=currency)
self.assertEqual(
converted,
'<span data-oe-model="{obj._model._name}" data-oe-id="{obj.id}" '
'data-oe-field="value" data-oe-type="monetary" '
'data-oe-expression="obj.value">'
'<span class="oe_currency_value">0.12</span>'
' {symbol}</span>'.format(
obj=obj,
symbol=currency.symbol.encode('utf-8')
),)
def test_currency_pre(self):
currency = self.create(
self.Currency, name="Test", symbol=u"test", position='before')
obj = self.create(self.Model, value=0.12)
converted = self.convert(obj, dest=currency)
self.assertEqual(
converted,
'<span data-oe-model="{obj._model._name}" data-oe-id="{obj.id}" '
'data-oe-field="value" data-oe-type="monetary" '
'data-oe-expression="obj.value">'
'{symbol} '
'<span class="oe_currency_value">0.12</span>'
'</span>'.format(
obj=obj,
symbol=currency.symbol.encode('utf-8')
),)
def test_currency_precision(self):
""" Precision should be the currency's, not the float field's
"""
currency = self.create(self.Currency, name="Test", symbol=u"test",)
obj = self.create(self.Model, value=0.1234567)
converted = self.convert(obj, dest=currency)
self.assertEqual(
converted,
'<span data-oe-model="{obj._model._name}" data-oe-id="{obj.id}" '
'data-oe-field="value" data-oe-type="monetary" '
'data-oe-expression="obj.value">'
'<span class="oe_currency_value">0.12</span>'
' {symbol}</span>'.format(
obj=obj,
symbol=currency.symbol.encode('utf-8')
),)
class TestTextExport(TestBasicExport):
def test_text(self):
converter = self.get_converter('text')
value = converter("This is my text-kai")
self.assertEqual(value, "This is my text-kai")
value = converter("""
. The current line (address) in the buffer.
$ The last line in the buffer.
n The nth, line in the buffer where n is a number in the range [0,$].
$ The last line in the buffer.
- The previous line. This is equivalent to -1 and may be repeated with cumulative effect.
-n The nth previous line, where n is a non-negative number.
+ The next line. This is equivalent to +1 and may be repeated with cumulative effect.
""")
self.assertEqual(value, """<br>
. The current line (address) in the buffer.<br>
$ The last line in the buffer.<br>
n The nth, line in the buffer where n is a number in the range [0,$].<br>
$ The last line in the buffer.<br>
- The previous line. This is equivalent to -1 and may be repeated with cumulative effect.<br>
-n The nth previous line, where n is a non-negative number.<br>
+ The next line. This is equivalent to +1 and may be repeated with cumulative effect.<br>
""")
value = converter("""
fgdkls;hjas;lj <b>fdslkj</b> d;lasjfa lkdja <a href=http://spam.com>lfks</a>
fldkjsfhs <i style="color: red"><a href="http://spamspam.com">fldskjh</a></i>
""")
self.assertEqual(value, """<br>
fgdkls;hjas;lj <b>fdslkj</b> d;lasjfa lkdja <a href=http://spam.com>lfks</a><br>
fldkjsfhs <i style="color: red"><a href="http://spamspam.com">fldskjh</a></i><br>
""")
class TestMany2OneExport(TestBasicExport):
def test_many2one(self):
Sub = self.registry('test_converter.test_model.sub')
id0 = self.Model.create(self.cr, self.uid, {
'many2one': Sub.create(self.cr, self.uid, {'name': "Foo"})
})
id1 = self.Model.create(self.cr, self.uid, {
'many2one': Sub.create(self.cr, self.uid, {'name': "Fo<b>o</b>"})
})
def converter(record):
column = self.get_column('many2one')
model = self.registry('ir.qweb.field.many2one')
return e(model.record_to_html(
self.cr, self.uid, 'many2one', record, column))
value = converter(self.Model.browse(self.cr, self.uid, id0))
self.assertEqual(value, "Foo")
value = converter(self.Model.browse(self.cr, self.uid, id1))
self.assertEqual(value, "Fo<b>o</b>")
class TestBinaryExport(TestBasicExport):
def test_image(self):
column = self.get_column('binary')
converter = self.registry('ir.qweb.field.image')
with open(os.path.join(directory, 'test_vectors', 'image'), 'rb') as f:
content = f.read()
encoded_content = content.encode('base64')
value = e(converter.value_to_html(
self.cr, self.uid, encoded_content, column))
self.assertEqual(
value, '<img src="data:image/jpeg;base64,%s">' % (
encoded_content
))
with open(os.path.join(directory, 'test_vectors', 'pdf'), 'rb') as f:
content = f.read()
with self.assertRaises(ValueError):
e(converter.value_to_html(
self.cr, self.uid, 'binary', content.encode('base64'), column))
with open(os.path.join(directory, 'test_vectors', 'pptx'), 'rb') as f:
content = f.read()
with self.assertRaises(ValueError):
e(converter.value_to_html(
self.cr, self.uid, 'binary', content.encode('base64'), column))
class TestSelectionExport(TestBasicExport):
def test_selection(self):
[record] = self.Model.browse(self.cr, self.uid, [self.Model.create(self.cr, self.uid, {
'selection': 2,
'selection_str': 'C',
})])
column_name = 'selection'
column = self.get_column(column_name)
converter = self.registry('ir.qweb.field.selection')
value = converter.record_to_html(
self.cr, self.uid, column_name, record, column)
self.assertEqual(value, "réponse B")
column_name = 'selection_str'
column = self.get_column(column_name)
value = converter.record_to_html(
self.cr, self.uid, column_name, record, column)
self.assertEqual(value, "Qu'est-ce qu'il fout ce maudit pancake, tabernacle ?")
class TestHTMLExport(TestBasicExport):
def test_html(self):
converter = self.get_converter('html')
input = '<span>span</span>'
value = converter(input)
self.assertEqual(value, input)
class TestDatetimeExport(TestBasicExport):
def setUp(self):
super(TestDatetimeExport, self).setUp()
# set user tz to known value
Users = self.registry('res.users')
Users.write(self.cr, self.uid, self.uid, {
'tz': 'Pacific/Niue'
}, context=None)
def test_date(self):
converter = self.get_converter('date')
value = converter('2011-05-03')
# default lang/format is US
self.assertEqual(value, '05/03/2011')
def test_datetime(self):
converter = self.get_converter('datetime')
value = converter('2011-05-03 11:12:13')
# default lang/format is US
self.assertEqual(value, '05/03/2011 00:12:13')
def test_custom_format(self):
converter = self.get_converter('datetime')
converter2 = self.get_converter('date')
opts = {'format': 'MMMM d'}
value = converter('2011-03-02 11:12:13', options=opts)
value2 = converter2('2001-03-02', options=opts)
self.assertEqual(
value,
'March 2'
)
self.assertEqual(
value2,
'March 2'
)
class TestDurationExport(TestBasicExport):
def setUp(self):
super(TestDurationExport, self).setUp()
# needs to have lang installed otherwise falls back on en_US
self.registry('res.lang').load_lang(self.cr, self.uid, 'fr_FR')
def test_negative(self):
converter = self.get_converter('float', 'duration')
with self.assertRaises(ValueError):
converter(-4)
def test_missing_unit(self):
converter = self.get_converter('float', 'duration')
with self.assertRaises(ValueError):
converter(4)
def test_basic(self):
converter = self.get_converter('float', 'duration')
result = converter(4, {'unit': 'hour'}, {'lang': 'fr_FR'})
self.assertEqual(result, u'4 heures')
result = converter(50, {'unit': 'second'}, {'lang': 'fr_FR'})
self.assertEqual(result, u'50 secondes')
def test_multiple(self):
converter = self.get_converter('float', 'duration')
result = converter(1.5, {'unit': 'hour'}, {'lang': 'fr_FR'})
self.assertEqual(result, u"1 heure 30 minutes")
result = converter(72, {'unit': 'second'}, {'lang': 'fr_FR'})
self.assertEqual(result, u"1 minute 12 secondes")
class TestRelativeDatetime(TestBasicExport):
# not sure how a test based on "current time" should be tested. Even less
# so as it would mostly be a test of babel...
def setUp(self):
super(TestRelativeDatetime, self).setUp()
# needs to have lang installed otherwise falls back on en_US
self.registry('res.lang').load_lang(self.cr, self.uid, 'fr_FR')
def test_basic(self):
converter = self.get_converter('datetime', 'relative')
t = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
result = converter(t, context={'lang': 'fr_FR'})
self.assertEqual(result, u"il y a 1 heure")
|
agpl-3.0
|
huongttlan/statsmodels
|
statsmodels/examples/ex_sandwich.py
|
34
|
2737
|
# -*- coding: utf-8 -*-
"""examples for sandwich estimators of covariance
Author: Josef Perktold
"""
from statsmodels.compat.python import lzip
import numpy as np
from numpy.testing import assert_almost_equal
import statsmodels.api as sm
import statsmodels.stats.sandwich_covariance as sw
#import statsmodels.sandbox.panel.sandwich_covariance_generic as swg
nobs = 100
kvars = 4 #including constant
x = np.random.randn(nobs, kvars-1)
exog = sm.add_constant(x)
params_true = np.ones(kvars)
y_true = np.dot(exog, params_true)
sigma = 0.1 + np.exp(exog[:,-1])
endog = y_true + sigma * np.random.randn(nobs)
self = sm.OLS(endog, exog).fit()
print(self.HC3_se)
print(sw.se_cov(sw.cov_hc3(self)))
#test standalone refactoring
assert_almost_equal(sw.se_cov(sw.cov_hc0(self)), self.HC0_se, 15)
assert_almost_equal(sw.se_cov(sw.cov_hc1(self)), self.HC1_se, 15)
assert_almost_equal(sw.se_cov(sw.cov_hc2(self)), self.HC2_se, 15)
assert_almost_equal(sw.se_cov(sw.cov_hc3(self)), self.HC3_se, 15)
print(self.HC0_se)
print(sw.se_cov(sw.cov_hac_simple(self, nlags=0, use_correction=False)))
#test White as HAC with nlags=0, same as nlags=1 ?
bse_hac0 = sw.se_cov(sw.cov_hac_simple(self, nlags=0, use_correction=False))
assert_almost_equal(bse_hac0, self.HC0_se, 15)
print(bse_hac0)
#test White as HAC with nlags=0 and small-sample correction; compare to HC1
bse_hac0c = sw.se_cov(sw.cov_hac_simple(self, nlags=0, use_correction=True))
assert_almost_equal(bse_hac0c, self.HC1_se, 15)
bse_w = sw.se_cov(sw.cov_white_simple(self, use_correction=False))
print(bse_w)
#test White
assert_almost_equal(bse_w, self.HC0_se, 15)
bse_wc = sw.se_cov(sw.cov_white_simple(self, use_correction=True))
print(bse_wc)
#test White
assert_almost_equal(bse_wc, self.HC1_se, 15)
groups = np.repeat(np.arange(5), 20)
idx = np.nonzero(np.diff(groups))[0].tolist()
groupidx = lzip([0]+idx, idx+[len(groups)])
ngroups = len(groupidx)
print(sw.se_cov(sw.cov_cluster(self, groups)))
#two strange looking corner cases BUG?
print(sw.se_cov(sw.cov_cluster(self, np.ones(len(endog), int), use_correction=False)))
print(sw.se_cov(sw.cov_crosssection_0(self, np.arange(len(endog)))))
#these results are close to simple (no group) white, 50 groups 2 obs each
groups = np.repeat(np.arange(50), 100//50)
print(sw.se_cov(sw.cov_cluster(self, groups)))
#2 groups with 50 obs each, what was the interpretation again?
groups = np.repeat(np.arange(2), 100//2)
print(sw.se_cov(sw.cov_cluster(self, groups)))
"http://www.kellogg.northwestern.edu/faculty/petersen/htm/papers/se/test_data.txt"
'''
test <- read.table(
url(paste("http://www.kellogg.northwestern.edu/",
"faculty/petersen/htm/papers/se/",
"test_data.txt",sep="")),
col.names=c("firmid", "year", "x", "y"))
'''
|
bsd-3-clause
|
oew1v07/scikit-image
|
doc/examples/plot_register_translation.py
|
14
|
2463
|
"""
=====================================
Cross-Correlation (Phase Correlation)
=====================================
In this example, we use phase correlation to identify the relative shift
between two similar-sized images.
The ``register_translation`` function uses cross-correlation in Fourier space,
optionally employing an upsampled matrix-multiplication DFT to achieve
arbitrary subpixel precision. [1]_
.. [1] Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup,
"Efficient subpixel image registration algorithms," Optics Letters 33,
156-158 (2008).
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.feature import register_translation
from skimage.feature.register_translation import _upsampled_dft
from scipy.ndimage import fourier_shift
image = data.camera()
shift = (-2.4, 1.32)
# (-2.4, 1.32) pixel offset relative to the reference image
offset_image = fourier_shift(np.fft.fftn(image), shift)
offset_image = np.fft.ifftn(offset_image)
print("Known offset (y, x):")
print(shift)
# pixel precision first
shift, error, diffphase = register_translation(image, offset_image)
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(8, 3))
ax1.imshow(image)
ax1.set_axis_off()
ax1.set_title('Reference image')
ax2.imshow(offset_image.real)
ax2.set_axis_off()
ax2.set_title('Offset image')
# View the output of a cross-correlation to show what the algorithm is
# doing behind the scenes
image_product = np.fft.fft2(image) * np.fft.fft2(offset_image).conj()
cc_image = np.fft.fftshift(np.fft.ifft2(image_product))
ax3.imshow(cc_image.real)
ax3.set_axis_off()
ax3.set_title("Cross-correlation")
plt.show()
print("Detected pixel offset (y, x):")
print(shift)
# subpixel precision
shift, error, diffphase = register_translation(image, offset_image, 100)
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(8, 3))
ax1.imshow(image)
ax1.set_axis_off()
ax1.set_title('Reference image')
ax2.imshow(offset_image.real)
ax2.set_axis_off()
ax2.set_title('Offset image')
# Calculate the upsampled DFT, again to show what the algorithm is doing
# behind the scenes. Constants correspond to calculated values in routine.
# See source code for details.
cc_image = _upsampled_dft(image_product, 150, 100, (shift*100)+75).conj()
ax3.imshow(cc_image.real)
ax3.set_axis_off()
ax3.set_title("Supersampled XC sub-area")
plt.show()
print("Detected subpixel offset (y, x):")
print(shift)
|
bsd-3-clause
|
gamechanger/deferrable
|
deferrable/deferrable.py
|
1
|
12658
|
import sys
import time
import logging
from uuid import uuid1
import socket
from traceback import format_exc
from .pickling import loads, dumps, build_later_item, unpickle_method_call, pretty_unpickle
from .debounce import (get_debounce_strategy, set_debounce_keys_for_push_now,
set_debounce_keys_for_push_delayed, DebounceStrategy)
from .ttl import add_ttl_metadata_to_item, item_is_expired
from .backoff import apply_exponential_backoff_options, apply_exponential_backoff_delay
from .redis import initialize_redis_client
from .delay import MAXIMUM_DELAY_SECONDS
class Deferrable(object):
"""
The Deferrable class provides an interface for deferred, distributed execution of
module-level functions, using the provided backend for transport.
Once instantiated, the Deferrable object is primarily used through two
public methods:
- @instance.deferrable: Decorator used to register a function for deferred execution.
- instance.run_once(): Method to pop one deferred function off the backend queue and
execute it, subject to execution properties on the deferrable
instance and the specific deferred task itself (e.g. TTL)
The following events are emitted by Deferrable and may be consumed by
registering event handlers with the appropriate `on_{event}` methods,
each of which takes the queue item as its sole argument. Event handlers
regarding queue operations (e.g. pop) are called *after* the operation
has taken place.
- on_push : item pushed to the non-error queue
- on_pop : pop was attempted and returned an item
- on_empty : pop was attempted but did not return an item
- on_complete : item completed in the non-error queue
- on_expire : TTL expiration
- on_retry : item execution errored but will be retried
- on_error : item execution errored and was pushed to the error queue
- on_debounce_hit : item was not queued subject to debounce constraints
- on_debounce_miss : item is configured for debounce but was queued
- on_debounce_error : exception encountered while processing debounce logic (item will still be queued)
"""
def __init__(self, backend, redis_client=None, default_error_classes=None, default_max_attempts=5):
self.backend = backend
self._redis_client = redis_client
self.default_error_classes = default_error_classes
self.default_max_attempts = default_max_attempts
self._metadata_producer_consumers = []
self._event_consumers = []
@property
def redis_client(self):
if not hasattr(self, '_initialized_redis_client'):
self._initialized_redis_client = initialize_redis_client(self._redis_client)
return self._initialized_redis_client
def deferrable(self, *args, **kwargs):
"""Decorator. Use this to register a function with this Deferrable
instance. Example usage:
@deferrable_instance.deferrable
def some_function():
pass
Any arguments given to `deferrable` are passed as-is to `_deferrable`.
@deferrable_instance.deferrable(error_classes=[ValueError])
def some_function():
pass
"""
if len(args) == 1 and callable(args[0]) and not kwargs:
method = args[0]
return self._deferrable(method)
return lambda method: self._deferrable(method, *args, **kwargs)
def run_once(self):
"""Provided as a convenience function for consumers that are not
concerned with envelope-level heartbeats (touch operations). If your
consumer needs to implement touch, you should probably do these
steps separately inside your consumer."""
envelope, item = self.backend.queue.pop()
return self.process(envelope, item)
def process(self, envelope, item):
if not envelope:
self._emit('empty', item)
return
self._emit('pop', item)
item_error_classes = loads(item['error_classes']) or tuple()
for producer_consumer in self._metadata_producer_consumers:
producer_consumer._consume_metadata_from_item(item)
try:
if item_is_expired(item):
logging.warn("Deferrable job dropped with expired TTL: {}".format(pretty_unpickle(item)))
self._emit('expire', item)
self.backend.queue.complete(envelope)
self._emit('complete', item)
return
method, args, kwargs = unpickle_method_call(item)
method(*args, **kwargs)
except tuple(item_error_classes):
attempts, max_attempts = item['attempts'], item['max_attempts']
if attempts >= max_attempts - 1:
self._push_item_to_error_queue(item)
else:
item['attempts'] += 1
apply_exponential_backoff_delay(item)
self.backend.queue.push(item)
self._emit('retry', item)
except Exception:
self._push_item_to_error_queue(item)
self.backend.queue.complete(envelope)
self._emit('complete', item)
def register_metadata_producer_consumer(self, producer_consumer):
for existing in self._metadata_producer_consumers:
if existing.NAMESPACE == producer_consumer.NAMESPACE:
raise ValueError('NAMESPACE {} is already in use'.format(producer_consumer.NAMESPACE))
self._metadata_producer_consumers.append(producer_consumer)
def clear_metadata_producer_consumers(self):
self._metadata_producer_consumers = []
def register_event_consumer(self, event_consumer):
self._event_consumers.append(event_consumer)
def clear_event_consumers(self):
self._event_consumers = []
def _emit(self, event, item):
"""Run any handler methods on registered event consumers for the given event,
passing the item to the method. Processes the event consumers in the order
they were registered."""
handler_name = 'on_{}'.format(event)
for event_consumer in self._event_consumers:
if hasattr(event_consumer, handler_name):
getattr(event_consumer, handler_name)(item)
def _push_item_to_error_queue(self, item):
"""Put information about the current exception into the item's `error`
key and push the transformed item to the error queue."""
exc_info = sys.exc_info()
assert exc_info[0], "_push_error_item must be called from inside an exception handler"
error_info = {
'error_type': str(exc_info[0].__name__),
'error_text': str(exc_info[1]),
'traceback': format_exc(),
'hostname': socket.gethostname(),
'ts': time.time(),
'id': str(uuid1())
}
item['error'] = error_info
item['last_push_time'] = time.time()
if 'delay' in item:
del item['delay']
self.backend.error_queue.push(item)
self._emit('error', item)
def _validate_deferrable_args_compile_time(self, delay_seconds, debounce_seconds, debounce_always_delay, ttl_seconds):
"""Validation check which can be run at compile-time on decorated functions. This
cannot do any bounds checking on the time arguments, which can be reified from
callables at each individual .later() invocation."""
if debounce_seconds and not self.redis_client:
raise ValueError('redis_client is required for debounce')
if delay_seconds and debounce_seconds:
raise ValueError('You cannot delay and debounce at the same time (debounce uses delay internally).')
if debounce_always_delay and not debounce_seconds:
raise ValueError('debounce_always_delay is an option to debounce_seconds, which was not set. Probably a mistake.')
def _validate_deferrable_args_run_time(self, delay_seconds, debounce_seconds, ttl_seconds):
"""Validation check run once all variables have been reified. This is where you
can do bounds checking on time variables."""
if delay_seconds > MAXIMUM_DELAY_SECONDS or debounce_seconds > MAXIMUM_DELAY_SECONDS:
raise ValueError('Delay or debounce window cannot exceed {} seconds'.format(MAXIMUM_DELAY_SECONDS))
if ttl_seconds:
if delay_seconds > ttl_seconds or debounce_seconds > ttl_seconds:
raise ValueError('delay_seconds or debounce_seconds must be less than ttl_seconds')
def _apply_delay_and_skip_for_debounce(self, item, debounce_seconds, debounce_always_delay):
"""Modifies the item in place to meet the debouncing constraints set by `debounce_seconds`
and `debounce_always_delay`. For more detail, see the `debouncing` module.
- delay: Seconds by which to delay the item.
- debounce_skip: If set to True, the item gets debounced and will not be queued.
If an exception is encountered, we set `delay` to `None` so that the item is immediately
queued for processing. We do not want a failure in debounce to stop the item from being
processed."""
try:
debounce_strategy, seconds_to_delay = get_debounce_strategy(self.redis_client, item, debounce_seconds, debounce_always_delay)
if debounce_strategy == DebounceStrategy.SKIP:
item['debounce_skip'] = True
self._emit('debounce_hit', item)
return
self._emit('debounce_miss', item)
if debounce_strategy == DebounceStrategy.PUSH_NOW:
set_debounce_keys_for_push_now(self.redis_client, item, debounce_seconds)
elif debounce_strategy == DebounceStrategy.PUSH_DELAYED:
set_debounce_keys_for_push_delayed(self.redis_client, item, seconds_to_delay, debounce_seconds)
item['delay'] = seconds_to_delay
except: # Skip debouncing if we hit an error, don't fail completely
logging.exception("Encountered error while attempting to process debounce")
item['delay'] = 0
self._emit('debounce_error', item)
def _deferrable(self, method, error_classes=None, max_attempts=None,
delay_seconds=0, debounce_seconds=0, debounce_always_delay=False, ttl_seconds=0,
use_exponential_backoff=True):
self._validate_deferrable_args_compile_time(delay_seconds, debounce_seconds, debounce_always_delay, ttl_seconds)
def later(*args, **kwargs):
delay_actual = delay_seconds() if callable(delay_seconds) else delay_seconds
debounce_actual = debounce_seconds() if callable(debounce_seconds) else debounce_seconds
ttl_actual = ttl_seconds() if callable(ttl_seconds) else ttl_seconds
self._validate_deferrable_args_run_time(delay_actual, debounce_actual, ttl_actual)
item = build_later_item(method, *args, **kwargs)
now = time.time()
item_error_classes = error_classes if error_classes is not None else self.default_error_classes
item_max_attempts = max_attempts if max_attempts is not None else self.default_max_attempts
item.update({
'group': self.backend.group,
'error_classes': dumps(item_error_classes),
'attempts': 0,
'max_attempts': item_max_attempts,
'first_push_time': now,
'last_push_time': now,
'original_delay_seconds': delay_actual,
'original_debounce_seconds': debounce_actual,
'original_debounce_always_delay': debounce_always_delay
})
apply_exponential_backoff_options(item, use_exponential_backoff)
if ttl_actual:
add_ttl_metadata_to_item(item, ttl_actual)
if debounce_actual:
self._apply_delay_and_skip_for_debounce(item, debounce_actual, debounce_always_delay)
if item.get('debounce_skip'):
return
else:
item['delay'] = delay_actual
# Final delay value calculated
item['original_delay'] = item['delay']
for producer_consumer in self._metadata_producer_consumers:
producer_consumer._apply_metadata_to_item(item)
self.backend.queue.push(item)
self._emit('push', item)
method.later = later
return method
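# Illustrative usage sketch (added for context, not part of the original
# module). Because this module uses relative imports and needs a real queue
# backend, the example is shown as a comment and would live in a client
# script; the backend class named here is hypothetical.
#
#     from deferrable.deferrable import Deferrable
#     from my_project.queues import SQSBackend          # hypothetical backend
#
#     deferred = Deferrable(SQSBackend(group='emails'))
#
#     @deferred.deferrable(error_classes=[IOError], max_attempts=3)
#     def send_welcome_email(user_id):
#         ...
#
#     # Producer side: queue the call instead of running it inline.
#     send_welcome_email.later(user_id=42)
#
#     # Consumer side: pop and execute one queued item.
#     deferred.run_once()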
|
mit
|
JavML/django
|
tests/aggregation/tests.py
|
57
|
45690
|
from __future__ import unicode_literals
import datetime
import re
from decimal import Decimal
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
F, Avg, Count, DecimalField, DurationField, FloatField, Func, IntegerField,
Max, Min, Sum, Value,
)
from django.test import TestCase
from django.test.utils import Approximate, CaptureQueriesContext
from django.utils import six, timezone
from .models import Author, Book, Publisher, Store
class AggregateTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
cls.a4 = Author.objects.create(name='James Bennett', age=29)
cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name='Apress', num_awards=3, duration=datetime.timedelta(days=1))
cls.p2 = Publisher.objects.create(name='Sams', num_awards=1, duration=datetime.timedelta(days=2))
cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6)
)
cls.b2 = Book.objects.create(
isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3)
)
cls.b3 = Book.objects.create(
isbn='159059996', name='Practical Django Projects',
pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23)
)
cls.b4 = Book.objects.create(
isbn='013235613', name='Python Web Development with Django',
pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3)
)
cls.b5 = Book.objects.create(
isbn='013790395', name='Artificial Intelligence: A Modern Approach',
pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15)
)
cls.b6 = Book.objects.create(
isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15)
)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name='Amazon.com',
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59)
)
s2 = Store.objects.create(
name='Books.com',
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59)
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30)
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def test_empty_aggregate(self):
self.assertEqual(Author.objects.all().aggregate(), {})
def test_aggregate_in_order_by(self):
msg = (
'Using an aggregate in order_by() without also including it in '
'annotate() is not allowed: Avg(F(book__rating)'
)
with self.assertRaisesMessage(FieldError, msg):
Author.objects.values('age').order_by(Avg('book__rating'))
def test_single_aggregate(self):
vals = Author.objects.aggregate(Avg("age"))
self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)})
def test_multiple_aggregates(self):
vals = Author.objects.aggregate(Sum("age"), Avg("age"))
self.assertEqual(vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)})
def test_filter_aggregate(self):
vals = Author.objects.filter(age__gt=29).aggregate(Sum("age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["age__sum"], 254)
def test_related_aggregate(self):
vals = Author.objects.aggregate(Avg("friends__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["friends__age__avg"], 34.07, places=2)
vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["authors__age__avg"], 38.2857, places=2)
vals = Author.objects.all().filter(name__contains="a").aggregate(Avg("book__rating"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__rating__avg"], 4.0)
vals = Book.objects.aggregate(Sum("publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["publisher__num_awards__sum"], 30)
vals = Publisher.objects.aggregate(Sum("book__price"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__price__sum"], Decimal("270.27"))
def test_aggregate_multi_join(self):
vals = Store.objects.aggregate(Max("books__authors__age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["books__authors__age__max"], 57)
vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__publisher__num_awards__min"], 1)
def test_aggregate_alias(self):
vals = Store.objects.filter(name="Amazon.com").aggregate(amazon_mean=Avg("books__rating"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["amazon_mean"], 4.08, places=2)
def test_annotate_basic(self):
self.assertQuerysetEqual(
Book.objects.annotate().order_by('pk'), [
"The Definitive Guide to Django: Web Development Done Right",
"Sams Teach Yourself Django in 24 Hours",
"Practical Django Projects",
"Python Web Development with Django",
"Artificial Intelligence: A Modern Approach",
"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp"
],
lambda b: b.name
)
books = Book.objects.annotate(mean_age=Avg("authors__age"))
b = books.get(pk=self.b1.pk)
self.assertEqual(
b.name,
'The Definitive Guide to Django: Web Development Done Right'
)
self.assertEqual(b.mean_age, 34.5)
def test_annotate_defer(self):
qs = Book.objects.annotate(
page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk)
rows = [
(1, "159059725", 447, "The Definitive Guide to Django: Web Development Done Right")
]
self.assertQuerysetEqual(
qs.order_by('pk'), rows,
lambda r: (r.id, r.isbn, r.page_sum, r.name)
)
def test_annotate_defer_select_related(self):
qs = Book.objects.select_related('contact').annotate(
page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk)
rows = [
(1, "159059725", 447, "Adrian Holovaty",
"The Definitive Guide to Django: Web Development Done Right")
]
self.assertQuerysetEqual(
qs.order_by('pk'), rows,
lambda r: (r.id, r.isbn, r.page_sum, r.contact.name, r.name)
)
def test_annotate_m2m(self):
books = Book.objects.filter(rating__lt=4.5).annotate(Avg("authors__age")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 51.5),
('Practical Django Projects', 29.0),
('Python Web Development with Django', Approximate(30.3, places=1)),
('Sams Teach Yourself Django in 24 Hours', 45.0)
],
lambda b: (b.name, b.authors__age__avg),
)
books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Practical Django Projects', 1),
('Python Web Development with Django', 3),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 2)
],
lambda b: (b.name, b.num_authors)
)
def test_backwards_m2m_annotate(self):
authors = Author.objects.filter(name__contains="a").annotate(Avg("book__rating")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 4.5),
('Brad Dayley', 3.0),
('Jacob Kaplan-Moss', 4.5),
('James Bennett', 4.0),
('Paul Bissex', 4.0),
('Stuart Russell', 4.0)
],
lambda a: (a.name, a.book__rating__avg)
)
authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 1),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 1),
('Peter Norvig', 2),
('Stuart Russell', 1),
('Wesley J. Chun', 1)
],
lambda a: (a.name, a.num_books)
)
def test_reverse_fkey_annotate(self):
books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 7),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 9),
('Practical Django Projects', 3),
('Python Web Development with Django', 7),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 3)
],
lambda b: (b.name, b.publisher__num_awards__sum)
)
publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
self.assertQuerysetEqual(
publishers, [
('Apress', Decimal("59.69")),
("Jonno's House of Books", None),
('Morgan Kaufmann', Decimal("75.00")),
('Prentice Hall', Decimal("112.49")),
('Sams', Decimal("23.09"))
],
lambda p: (p.name, p.book__price__sum)
)
def test_annotate_values(self):
books = list(Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values())
self.assertEqual(
books, [
{
"contact_id": 1,
"id": 1,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": 1,
"rating": 4.5,
}
]
)
books = (
Book.objects
.filter(pk=self.b1.pk)
.annotate(mean_age=Avg('authors__age'))
.values('pk', 'isbn', 'mean_age')
)
self.assertEqual(
list(books), [
{
"pk": 1,
"isbn": "159059725",
"mean_age": 34.5,
}
]
)
books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values("name")
self.assertEqual(
list(books), [
{
"name": "The Definitive Guide to Django: Web Development Done Right"
}
]
)
books = Book.objects.filter(pk=self.b1.pk).values().annotate(mean_age=Avg('authors__age'))
self.assertEqual(
list(books), [
{
"contact_id": 1,
"id": 1,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": 1,
"rating": 4.5,
}
]
)
books = (
Book.objects
.values("rating")
.annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age"))
.order_by("rating")
)
self.assertEqual(
list(books), [
{
"rating": 3.0,
"n_authors": 1,
"mean_age": 45.0,
},
{
"rating": 4.0,
"n_authors": 6,
"mean_age": Approximate(37.16, places=1)
},
{
"rating": 4.5,
"n_authors": 2,
"mean_age": 34.5,
},
{
"rating": 5.0,
"n_authors": 1,
"mean_age": 57.0,
}
]
)
authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
self.assertEqual(len(authors), 9)
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 32.0),
('Brad Dayley', None),
('Jacob Kaplan-Moss', 29.5),
('James Bennett', 34.0),
('Jeffrey Forcier', 27.0),
('Paul Bissex', 31.0),
('Peter Norvig', 46.0),
('Stuart Russell', 57.0),
('Wesley J. Chun', Approximate(33.66, places=1))
],
lambda a: (a.name, a.friends__age__avg)
)
def test_count(self):
vals = Book.objects.aggregate(Count("rating"))
self.assertEqual(vals, {"rating__count": 6})
vals = Book.objects.aggregate(Count("rating", distinct=True))
self.assertEqual(vals, {"rating__count": 4})
def test_count_star(self):
with self.assertNumQueries(1) as ctx:
Book.objects.aggregate(n=Count("*"))
sql = ctx.captured_queries[0]['sql']
self.assertIn('SELECT COUNT(*) ', sql)
def test_non_grouped_annotation_not_in_group_by(self):
"""
An annotation not included in values() before an aggregate should be
excluded from the group by clause.
"""
qs = (
Book.objects.annotate(xprice=F('price')).filter(rating=4.0).values('rating')
.annotate(count=Count('publisher_id', distinct=True)).values('count', 'rating').order_by('count')
)
self.assertEqual(
list(qs), [
{'rating': 4.0, 'count': 2},
]
)
def test_grouped_annotation_in_group_by(self):
"""
An annotation included in values() before an aggregate should be
included in the group by clause.
"""
qs = (
Book.objects.annotate(xprice=F('price')).filter(rating=4.0).values('rating', 'xprice')
.annotate(count=Count('publisher_id', distinct=True)).values('count', 'rating').order_by('count')
)
self.assertEqual(
list(qs), [
{'rating': 4.0, 'count': 1},
{'rating': 4.0, 'count': 2},
]
)
def test_fkey_aggregate(self):
explicit = list(Author.objects.annotate(Count('book__id')))
implicit = list(Author.objects.annotate(Count('book')))
self.assertEqual(explicit, implicit)
def test_annotate_ordering(self):
books = Book.objects.values('rating').annotate(oldest=Max('authors__age')).order_by('oldest', 'rating')
self.assertEqual(
list(books), [
{
"rating": 4.5,
"oldest": 35,
},
{
"rating": 3.0,
"oldest": 45
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 5.0,
"oldest": 57,
}
]
)
books = Book.objects.values("rating").annotate(oldest=Max("authors__age")).order_by("-oldest", "-rating")
self.assertEqual(
list(books), [
{
"rating": 5.0,
"oldest": 57,
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 3.0,
"oldest": 45,
},
{
"rating": 4.5,
"oldest": 35,
}
]
)
def test_aggregate_annotation(self):
vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(Avg("num_authors"))
self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)})
def test_avg_duration_field(self):
self.assertEqual(
Publisher.objects.aggregate(Avg('duration', output_field=DurationField())),
{'duration__avg': datetime.timedelta(days=1, hours=12)}
)
def test_sum_duration_field(self):
self.assertEqual(
Publisher.objects.aggregate(Sum('duration', output_field=DurationField())),
{'duration__sum': datetime.timedelta(days=3)}
)
def test_sum_distinct_aggregate(self):
"""
Sum on a distinct() QuerySet should aggregate only the distinct items.
"""
authors = Author.objects.filter(book__in=[5, 6])
self.assertEqual(authors.count(), 3)
distinct_authors = authors.distinct()
self.assertEqual(distinct_authors.count(), 2)
# Selected author ages are 57 and 46
age_sum = distinct_authors.aggregate(Sum('age'))
self.assertEqual(age_sum['age__sum'], 103)
def test_filtering(self):
p = Publisher.objects.create(name='Expensive Publisher', num_awards=0)
Book.objects.create(
name='ExpensiveBook1',
pages=1,
isbn='111',
rating=3.5,
price=Decimal("1000"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008, 12, 1)
)
Book.objects.create(
name='ExpensiveBook2',
pages=1,
isbn='222',
rating=4.0,
price=Decimal("1000"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008, 12, 2)
)
Book.objects.create(
name='ExpensiveBook3',
pages=1,
isbn='333',
rating=4.5,
price=Decimal("35"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008, 12, 3)
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Apress",
"Sams",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = (
Publisher.objects
.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1, book__price__lt=Decimal("40.0"))
.order_by("pk")
)
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = (
Publisher.objects
.filter(book__price__lt=Decimal("40.0"))
.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
.order_by("pk")
)
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Sams",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True)
self.assertEqual(len(publishers), 0)
def test_annotation(self):
vals = Author.objects.filter(pk=self.a1.pk).aggregate(Count("friends__id"))
self.assertEqual(vals, {"friends__id__count": 2})
books = Book.objects.annotate(num_authors=Count("authors__name")).filter(num_authors__exact=2).order_by("pk")
self.assertQuerysetEqual(
books, [
"The Definitive Guide to Django: Web Development Done Right",
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
authors = (
Author.objects
.annotate(num_friends=Count("friends__id", distinct=True))
.filter(num_friends=0)
.order_by("pk")
)
self.assertQuerysetEqual(
authors, [
"Brad Dayley",
],
lambda a: a.name
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
],
lambda p: p.name
)
publishers = (
Publisher.objects
.filter(book__price__lt=Decimal("40.0"))
.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
)
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
books = (
Book.objects
.annotate(num_authors=Count("authors__id"))
.filter(authors__name__contains="Norvig", num_authors__gt=1)
)
self.assertQuerysetEqual(
books, [
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
def test_more_aggregation(self):
a = Author.objects.get(name__contains='Norvig')
b = Book.objects.get(name__contains='Done Right')
b.authors.add(a)
b.save()
vals = (
Book.objects
.annotate(num_authors=Count("authors__id"))
.filter(authors__name__contains="Norvig", num_authors__gt=1)
.aggregate(Avg("rating"))
)
self.assertEqual(vals, {"rating__avg": 4.25})
def test_even_more_aggregate(self):
publishers = Publisher.objects.annotate(
earliest_book=Min("book__pubdate"),
).exclude(earliest_book=None).order_by("earliest_book").values(
'earliest_book',
'num_awards',
'id',
'name',
)
self.assertEqual(
list(publishers), [
{
'earliest_book': datetime.date(1991, 10, 15),
'num_awards': 9,
'id': 4,
'name': 'Morgan Kaufmann'
},
{
'earliest_book': datetime.date(1995, 1, 15),
'num_awards': 7,
'id': 3,
'name': 'Prentice Hall'
},
{
'earliest_book': datetime.date(2007, 12, 6),
'num_awards': 3,
'id': 1,
'name': 'Apress'
},
{
'earliest_book': datetime.date(2008, 3, 3),
'num_awards': 1,
'id': 2,
'name': 'Sams'
}
]
)
vals = Store.objects.aggregate(Max("friday_night_closing"), Min("original_opening"))
self.assertEqual(
vals,
{
"friday_night_closing__max": datetime.time(23, 59, 59),
"original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
}
)
def test_annotate_values_list(self):
books = (
Book.objects
.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("pk", "isbn", "mean_age")
)
self.assertEqual(
list(books), [
(1, "159059725", 34.5),
]
)
books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("isbn")
self.assertEqual(
list(books), [
('159059725',)
]
)
books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("mean_age")
self.assertEqual(
list(books), [
(34.5,)
]
)
books = (
Book.objects
.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("mean_age", flat=True)
)
self.assertEqual(list(books), [34.5])
books = Book.objects.values_list("price").annotate(count=Count("price")).order_by("-count", "price")
self.assertEqual(
list(books), [
(Decimal("29.69"), 2),
(Decimal('23.09'), 1),
(Decimal('30'), 1),
(Decimal('75'), 1),
(Decimal('82.8'), 1),
]
)
def test_dates_with_aggregation(self):
"""
Test that .dates() returns a distinct set of dates when applied to a
QuerySet with aggregation.
Refs #18056. Previously, .dates() would return distinct (date_kind,
aggregation) sets, in this case (year, num_authors), so 2008 would be
returned twice because there are books from 2008 with a different
number of authors.
"""
dates = Book.objects.annotate(num_authors=Count("authors")).dates('pubdate', 'year')
self.assertQuerysetEqual(
dates, [
"datetime.date(1991, 1, 1)",
"datetime.date(1995, 1, 1)",
"datetime.date(2007, 1, 1)",
"datetime.date(2008, 1, 1)"
]
)
def test_values_aggregation(self):
# Refs #20782
max_rating = Book.objects.values('rating').aggregate(max_rating=Max('rating'))
self.assertEqual(max_rating['max_rating'], 5)
max_books_per_rating = Book.objects.values('rating').annotate(
books_per_rating=Count('id')
).aggregate(Max('books_per_rating'))
self.assertEqual(
max_books_per_rating,
{'books_per_rating__max': 3})
def test_ticket17424(self):
"""
Check that doing exclude() on a foreign model after annotate()
doesn't crash.
"""
all_books = list(Book.objects.values_list('pk', flat=True).order_by('pk'))
annotated_books = Book.objects.order_by('pk').annotate(one=Count("id"))
        # The value doesn't matter; we just need any negative
        # constraint on a related model that's a no-op.
excluded_books = annotated_books.exclude(publisher__name="__UNLIKELY_VALUE__")
# Try to generate query tree
str(excluded_books.query)
self.assertQuerysetEqual(excluded_books, all_books, lambda x: x.pk)
# Check internal state
self.assertIsNone(annotated_books.query.alias_map["aggregation_book"].join_type)
self.assertIsNone(excluded_books.query.alias_map["aggregation_book"].join_type)
def test_ticket12886(self):
"""
Check that aggregation over sliced queryset works correctly.
"""
qs = Book.objects.all().order_by('-rating')[0:3]
vals = qs.aggregate(average_top3_rating=Avg('rating'))['average_top3_rating']
self.assertAlmostEqual(vals, 4.5, places=2)
def test_ticket11881(self):
"""
Check that subqueries do not needlessly contain ORDER BY, SELECT FOR UPDATE
or select_related() stuff.
"""
qs = Book.objects.all().select_for_update().order_by(
'pk').select_related('publisher').annotate(max_pk=Max('pk'))
with CaptureQueriesContext(connection) as captured_queries:
qs.aggregate(avg_pk=Avg('max_pk'))
self.assertEqual(len(captured_queries), 1)
qstr = captured_queries[0]['sql'].lower()
self.assertNotIn('for update', qstr)
forced_ordering = connection.ops.force_no_ordering()
if forced_ordering:
# If the backend needs to force an ordering we make sure it's
# the only "ORDER BY" clause present in the query.
self.assertEqual(
re.findall(r'order by (\w+)', qstr),
[', '.join(f[1][0] for f in forced_ordering).lower()]
)
else:
self.assertNotIn('order by', qstr)
self.assertEqual(qstr.count(' join '), 0)
def test_decimal_max_digits_has_no_effect(self):
Book.objects.all().delete()
a1 = Author.objects.first()
p1 = Publisher.objects.first()
thedate = timezone.now()
for i in range(10):
Book.objects.create(
isbn="abcde{}".format(i), name="none", pages=10, rating=4.0,
price=9999.98, contact=a1, publisher=p1, pubdate=thedate)
book = Book.objects.aggregate(price_sum=Sum('price'))
self.assertEqual(book['price_sum'], Decimal("99999.80"))
def test_nonaggregate_aggregation_throws(self):
with six.assertRaisesRegex(self, TypeError, 'fail is not an aggregate expression'):
Book.objects.aggregate(fail=F('price'))
def test_nonfield_annotation(self):
book = Book.objects.annotate(val=Max(Value(2, output_field=IntegerField()))).first()
self.assertEqual(book.val, 2)
book = Book.objects.annotate(val=Max(Value(2), output_field=IntegerField())).first()
self.assertEqual(book.val, 2)
book = Book.objects.annotate(val=Max(2, output_field=IntegerField())).first()
self.assertEqual(book.val, 2)
def test_missing_output_field_raises_error(self):
with six.assertRaisesRegex(self, FieldError, 'Cannot resolve expression type, unknown output_field'):
Book.objects.annotate(val=Max(2)).first()
def test_annotation_expressions(self):
authors = Author.objects.annotate(combined_ages=Sum(F('age') + F('friends__age'))).order_by('name')
authors2 = Author.objects.annotate(combined_ages=Sum('age') + Sum('friends__age')).order_by('name')
for qs in (authors, authors2):
self.assertEqual(len(qs), 9)
self.assertQuerysetEqual(
qs, [
('Adrian Holovaty', 132),
('Brad Dayley', None),
('Jacob Kaplan-Moss', 129),
('James Bennett', 63),
('Jeffrey Forcier', 128),
('Paul Bissex', 120),
('Peter Norvig', 103),
('Stuart Russell', 103),
('Wesley J. Chun', 176)
],
lambda a: (a.name, a.combined_ages)
)
def test_aggregation_expressions(self):
a1 = Author.objects.aggregate(av_age=Sum('age') / Count('*'))
a2 = Author.objects.aggregate(av_age=Sum('age') / Count('age'))
a3 = Author.objects.aggregate(av_age=Avg('age'))
self.assertEqual(a1, {'av_age': 37})
self.assertEqual(a2, {'av_age': 37})
self.assertEqual(a3, {'av_age': Approximate(37.4, places=1)})
def test_avg_decimal_field(self):
v = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price')))['avg_price']
self.assertIsInstance(v, float)
self.assertEqual(v, Approximate(47.39, places=2))
def test_order_of_precedence(self):
p1 = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price') + 2) * 3)
self.assertEqual(p1, {'avg_price': Approximate(148.18, places=2)})
p2 = Book.objects.filter(rating=4).aggregate(avg_price=Avg('price') + 2 * 3)
self.assertEqual(p2, {'avg_price': Approximate(53.39, places=2)})
def test_combine_different_types(self):
with six.assertRaisesRegex(self, FieldError, 'Expression contains mixed types. You must set output_field'):
Book.objects.annotate(sums=Sum('rating') + Sum('pages') + Sum('price')).get(pk=self.b4.pk)
b1 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=IntegerField())).get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
b2 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=FloatField())).get(pk=self.b4.pk)
self.assertEqual(b2.sums, 383.69)
b3 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=DecimalField())).get(pk=self.b4.pk)
self.assertEqual(b3.sums, Approximate(Decimal("383.69"), places=2))
def test_complex_aggregations_require_kwarg(self):
with six.assertRaisesRegex(self, TypeError, 'Complex annotations require an alias'):
Author.objects.annotate(Sum(F('age') + F('friends__age')))
with six.assertRaisesRegex(self, TypeError, 'Complex aggregates require an alias'):
Author.objects.aggregate(Sum('age') / Count('age'))
with six.assertRaisesRegex(self, TypeError, 'Complex aggregates require an alias'):
Author.objects.aggregate(Sum(1))
def test_aggregate_over_complex_annotation(self):
qs = Author.objects.annotate(
combined_ages=Sum(F('age') + F('friends__age')))
age = qs.aggregate(max_combined_age=Max('combined_ages'))
self.assertEqual(age['max_combined_age'], 176)
age = qs.aggregate(max_combined_age_doubled=Max('combined_ages') * 2)
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
age = qs.aggregate(
max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'))
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
age = qs.aggregate(
max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'),
sum_combined_age=Sum('combined_ages'))
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
self.assertEqual(age['sum_combined_age'], 954)
age = qs.aggregate(
max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'),
sum_combined_age_doubled=Sum('combined_ages') + Sum('combined_ages'))
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
self.assertEqual(age['sum_combined_age_doubled'], 954 * 2)
def test_values_annotation_with_expression(self):
# ensure the F() is promoted to the group by clause
qs = Author.objects.values('name').annotate(another_age=Sum('age') + F('age'))
a = qs.get(name="Adrian Holovaty")
self.assertEqual(a['another_age'], 68)
qs = qs.annotate(friend_count=Count('friends'))
a = qs.get(name="Adrian Holovaty")
self.assertEqual(a['friend_count'], 2)
qs = qs.annotate(combined_age=Sum('age') + F('friends__age')).filter(
name="Adrian Holovaty").order_by('-combined_age')
self.assertEqual(
list(qs), [
{
"name": 'Adrian Holovaty',
"another_age": 68,
"friend_count": 1,
"combined_age": 69
},
{
"name": 'Adrian Holovaty',
"another_age": 68,
"friend_count": 1,
"combined_age": 63
}
]
)
vals = qs.values('name', 'combined_age')
self.assertEqual(
list(vals), [
{
"name": 'Adrian Holovaty',
"combined_age": 69
},
{
"name": 'Adrian Holovaty',
"combined_age": 63
}
]
)
def test_annotate_values_aggregate(self):
alias_age = Author.objects.annotate(
age_alias=F('age')
).values(
'age_alias',
).aggregate(sum_age=Sum('age_alias'))
age = Author.objects.values('age').aggregate(sum_age=Sum('age'))
self.assertEqual(alias_age['sum_age'], age['sum_age'])
def test_annotate_over_annotate(self):
author = Author.objects.annotate(
age_alias=F('age')
).annotate(
sum_age=Sum('age_alias')
).get(name="Adrian Holovaty")
other_author = Author.objects.annotate(
sum_age=Sum('age')
).get(name="Adrian Holovaty")
self.assertEqual(author.sum_age, other_author.sum_age)
def test_annotated_aggregate_over_annotated_aggregate(self):
with self.assertRaisesMessage(FieldError, "Cannot compute Sum('id__max'): 'id__max' is an aggregate"):
Book.objects.annotate(Max('id')).annotate(Sum('id__max'))
class MyMax(Max):
def as_sql(self, compiler, connection):
self.set_source_expressions(self.get_source_expressions()[0:1])
return super(MyMax, self).as_sql(compiler, connection)
with self.assertRaisesMessage(FieldError, "Cannot compute Max('id__max'): 'id__max' is an aggregate"):
Book.objects.annotate(Max('id')).annotate(my_max=MyMax('id__max', 'price'))
def test_multi_arg_aggregate(self):
class MyMax(Max):
def as_sql(self, compiler, connection):
self.set_source_expressions(self.get_source_expressions()[0:1])
return super(MyMax, self).as_sql(compiler, connection)
with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
Book.objects.aggregate(MyMax('pages', 'price'))
with self.assertRaisesMessage(TypeError, 'Complex annotations require an alias'):
Book.objects.annotate(MyMax('pages', 'price'))
Book.objects.aggregate(max_field=MyMax('pages', 'price'))
def test_add_implementation(self):
class MySum(Sum):
pass
# test completely changing how the output is rendered
def lower_case_function_override(self, compiler, connection):
sql, params = compiler.compile(self.source_expressions[0])
substitutions = dict(function=self.function.lower(), expressions=sql)
substitutions.update(self.extra)
return self.template % substitutions, params
setattr(MySum, 'as_' + connection.vendor, lower_case_function_override)
qs = Book.objects.annotate(
sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
)
self.assertEqual(str(qs.query).count('sum('), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
# test changing the dict and delegating
def lower_case_function_super(self, compiler, connection):
self.extra['function'] = self.function.lower()
return super(MySum, self).as_sql(compiler, connection)
setattr(MySum, 'as_' + connection.vendor, lower_case_function_super)
qs = Book.objects.annotate(
sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
)
self.assertEqual(str(qs.query).count('sum('), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
# test overriding all parts of the template
def be_evil(self, compiler, connection):
substitutions = dict(function='MAX', expressions='2')
substitutions.update(self.extra)
return self.template % substitutions, ()
setattr(MySum, 'as_' + connection.vendor, be_evil)
qs = Book.objects.annotate(
sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
)
self.assertEqual(str(qs.query).count('MAX('), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 2)
def test_complex_values_aggregation(self):
max_rating = Book.objects.values('rating').aggregate(
double_max_rating=Max('rating') + Max('rating'))
self.assertEqual(max_rating['double_max_rating'], 5 * 2)
max_books_per_rating = Book.objects.values('rating').annotate(
books_per_rating=Count('id') + 5
).aggregate(Max('books_per_rating'))
self.assertEqual(
max_books_per_rating,
{'books_per_rating__max': 3 + 5})
def test_expression_on_aggregation(self):
# Create a plain expression
class Greatest(Func):
function = 'GREATEST'
def as_sqlite(self, compiler, connection):
return super(Greatest, self).as_sql(compiler, connection, function='MAX')
qs = Publisher.objects.annotate(
price_or_median=Greatest(Avg('book__rating'), Avg('book__price'))
).filter(price_or_median__gte=F('num_awards')).order_by('num_awards')
self.assertQuerysetEqual(
qs, [1, 3, 7, 9], lambda v: v.num_awards)
qs2 = Publisher.objects.annotate(
rating_or_num_awards=Greatest(Avg('book__rating'), F('num_awards'),
output_field=FloatField())
).filter(rating_or_num_awards__gt=F('num_awards')).order_by('num_awards')
self.assertQuerysetEqual(
qs2, [1, 3], lambda v: v.num_awards)
|
bsd-3-clause
|
jkankiewicz/kivy
|
kivy/uix/rst.py
|
4
|
33744
|
'''
reStructuredText renderer
=========================
.. versionadded:: 1.1.0
`reStructuredText <http://docutils.sourceforge.net/rst.html>`_ is an
easy-to-read, what-you-see-is-what-you-get plaintext markup syntax and parser
system.
.. note::
This widget requires the ``docutils`` package to run. Install it with
``pip`` or include it as one of your deployment requirements.
.. warning::
This widget is highly experimental. The styling and implementation should
not be considered stable until this warning has been removed.
Usage with Text
---------------
::
text = """
.. _top:
Hello world
===========
    This is an **emphasized text**, some ``interpreted text``.
And this is a reference to top_::
$ print("Hello world")
"""
document = RstDocument(text=text)
The rendering will output:
.. image:: images/rstdocument.png
Usage with Source
-----------------
You can also render a rst file using the :attr:`~RstDocument.source` property::
document = RstDocument(source='index.rst')
You can reference other documents using the role ``:doc:``. For example, in the
document ``index.rst`` you can write::
Go to my next document: :doc:`moreinfo.rst`
It will generate a link that, when clicked, opens the ``moreinfo.rst``
document.
'''
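# A minimal hedged sketch of embedding the widget defined below in an App
# (assumes the ``docutils`` package is installed; the rst text is illustrative):
#
#     from kivy.app import App
#     from kivy.uix.rst import RstDocument
#
#     class MiniRstApp(App):
#         def build(self):
#             return RstDocument(text="Hello\n=====\n\nSome **bold** and ``literal`` text.")
#
#     MiniRstApp().run()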
__all__ = ('RstDocument', )
import os
from os.path import dirname, join, exists, abspath
from kivy.clock import Clock
from kivy.compat import PY2
from kivy.properties import ObjectProperty, NumericProperty, \
DictProperty, ListProperty, StringProperty, \
BooleanProperty, OptionProperty, AliasProperty
from kivy.lang import Builder
from kivy.utils import get_hex_from_color, get_color_from_hex
from kivy.uix.widget import Widget
from kivy.uix.scrollview import ScrollView
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.image import AsyncImage, Image
from kivy.uix.videoplayer import VideoPlayer
from kivy.uix.anchorlayout import AnchorLayout
from kivy.animation import Animation
from kivy.logger import Logger
from docutils.parsers import rst
from docutils.parsers.rst import roles
from docutils import nodes, frontend, utils
from docutils.parsers.rst import Directive, directives
from docutils.parsers.rst.roles import set_classes
#
# Handle some additional roles
#
if 'KIVY_DOC' not in os.environ:
class role_doc(nodes.Inline, nodes.TextElement):
pass
class role_video(nodes.General, nodes.TextElement):
pass
class VideoDirective(Directive):
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'width': directives.nonnegative_int,
'height': directives.nonnegative_int}
def run(self):
set_classes(self.options)
node = role_video(source=self.arguments[0], **self.options)
return [node]
generic_docroles = {
'doc': role_doc}
for rolename, nodeclass in generic_docroles.items():
generic = roles.GenericRole(rolename, nodeclass)
role = roles.CustomRole(rolename, generic, {'classes': [rolename]})
roles.register_local_role(rolename, role)
directives.register_directive('video', VideoDirective)
Builder.load_string('''
#:import parse_color kivy.parser.parse_color
<RstDocument>:
content: content
scatter: scatter
do_scroll_x: False
canvas.before:
Color:
rgba: parse_color(root.colors['background'])
Rectangle:
pos: self.pos
size: self.size
Scatter:
id: scatter
size_hint_y: None
height: content.minimum_height
width: root.width
scale: 1
do_translation: False, False
do_scale: False
do_rotation: False
GridLayout:
id: content
cols: 1
height: self.minimum_height
width: root.width
padding: 10
<RstTitle>:
markup: True
valign: 'top'
font_size:
sp(self.document.base_font_size - self.section * (
self.document.base_font_size / 31.0 * 2))
size_hint_y: None
height: self.texture_size[1] + dp(20)
text_size: self.width, None
bold: True
canvas:
Color:
rgba: parse_color(self.document.underline_color)
Rectangle:
pos: self.x, self.y + 5
size: self.width, 1
<RstParagraph>:
markup: True
valign: 'top'
size_hint_y: None
height: self.texture_size[1] + self.my
text_size: self.width - self.mx, None
font_size: sp(self.document.base_font_size / 2.0)
<RstTerm>:
size_hint: None, None
height: label.height
anchor_x: 'left'
Label:
id: label
text: root.text
markup: True
valign: 'top'
size_hint: None, None
size: self.texture_size[0] + dp(10), self.texture_size[1] + dp(10)
font_size: sp(root.document.base_font_size / 2.0)
<RstBlockQuote>:
cols: 2
content: content
size_hint_y: None
height: content.height
Widget:
size_hint_x: None
width: 20
GridLayout:
id: content
cols: 1
size_hint_y: None
height: self.minimum_height
<RstLiteralBlock>:
cols: 1
content: content
size_hint_y: None
height: content.texture_size[1] + dp(20)
canvas:
Color:
rgb: parse_color('#cccccc')
Rectangle:
pos: self.x - 1, self.y - 1
size: self.width + 2, self.height + 2
Color:
rgb: parse_color('#eeeeee')
Rectangle:
pos: self.pos
size: self.size
Label:
id: content
markup: True
valign: 'top'
text_size: self.width - 20, None
font_name: 'data/fonts/RobotoMono-Regular.ttf'
color: (0, 0, 0, 1)
<RstList>:
cols: 2
size_hint_y: None
height: self.minimum_height
<RstListItem>:
cols: 1
size_hint_y: None
height: self.minimum_height
<RstSystemMessage>:
cols: 1
size_hint_y: None
height: self.minimum_height
canvas:
Color:
rgba: 1, 0, 0, .3
Rectangle:
pos: self.pos
size: self.size
<RstWarning>:
content: content
cols: 1
padding: 20
size_hint_y: None
height: self.minimum_height
canvas:
Color:
rgba: 1, 0, 0, .5
Rectangle:
pos: self.x + 10, self.y + 10
size: self.width - 20, self.height - 20
GridLayout:
cols: 1
id: content
size_hint_y: None
height: self.minimum_height
<RstNote>:
content: content
cols: 1
padding: 20
size_hint_y: None
height: self.minimum_height
canvas:
Color:
rgba: 0, 1, 0, .5
Rectangle:
pos: self.x + 10, self.y + 10
size: self.width - 20, self.height - 20
GridLayout:
cols: 1
id: content
size_hint_y: None
height: self.minimum_height
<RstImage>:
size_hint: None, None
size: self.texture_size[0], self.texture_size[1] + dp(10)
<RstAsyncImage>:
size_hint: None, None
size: self.texture_size[0], self.texture_size[1] + dp(10)
<RstDefinitionList>:
cols: 1
size_hint_y: None
height: self.minimum_height
font_size: sp(self.document.base_font_size / 2.0)
<RstDefinition>:
cols: 2
size_hint_y: None
height: self.minimum_height
font_size: sp(self.document.base_font_size / 2.0)
<RstFieldList>:
cols: 2
size_hint_y: None
height: self.minimum_height
<RstFieldName>:
markup: True
valign: 'top'
size_hint: 0.2, 1
color: (0, 0, 0, 1)
bold: True
text_size: self.width-10, self.height - 10
valign: 'top'
font_size: sp(self.document.base_font_size / 2.0)
<RstFieldBody>:
cols: 1
size_hint_y: None
height: self.minimum_height
<RstTable>:
size_hint_y: None
height: self.minimum_height
<RstEntry>:
cols: 1
size_hint_y: None
height: self.minimum_height
canvas:
Color:
rgb: .2, .2, .2
Line:
points: [\
self.x,\
self.y,\
self.right,\
self.y,\
self.right,\
self.top,\
self.x,\
self.top,\
self.x,\
self.y]
<RstTransition>:
size_hint_y: None
height: 20
canvas:
Color:
rgb: .2, .2, .2
Line:
points: [self.x, self.center_y, self.right, self.center_y]
<RstListBullet>:
markup: True
valign: 'top'
size_hint_x: None
width: self.texture_size[0] + dp(10)
text_size: None, self.height - dp(10)
font_size: sp(self.document.base_font_size / 2.0)
<RstEmptySpace>:
size_hint: 0.01, 0.01
<RstDefinitionSpace>:
size_hint: None, 0.1
width: 50
font_size: sp(self.document.base_font_size / 2.0)
<RstVideoPlayer>:
options: {'allow_stretch': True}
canvas.before:
Color:
rgba: (1, 1, 1, 1)
BorderImage:
source: 'atlas://data/images/defaulttheme/player-background'
pos: self.x - 25, self.y - 25
size: self.width + 50, self.height + 50
border: (25, 25, 25, 25)
''')
class RstVideoPlayer(VideoPlayer):
pass
class RstDocument(ScrollView):
'''Base widget used to store an Rst document. See module documentation for
more information.
'''
source = StringProperty(None)
'''Filename of the RST document.
:attr:`source` is a :class:`~kivy.properties.StringProperty` and
defaults to None.
'''
source_encoding = StringProperty('utf-8')
'''Encoding to be used for the :attr:`source` file.
:attr:`source_encoding` is a :class:`~kivy.properties.StringProperty` and
defaults to `utf-8`.
.. Note::
It is your responsibility to ensure that the value provided is a
valid codec supported by python.
'''
    source_error = OptionProperty('strict',
                                  options=('strict', 'ignore', 'replace',
                                           'xmlcharrefreplace',
                                           'backslashreplace'))
    '''Error handling to be used while decoding the :attr:`source` file.
    :attr:`source_error` is an :class:`~kivy.properties.OptionProperty` and
    defaults to `strict`. Can be one of 'strict', 'ignore', 'replace',
    'xmlcharrefreplace' or 'backslashreplace'.
'''
text = StringProperty(None)
'''RST markup text of the document.
:attr:`text` is a :class:`~kivy.properties.StringProperty` and defaults to
None.
'''
document_root = StringProperty(None)
'''Root path where :doc: will search for rst documents. If no path is
given, it will use the directory of the first loaded source file.
:attr:`document_root` is a :class:`~kivy.properties.StringProperty` and
defaults to None.
'''
base_font_size = NumericProperty(31)
'''Font size for the biggest title, 31 by default. All other font sizes are
derived from this.
.. versionadded:: 1.8.0
'''
show_errors = BooleanProperty(False)
    '''Indicate whether RST parser errors should be shown on the screen
or not.
:attr:`show_errors` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
def _get_bgc(self):
return get_color_from_hex(self.colors.background)
def _set_bgc(self, value):
self.colors.background = get_hex_from_color(value)[1:]
background_color = AliasProperty(_get_bgc, _set_bgc, bind=('colors',))
'''Specifies the background_color to be used for the RstDocument.
.. versionadded:: 1.8.0
:attr:`background_color` is an :class:`~kivy.properties.AliasProperty`
for colors['background'].
'''
colors = DictProperty({
'background': 'e5e6e9ff',
'link': 'ce5c00ff',
'paragraph': '202020ff',
'title': '204a87ff',
'bullet': '000000ff'})
'''Dictionary of all the colors used in the RST rendering.
.. warning::
        This dictionary needs special handling. You also need to call
        :meth:`RstDocument.render` if you change its values after loading.
:attr:`colors` is a :class:`~kivy.properties.DictProperty`.
'''
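    # A hedged sketch: given an already loaded RstDocument instance ``doc``, a
    # color change (the hex value below is illustrative) only shows up after a
    # re-render:
    #     doc.colors['link'] = '00ff00ff'
    #     doc.render()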
title = StringProperty('')
'''Title of the current document.
:attr:`title` is a :class:`~kivy.properties.StringProperty` and defaults to
''. It is read-only.
'''
toctrees = DictProperty({})
'''Toctree of all loaded or preloaded documents. This dictionary is filled
    when an rst document is explicitly loaded or when :meth:`preload` has been
called.
If the document has no filename, e.g. when the document is loaded from a
text file, the key will be ''.
:attr:`toctrees` is a :class:`~kivy.properties.DictProperty` and defaults
to {}.
'''
underline_color = StringProperty('204a9699')
    '''Underline color of the titles, expressed in HTML color notation.
:attr:`underline_color` is a
:class:`~kivy.properties.StringProperty` and defaults to '204a9699'.
    .. versionadded:: 1.9.0
'''
# internals.
content = ObjectProperty(None)
scatter = ObjectProperty(None)
anchors_widgets = ListProperty([])
refs_assoc = DictProperty({})
def __init__(self, **kwargs):
self._trigger_load = Clock.create_trigger(self._load_from_text, -1)
self._parser = rst.Parser()
self._settings = frontend.OptionParser(
components=(rst.Parser, )).get_default_values()
super(RstDocument, self).__init__(**kwargs)
def on_source(self, instance, value):
if not value:
return
if self.document_root is None:
# set the documentation root to the directory name of the
            # first file
self.document_root = abspath(dirname(value))
self._load_from_source()
def on_text(self, instance, value):
self._trigger_load()
def render(self):
'''Force document rendering.
'''
self._load_from_text()
def resolve_path(self, filename):
'''Get the path for this filename. If the filename doesn't exist,
it returns the document_root + filename.
'''
if exists(filename):
return filename
return join(self.document_root, filename)
def preload(self, filename, encoding='utf-8', errors='strict'):
'''Preload a rst file to get its toctree and its title.
The result will be stored in :attr:`toctrees` with the ``filename`` as
key.
'''
with open(filename, 'rb') as fd:
text = fd.read().decode(encoding, errors)
# parse the source
document = utils.new_document('Document', self._settings)
self._parser.parse(text, document)
# fill the current document node
visitor = _ToctreeVisitor(document)
document.walkabout(visitor)
self.toctrees[filename] = visitor.toctree
return text
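    # A hedged sketch (the filename is illustrative): after doc.preload('guide.rst'),
    # doc.toctrees['guide.rst'] holds the parsed toctree as a list of section
    # dicts of the form {'ids': [...], 'names': [...], 'title': ..., 'children': [...]}.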
def _load_from_source(self):
filename = self.resolve_path(self.source)
self.text = self.preload(filename,
self.source_encoding,
self.source_error)
def _load_from_text(self, *largs):
try:
# clear the current widgets
self.content.clear_widgets()
self.anchors_widgets = []
self.refs_assoc = {}
# parse the source
document = utils.new_document('Document', self._settings)
text = self.text
if PY2 and type(text) is str:
text = text.decode('utf-8')
self._parser.parse(text, document)
# fill the current document node
visitor = _Visitor(self, document)
document.walkabout(visitor)
self.title = visitor.title or 'No title'
except:
Logger.exception('Rst: error while loading text')
def on_ref_press(self, node, ref):
self.goto(ref)
def goto(self, ref, *largs):
'''Scroll to the reference. If it's not found, nothing will be done.
For this text::
.. _myref:
This is something I always wanted.
You can do::
from kivy.clock import Clock
from functools import partial
doc = RstDocument(...)
Clock.schedule_once(partial(doc.goto, 'myref'), 0.1)
.. note::
It is preferable to delay the call of the goto if you just loaded
the document because the layout might not be finished or the
size of the RstDocument has not yet been determined. In
either case, the calculation of the scrolling would be
wrong.
You can, however, do a direct call if the document is already
loaded.
.. versionadded:: 1.3.0
'''
# check if it's a file ?
if ref.endswith('.rst'):
# whether it's a valid or invalid file, let source deal with it
self.source = ref
return
# get the association
ref = self.refs_assoc.get(ref, ref)
# search into all the nodes containing anchors
ax = ay = None
for node in self.anchors_widgets:
if ref in node.anchors:
ax, ay = node.anchors[ref]
break
# not found, stop here
if ax is None:
return
# found, calculate the real coordinate
# get the anchor coordinate inside widget space
ax += node.x
ay = node.top - ay
# ay += node.y
# what's the current coordinate for us?
sx, sy = self.scatter.x, self.scatter.top
# ax, ay = self.scatter.to_parent(ax, ay)
ay -= self.height
dx, dy = self.convert_distance_to_scroll(0, ay)
dy = max(0, min(1, dy))
Animation(scroll_y=dy, d=.25, t='in_out_expo').start(self)
def add_anchors(self, node):
self.anchors_widgets.append(node)
class RstTitle(Label):
section = NumericProperty(0)
document = ObjectProperty(None)
class RstParagraph(Label):
mx = NumericProperty(10)
my = NumericProperty(10)
document = ObjectProperty(None)
class RstTerm(AnchorLayout):
text = StringProperty('')
document = ObjectProperty(None)
class RstBlockQuote(GridLayout):
content = ObjectProperty(None)
class RstLiteralBlock(GridLayout):
content = ObjectProperty(None)
class RstList(GridLayout):
pass
class RstListItem(GridLayout):
content = ObjectProperty(None)
class RstListBullet(Label):
document = ObjectProperty(None)
class RstSystemMessage(GridLayout):
pass
class RstWarning(GridLayout):
content = ObjectProperty(None)
class RstNote(GridLayout):
content = ObjectProperty(None)
class RstImage(Image):
pass
class RstAsyncImage(AsyncImage):
pass
class RstDefinitionList(GridLayout):
document = ObjectProperty(None)
class RstDefinition(GridLayout):
document = ObjectProperty(None)
class RstFieldList(GridLayout):
pass
class RstFieldName(Label):
document = ObjectProperty(None)
class RstFieldBody(GridLayout):
pass
class RstGridLayout(GridLayout):
pass
class RstTable(GridLayout):
pass
class RstEntry(GridLayout):
pass
class RstTransition(Widget):
pass
class RstEmptySpace(Widget):
pass
class RstDefinitionSpace(Widget):
document = ObjectProperty(None)
class _ToctreeVisitor(nodes.NodeVisitor):
def __init__(self, *largs):
self.toctree = self.current = []
self.queue = []
self.text = ''
nodes.NodeVisitor.__init__(self, *largs)
def push(self, tree):
self.queue.append(tree)
self.current = tree
def pop(self):
self.current = self.queue.pop()
def dispatch_visit(self, node):
cls = node.__class__
if cls is nodes.section:
section = {
'ids': node['ids'],
'names': node['names'],
'title': '',
'children': []}
if isinstance(self.current, dict):
self.current['children'].append(section)
else:
self.current.append(section)
self.push(section)
elif cls is nodes.title:
self.text = ''
elif cls is nodes.Text:
self.text += node
def dispatch_departure(self, node):
cls = node.__class__
if cls is nodes.section:
self.pop()
elif cls is nodes.title:
self.current['title'] = self.text
class _Visitor(nodes.NodeVisitor):
def __init__(self, root, *largs):
self.root = root
self.title = None
self.current_list = []
self.current = None
self.idx_list = None
self.text = ''
self.text_have_anchor = False
self.section = 0
self.do_strip_text = False
self.substitution = {}
nodes.NodeVisitor.__init__(self, *largs)
def push(self, widget):
self.current_list.append(self.current)
self.current = widget
def pop(self):
self.current = self.current_list.pop()
def dispatch_visit(self, node):
cls = node.__class__
if cls is nodes.document:
self.push(self.root.content)
elif cls is nodes.comment:
return
elif cls is nodes.section:
self.section += 1
elif cls is nodes.substitution_definition:
name = node.attributes['names'][0]
self.substitution[name] = node.children[0]
elif cls is nodes.substitution_reference:
node = self.substitution[node.attributes['refname']]
self.text += node
elif cls is nodes.title:
label = RstTitle(section=self.section, document=self.root)
self.current.add_widget(label)
self.push(label)
# assert(self.text == '')
elif cls is nodes.Text:
# check if parent isn't a special directive
if hasattr(node, 'parent'):
if node.parent.tagname == 'substitution_definition':
# .. |ref| replace:: something
return
elif node.parent.tagname == 'substitution_reference':
# |ref|
return
elif node.parent.tagname == 'comment':
# .. COMMENT
return
if self.do_strip_text:
node = node.replace('\n', ' ')
node = node.replace(' ', ' ')
node = node.replace('\t', ' ')
node = node.replace(' ', ' ')
if node.startswith(' '):
node = ' ' + node.lstrip(' ')
if node.endswith(' '):
node = node.rstrip(' ') + ' '
if self.text.endswith(' ') and node.startswith(' '):
node = node[1:]
self.text += node
elif cls is nodes.paragraph:
self.do_strip_text = True
label = RstParagraph(document=self.root)
if isinstance(self.current, RstEntry):
label.mx = 10
self.current.add_widget(label)
self.push(label)
elif cls is nodes.literal_block:
box = RstLiteralBlock()
self.current.add_widget(box)
self.push(box)
elif cls is nodes.emphasis:
self.text += '[i]'
elif cls is nodes.strong:
self.text += '[b]'
elif cls is nodes.literal:
self.text += '[font=fonts/RobotoMono-Regular.ttf]'
elif cls is nodes.block_quote:
box = RstBlockQuote()
self.current.add_widget(box)
self.push(box.content)
assert(self.text == '')
elif cls is nodes.enumerated_list:
box = RstList()
self.current.add_widget(box)
self.push(box)
self.idx_list = 0
elif cls is nodes.bullet_list:
box = RstList()
self.current.add_widget(box)
self.push(box)
self.idx_list = None
elif cls is nodes.list_item:
bullet = '-'
if self.idx_list is not None:
self.idx_list += 1
bullet = '%d.' % self.idx_list
bullet = self.colorize(bullet, 'bullet')
item = RstListItem()
self.current.add_widget(RstListBullet(
text=bullet, document=self.root))
self.current.add_widget(item)
self.push(item)
elif cls is nodes.system_message:
label = RstSystemMessage()
if self.root.show_errors:
self.current.add_widget(label)
self.push(label)
elif cls is nodes.warning:
label = RstWarning()
self.current.add_widget(label)
self.push(label.content)
assert(self.text == '')
elif cls is nodes.note:
label = RstNote()
self.current.add_widget(label)
self.push(label.content)
assert(self.text == '')
elif cls is nodes.image:
# docutils parser breaks path with spaces
# e.g. "C:/my path" -> "C:/mypath"
uri = node['uri']
align = node.get('align', 'center')
image_size = [
node.get('width'),
node.get('height')
]
# use user's size if defined
def set_size(img, size):
img.size = [
size[0] or img.width,
size[1] or img.height
]
if uri.startswith('/') and self.root.document_root:
uri = join(self.root.document_root, uri[1:])
if uri.startswith('http://') or uri.startswith('https://'):
image = RstAsyncImage(source=uri)
image.bind(on_load=lambda *a: set_size(image, image_size))
else:
image = RstImage(source=uri)
set_size(image, image_size)
root = AnchorLayout(
size_hint_y=None,
anchor_x=align,
height=image.height
)
image.bind(height=root.setter('height'))
root.add_widget(image)
self.current.add_widget(root)
elif cls is nodes.definition_list:
lst = RstDefinitionList(document=self.root)
self.current.add_widget(lst)
self.push(lst)
elif cls is nodes.term:
assert(isinstance(self.current, RstDefinitionList))
term = RstTerm(document=self.root)
self.current.add_widget(term)
self.push(term)
elif cls is nodes.definition:
assert(isinstance(self.current, RstDefinitionList))
definition = RstDefinition(document=self.root)
definition.add_widget(RstDefinitionSpace(document=self.root))
self.current.add_widget(definition)
self.push(definition)
elif cls is nodes.field_list:
fieldlist = RstFieldList()
self.current.add_widget(fieldlist)
self.push(fieldlist)
elif cls is nodes.field_name:
name = RstFieldName(document=self.root)
self.current.add_widget(name)
self.push(name)
elif cls is nodes.field_body:
body = RstFieldBody()
self.current.add_widget(body)
self.push(body)
elif cls is nodes.table:
table = RstTable(cols=0)
self.current.add_widget(table)
self.push(table)
elif cls is nodes.colspec:
self.current.cols += 1
elif cls is nodes.entry:
entry = RstEntry()
self.current.add_widget(entry)
self.push(entry)
elif cls is nodes.transition:
self.current.add_widget(RstTransition())
elif cls is nodes.reference:
name = node.get('name', node.get('refuri'))
self.text += '[ref=%s][color=%s]' % (
name, self.root.colors.get(
'link', self.root.colors.get('paragraph')))
if 'refname' in node and 'name' in node:
self.root.refs_assoc[node['name']] = node['refname']
elif cls is nodes.target:
name = None
if 'ids' in node:
name = node['ids'][0]
elif 'names' in node:
name = node['names'][0]
self.text += '[anchor=%s]' % name
self.text_have_anchor = True
elif cls is role_doc:
self.doc_index = len(self.text)
elif cls is role_video:
pass
def dispatch_departure(self, node):
cls = node.__class__
if cls is nodes.document:
self.pop()
elif cls is nodes.section:
self.section -= 1
elif cls is nodes.title:
assert(isinstance(self.current, RstTitle))
if not self.title:
self.title = self.text
self.set_text(self.current, 'title')
self.pop()
elif cls is nodes.Text:
pass
elif cls is nodes.paragraph:
self.do_strip_text = False
assert(isinstance(self.current, RstParagraph))
self.set_text(self.current, 'paragraph')
self.pop()
elif cls is nodes.literal_block:
assert(isinstance(self.current, RstLiteralBlock))
self.set_text(self.current.content, 'literal_block')
self.pop()
elif cls is nodes.emphasis:
self.text += '[/i]'
elif cls is nodes.strong:
self.text += '[/b]'
elif cls is nodes.literal:
self.text += '[/font]'
elif cls is nodes.block_quote:
self.pop()
elif cls is nodes.enumerated_list:
self.idx_list = None
self.pop()
elif cls is nodes.bullet_list:
self.pop()
elif cls is nodes.list_item:
self.pop()
elif cls is nodes.system_message:
self.pop()
elif cls is nodes.warning:
self.pop()
elif cls is nodes.note:
self.pop()
elif cls is nodes.definition_list:
self.pop()
elif cls is nodes.term:
assert(isinstance(self.current, RstTerm))
self.set_text(self.current, 'term')
self.pop()
elif cls is nodes.definition:
self.pop()
elif cls is nodes.field_list:
self.pop()
elif cls is nodes.field_name:
assert(isinstance(self.current, RstFieldName))
self.set_text(self.current, 'field_name')
self.pop()
elif cls is nodes.field_body:
self.pop()
elif cls is nodes.table:
self.pop()
elif cls is nodes.colspec:
pass
elif cls is nodes.entry:
self.pop()
elif cls is nodes.reference:
self.text += '[/color][/ref]'
elif cls is role_doc:
docname = self.text[self.doc_index:]
rst_docname = docname
if rst_docname.endswith('.rst'):
docname = docname[:-4]
else:
rst_docname += '.rst'
# try to preload it
filename = self.root.resolve_path(rst_docname)
self.root.preload(filename)
# if exist, use the title of the first section found in the
# document
title = docname
if filename in self.root.toctrees:
toctree = self.root.toctrees[filename]
if len(toctree):
title = toctree[0]['title']
# replace the text with a good reference
text = '[ref=%s]%s[/ref]' % (
rst_docname,
self.colorize(title, 'link'))
self.text = self.text[:self.doc_index] + text
elif cls is role_video:
width = node['width'] if 'width' in node.attlist() else 400
height = node['height'] if 'height' in node.attlist() else 300
uri = node['source']
if uri.startswith('/') and self.root.document_root:
uri = join(self.root.document_root, uri[1:])
video = RstVideoPlayer(
source=uri,
size_hint=(None, None),
size=(width, height))
anchor = AnchorLayout(size_hint_y=None, height=height + 20)
anchor.add_widget(video)
self.current.add_widget(anchor)
def set_text(self, node, parent):
text = self.text
if parent == 'term' or parent == 'field_name':
text = '[b]%s[/b]' % text
# search anchors
node.text = self.colorize(text, parent)
node.bind(on_ref_press=self.root.on_ref_press)
if self.text_have_anchor:
self.root.add_anchors(node)
self.text = ''
self.text_have_anchor = False
def colorize(self, text, name):
return '[color=%s]%s[/color]' % (
self.root.colors.get(name, self.root.colors['paragraph']),
text)
if __name__ == '__main__':
from kivy.base import runTouchApp
import sys
runTouchApp(RstDocument(source=sys.argv[1]))
|
mit
|
suto/infernal-twin
|
build/reportlab/src/reportlab/pdfbase/pdfpattern.py
|
34
|
3771
|
__doc__="""helper for importing pdf structures into a ReportLab generated document
"""
from reportlab.pdfbase.pdfdoc import format, PDFObject, pdfdocEnc
from reportlab.lib.utils import strTypes
def _patternSequenceCheck(pattern_sequence):
allowedTypes = strTypes if isinstance(strTypes, tuple) else (strTypes,)
allowedTypes = allowedTypes + (PDFObject,PDFPatternIf)
for x in pattern_sequence:
if not isinstance(x,allowedTypes):
if len(x)!=1:
raise ValueError("sequence elts must be strings/bytes/PDFPatternIfs or singletons containing strings: "+ascii(x))
if not isinstance(x[0],strTypes):
raise ValueError("Singletons must contain strings/bytes or PDFObject instances only: "+ascii(x[0]))
class PDFPattern(PDFObject):
__RefOnly__ = 1
def __init__(self, pattern_sequence, **keywordargs):
"""
Description of a kind of PDF object using a pattern.
Pattern sequence should contain strings, singletons of form [string] or
PDFPatternIf objects.
Strings are literal strings to be used in the object.
Singletons are names of keyword arguments to include.
        PDFPatternIf objects allow some conditionality.
Keyword arguments can be non-instances which are substituted directly in string conversion,
or they can be object instances in which case they should be pdfdoc.* style
objects with a x.format(doc) method.
Keyword arguments may be set on initialization or subsequently using __setitem__, before format.
"constant object" instances can also be inserted in the patterns.
"""
_patternSequenceCheck(pattern_sequence)
self.pattern = pattern_sequence
self.arguments = keywordargs
def __setitem__(self, item, value):
self.arguments[item] = value
def __getitem__(self, item):
return self.arguments[item]
def eval(self,L):
arguments = self.arguments
document = self.__document
for x in L:
if isinstance(x,strTypes):
yield pdfdocEnc(x)
elif isinstance(x,PDFObject):
yield x.format(document)
elif isinstance(x,PDFPatternIf):
result = list(self.eval(x.cond))
cond = result and result[0]
for z in self.eval(x.thenPart if cond else x.elsePart):
yield z
else:
name = x[0]
value = arguments.get(name, None)
if value is None:
raise ValueError("%s value not defined" % ascii(name))
if isinstance(value,PDFObject):
yield format(value,document)
elif isinstance(value,strTypes):
yield pdfdocEnc(value)
else:
yield pdfdocEnc(str(value))
def format(self, document):
self.__document = document
try:
return b"".join(self.eval(self.pattern))
finally:
del self.__document
def clone(self):
c = object.__new__(self.__class__)
c.pattern = self.pattern
c.arguments = self.arguments
return c
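# A hedged usage sketch of PDFPattern (the pattern and values are illustrative,
# not taken from a real ReportLab document):
#
#     pat = PDFPattern(["<< /Type /Catalog /Pages ", ["pages"], " >>"], pages="3 0 R")
#     pat["pages"] = "4 0 R"   # arguments may also be (re)set after construction
#     # pat.format(doc) joins the literal strings with the substituted values,
#     # producing b"<< /Type /Catalog /Pages 4 0 R >>" for a pdfdoc document `doc`.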
class PDFPatternIf(object):
    '''cond will be evaluated as [cond] in PDFPattern eval.
    It should evaluate to a list whose first value is used as the 0/1 truth value.
    thenPart is a list to be evaluated if the cond evaluates true,
    elsePart is the false sequence.
'''
def __init__(self,cond,thenPart=[],elsePart=[]):
if not isinstance(cond,list): cond = [cond]
for x in cond, thenPart, elsePart:
_patternSequenceCheck(x)
self.cond = cond
self.thenPart = thenPart
self.elsePart = elsePart
|
gpl-3.0
|
andreoliw/clitoolkit
|
clit/db.py
|
1
|
5596
|
"""Database module."""
import argparse
from pathlib import Path
from subprocess import PIPE
from typing import List, Optional
from clit.docker import DockerContainer
from clit.files import existing_directory_type, existing_file_type, shell
POSTGRES_DOCKER_CONTAINER_NAME = "postgres10"
class DatabaseServer:
"""A database server URI parser."""
uri: str
protocol: str
user: Optional[str]
password: Optional[str]
server: str
port: Optional[int]
def __init__(self, uri):
"""Parser the server URI and extract needed parts."""
self.uri = uri
protocol_user_password, server_port = uri.split("@")
self.protocol, user_password = protocol_user_password.split("://")
if ":" in user_password:
self.user, self.password = user_password.split(":")
else:
self.user, self.password = None, None
if ":" in server_port:
self.server, self.port = server_port.split(":")
self.port = int(self.port)
else:
self.server, self.port = server_port, None
@property
def uri_without_port(self):
"""Return the URI without the port."""
parts = self.uri.split(":")
if len(parts) != 4:
            # Return the unmodified URI if we don't have a port.
return self.uri
return ":".join(parts[:-1])
class PostgreSQLServer(DatabaseServer):
"""A PostgreSQL database server URI parser and more stuff."""
databases: List[str] = []
inside_docker = False
psql: str = ""
pg_dump: str = ""
def __init__(self, *args, **kwargs):
"""Determine which psql executable exists on this machine."""
super().__init__(*args, **kwargs)
self.psql = shell("which psql", quiet=True, return_lines=True)[0]
if not self.psql:
self.psql = "psql_docker"
self.inside_docker = True
self.pg_dump = shell("which pg_dump", quiet=True, return_lines=True)[0]
if not self.pg_dump:
self.pg_dump = "pg_dump_docker"
self.inside_docker = True
@property
def docker_uri(self):
"""Return a URI without port if we are inside Docker."""
return self.uri_without_port if self.inside_docker else self.uri
def list_databases(self) -> "PostgreSQLServer":
"""List databases."""
process = shell(
f"{self.psql} -c 'SELECT datname FROM pg_database WHERE datistemplate = false' "
f"--tuples-only {self.docker_uri}",
quiet=True,
stdout=PIPE,
)
if process.returncode:
print(f"Error while listing databases.\nstdout={process.stdout}\nstderr={process.stderr}")
exit(10)
self.databases = sorted(db.strip() for db in process.stdout.strip().split())
return self
def backup(parser, args):
"""Backup PostgreSQL databases."""
pg = PostgreSQLServer(args.server_uri).list_databases()
container = DockerContainer(POSTGRES_DOCKER_CONTAINER_NAME)
for database in pg.databases:
sql_file: Path = Path(args.backup_dir) / f"{pg.protocol}_{pg.server}_{pg.port}" / f"{database}.sql"
sql_file.parent.mkdir(parents=True, exist_ok=True)
if pg.inside_docker:
sql_file = container.replace_mount_dir(sql_file)
shell(f"{pg.pg_dump} --clean --create --if-exists --file={sql_file} {pg.docker_uri}/{database}")
def restore(parser, args):
"""Restore PostgreSQL databases."""
pg = PostgreSQLServer(args.server_uri).list_databases()
new_database = args.database_name or args.sql_file.stem
if new_database in pg.databases:
print(f"The database {new_database!r} already exists in the server. Provide a new database name.")
exit(1)
if new_database != args.sql_file.stem:
# TODO Optional argument --owner to set the database owner
print(f"TODO: Create a user named {new_database!r} if it doesn't exist (or raise an error)")
print(f"TODO: Parse the .sql file and replace DATABASE/OWNER {args.sql_file.stem!r} by {new_database!r}")
exit(2)
shell(f"{pg.psql} {args.server_uri} < {args.sql_file}")
# TODO: Convert to click
def xpostgres():
"""Extra PostgreSQL tools like backup, restore, user creation, etc."""
parser = argparse.ArgumentParser(description="PostgreSQL helper tools")
parser.add_argument("server_uri", help="database server URI (postgresql://user:password@server:port)")
parser.set_defaults(chosen_function=None)
subparsers = parser.add_subparsers(title="commands")
parser_backup = subparsers.add_parser("backup", help="backup a PostgreSQL database to a SQL file")
parser_backup.add_argument("backup_dir", type=existing_directory_type, help="directory to store the backups")
parser_backup.set_defaults(chosen_function=backup)
parser_restore = subparsers.add_parser("restore", help="restore a PostgreSQL database from a SQL file")
parser_restore.add_argument(
"sql_file", type=existing_file_type, help="full path of the .sql file created by the 'backup' command"
)
parser_restore.add_argument("database_name", nargs="?", help="database name (default: basename of .sql file)")
parser_restore.set_defaults(chosen_function=restore)
# TODO Subcommand create-user new-user-name or alias user new-user-name to create a new user
# TODO xpostgres user myuser [mypass]
args = parser.parse_args()
if not args.chosen_function:
parser.print_help()
return
args.chosen_function(parser, args)
return
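# Hypothetical command-line usage, assuming xpostgres() is exposed as a console
# script of the same name (URIs and paths are illustrative only):
#
#     xpostgres postgresql://user:pass@localhost:5432 backup /backups
#     xpostgres postgresql://user:pass@localhost:5432 restore /backups/postgresql_localhost_5432/mydb.sql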
|
bsd-3-clause
|
mihailignatenko/erp
|
addons/mrp/product.py
|
131
|
4440
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class product_template(osv.osv):
_inherit = "product.template"
def _bom_orders_count(self, cr, uid, ids, field_name, arg, context=None):
Bom = self.pool('mrp.bom')
res = {}
for product_tmpl_id in ids:
nb = Bom.search_count(cr, uid, [('product_tmpl_id', '=', product_tmpl_id)], context=context)
res[product_tmpl_id] = {
'bom_count': nb,
}
return res
def _bom_orders_count_mo(self, cr, uid, ids, name, arg, context=None):
res = {}
for product_tmpl_id in self.browse(cr, uid, ids):
res[product_tmpl_id.id] = sum([p.mo_count for p in product_tmpl_id.product_variant_ids])
return res
_columns = {
'bom_ids': fields.one2many('mrp.bom', 'product_tmpl_id','Bill of Materials'),
'bom_count': fields.function(_bom_orders_count, string='# Bill of Material', type='integer', multi="_bom_order_count"),
'mo_count': fields.function(_bom_orders_count_mo, string='# Manufacturing Orders', type='integer'),
'produce_delay': fields.float('Manufacturing Lead Time', help="Average delay in days to produce this product. In the case of multi-level BOM, the manufacturing lead times of the components will be added."),
'track_production': fields.boolean('Track Manufacturing Lots', help="Forces to specify a Serial Number for all moves containing this product and generated by a Manufacturing Order"),
}
_defaults = {
'produce_delay': 1,
}
def action_view_mos(self, cr, uid, ids, context=None):
products = self._get_products(cr, uid, ids, context=context)
result = self._get_act_window_dict(cr, uid, 'mrp.act_product_mrp_production', context=context)
if len(ids) == 1 and len(products) == 1:
result['context'] = "{'default_product_id': " + str(products[0]) + ", 'search_default_product_id': " + str(products[0]) + "}"
else:
result['domain'] = "[('product_id','in',[" + ','.join(map(str, products)) + "])]"
result['context'] = "{}"
return result
class product_product(osv.osv):
_inherit = "product.product"
def _bom_orders_count(self, cr, uid, ids, field_name, arg, context=None):
Production = self.pool('mrp.production')
res = {}
for product_id in ids:
res[product_id] = Production.search_count(cr,uid, [('product_id', '=', product_id)], context=context)
return res
_columns = {
'mo_count': fields.function(_bom_orders_count, string='# Manufacturing Orders', type='integer'),
}
def action_view_bom(self, cr, uid, ids, context=None):
tmpl_obj = self.pool.get("product.template")
products = set()
for product in self.browse(cr, uid, ids, context=context):
products.add(product.product_tmpl_id.id)
result = tmpl_obj._get_act_window_dict(cr, uid, 'mrp.product_open_bom', context=context)
# bom specific to this variant or global to template
domain = [
'|',
('product_id', 'in', ids),
'&',
('product_id', '=', False),
('product_tmpl_id', 'in', list(products)),
]
result['context'] = "{}"
result['domain'] = str(domain)
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
nwjs/chromium.src
|
remoting/host/installer/build-installer-archive.py
|
5
|
8906
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a zip archive for the Chrome Remote Desktop Host installer.
This script builds a zip file that contains all the files needed to build an
installer for Chrome Remote Desktop Host.
This zip archive is then used by the signing bots to:
(1) Sign the binaries
(2) Build the final installer
TODO(garykac) We should consider merging this with build-webapp.py.
"""
import os
import shutil
import subprocess
import sys
import zipfile
sys.path.append(os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir, os.pardir,
"build", "android", "gyp"))
from util import build_utils
def cleanDir(dir):
"""Deletes and recreates the dir to make sure it is clean.
Args:
dir: The directory to clean.
"""
try:
shutil.rmtree(dir)
except OSError:
if os.path.exists(dir):
raise
else:
pass
os.makedirs(dir, 0775)
def buildDefDictionary(definitions):
"""Builds the definition dictionary from the VARIABLE=value array.
Args:
    definitions: Array of variable definitions: 'VARIABLE=value'.
Returns:
Dictionary with the definitions.
"""
defs = {}
for d in definitions:
(key, val) = d.split('=')
defs[key] = val
return defs
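# For example (illustrative values): buildDefDictionary(['VERSION=1.0', 'OFFICIAL=1'])
# returns {'VERSION': '1.0', 'OFFICIAL': '1'}.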
def remapSrcFile(dst_root, src_roots, src_file):
"""Calculates destination file path and creates directory.
Any matching |src_roots| prefix is stripped from |src_file| before
appending to |dst_root|.
For example, given:
dst_root = '/output'
src_roots = ['host/installer/mac']
src_file = 'host/installer/mac/Scripts/keystone_install.sh'
The final calculated path is:
'/output/Scripts/keystone_install.sh'
The |src_file| must match one of the |src_roots| prefixes. If there are no
matches, then an error is reported.
If multiple |src_roots| match, then only the first match is applied. Because
of this, if you have roots that share a common prefix, the longest string
should be first in this array.
Args:
dst_root: Target directory where files are copied.
    src_roots: Array of path prefixes which will be stripped off |src_file|
(if they match) before appending it to the |dst_root|.
src_file: Source file to be copied.
Returns:
Full path to destination file in |dst_root|.
"""
  # Strip off the directory prefix.
found_root = False
for root in src_roots:
root = os.path.normpath(root)
src_file = os.path.normpath(src_file)
if os.path.commonprefix([root, src_file]) == root:
src_file = os.path.relpath(src_file, root)
found_root = True
break
if not found_root:
error('Unable to match prefix for %s' % src_file)
dst_file = os.path.join(dst_root, src_file)
# Make sure target directory exists.
dst_dir = os.path.dirname(dst_file)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir, 0775)
return dst_file
def copyFileWithDefs(src_file, dst_file, defs):
"""Copies from src_file to dst_file, performing variable substitution.
Any @@VARIABLE@@ in the source is replaced with the value of VARIABLE
in the |defs| dictionary when written to the destination file.
Args:
src_file: Full or relative path to source file to copy.
dst_file: Relative path (and filename) where src_file should be copied.
defs: Dictionary of variable definitions.
"""
data = open(src_file, 'r').read()
for key, val in defs.iteritems():
try:
data = data.replace('@@' + key + '@@', val)
except TypeError:
print repr(key), repr(val)
open(dst_file, 'w').write(data)
shutil.copystat(src_file, dst_file)
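# Editor's note (illustration only, not part of the original script): assuming a
# defs dictionary of {'VERSION': '42.0'} (hypothetical value), a template line such as
#   CFBundleShortVersionString = '@@VERSION@@'
# is written to the destination file as
#   CFBundleShortVersionString = '42.0'
# Placeholders whose keys are absent from |defs| are left untouched.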
def copyZipIntoArchive(out_dir, files_root, zip_file):
"""Expands the zip_file into the out_dir, preserving the directory structure.
Args:
out_dir: Target directory where unzipped files are copied.
    files_root: Path prefix which is stripped off zip_file before appending
it to the out_dir.
zip_file: Relative path (and filename) to the zip file.
"""
base_zip_name = os.path.basename(zip_file)
# We don't use the 'zipfile' module here because it doesn't restore all the
# file permissions correctly. We use the 'unzip' command manually.
  old_dir = os.getcwd()
os.chdir(os.path.dirname(zip_file))
subprocess.call(['unzip', '-qq', '-o', base_zip_name])
os.chdir(old_dir)
# Unzip into correct dir in out_dir.
out_zip_path = remapSrcFile(out_dir, files_root, zip_file)
out_zip_dir = os.path.dirname(out_zip_path)
(src_dir, ignore1) = os.path.splitext(zip_file)
(base_dir_name, ignore2) = os.path.splitext(base_zip_name)
shutil.copytree(src_dir, os.path.join(out_zip_dir, base_dir_name))
def buildHostArchive(temp_dir, zip_path, source_file_roots, source_files,
gen_files, gen_files_dst, defs):
"""Builds a zip archive with the files needed to build the installer.
Args:
temp_dir: Temporary dir used to build up the contents for the archive.
zip_path: Full path to the zip file to create.
source_file_roots: Array of path prefixes to strip off |files| when adding
to the archive.
source_files: The array of files to add to archive. The path structure is
preserved (except for the |files_root| prefix).
gen_files: Full path to binaries to add to archive.
gen_files_dst: Relative path of where to add binary files in archive.
        This array needs to parallel |gen_files|.
defs: Dictionary of variable definitions.
"""
cleanDir(temp_dir)
for f in source_files:
dst_file = remapSrcFile(temp_dir, source_file_roots, f)
base_file = os.path.basename(f)
(base, ext) = os.path.splitext(f)
if ext == '.zip':
copyZipIntoArchive(temp_dir, source_file_roots, f)
elif ext in ['.packproj', '.pkgproj', '.plist', '.props', '.sh', '.json']:
copyFileWithDefs(f, dst_file, defs)
else:
shutil.copy2(f, dst_file)
for bs, bd in zip(gen_files, gen_files_dst):
dst_file = os.path.join(temp_dir, bd)
if not os.path.exists(os.path.dirname(dst_file)):
os.makedirs(os.path.dirname(dst_file))
if os.path.isdir(bs):
shutil.copytree(bs, dst_file)
else:
shutil.copy2(bs, dst_file)
build_utils.ZipDir(
zip_path, temp_dir,
compress_fn=lambda _: zipfile.ZIP_DEFLATED,
zip_prefix_path=os.path.splitext(os.path.basename(zip_path))[0])
def error(msg):
sys.stderr.write('ERROR: %s\n' % msg)
sys.exit(1)
def usage():
"""Display basic usage information."""
print ('Usage: %s\n'
' <temp-dir> <zip-path>\n'
' --source-file-roots <list of roots to strip off source files...>\n'
' --source-files <list of source files...>\n'
' --generated-files <list of generated target files...>\n'
' --generated-files-dst <dst for each generated file...>\n'
' --defs <list of VARIABLE=value definitions...>'
) % sys.argv[0]
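# Editor's note (hypothetical invocation, paths and values made up purely for
# illustration, matching the usage text above):
#   build-installer-archive.py out/installer-temp out/remoting-host.zip \
#       --source-file-roots remoting/host/installer/mac \
#       --source-files remoting/host/installer/mac/Scripts/keystone_install.sh \
#       --generated-files out/remoting_me2me_host.app \
#       --generated-files-dst PrivilegedHelperTools/remoting_me2me_host.app \
#       --defs VERSION=1.0.0.0 OFFICIAL_BUILD=1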
def main():
  if len(sys.argv) < 3:
usage()
error('Too few arguments')
temp_dir = sys.argv[1]
zip_path = sys.argv[2]
arg_mode = ''
source_file_roots = []
source_files = []
generated_files = []
generated_files_dst = []
definitions = []
for arg in sys.argv[3:]:
if arg == '--source-file-roots':
arg_mode = 'src-roots'
elif arg == '--source-files':
arg_mode = 'files'
elif arg == '--generated-files':
arg_mode = 'gen-src'
elif arg == '--generated-files-dst':
arg_mode = 'gen-dst'
elif arg == '--defs':
arg_mode = 'defs'
elif arg_mode == 'src-roots':
source_file_roots.append(arg)
elif arg_mode == 'files':
source_files.append(arg)
elif arg_mode == 'gen-src':
generated_files.append(arg)
elif arg_mode == 'gen-dst':
generated_files_dst.append(arg)
elif arg_mode == 'defs':
definitions.append(arg)
else:
usage()
error('Expected --source-files')
# Make sure at least one file was specified.
if len(source_files) == 0 and len(generated_files) == 0:
error('At least one input file must be specified.')
# Sort roots to ensure the longest one is first. See comment in remapSrcFile
# for why this is necessary.
source_file_roots = map(os.path.normpath, source_file_roots)
source_file_roots.sort(key=len, reverse=True)
# Verify that the 2 generated_files arrays have the same number of elements.
if len(generated_files) != len(generated_files_dst):
error('len(--generated-files) != len(--generated-files-dst)')
defs = buildDefDictionary(definitions)
result = buildHostArchive(temp_dir, zip_path, source_file_roots,
source_files, generated_files, generated_files_dst,
defs)
return 0
if __name__ == '__main__':
sys.exit(main())
|
bsd-3-clause
|
dusenberrymw/Pine-Data-Tools
|
lib/preprocess.py
|
1
|
2543
|
#! /usr/bin/env python3
"""
Preprocess csv data, line by line
-Reads from csv input_file, and writes to new csv output_file
Usage:
  preprocess.py <input_file.csv> <output_file.csv> [<headers_present = 0>]
[<skip_missing_or_incorrect_data = 1>] [<default_value = 0>]
"""
import csv
import sys
def process_csv(input_file, output_file, headers_present=0,
skip_missing_or_incorrect_data=1, default_value=0):
pos_values = ['y','yes','positive','abnormal','male','m','t']
neg_values = ['n','no','negative','normal','female','f','none']
with open(input_file) as i, open(output_file, 'w') as o:
reader = csv.reader(i)
writer = csv.writer(o)
if headers_present:
new_headers = process_headers(next(reader))
writer.writerow(new_headers)
for line in reader:
processed_line = process_line(line, pos_values, neg_values,
skip_missing_or_incorrect_data,
default_value)
if processed_line: # if list not empty
writer.writerow(processed_line)
def process_headers(headers):
new_headers = []
for item in headers:
item = (item.strip().replace(" ", "_").replace("|", "_")
.replace(":", "_").replace("?", ""))
new_headers.append(item)
return new_headers
def process_line(line, pos_values, neg_values, skip_missing_or_incorrect_data=1,
default_value=0):
processed_line = []
for item in line:
item = item.strip().lower()
if item in pos_values:
item = 1
elif item in neg_values:
item = 0
else:
            # this item is missing or not a recognized value
if skip_missing_or_incorrect_data:
# skip line
processed_line = []
break # return empty list
else:
item = default_value
processed_line.append(item)
return processed_line
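# Editor's note (illustrative behaviour only, values are hypothetical):
#   process_line(['Yes', ' no '], pos_values, neg_values)   # -> [1, 0]
#   process_line(['Yes', 'maybe'], pos_values, neg_values)  # -> [] (line skipped)
#   process_line(['Yes', 'maybe'], pos_values, neg_values,
#                skip_missing_or_incorrect_data=0, default_value=-1)  # -> [1, -1]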
if __name__ == '__main__':
input_file = sys.argv[1]
output_file = sys.argv[2]
headers_present = 0
skip_missing_or_incorrect_data = 1
default_value = 0
try:
headers_present = int(sys.argv[3])
skip_missing_or_incorrect_data = int(sys.argv[4])
default_value = int(sys.argv[5])
except IndexError:
pass
process_csv(input_file, output_file, headers_present,
skip_missing_or_incorrect_data, default_value)
|
mit
|
eastlhu/zulip
|
zerver/lib/response.py
|
124
|
1316
|
from __future__ import absolute_import
from django.http import HttpResponse, HttpResponseNotAllowed
import ujson
class HttpResponseUnauthorized(HttpResponse):
status_code = 401
def __init__(self, realm):
HttpResponse.__init__(self)
self["WWW-Authenticate"] = 'Basic realm="%s"' % (realm,)
def json_unauthorized(message):
resp = HttpResponseUnauthorized("zulip")
resp.content = ujson.dumps({"result": "error",
"msg": message}) + "\n"
return resp
def json_method_not_allowed(methods):
resp = HttpResponseNotAllowed(methods)
resp.content = ujson.dumps({"result": "error",
"msg": "Method Not Allowed",
"allowed_methods": methods})
return resp
def json_response(res_type="success", msg="", data={}, status=200):
content = {"result": res_type, "msg": msg}
content.update(data)
return HttpResponse(content=ujson.dumps(content) + "\n",
content_type='application/json', status=status)
def json_success(data={}):
return json_response(data=data)
def json_error(msg, data={}, status=400):
return json_response(res_type="error", msg=msg, data=data, status=status)
def json_unhandled_exception():
return json_response(res_type="error", msg="Internal server error", status=500)
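# Editor's note (usage sketch added for illustration, not part of the original
# module): the helpers above produce JSON bodies such as
#   json_success({"id": 42})         # 200, {"result": "success", "msg": "", "id": 42}
#   json_error("Malformed request")  # 400, {"result": "error", "msg": "Malformed request"}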
|
apache-2.0
|
ChenglongChen/caffe-windows
|
examples/kaggle-bowl/caffe_windows/utils/compute_logloss.py
|
1
|
1105
|
#!/usr/bin/env python
"""
@file compute_logloss.py
@brief compute logloss
@author ChenglongChen
"""
import sys
import numpy as np
import pandas as pd
def softmax(score):
num = score.shape[0]
maxes = np.amax(score, axis=1).reshape((num, 1))
e = np.exp(score - maxes)
prob = e / np.sum(e, axis=1).reshape((num, 1))
return prob
def computeLogloss(prob, label, eps=1e-15):
# clip
prob = np.clip(prob, eps, 1 - eps)
# normalization
prob /= prob.sum(axis=1)[:,np.newaxis]
p = prob[np.arange(len(label)),label.astype(int)]
loss = -np.mean(np.log(p))
return loss
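# Editor's note (worked example with made-up numbers, for illustration only):
# for a single sample with predicted probabilities [0.7, 0.2, 0.1] and true
# label 0, the row already sums to 1, so the loss is simply -log(0.7) ~= 0.357.
# The clipping with eps only matters when a probability is exactly 0 or 1,
# which would otherwise make the log blow up.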
def main():
list_file = sys.argv[1]
prob_file = sys.argv[2]
list_in = np.loadtxt(list_file, dtype=str)
true_label = np.asarray(list_in[:,1], dtype="int")
prob = pd.read_csv(prob_file, index_col=0).values
if len(sys.argv) == 4 and sys.argv[3] == "raw":
prob = softmax(prob)
#prob = 1e-5
#prob = sp.maximum(sp.minimum(prob, 1.0-eps), eps)
logloss = computeLogloss(prob, true_label)
    print(logloss)
if __name__ == "__main__":
main()
|
bsd-2-clause
|
KarrLab/Karr-Lab-build-utils
|
karr_lab_build_utils/templates/_package_/__main__.py
|
1
|
2009
|
""" {{ name }} command line interface
:Author: Name <email>
:Date: {{ date }}
:Copyright: {{ year }}, Karr Lab
:License: MIT
"""
import cement
import {{name}}
import {{name}}.core
class BaseController(cement.Controller):
""" Base controller for command line application """
class Meta:
label = 'base'
description = "{{ name }}"
arguments = [
(['-v', '--version'], dict(action='version', version={{ name }}.__version__)),
]
@cement.ex(help='command_1 description')
def cmd1(self):
""" command_1 description """
print('command_1 output')
@cement.ex(help='command_2 description')
def cmd2(self):
""" command_2 description """
print('command_2 output')
@cement.ex(hide=True)
def _default(self):
self._parser.print_help()
class Command3WithArgumentsController(cement.Controller):
""" Command3 description """
class Meta:
label = 'command-3'
description = 'Command3 description'
stacked_on = 'base'
stacked_type = 'nested'
arguments = [
(['arg_1'], dict(
type=str, help='Description of arg_1')),
(['arg_2'], dict(
type=str, help='Description of arg_2')),
(['--opt-arg-3'], dict(
                type=str, default='default value of opt-arg-3', help='Description of opt-arg-3')),
(['--opt-arg-4'], dict(
type=float, default=float('nan'), help='Description of opt-arg-4')),
]
@cement.ex(hide=True)
def _default(self):
args = self.app.pargs
args.arg_1
args.arg_2
args.opt_arg_3
args.opt_arg_4
class App(cement.App):
""" Command line application """
class Meta:
label = '{{ name }}'
base_controller = 'base'
handlers = [
BaseController,
Command3WithArgumentsController,
]
def main():
with App() as app:
app.run()
|
mit
|
synmnstr/flexx
|
exp/reactive.py
|
22
|
9230
|
""" Attempt to implement a reactive system in Python. With that I mean
a system in which signals are bound implicitly, as in Shiny.
The Signal is the core element here. It is like the Property in
HasProps. A function can easily be turned into a Signal object by
decorating it.
Unlike Properties, signals have a function associated with them
that computes stuff (maybe rename signal to behavior). They also cache their
value.
"""
import re
import sys
import time
import inspect
class Signal:
#todo: or is this a "behavior"?
""" Wrap a function in a class to allow marking it dirty and caching
last result.
"""
def __init__(self, fun):
self._fun = fun
self._value = None
self._dirty = True
self._dependers = []
def __call__(self, *args):
if self._dirty:
self._value = self._fun(*args)
self._dirty = False
return self._value
def set_dirty(self):
self._dirty = True
for dep in self._dependers:
dep.set_dirty()
dep()
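# Editor's note (minimal sketch of the caching behaviour above, illustrative
# and not part of the original module):
#   s = Signal(lambda: time.time())
#   a = s(); b = s()   # second call returns the cached value, so a == b
#   s.set_dirty()
#   c = s()            # recomputed after being marked dirty, so c may differ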
class Input(Signal):
""" A signal defined simply by a value that can be set. You can
consider this a source signal.
"""
def __init__(self):
Signal.__init__(self, lambda x=None:None)
def set(self, value):
self._value = value
self.set_dirty()
class Output(Signal):
pass # I don't think that I need this?
def check_deps(fun, locals, globals):
""" Analyse the source code of fun to get the signals that the reactive
function depends on. It then registers the function at these signals.
"""
# Get source of function and find uses of inputs
# todo: use AST parsing instead
s = inspect.getsource(fun._fun)
matches = re.findall(r'([a-zA-Z0-9_.]+?)\.get_signal\([\'\"](\w+?)[\'\"]\)', s)
fun._nmatches = 0
# print('found %i deps on %r' % (len(matches), fun))
# For each used input, try to retrieve the actual object
for match in matches:
ob = locals.get(match[0], globals.get(match[0], None))
if ob is None:
print('could not locate dependency %r' % match[0])
else:
ob._bind_signal(match[1], fun)
fun._nmatches += 1
# print('bound signal for', ob)
#dep = getattr(ob, 'input_'+match[1])
#print('found dep ', dep)
# Detect outputs
#matches = re.findall(r'([a-zA-Z0-9_.]+?)\.set_output\([\'\"](\w+?)[\'\"]\,', s)
# Detect calls
if fun._nmatches:
matches = re.findall(r'([a-zA-Z0-9_.]+?)\(', s)
for match in matches:
ob = locals.get(match[0], globals.get(match[0], None))
if isinstance(ob, Signal):
ob._dependers.append(fun)
# # For each used input, try to retrieve the actual object
# for match in matches:
# ob = locals.get(match[0], globals.get(match[0], None))
# if ob is None:
# print('could not locate dependency %r' % match[0])
# else:
# ob._bind_signal(match[1], fun2)
fun._deps_checked = len(matches)
def react(fun):
""" decorator
"""
# fun should be called, when any of its deps gets called
# can I get that info?
# Probably with sys.settrace(), but that is CPython specific.
# Evaluating source code via inspect?
# Evaluating the AST?
# -> We can detect "input.slider" or something, but can we detect
    # what object it's called on? What is "w"?
# Note: only works on Python implementations that have a stack
_frame = sys._getframe(1)
# todo: from trellis:
# if isinstance(rule, types.FunctionType): # only pick up name if a function
# if frame.f_locals.get(rule.__name__) is rule: # and locally-defined!
# name = name or rule.__name__
if not isinstance(fun, Signal):
fun = Signal(fun)
check_deps(fun, _frame.f_locals, _frame.f_globals)
if fun._nmatches:
return fun
else:
        return fun._fun # return original, it's probably a method that we should try later
class Reactive:
""" Base class for classes that can have signals and reactive
methods.
"""
SIGNALS = []
def __init__(self):
self._signals = {}
self._downstream = {}
for name, val in self.SIGNALS:
self._signals[name] = val
for name in dir(self.__class__):
cls_ob = getattr(self.__class__, name)
if hasattr(cls_ob, '_deps_checked'):
fun = getattr(self, name)
# print('re-trying reactive')
react(fun)
def _emit_signal(self, name, value):
self._signals[name] = value
for f in self._downstream.get(name, ()):
f.set_dirty()
f()
def _bind_signal(self, name, fun):
funcs = self._downstream.setdefault(name, [])
if fun not in funcs:
funcs.append(fun)
def bind_signals(self, *args):
# Alternative: explicit binding
def bind(fun):
if not isinstance(fun, Signal):
fun = Signal(fun)
for name in names:
funcs = self._downstream.setdefault(name, [])
if fun not in funcs:
funcs.append(fun)
return fun
fun = None
names = []
for arg in args:
if callable(arg):
fun = arg
else:
names.append(arg)
print('binding ', names)
if fun is None:
return bind
else:
            return bind(fun)
def get_signal(self, name):
# i = inspect.getframeinfo(inspect.currentframe())
if False:
s = inspect.stack()
caller = s[1]
print(caller[0].f_locals.keys())
print(caller[0].f_globals.keys())
id = caller[1], caller[2], caller[3]
# if 'self' in f_locals:
# fun = f_locals()
self.caller = caller
self.caller2 = sys._getframe(1)
fun = caller[0].f_globals[id[-1]]
print(id, fun)
self._bind_signal(name, fun)
return self._signals[name]
def set_output(self, name, value):
# def xx(fun):
# def yy():
# value = fun()
# f = getattr(self, 'on_' + name)
# f(value)
# fun()
# return yy
# return xx
f = getattr(self, 'on_' + name)
f(value)
print('-----')
class Widget(Reactive):
SIGNALS = [('slider1', 0), ('slider2', 0)]
def manual_slider1(self, v):
""" Simulate changing a slider value.
"""
        # if this is called, outputs should be shown
# todo: also store latest value
self._emit_signal('slider1', v)
def manual_slider2(self, v):
self._emit_signal('slider2', v)
def on_show(self, val):
print('hooray!', val)
class Widget1(Widget):
@react
def bla1(self):
x = self.get_signal('slider1') * 3
self.set_output('show', x)
w = Widget1()
@react
def something_that_takes_long():
    # when slider1 changes, it should invoke bla! (and other inputs/signals that depend on it)
print('this may take a while')
time.sleep(1)
return w.get_signal('slider1') * 2
@react
def bla():
# to an output.
x = something_that_takes_long()
x += w.get_signal('slider2')
w.set_output('show', x - 1)
# todo: set_output, or return and connect somehow?
print('-----')
class Widget2(Widget):
def on_slider1_change(self): # but can only bind to single change
x = self.get_signal('slider1') * 3
self.set_output('show', x)
# # maybe this? but then slider1 of what?
# @binding('slider1', 'slider2')
# def xxx(self):
# pass
w2 = Widget2()
@w2.bind_signals('slider1')
def something_that_takes_long():
    # when slider1 changes, it should invoke bla! (and other inputs/signals that depend on it)
print('this may take a while')
time.sleep(1)
return w2.get_signal('slider1') * 2
#@some_widget.bind_signals('foo')
#@some_other_widget.bind_signals('bar')
@w2.bind_signals('slider2')
# todo: w2.bind_signals('slider2', something_that_takes_long)
def bla():
# to an output.
x = something_that_takes_long()
x += w2.get_signal('slider2')
w2.set_output('show', x - 1)
# todo: set_output, or return and connect somehow?
print('-----')
class Temp2(Reactive):
""" Simple example of object that has signals for temperature in both
Celcius and Fahrenheit. Changing either automatically changes the other.
"""
SIGNALS = [('C', 0), ('F', 32)]
t2 = Temp2()
self = t2
@t2.bind_signals('F')
def _c2f():
self._signals['C'] = (self.get_signal('F')-32) / 1.8
@t2.bind_signals('C')
def _f2c():
self._signals['F'] = self.get_signal('C') * 1.8 + 32
@t2.bind_signals('C', 'F')
def _show():
print('C:', self.get_signal('C'))
print('F:', self.get_signal('F'))
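# Editor's note (illustrative walk-through of the Temp2 example above, not part
# of the original module): emitting the 'C' signal triggers the bound reactive
# functions in the order they were registered, e.g.
#   t2._emit_signal('C', 100)
#   # _f2c updates the 'F' signal to 212.0, then _show prints:
#   # C: 100
#   # F: 212.0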
|
bsd-2-clause
|
adamjmcgrath/glancydesign
|
django/core/management/commands/makemessages.py
|
154
|
16507
|
import fnmatch
import glob
import os
import re
import sys
from itertools import dropwhile
from optparse import make_option
from subprocess import PIPE, Popen
from django.core.management.base import CommandError, NoArgsCommand
from django.utils.text import get_text_list
pythonize_re = re.compile(r'(?:^|\n)\s*//')
plural_forms_re = re.compile(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', re.MULTILINE | re.DOTALL)
def handle_extensions(extensions=('html',)):
"""
organizes multiple extensions that are separated with commas or passed by
using --extension/-e multiple times.
for example: running 'django-admin makemessages -e js,txt -e xhtml -a'
    would result in an extension list: ['.js', '.txt', '.xhtml']
>>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py'])
['.html', '.js']
>>> handle_extensions(['.html, txt,.tpl'])
['.html', '.tpl', '.txt']
"""
ext_list = []
for ext in extensions:
ext_list.extend(ext.replace(' ','').split(','))
for i, ext in enumerate(ext_list):
if not ext.startswith('.'):
ext_list[i] = '.%s' % ext_list[i]
# we don't want *.py files here because of the way non-*.py files
# are handled in make_messages() (they are copied to file.ext.py files to
# trick xgettext to parse them as Python files)
return set([x for x in ext_list if x != '.py'])
def _popen(cmd):
"""
Friendly wrapper around Popen for Windows
"""
p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, close_fds=os.name != 'nt', universal_newlines=True)
return p.communicate()
def walk(root, topdown=True, onerror=None, followlinks=False):
"""
A version of os.walk that can follow symlinks for Python < 2.6
"""
for dirpath, dirnames, filenames in os.walk(root, topdown, onerror):
yield (dirpath, dirnames, filenames)
if followlinks:
for d in dirnames:
p = os.path.join(dirpath, d)
if os.path.islink(p):
for link_dirpath, link_dirnames, link_filenames in walk(p):
yield (link_dirpath, link_dirnames, link_filenames)
def is_ignored(path, ignore_patterns):
"""
Helper function to check if the given path should be ignored or not.
"""
for pattern in ignore_patterns:
if fnmatch.fnmatchcase(path, pattern):
return True
return False
def find_files(root, ignore_patterns, verbosity, symlinks=False):
"""
Helper function to get all files in the given root.
"""
all_files = []
for (dirpath, dirnames, filenames) in walk(".", followlinks=symlinks):
for f in filenames:
norm_filepath = os.path.normpath(os.path.join(dirpath, f))
if is_ignored(norm_filepath, ignore_patterns):
if verbosity > 1:
sys.stdout.write('ignoring file %s in %s\n' % (f, dirpath))
else:
all_files.extend([(dirpath, f)])
all_files.sort()
return all_files
def copy_plural_forms(msgs, locale, domain, verbosity):
"""
Copies plural forms header contents from a Django catalog of locale to
the msgs string, inserting it at the right place. msgs should be the
contents of a newly created .po file.
"""
import django
django_dir = os.path.normpath(os.path.join(os.path.dirname(django.__file__)))
if domain == 'djangojs':
domains = ('djangojs', 'django')
else:
domains = ('django',)
for domain in domains:
django_po = os.path.join(django_dir, 'conf', 'locale', locale, 'LC_MESSAGES', '%s.po' % domain)
if os.path.exists(django_po):
m = plural_forms_re.search(open(django_po, 'rU').read())
if m:
if verbosity > 1:
sys.stderr.write("copying plural forms: %s\n" % m.group('value'))
lines = []
seen = False
for line in msgs.split('\n'):
if not line and not seen:
line = '%s\n' % m.group('value')
seen = True
lines.append(line)
msgs = '\n'.join(lines)
break
return msgs
def make_messages(locale=None, domain='django', verbosity='1', all=False,
extensions=None, symlinks=False, ignore_patterns=[], no_wrap=False,
no_obsolete=False):
"""
    Uses the locale directory from the Django SVN tree or an application/
    project to process all files marked for translation.
"""
# Need to ensure that the i18n framework is enabled
from django.conf import settings
if settings.configured:
settings.USE_I18N = True
else:
settings.configure(USE_I18N = True)
from django.utils.translation import templatize
invoked_for_django = False
if os.path.isdir(os.path.join('conf', 'locale')):
localedir = os.path.abspath(os.path.join('conf', 'locale'))
invoked_for_django = True
# Ignoring all contrib apps
ignore_patterns += ['contrib/*']
elif os.path.isdir('locale'):
localedir = os.path.abspath('locale')
else:
raise CommandError("This script should be run from the Django SVN tree or your project or app tree. If you did indeed run it from the SVN checkout or your project or application, maybe you are just missing the conf/locale (in the django tree) or locale (for project and application) directory? It is not created automatically, you have to create it by hand if you want to enable i18n for your project or application.")
if domain not in ('django', 'djangojs'):
raise CommandError("currently makemessages only supports domains 'django' and 'djangojs'")
if (locale is None and not all) or domain is None:
message = "Type '%s help %s' for usage information." % (os.path.basename(sys.argv[0]), sys.argv[1])
raise CommandError(message)
# We require gettext version 0.15 or newer.
output = _popen('xgettext --version')[0]
match = re.search(r'(?P<major>\d+)\.(?P<minor>\d+)', output)
if match:
xversion = (int(match.group('major')), int(match.group('minor')))
if xversion < (0, 15):
raise CommandError("Django internationalization requires GNU gettext 0.15 or newer. You are using version %s, please upgrade your gettext toolset." % match.group())
languages = []
if locale is not None:
languages.append(locale)
elif all:
locale_dirs = filter(os.path.isdir, glob.glob('%s/*' % localedir))
languages = [os.path.basename(l) for l in locale_dirs]
wrap = no_wrap and '--no-wrap' or ''
for locale in languages:
if verbosity > 0:
print "processing language", locale
basedir = os.path.join(localedir, locale, 'LC_MESSAGES')
if not os.path.isdir(basedir):
os.makedirs(basedir)
pofile = os.path.join(basedir, '%s.po' % domain)
potfile = os.path.join(basedir, '%s.pot' % domain)
if os.path.exists(potfile):
os.unlink(potfile)
for dirpath, file in find_files(".", ignore_patterns, verbosity, symlinks=symlinks):
file_base, file_ext = os.path.splitext(file)
if domain == 'djangojs' and file_ext in extensions:
if verbosity > 1:
sys.stdout.write('processing file %s in %s\n' % (file, dirpath))
src = open(os.path.join(dirpath, file), "rU").read()
src = pythonize_re.sub('\n#', src)
thefile = '%s.py' % file
f = open(os.path.join(dirpath, thefile), "w")
try:
f.write(src)
finally:
f.close()
cmd = (
'xgettext -d %s -L Perl %s --keyword=gettext_noop '
'--keyword=gettext_lazy --keyword=ngettext_lazy:1,2 '
'--keyword=pgettext:1c,2 --keyword=npgettext:1c,2,3 '
'--from-code UTF-8 --add-comments=Translators -o - "%s"' % (
domain, wrap, os.path.join(dirpath, thefile)
)
)
msgs, errors = _popen(cmd)
if errors:
os.unlink(os.path.join(dirpath, thefile))
if os.path.exists(potfile):
os.unlink(potfile)
raise CommandError(
"errors happened while running xgettext on %s\n%s" %
(file, errors))
if msgs:
old = '#: ' + os.path.join(dirpath, thefile)[2:]
new = '#: ' + os.path.join(dirpath, file)[2:]
msgs = msgs.replace(old, new)
if os.path.exists(potfile):
# Strip the header
msgs = '\n'.join(dropwhile(len, msgs.split('\n')))
else:
msgs = msgs.replace('charset=CHARSET', 'charset=UTF-8')
f = open(potfile, 'ab')
try:
f.write(msgs)
finally:
f.close()
os.unlink(os.path.join(dirpath, thefile))
elif domain == 'django' and (file_ext == '.py' or file_ext in extensions):
thefile = file
orig_file = os.path.join(dirpath, file)
if file_ext in extensions:
src = open(orig_file, "rU").read()
thefile = '%s.py' % file
f = open(os.path.join(dirpath, thefile), "w")
try:
f.write(templatize(src, orig_file[2:]))
finally:
f.close()
if verbosity > 1:
sys.stdout.write('processing file %s in %s\n' % (file, dirpath))
cmd = (
'xgettext -d %s -L Python %s --keyword=gettext_noop '
'--keyword=gettext_lazy --keyword=ngettext_lazy:1,2 '
'--keyword=ugettext_noop --keyword=ugettext_lazy '
'--keyword=ungettext_lazy:1,2 --keyword=pgettext:1c,2 '
'--keyword=npgettext:1c,2,3 --keyword=pgettext_lazy:1c,2 '
'--keyword=npgettext_lazy:1c,2,3 --from-code UTF-8 '
'--add-comments=Translators -o - "%s"' % (
domain, wrap, os.path.join(dirpath, thefile))
)
msgs, errors = _popen(cmd)
if errors:
if thefile != file:
os.unlink(os.path.join(dirpath, thefile))
if os.path.exists(potfile):
os.unlink(potfile)
raise CommandError(
"errors happened while running xgettext on %s\n%s" %
(file, errors))
if msgs:
if thefile != file:
old = '#: ' + os.path.join(dirpath, thefile)[2:]
new = '#: ' + orig_file[2:]
msgs = msgs.replace(old, new)
if os.path.exists(potfile):
# Strip the header
msgs = '\n'.join(dropwhile(len, msgs.split('\n')))
else:
msgs = msgs.replace('charset=CHARSET', 'charset=UTF-8')
f = open(potfile, 'ab')
try:
f.write(msgs)
finally:
f.close()
if thefile != file:
os.unlink(os.path.join(dirpath, thefile))
if os.path.exists(potfile):
msgs, errors = _popen('msguniq %s --to-code=utf-8 "%s"' %
(wrap, potfile))
if errors:
os.unlink(potfile)
raise CommandError(
"errors happened while running msguniq\n%s" % errors)
if os.path.exists(pofile):
f = open(potfile, 'w')
try:
f.write(msgs)
finally:
f.close()
msgs, errors = _popen('msgmerge %s -q "%s" "%s"' %
(wrap, pofile, potfile))
if errors:
os.unlink(potfile)
raise CommandError(
"errors happened while running msgmerge\n%s" % errors)
elif not invoked_for_django:
msgs = copy_plural_forms(msgs, locale, domain, verbosity)
msgs = msgs.replace(
"#. #-#-#-#-# %s.pot (PACKAGE VERSION) #-#-#-#-#\n" % domain, "")
f = open(pofile, 'wb')
try:
f.write(msgs)
finally:
f.close()
os.unlink(potfile)
if no_obsolete:
msgs, errors = _popen('msgattrib %s -o "%s" --no-obsolete "%s"' %
(wrap, pofile, pofile))
if errors:
raise CommandError(
"errors happened while running msgattrib\n%s" % errors)
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--locale', '-l', default=None, dest='locale',
help='Creates or updates the message files for the given locale (e.g. pt_BR).'),
make_option('--domain', '-d', default='django', dest='domain',
help='The domain of the message files (default: "django").'),
make_option('--all', '-a', action='store_true', dest='all',
default=False, help='Updates the message files for all existing locales.'),
make_option('--extension', '-e', dest='extensions',
help='The file extension(s) to examine (default: ".html", separate multiple extensions with commas, or use -e multiple times)',
action='append'),
make_option('--symlinks', '-s', action='store_true', dest='symlinks',
default=False, help='Follows symlinks to directories when examining source code and templates for translation strings.'),
make_option('--ignore', '-i', action='append', dest='ignore_patterns',
default=[], metavar='PATTERN', help='Ignore files or directories matching this glob-style pattern. Use multiple times to ignore more.'),
make_option('--no-default-ignore', action='store_false', dest='use_default_ignore_patterns',
default=True, help="Don't ignore the common glob-style patterns 'CVS', '.*' and '*~'."),
make_option('--no-wrap', action='store_true', dest='no_wrap',
default=False, help="Don't break long message lines into several lines"),
make_option('--no-obsolete', action='store_true', dest='no_obsolete',
default=False, help="Remove obsolete message strings"),
)
help = ( "Runs over the entire source tree of the current directory and "
"pulls out all strings marked for translation. It creates (or updates) a message "
"file in the conf/locale (in the django tree) or locale (for projects and "
"applications) directory.\n\nYou must run this command with one of either the "
"--locale or --all options.")
requires_model_validation = False
can_import_settings = False
def handle_noargs(self, *args, **options):
locale = options.get('locale')
domain = options.get('domain')
verbosity = int(options.get('verbosity'))
process_all = options.get('all')
extensions = options.get('extensions')
symlinks = options.get('symlinks')
ignore_patterns = options.get('ignore_patterns')
if options.get('use_default_ignore_patterns'):
ignore_patterns += ['CVS', '.*', '*~']
ignore_patterns = list(set(ignore_patterns))
no_wrap = options.get('no_wrap')
no_obsolete = options.get('no_obsolete')
if domain == 'djangojs':
extensions = handle_extensions(extensions or ['js'])
else:
extensions = handle_extensions(extensions or ['html'])
if verbosity > 1:
sys.stdout.write('examining files with the extensions: %s\n'
% get_text_list(list(extensions), 'and'))
make_messages(locale, domain, verbosity, process_all, extensions, symlinks, ignore_patterns, no_wrap, no_obsolete)
|
bsd-3-clause
|
jaksmid/gensim
|
gensim/test/test_similarities.py
|
8
|
22852
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for similarity algorithms (the similarities package).
"""
import logging
import unittest
import os
import tempfile
import numpy
import scipy
from gensim.corpora import mmcorpus, Dictionary
from gensim.models import word2vec
from gensim.models import doc2vec
from gensim.models.wrappers import fasttext
from gensim import matutils, utils, similarities
from gensim.models import Word2Vec
try:
from pyemd import emd
PYEMD_EXT = True
except ImportError:
PYEMD_EXT = False
module_path = os.path.dirname(__file__) # needed because sample data files are located in the same folder
datapath = lambda fname: os.path.join(module_path, 'test_data', fname)
# set up vars used in testing ("Deerwester" from the web tutorial)
texts = [['human', 'interface', 'computer'],
['survey', 'user', 'computer', 'system', 'response', 'time'],
['eps', 'user', 'interface', 'system'],
['system', 'human', 'system', 'eps'],
['user', 'response', 'time'],
['trees'],
['graph', 'trees'],
['graph', 'minors', 'trees'],
['graph', 'minors', 'survey']]
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
sentences = [doc2vec.TaggedDocument(words, [i])
for i, words in enumerate(texts)]
def testfile():
# temporary data will be stored to this file
return os.path.join(tempfile.gettempdir(), 'gensim_similarities.tst.pkl')
class _TestSimilarityABC(object):
"""
Base class for SparseMatrixSimilarity and MatrixSimilarity unit tests.
"""
def testFull(self, num_best=None, shardsize=100):
if self.cls == similarities.Similarity:
index = self.cls(None, corpus, num_features=len(dictionary), shardsize=shardsize)
else:
index = self.cls(corpus, num_features=len(dictionary))
if isinstance(index, similarities.MatrixSimilarity):
expected = numpy.array([
[0.57735026, 0.57735026, 0.57735026, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.40824831, 0.0, 0.40824831, 0.40824831, 0.40824831, 0.40824831, 0.40824831, 0.0, 0.0, 0.0, 0.0],
[0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.40824831, 0.0, 0.0, 0.0, 0.81649661, 0.0, 0.40824831, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.57735026, 0.57735026, 0.0, 0.0, 0.57735026, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1., 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.70710677, 0.70710677, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.57735026, 0.57735026, 0.57735026],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.57735026, 0.0, 0.0, 0.0, 0.0, 0.57735026, 0.57735026],
], dtype=numpy.float32)
# HACK: dictionary can be in different order, so compare in sorted order
self.assertTrue(numpy.allclose(sorted(expected.flat), sorted(index.index.flat)))
index.num_best = num_best
query = corpus[0]
sims = index[query]
expected = [(0, 0.99999994), (2, 0.28867513), (3, 0.23570226), (1, 0.23570226)][ : num_best]
# convert sims to full numpy arrays, so we can use allclose() and ignore
# ordering of items with the same similarity value
expected = matutils.sparse2full(expected, len(index))
if num_best is not None: # when num_best is None, sims is already a numpy array
sims = matutils.sparse2full(sims, len(index))
self.assertTrue(numpy.allclose(expected, sims))
if self.cls == similarities.Similarity:
index.destroy()
def testNumBest(self):
if self.cls == similarities.WmdSimilarity and not PYEMD_EXT:
return
for num_best in [None, 0, 1, 9, 1000]:
self.testFull(num_best=num_best)
def test_full2sparse_clipped(self):
vec = [0.8, 0.2, 0.0, 0.0, -0.1, -0.15]
expected = [(0, 0.80000000000000004), (1, 0.20000000000000001), (5, -0.14999999999999999)]
self.assertTrue(matutils.full2sparse_clipped(vec, topn=3), expected)
def testChunking(self):
if self.cls == similarities.Similarity:
index = self.cls(None, corpus, num_features=len(dictionary), shardsize=5)
else:
index = self.cls(corpus, num_features=len(dictionary))
query = corpus[:3]
sims = index[query]
expected = numpy.array([
[0.99999994, 0.23570226, 0.28867513, 0.23570226, 0.0, 0.0, 0.0, 0.0, 0.0 ],
[0.23570226, 1.0, 0.40824831, 0.33333334, 0.70710677, 0.0, 0.0, 0.0, 0.23570226 ],
[0.28867513, 0.40824831, 1.0, 0.61237246, 0.28867513, 0.0, 0.0, 0.0, 0.0 ]
], dtype=numpy.float32)
self.assertTrue(numpy.allclose(expected, sims))
# test the same thing but with num_best
index.num_best = 3
sims = index[query]
expected = [[(0, 0.99999994), (2, 0.28867513), (1, 0.23570226)],
[(1, 1.0), (4, 0.70710677), (2, 0.40824831)],
[(2, 1.0), (3, 0.61237246), (1, 0.40824831)]]
self.assertTrue(numpy.allclose(expected, sims))
if self.cls == similarities.Similarity:
index.destroy()
def testIter(self):
if self.cls == similarities.Similarity:
index = self.cls(None, corpus, num_features=len(dictionary), shardsize=5)
else:
index = self.cls(corpus, num_features=len(dictionary))
sims = [sim for sim in index]
expected = numpy.array([
[ 0.99999994, 0.23570226, 0.28867513, 0.23570226, 0.0, 0.0, 0.0, 0.0, 0.0 ],
[ 0.23570226, 1.0, 0.40824831, 0.33333334, 0.70710677, 0.0, 0.0, 0.0, 0.23570226 ],
[ 0.28867513, 0.40824831, 1.0, 0.61237246, 0.28867513, 0.0, 0.0, 0.0, 0.0 ],
[ 0.23570226, 0.33333334, 0.61237246, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 ],
[ 0.0, 0.70710677, 0.28867513, 0.0, 0.99999994, 0.0, 0.0, 0.0, 0.0 ],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.70710677, 0.57735026, 0.0 ],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.70710677, 0.99999994, 0.81649655, 0.40824828 ],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.57735026, 0.81649655, 0.99999994, 0.66666663 ],
[ 0.0, 0.23570226, 0.0, 0.0, 0.0, 0.0, 0.40824828, 0.66666663, 0.99999994 ]
], dtype=numpy.float32)
self.assertTrue(numpy.allclose(expected, sims))
if self.cls == similarities.Similarity:
index.destroy()
def testPersistency(self):
if self.cls == similarities.WmdSimilarity and not PYEMD_EXT:
return
fname = testfile()
if self.cls == similarities.Similarity:
index = self.cls(None, corpus, num_features=len(dictionary), shardsize=5)
elif self.cls == similarities.WmdSimilarity:
index = self.cls(texts, self.w2v_model)
else:
index = self.cls(corpus, num_features=len(dictionary))
index.save(fname)
index2 = self.cls.load(fname)
if self.cls == similarities.Similarity:
# for Similarity, only do a basic check
self.assertTrue(len(index.shards) == len(index2.shards))
index.destroy()
else:
if isinstance(index, similarities.SparseMatrixSimilarity):
# hack SparseMatrixSim indexes so they're easy to compare
index.index = index.index.todense()
index2.index = index2.index.todense()
self.assertTrue(numpy.allclose(index.index, index2.index))
self.assertEqual(index.num_best, index2.num_best)
def testPersistencyCompressed(self):
if self.cls == similarities.WmdSimilarity and not PYEMD_EXT:
return
fname = testfile() + '.gz'
if self.cls == similarities.Similarity:
index = self.cls(None, corpus, num_features=len(dictionary), shardsize=5)
elif self.cls == similarities.WmdSimilarity:
index = self.cls(texts, self.w2v_model)
else:
index = self.cls(corpus, num_features=len(dictionary))
index.save(fname)
index2 = self.cls.load(fname)
if self.cls == similarities.Similarity:
# for Similarity, only do a basic check
self.assertTrue(len(index.shards) == len(index2.shards))
index.destroy()
else:
if isinstance(index, similarities.SparseMatrixSimilarity):
# hack SparseMatrixSim indexes so they're easy to compare
index.index = index.index.todense()
index2.index = index2.index.todense()
self.assertTrue(numpy.allclose(index.index, index2.index))
self.assertEqual(index.num_best, index2.num_best)
def testLarge(self):
if self.cls == similarities.WmdSimilarity and not PYEMD_EXT:
return
fname = testfile()
if self.cls == similarities.Similarity:
index = self.cls(None, corpus, num_features=len(dictionary), shardsize=5)
elif self.cls == similarities.WmdSimilarity:
index = self.cls(texts, self.w2v_model)
else:
index = self.cls(corpus, num_features=len(dictionary))
# store all arrays separately
index.save(fname, sep_limit=0)
index2 = self.cls.load(fname)
if self.cls == similarities.Similarity:
# for Similarity, only do a basic check
self.assertTrue(len(index.shards) == len(index2.shards))
index.destroy()
else:
if isinstance(index, similarities.SparseMatrixSimilarity):
# hack SparseMatrixSim indexes so they're easy to compare
index.index = index.index.todense()
index2.index = index2.index.todense()
self.assertTrue(numpy.allclose(index.index, index2.index))
self.assertEqual(index.num_best, index2.num_best)
def testLargeCompressed(self):
if self.cls == similarities.WmdSimilarity and not PYEMD_EXT:
return
fname = testfile() + '.gz'
if self.cls == similarities.Similarity:
index = self.cls(None, corpus, num_features=len(dictionary), shardsize=5)
elif self.cls == similarities.WmdSimilarity:
index = self.cls(texts, self.w2v_model)
else:
index = self.cls(corpus, num_features=len(dictionary))
# store all arrays separately
index.save(fname, sep_limit=0)
index2 = self.cls.load(fname, mmap=None)
if self.cls == similarities.Similarity:
# for Similarity, only do a basic check
self.assertTrue(len(index.shards) == len(index2.shards))
index.destroy()
else:
if isinstance(index, similarities.SparseMatrixSimilarity):
# hack SparseMatrixSim indexes so they're easy to compare
index.index = index.index.todense()
index2.index = index2.index.todense()
self.assertTrue(numpy.allclose(index.index, index2.index))
self.assertEqual(index.num_best, index2.num_best)
def testMmap(self):
if self.cls == similarities.WmdSimilarity and not PYEMD_EXT:
return
fname = testfile()
if self.cls == similarities.Similarity:
index = self.cls(None, corpus, num_features=len(dictionary), shardsize=5)
elif self.cls == similarities.WmdSimilarity:
index = self.cls(texts, self.w2v_model)
else:
index = self.cls(corpus, num_features=len(dictionary))
# store all arrays separately
index.save(fname, sep_limit=0)
# same thing, but use mmap to load arrays
index2 = self.cls.load(fname, mmap='r')
if self.cls == similarities.Similarity:
# for Similarity, only do a basic check
self.assertTrue(len(index.shards) == len(index2.shards))
index.destroy()
else:
if isinstance(index, similarities.SparseMatrixSimilarity):
# hack SparseMatrixSim indexes so they're easy to compare
index.index = index.index.todense()
index2.index = index2.index.todense()
self.assertTrue(numpy.allclose(index.index, index2.index))
self.assertEqual(index.num_best, index2.num_best)
def testMmapCompressed(self):
if self.cls == similarities.WmdSimilarity and not PYEMD_EXT:
return
fname = testfile() + '.gz'
if self.cls == similarities.Similarity:
index = self.cls(None, corpus, num_features=len(dictionary), shardsize=5)
elif self.cls == similarities.WmdSimilarity:
index = self.cls(texts, self.w2v_model)
else:
index = self.cls(corpus, num_features=len(dictionary))
# store all arrays separately
index.save(fname, sep_limit=0)
# same thing, but use mmap to load arrays
self.assertRaises(IOError, self.cls.load, fname, mmap='r')
class TestMatrixSimilarity(unittest.TestCase, _TestSimilarityABC):
def setUp(self):
self.cls = similarities.MatrixSimilarity
class TestWmdSimilarity(unittest.TestCase, _TestSimilarityABC):
def setUp(self):
self.cls = similarities.WmdSimilarity
self.w2v_model = Word2Vec(texts, min_count=1)
def testFull(self, num_best=None):
# Override testFull.
if not PYEMD_EXT:
return
index = self.cls(texts, self.w2v_model)
index.num_best = num_best
query = texts[0]
sims = index[query]
if num_best is not None:
# Sparse array.
for i, sim in sims:
                self.assertTrue(numpy.alltrue(sim > 0.0)) # Note that similarities are bigger than zero, as they are 1 / (1 + distance).
else:
            self.assertTrue(sims[0] == 1.0) # Similarity of a document with itself is 1.0.
self.assertTrue(numpy.alltrue(sims[1:] > 0.0))
self.assertTrue(numpy.alltrue(sims[1:] < 1.0))
def testNonIncreasing(self):
''' Check that similarities are non-increasing when `num_best` is not
`None`.'''
# NOTE: this could be implemented for other similarities as well (i.e.
# in _TestSimilarityABC).
if not PYEMD_EXT:
return
index = self.cls(texts, self.w2v_model, num_best=3)
query = texts[0]
sims = index[query]
sims2 = numpy.asarray(sims)[:, 1] # Just the similarities themselves.
# The difference of adjacent elements should be negative.
cond = sum(numpy.diff(sims2) < 0) == len(sims2) - 1
self.assertTrue(cond)
def testChunking(self):
# Override testChunking.
if not PYEMD_EXT:
return
index = self.cls(texts, self.w2v_model)
query = texts[:3]
sims = index[query]
for i in range(3):
            self.assertTrue(numpy.alltrue(sims[i, i] == 1.0)) # Similarity of a document with itself is 1.0.
# test the same thing but with num_best
index.num_best = 3
sims = index[query]
for sims_temp in sims:
for i, sim in sims_temp:
self.assertTrue(numpy.alltrue(sim > 0.0))
self.assertTrue(numpy.alltrue(sim <= 1.0))
def testIter(self):
# Override testIter.
if not PYEMD_EXT:
return
index = self.cls(texts, self.w2v_model)
for sims in index:
self.assertTrue(numpy.alltrue(sims >= 0.0))
self.assertTrue(numpy.alltrue(sims <= 1.0))
class TestSparseMatrixSimilarity(unittest.TestCase, _TestSimilarityABC):
def setUp(self):
self.cls = similarities.SparseMatrixSimilarity
def testMaintainSparsity(self):
"""Sparsity is correctly maintained when maintain_sparsity=True"""
num_features = len(dictionary)
index = self.cls(corpus, num_features=num_features)
dense_sims = index[corpus]
index = self.cls(corpus, num_features=num_features, maintain_sparsity=True)
sparse_sims = index[corpus]
self.assertFalse(scipy.sparse.issparse(dense_sims))
self.assertTrue(scipy.sparse.issparse(sparse_sims))
numpy.testing.assert_array_equal(dense_sims, sparse_sims.todense())
class TestSimilarity(unittest.TestCase, _TestSimilarityABC):
def setUp(self):
self.cls = similarities.Similarity
def testSharding(self):
for num_best in [None, 0, 1, 9, 1000]:
for shardsize in [1, 2, 9, 1000]:
self.testFull(num_best=num_best, shardsize=shardsize)
def testReopen(self):
"""test re-opening partially full shards"""
index = similarities.Similarity(None, corpus[:5], num_features=len(dictionary), shardsize=9)
_ = index[corpus[0]] # forces shard close
index.add_documents(corpus[5:])
query = corpus[0]
sims = index[query]
expected = [(0, 0.99999994), (2, 0.28867513), (3, 0.23570226), (1, 0.23570226)]
expected = matutils.sparse2full(expected, len(index))
self.assertTrue(numpy.allclose(expected, sims))
index.destroy()
def testMmapCompressed(self):
pass
# turns out this test doesn't exercise this because there are no arrays
# to be mmaped!
def testChunksize(self):
index = self.cls(None, corpus, num_features=len(dictionary), shardsize=5)
expected = [sim for sim in index]
index.chunksize = len(index) - 1
sims = [sim for sim in index]
self.assertTrue(numpy.allclose(expected, sims))
index.destroy()
class TestWord2VecAnnoyIndexer(unittest.TestCase):
def setUp(self):
try:
import annoy
except ImportError:
raise unittest.SkipTest("Annoy library is not available")
from gensim.similarities.index import AnnoyIndexer
self.indexer = AnnoyIndexer
def testWord2Vec(self):
model = word2vec.Word2Vec(texts, min_count=1)
model.init_sims()
index = self.indexer(model, 10)
self.assertVectorIsSimilarToItself(model, index)
self.assertApproxNeighborsMatchExact(model, index)
self.assertIndexSaved(index)
self.assertLoadedIndexEqual(index, model)
def testFastText(self):
ft_home = os.environ.get('FT_HOME', None)
ft_path = os.path.join(ft_home, 'fasttext') if ft_home else None
if not ft_path:
return
corpus_file = datapath('lee.cor')
model = fasttext.FastText.train(ft_path, corpus_file)
model.init_sims()
index = self.indexer(model, 10)
self.assertVectorIsSimilarToItself(model, index)
self.assertApproxNeighborsMatchExact(model, index)
self.assertIndexSaved(index)
self.assertLoadedIndexEqual(index, model)
def testLoadMissingRaisesError(self):
from gensim.similarities.index import AnnoyIndexer
test_index = AnnoyIndexer()
self.assertRaises(IOError, test_index.load, fname='test-index')
def assertVectorIsSimilarToItself(self, model, index):
vector = model.wv.syn0norm[0]
label = model.wv.index2word[0]
approx_neighbors = index.most_similar(vector, 1)
word, similarity = approx_neighbors[0]
self.assertEqual(word, label)
self.assertEqual(similarity, 1.0)
def assertApproxNeighborsMatchExact(self, model, index):
vector = model.wv.syn0norm[0]
approx_neighbors = model.most_similar([vector], topn=5, indexer=index)
exact_neighbors = model.most_similar(positive=[vector], topn=5)
approx_words = [neighbor[0] for neighbor in approx_neighbors]
exact_words = [neighbor[0] for neighbor in exact_neighbors]
self.assertEqual(approx_words, exact_words)
def assertIndexSaved(self, index):
index.save('index')
self.assertTrue(os.path.exists('index'))
self.assertTrue(os.path.exists('index.d'))
def assertLoadedIndexEqual(self, index, model):
from gensim.similarities.index import AnnoyIndexer
index.save('index')
index2 = AnnoyIndexer()
index2.load('index')
index2.model = model
self.assertEqual(index.index.f, index2.index.f)
self.assertEqual(index.labels, index2.labels)
self.assertEqual(index.num_trees, index2.num_trees)
class TestDoc2VecAnnoyIndexer(unittest.TestCase):
def setUp(self):
try:
import annoy
except ImportError:
raise unittest.SkipTest("Annoy library is not available")
from gensim.similarities.index import AnnoyIndexer
self.model = doc2vec.Doc2Vec(sentences, min_count=1)
self.model.init_sims()
self.index = AnnoyIndexer(self.model, 300)
self.vector = self.model.docvecs.doctag_syn0norm[0]
def testDocumentIsSimilarToItself(self):
approx_neighbors = self.index.most_similar(self.vector, 1)
doc, similarity = approx_neighbors[0]
self.assertEqual(doc, 0)
self.assertEqual(similarity, 1.0)
def testApproxNeighborsMatchExact(self):
approx_neighbors = self.model.docvecs.most_similar([self.vector], topn=5, indexer=self.index)
exact_neighbors = self.model.docvecs.most_similar(
positive=[self.vector], topn=5)
approx_words = [neighbor[0] for neighbor in approx_neighbors]
exact_words = [neighbor[0] for neighbor in exact_neighbors]
self.assertEqual(approx_words, exact_words)
def testSave(self):
self.index.save('index')
self.assertTrue(os.path.exists('index'))
self.assertTrue(os.path.exists('index.d'))
def testLoadNotExist(self):
from gensim.similarities.index import AnnoyIndexer
self.test_index = AnnoyIndexer()
self.assertRaises(IOError, self.test_index.load, fname='test-index')
def testSaveLoad(self):
from gensim.similarities.index import AnnoyIndexer
self.index.save('index')
self.index2 = AnnoyIndexer()
self.index2.load('index')
self.index2.model = self.model
self.assertEqual(self.index.index.f, self.index2.index.f)
self.assertEqual(self.index.labels, self.index2.labels)
self.assertEqual(self.index.num_trees, self.index2.num_trees)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
|
lgpl-2.1
|
marcreyesph/scancode-toolkit
|
tests/cluecode/test_authors.py
|
6
|
9569
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import os.path
from commoncode.testcase import FileBasedTesting
from cluecode_assert_utils import check_detection
class TestAuthors(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_author_addr_c(self):
test_file = self.get_test_loc('authors/author_addr_c-addr_c.c')
expected = [
u'John Doe',
]
check_detection(expected, test_file, what='authors')
def test_author_avinash(self):
test_file = self.get_test_loc('authors/author_avinash-BitVector_py.py')
expected = [
u'Avinash Kak ([email protected])',
u'Avinash Kak ([email protected])',
]
check_detection(expected, test_file, what='authors')
def test_author_avinash_kak(self):
test_file = self.get_test_loc('authors/author_avinash_kak-BitVector_py.py')
expected = [
u'Avinash Kak ([email protected])',
u'Avinash Kak ([email protected])',
]
check_detection(expected, test_file, what='authors')
def test_author_complex_author(self):
test_file = self.get_test_loc('authors/author_complex_author-strtol_c.c')
expected = [
'developed by the University of California, Berkeley and its contributors.',
]
check_detection(expected, test_file, what='authors')
def test_author_correct(self):
test_file = self.get_test_loc('authors/author_correct-detail_9_html.html')
expected = []
check_detection(expected, test_file, what='authors')
def test_author_do_not_detect_authorize_as_author(self):
test_file = self.get_test_loc('authors/author_do_not_detect_authorize_as_author.csv')
expected = []
check_detection(expected, test_file, what='authors')
def test_author_expat(self):
test_file = self.get_test_loc('authors/author_expat-expat_h.h')
expected = []
check_detection(expected, test_file, what='authors')
def test_author_gary(self):
test_file = self.get_test_loc('authors/author_gary-ProjectInfo_java.java')
expected = [
"Gary O'Neall",
]
check_detection(expected, test_file, what='authors')
def test_author_gthomas_c(self):
test_file = self.get_test_loc('authors/author_gthomas_c-c.c')
expected = [
u'Author(s) gthomas, [email protected]',
u'Contributors gthomas, [email protected], [email protected]',
]
check_detection(expected, test_file, what='authors')
def test_author_in_java(self):
test_file = self.get_test_loc('authors/author_in_java-MergeSort_java.java')
expected = [
u'Scott Violet',
]
check_detection(expected, test_file, what='authors')
def test_author_in_java_tag(self):
test_file = self.get_test_loc('authors/author_in_java_tag-java.java')
expected = [
u'Apple Banana Car',
]
check_detection(expected, test_file, what='authors')
def test_author_in_postcript(self):
test_file = self.get_test_loc('authors/author_in_postcript-9__ps.ps')
expected = []
check_detection(expected, test_file, what='authors')
def test_author_in_visio_doc(self):
test_file = self.get_test_loc('authors/author_in_visio_doc-Glitch_ERD_vsd.vsd')
expected = []
check_detection(expected, test_file, what='authors')
def test_author_nathan(self):
test_file = self.get_test_loc('authors/author_nathan-KEYS')
expected = [
'Nathan Mittler <[email protected]>',
]
check_detection(expected, test_file, what='authors')
def test_author_no_author(self):
test_file = self.get_test_loc('authors/author_no_author-c.c')
expected = []
check_detection(expected, test_file, what='authors')
def test_author_none(self):
test_file = self.get_test_loc('authors/author_none-wrong')
expected = []
check_detection(expected, test_file, what='authors')
def test_author_none_c(self):
test_file = self.get_test_loc('authors/author_none_c-c.c')
expected = []
check_detection(expected, test_file, what='authors')
def test_author_none_fp(self):
test_file = self.get_test_loc('authors/author_none_fp-false_positives_c.c')
expected = []
check_detection(expected, test_file, what='authors')
def test_author_none_js(self):
test_file = self.get_test_loc('authors/author_none_js-editor_beta_de_js.js')
expected = []
check_detection(expected, test_file, what='authors')
def test_author_none_license(self):
test_file = self.get_test_loc('authors/author_none_license-LICENSE')
expected = []
check_detection(expected, test_file, what='authors')
def test_author_none_sample_java(self):
test_file = self.get_test_loc('authors/author_none_sample_java-java.java')
expected = []
check_detection(expected, test_file, what='authors')
def test_author_russ_c(self):
test_file = self.get_test_loc('authors/author_russ_c-c.c')
# these are detected as copyrights, not authors
# u'Russ Dill <[email protected]>',
# u'Vladimir Oleynik <[email protected]>',
expected = []
check_detection(expected, test_file, what='authors')
def test_author_sample(self):
test_file = self.get_test_loc('authors/author_sample-c.c')
expected = []
check_detection(expected, test_file, what='authors')
def test_author_samplepy(self):
test_file = self.get_test_loc('authors/author_samplepy-py.py')
expected = []
check_detection(expected, test_file, what='authors')
def test_author_snippet(self):
test_file = self.get_test_loc('authors/author_snippet')
expected = []
check_detection(expected, test_file, what='authors')
def test_author_stacktrace_cpp(self):
test_file = self.get_test_loc('authors/author_stacktrace_cpp-stacktrace_cpp.cpp')
expected = [
u'by [email protected]',
]
check_detection(expected, test_file, what='authors')
def test_author_treetablemodeladapter_java(self):
test_file = self.get_test_loc('authors/author_treetablemodeladapter_java-TreeTableModelAdapter_java.java')
expected = [
u'Philip Milne author Scott Violet',
]
check_detection(expected, test_file, what='authors')
def test_author_uc(self):
test_file = self.get_test_loc('authors/author_uc-LICENSE')
expected = [
'developed by the University of California, Berkeley and its contributors.',
'developed by UC Berkeley and its contributors.',
'developed by the University of California, Berkeley and its contributors.',
]
check_detection(expected, test_file, what='authors')
def test_author_var_route_c(self):
test_file = self.get_test_loc('authors/author_var_route_c-var_route_c.c')
# these are detected as copyrights, not authors
# u'Erik Schoenfelder ([email protected])',
# u'Simon Leinen ([email protected])',
expected = []
check_detection(expected, test_file, what='authors')
def test_author_vs(self):
test_file = self.get_test_loc('authors/author_vs-visual_studio.txt')
expected = []
check_detection(expected, test_file, what='authors')
def test_author_young_c(self):
test_file = self.get_test_loc('authors/author_young_c-c.c')
expected = [
u'written by Eric Young ([email protected]).',
u'Tim Hudson ([email protected]).',
u'written by Eric Young ([email protected])',
u'written by Tim Hudson ([email protected])',
]
check_detection(expected, test_file, what='authors')
def test_author_wcstok_c(self):
test_file = self.get_test_loc('authors/wcstok.c')
expected = [u'Wes Peters <[email protected]>']
check_detection(expected, test_file, what='authors')
|
apache-2.0
|
nemonik/Intellect
|
intellect/examples/testing/__init__.py
|
12
|
1629
|
"""
Copyright (c) 2011, The MITRE Corporation.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. All advertising materials mentioning features or use of this software
must display the following acknowledgement:
This product includes software developed by the author.
4. Neither the name of the author nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
|
bsd-3-clause
|
pettarin/aeneas
|
aeneas/tests/long_test_task_rconf.py
|
5
|
16852
|
#!/usr/bin/env python
# coding=utf-8
# aeneas is a Python/C library and a set of tools
# to automagically synchronize audio and text (aka forced alignment)
#
# Copyright (C) 2012-2013, Alberto Pettarin (www.albertopettarin.it)
# Copyright (C) 2013-2015, ReadBeyond Srl (www.readbeyond.it)
# Copyright (C) 2015-2017, Alberto Pettarin (www.albertopettarin.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import unittest
from aeneas.tools.execute_task import ExecuteTaskCLI
import aeneas.globalfunctions as gf
# TODO actually parse this file to know what extras
# (festival, speect, etc.) are available to test
EXTRA_TESTS = os.path.exists(os.path.join(os.path.expanduser("~"), ".aeneas.conf"))
class TestExecuteTaskCLI(unittest.TestCase):
def execute(self, parameters, expected_exit_code):
output_path = gf.tmp_directory()
params = ["placeholder"]
for p_type, p_value in parameters:
if p_type == "in":
params.append(gf.absolute_path(p_value, __file__))
elif p_type == "out":
params.append(os.path.join(output_path, p_value))
else:
params.append(p_value)
exit_code = ExecuteTaskCLI(use_sys=False).run(arguments=params)
gf.delete_directory(output_path)
self.assertEqual(exit_code, expected_exit_code)
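    # Note (added for clarity, not part of the original file): each parameter above
    # is a (kind, value) tuple. "in" values are resolved relative to this test
    # file, "out" values are placed in a temporary directory that is deleted after
    # the run, and "" values are passed to ExecuteTaskCLI verbatim (task
    # configuration strings and "-r" runtime configuration flags).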
def test_exec_aba_no_zero_duration(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt|task_adjust_boundary_no_zero=True"),
("out", "sonnet.srt"),
("", "-r=\"aba_no_zero_duration=0.005\"")
], 0)
def test_exec_aba_nonspeech_tolerance(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt|task_adjust_boundary_no_zero=True"),
("out", "sonnet.srt"),
("", "-r=\"aba_nonspeech_tolerance=0.040\"")
], 0)
def test_exec_allow_unlisted_language(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=en-zz|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
("", "--skip-validator"),
("", "-r=\"allow_unlisted_languages=True\"")
], 0)
def test_exec_c_extensions(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
("", "-r=\"c_extensions=False\"")
], 0)
def test_exec_cdtw(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
("", "-r=\"cdtw=False\"")
], 0)
def test_exec_cew(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
            ("", "-r=\"cew=False\"")
], 0)
def test_exec_cew_subprocess_enabled(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
("", "-r=\"cew_subprocess_enabled=True\"")
], 0)
def test_exec_cew_subprocess_path(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
("", "-r=\"cew_subprocess_enabled=True|cew_subprocess_path=python\"")
], 0)
def test_exec_cfw(self):
if not EXTRA_TESTS:
return
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
("", "-r=\"cfw=False|tts=festival\"")
], 0)
def test_exec_cmfcc(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
("", "-r=\"cmfcc=False\"")
], 0)
def test_exec_dtw_algorithm(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
("", "-r=\"dtw_algorithm=exact\"")
], 0)
def test_exec_dtw_margin(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
("", "-r=\"dtw_margin=100\"")
], 0)
def test_exec_ffmpeg_path(self):
if not EXTRA_TESTS:
return
home = os.path.expanduser("~")
path = os.path.join(home, ".aeneas/myffmpeg")
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
("", "-r=\"ffmpeg_path=%s\"" % path)
], 0)
def test_exec_ffmpeg_sample_rate(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
("", "-r=\"ffmpeg_sample_rate=22050\"")
], 0)
def test_exec_ffprobe_path(self):
if not EXTRA_TESTS:
return
home = os.path.expanduser("~")
path = os.path.join(home, ".aeneas/myffprobe")
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
("", "-r=\"ffprobe_path=%s\"" % path)
], 0)
def test_exec_mfcc_emphasis_factor(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
("", "-r=\"mfcc_emphasis_factor=0.95\"")
], 0)
def test_exec_mfcc_fft_order(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
("", "-r=\"mfcc_fft_order=256\"")
], 0)
def test_exec_mfcc_filters(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
("", "-r=\"mfcc_filters=32\"")
], 0)
def test_exec_mfcc_lower_frequency(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
("", "-r=\"mfcc_lower_frequency=100\"")
], 0)
def test_exec_mfcc_size(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
("", "-r=\"mfcc_size=16\"")
], 0)
def test_exec_mfcc_upper_frequency(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
("", "-r=\"mfcc_upper_frequency=5000\"")
], 0)
def test_exec_mfcc_window_length(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
("", "-r=\"mfcc_window_length=0.200\"")
], 0)
def test_exec_mfcc_window_shift(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
("", "-r=\"mfcc_window_shift=0.050\"")
], 0)
def test_exec_mfcc_mask_nonspeech(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt|task_adjust_boundary_nonspeech_min=0.500|task_adjust_boundary_nonspeech_string=REMOVE"),
("out", "sonnet.srt"),
("", "-r=\"mfcc_mask_nonspeech=True\"")
], 0)
def test_exec_mfcc_mask_extend_speech_after(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt|task_adjust_boundary_nonspeech_min=0.500|task_adjust_boundary_nonspeech_string=REMOVE"),
("out", "sonnet.srt"),
("", "-r=\"mfcc_mask_nonspeech=True|mfcc_mask_extend_speech_after=1\"")
], 0)
def test_exec_mfcc_mask_extend_speech_before(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt|task_adjust_boundary_nonspeech_min=0.500|task_adjust_boundary_nonspeech_string=REMOVE"),
("out", "sonnet.srt"),
("", "-r=\"mfcc_mask_nonspeech=True|mfcc_mask_extend_speech_before=1\"")
], 0)
def test_exec_mfcc_mask_log_energy_threshold(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt|task_adjust_boundary_nonspeech_min=0.500|task_adjust_boundary_nonspeech_string=REMOVE"),
("out", "sonnet.srt"),
("", "-r=\"mfcc_mask_nonspeech=True|mfcc_mask_log_energy_threshold=0.750\"")
], 0)
def test_exec_mfcc_mask_min_nonspeech_length(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt|task_adjust_boundary_nonspeech_min=0.500|task_adjust_boundary_nonspeech_string=REMOVE"),
("out", "sonnet.srt"),
("", "-r=\"mfcc_mask_nonspeech=True|mfcc_mask_min_nonspeech_length=2\"")
], 0)
def test_exec_safety_checks(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
("", "-r=\"safety_checks=False\"")
], 0)
def test_exec_tmp_path(self):
tmp_path = gf.tmp_directory()
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
("", "-r=\"tmp_path=%s\"" % tmp_path)
], 0)
gf.delete_directory(tmp_path)
def test_exec_tts(self):
if not EXTRA_TESTS:
return
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
("", "-r=\"tts=festival\"")
], 0)
def test_exec_tts_cache(self):
if not EXTRA_TESTS:
return
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
("", "-r=\"tts=festival|tts_cache=True\"")
], 0)
def test_exec_tts_path(self):
if not EXTRA_TESTS:
return
home = os.path.expanduser("~")
path = os.path.join(home, ".aeneas/myespeak")
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
("", "-r=\"tts=espeak|tts_path=%s\"" % path)
], 0)
def test_exec_voice_code(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt"),
("out", "sonnet.srt"),
("", "-r=\"tts_voice_code=it\"")
], 0)
def test_exec_vad_extend_speech_after(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt|task_adjust_boundary_algorithm=rate|task_adjust_boundary_rate_value=12.000"),
("out", "sonnet.srt"),
("", "-r=\"vad_extend_speech_after=0.100\"")
], 0)
def test_exec_vad_extend_speech_before(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt|task_adjust_boundary_algorithm=rate|task_adjust_boundary_rate_value=12.000"),
("out", "sonnet.srt"),
("", "-r=\"vad_extend_speech_before=0.100\"")
], 0)
def test_exec_vad_log_energy_threshold(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt|task_adjust_boundary_algorithm=rate|task_adjust_boundary_rate_value=12.000"),
("out", "sonnet.srt"),
("", "-r=\"vad_log_energy_threshold=0.750\"")
], 0)
def test_exec_vad_min_nonspeech_length(self):
self.execute([
("in", "../tools/res/audio.mp3"),
("in", "../tools/res/subtitles.txt"),
("", "task_language=eng|is_text_type=subtitles|os_task_file_format=srt|task_adjust_boundary_algorithm=rate|task_adjust_boundary_rate_value=12.000"),
("out", "sonnet.srt"),
("", "-r=\"vad_min_nonspeech_length=0.500\"")
], 0)
if __name__ == "__main__":
unittest.main()
|
agpl-3.0
|
bcarroll/authmgr
|
python-3.6.2-Win64/Lib/site-packages/flask_debugtoolbar/__init__.py
|
3
|
8540
|
import os
import warnings
from flask import Blueprint, current_app, request, g, send_from_directory
from flask.globals import _request_ctx_stack
from jinja2 import Environment, PackageLoader
from werkzeug.urls import url_quote_plus
from flask_debugtoolbar.compat import iteritems
from flask_debugtoolbar.toolbar import DebugToolbar
from flask_debugtoolbar.utils import decode_text
module = Blueprint('debugtoolbar', __name__)
def replace_insensitive(string, target, replacement):
"""Similar to string.replace() but is case insensitive
Code borrowed from:
http://forums.devshed.com/python-programming-11/case-insensitive-string-replace-490921.html
"""
no_case = string.lower()
index = no_case.rfind(target.lower())
if index >= 0:
return string[:index] + replacement + string[index + len(target):]
else: # no results so return the original string
return string
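# Illustrative example (added note, not part of the original module): because
# rfind() is used, only the last case-insensitive occurrence of `target` is
# replaced, e.g. replace_insensitive("Hello World", "WORLD", "there") -> "Hello there".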
def _printable(value):
try:
return decode_text(repr(value))
except Exception as e:
return '<repr(%s) raised %s: %s>' % (
object.__repr__(value), type(e).__name__, e)
class DebugToolbarExtension(object):
_static_dir = os.path.realpath(
os.path.join(os.path.dirname(__file__), 'static'))
_redirect_codes = [301, 302, 303, 304]
def __init__(self, app=None):
self.app = app
self.debug_toolbars = {}
# Configure jinja for the internal templates and add url rules
# for static data
self.jinja_env = Environment(
autoescape=True,
extensions=['jinja2.ext.i18n', 'jinja2.ext.with_'],
loader=PackageLoader(__name__, 'templates'))
self.jinja_env.filters['urlencode'] = url_quote_plus
self.jinja_env.filters['printable'] = _printable
if app is not None:
self.init_app(app)
def init_app(self, app):
for k, v in iteritems(self._default_config(app)):
app.config.setdefault(k, v)
if not app.config['DEBUG_TB_ENABLED']:
return
if not app.config.get('SECRET_KEY'):
raise RuntimeError(
"The Flask-DebugToolbar requires the 'SECRET_KEY' config "
"var to be set")
DebugToolbar.load_panels(app)
app.before_request(self.process_request)
app.after_request(self.process_response)
app.teardown_request(self.teardown_request)
# Monkey-patch the Flask.dispatch_request method
app.dispatch_request = self.dispatch_request
app.add_url_rule('/_debug_toolbar/static/<path:filename>',
'_debug_toolbar.static', self.send_static_file)
app.register_blueprint(module, url_prefix='/_debug_toolbar/views')
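    # Typical wiring (illustrative sketch, not taken from this file): the extension
    # is attached to an app with a SECRET_KEY and debug mode enabled, e.g.
    #   app = Flask(__name__)
    #   app.config['SECRET_KEY'] = 'dev'
    #   app.debug = True
    #   DebugToolbarExtension(app)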
def _default_config(self, app):
return {
'DEBUG_TB_ENABLED': app.debug,
'DEBUG_TB_HOSTS': (),
'DEBUG_TB_INTERCEPT_REDIRECTS': True,
'DEBUG_TB_PANELS': (
'flask_debugtoolbar.panels.versions.VersionDebugPanel',
'flask_debugtoolbar.panels.timer.TimerDebugPanel',
'flask_debugtoolbar.panels.headers.HeaderDebugPanel',
'flask_debugtoolbar.panels.request_vars.RequestVarsDebugPanel',
'flask_debugtoolbar.panels.config_vars.ConfigVarsDebugPanel',
'flask_debugtoolbar.panels.template.TemplateDebugPanel',
'flask_debugtoolbar.panels.sqlalchemy.SQLAlchemyDebugPanel',
'flask_debugtoolbar.panels.logger.LoggingPanel',
'flask_debugtoolbar.panels.route_list.RouteListDebugPanel',
'flask_debugtoolbar.panels.profiler.ProfilerDebugPanel',
),
}
def dispatch_request(self):
"""Modified version of Flask.dispatch_request to call process_view."""
req = _request_ctx_stack.top.request
app = current_app
if req.routing_exception is not None:
app.raise_routing_exception(req)
rule = req.url_rule
# if we provide automatic options for this URL and the
# request came with the OPTIONS method, reply automatically
if getattr(rule, 'provide_automatic_options', False) \
and req.method == 'OPTIONS':
return app.make_default_options_response()
# otherwise dispatch to the handler for that endpoint
view_func = app.view_functions[rule.endpoint]
view_func = self.process_view(app, view_func, req.view_args)
return view_func(**req.view_args)
def _show_toolbar(self):
"""Return a boolean to indicate if we need to show the toolbar."""
if request.blueprint == 'debugtoolbar':
return False
hosts = current_app.config['DEBUG_TB_HOSTS']
if hosts and request.remote_addr not in hosts:
return False
return True
def send_static_file(self, filename):
"""Send a static file from the flask-debugtoolbar static directory."""
return send_from_directory(self._static_dir, filename)
def process_request(self):
g.debug_toolbar = self
if not self._show_toolbar():
return
real_request = request._get_current_object()
self.debug_toolbars[real_request] = (
DebugToolbar(real_request, self.jinja_env))
for panel in self.debug_toolbars[real_request].panels:
panel.process_request(real_request)
def process_view(self, app, view_func, view_kwargs):
""" This method is called just before the flask view is called.
This is done by the dispatch_request method.
"""
real_request = request._get_current_object()
try:
toolbar = self.debug_toolbars[real_request]
except KeyError:
return view_func
for panel in toolbar.panels:
new_view = panel.process_view(real_request, view_func, view_kwargs)
if new_view:
view_func = new_view
return view_func
def process_response(self, response):
real_request = request._get_current_object()
if real_request not in self.debug_toolbars:
return response
# Intercept http redirect codes and display an html page with a
# link to the target.
if current_app.config['DEBUG_TB_INTERCEPT_REDIRECTS']:
if (response.status_code in self._redirect_codes and
not real_request.is_xhr):
redirect_to = response.location
redirect_code = response.status_code
if redirect_to:
content = self.render('redirect.html', {
'redirect_to': redirect_to,
'redirect_code': redirect_code
})
response.content_length = len(content)
response.location = None
response.response = [content]
response.status_code = 200
        # If the http response code is 200 then we proceed to add the
        # toolbar to the returned html response.
if not (response.status_code == 200 and
response.is_sequence and
response.headers['content-type'].startswith('text/html')):
return response
response_html = response.data.decode(response.charset)
no_case = response_html.lower()
body_end = no_case.rfind('</body>')
if body_end >= 0:
before = response_html[:body_end]
after = response_html[body_end:]
elif no_case.startswith('<!doctype html>'):
before = response_html
after = ''
else:
warnings.warn('Could not insert debug toolbar.'
' </body> tag not found in response.')
return response
toolbar = self.debug_toolbars[real_request]
for panel in toolbar.panels:
panel.process_response(real_request, response)
toolbar_html = toolbar.render_toolbar()
content = ''.join((before, toolbar_html, after))
content = content.encode(response.charset)
response.response = [content]
response.content_length = len(content)
return response
def teardown_request(self, exc):
self.debug_toolbars.pop(request._get_current_object(), None)
def render(self, template_name, context):
template = self.jinja_env.get_template(template_name)
return template.render(**context)
|
bsd-3-clause
|
nathan-osman/python-ewp
|
setup.py
|
1
|
1092
|
from setuptools import Extension, setup
setup(
name='ewp',
version='0.1.1',
ext_modules=[Extension(
'ewp',
sources=['src/ewp.c'],
include_dirs=[
'/usr/local/opt/openssl/include', # macOS
],
libraries=['crypto', 'ssl'],
library_dirs=[
'/usr/local/opt/openssl/lib', # macOS
],
)],
test_suite='tests',
author='Nathan Osman',
author_email='[email protected]',
description="Support for PayPal's Encrypted Website Payments",
license='MIT',
url='https://github.com/nathan-osman/python-ewp',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
mit
|
c00w/bitHopper
|
bitHopper/Website/Pool_Page.py
|
1
|
1176
|
from bitHopper.Website import app, flask
import bitHopper.Tracking.Tracking
import bitHopper.Configuration.Pools
@app.route("/pool", methods=['POST', 'GET'])
def pool():
handle_worker_post(flask.request.form)
pools = bitHopper.Tracking.Tracking.build_dict()
percentage = {}
priority = {}
for pool in pools:
priority[pool] = bitHopper.Configuration.Pools.get_priority(pool)
percentage[pool] = bitHopper.Configuration.Pools.get_percentage(pool)
return flask.render_template('pool.html', pools = pools, percentage=percentage, priority=priority)
def handle_worker_post(post):
"""
Handles worker priority and percentage change operations
"""
for item in ['method','server','percentage', 'priority']:
if item not in post:
return
if post['method'] == 'set':
pool = post['server']
percentage = float(post['percentage'])
priority = float(post['priority'])
bitHopper.Configuration.Pools.set_percentage(
pool, percentage)
bitHopper.Configuration.Pools.set_priority(
pool, priority)
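# Illustrative POST body handled above (added note, not part of the original file):
#   method=set&server=<pool name>&percentage=1.0&priority=2.0
# Requests missing any of the four fields, or whose method is not 'set', are ignored.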
|
mit
|
kpkhxlgy0/SublimeText3
|
Packages/SublimeCodeIntel/libs/codeintel2/tree_javascript.py
|
7
|
44470
|
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
"""Completion evaluation code for JavaScript"""
import logging
import types
import re
from pprint import pformat
from itertools import chain
from codeintel2.common import *
from codeintel2.util import indent
from codeintel2.tree import TreeEvaluator
class CandidatesForTreeEvaluator(TreeEvaluator):
# Note: the "alt" changes added in change 281350 make some of the
# functionality on this class *not* appropriate for the shared
# TreeEvaluator. I.e. _elem_from_scoperef et al should be moved
# *out* of CandidatesForTreeEvaluator.
# This is a dict when set, multiple elements that have the same lpath will
# be set in here, ensuring we get the correct one from an lpath lookup.
# Fixes the following bug:
# http://bugs.activestate.com/show_bug.cgi?id=71666
# Ideally, this would not be needed once elem.names[] can return a tuple,
# see the following bug for reference:
# http://bugs.activestate.com/show_bug.cgi?id=71941
_alt_elem_from_scoperef = None
def _elem_from_scoperef(self, scoperef):
"""A scoperef is (<blob>, <lpath>). Return the actual elem in
the <blob> ciElementTree being referred to.
"""
elem = scoperef[0]
i = 0
for lname in scoperef[1]:
i += 1
if self._alt_elem_from_scoperef is not None:
scoperef_names = ".".join(scoperef[1][:i])
alt_elem = self._alt_elem_from_scoperef.get(scoperef_names)
if alt_elem is not None:
elem = alt_elem
continue
elem = elem.names[lname]
return elem
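    # Illustrative reading of the lookup above (comment added, not in the original
    # source): for scoperef == (blob, ["Foo", "bar"]) this returns
    # blob.names["Foo"].names["bar"], unless self._alt_elem_from_scoperef provides
    # an alternative element for one of the dotted prefixes ("Foo", "Foo.bar").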
def _tokenize_citdl_expr(self, expr):
chars = iter(zip(expr, chain(expr[1:], (None,))))
buffer = []
def get_pending_token():
if buffer:
yield "".join(buffer)
del buffer[:]
def get_quoted_string(ch):
quote = ch
local_buffer = []
for ch, next_ in chars:
# print "quote: quote=[%s] ch=[%s] next=[%s] token=%r" % (
# quote, ch, next_, local_buffer)
if ch == "\\":
local_buffer.append(next(chars)[0])
elif ch == quote:
if local_buffer:
yield "".join(local_buffer)
break
else:
local_buffer.append(ch)
BLOCK_MAP = {"(": ")", "[": "]"}
for ch, next_ in chars:
# print "ch=[%s] next=[%s] token=%r" % (ch, next_, buffer)
if ch in ('"', "'"): # quoted string
for token in get_pending_token():
yield token
for token in get_quoted_string(ch):
yield token
elif ch == ".":
for token in get_pending_token():
yield token
buffer = []
elif ch in BLOCK_MAP:
block = [ch, BLOCK_MAP[ch]]
emit = ch in ("[",)
for token in get_pending_token():
yield token
if next_ == block[1]:
                    next(chars) # consume the closing bracket
yield block[0] + block[1]
elif next_ in ('"', "'"): # quoted string
                    next(chars) # consume the opening quote
next_tokens = list(get_quoted_string(next_))
ch, next_ = next(chars)
if ch == block[1] and emit:
for next_token in next_tokens:
yield next_token
else:
yield block[0] + block[1]
else:
buffer.append(ch)
if buffer:
yield "".join(buffer)
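    # Illustrative tokenizations (my reading of the generator above, not taken from
    # the original source):
    #   'document.getElementById("foo").value' -> document, getElementById, (), value
    #   'foo["bar"].baz'                        -> foo, bar, baz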
def _join_citdl_expr(self, tokens):
return '.'.join(tokens).replace('.()', '()')
class JavaScriptTreeEvaluator(CandidatesForTreeEvaluator):
def eval_cplns(self):
self.log_start()
start_scoperef = self.get_start_scoperef()
self.info("start scope is %r", start_scoperef)
if self.trg.type == "names":
cplns = list(self._completion_names_from_scope(self.expr,
start_scoperef))
else:
hits = self._hits_from_citdl(self.expr, start_scoperef)
cplns = list(self._members_from_hits(hits))
if not cplns:
raise CodeIntelError("No completions found")
# For logging messages every call
# print indent('\n'.join("%s: %s" % (lvl, args and m % (args) or m)
# for lvl,m, args in self.ctlr.log))
# print indent('\n'.join(["Hit: %r" % (cpln, ) for cpln in cplns]))
return cplns
def eval_calltips(self):
self.log_start()
start_scoperef = self.get_start_scoperef()
self.info("start scope is %r", start_scoperef)
hits = self._hits_from_citdl(self.expr, start_scoperef)
if not hits:
raise CodeIntelError("No calltips found")
return self._calltips_from_hits(hits)
def eval_defns(self):
self.log_start()
start_scoperef = self.get_start_scoperef()
self.info("start scope is %r", start_scoperef)
hits = self._hits_from_citdl(self.expr, start_scoperef, defn_only=True)
if not hits:
raise CodeIntelError("No definitions found")
return [self._defn_from_hit(x) for x in hits]
def parent_scoperef_from_scoperef(self, scoperef,
started_in_builtin_window_scope=False):
"""
For JavaScript-in-the-browser the top-level scope is the
Window object instance. For now we are always presuming we
are running in the browser if the language is JavaScript.
Problem: if we *started* on the Window class then the parent
scope should be -> built-in-blob. This is what
'started_in_builtin_window_scope' is used for.
"""
blob, lpath = scoperef
global_var = self._global_var
if not started_in_builtin_window_scope \
and lpath == [global_var] and blob is self.built_in_blob:
return None
elif lpath:
return (blob, lpath[:-1])
elif blob is self.built_in_blob:
if started_in_builtin_window_scope:
return None
elif global_var is not None:
return (self.built_in_blob, [global_var])
else:
return (self.built_in_blob, [])
@property
def _global_var(self):
"""
The type of the global variable
"""
if self.trg.lang == "Node.js":
return "global"
return "Window"
_langintel = None
@property
def langintel(self):
if self._langintel is None:
self._langintel = self.mgr.langintel_from_lang(self.trg.lang)
return self._langintel
_libs = None
@property
def libs(self):
if self._libs is None:
self._libs = self.langintel.libs_from_buf(self.buf)
return self._libs
@property
def stdlib(self):
# JS stdlib is always the last one.
return self.libs[-1]
_built_in_blob = None
@property
def built_in_blob(self):
if self._built_in_blob is None:
self._built_in_blob = self.stdlib.get_blob("*")
return self._built_in_blob
## Specific element completions ##
def _hit_from_first_token(self, token, scoperef):
"""Find the token at the given or a parent scope.
Returns the found elem and the scope at which it was found. If
not found, this returns (None, None).
"""
self.log("find '%s' starting at %s", token, scoperef)
# Because we fake JavaScript classes and put the ctor
# function inside the class, we need to push start scopes at
# the class to the ctor. See test
# javascript/cpln/ctor_scope_cheat for an example of why.
try:
elem = self._elem_from_scoperef(scoperef)
except KeyError as ex:
self.warn("_hit_from_first_token:: no elem for scoperef: %r",
scoperef)
return (None, None)
if elem.get("ilk") == "class":
class_name = elem.get("name")
try:
ctor = elem.names[class_name]
except KeyError:
pass
else:
if "__ctor__" in ctor.get("attributes", ""):
scoperef = (scoperef[0], scoperef[1]+[class_name])
self.log("push scope to class ctor %s", scoperef)
started_in_builtin_window_scope = (scoperef[0] is self.built_in_blob
and scoperef[1] and scoperef[1][0] == self._global_var)
while 1:
try:
elem = self._elem_from_scoperef(scoperef)
except KeyError as ex:
raise EvalError("could not resolve scoperef %r: %s"
% (scoperef, ex))
try:
candidate = elem.names[token]
if "__ctor__" in candidate.get("attributes", ""):
# In JavaScript we include the constructor
# function for a (faked) class as a method.
# We must skip it here or resolution of 'this'
# in a JS class methods will always hit the ctor
# instead of the class (which is by far the
# common case).
raise KeyError("skipping JavaScript ctor")
self.log("is '%s' accessible on %s? yes", token, scoperef)
return candidate, scoperef
except KeyError:
self.log("is '%s' accessible on %s? no", token, scoperef)
scoperef = self.parent_scoperef_from_scoperef(scoperef,
started_in_builtin_window_scope)
if not scoperef:
return None, None
def _members_from_hits(self, hits):
members = set()
curr_blob = self.buf.blob_from_lang.get(self.lang, None)
for elem, scope in hits:
# In JavaScript we include the constructor function for a
# (faked) class as a method. Completion on an instance of
# this class shouldn't see the ctor.
skip_js_ctor = (elem.tag == "scope" and elem.get("ilk") == "class")
if elem.get("ilk") == "function":
# Functions have an implicit citdl type of "Function". See bug:
# http://bugs.activestate.com/show_bug.cgi?id=76504
try:
subhits = self._hits_from_type_inference("Function", scope)
members.update(self._members_from_hits(subhits))
except CodeIntelError:
pass # Ignore if Function was not found
for child in elem:
if elem.get("ilk") == "function" and child.get("ilk") == "argument":
# function arguments are not members, skip them.
# (we might still find properties of functions, though)
continue
# Only add locals when the current scope is the same
# as the variable scope.
attributes = child.get("attributes", "").split()
if curr_blob is not None and scope[0] != curr_blob:
if "__file_local__" in attributes:
self.log("skipping file_local %r in %r", elem, scope)
continue
if "__local__" in attributes:
# XXX: Move start_scoperef to be a part of the class
# start_scoperef = self.get_start_scoperef()
# scope_elem = start_scoperef[0]
# for lname in start_scoperef[1]:
# if elem == scope_elem:
# members.add( ("variable", child.get("name")) )
# break
# scope_elem = scope_elem.names[lname]
# else: # Don't show this variable
continue
if child.tag == "scope":
if skip_js_ctor and child.get("ilk") == "function" \
and "__ctor__" in attributes:
continue
members.add((child.get("ilk"), child.get("name")))
elif child.tag == "variable":
if len(child):
members.add(("namespace", child.get("name")))
else:
members.add(("variable", child.get("name")))
else:
raise NotImplementedError("unknown hit child tag '%s': %r"
% (child.tag, child))
for classref in elem.get("classrefs", "").split():
try:
subhits = self._hits_from_type_inference(classref, scope)
members.update(self._members_from_hits(subhits))
except CodeIntelError:
pass # Ignore when parent class not found, bug 65447
return members
def _calltip_from_func(self, elem):
# See "Determining a Function CallTip" in the spec for a
# discussion of this algorithm.
signature = elem.get("signature")
doc = elem.get("doc")
ctlines = []
if not signature:
name = elem.get("name")
# XXX Note difference for Tcl in _getSymbolCallTips.
ctlines = [name + "(...)"]
else:
ctlines = signature.splitlines(0)
if doc:
ctlines += doc.splitlines(0)
return '\n'.join(ctlines)
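    # Illustrative (added note): an elem with signature "foo(a, b)" and doc
    # "Adds a and b." yields the calltip "foo(a, b)\nAdds a and b."; when there is
    # no signature, the fallback calltip is "<name>(...)".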
def _calltip_from_class(self, elem):
# If the class has a defined signature then use that.
name = elem.get("name")
signature = elem.get("signature")
doc = elem.get("doc")
if signature:
ctlines = signature.splitlines(0)
if doc:
ctlines += doc.splitlines(0)
return '\n'.join(ctlines)
elif name in elem.names:
            # Typically the class element has a constructor function of
# the same name as the class.
ctor = elem.names[name]
self.log("ctor is %r", ctor)
return self._calltip_from_func(ctor)
else:
ctlines = [name + "(...)"]
if doc:
ctlines += doc.splitlines(0)
return '\n'.join(ctlines)
def _calltips_from_hits(self, hits):
"""
c.f. CitadelEvaluator._getSymbolCallTips()
"""
calltips = []
for elem, scoperef in hits:
# self.log("calltip for hit: %r", hit)
if elem.tag == "variable":
# Ignore variable hits.
self.debug("_calltips_from_hits:: ignoring variable: %r", elem)
continue
elif elem.tag == "scope":
ilk = elem.get("ilk")
if ilk == "function":
calltips.append(self._calltip_from_func(elem))
elif ilk == "class":
calltips.append(self._calltip_from_class(elem))
else:
raise NotImplementedError("unexpected scope ilk for "
"calltip hit: %r" % elem)
else:
raise NotImplementedError("unexpected elem for calltip "
"hit: %r" % elem)
## Bug 59438: adding "(from $lpath in $file)" when helpful
## in calltips.
## TODO: Don't include all (or part) when not useful:
## document.getElementsByClassName -> "(from document in
            ## prototype)". The "document in" is not necessary.
## TODO: Bad with empty lpath: "(from in prototype)"
## TODO: Problematic for test suite with "rand??" module names.
## TODO: Don't add for a local hit.
# blobname = scoperef[0].get("name")
# if blobname == "*":
# blobname = "stdlib"
# scopename = '.'.join(scoperef[1])
# calltips[-1] += "\n(from %s in %s)" % (scopename, blobname)
return calltips
def _hits_from_citdl(self, expr, scoperef, defn_only=False):
with self._check_infinite_recursion(expr):
if "[]" in expr:
# TODO: We cannot resolve array type inferences yet.
# Note that we do allow arrays types with a string key, since
# that's an alternative form for property access
raise CodeIntelError(
"no type-inference yet for arrays: %r" % expr)
tokens = list(self._tokenize_citdl_expr(expr))
# self.log("expr tokens: %r", tokens)
# First part... we try to match as much as possible straight up
hits, nconsumed = self._hits_from_first_part(tokens, scoperef)
if not hits:
raise CodeIntelError(
"could not resolve first part of '%s'" % expr)
self.debug("_hits_from_citdl: first part: %r -> %r",
tokens[:nconsumed], hits)
# ...the remainder.
remaining_tokens = tokens[nconsumed:]
for token in tokens[nconsumed:]:
new_hits = []
for elem, scoperef in hits:
self.debug("_hits_from_citdl: resolve %r on %r in %r",
token, elem, scoperef)
if token == "()":
try:
new_hits += self._hits_from_call(elem, scoperef)
except CodeIntelError as ex:
                            self.warn("could not resolve call on %r: %s", elem, ex)
continue
try:
new_hit = self._hit_from_getattr(
elem, scoperef, token)
except CodeIntelError as ex:
if token == "prototype" and elem.get("ilk") == "class":
self.debug("_hits_from_citdl: using class %r for "
"its prototype", elem)
new_hits.append((elem, scoperef))
else:
self.warn(str(ex))
else:
new_hits.append(new_hit)
hits = new_hits
# Resolve any variable type inferences.
# XXX Don't we have to *recursively* resolve hits?
# If that is done, then need to watch out for infinite loop
# because _hits_from_variable_type_inference() for a variable
# with children just returns itself. I.e. you *can't* resolve
# the <variable> away.
resolved_hits = []
if self.buf:
curr_blob = self.buf.blob_from_lang.get(self.lang, {})
else:
curr_blob = None
for elem, scoperef in hits:
if scoperef[0] != curr_blob:
if "__file_local__" in elem.get("attributes", "").split():
self.log(
"skipping __file_local__ %r in %r", elem, scoperef)
continue
if elem.tag == "variable" and not defn_only:
try:
if (not elem.get("citdl")) and elem.get("ilk") == "argument":
# this is an argument, try to infer things from the
# caller
subhits = self._hits_from_argument(elem, scoperef)
else:
subhits = self._hits_from_variable_type_inference(
elem, scoperef)
except CodeIntelError as ex:
self.warn("could not resolve %r: %s", elem, ex)
else:
resolved_hits += subhits
else:
resolved_hits.append((elem, scoperef))
return resolved_hits
def _hits_from_argument(self, elem, scoperef):
"""
Return hits for an argument of a function based on its caller
@param elem The argument; must have ilk=argument
@param scoperef The scope containing the element
@returns list of hits
"""
assert elem.get("ilk") == "argument", \
"_hits_from_argument expects an argument, got a %r" % elem.get(
"ilk")
hits = []
scope = self._elem_from_scoperef(
scoperef) # the function the argument is in
args = [arg for arg in scope.findall(
"variable") if arg.get("ilk") == "argument"]
for pos in range(len(args)):
if args[pos].get("name") == elem.get("name"):
break
else:
# can't find the argument?
return []
for caller in scope.getiterator("caller"):
citdl = caller.get("citdl")
caller_pos = int(caller.get("pos") or 0) # 1-indexed
if citdl is None or caller_pos < 1:
# invalid caller
continue
for caller_hit in self._hits_from_citdl(citdl, scoperef):
caller_func = caller_hit[0] # the calling function
if caller_func.get("ilk") != "function":
# nevermind, not a function
continue
caller_args = [arg for arg in caller_func.getiterator(
"variable") if arg.get("ilk") == "argument"]
if caller_pos > len(caller_args):
# no such argument
continue
caller_arg = caller_args[caller_pos - 1]
citdl = caller_arg.get("citdl")
if not citdl:
continue
for citdl_hit in self._hits_from_citdl(citdl, caller_hit[1]):
# got the function being called, now look up the argument
# by pos
func = citdl_hit[0]
if func.get("ilk") != "function":
continue
args = [arg for arg in func.getiterator(
"variable") if arg.get("ilk") == "argument"]
if pos >= len(args):
continue
citdl = args[pos].get("citdl")
if not citdl:
continue
hits += self._hits_from_citdl(citdl, citdl_hit[1])
return hits
def _hits_from_call(self, elem, scoperef):
"""Resolve the function call inference for 'elem' at 'scoperef'."""
if elem.tag == "variable":
hits = []
var_hits = self._hits_from_variable_type_inference(elem, scoperef)
for var_elem, var_scoperef in var_hits:
if var_elem != elem:
try:
hits += self._hits_from_call(var_elem, var_scoperef)
except CodeIntelError:
pass # Keep trying other alternatives
if not hits:
raise CodeIntelError("could not resolve call on %r." % elem)
return hits
if elem.get("ilk") == "class":
return [(elem, scoperef)]
if elem.get("ilk") != "function":
raise CodeIntelError("_hits_from_call:: unexpected element type %r"
% elem)
# CommonJS / NodeJS hack
if elem.get("name") == "require" and \
scoperef[0] is self.built_in_blob and \
not scoperef[1]:
try:
requirename = self.trg.extra.get("_params", []).pop(0)
except IndexError:
requirename = None
if requirename is not None:
import codeintel2.lang_javascript
requirename = codeintel2.lang_javascript.Utils.unquoteJsString(
requirename)
self.log("_hits_from_call: resolving CommonJS require(%r)",
requirename)
hits = self._hits_from_commonjs_require(requirename, scoperef)
if len(hits) > 0:
return hits
resolver = getattr(elem, "resolve", None)
try:
param = self.trg.extra.get("_params", []).pop(0)
except IndexError:
param = None
if resolver and param is not None:
try:
self.log("Attempting to use extra resolver %r param %r",
resolver, param)
hits = resolver(evlr=self, action="call", scoperef=scoperef,
param=param)
if hits:
return hits
except:
self.log("Extra resolver %r: Failed to resolve %s",
resolver, scoperef)
else:
self.log("_hits_from_call: no resolver on %r", elem)
citdl = elem.get("returns")
if not citdl:
raise CodeIntelError("no return type info for %r" % elem)
self.log("_hits_from_call: resolve '%s' for %r, scoperef: %r",
citdl, elem, scoperef)
# scoperef has to be set to the function called
scoperef = (scoperef[0], scoperef[1]+[elem.get("name")])
return self._hits_from_citdl(citdl, scoperef)
def _hit_from_getattr(self, elem, scoperef, token):
"""Resolve the getattr of 'token' on the given 'elem'.
Raises CodeIntelError if could not resolve it.
Algorithm:
- Try to resolve it.
- Call a hook to make an educated guess. Some attribute names
are strong signals as to the object type -- typically those
for common built-in classes.
"""
self.log("resolve getattr '%s' on %r in %r:", token, elem, scoperef)
if elem.tag == "variable":
hits = self._hits_from_variable_type_inference(elem, scoperef)
elif elem.tag == "scope" and elem.get("ilk") == "function":
# Functions have an implicit citdl type of "Function". Bug 80880.
hits = self._hits_from_type_inference("Function", scoperef)
else:
assert elem.tag == "scope", "elem tag is not 'scope': %r" % elem.tag
hits = [(elem, scoperef)]
for hit_elem, hit_scoperef in hits:
self.log("_hit_from_getattr:: hit elem %r, scoperef: %r",
hit_elem, hit_scoperef)
ilk = hit_elem.get("ilk")
if hit_elem.tag == "variable":
attr = hit_elem.names.get(token)
if attr is not None:
self.log("attr is %r on %r", attr, hit_elem)
var_scoperef = (hit_scoperef[0],
hit_scoperef[1]+[hit_elem.get("name")])
return (attr, var_scoperef)
elif ilk == "function":
return self._hit_from_getattr(hit_elem, hit_scoperef, token)
elif ilk == "class":
attr = hit_elem.names.get(token)
if attr is not None:
self.log("attr is %r on %r", attr, hit_elem)
if hit_scoperef:
class_scoperef = (hit_scoperef[0],
hit_scoperef[1]+[hit_elem.get("name")])
# If this is a variable defined in a class, move the
# scope to become the position in the class where the
# variable was defined (usually the ctor class function)
# this ensures we get the right citdl lookup. See bug:
# http://bugs.activestate.com/show_bug.cgi?id=71343
lineno = int(attr.get("line", "-1"))
if attr.tag == "variable" and \
lineno > int(hit_elem.get("line", "-1")) and \
lineno <= int(hit_elem.get("lineend", "-1")):
# get the scope of the variable
blob, lpath = self.buf.scoperef_from_blob_and_line(
hit_elem,
lineno)
if lpath:
class_scoperef = (class_scoperef[0],
class_scoperef[1]+lpath)
self.log(
"Updating scoperef to: %r", class_scoperef)
else:
class_scoperef = (None, [hit_elem.get("name")])
return (attr, class_scoperef)
for classref in hit_elem.get("classrefs", "").split():
try:
base_hits = self._hits_from_type_inference(classref,
hit_scoperef)
except CodeIntelError:
pass # Ignore when parent class not found, bug 65447
else:
for base_elem, base_scoperef in base_hits:
if token in base_elem.names:
self.log("is '%s' from %s base class? yes",
token, base_elem)
new_scoperef = (base_scoperef[0],
base_scoperef[1] +
[base_elem.get("name")])
return (base_elem.names[token], new_scoperef)
self.log("is '%s' from %s base class? no", token,
base_elem)
else:
raise NotImplementedError("unexpected scope ilk: %r" % ilk)
raise CodeIntelError("could not resolve '%s' getattr on %r in %r"
% (token, elem, scoperef))
def _hits_from_variable_type_inference(self, elem, scoperef):
"""Resolve the type inference for 'elem' at 'scoperef'."""
assert elem.tag == "variable"
hits = []
citdl = elem.get("citdl")
if citdl == "require()":
# Node.js / CommonJS hack: try to resolve things via require()
requirename = elem.get('required_library_name')
if requirename:
self.log(
"_hits_from_variable_type_inference: resolving require(%r)",
requirename)
hits += self._hits_from_commonjs_require(requirename, scoperef)
if len(elem) != 0:
# This is CIX for a JavaScript custom Object instance: a
# common pattern in JS. See test javascript/cpln/local2.
# remember to also return things from require()
return hits + [(elem, scoperef)]
if not citdl:
raise CodeIntelError("no type-inference info for %r" % elem)
self.log("resolve '%s' type inference for %r:", citdl, elem)
if citdl == elem.get("name") and citdl not in elem.names:
# The citdl expression is the same as the variable name, this will
# create a recursive citdl lookup loop. What we likely want is a
# different match that has the same name, so we go looking for it.
# Fix for bug: http://bugs.activestate.com/show_bug.cgi?id=71666
self.log("_hits_from_variable_type_inference:: recursive citdl "
" expression found, trying alternatives.")
try:
parent_elem = self._elem_from_scoperef(scoperef)
except KeyError as ex:
raise CodeIntelError(
"could not resolve recursive citdl expression %r" % citdl)
else:
alt_hits = []
# Look for alternative non-variable matches.
for child in parent_elem:
if child.tag != "variable" and child.get("name") == citdl:
alt_hits.append((child, scoperef))
# Remember the alternative hit, in case we need to
# look up this lpath again.
if self._alt_elem_from_scoperef is None:
self._alt_elem_from_scoperef = {}
alt_sref_name = ".".join(scoperef[1] + [citdl])
self._alt_elem_from_scoperef[alt_sref_name] = child
self.log(
"Alternative hit found: %r, scoperef: %r", child, scoperef, )
if alt_hits:
return alt_hits
# Try from the parent scoperef then.
scoperef = self.parent_scoperef_from_scoperef(scoperef)
if scoperef is None:
# When we run out of scope, raise an error
raise CodeIntelError(
"could not resolve recursive citdl expression %r" % citdl)
# Continue looking using _hits_from_citdl with the parent.
self.log(
"Continue search for %r from the parent scope.", citdl)
try:
hits += self._hits_from_citdl(citdl, scoperef)
except EvalError:
# shut up eval errors if we managed to get _some_ hits
if not hits:
raise
return hits
def _hits_from_type_inference(self, citdl, scoperef):
"""Resolve the 'citdl' type inference at 'scoperef'."""
self.log("resolve '%s' type inference:", citdl)
return self._hits_from_citdl(citdl, scoperef)
def _hits_from_first_part(self, tokens, scoperef):
"""Resolve the first part of the expression.
If the first token is found at the global or built-in level (or
not found at all locally) then it may be a shared namespace with
other files in the execution set. Get that down to a list of
hits and a remaining list of expression tokens.
"""
elem, scoperef = self._hit_from_first_token(tokens[0], scoperef)
if elem is not None:
self.log("_hit_from_first_part: found elem: %s %r at %r",
elem.get("ilk") or elem.tag, elem.get("name"),
scoperef[1])
if (elem is None # first token wasn't found
or not scoperef[1] # first token was found at global level
# first token was found on built-in Window class (the top scope)
or (scoperef[1] == ['Window'] and scoperef[0].get("name") == "*")
):
# Search symbol table in execution set.
#
# Example: 'myPet.name.toLowerCase()' and 'myPet' is found
# at top-level. First lookup 'myPet.name.toLowerCase'
# (everything up to first '()'), in execution set, then
# 'myPet.name', then 'myPet'. The last one should always hit
# in current file, at least.
for first_call_idx, token in enumerate(tokens):
if token == "()":
break
else:
first_call_idx = len(tokens)
hits = []
for nconsumed in range(first_call_idx, 0, -1):
lpath = tuple(tokens[:nconsumed]) # for hits_from_lpath()
if elem is not None and len(lpath) > 1:
# Try at the current elem we found in the file
try:
self.log("Checking for deeper local match %r from scoperef %r", lpath[
1:], scoperef)
check_elem = elem
for p in lpath[1:]: # we matched first token already
check_elem = check_elem.names[p]
check_scoperef = (scoperef[
0], scoperef[1] + list(lpath[:-1]))
hits.insert(0, (check_elem,
check_scoperef))
self.log("_hit_from_first_part: found deeper local elem: "
"%s %r at %r",
check_elem.get("ilk") or check_elem.tag,
check_elem.get("name"),
check_scoperef[1])
except KeyError:
pass
for lib in self.libs:
self.log("lookup '%s' in %s", '.'.join(lpath), lib)
hits_here = lib.hits_from_lpath(lpath, self.ctlr,
curr_buf=self.buf)
if hits_here:
self.log("found %d hits in lib", len(hits_here))
hits += hits_here
if hits:
break
if elem is not None:
if not hits or nconsumed == 1:
hits.insert(0, (elem, scoperef))
nconsumed = 1
else:
# Hits were found in the libs that are deeper than
# the hit in the local buf: we need to adjust the
# local hit.
new_elem = elem
for token in tokens[1:nconsumed]:
try:
new_elem = new_elem.names[token]
except KeyError:
break
else:
if new_elem not in (e for e, sr in hits):
new_scoperef = (scoperef[0], tokens[:nconsumed-1])
hits.insert(0, (new_elem, new_scoperef))
else:
hits = [(elem, scoperef)]
nconsumed = 1
return hits, nconsumed
def _hits_from_commonjs_require(self, requirename, scoperef):
"""Resolve hits from a CommonJS require() invocation"""
        # Files usually end with a ".js" suffix, though others like
        # ".node" are possible.
#
# TODO: Get these from node using "require.extensions".
requirename += ".js"
from codeintel2.database.langlib import LangDirsLib
from codeintel2.database.multilanglib import MultiLangDirsLib
from codeintel2.database.catalog import CatalogLib
hits = []
for lib in self.libs:
blobs = None
if isinstance(lib, (LangDirsLib, MultiLangDirsLib)):
blobs = lib.blobs_with_basename(requirename, ctlr=self.ctlr)
elif isinstance(lib, CatalogLib):
blob = lib.get_blob(requirename)
if blob is not None:
blobs = [blob]
for blob in blobs or []:
exports = blob.names.get("exports")
if exports is not None and exports.tag == "variable":
hits += self._hits_from_variable_type_inference(
exports, [blob, ["exports"]])
else:
                    self.log(
                        "Expected exports to be a variable, got %r instead", exports)
return hits
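    # Illustrative flow (added note, not part of the original source): require("foo")
    # is looked up as a blob with basename "foo.js" in each lib, and the blob's
    # "exports" variable (when present) is resolved to produce the hits returned above.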
## n-char trigger completions ##
def _completion_names_from_scope(self, expr, scoperef):
"""Return all available element names beginning with expr"""
self.log("_completion_names_from_scope:: %r, scoperef: %r",
expr, scoperef)
# global_blob = self._elem_from_scoperef(self._get_global_scoperef(scoperef))
# Get all of the imports
# Keep a dictionary of completions.
all_completions = {}
# We start off having JS keywords at a bare minimum.
keywords = self.langintel.langinfo.keywords
for name in keywords:
if not expr or name.startswith(expr):
all_completions[name] = "keyword"
# From the local scope, walk up the parent chain including matches as
# we go.
# XXX - Can we skip the global (stdlib) blob in here?
loopcount = -1
while scoperef and scoperef[0] is not None:
loopcount += 1
# Iterate over the contents of the scope.
self.log("_completion_names_from_scope:: checking scoperef: %r",
scoperef)
elem = self._elem_from_scoperef(scoperef)
if elem is None:
continue
for name in elem.names:
# self.log("_completion_names_from_scope:: checking name: %r",
# name)
if name and name.startswith(expr):
if name not in all_completions:
hit_elem = elem.names[name]
if loopcount and "__local__" in hit_elem.get("attributes", "").split():
# Skip things that should only be local to the
# original scope.
# self.log("_completion_names_from_scope:: skipping local %r",
# name)
continue
                        all_completions[name] = hit_elem.get("ilk") or hit_elem.tag
# Continue walking up the scope chain...
scoperef = self.parent_scoperef_from_scoperef(scoperef)
# Builtins
# Find the matching names (or all names if no expr)
cplns = self.stdlib.toplevel_cplns(prefix=expr)
for ilk, name in cplns:
if name not in all_completions:
all_completions[name] = ilk
# "Import everything", iterate over all known libs
for lib in self.libs:
# Find the matching names (or all names if no expr)
self.log("_completion_names_from_scope:: include everything from "
"lib: %r", lib)
cplns = lib.toplevel_cplns(prefix=expr)
for ilk, name in cplns:
if name not in all_completions:
all_completions[name] = ilk
return [(ilk, name) for name, ilk in list(all_completions.items())]
|
mit
|
epssy/hue
|
desktop/core/ext-py/python-daemon/daemon/daemon.py
|
42
|
24728
|
# -*- coding: utf-8 -*-
# daemon/daemon.py
# Part of python-daemon, an implementation of PEP 3143.
#
# Copyright © 2008–2009 Ben Finney <[email protected]>
# Copyright © 2007–2008 Robert Niederreiter, Jens Klein
# Copyright © 2004–2005 Chad J. Schroeder
# Copyright © 2003 Clark Evans
# Copyright © 2002 Noah Spurrier
# Copyright © 2001 Jürgen Hermann
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Python Software Foundation License, version 2 or
# later as published by the Python Software Foundation.
# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
""" Daemon process behaviour.
"""
import os
import sys
import resource
import errno
import signal
import socket
import atexit
class DaemonError(Exception):
""" Base exception class for errors from this module. """
class DaemonOSEnvironmentError(DaemonError, OSError):
""" Exception raised when daemon OS environment setup receives error. """
class DaemonProcessDetachError(DaemonError, OSError):
""" Exception raised when process detach fails. """
class DaemonContext(object):
""" Context for turning the current program into a daemon process.
A `DaemonContext` instance represents the behaviour settings and
process context for the program when it becomes a daemon. The
behaviour and environment is customised by setting options on the
instance, before calling the `open` method.
Each option can be passed as a keyword argument to the `DaemonContext`
constructor, or subsequently altered by assigning to an attribute on
the instance at any time prior to calling `open`. That is, for
options named `wibble` and `wubble`, the following invocation::
foo = daemon.DaemonContext(wibble=bar, wubble=baz)
foo.open()
is equivalent to::
foo = daemon.DaemonContext()
foo.wibble = bar
foo.wubble = baz
foo.open()
The following options are defined.
`files_preserve`
:Default: ``None``
List of files that should *not* be closed when starting the
daemon. If ``None``, all open file descriptors will be closed.
Elements of the list are file descriptors (as returned by a file
object's `fileno()` method) or Python `file` objects. Each
specifies a file that is not to be closed during daemon start.
`chroot_directory`
:Default: ``None``
Full path to a directory to set as the effective root directory of
the process. If ``None``, specifies that the root directory is not
to be changed.
`working_directory`
:Default: ``'/'``
Full path of the working directory to which the process should
change on daemon start.
Since a filesystem cannot be unmounted if a process has its
current working directory on that filesystem, this should either
be left at default or set to a directory that is a sensible “home
directory” for the daemon while it is running.
`umask`
:Default: ``0``
File access creation mask (“umask”) to set for the process on
daemon start.
Since a process inherits its umask from its parent process,
starting the daemon will reset the umask to this value so that
files are created by the daemon with access modes as it expects.
`pidfile`
:Default: ``None``
Context manager for a PID lock file. When the daemon context opens
and closes, it enters and exits the `pidfile` context manager.
`detach_process`
:Default: ``None``
If ``True``, detach the process context when opening the daemon
context; if ``False``, do not detach.
If unspecified (``None``) during initialisation of the instance,
this will be set to ``True`` by default, and ``False`` only if
detaching the process is determined to be redundant; for example,
in the case when the process was started by `init`, by `initd`, or
by `inetd`.
`signal_map`
:Default: system-dependent
Mapping from operating system signals to callback actions.
The mapping is used when the daemon context opens, and determines
the action for each signal's signal handler:
* A value of ``None`` will ignore the signal (by setting the
signal action to ``signal.SIG_IGN``).
* A string value will be used as the name of an attribute on the
``DaemonContext`` instance. The attribute's value will be used
as the action for the signal handler.
* Any other value will be used as the action for the
signal handler. See the ``signal.signal`` documentation
for details of the signal handler interface.
The default value depends on which signals are defined on the
running system. Each item from the list below whose signal is
actually defined in the ``signal`` module will appear in the
default map:
* ``signal.SIGTTIN``: ``None``
* ``signal.SIGTTOU``: ``None``
* ``signal.SIGTSTP``: ``None``
* ``signal.SIGTERM``: ``'terminate'``
Depending on how the program will interact with its child
processes, it may need to specify a signal map that
includes the ``signal.SIGCHLD`` signal (received when a
child process exits). See the specific operating system's
documentation for more detail on how to determine what
circumstances dictate the need for signal handlers.
`uid`
:Default: ``os.getuid()``
`gid`
:Default: ``os.getgid()``
The user ID (“UID”) value and group ID (“GID”) value to switch
the process to on daemon start.
The default values, the real UID and GID of the process, will
relinquish any effective privilege elevation inherited by the
process.
`prevent_core`
:Default: ``True``
If true, prevents the generation of core files, in order to avoid
leaking sensitive information from daemons run as `root`.
`stdin`
:Default: ``None``
`stdout`
:Default: ``None``
`stderr`
:Default: ``None``
Each of `stdin`, `stdout`, and `stderr` is a file-like object
which will be used as the new file for the standard I/O stream
`sys.stdin`, `sys.stdout`, and `sys.stderr` respectively. The file
should therefore be open, with a minimum of mode 'r' in the case
of `stdin`, and mode 'w+' in the case of `stdout` and `stderr`.
If the object has a `fileno()` method that returns a file
descriptor, the corresponding file will be excluded from being
closed during daemon start (that is, it will be treated as though
it were listed in `files_preserve`).
If ``None``, the corresponding system stream is re-bound to the
file named by `os.devnull`.
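        A minimal usage sketch (`run_main_loop` and the filesystem path below
        are hypothetical placeholders, not part of this module)::
            context = daemon.DaemonContext(
                working_directory='/var/lib/myapp',
                umask=0o022,
                signal_map={signal.SIGTERM: 'terminate',
                            signal.SIGHUP: None},
                )
            with context:
                run_main_loop()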
"""
def __init__(
self,
chroot_directory=None,
working_directory='/',
umask=0,
uid=None,
gid=None,
detach_process=None,
files_preserve=None,
pidfile=None,
stdin=None,
stdout=None,
stderr=None,
signal_map=None,
):
""" Set up a new instance. """
self.chroot_directory = chroot_directory
self.working_directory = working_directory
self.umask = umask
self.files_preserve = files_preserve
self.pidfile = pidfile
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
if uid is None:
uid = os.getuid()
self.uid = uid
if gid is None:
gid = os.getgid()
self.gid = gid
if detach_process is None:
detach_process = is_detach_process_context_required()
self.detach_process = detach_process
if signal_map is None:
signal_map = make_default_signal_map()
self.signal_map = signal_map
self._is_open = False
@property
def is_open(self):
""" ``True`` if the instance is currently open. """
return self._is_open
def open(self):
""" Become a daemon process.
:Return: ``None``
Open the daemon context, turning the current program into a daemon
process. This performs the following steps:
* If this instance's `is_open` property is true, return
immediately. This makes it safe to call `open` multiple times on
an instance.
* If the `prevent_core` attribute is true, set the resource limits
for the process to prevent any core dump from the process.
* If the `chroot_directory` attribute is not ``None``, set the
effective root directory of the process to that directory (via
`os.chroot`).
This allows running the daemon process inside a “chroot gaol”
as a means of limiting the system's exposure to rogue behaviour
by the process. Note that the specified directory needs to
already be set up for this purpose.
* Set the process UID and GID to the `uid` and `gid` attribute
values.
* Close all open file descriptors. This excludes those listed in
the `files_preserve` attribute, and those that correspond to the
`stdin`, `stdout`, or `stderr` attributes.
* Change current working directory to the path specified by the
`working_directory` attribute.
* Reset the file access creation mask to the value specified by
the `umask` attribute.
* If the `detach_process` option is true, detach the current
process into its own process group, and disassociate from any
controlling terminal.
* Set signal handlers as specified by the `signal_map` attribute.
* If any of the attributes `stdin`, `stdout`, `stderr` are not
``None``, bind the system streams `sys.stdin`, `sys.stdout`,
and/or `sys.stderr` to the files represented by the
corresponding attributes. Where the attribute has a file
descriptor, the descriptor is duplicated (instead of re-binding
the name).
* If the `pidfile` attribute is not ``None``, enter its context
manager.
* Mark this instance as open (for the purpose of future `open` and
`close` calls).
* Register the `close` method to be called during Python's exit
processing.
When the function returns, the running program is a daemon
process.
"""
if self.is_open:
return
if self.chroot_directory is not None:
change_root_directory(self.chroot_directory)
prevent_core_dump()
change_file_creation_mask(self.umask)
change_working_directory(self.working_directory)
change_process_owner(self.uid, self.gid)
if self.detach_process:
detach_process_context()
signal_handler_map = self._make_signal_handler_map()
set_signal_handlers(signal_handler_map)
exclude_fds = self._get_exclude_file_descriptors()
close_all_open_files(exclude=exclude_fds)
redirect_stream(sys.stdin, self.stdin)
redirect_stream(sys.stdout, self.stdout)
redirect_stream(sys.stderr, self.stderr)
if self.pidfile is not None:
self.pidfile.__enter__()
self._is_open = True
register_atexit_function(self.close)
def __enter__(self):
""" Context manager entry point. """
self.open()
return self
def close(self):
""" Exit the daemon process context.
:Return: ``None``
Close the daemon context. This performs the following steps:
* If this instance's `is_open` property is false, return
immediately. This makes it safe to call `close` multiple times
on an instance.
* If the `pidfile` attribute is not ``None``, exit its context
manager.
* Mark this instance as closed (for the purpose of future `open`
and `close` calls).
"""
if not self.is_open:
return
if self.pidfile is not None:
self.pidfile.__exit__()
self._is_open = False
def __exit__(self, exc_type, exc_value, traceback):
""" Context manager exit point. """
self.close()
def terminate(self, signal_number, stack_frame):
""" Signal handler for end-process signals.
:Return: ``None``
Signal handler for the ``signal.SIGTERM`` signal. Performs the
following step:
* Raise a ``SystemExit`` exception explaining the signal.
"""
exception = SystemExit(
"Terminating on signal %(signal_number)r"
% vars())
raise exception
def _get_exclude_file_descriptors(self):
""" Return the set of file descriptors to exclude closing.
Returns a set containing the file descriptors for the
items in `files_preserve`, and also each of `stdin`,
`stdout`, and `stderr`:
* If the item is ``None``, it is omitted from the return
set.
* If the item has a ``fileno()`` method, that method's
return value is in the return set.
* Otherwise, the item is in the return set verbatim.
"""
files_preserve = self.files_preserve
if files_preserve is None:
files_preserve = []
files_preserve.extend(
item for item in [self.stdin, self.stdout, self.stderr]
if hasattr(item, 'fileno'))
exclude_descriptors = set()
for item in files_preserve:
if item is None:
continue
if hasattr(item, 'fileno'):
exclude_descriptors.add(item.fileno())
else:
exclude_descriptors.add(item)
return exclude_descriptors
def _make_signal_handler(self, target):
""" Make the signal handler for a specified target object.
If `target` is ``None``, returns ``signal.SIG_IGN``. If
`target` is a string, returns the attribute of this
instance named by that string. Otherwise, returns `target`
itself.
"""
if target is None:
result = signal.SIG_IGN
elif isinstance(target, basestring):
name = target
result = getattr(self, name)
else:
result = target
return result
def _make_signal_handler_map(self):
""" Make the map from signals to handlers for this instance.
Constructs a map from signal numbers to handlers for this
context instance, suitable for passing to
`set_signal_handlers`.
"""
signal_handler_map = dict(
(signal_number, self._make_signal_handler(target))
for (signal_number, target) in self.signal_map.items())
return signal_handler_map
def change_working_directory(directory):
""" Change the working directory of this process.
"""
try:
os.chdir(directory)
except Exception, exc:
error = DaemonOSEnvironmentError(
"Unable to change working directory (%(exc)s)"
% vars())
raise error
def change_root_directory(directory):
""" Change the root directory of this process.
Sets the current working directory, then the process root
directory, to the specified `directory`. Requires appropriate
OS privileges for this process.
"""
try:
os.chdir(directory)
os.chroot(directory)
except Exception, exc:
error = DaemonOSEnvironmentError(
"Unable to change root directory (%(exc)s)"
% vars())
raise error
def change_file_creation_mask(mask):
""" Change the file creation mask for this process.
"""
try:
os.umask(mask)
except Exception, exc:
error = DaemonOSEnvironmentError(
"Unable to change file creation mask (%(exc)s)"
% vars())
raise error
def change_process_owner(uid, gid):
""" Change the owning UID and GID of this process.
Sets the GID then the UID of the process (in that order, to
avoid permission errors) to the specified `gid` and `uid`
values. Requires appropriate OS privileges for this process.
"""
try:
os.setgid(gid)
os.setuid(uid)
except Exception, exc:
        error = DaemonOSEnvironmentError(
            "Unable to change process owner (%(exc)s)"
            % vars())
raise error
def prevent_core_dump():
""" Prevent this process from generating a core dump.
Sets the soft and hard limits for core dump size to zero. On
        Unix, this prevents the process from creating a core dump
altogether.
"""
core_resource = resource.RLIMIT_CORE
try:
# Ensure the resource limit exists on this platform, by requesting
# its current value
core_limit_prev = resource.getrlimit(core_resource)
except ValueError, exc:
error = DaemonOSEnvironmentError(
"System does not support RLIMIT_CORE resource limit (%(exc)s)"
% vars())
raise error
# Set hard and soft limits to zero, i.e. no core dump at all
core_limit = (0, 0)
resource.setrlimit(core_resource, core_limit)
def detach_process_context():
""" Detach the process context from parent and session.
Detach from the parent process and session group, allowing the
parent to exit while this process continues running.
Reference: “Advanced Programming in the Unix Environment”,
section 13.3, by W. Richard Stevens, published 1993 by
Addison-Wesley.
"""
def fork_then_exit_parent(error_message):
""" Fork a child process, then exit the parent process.
If the fork fails, raise a ``DaemonProcessDetachError``
with ``error_message``.
"""
try:
pid = os.fork()
if pid > 0:
os._exit(0)
except OSError, exc:
exc_errno = exc.errno
exc_strerror = exc.strerror
error = DaemonProcessDetachError(
"%(error_message)s: [%(exc_errno)d] %(exc_strerror)s" % vars())
raise error
fork_then_exit_parent(error_message="Failed first fork")
os.setsid()
fork_then_exit_parent(error_message="Failed second fork")
def is_process_started_by_init():
""" Determine if the current process is started by `init`.
The `init` process has the process ID of 1; if that is our
parent process ID, return ``True``, otherwise ``False``.
"""
result = False
init_pid = 1
if os.getppid() == init_pid:
result = True
return result
def is_socket(fd):
""" Determine if the file descriptor is a socket.
Return ``False`` if querying the socket type of `fd` raises an
error; otherwise return ``True``.
"""
result = False
file_socket = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW)
try:
socket_type = file_socket.getsockopt(
socket.SOL_SOCKET, socket.SO_TYPE)
except socket.error, exc:
exc_errno = exc.args[0]
if exc_errno == errno.ENOTSOCK:
# Socket operation on non-socket
pass
else:
# Some other socket error
result = True
else:
# No error getting socket type
result = True
return result
def is_process_started_by_superserver():
""" Determine if the current process is started by the superserver.
The internet superserver creates a network socket, and
attaches it to the standard streams of the child process. If
that is the case for this process, return ``True``, otherwise
``False``.
"""
result = False
stdin_fd = sys.__stdin__.fileno()
if is_socket(stdin_fd):
result = True
return result
def is_detach_process_context_required():
""" Determine whether detaching process context is required.
Return ``True`` if the process environment indicates the
process is already detached:
* Process was started by `init`; or
* Process was started by `inetd`.
"""
result = True
if is_process_started_by_init() or is_process_started_by_superserver():
result = False
return result
def close_file_descriptor_if_open(fd):
""" Close a file descriptor if already open.
Close the file descriptor `fd`, suppressing an error in the
case the file was not open.
"""
try:
os.close(fd)
except OSError, exc:
if exc.errno == errno.EBADF:
# File descriptor was not open
pass
else:
error = DaemonOSEnvironmentError(
"Failed to close file descriptor %(fd)d"
" (%(exc)s)"
% vars())
raise error
MAXFD = 2048
def get_maximum_file_descriptors():
""" Return the maximum number of open file descriptors for this process.
Return the process hard resource limit of maximum number of
open file descriptors. If the limit is “infinity”, a default
value of ``MAXFD`` is returned.
"""
limits = resource.getrlimit(resource.RLIMIT_NOFILE)
result = limits[1]
if result == resource.RLIM_INFINITY:
result = MAXFD
return result
def close_all_open_files(exclude=set()):
""" Close all open file descriptors.
Closes every file descriptor (if open) of this process. If
specified, `exclude` is a set of file descriptors to *not*
close.
"""
maxfd = get_maximum_file_descriptors()
for fd in reversed(range(maxfd)):
if fd not in exclude:
close_file_descriptor_if_open(fd)
def redirect_stream(system_stream, target_stream):
""" Redirect a system stream to a specified file.
`system_stream` is a standard system stream such as
``sys.stdout``. `target_stream` is an open file object that
should replace the corresponding system stream object.
If `target_stream` is ``None``, defaults to opening the
operating system's null device and using its file descriptor.
"""
if target_stream is None:
target_fd = os.open(os.devnull, os.O_RDWR)
else:
target_fd = target_stream.fileno()
os.dup2(target_fd, system_stream.fileno())
def make_default_signal_map():
""" Make the default signal map for this system.
The signals available differ by system. The map will not
contain any signals not defined on the running system.
"""
name_map = {
'SIGTSTP': None,
'SIGTTIN': None,
'SIGTTOU': None,
'SIGTERM': 'terminate',
}
signal_map = dict(
(getattr(signal, name), target)
for (name, target) in name_map.items()
if hasattr(signal, name))
return signal_map
def set_signal_handlers(signal_handler_map):
""" Set the signal handlers as specified.
The `signal_handler_map` argument is a map from signal number
to signal handler. See the `signal` module for details.
"""
for (signal_number, handler) in signal_handler_map.items():
signal.signal(signal_number, handler)
def register_atexit_function(func):
""" Register a function for processing at program exit.
The function `func` is registered for a call with no arguments
at program exit.
"""
atexit.register(func)
|
apache-2.0
|
dbertha/odoo
|
addons/survey/__openerp__.py
|
261
|
2391
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Survey',
'version': '2.0',
'category': 'Marketing',
'description': """
Create beautiful web surveys and visualize answers
==================================================
A survey consists of the answers different users give to its questions. A
survey may have multiple pages, each page may contain multiple questions,
and each question may have multiple answers. The survey results are built
from the answers users submit. Partners are also sent mails with a personal
token inviting them to fill in the survey.
""",
'summary': 'Create surveys, collect answers and print statistics',
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/survey',
'depends': ['email_template', 'mail', 'website', 'marketing'],
'data': [
'security/survey_security.xml',
'security/ir.model.access.csv',
'views/survey_views.xml',
'views/survey_templates.xml',
'views/survey_result.xml',
'wizard/survey_email_compose_message.xml',
'data/survey_stages.xml',
'data/survey_cron.xml'
],
'demo': ['data/survey_demo_user.xml',
'data/survey_demo_feedback.xml',
'data/survey.user_input.csv',
'data/survey.user_input_line.csv'],
'installable': True,
'auto_install': False,
'application': True,
'sequence': 10,
}
|
agpl-3.0
|
MoritzS/django
|
tests/backends/test_postgresql.py
|
21
|
2121
|
import unittest
from collections import namedtuple
from django.db import connection
from django.test import TestCase
from .models import Person
@unittest.skipUnless(connection.vendor == 'postgresql', "Test only for PostgreSQL")
class ServerSideCursorsPostgres(TestCase):
cursor_fields = 'name, statement, is_holdable, is_binary, is_scrollable, creation_time'
PostgresCursor = namedtuple('PostgresCursor', cursor_fields)
@classmethod
def setUpTestData(cls):
Person.objects.create(first_name='a', last_name='a')
Person.objects.create(first_name='b', last_name='b')
def inspect_cursors(self):
with connection.cursor() as cursor:
cursor.execute('SELECT {fields} FROM pg_cursors;'.format(fields=self.cursor_fields))
cursors = cursor.fetchall()
return [self.PostgresCursor._make(cursor) for cursor in cursors]
def test_server_side_cursor(self):
persons = Person.objects.iterator()
next(persons) # Open a server-side cursor
cursors = self.inspect_cursors()
self.assertEqual(len(cursors), 1)
self.assertIn('_django_curs_', cursors[0].name)
self.assertFalse(cursors[0].is_scrollable)
self.assertFalse(cursors[0].is_holdable)
self.assertFalse(cursors[0].is_binary)
def test_server_side_cursor_many_cursors(self):
persons = Person.objects.iterator()
persons2 = Person.objects.iterator()
next(persons) # Open a server-side cursor
next(persons2) # Open a second server-side cursor
cursors = self.inspect_cursors()
self.assertEqual(len(cursors), 2)
for cursor in cursors:
self.assertIn('_django_curs_', cursor.name)
self.assertFalse(cursor.is_scrollable)
self.assertFalse(cursor.is_holdable)
self.assertFalse(cursor.is_binary)
def test_closed_server_side_cursor(self):
persons = Person.objects.iterator()
next(persons) # Open a server-side cursor
del persons
cursors = self.inspect_cursors()
self.assertEqual(len(cursors), 0)
|
bsd-3-clause
|
40023256/2015cdag1man
|
static/Brython3.1.0-20150301-090019/Lib/xml/etree/ElementTree.py
|
730
|
61800
|
#
# ElementTree
# $Id: ElementTree.py 3440 2008-07-18 14:45:01Z fredrik $
#
# light-weight XML support for Python 2.3 and later.
#
# history (since 1.2.6):
# 2005-11-12 fl added tostringlist/fromstringlist helpers
# 2006-07-05 fl merged in selected changes from the 1.3 sandbox
# 2006-07-05 fl removed support for 2.1 and earlier
# 2007-06-21 fl added deprecation/future warnings
# 2007-08-25 fl added doctype hook, added parser version attribute etc
# 2007-08-26 fl added new serializer code (better namespace handling, etc)
# 2007-08-27 fl warn for broken /tag searches on tree level
# 2007-09-02 fl added html/text methods to serializer (experimental)
# 2007-09-05 fl added method argument to tostring/tostringlist
# 2007-09-06 fl improved error handling
# 2007-09-13 fl added itertext, iterfind; assorted cleanups
# 2007-12-15 fl added C14N hooks, copy method (experimental)
#
# Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved.
#
# [email protected]
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
__all__ = [
# public symbols
"Comment",
"dump",
"Element", "ElementTree",
"fromstring", "fromstringlist",
"iselement", "iterparse",
"parse", "ParseError",
"PI", "ProcessingInstruction",
"QName",
"SubElement",
"tostring", "tostringlist",
"TreeBuilder",
"VERSION",
"XML", "XMLID",
"XMLParser", "XMLTreeBuilder",
"register_namespace",
]
VERSION = "1.3.0"
##
# The <b>Element</b> type is a flexible container object, designed to
# store hierarchical data structures in memory. The type can be
# described as a cross between a list and a dictionary.
# <p>
# Each element has a number of properties associated with it:
# <ul>
# <li>a <i>tag</i>. This is a string identifying what kind of data
# this element represents (the element type, in other words).</li>
# <li>a number of <i>attributes</i>, stored in a Python dictionary.</li>
# <li>a <i>text</i> string.</li>
# <li>an optional <i>tail</i> string.</li>
# <li>a number of <i>child elements</i>, stored in a Python sequence</li>
# </ul>
#
# To create an element instance, use the {@link #Element} constructor
# or the {@link #SubElement} factory function.
# <p>
# The {@link #ElementTree} class can be used to wrap an element
# structure, and convert it from and to XML.
##
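# A minimal usage sketch of the Element/SubElement API described above (the
# tag names and attribute values are illustrative only):
#
#     root = Element("root")
#     child = SubElement(root, "child", {"id": "1"})
#     child.text = "hello"
#     tostring(root)   # -> b'<root><child id="1">hello</child></root>'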
import sys
import re
import warnings
import io
import contextlib
from . import ElementPath
##
# Parser error. This is a subclass of <b>SyntaxError</b>.
# <p>
# In addition to the exception value, an exception instance contains a
# specific exception code in the <b>code</b> attribute, and the line and
# column of the error in the <b>position</b> attribute.
class ParseError(SyntaxError):
pass
# --------------------------------------------------------------------
##
# Checks if an object appears to be a valid element object.
#
# @param An element instance.
# @return A true value if this is an element object.
# @defreturn flag
def iselement(element):
# FIXME: not sure about this;
# isinstance(element, Element) or look for tag/attrib/text attributes
return hasattr(element, 'tag')
##
# Element class. This class defines the Element interface, and
# provides a reference implementation of this interface.
# <p>
# The element name, attribute names, and attribute values can be
# either ASCII strings (ordinary Python strings containing only 7-bit
# ASCII characters) or Unicode strings.
#
# @param tag The element name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @see Element
# @see SubElement
# @see Comment
# @see ProcessingInstruction
class Element:
# <tag attrib>text<child/>...</tag>tail
##
# (Attribute) Element tag.
tag = None
##
# (Attribute) Element attribute dictionary. Where possible, use
# {@link #Element.get},
# {@link #Element.set},
# {@link #Element.keys}, and
# {@link #Element.items} to access
# element attributes.
attrib = None
##
# (Attribute) Text before first subelement. This is either a
# string or the value None. Note that if there was no text, this
# attribute may be either None or an empty string, depending on
# the parser.
text = None
##
# (Attribute) Text after this element's end tag, but before the
# next sibling element's start tag. This is either a string or
# the value None. Note that if there was no text, this attribute
# may be either None or an empty string, depending on the parser.
tail = None # text after end tag, if any
# constructor
def __init__(self, tag, attrib={}, **extra):
if not isinstance(attrib, dict):
raise TypeError("attrib must be dict, not %s" % (
attrib.__class__.__name__,))
attrib = attrib.copy()
attrib.update(extra)
self.tag = tag
self.attrib = attrib
self._children = []
def __repr__(self):
return "<Element %s at 0x%x>" % (repr(self.tag), id(self))
##
# Creates a new element object of the same type as this element.
#
# @param tag Element tag.
# @param attrib Element attributes, given as a dictionary.
# @return A new element instance.
def makeelement(self, tag, attrib):
return self.__class__(tag, attrib)
##
# (Experimental) Copies the current element. This creates a
# shallow copy; subelements will be shared with the original tree.
#
# @return A new element instance.
def copy(self):
elem = self.makeelement(self.tag, self.attrib)
elem.text = self.text
elem.tail = self.tail
elem[:] = self
return elem
##
# Returns the number of subelements. Note that this only counts
# full elements; to check if there's any content in an element, you
# have to check both the length and the <b>text</b> attribute.
#
# @return The number of subelements.
def __len__(self):
return len(self._children)
def __bool__(self):
warnings.warn(
"The behavior of this method will change in future versions. "
"Use specific 'len(elem)' or 'elem is not None' test instead.",
FutureWarning, stacklevel=2
)
return len(self._children) != 0 # emulate old behaviour, for now
##
# Returns the given subelement, by index.
#
# @param index What subelement to return.
# @return The given subelement.
# @exception IndexError If the given element does not exist.
def __getitem__(self, index):
return self._children[index]
##
# Replaces the given subelement, by index.
#
# @param index What subelement to replace.
# @param element The new element value.
# @exception IndexError If the given element does not exist.
def __setitem__(self, index, element):
# if isinstance(index, slice):
# for elt in element:
# assert iselement(elt)
# else:
# assert iselement(element)
self._children[index] = element
##
# Deletes the given subelement, by index.
#
# @param index What subelement to delete.
# @exception IndexError If the given element does not exist.
def __delitem__(self, index):
del self._children[index]
##
# Adds a subelement to the end of this element. In document order,
# the new element will appear after the last existing subelement (or
# directly after the text, if it's the first subelement), but before
# the end tag for this element.
#
# @param element The element to add.
def append(self, element):
self._assert_is_element(element)
self._children.append(element)
##
# Appends subelements from a sequence.
#
# @param elements A sequence object with zero or more elements.
# @since 1.3
def extend(self, elements):
for element in elements:
self._assert_is_element(element)
self._children.extend(elements)
##
# Inserts a subelement at the given position in this element.
#
# @param index Where to insert the new subelement.
def insert(self, index, element):
self._assert_is_element(element)
self._children.insert(index, element)
def _assert_is_element(self, e):
# Need to refer to the actual Python implementation, not the
# shadowing C implementation.
if not isinstance(e, _Element):
raise TypeError('expected an Element, not %s' % type(e).__name__)
##
# Removes a matching subelement. Unlike the <b>find</b> methods,
# this method compares elements based on identity, not on tag
# value or contents. To remove subelements by other means, the
# easiest way is often to use a list comprehension to select what
# elements to keep, and use slice assignment to update the parent
# element.
#
# @param element What element to remove.
# @exception ValueError If a matching element could not be found.
def remove(self, element):
# assert iselement(element)
self._children.remove(element)
##
# (Deprecated) Returns all subelements. The elements are returned
# in document order.
#
# @return A list of subelements.
# @defreturn list of Element instances
def getchildren(self):
warnings.warn(
"This method will be removed in future versions. "
"Use 'list(elem)' or iteration over elem instead.",
DeprecationWarning, stacklevel=2
)
return self._children
##
# Finds the first matching subelement, by tag name or path.
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return The first matching element, or None if no element was found.
# @defreturn Element or None
def find(self, path, namespaces=None):
return ElementPath.find(self, path, namespaces)
##
# Finds text for the first matching subelement, by tag name or path.
#
# @param path What element to look for.
# @param default What to return if the element was not found.
# @keyparam namespaces Optional namespace prefix map.
# @return The text content of the first matching element, or the
# default value no element was found. Note that if the element
# is found, but has no text content, this method returns an
# empty string.
# @defreturn string
def findtext(self, path, default=None, namespaces=None):
return ElementPath.findtext(self, path, default, namespaces)
##
# Finds all matching subelements, by tag name or path.
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return A list or other sequence containing all matching elements,
# in document order.
# @defreturn list of Element instances
def findall(self, path, namespaces=None):
return ElementPath.findall(self, path, namespaces)
##
# Finds all matching subelements, by tag name or path.
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return An iterator or sequence containing all matching elements,
# in document order.
# @defreturn a generated sequence of Element instances
def iterfind(self, path, namespaces=None):
return ElementPath.iterfind(self, path, namespaces)
##
# Resets an element. This function removes all subelements, clears
# all attributes, and sets the <b>text</b> and <b>tail</b> attributes
# to None.
def clear(self):
self.attrib.clear()
self._children = []
self.text = self.tail = None
##
# Gets an element attribute. Equivalent to <b>attrib.get</b>, but
# some implementations may handle this a bit more efficiently.
#
# @param key What attribute to look for.
# @param default What to return if the attribute was not found.
# @return The attribute value, or the default value, if the
# attribute was not found.
# @defreturn string or None
def get(self, key, default=None):
return self.attrib.get(key, default)
##
# Sets an element attribute. Equivalent to <b>attrib[key] = value</b>,
# but some implementations may handle this a bit more efficiently.
#
# @param key What attribute to set.
# @param value The attribute value.
def set(self, key, value):
self.attrib[key] = value
##
# Gets a list of attribute names. The names are returned in an
# arbitrary order (just like for an ordinary Python dictionary).
# Equivalent to <b>attrib.keys()</b>.
#
# @return A list of element attribute names.
# @defreturn list of strings
def keys(self):
return self.attrib.keys()
##
# Gets element attributes, as a sequence. The attributes are
# returned in an arbitrary order. Equivalent to <b>attrib.items()</b>.
#
# @return A list of (name, value) tuples for all attributes.
# @defreturn list of (string, string) tuples
def items(self):
return self.attrib.items()
##
# Creates a tree iterator. The iterator loops over this element
# and all subelements, in document order, and returns all elements
# with a matching tag.
# <p>
# If the tree structure is modified during iteration, new or removed
# elements may or may not be included. To get a stable set, use the
# list() function on the iterator, and loop over the resulting list.
#
# @param tag What tags to look for (default is to return all elements).
# @return An iterator containing all the matching elements.
# @defreturn iterator
def iter(self, tag=None):
if tag == "*":
tag = None
if tag is None or self.tag == tag:
yield self
for e in self._children:
for e in e.iter(tag):
yield e
# compatibility
def getiterator(self, tag=None):
# Change for a DeprecationWarning in 1.4
warnings.warn(
"This method will be removed in future versions. "
"Use 'elem.iter()' or 'list(elem.iter())' instead.",
PendingDeprecationWarning, stacklevel=2
)
return list(self.iter(tag))
##
# Creates a text iterator. The iterator loops over this element
# and all subelements, in document order, and returns all inner
# text.
#
# @return An iterator containing all inner text.
# @defreturn iterator
def itertext(self):
tag = self.tag
if not isinstance(tag, str) and tag is not None:
return
if self.text:
yield self.text
for e in self:
for s in e.itertext():
yield s
if e.tail:
yield e.tail
# compatibility
_Element = _ElementInterface = Element
##
# Subelement factory. This function creates an element instance, and
# appends it to an existing element.
# <p>
# The element name, attribute names, and attribute values can be
# either 8-bit ASCII strings or Unicode strings.
#
# @param parent The parent element.
# @param tag The subelement name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @return An element instance.
# @defreturn Element
def SubElement(parent, tag, attrib={}, **extra):
attrib = attrib.copy()
attrib.update(extra)
element = parent.makeelement(tag, attrib)
parent.append(element)
return element
##
# Comment element factory. This factory function creates a special
# element that will be serialized as an XML comment by the standard
# serializer.
# <p>
# The comment string can be either an 8-bit ASCII string or a Unicode
# string.
#
# @param text A string containing the comment string.
# @return An element instance, representing a comment.
# @defreturn Element
def Comment(text=None):
element = Element(Comment)
element.text = text
return element
##
# PI element factory. This factory function creates a special element
# that will be serialized as an XML processing instruction by the standard
# serializer.
#
# @param target A string containing the PI target.
# @param text A string containing the PI contents, if any.
# @return An element instance, representing a PI.
# @defreturn Element
def ProcessingInstruction(target, text=None):
element = Element(ProcessingInstruction)
element.text = target
if text:
element.text = element.text + " " + text
return element
PI = ProcessingInstruction
##
# QName wrapper. This can be used to wrap a QName attribute value, in
# order to get proper namespace handling on output.
#
# @param text A string containing the QName value, in the form {uri}local,
# or, if the tag argument is given, the URI part of a QName.
# @param tag Optional tag. If given, the first argument is interpreted as
# an URI, and this argument is interpreted as a local name.
# @return An opaque object, representing the QName.
class QName:
def __init__(self, text_or_uri, tag=None):
if tag:
text_or_uri = "{%s}%s" % (text_or_uri, tag)
self.text = text_or_uri
def __str__(self):
return self.text
def __repr__(self):
return '<QName %r>' % (self.text,)
def __hash__(self):
return hash(self.text)
def __le__(self, other):
if isinstance(other, QName):
return self.text <= other.text
return self.text <= other
def __lt__(self, other):
if isinstance(other, QName):
return self.text < other.text
return self.text < other
def __ge__(self, other):
if isinstance(other, QName):
return self.text >= other.text
return self.text >= other
def __gt__(self, other):
if isinstance(other, QName):
return self.text > other.text
return self.text > other
def __eq__(self, other):
if isinstance(other, QName):
return self.text == other.text
return self.text == other
def __ne__(self, other):
if isinstance(other, QName):
return self.text != other.text
return self.text != other
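# A small illustration of the two constructor forms described above (the URI
# and local name are arbitrary examples); both produce the same QName text:
#
#     QName("{http://example.com/ns}item")
#     QName("http://example.com/ns", "item")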
# --------------------------------------------------------------------
##
# ElementTree wrapper class. This class represents an entire element
# hierarchy, and adds some extra support for serialization to and from
# standard XML.
#
# @param element Optional root element.
# @keyparam file Optional file handle or file name. If given, the
# tree is initialized with the contents of this XML file.
class ElementTree:
def __init__(self, element=None, file=None):
# assert element is None or iselement(element)
self._root = element # first node
if file:
self.parse(file)
##
# Gets the root element for this tree.
#
# @return An element instance.
# @defreturn Element
def getroot(self):
return self._root
##
# Replaces the root element for this tree. This discards the
# current contents of the tree, and replaces it with the given
# element. Use with care.
#
# @param element An element instance.
def _setroot(self, element):
# assert iselement(element)
self._root = element
##
# Loads an external XML document into this element tree.
#
# @param source A file name or file object. If a file object is
# given, it only has to implement a <b>read(n)</b> method.
# @keyparam parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return The document root element.
# @defreturn Element
# @exception ParseError If the parser fails to parse the document.
def parse(self, source, parser=None):
close_source = False
if not hasattr(source, "read"):
source = open(source, "rb")
close_source = True
try:
if not parser:
parser = XMLParser(target=TreeBuilder())
while 1:
data = source.read(65536)
if not data:
break
parser.feed(data)
self._root = parser.close()
return self._root
finally:
if close_source:
source.close()
##
# Creates a tree iterator for the root element. The iterator loops
# over all elements in this tree, in document order.
#
# @param tag What tags to look for (default is to return all elements)
# @return An iterator.
# @defreturn iterator
def iter(self, tag=None):
# assert self._root is not None
return self._root.iter(tag)
# compatibility
def getiterator(self, tag=None):
# Change for a DeprecationWarning in 1.4
warnings.warn(
"This method will be removed in future versions. "
"Use 'tree.iter()' or 'list(tree.iter())' instead.",
PendingDeprecationWarning, stacklevel=2
)
return list(self.iter(tag))
##
# Same as getroot().find(path), starting at the root of the tree.
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return The first matching element, or None if no element was found.
# @defreturn Element or None
def find(self, path, namespaces=None):
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.find(path, namespaces)
##
# Same as getroot().findtext(path), starting at the root of the tree.
#
# @param path What element to look for.
# @param default What to return if the element was not found.
# @keyparam namespaces Optional namespace prefix map.
# @return The text content of the first matching element, or the
# default value no element was found. Note that if the element
# is found, but has no text content, this method returns an
# empty string.
# @defreturn string
def findtext(self, path, default=None, namespaces=None):
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.findtext(path, default, namespaces)
##
# Same as getroot().findall(path), starting at the root of the tree.
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return A list or iterator containing all matching elements,
# in document order.
# @defreturn list of Element instances
def findall(self, path, namespaces=None):
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.findall(path, namespaces)
##
# Finds all matching subelements, by tag name or path.
# Same as getroot().iterfind(path).
#
# @param path What element to look for.
# @keyparam namespaces Optional namespace prefix map.
# @return An iterator or sequence containing all matching elements,
# in document order.
# @defreturn a generated sequence of Element instances
def iterfind(self, path, namespaces=None):
# assert self._root is not None
if path[:1] == "/":
path = "." + path
warnings.warn(
"This search is broken in 1.3 and earlier, and will be "
"fixed in a future version. If you rely on the current "
"behaviour, change it to %r" % path,
FutureWarning, stacklevel=2
)
return self._root.iterfind(path, namespaces)
##
# Writes the element tree to a file, as XML.
#
# @def write(file, **options)
# @param file A file name, or a file object opened for writing.
# @param **options Options, given as keyword arguments.
# @keyparam encoding Optional output encoding (default is US-ASCII).
# Use "unicode" to return a Unicode string.
# @keyparam xml_declaration Controls if an XML declaration should
# be added to the file. Use False for never, True for always,
# None for only if not US-ASCII or UTF-8 or Unicode. None is default.
# @keyparam default_namespace Sets the default XML namespace (for "xmlns").
# @keyparam method Optional output method ("xml", "html", "text" or
# "c14n"; default is "xml").
def write(self, file_or_filename,
encoding=None,
xml_declaration=None,
default_namespace=None,
method=None):
if not method:
method = "xml"
elif method not in _serialize:
raise ValueError("unknown method %r" % method)
if not encoding:
if method == "c14n":
encoding = "utf-8"
else:
encoding = "us-ascii"
else:
encoding = encoding.lower()
with _get_writer(file_or_filename, encoding) as write:
if method == "xml" and (xml_declaration or
(xml_declaration is None and
encoding not in ("utf-8", "us-ascii", "unicode"))):
declared_encoding = encoding
if encoding == "unicode":
# Retrieve the default encoding for the xml declaration
import locale
declared_encoding = locale.getpreferredencoding()
write("<?xml version='1.0' encoding='%s'?>\n" % (
declared_encoding,))
if method == "text":
_serialize_text(write, self._root)
else:
qnames, namespaces = _namespaces(self._root, default_namespace)
serialize = _serialize[method]
serialize(write, self._root, qnames, namespaces)
def write_c14n(self, file):
# lxml.etree compatibility. use output method instead
return self.write(file, method="c14n")
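# A usage sketch for the write() options documented above ("out.xml" is an
# arbitrary example file name, and "root" an Element built elsewhere):
#
#     tree = ElementTree(root)
#     tree.write("out.xml", encoding="utf-8", xml_declaration=True)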
# --------------------------------------------------------------------
# serialization support
@contextlib.contextmanager
def _get_writer(file_or_filename, encoding):
    # returns a text write method and releases all resources after use
try:
write = file_or_filename.write
except AttributeError:
# file_or_filename is a file name
if encoding == "unicode":
file = open(file_or_filename, "w")
else:
file = open(file_or_filename, "w", encoding=encoding,
errors="xmlcharrefreplace")
with file:
yield file.write
else:
# file_or_filename is a file-like object
# encoding determines if it is a text or binary writer
if encoding == "unicode":
# use a text writer as is
yield write
else:
# wrap a binary writer with TextIOWrapper
with contextlib.ExitStack() as stack:
if isinstance(file_or_filename, io.BufferedIOBase):
file = file_or_filename
elif isinstance(file_or_filename, io.RawIOBase):
file = io.BufferedWriter(file_or_filename)
# Keep the original file open when the BufferedWriter is
# destroyed
stack.callback(file.detach)
else:
# This is to handle passed objects that aren't in the
# IOBase hierarchy, but just have a write method
file = io.BufferedIOBase()
file.writable = lambda: True
file.write = write
try:
                    # TextIOWrapper uses these methods to determine
# if BOM (for UTF-16, etc) should be added
file.seekable = file_or_filename.seekable
file.tell = file_or_filename.tell
except AttributeError:
pass
file = io.TextIOWrapper(file,
encoding=encoding,
errors="xmlcharrefreplace",
newline="\n")
# Keep the original file open when the TextIOWrapper is
# destroyed
stack.callback(file.detach)
yield file.write
def _namespaces(elem, default_namespace=None):
# identify namespaces used in this tree
# maps qnames to *encoded* prefix:local names
qnames = {None: None}
# maps uri:s to prefixes
namespaces = {}
if default_namespace:
namespaces[default_namespace] = ""
def add_qname(qname):
# calculate serialized qname representation
try:
if qname[:1] == "{":
uri, tag = qname[1:].rsplit("}", 1)
prefix = namespaces.get(uri)
if prefix is None:
prefix = _namespace_map.get(uri)
if prefix is None:
prefix = "ns%d" % len(namespaces)
if prefix != "xml":
namespaces[uri] = prefix
if prefix:
qnames[qname] = "%s:%s" % (prefix, tag)
else:
qnames[qname] = tag # default element
else:
if default_namespace:
# FIXME: can this be handled in XML 1.0?
raise ValueError(
"cannot use non-qualified names with "
"default_namespace option"
)
qnames[qname] = qname
except TypeError:
_raise_serialization_error(qname)
# populate qname and namespaces table
for elem in elem.iter():
tag = elem.tag
if isinstance(tag, QName):
if tag.text not in qnames:
add_qname(tag.text)
elif isinstance(tag, str):
if tag not in qnames:
add_qname(tag)
elif tag is not None and tag is not Comment and tag is not PI:
_raise_serialization_error(tag)
for key, value in elem.items():
if isinstance(key, QName):
key = key.text
if key not in qnames:
add_qname(key)
if isinstance(value, QName) and value.text not in qnames:
add_qname(value.text)
text = elem.text
if isinstance(text, QName) and text.text not in qnames:
add_qname(text.text)
return qnames, namespaces
def _serialize_xml(write, elem, qnames, namespaces):
tag = elem.tag
text = elem.text
if tag is Comment:
write("<!--%s-->" % text)
elif tag is ProcessingInstruction:
write("<?%s?>" % text)
else:
tag = qnames[tag]
if tag is None:
if text:
write(_escape_cdata(text))
for e in elem:
_serialize_xml(write, e, qnames, None)
else:
write("<" + tag)
items = list(elem.items())
if items or namespaces:
if namespaces:
for v, k in sorted(namespaces.items(),
key=lambda x: x[1]): # sort on prefix
if k:
k = ":" + k
write(" xmlns%s=\"%s\"" % (
k,
_escape_attrib(v)
))
for k, v in sorted(items): # lexical order
if isinstance(k, QName):
k = k.text
if isinstance(v, QName):
v = qnames[v.text]
else:
v = _escape_attrib(v)
write(" %s=\"%s\"" % (qnames[k], v))
if text or len(elem):
write(">")
if text:
write(_escape_cdata(text))
for e in elem:
_serialize_xml(write, e, qnames, None)
write("</" + tag + ">")
else:
write(" />")
if elem.tail:
write(_escape_cdata(elem.tail))
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
"img", "input", "isindex", "link", "meta", "param")
try:
HTML_EMPTY = set(HTML_EMPTY)
except NameError:
pass
def _serialize_html(write, elem, qnames, namespaces):
tag = elem.tag
text = elem.text
if tag is Comment:
write("<!--%s-->" % _escape_cdata(text))
elif tag is ProcessingInstruction:
write("<?%s?>" % _escape_cdata(text))
else:
tag = qnames[tag]
if tag is None:
if text:
write(_escape_cdata(text))
for e in elem:
_serialize_html(write, e, qnames, None)
else:
write("<" + tag)
items = list(elem.items())
if items or namespaces:
if namespaces:
for v, k in sorted(namespaces.items(),
key=lambda x: x[1]): # sort on prefix
if k:
k = ":" + k
write(" xmlns%s=\"%s\"" % (
k,
_escape_attrib(v)
))
for k, v in sorted(items): # lexical order
if isinstance(k, QName):
k = k.text
if isinstance(v, QName):
v = qnames[v.text]
else:
v = _escape_attrib_html(v)
# FIXME: handle boolean attributes
write(" %s=\"%s\"" % (qnames[k], v))
write(">")
tag = tag.lower()
if text:
if tag == "script" or tag == "style":
write(text)
else:
write(_escape_cdata(text))
for e in elem:
_serialize_html(write, e, qnames, None)
if tag not in HTML_EMPTY:
write("</" + tag + ">")
if elem.tail:
write(_escape_cdata(elem.tail))
def _serialize_text(write, elem):
for part in elem.itertext():
write(part)
if elem.tail:
write(elem.tail)
_serialize = {
"xml": _serialize_xml,
"html": _serialize_html,
"text": _serialize_text,
# this optional method is imported at the end of the module
# "c14n": _serialize_c14n,
}
##
# Registers a namespace prefix. The registry is global, and any
# existing mapping for either the given prefix or the namespace URI
# will be removed.
#
# @param prefix Namespace prefix.
# @param uri Namespace uri. Tags and attributes in this namespace
# will be serialized with the given prefix, if at all possible.
# @exception ValueError If the prefix is reserved, or is otherwise
# invalid.
def register_namespace(prefix, uri):
if re.match("ns\d+$", prefix):
raise ValueError("Prefix format reserved for internal use")
for k, v in list(_namespace_map.items()):
if k == uri or v == prefix:
del _namespace_map[k]
_namespace_map[uri] = prefix
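# For example, to have SVG elements serialized with an "svg" prefix (the
# namespace below is just an illustration):
#
#     register_namespace("svg", "http://www.w3.org/2000/svg")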
_namespace_map = {
# "well-known" namespace prefixes
"http://www.w3.org/XML/1998/namespace": "xml",
"http://www.w3.org/1999/xhtml": "html",
"http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
"http://schemas.xmlsoap.org/wsdl/": "wsdl",
# xml schema
"http://www.w3.org/2001/XMLSchema": "xs",
"http://www.w3.org/2001/XMLSchema-instance": "xsi",
# dublin core
"http://purl.org/dc/elements/1.1/": "dc",
}
# For tests and troubleshooting
register_namespace._namespace_map = _namespace_map
def _raise_serialization_error(text):
raise TypeError(
"cannot serialize %r (type %s)" % (text, type(text).__name__)
)
def _escape_cdata(text):
# escape character data
try:
# it's worth avoiding do-nothing calls for strings that are
        # shorter than 500 characters, or so. assume that's, by far,
# the most common case in most applications.
        if "&" in text:
            text = text.replace("&", "&amp;")
        if "<" in text:
            text = text.replace("<", "&lt;")
        if ">" in text:
            text = text.replace(">", "&gt;")
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib(text):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if "<" in text:
text = text.replace("<", "<")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
if "\n" in text:
text = text.replace("\n", " ")
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
def _escape_attrib_html(text):
# escape attribute value
try:
if "&" in text:
text = text.replace("&", "&")
if ">" in text:
text = text.replace(">", ">")
if "\"" in text:
text = text.replace("\"", """)
return text
except (TypeError, AttributeError):
_raise_serialization_error(text)
# --------------------------------------------------------------------
##
# Generates a string representation of an XML element, including all
# subelements. If encoding is "unicode", the return type is a string;
# otherwise it is a bytes array.
#
# @param element An Element instance.
# @keyparam encoding Optional output encoding (default is US-ASCII).
# Use "unicode" to return a Unicode string.
# @keyparam method Optional output method ("xml", "html", "text" or
# "c14n"; default is "xml").
# @return An (optionally) encoded string containing the XML data.
# @defreturn string
def tostring(element, encoding=None, method=None):
stream = io.StringIO() if encoding == 'unicode' else io.BytesIO()
ElementTree(element).write(stream, encoding, method=method)
return stream.getvalue()
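# Editor's illustrative note (not part of the original module): a minimal
# sketch of tostring() from a consumer's point of view.
#     tostring(Element("root"), encoding="unicode")   # -> '<root />'
#     tostring(Element("root"))                       # -> encoded bytes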
##
# Generates a string representation of an XML element, including all
# subelements.
#
# @param element An Element instance.
# @keyparam encoding Optional output encoding (default is US-ASCII).
# Use "unicode" to return a Unicode string.
# @keyparam method Optional output method ("xml", "html", "text" or
# "c14n"; default is "xml").
# @return A sequence object containing the XML data.
# @defreturn sequence
# @since 1.3
class _ListDataStream(io.BufferedIOBase):
""" An auxiliary stream accumulating into a list reference
"""
def __init__(self, lst):
self.lst = lst
def writable(self):
return True
def seekable(self):
return True
def write(self, b):
self.lst.append(b)
def tell(self):
return len(self.lst)
def tostringlist(element, encoding=None, method=None):
lst = []
stream = _ListDataStream(lst)
ElementTree(element).write(stream, encoding, method=method)
return lst
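# Editor's illustrative note (not part of the original module): tostringlist()
# produces the same serialization as tostring(), only split into fragments, so
# joining the fragments is expected to reproduce the tostring() output:
#     b"".join(tostringlist(elem)) == tostring(elem)   # for a bytes encoding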
##
# Writes an element tree or element structure to sys.stdout. This
# function should be used for debugging only.
# <p>
# The exact output format is implementation dependent. In this
# version, it's written as an ordinary XML file.
#
# @param elem An element tree or an individual element.
def dump(elem):
# debugging
if not isinstance(elem, ElementTree):
elem = ElementTree(elem)
elem.write(sys.stdout, encoding="unicode")
tail = elem.getroot().tail
if not tail or tail[-1] != "\n":
sys.stdout.write("\n")
# --------------------------------------------------------------------
# parsing
##
# Parses an XML document into an element tree.
#
# @param source A filename or file object containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return An ElementTree instance
def parse(source, parser=None):
tree = ElementTree()
tree.parse(source, parser)
return tree
##
# Parses an XML document into an element tree incrementally, and reports
# what's going on to the user.
#
# @param source A filename or file object containing XML data.
# @param events A list of events to report back. If omitted, only "end"
# events are reported.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return A (event, elem) iterator.
def iterparse(source, events=None, parser=None):
close_source = False
if not hasattr(source, "read"):
source = open(source, "rb")
close_source = True
if not parser:
parser = XMLParser(target=TreeBuilder())
return _IterParseIterator(source, events, parser, close_source)
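# Editor's illustrative note (not part of the original module): a common
# streaming pattern with iterparse(); "records.xml", the "record" tag and
# handle() are hypothetical placeholders.
#     for event, elem in iterparse("records.xml"):
#         if elem.tag == "record":
#             handle(elem)    # hypothetical per-record callback
#             elem.clear()    # drop processed children to bound memory use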
class _IterParseIterator:
def __init__(self, source, events, parser, close_source=False):
self._file = source
self._close_file = close_source
self._events = []
self._index = 0
self._error = None
self.root = self._root = None
self._parser = parser
# wire up the parser for event reporting
parser = self._parser._parser
append = self._events.append
if events is None:
events = ["end"]
for event in events:
if event == "start":
try:
parser.ordered_attributes = 1
parser.specified_attributes = 1
def handler(tag, attrib_in, event=event, append=append,
start=self._parser._start_list):
append((event, start(tag, attrib_in)))
parser.StartElementHandler = handler
except AttributeError:
def handler(tag, attrib_in, event=event, append=append,
start=self._parser._start):
append((event, start(tag, attrib_in)))
parser.StartElementHandler = handler
elif event == "end":
def handler(tag, event=event, append=append,
end=self._parser._end):
append((event, end(tag)))
parser.EndElementHandler = handler
elif event == "start-ns":
def handler(prefix, uri, event=event, append=append):
append((event, (prefix or "", uri or "")))
parser.StartNamespaceDeclHandler = handler
elif event == "end-ns":
def handler(prefix, event=event, append=append):
append((event, None))
parser.EndNamespaceDeclHandler = handler
else:
raise ValueError("unknown event %r" % event)
def __next__(self):
while 1:
try:
item = self._events[self._index]
self._index += 1
return item
except IndexError:
pass
if self._error:
e = self._error
self._error = None
raise e
if self._parser is None:
self.root = self._root
if self._close_file:
self._file.close()
raise StopIteration
# load event buffer
del self._events[:]
self._index = 0
data = self._file.read(16384)
if data:
try:
self._parser.feed(data)
except SyntaxError as exc:
self._error = exc
else:
self._root = self._parser.close()
self._parser = None
def __iter__(self):
return self
##
# Parses an XML document from a string constant. This function can
# be used to embed "XML literals" in Python code.
#
# @param source A string containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return An Element instance.
# @defreturn Element
def XML(text, parser=None):
if not parser:
parser = XMLParser(target=TreeBuilder())
parser.feed(text)
return parser.close()
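# Editor's illustrative note (not part of the original module): XML() turns a
# string literal into an Element tree.
#     elem = XML("<root><a>1</a></root>")
#     elem.find("a").text   # -> '1'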
##
# Parses an XML document from a string constant, and also returns
# a dictionary which maps from element id:s to elements.
#
# @param source A string containing XML data.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return A tuple containing an Element instance and a dictionary.
# @defreturn (Element, dictionary)
def XMLID(text, parser=None):
if not parser:
parser = XMLParser(target=TreeBuilder())
parser.feed(text)
tree = parser.close()
ids = {}
for elem in tree.iter():
id = elem.get("id")
if id:
ids[id] = elem
return tree, ids
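# Editor's illustrative note (not part of the original module): XMLID()
# additionally indexes elements by their "id" attribute.
#     tree, ids = XMLID('<root><item id="a1"/></root>')
#     ids["a1"].tag   # -> 'item'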
##
# Parses an XML document from a string constant. Same as {@link #XML}.
#
# @def fromstring(text)
# @param source A string containing XML data.
# @return An Element instance.
# @defreturn Element
fromstring = XML
##
# Parses an XML document from a sequence of string fragments.
#
# @param sequence A list or other sequence containing XML data fragments.
# @param parser An optional parser instance. If not given, the
# standard {@link XMLParser} parser is used.
# @return An Element instance.
# @defreturn Element
# @since 1.3
def fromstringlist(sequence, parser=None):
if not parser:
parser = XMLParser(target=TreeBuilder())
for text in sequence:
parser.feed(text)
return parser.close()
# --------------------------------------------------------------------
##
# Generic element structure builder. This builder converts a sequence
# of {@link #TreeBuilder.start}, {@link #TreeBuilder.data}, and {@link
# #TreeBuilder.end} method calls to a well-formed element structure.
# <p>
# You can use this class to build an element structure using a custom XML
# parser, or a parser for some other XML-like format.
#
# @param element_factory Optional element factory. This factory
# is called to create new Element instances, as necessary.
class TreeBuilder:
def __init__(self, element_factory=None):
self._data = [] # data collector
self._elem = [] # element stack
self._last = None # last element
self._tail = None # true if we're after an end tag
if element_factory is None:
element_factory = Element
self._factory = element_factory
##
# Flushes the builder buffers, and returns the toplevel document
# element.
#
# @return An Element instance.
# @defreturn Element
def close(self):
assert len(self._elem) == 0, "missing end tags"
assert self._last is not None, "missing toplevel element"
return self._last
def _flush(self):
if self._data:
if self._last is not None:
text = "".join(self._data)
if self._tail:
assert self._last.tail is None, "internal error (tail)"
self._last.tail = text
else:
assert self._last.text is None, "internal error (text)"
self._last.text = text
self._data = []
##
# Adds text to the current element.
#
# @param data A string. This should be either an 8-bit string
# containing ASCII text, or a Unicode string.
def data(self, data):
self._data.append(data)
##
# Opens a new element.
#
# @param tag The element name.
# @param attrib A dictionary containing element attributes.
# @return The opened element.
# @defreturn Element
def start(self, tag, attrs):
self._flush()
self._last = elem = self._factory(tag, attrs)
if self._elem:
self._elem[-1].append(elem)
self._elem.append(elem)
self._tail = 0
return elem
##
# Closes the current element.
#
# @param tag The element name.
# @return The closed element.
# @defreturn Element
def end(self, tag):
self._flush()
self._last = self._elem.pop()
assert self._last.tag == tag,\
"end tag mismatch (expected %s, got %s)" % (
self._last.tag, tag)
self._tail = 1
return self._last
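# Editor's illustrative note (not part of the original module): driving
# TreeBuilder by hand, e.g. from a custom parser.
#     tb = TreeBuilder()
#     tb.start("root", {})
#     tb.start("a", {})
#     tb.data("text")
#     tb.end("a")
#     tb.end("root")
#     tostring(tb.close(), encoding="unicode")   # -> '<root><a>text</a></root>'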
##
# Element structure builder for XML source data, based on the
# <b>expat</b> parser.
#
# @keyparam target Target object. If omitted, the builder uses an
# instance of the standard {@link #TreeBuilder} class.
# @keyparam html Predefine HTML entities. This flag is not supported
# by the current implementation.
# @keyparam encoding Optional encoding. If given, the value overrides
# the encoding specified in the XML file.
# @see #ElementTree
# @see #TreeBuilder
class XMLParser:
def __init__(self, html=0, target=None, encoding=None):
try:
from xml.parsers import expat
except ImportError:
try:
import pyexpat as expat
except ImportError:
raise ImportError(
"No module named expat; use SimpleXMLTreeBuilder instead"
)
parser = expat.ParserCreate(encoding, "}")
if target is None:
target = TreeBuilder()
# underscored names are provided for compatibility only
self.parser = self._parser = parser
self.target = self._target = target
self._error = expat.error
self._names = {} # name memo cache
# main callbacks
parser.DefaultHandlerExpand = self._default
if hasattr(target, 'start'):
parser.StartElementHandler = self._start
if hasattr(target, 'end'):
parser.EndElementHandler = self._end
if hasattr(target, 'data'):
parser.CharacterDataHandler = target.data
# miscellaneous callbacks
if hasattr(target, 'comment'):
parser.CommentHandler = target.comment
if hasattr(target, 'pi'):
parser.ProcessingInstructionHandler = target.pi
# let expat do the buffering, if supported
try:
parser.buffer_text = 1
except AttributeError:
pass
# use new-style attribute handling, if supported
try:
parser.ordered_attributes = 1
parser.specified_attributes = 1
if hasattr(target, 'start'):
parser.StartElementHandler = self._start_list
except AttributeError:
pass
self._doctype = None
self.entity = {}
try:
self.version = "Expat %d.%d.%d" % expat.version_info
except AttributeError:
pass # unknown
def _raiseerror(self, value):
err = ParseError(value)
err.code = value.code
err.position = value.lineno, value.offset
raise err
def _fixname(self, key):
# expand qname, and convert name string to ascii, if possible
try:
name = self._names[key]
except KeyError:
name = key
if "}" in name:
name = "{" + name
self._names[key] = name
return name
def _start(self, tag, attrib_in):
fixname = self._fixname
tag = fixname(tag)
attrib = {}
for key, value in attrib_in.items():
attrib[fixname(key)] = value
return self.target.start(tag, attrib)
def _start_list(self, tag, attrib_in):
fixname = self._fixname
tag = fixname(tag)
attrib = {}
if attrib_in:
for i in range(0, len(attrib_in), 2):
attrib[fixname(attrib_in[i])] = attrib_in[i+1]
return self.target.start(tag, attrib)
def _end(self, tag):
return self.target.end(self._fixname(tag))
def _default(self, text):
prefix = text[:1]
if prefix == "&":
# deal with undefined entities
try:
data_handler = self.target.data
except AttributeError:
return
try:
data_handler(self.entity[text[1:-1]])
except KeyError:
from xml.parsers import expat
err = expat.error(
"undefined entity %s: line %d, column %d" %
(text, self.parser.ErrorLineNumber,
self.parser.ErrorColumnNumber)
)
err.code = 11 # XML_ERROR_UNDEFINED_ENTITY
err.lineno = self.parser.ErrorLineNumber
err.offset = self.parser.ErrorColumnNumber
raise err
elif prefix == "<" and text[:9] == "<!DOCTYPE":
self._doctype = [] # inside a doctype declaration
elif self._doctype is not None:
# parse doctype contents
if prefix == ">":
self._doctype = None
return
text = text.strip()
if not text:
return
self._doctype.append(text)
n = len(self._doctype)
if n > 2:
type = self._doctype[1]
if type == "PUBLIC" and n == 4:
name, type, pubid, system = self._doctype
if pubid:
pubid = pubid[1:-1]
elif type == "SYSTEM" and n == 3:
name, type, system = self._doctype
pubid = None
else:
return
if hasattr(self.target, "doctype"):
self.target.doctype(name, pubid, system[1:-1])
elif self.doctype != self._XMLParser__doctype:
# warn about deprecated call
self._XMLParser__doctype(name, pubid, system[1:-1])
self.doctype(name, pubid, system[1:-1])
self._doctype = None
##
# (Deprecated) Handles a doctype declaration.
#
# @param name Doctype name.
# @param pubid Public identifier.
# @param system System identifier.
def doctype(self, name, pubid, system):
"""This method of XMLParser is deprecated."""
warnings.warn(
"This method of XMLParser is deprecated. Define doctype() "
"method on the TreeBuilder target.",
DeprecationWarning,
)
# sentinel, if doctype is redefined in a subclass
__doctype = doctype
##
# Feeds data to the parser.
#
# @param data Encoded data.
def feed(self, data):
try:
self.parser.Parse(data, 0)
except self._error as v:
self._raiseerror(v)
##
# Finishes feeding data to the parser.
#
# @return An element structure.
# @defreturn Element
def close(self):
try:
self.parser.Parse("", 1) # end of data
except self._error as v:
self._raiseerror(v)
try:
close_handler = self.target.close
except AttributeError:
pass
else:
return close_handler()
finally:
# get rid of circular references
del self.parser, self._parser
del self.target, self._target
# Import the C accelerators
try:
# Element, SubElement, ParseError, TreeBuilder, XMLParser
from _elementtree import *
except ImportError:
pass
else:
# Overwrite 'ElementTree.parse' and 'iterparse' to use the C XMLParser
class ElementTree(ElementTree):
def parse(self, source, parser=None):
close_source = False
if not hasattr(source, 'read'):
source = open(source, 'rb')
close_source = True
try:
if parser is not None:
while True:
data = source.read(65536)
if not data:
break
parser.feed(data)
self._root = parser.close()
else:
parser = XMLParser()
self._root = parser._parse(source)
return self._root
finally:
if close_source:
source.close()
class iterparse:
"""Parses an XML section into an element tree incrementally.
Reports what’s going on to the user. 'source' is a filename or file
object containing XML data. 'events' is a list of events to report back.
The supported events are the strings "start", "end", "start-ns" and
"end-ns" (the "ns" events are used to get detailed namespace
information). If 'events' is omitted, only "end" events are reported.
'parser' is an optional parser instance. If not given, the standard
XMLParser parser is used. Returns an iterator providing
(event, elem) pairs.
"""
root = None
def __init__(self, file, events=None, parser=None):
self._close_file = False
if not hasattr(file, 'read'):
file = open(file, 'rb')
self._close_file = True
self._file = file
self._events = []
self._index = 0
self._error = None
self.root = self._root = None
if parser is None:
parser = XMLParser(target=TreeBuilder())
self._parser = parser
self._parser._setevents(self._events, events)
def __next__(self):
while True:
try:
item = self._events[self._index]
self._index += 1
return item
except IndexError:
pass
if self._error:
e = self._error
self._error = None
raise e
if self._parser is None:
self.root = self._root
if self._close_file:
self._file.close()
raise StopIteration
# load event buffer
del self._events[:]
self._index = 0
data = self._file.read(16384)
if data:
try:
self._parser.feed(data)
except SyntaxError as exc:
self._error = exc
else:
self._root = self._parser.close()
self._parser = None
def __iter__(self):
return self
# compatibility
XMLTreeBuilder = XMLParser
# workaround circular import.
try:
from ElementC14N import _serialize_c14n
_serialize["c14n"] = _serialize_c14n
except ImportError:
pass
|
gpl-3.0
|
cloudera/hadoop-common
|
src/contrib/hod/hodlib/Common/desc.py
|
182
|
7484
|
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""manage component descriptors"""
# -*- python -*-
import random
from sets import Set
from pprint import pformat
from hodlib.Common.util import local_fqdn
from hodlib.Common.tcp import tcpSocket, tcpError
class Schema:
"""the primary class for describing
  schemas"""
STRING, LIST, MAP = range(3)
def __init__(self, name, type = STRING, delim=','):
self.name = name
self.type = type
self.delim = delim
def getName(self):
return self.name
def getType(self):
return self.type
def getDelim(self):
return self.delim
class _Merger:
"""A class to merge lists and add key/value
pairs to a dictionary"""
def mergeList(x, y, uniq=True):
l = []
l.extend(x)
l.extend(y)
if not uniq:
return l
s = Set(l)
l = list(s)
return l
mergeList = staticmethod(mergeList)
def mergeMap(to, add):
for k in add:
to.setdefault(k, add[k])
return to
mergeMap = staticmethod(mergeMap)
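  # Editor's illustrative note (not in the original file):
  #   _Merger.mergeList(['a', 'b'], ['b', 'c'])    -> ['a', 'b', 'c'] (order not guaranteed)
  #   _Merger.mergeMap({'x': 1}, {'x': 9, 'y': 2}) -> {'x': 1, 'y': 2} (existing keys win)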
class NodePoolDesc:
"""a schema for describing
Nodepools"""
def __init__(self, dict):
self.dict = dict.copy()
self.dict.setdefault('attrs', {})
self._checkRequired()
if 'options' in dict: self.dict['attrs'] = dict['options']
def _checkRequired(self):
if not 'id' in self.dict:
raise ValueError, "nodepool needs 'id'"
if self.getPkgDir() == None:
raise ValueError, "nodepool %s needs 'pkgs'" % (self.getName())
def getName(self):
return self.dict['id']
def getPkgDir(self):
return self.dict['batch-home']
def getAttrs(self):
return self.dict['attrs']
def getSchema():
schema = {}
s = Schema('id')
schema[s.getName()] = s
s = Schema('batch-home', Schema.LIST, ':')
schema[s.getName()] = s
s = Schema('attrs', Schema.MAP)
schema[s.getName()] = s
return schema
getSchema = staticmethod(getSchema)
class ServiceDesc:
"""A schema for describing services"""
def __init__(self, dict):
self.dict = dict.copy()
self.dict.setdefault('external', False)
self.dict.setdefault('attrs', {})
self.dict.setdefault('envs', {})
self.dict.setdefault('host',None)
self.dict.setdefault('port',None)
self.dict.setdefault('tar', None)
self.dict.setdefault('pkgs', '')
self.dict.setdefault('final-attrs', {})
self._checkRequired()
if self.dict.has_key('hadoop-tar-ball'):
self.dict['tar'] = self.dict['hadoop-tar-ball']
def _checkRequired(self):
if not 'id' in self.dict:
raise ValueError, "service description needs 'id'"
# if len(self.getPkgDirs()) <= 0:
# raise ValueError, "service description %s needs 'pkgs'" % (self.getName())
def getName(self):
return self.dict['id']
def isExternal(self):
"""True if the service is outside hod.
e.g. connect to existing HDFS"""
return self.dict['external']
def getPkgDirs(self):
return self.dict['pkgs']
def getTar(self):
return self.dict['tar']
def getAttrs(self):
return self.dict['attrs']
def getfinalAttrs(self):
return self.dict['final-attrs']
def getEnvs(self):
return self.dict['envs']
def getSchema():
schema = {}
s = Schema('id')
schema[s.getName()] = s
s = Schema('external')
schema[s.getName()] = s
s = Schema('pkgs', Schema.LIST, ':')
schema[s.getName()] = s
s = Schema('tar', Schema.LIST, ":")
schema[s.getName()] = s
s = Schema('attrs', Schema.MAP)
schema[s.getName()] = s
s = Schema('final-attrs', Schema.MAP)
schema[s.getName()] = s
s = Schema('envs', Schema.MAP)
schema[s.getName()] = s
return schema
getSchema = staticmethod(getSchema)
class CommandDesc:
def __init__(self, dict):
"""a class for how a command is described"""
self.dict = dict
def __repr__(self):
return pformat(self.dict)
def _getName(self):
"""return the name of the command to be run"""
return self.dict['name']
def _getProgram(self):
"""return where the program is """
return self.dict['program']
def _getArgv(self):
"""return the arguments for the command to be run"""
return self.dict['argv']
def _getEnvs(self):
"""return the environment in which the command is to be run"""
return self.dict['envs']
def _getPkgDirs(self):
"""return the packages for this command"""
return self.dict['pkgdirs']
def _getWorkDirs(self):
"""return the working directories for this command"""
return self.dict['workdirs']
def _getAttrs(self):
"""return the list of attributes for this command"""
return self.dict['attrs']
def _getfinalAttrs(self):
"""return the final xml params list for this command"""
return self.dict['final-attrs']
def _getForeground(self):
"""return if the command is to be run in foreground or not"""
return self.dict['fg']
def _getStdin(self):
return self.dict['stdin']
def toString(cmdDesc):
"""return a string representation of this command"""
row = []
row.append('name=%s' % (cmdDesc._getName()))
row.append('program=%s' % (cmdDesc._getProgram()))
row.append('pkgdirs=%s' % CommandDesc._csv(cmdDesc._getPkgDirs(), ':'))
if 'argv' in cmdDesc.dict:
row.append('argv=%s' % CommandDesc._csv(cmdDesc._getArgv()))
if 'envs' in cmdDesc.dict:
envs = cmdDesc._getEnvs()
list = []
for k in envs:
v = envs[k]
list.append('%s=%s' % (k, v))
row.append('envs=%s' % CommandDesc._csv(list))
if 'workdirs' in cmdDesc.dict:
row.append('workdirs=%s' % CommandDesc._csv(cmdDesc._getWorkDirs(), ':'))
if 'attrs' in cmdDesc.dict:
attrs = cmdDesc._getAttrs()
list = []
for k in attrs:
v = attrs[k]
list.append('%s=%s' % (k, v))
row.append('attrs=%s' % CommandDesc._csv(list))
if 'final-attrs' in cmdDesc.dict:
      fattrs = cmdDesc._getfinalAttrs()
list = []
for k in fattrs:
v = fattrs[k]
list.append('%s=%s' % (k, v))
      row.append('final-attrs=%s' % CommandDesc._csv(list))
if 'fg' in cmdDesc.dict:
row.append('fg=%s' % (cmdDesc._getForeground()))
if 'stdin' in cmdDesc.dict:
row.append('stdin=%s' % (cmdDesc._getStdin()))
return CommandDesc._csv(row)
toString = staticmethod(toString)
def _csv(row, delim=','):
"""return a string in csv format"""
import cStringIO
import csv
queue = cStringIO.StringIO()
writer = csv.writer(queue, delimiter=delim, escapechar='\\', quoting=csv.QUOTE_NONE,
doublequote=False, lineterminator='\n')
writer.writerow(row)
return queue.getvalue().rstrip('\n')
_csv = staticmethod(_csv)
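  # Editor's illustrative note (not in the original file): with QUOTE_NONE and
  # escapechar='\\', delimiters inside a field are backslash-escaped rather
  # than quoted, e.g.
  #   CommandDesc._csv(['name=ls', 'argv=-l,-a'])  -> 'name=ls,argv=-l\,-a'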
|
apache-2.0
|
iulian787/spack
|
var/spack/repos/builtin/packages/r-igraph/package.py
|
5
|
1357
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RIgraph(RPackage):
"""Routines for simple graphs and network analysis. It can handle large
graphs very well and provides functions for generating random and regular
graphs, graph visualization, centrality methods and much more."""
homepage = "http://igraph.org/"
url = "https://cloud.r-project.org/src/contrib/igraph_1.0.1.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/igraph"
version('1.2.4.1', sha256='891acc763b5a4a4a245358a95dee69280f4013c342f14dd6a438e7bb2bf2e480')
version('1.2.4', sha256='1048eb26ab6b592815bc269c1d91e974c86c9ab827ccb80ae0a40042019592cb')
version('1.1.2', sha256='89b16b41bc77949ea208419e52a18b78b5d418c7fedc52cd47d06a51a6e746ec')
version('1.0.1', sha256='dc64ed09b8b5f8d66ed4936cde3491974d6bc5178dd259b6eab7ef3936aa5602')
depends_on('r-matrix', type=('build', 'run'))
depends_on('r-magrittr', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-irlba', when='@:1.1.9', type=('build', 'run'))
depends_on('gmp')
depends_on('libxml2')
depends_on('glpk', when='@1.2.0:')
|
lgpl-2.1
|
UstadMobile/eXePUB
|
exe/export/pages.py
|
2
|
2677
|
# ===========================================================================
# eXe
# Copyright 2004-2005, University of Auckland
# Copyright 2004-2008 eXe Project, http://eXeLearning.org/
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
"""
Export Pages functions
"""
import logging
from urllib import quote
from exe.webui import common
log = logging.getLogger(__name__)
# ===========================================================================
class Page(object):
"""
This is an abstraction for a page containing a node
e.g. in a SCORM package or Website
"""
def __init__(self, name, depth, node):
"""
Initialize
"""
self.name = name
self.depth = depth
self.node = node
def renderLicense(self):
"""
Returns an XHTML string rendering the license.
"""
return common.renderLicense(self.node.package.license)
def renderFooter(self):
"""
Returns an XHTML string rendering the footer.
"""
return common.renderFooter(self.node.package.footer)
# ===========================================================================
def uniquifyNames(pages):
"""
Make sure all the page names are unique
"""
pageNames = {}
# First identify the duplicate names
for page in pages:
if page.name in pageNames:
pageNames[page.name] = 1
else:
pageNames[page.name] = 0
# Then uniquify them
for page in pages:
uniquifier = pageNames[page.name]
if uniquifier:
pageNames[page.name] = uniquifier + 1
page.name += unicode(uniquifier)
# for export, temporarily set this unique name on the node itself,
# such that any links to it can use the proper target; also
# including the quote() & ".html", as per WebsitePage's:
page.node.tmp_export_filename = quote(page.name) + ".html"
|
gpl-2.0
|
Jayflux/servo
|
tests/wpt/web-platform-tests/tools/third_party/pytest/testing/code/test_code.py
|
14
|
4951
|
# coding: utf-8
from __future__ import absolute_import, division, print_function
import sys
import _pytest._code
import py
import pytest
from test_excinfo import TWMock
def test_ne():
code1 = _pytest._code.Code(compile('foo = "bar"', '', 'exec'))
assert code1 == code1
code2 = _pytest._code.Code(compile('foo = "baz"', '', 'exec'))
assert code2 != code1
def test_code_gives_back_name_for_not_existing_file():
name = 'abc-123'
co_code = compile("pass\n", name, 'exec')
assert co_code.co_filename == name
code = _pytest._code.Code(co_code)
assert str(code.path) == name
assert code.fullsource is None
def test_code_with_class():
class A(object):
pass
pytest.raises(TypeError, "_pytest._code.Code(A)")
if True:
def x():
pass
def test_code_fullsource():
code = _pytest._code.Code(x)
full = code.fullsource
assert 'test_code_fullsource()' in str(full)
def test_code_source():
code = _pytest._code.Code(x)
src = code.source()
expected = """def x():
pass"""
assert str(src) == expected
def test_frame_getsourcelineno_myself():
def func():
return sys._getframe(0)
f = func()
f = _pytest._code.Frame(f)
source, lineno = f.code.fullsource, f.lineno
assert source[lineno].startswith(" return sys._getframe(0)")
def test_getstatement_empty_fullsource():
def func():
return sys._getframe(0)
f = func()
f = _pytest._code.Frame(f)
prop = f.code.__class__.fullsource
try:
f.code.__class__.fullsource = None
assert f.statement == _pytest._code.Source("")
finally:
f.code.__class__.fullsource = prop
def test_code_from_func():
co = _pytest._code.Code(test_frame_getsourcelineno_myself)
assert co.firstlineno
assert co.path
def test_unicode_handling():
value = py.builtin._totext('\xc4\x85\xc4\x87\n', 'utf-8').encode('utf8')
def f():
raise Exception(value)
excinfo = pytest.raises(Exception, f)
str(excinfo)
if sys.version_info[0] < 3:
unicode(excinfo)
@pytest.mark.skipif(sys.version_info[0] >= 3, reason='python 2 only issue')
def test_unicode_handling_syntax_error():
value = py.builtin._totext('\xc4\x85\xc4\x87\n', 'utf-8').encode('utf8')
def f():
raise SyntaxError('invalid syntax', (None, 1, 3, value))
excinfo = pytest.raises(Exception, f)
str(excinfo)
if sys.version_info[0] < 3:
unicode(excinfo)
def test_code_getargs():
def f1(x):
pass
c1 = _pytest._code.Code(f1)
assert c1.getargs(var=True) == ('x',)
def f2(x, *y):
pass
c2 = _pytest._code.Code(f2)
assert c2.getargs(var=True) == ('x', 'y')
def f3(x, **z):
pass
c3 = _pytest._code.Code(f3)
assert c3.getargs(var=True) == ('x', 'z')
def f4(x, *y, **z):
pass
c4 = _pytest._code.Code(f4)
assert c4.getargs(var=True) == ('x', 'y', 'z')
def test_frame_getargs():
def f1(x):
return sys._getframe(0)
fr1 = _pytest._code.Frame(f1('a'))
assert fr1.getargs(var=True) == [('x', 'a')]
def f2(x, *y):
return sys._getframe(0)
fr2 = _pytest._code.Frame(f2('a', 'b', 'c'))
assert fr2.getargs(var=True) == [('x', 'a'), ('y', ('b', 'c'))]
def f3(x, **z):
return sys._getframe(0)
fr3 = _pytest._code.Frame(f3('a', b='c'))
assert fr3.getargs(var=True) == [('x', 'a'), ('z', {'b': 'c'})]
def f4(x, *y, **z):
return sys._getframe(0)
fr4 = _pytest._code.Frame(f4('a', 'b', c='d'))
assert fr4.getargs(var=True) == [('x', 'a'), ('y', ('b',)),
('z', {'c': 'd'})]
class TestExceptionInfo(object):
def test_bad_getsource(self):
try:
if False:
pass
else:
assert False
except AssertionError:
exci = _pytest._code.ExceptionInfo()
assert exci.getrepr()
class TestTracebackEntry(object):
def test_getsource(self):
try:
if False:
pass
else:
assert False
except AssertionError:
exci = _pytest._code.ExceptionInfo()
entry = exci.traceback[0]
source = entry.getsource()
assert len(source) == 6
assert 'assert False' in source[5]
class TestReprFuncArgs(object):
def test_not_raise_exception_with_mixed_encoding(self):
from _pytest._code.code import ReprFuncArgs
tw = TWMock()
args = [
('unicode_string', u"São Paulo"),
('utf8_string', 'S\xc3\xa3o Paulo'),
]
r = ReprFuncArgs(args)
r.toterminal(tw)
if sys.version_info[0] >= 3:
assert tw.lines[0] == 'unicode_string = São Paulo, utf8_string = São Paulo'
else:
assert tw.lines[0] == 'unicode_string = São Paulo, utf8_string = São Paulo'
|
mpl-2.0
|
hjarmstrong/Odme-plusplus
|
3rd/build/tools/build/v2/test/dependency_test.py
|
44
|
6986
|
#!/usr/bin/python
# Copyright 2003 Dave Abrahams
# Copyright 2002, 2003, 2005, 2006 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
def test_basic():
t = BoostBuild.Tester(["-d3", "-d+12"], pass_d0=False, use_test_config=False)
t.write("a.cpp", """
#include <a.h>
# include "a.h"
#include <x.h>
int main() {}
""")
t.write("a.h", "\n")
t.write("a_c.c", """\
#include <a.h>
# include "a.h"
#include <x.h>
""")
t.write("b.cpp", """\
#include "a.h"
int main() {}
""")
t.write("b.h", "\n")
t.write("c.cpp", """\
#include "x.h"
int main() {}
""")
t.write("e.cpp", """\
#include "x.h"
int main() {}
""")
t.write("x.foo", "")
t.write("y.foo", "")
t.write("src1/a.h", '#include "b.h"\n')
t.write("src1/b.h", '#include "c.h"\n')
t.write("src1/c.h", "\n")
t.write("src1/z.h", """\
extern int dummy_variable_suppressing_empty_file_warning_on_hp_cxx_compiler;
""")
t.write("src2/b.h", "\n")
t.write("jamroot.jam", """\
import foo ;
import types/cpp ;
import types/exe ;
project test : requirements <include>src1 ;
exe a : x.foo a.cpp a_c.c ;
exe b : b.cpp ;
# Because of <define>FOO, c.cpp will be compiled to a different directory than
# everything for main target "a". Therefore, without <implicit-dependency>, C
# preprocessor processing that module will not find "x.h", which is part of
# "a"'s dependency graph.
#
# --------------------------
# More detailed explanation:
# --------------------------
# c.cpp includes x.h which does not exist on the current include path so Boost
# Jam will try to match it to existing Jam targets to cover cases as this one
# where the file is generated by the same build.
#
# However, as x.h is not part of "c" metatarget's dependency graph, Boost
# Build will not actualize its target by default, i.e. create its Jam target.
#
# To get the Jam target created in time, we use the <implicit-dependency>
# feature. This tells Boost Build that it needs to actualize the dependency
# graph for metatarget "a", even though that metatarget has not been directly
# mentioned and is not a dependency for any of the metatargets mentioned in the
# current build request.
#
# Note that Boost Build does not automatically add a dependency between the
# Jam targets in question so, if Boost Jam does not add a dependency on a target
# from that other dependency graph (x.h in our case), i.e. if c.cpp does not
# actually include x.h, us actualizing it will have no effect in the end as
# Boost Jam will not have a reason to actually build those targets in spite of
# knowing about them.
exe c : c.cpp : <define>FOO <implicit-dependency>a ;
""")
t.write("foo.jam", """\
import generators ;
import modules ;
import os ;
import print ;
import type ;
import types/cpp ;
type.register FOO : foo ;
generators.register-standard foo.foo : FOO : CPP H ;
nl = "
" ;
rule foo ( targets * : sources * : properties * )
{
# On NT, you need an exported symbol in order to have an import library
# generated. We will not really use the symbol defined here, just force the
# import library creation.
if ( [ os.name ] = NT || [ modules.peek : OS ] in CYGWIN ) &&
<main-target-type>LIB in $(properties)
{
.decl = "void __declspec(dllexport) foo() {}" ;
}
print.output $(<[1]) ;
print.text $(.decl:E="//")$(nl) ;
print.output $(<[2]) ;
print.text "#include <z.h>"$(nl) ;
}
""")
t.write("foo.py",
r"""import bjam
import b2.build.type as type
import b2.build.generators as generators
from b2.manager import get_manager
type.register("FOO", ["foo"])
generators.register_standard("foo.foo", ["FOO"], ["CPP", "H"])
def prepare_foo(targets, sources, properties):
if properties.get('os') in ['windows', 'cygwin']:
bjam.call('set-target-variable', targets, "DECL",
"void __declspec(dllexport) foo() {}")
get_manager().engine().register_action("foo.foo",
"echo -e $(DECL:E=//)\\n > $(<[1])\n"
"echo -e "#include <z.h>\\n" > $(<[2])\n", function=prepare_foo)
""")
# Check that main target 'c' was able to find 'x.h' from 'a's dependency
# graph.
t.run_build_system()
t.expect_addition("bin/$toolset/debug/c.exe")
# Check handling of first level includes.
# Both 'a' and 'b' include "a.h" and should be updated.
t.touch("a.h")
t.run_build_system()
t.expect_touch("bin/$toolset/debug/a.exe")
t.expect_touch("bin/$toolset/debug/a.obj")
t.expect_touch("bin/$toolset/debug/a_c.obj")
t.expect_touch("bin/$toolset/debug/b.exe")
t.expect_touch("bin/$toolset/debug/b.obj")
t.expect_nothing_more()
# Only source files using include <a.h> should be compiled.
t.touch("src1/a.h")
t.run_build_system()
t.expect_touch("bin/$toolset/debug/a.exe")
t.expect_touch("bin/$toolset/debug/a.obj")
t.expect_touch("bin/$toolset/debug/a_c.obj")
t.expect_nothing_more()
# "src/a.h" includes "b.h" (in the same dir).
t.touch("src1/b.h")
t.run_build_system()
t.expect_touch("bin/$toolset/debug/a.exe")
t.expect_touch("bin/$toolset/debug/a.obj")
t.expect_touch("bin/$toolset/debug/a_c.obj")
t.expect_nothing_more()
# Included by "src/b.h". We had a bug: file included using double quotes
# (e.g. "b.h") was not scanned at all in this case.
t.touch("src1/c.h")
t.run_build_system()
t.expect_touch("bin/$toolset/debug/a.exe")
t.touch("b.h")
t.run_build_system()
t.expect_nothing_more()
# Test dependency on a generated header.
#
# TODO: we have also to check that generated header is found correctly if
# it is different for different subvariants. Lacking any toolset support,
# this check will be implemented later.
t.touch("x.foo")
t.run_build_system()
t.expect_touch("bin/$toolset/debug/a.obj")
t.expect_touch("bin/$toolset/debug/a_c.obj")
# Check that generated headers are scanned for dependencies as well.
t.touch("src1/z.h")
t.run_build_system()
t.expect_touch("bin/$toolset/debug/a.obj")
t.expect_touch("bin/$toolset/debug/a_c.obj")
t.cleanup()
def test_scanned_includes_with_absolute_paths():
"""
Regression test: on Windows, <includes> with absolute paths were not
considered when scanning dependencies.
"""
t = BoostBuild.Tester(["-d3", "-d+12"], pass_d0=False)
t.write("jamroot.jam", """\
path-constant TOP : . ;
exe app : main.cpp : <include>$(TOP)/include ;
""");
t.write("main.cpp", """\
#include <dir/header.h>
int main() {}
""")
t.write("include/dir/header.h", "\n")
t.run_build_system()
t.expect_addition("bin/$toolset/debug/main.obj")
t.touch("include/dir/header.h")
t.run_build_system()
t.expect_touch("bin/$toolset/debug/main.obj")
t.cleanup()
test_basic()
test_scanned_includes_with_absolute_paths()
|
mit
|
ryfeus/lambda-packs
|
Keras_tensorflow_nightly/source2.7/google/protobuf/json_format.py
|
19
|
29203
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains routines for printing protocol messages in JSON format.
Simple usage example:
# Create a proto object and serialize it to a json format string.
message = my_proto_pb2.MyMessage(foo='bar')
json_string = json_format.MessageToJson(message)
# Parse a json format string to proto object.
message = json_format.Parse(json_string, my_proto_pb2.MyMessage())
"""
__author__ = '[email protected] (Jie Luo)'
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict #PY26
import base64
import json
import math
import re
import six
import sys
from operator import methodcaller
from google.protobuf import descriptor
from google.protobuf import symbol_database
_TIMESTAMPFOMAT = '%Y-%m-%dT%H:%M:%S'
_INT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_INT32,
descriptor.FieldDescriptor.CPPTYPE_UINT32,
descriptor.FieldDescriptor.CPPTYPE_INT64,
descriptor.FieldDescriptor.CPPTYPE_UINT64])
_INT64_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_INT64,
descriptor.FieldDescriptor.CPPTYPE_UINT64])
_FLOAT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_FLOAT,
descriptor.FieldDescriptor.CPPTYPE_DOUBLE])
_INFINITY = 'Infinity'
_NEG_INFINITY = '-Infinity'
_NAN = 'NaN'
_UNPAIRED_SURROGATE_PATTERN = re.compile(six.u(
r'[\ud800-\udbff](?![\udc00-\udfff])|(?<![\ud800-\udbff])[\udc00-\udfff]'
))
_VALID_EXTENSION_NAME = re.compile(r'\[[a-zA-Z0-9\._]*\]$')
class Error(Exception):
"""Top-level module error for json_format."""
class SerializeToJsonError(Error):
"""Thrown if serialization to JSON fails."""
class ParseError(Error):
"""Thrown in case of parsing error."""
def MessageToJson(message,
including_default_value_fields=False,
preserving_proto_field_name=False,
indent=2,
sort_keys=False):
"""Converts protobuf message to JSON format.
Args:
message: The protocol buffers message instance to serialize.
including_default_value_fields: If True, singular primitive fields,
repeated fields, and map fields will always be serialized. If
False, only serialize non-empty fields. Singular message fields
and oneof fields are not affected by this option.
preserving_proto_field_name: If True, use the original proto field
names as defined in the .proto file. If False, convert the field
names to lowerCamelCase.
indent: The JSON object will be pretty-printed with this indent level.
An indent level of 0 or negative will only insert newlines.
sort_keys: If True, then the output will be sorted by field names.
Returns:
A string containing the JSON formatted protocol buffer message.
"""
printer = _Printer(including_default_value_fields,
preserving_proto_field_name)
return printer.ToJsonString(message, indent, sort_keys)
def MessageToDict(message,
including_default_value_fields=False,
preserving_proto_field_name=False):
"""Converts protobuf message to a dictionary.
When the dictionary is encoded to JSON, it conforms to proto3 JSON spec.
Args:
message: The protocol buffers message instance to serialize.
including_default_value_fields: If True, singular primitive fields,
repeated fields, and map fields will always be serialized. If
False, only serialize non-empty fields. Singular message fields
and oneof fields are not affected by this option.
preserving_proto_field_name: If True, use the original proto field
names as defined in the .proto file. If False, convert the field
names to lowerCamelCase.
Returns:
A dict representation of the protocol buffer message.
"""
printer = _Printer(including_default_value_fields,
preserving_proto_field_name)
# pylint: disable=protected-access
return printer._MessageToJsonObject(message)
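# Editor's illustrative note (not in the original file), reusing the
# hypothetical my_proto_pb2 module from the module docstring example above:
#   MessageToDict(my_proto_pb2.MyMessage(foo='bar'))  # -> {'foo': 'bar'}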
def _IsMapEntry(field):
return (field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
field.message_type.has_options and
field.message_type.GetOptions().map_entry)
class _Printer(object):
"""JSON format printer for protocol message."""
def __init__(self,
including_default_value_fields=False,
preserving_proto_field_name=False):
self.including_default_value_fields = including_default_value_fields
self.preserving_proto_field_name = preserving_proto_field_name
def ToJsonString(self, message, indent, sort_keys):
js = self._MessageToJsonObject(message)
return json.dumps(js, indent=indent, sort_keys=sort_keys)
def _MessageToJsonObject(self, message):
"""Converts message to an object according to Proto3 JSON Specification."""
message_descriptor = message.DESCRIPTOR
full_name = message_descriptor.full_name
if _IsWrapperMessage(message_descriptor):
return self._WrapperMessageToJsonObject(message)
if full_name in _WKTJSONMETHODS:
return methodcaller(_WKTJSONMETHODS[full_name][0], message)(self)
js = {}
return self._RegularMessageToJsonObject(message, js)
def _RegularMessageToJsonObject(self, message, js):
"""Converts normal message according to Proto3 JSON Specification."""
fields = message.ListFields()
try:
for field, value in fields:
if self.preserving_proto_field_name:
name = field.name
else:
name = field.json_name
if _IsMapEntry(field):
# Convert a map field.
v_field = field.message_type.fields_by_name['value']
js_map = {}
for key in value:
if isinstance(key, bool):
if key:
recorded_key = 'true'
else:
recorded_key = 'false'
else:
recorded_key = key
js_map[recorded_key] = self._FieldToJsonObject(
v_field, value[key])
js[name] = js_map
elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
# Convert a repeated field.
js[name] = [self._FieldToJsonObject(field, k)
for k in value]
elif field.is_extension:
f = field
if (f.containing_type.GetOptions().message_set_wire_format and
f.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
f.label == descriptor.FieldDescriptor.LABEL_OPTIONAL):
f = f.message_type
name = '[%s.%s]' % (f.full_name, name)
js[name] = self._FieldToJsonObject(field, value)
else:
js[name] = self._FieldToJsonObject(field, value)
# Serialize default value if including_default_value_fields is True.
if self.including_default_value_fields:
message_descriptor = message.DESCRIPTOR
for field in message_descriptor.fields:
# Singular message fields and oneof fields will not be affected.
if ((field.label != descriptor.FieldDescriptor.LABEL_REPEATED and
field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE) or
field.containing_oneof):
continue
if self.preserving_proto_field_name:
name = field.name
else:
name = field.json_name
if name in js:
            # Skip the field which has been serialized already.
continue
if _IsMapEntry(field):
js[name] = {}
elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
js[name] = []
else:
js[name] = self._FieldToJsonObject(field, field.default_value)
except ValueError as e:
raise SerializeToJsonError(
'Failed to serialize {0} field: {1}.'.format(field.name, e))
return js
def _FieldToJsonObject(self, field, value):
"""Converts field value according to Proto3 JSON Specification."""
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
return self._MessageToJsonObject(value)
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
enum_value = field.enum_type.values_by_number.get(value, None)
if enum_value is not None:
return enum_value.name
else:
if field.file.syntax == 'proto3':
return value
raise SerializeToJsonError('Enum field contains an integer value '
                                   'which cannot be mapped to an enum value.')
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
if field.type == descriptor.FieldDescriptor.TYPE_BYTES:
# Use base64 Data encoding for bytes
return base64.b64encode(value).decode('utf-8')
else:
return value
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
return bool(value)
elif field.cpp_type in _INT64_TYPES:
return str(value)
elif field.cpp_type in _FLOAT_TYPES:
if math.isinf(value):
if value < 0.0:
return _NEG_INFINITY
else:
return _INFINITY
if math.isnan(value):
return _NAN
return value
def _AnyMessageToJsonObject(self, message):
"""Converts Any message according to Proto3 JSON Specification."""
if not message.ListFields():
return {}
# Must print @type first, use OrderedDict instead of {}
js = OrderedDict()
type_url = message.type_url
js['@type'] = type_url
sub_message = _CreateMessageFromTypeUrl(type_url)
sub_message.ParseFromString(message.value)
message_descriptor = sub_message.DESCRIPTOR
full_name = message_descriptor.full_name
if _IsWrapperMessage(message_descriptor):
js['value'] = self._WrapperMessageToJsonObject(sub_message)
return js
if full_name in _WKTJSONMETHODS:
js['value'] = methodcaller(_WKTJSONMETHODS[full_name][0],
sub_message)(self)
return js
return self._RegularMessageToJsonObject(sub_message, js)
def _GenericMessageToJsonObject(self, message):
"""Converts message according to Proto3 JSON Specification."""
# Duration, Timestamp and FieldMask have ToJsonString method to do the
# convert. Users can also call the method directly.
return message.ToJsonString()
def _ValueMessageToJsonObject(self, message):
"""Converts Value message according to Proto3 JSON Specification."""
which = message.WhichOneof('kind')
# If the Value message is not set treat as null_value when serialize
# to JSON. The parse back result will be different from original message.
if which is None or which == 'null_value':
return None
if which == 'list_value':
return self._ListValueMessageToJsonObject(message.list_value)
if which == 'struct_value':
value = message.struct_value
else:
value = getattr(message, which)
oneof_descriptor = message.DESCRIPTOR.fields_by_name[which]
return self._FieldToJsonObject(oneof_descriptor, value)
def _ListValueMessageToJsonObject(self, message):
"""Converts ListValue message according to Proto3 JSON Specification."""
return [self._ValueMessageToJsonObject(value)
for value in message.values]
def _StructMessageToJsonObject(self, message):
"""Converts Struct message according to Proto3 JSON Specification."""
fields = message.fields
ret = {}
for key in fields:
ret[key] = self._ValueMessageToJsonObject(fields[key])
return ret
def _WrapperMessageToJsonObject(self, message):
return self._FieldToJsonObject(
message.DESCRIPTOR.fields_by_name['value'], message.value)
def _IsWrapperMessage(message_descriptor):
return message_descriptor.file.name == 'google/protobuf/wrappers.proto'
def _DuplicateChecker(js):
result = {}
for name, value in js:
if name in result:
raise ParseError('Failed to load JSON: duplicate key {0}.'.format(name))
result[name] = value
return result
def _CreateMessageFromTypeUrl(type_url):
# TODO(jieluo): Should add a way that users can register the type resolver
# instead of the default one.
db = symbol_database.Default()
type_name = type_url.split('/')[-1]
try:
message_descriptor = db.pool.FindMessageTypeByName(type_name)
except KeyError:
raise TypeError(
'Can not find message descriptor by type_url: {0}.'.format(type_url))
message_class = db.GetPrototype(message_descriptor)
return message_class()
def Parse(text, message, ignore_unknown_fields=False):
"""Parses a JSON representation of a protocol message into a message.
Args:
text: Message JSON representation.
message: A protocol buffer message to merge into.
ignore_unknown_fields: If True, do not raise errors for unknown fields.
Returns:
The same message passed as argument.
  Raises:
ParseError: On JSON parsing problems.
"""
if not isinstance(text, six.text_type): text = text.decode('utf-8')
try:
if sys.version_info < (2, 7):
# object_pair_hook is not supported before python2.7
js = json.loads(text)
else:
js = json.loads(text, object_pairs_hook=_DuplicateChecker)
except ValueError as e:
raise ParseError('Failed to load JSON: {0}.'.format(str(e)))
return ParseDict(js, message, ignore_unknown_fields)
def ParseDict(js_dict, message, ignore_unknown_fields=False):
"""Parses a JSON dictionary representation into a message.
Args:
js_dict: Dict representation of a JSON message.
message: A protocol buffer message to merge into.
ignore_unknown_fields: If True, do not raise errors for unknown fields.
Returns:
The same message passed as argument.
"""
parser = _Parser(ignore_unknown_fields)
parser.ConvertMessage(js_dict, message)
return message
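# Editor's illustrative note (not in the original file), again with the
# hypothetical my_proto_pb2 module from the module docstring:
#   msg = ParseDict({'foo': 'bar'}, my_proto_pb2.MyMessage())
#   msg.foo   # -> 'bar'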
_INT_OR_FLOAT = six.integer_types + (float,)
class _Parser(object):
"""JSON format parser for protocol message."""
def __init__(self,
ignore_unknown_fields):
self.ignore_unknown_fields = ignore_unknown_fields
def ConvertMessage(self, value, message):
"""Convert a JSON object into a message.
Args:
value: A JSON object.
message: A WKT or regular protocol message to record the data.
Raises:
ParseError: In case of convert problems.
"""
message_descriptor = message.DESCRIPTOR
full_name = message_descriptor.full_name
if _IsWrapperMessage(message_descriptor):
self._ConvertWrapperMessage(value, message)
elif full_name in _WKTJSONMETHODS:
methodcaller(_WKTJSONMETHODS[full_name][1], value, message)(self)
else:
self._ConvertFieldValuePair(value, message)
def _ConvertFieldValuePair(self, js, message):
"""Convert field value pairs into regular message.
Args:
js: A JSON object to convert the field value pairs.
message: A regular protocol message to record the data.
Raises:
ParseError: In case of problems converting.
"""
names = []
message_descriptor = message.DESCRIPTOR
fields_by_json_name = dict((f.json_name, f)
for f in message_descriptor.fields)
for name in js:
try:
field = fields_by_json_name.get(name, None)
if not field:
field = message_descriptor.fields_by_name.get(name, None)
if not field and _VALID_EXTENSION_NAME.match(name):
if not message_descriptor.is_extendable:
raise ParseError('Message type {0} does not have extensions'.format(
message_descriptor.full_name))
identifier = name[1:-1] # strip [] brackets
identifier = '.'.join(identifier.split('.')[:-1])
# pylint: disable=protected-access
field = message.Extensions._FindExtensionByName(identifier)
# pylint: enable=protected-access
if not field:
if self.ignore_unknown_fields:
continue
raise ParseError(
('Message type "{0}" has no field named "{1}".\n'
' Available Fields(except extensions): {2}').format(
message_descriptor.full_name, name,
message_descriptor.fields))
if name in names:
raise ParseError('Message type "{0}" should not have multiple '
'"{1}" fields.'.format(
message.DESCRIPTOR.full_name, name))
names.append(name)
# Check no other oneof field is parsed.
if field.containing_oneof is not None:
oneof_name = field.containing_oneof.name
if oneof_name in names:
raise ParseError('Message type "{0}" should not have multiple '
'"{1}" oneof fields.'.format(
message.DESCRIPTOR.full_name, oneof_name))
names.append(oneof_name)
value = js[name]
if value is None:
if (field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE
and field.message_type.full_name == 'google.protobuf.Value'):
sub_message = getattr(message, field.name)
sub_message.null_value = 0
else:
message.ClearField(field.name)
continue
# Parse field value.
if _IsMapEntry(field):
message.ClearField(field.name)
self._ConvertMapFieldValue(value, message, field)
elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
message.ClearField(field.name)
if not isinstance(value, list):
raise ParseError('repeated field {0} must be in [] which is '
'{1}.'.format(name, value))
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
# Repeated message field.
for item in value:
sub_message = getattr(message, field.name).add()
# None is a null_value in Value.
if (item is None and
sub_message.DESCRIPTOR.full_name != 'google.protobuf.Value'):
raise ParseError('null is not allowed to be used as an element'
' in a repeated field.')
self.ConvertMessage(item, sub_message)
else:
# Repeated scalar field.
for item in value:
if item is None:
raise ParseError('null is not allowed to be used as an element'
' in a repeated field.')
getattr(message, field.name).append(
_ConvertScalarFieldValue(item, field))
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
if field.is_extension:
sub_message = message.Extensions[field]
else:
sub_message = getattr(message, field.name)
sub_message.SetInParent()
self.ConvertMessage(value, sub_message)
else:
setattr(message, field.name, _ConvertScalarFieldValue(value, field))
except ParseError as e:
if field and field.containing_oneof is None:
raise ParseError('Failed to parse {0} field: {1}'.format(name, e))
else:
raise ParseError(str(e))
except ValueError as e:
raise ParseError('Failed to parse {0} field: {1}.'.format(name, e))
except TypeError as e:
raise ParseError('Failed to parse {0} field: {1}.'.format(name, e))
def _ConvertAnyMessage(self, value, message):
"""Convert a JSON representation into Any message."""
if isinstance(value, dict) and not value:
return
try:
type_url = value['@type']
except KeyError:
raise ParseError('@type is missing when parsing any message.')
sub_message = _CreateMessageFromTypeUrl(type_url)
message_descriptor = sub_message.DESCRIPTOR
full_name = message_descriptor.full_name
if _IsWrapperMessage(message_descriptor):
self._ConvertWrapperMessage(value['value'], sub_message)
elif full_name in _WKTJSONMETHODS:
methodcaller(
_WKTJSONMETHODS[full_name][1], value['value'], sub_message)(self)
else:
del value['@type']
self._ConvertFieldValuePair(value, sub_message)
# Sets Any message
message.value = sub_message.SerializeToString()
message.type_url = type_url
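# Input sketch (hypothetical JSON, not taken from this module): an Any holding a
# well-known type such as {'@type': 'type.googleapis.com/google.protobuf.Duration',
# 'value': '1.5s'} is routed through _WKTJSONMETHODS above, while an Any holding an
# ordinary message type falls through to _ConvertFieldValuePair once '@type' is removed.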
def _ConvertGenericMessage(self, value, message):
"""Convert a JSON representation into message with FromJsonString."""
# Duration, Timestamp, FieldMask have a FromJsonString method to do the
# conversion. Users can also call the method directly.
message.FromJsonString(value)
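# For instance (a sketch assuming the usual well-known-type helpers, not code from this
# module): duration_pb2.Duration().FromJsonString('3.000001s') and
# timestamp_pb2.Timestamp().FromJsonString('1970-01-01T00:00:00Z') perform the same
# conversion that this wrapper delegates to.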
def _ConvertValueMessage(self, value, message):
"""Convert a JSON representation into Value message."""
if isinstance(value, dict):
self._ConvertStructMessage(value, message.struct_value)
elif isinstance(value, list):
self._ConvertListValueMessage(value, message.list_value)
elif value is None:
message.null_value = 0
elif isinstance(value, bool):
message.bool_value = value
elif isinstance(value, six.string_types):
message.string_value = value
elif isinstance(value, _INT_OR_FLOAT):
message.number_value = value
else:
raise ParseError('Unexpected type for Value message.')
def _ConvertListValueMessage(self, value, message):
"""Convert a JSON representation into ListValue message."""
if not isinstance(value, list):
raise ParseError(
'ListValue must be in [] which is {0}.'.format(value))
message.ClearField('values')
for item in value:
self._ConvertValueMessage(item, message.values.add())
def _ConvertStructMessage(self, value, message):
"""Convert a JSON representation into Struct message."""
if not isinstance(value, dict):
raise ParseError(
'Struct must be in a dict which is {0}.'.format(value))
for key in value:
self._ConvertValueMessage(value[key], message.fields[key])
return
def _ConvertWrapperMessage(self, value, message):
"""Convert a JSON representation into Wrapper message."""
field = message.DESCRIPTOR.fields_by_name['value']
setattr(message, 'value', _ConvertScalarFieldValue(value, field))
def _ConvertMapFieldValue(self, value, message, field):
"""Convert map field value for a message map field.
Args:
value: A JSON object to convert the map field value.
message: A protocol message to record the converted data.
field: The descriptor of the map field to be converted.
Raises:
ParseError: In case of convert problems.
"""
if not isinstance(value, dict):
raise ParseError(
'Map field {0} must be in a dict which is {1}.'.format(
field.name, value))
key_field = field.message_type.fields_by_name['key']
value_field = field.message_type.fields_by_name['value']
for key in value:
key_value = _ConvertScalarFieldValue(key, key_field, True)
if value_field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
self.ConvertMessage(value[key], getattr(
message, field.name)[key_value])
else:
getattr(message, field.name)[key_value] = _ConvertScalarFieldValue(
value[key], value_field)
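# Sketch: for a hypothetical map<string, int32> field, a JSON object such as {'a': 1, 'b': 2}
# is walked key by key; each key goes through _ConvertScalarFieldValue with require_str=True
# and each value through the scalar or nested-message path above.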
def _ConvertScalarFieldValue(value, field, require_str=False):
"""Convert a single scalar field value.
Args:
value: A scalar value to convert the scalar field value.
field: The descriptor of the field to convert.
require_str: If True, the field value must be a str.
Returns:
The converted scalar field value
Raises:
ParseError: In case of convert problems.
"""
if field.cpp_type in _INT_TYPES:
return _ConvertInteger(value)
elif field.cpp_type in _FLOAT_TYPES:
return _ConvertFloat(value)
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
return _ConvertBool(value, require_str)
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
if field.type == descriptor.FieldDescriptor.TYPE_BYTES:
return base64.b64decode(value)
else:
# Checking for unpaired surrogates appears to be unreliable,
# depending on the specific Python version, so we check manually.
if _UNPAIRED_SURROGATE_PATTERN.search(value):
raise ParseError('Unpaired surrogate')
return value
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
# Convert an enum value.
enum_value = field.enum_type.values_by_name.get(value, None)
if enum_value is None:
try:
number = int(value)
enum_value = field.enum_type.values_by_number.get(number, None)
except ValueError:
raise ParseError('Invalid enum value {0} for enum type {1}.'.format(
value, field.enum_type.full_name))
if enum_value is None:
if field.file.syntax == 'proto3':
# Proto3 accepts unknown enums.
return number
raise ParseError('Invalid enum value {0} for enum type {1}.'.format(
value, field.enum_type.full_name))
return enum_value.number
def _ConvertInteger(value):
"""Convert an integer.
Args:
value: A scalar value to convert.
Returns:
The integer value.
Raises:
ParseError: If an integer couldn't be consumed.
"""
if isinstance(value, float) and not value.is_integer():
raise ParseError('Couldn\'t parse integer: {0}.'.format(value))
if isinstance(value, six.text_type) and value.find(' ') != -1:
raise ParseError('Couldn\'t parse integer: "{0}".'.format(value))
return int(value)
def _ConvertFloat(value):
"""Convert an floating point number."""
if value == 'nan':
raise ParseError('Couldn\'t parse float "nan", use "NaN" instead.')
try:
# Assume Python compatible syntax.
return float(value)
except ValueError:
# Check alternative spellings.
if value == _NEG_INFINITY:
return float('-inf')
elif value == _INFINITY:
return float('inf')
elif value == _NAN:
return float('nan')
else:
raise ParseError('Couldn\'t parse float: {0}.'.format(value))
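# Example (assuming _INFINITY, _NEG_INFINITY and _NAN hold the spellings 'Infinity',
# '-Infinity' and 'NaN' elsewhere in this module): _ConvertFloat('Infinity') returns
# float('inf') and _ConvertFloat('NaN') returns float('nan'), while the lowercase 'nan'
# is rejected up front.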
def _ConvertBool(value, require_str):
"""Convert a boolean value.
Args:
value: A scalar value to convert.
require_str: If True, value must be a str.
Returns:
The bool parsed.
Raises:
ParseError: If a boolean value couldn't be consumed.
"""
if require_str:
if value == 'true':
return True
elif value == 'false':
return False
else:
raise ParseError('Expected "true" or "false", not {0}.'.format(value))
if not isinstance(value, bool):
raise ParseError('Expected true or false without quotes.')
return value
_WKTJSONMETHODS = {
'google.protobuf.Any': ['_AnyMessageToJsonObject',
'_ConvertAnyMessage'],
'google.protobuf.Duration': ['_GenericMessageToJsonObject',
'_ConvertGenericMessage'],
'google.protobuf.FieldMask': ['_GenericMessageToJsonObject',
'_ConvertGenericMessage'],
'google.protobuf.ListValue': ['_ListValueMessageToJsonObject',
'_ConvertListValueMessage'],
'google.protobuf.Struct': ['_StructMessageToJsonObject',
'_ConvertStructMessage'],
'google.protobuf.Timestamp': ['_GenericMessageToJsonObject',
'_ConvertGenericMessage'],
'google.protobuf.Value': ['_ValueMessageToJsonObject',
'_ConvertValueMessage']
}
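# Dispatch sketch: for a Duration value,
# methodcaller(_WKTJSONMETHODS['google.protobuf.Duration'][1], value, sub_message)(self)
# is equivalent to self._ConvertGenericMessage(value, sub_message); the first entry of
# each pair appears to name the corresponding printer-side method.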
|
mit
|
yugangw-msft/azure-cli
|
src/azure-cli/azure/cli/command_modules/storage/tests/hybrid_2020_09_01/test_storage_account_scenarios.py
|
2
|
13246
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.testsdk import (ScenarioTest, JMESPathCheck, ResourceGroupPreparer,
StorageAccountPreparer, api_version_constraint, live_only)
from azure.cli.core.profiles import ResourceType
from ..storage_test_util import StorageScenarioMixin
from knack.util import CLIError
from azure_devtools.scenario_tests import AllowLargeResponse
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2016-12-01')
class StorageAccountTests(StorageScenarioMixin, ScenarioTest):
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2017-06-01')
@ResourceGroupPreparer(name_prefix='cli_test_storage_service_endpoints')
@StorageAccountPreparer()
def test_storage_account_service_endpoints(self, resource_group, storage_account):
kwargs = {
'rg': resource_group,
'acc': storage_account,
'vnet': 'vnet1',
'subnet': 'subnet1'
}
self.cmd('storage account create -g {rg} -n {acc} --sku Standard_LRS --bypass Metrics --default-action Deny --https-only '
.format(**kwargs),
checks=[
JMESPathCheck('networkRuleSet.bypass', 'Metrics'),
JMESPathCheck('networkRuleSet.defaultAction', 'Deny')])
self.cmd('storage account update -g {rg} -n {acc} --bypass Logging --default-action Allow'.format(**kwargs),
checks=[
JMESPathCheck('networkRuleSet.bypass', 'Logging'),
JMESPathCheck('networkRuleSet.defaultAction', 'Allow')])
self.cmd('storage account update -g {rg} -n {acc} --set networkRuleSet.default_action=deny'.format(**kwargs),
checks=[
JMESPathCheck('networkRuleSet.bypass', 'Logging'),
JMESPathCheck('networkRuleSet.defaultAction', 'Deny')])
self.cmd('network vnet create -g {rg} -n {vnet} --subnet-name {subnet}'.format(**kwargs))
self.cmd(
'network vnet subnet update -g {rg} --vnet-name {vnet} -n {subnet} --service-endpoints Microsoft.Storage'.format(
**kwargs))
self.cmd('storage account network-rule add -g {rg} --account-name {acc} --ip-address 25.1.2.3'.format(**kwargs))
self.cmd(
'storage account network-rule add -g {rg} --account-name {acc} --ip-address 25.2.0.0/24'.format(**kwargs))
self.cmd(
'storage account network-rule add -g {rg} --account-name {acc} --vnet-name {vnet} --subnet {subnet}'.format(
**kwargs))
self.cmd('storage account network-rule list -g {rg} --account-name {acc}'.format(**kwargs), checks=[
JMESPathCheck('length(ipRules)', 2),
JMESPathCheck('length(virtualNetworkRules)', 1)
])
self.cmd(
'storage account network-rule remove -g {rg} --account-name {acc} --ip-address 25.1.2.3'.format(**kwargs))
self.cmd(
'storage account network-rule remove -g {rg} --account-name {acc} --vnet-name {vnet} --subnet {subnet}'.format(
**kwargs))
self.cmd('storage account network-rule list -g {rg} --account-name {acc}'.format(**kwargs), checks=[
JMESPathCheck('length(ipRules)', 1),
JMESPathCheck('length(virtualNetworkRules)', 0)
])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2017-06-01')
@ResourceGroupPreparer(location='southcentralus')
def test_create_storage_account_with_assigned_identity(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
cmd = 'az storage account create -n {} -g {} --sku Standard_LRS --assign-identity --https-only '.format(name, resource_group)
result = self.cmd(cmd).get_output_in_json()
self.assertIn('identity', result)
self.assertTrue(result['identity']['principalId'])
self.assertTrue(result['identity']['tenantId'])
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2017-06-01')
@ResourceGroupPreparer(location='southcentralus')
def test_update_storage_account_with_assigned_identity(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {} --sku Standard_LRS --https-only '.format(name, resource_group)
self.cmd(create_cmd, checks=[JMESPathCheck('identity', None)])
update_cmd = 'az storage account update -n {} -g {} --assign-identity'.format(name, resource_group)
result = self.cmd(update_cmd).get_output_in_json()
self.assertIn('identity', result)
self.assertTrue(result['identity']['principalId'])
self.assertTrue(result['identity']['tenantId'])
# @AllowLargeResponse()
# @ResourceGroupPreparer(parameter_name_for_location='location')
# def test_create_storage_account(self, resource_group, location):
# name = self.create_random_name(prefix='cli', length=24)
# self.cmd('az storage account create -n {} -g {} --sku {} -l {} --https-only '.format(
# name, resource_group, 'Standard_LRS', location))
# self.cmd('storage account check-name --name {}'.format(name), checks=[
# JMESPathCheck('nameAvailable', False),
# JMESPathCheck('reason', 'AlreadyExists')
# ])
# self.cmd('storage account list -g {}'.format(resource_group), checks=[
# JMESPathCheck('[0].location', 'westus'),
# JMESPathCheck('[0].sku.name', 'Standard_LRS'),
# JMESPathCheck('[0].resourceGroup', resource_group)
# ])
# self.cmd('az storage account show -n {} -g {}'.format(name, resource_group), checks=[
# JMESPathCheck('name', name),
# JMESPathCheck('location', location),
# JMESPathCheck('sku.name', 'Standard_LRS'),
# JMESPathCheck('kind', 'Storage')
# ])
# self.cmd('az storage account show -n {}'.format(name), checks=[
# JMESPathCheck('name', name),
# JMESPathCheck('location', location),
# JMESPathCheck('sku.name', 'Standard_LRS'),
# JMESPathCheck('kind', 'Storage')
# ])
# self.cmd('storage account show-connection-string -g {} -n {} --protocol http'.format(
# resource_group, name), checks=[
# JMESPathCheck("contains(connectionString, 'https')", False),
# JMESPathCheck("contains(connectionString, '{}')".format(name), True)])
# self.cmd('storage account update -g {} -n {} --tags foo=bar cat'
# .format(resource_group, name),
# checks=JMESPathCheck('tags', {'cat': '', 'foo': 'bar'}))
# self.cmd('storage account update -g {} -n {} --sku Standard_GRS --tags'
# .format(resource_group, name),
# checks=[JMESPathCheck('tags', {}),
# JMESPathCheck('sku.name', 'Standard_GRS')])
# self.cmd('storage account update -g {} -n {} --set tags.test=success'
# .format(resource_group, name),
# checks=JMESPathCheck('tags', {'test': 'success'}))
# self.cmd('storage account delete -g {} -n {} --yes'.format(resource_group, name))
# self.cmd('storage account check-name --name {}'.format(name),
# checks=JMESPathCheck('nameAvailable', True))
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2017-10-01')
@ResourceGroupPreparer(location='southcentralus')
def test_storage_create_default_sku(self, resource_group):
name = self.create_random_name(prefix='cli', length=24)
create_cmd = 'az storage account create -n {} -g {} --https-only '.format(name, resource_group)
self.cmd(create_cmd, checks=[JMESPathCheck('sku.name', 'Standard_RAGRS')])
def test_show_usage(self):
self.cmd('storage account show-usage -l westus', checks=JMESPathCheck('name.value', 'StorageAccounts'))
# @ResourceGroupPreparer()
# @StorageAccountPreparer()
# def test_logging_operations(self, resource_group, storage_account):
# connection_string = self.cmd(
# 'storage account show-connection-string -g {} -n {} -otsv'.format(resource_group, storage_account)).output
# self.cmd('storage logging show --connection-string {}'.format(connection_string), checks=[
# JMESPathCheck('blob.read', False),
# JMESPathCheck('blob.retentionPolicy.enabled', False)
# ])
# self.cmd('storage logging update --services b --log r --retention 1 '
# '--service b --connection-string {}'.format(connection_string))
# self.cmd('storage logging show --connection-string {}'.format(connection_string), checks=[
# JMESPathCheck('blob.read', True),
# JMESPathCheck('blob.retentionPolicy.enabled', True),
# JMESPathCheck('blob.retentionPolicy.days', 1)
# ])
@ResourceGroupPreparer()
@StorageAccountPreparer()
def test_metrics_operations(self, resource_group, storage_account_info):
self.storage_cmd('storage metrics show', storage_account_info) \
.assert_with_checks(JMESPathCheck('file.hour.enabled', True),
JMESPathCheck('file.minute.enabled', False))
self.storage_cmd('storage metrics update --services f --api true --hour true --minute true --retention 1 ',
storage_account_info)
self.storage_cmd('storage metrics show', storage_account_info).assert_with_checks(
JMESPathCheck('file.hour.enabled', True),
JMESPathCheck('file.minute.enabled', True))
@AllowLargeResponse()
@ResourceGroupPreparer()
@StorageAccountPreparer(parameter_name='account_1')
@StorageAccountPreparer(parameter_name='account_2')
def test_list_storage_accounts(self, account_1, account_2):
accounts_list = self.cmd('az storage account list').get_output_in_json()
assert len(accounts_list) >= 2
assert next(acc for acc in accounts_list if acc['name'] == account_1)
assert next(acc for acc in accounts_list if acc['name'] == account_2)
# @ResourceGroupPreparer()
# @StorageAccountPreparer()
# def test_renew_account_key(self, resource_group, storage_account):
# original_keys = self.cmd('storage account keys list -g {} -n {}'
# .format(resource_group, storage_account)).get_output_in_json()
# # key1 = keys_result[0]
# # key2 = keys_result[1]
# assert original_keys[0] and original_keys[1]
# renewed_keys = self.cmd('storage account keys renew -g {} -n {} --key primary'
# .format(resource_group, storage_account)).get_output_in_json()
# print(renewed_keys)
# print(original_keys)
# assert renewed_keys[0] != original_keys[0]
# assert renewed_keys[1] == original_keys[1]
# original_keys = renewed_keys
# renewed_keys = self.cmd('storage account keys renew -g {} -n {} --key secondary'
# .format(resource_group, storage_account)).get_output_in_json()
# assert renewed_keys[0] == original_keys[0]
# assert renewed_keys[1] != original_keys[1]
# @AllowLargeResponse()
# @ResourceGroupPreparer()
# @StorageAccountPreparer()
# def test_create_account_sas(self, storage_account):
# sas = self.cmd('storage account generate-sas --resource-types o --services b '
# '--expiry 2046-12-31T08:23Z --permissions r --https-only --account-name {}'
# .format(storage_account)).output
# self.assertIn('sig=', sas, 'SAS token {} does not contain sig segment'.format(sas))
# self.assertIn('se=', sas, 'SAS token {} does not contain se segment'.format(sas))
def test_list_locations(self):
self.cmd('az account list-locations',
checks=[JMESPathCheck("[?name=='westus'].displayName | [0]", 'West US')])
@ResourceGroupPreparer()
@StorageAccountPreparer()
def test_storage_account_show_exit_codes(self, resource_group, storage_account):
self.kwargs = {'rg': resource_group, 'sa': storage_account}
self.assertEqual(self.cmd('storage account show -g {rg} -n {sa}').exit_code, 0)
with self.assertRaises(SystemExit) as ex:
self.cmd('storage account show text_causing_parsing_error')
self.assertEqual(ex.exception.code, 2)
with self.assertRaises(SystemExit) as ex:
self.cmd('storage account show -g fake_group -n {sa}')
self.assertEqual(ex.exception.code, 3)
with self.assertRaises(SystemExit) as ex:
self.cmd('storage account show -g {rg} -n fake_account')
self.assertEqual(ex.exception.code, 3)
|
mit
|
pombredanne/timyd
|
timyd/console.py
|
1
|
1115
|
# At the time of writing, unicurses just doesn't work on Windows (pdcurses.dll
# lookup is wrong)
# This module provides escape sequences for terminal colors
import os
_DEFAULT = b'\x1B[0m'
class _ColorMethod(object):
def __init__(self, code):
self._code = code
def __get__(self, obj, type=None):
return self
def __call__(self, msg):
if colors._enabled:
return "%s%s%s" % (self._code, msg, _DEFAULT)
else:
return msg
class _Colors(object):
def __init__(self):
self._enabled = None
self.auto()
def auto(self):
if '/bin:' in os.getenv('PATH'):
self.enable(True)
else:
self.enable(False)
def enable(self, mode=True):
self._enabled = mode
black = _ColorMethod(b'\x1B[30m')
red = _ColorMethod(b'\x1B[31m')
green = _ColorMethod(b'\x1B[32m')
yellow = _ColorMethod(b'\x1B[33m')
blue = _ColorMethod(b'\x1B[34m')
magenta = _ColorMethod(b'\x1B[35m')
cyan = _ColorMethod(b'\x1B[36m')
white = _ColorMethod(b'\x1B[37m')
colors = _Colors()
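# Minimal usage sketch: colors.enable(True) forces escape sequences on, after which
# colors.red("failed") wraps the text in the red escape code and appends the reset
# sequence (the codes are bytes, so this is Python 2 oriented); with
# colors.enable(False) the text is returned unchanged.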
|
mit
|
EliotBryant/ShadDetector
|
shadDetector_testing/Colour Based Methods/ColorHistogram-master/color_histogram/results/hist_3d.py
|
2
|
2636
|
# -*- coding: utf-8 -*-
# # @package color_histogram.results.hist_3d
#
# Compute 3D color histogram result.
# @author tody
# @date 2015/08/28
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from color_histogram.io_util.image import loadRGB
from color_histogram.cv.image import rgb, to32F, rgb2Lab, rgb2hsv
from color_histogram.core.hist_3d import Hist3D
from color_histogram.datasets.datasets import dataFile
from color_histogram.results.results import resultFile, batchResults
from color_histogram.plot.window import showMaximize
from color_histogram.util.timer import timing_func
# # Plot 3D color histograms for the target image, color space, channels.
@timing_func
def plotHistogram3D(image, num_bins, color_space, ax):
font_size = 15
plt.title("%s: %s bins" % (color_space, num_bins), fontsize=font_size)
hist3D = Hist3D(image, num_bins=num_bins, color_space=color_space)
hist3D.plot(ax)
# # Create histogram 3D result function.
def histogram3DResultFunc(num_bins=32):
def func(image_file):
histogram3DResult(image_file, num_bins)
return func
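# Usage sketch: batchResults(data_names, data_ids, histogram3DResultFunc(num_bins=16), ...)
# calls the returned func once per image file, so num_bins is fixed once for the whole batch.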
# # Compute histogram 3D result for the image file.
def histogram3DResult(image_file, num_bins=32, image=None, tile=None):
image_name = os.path.basename(image_file)
if image is None:
image_name = os.path.basename(image_file)
image_name = os.path.splitext(image_name)[0]
image = loadRGB(image_file)
if tile is None:
tile = image
fig_w = 10
fig_h = 6
fig = plt.figure(figsize=(fig_w, fig_h))
fig.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.95, wspace=0.02, hspace=0.2)
font_size = 15
fig.suptitle("Hisotogram 3D", fontsize=font_size)
h, w = image.shape[:2]
fig.add_subplot(231)
plt.title("Original Image: %s x %s" % (w, h), fontsize=font_size)
plt.imshow(tile)
plt.axis('off')
color_spaces = ["rgb", "Lab", "hsv"]
plot_id = 234
for color_space in color_spaces:
ax = fig.add_subplot(plot_id, projection='3d')
plotHistogram3D(image, num_bins, color_space, ax)
plot_id += 1
result_name = image_name + "_hist3D"
result_file = resultFile(result_name)
plt.savefig(result_file, transparent=True)
# # Compute histogram 3D results for the data names, ids.
def histogram3DResults(data_names, data_ids, num_bins=32):
batchResults(data_names, data_ids, histogram3DResultFunc(num_bins), "Histogram 3D")
if __name__ == '__main__':
data_names = ["flower"]
data_ids = [0, 1, 2]
histogram3DResults(data_names, data_ids)
|
gpl-3.0
|
roystgnr/libmesh
|
doc/statistics/libmesh_mailinglists.py
|
2
|
12279
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
from operator import add
# Import stuff for working with dates
from datetime import datetime
from matplotlib.dates import date2num, num2date
# Number of messages to libmesh-devel and libmesh-users over the life
# of the project. I cut and pasted these from the sf website: they
# are in basically the same format as those pages, which is to say,
# not particularly useful for plotting.
# Administrative interface for both lists.
# https://lists.sourceforge.net/lists/admindb/libmesh-users
# https://lists.sourceforge.net/lists/admindb/libmesh-devel
# You can now see the subscriber counts for both lists in another place:
# https://sourceforge.net/p/libmesh/admin/mailman/
# Month, year, libmesh-devel subscriber count, libmesh-users subscriber count
membership_data = [
'Jan 2010', 75, 143,
'Jul 2011', 92, 185,
'Aug 2011', 91, 184,
'Sep 2011', 90, 184,
'Nov 2011', 92, 184,
'Dec 2011', 93, 185,
'Jan 2012', 96, 190,
'Feb 2012', 98, 196,
'Mar 2012', 99, 199,
'Apr 2012', 103, 204,
'May 2012', 105, 210,
'Jun 2012', 105, 209,
'Jul 2012', 107, 210,
'Aug 2012', 109, 213,
'Sep 2012', 111, 220,
'Oct 2012', 111, 222,
'Nov 2012', 112, 225,
'Dec 2012', 111, 225,
'Jan 2013', 111, 225,
'Feb 2013', 112, 228,
'Mar 2013', 112, 231,
'Apr 2013', 112, 228,
'May 2013', 113, 233,
'Jun 2013', 113, 237,
'Jul 2013', 114, 240,
'Aug 2013', 114, 242,
'Sep 2013', 113, 241,
'Oct 2013', 112, 241,
'Nov 2013', 112, 241,
'Dec 2013', 111, 240,
'Jan 2014', 112, 244,
'Feb 2014', 113, 244,
'Mar 2014', 113, 247,
'Apr 2014', 113, 248,
'May 2014', 113, 249,
'Jun 2014', 113, 247,
'Jul 2014', 115, 249,
'Aug 2014', 115, 251,
'Sep 2014', 117, 254,
'Oct 2014', 118, 257,
'Nov 2014', 119, 261,
'Dec 2014', 120, 262,
'Jan 2015', 120, 263,
'Mar 2015', 120, 266, # I missed getting data for Feb 2015
'Apr 2015', 122, 268,
'May 2015', 122, 269,
'Jun 2015', 122, 268,
'Jul 2015', 122, 271,
'Aug 2015', 122, 272,
'Sep 2015', 107, 228,
'Oct 2015', 107, 230,
'Nov 2015', 109, 234,
'Dec 2015', 108, 241,
'Jan 2016', 108, 239,
'Feb 2016', 110, 242,
'Mar 2016', 109, 243,
'Apr 2016', 110, 242,
'May 2016', 110, 245,
'Jun 2016', 110, 244,
'Jul 2016', 112, 243,
'Aug 2016', 112, 245,
'Sep 2016', 115, 246,
'Oct 2016', 116, 244,
'Nov 2016', 116, 246,
'Dec 2016', 116, 246,
'Jan 2017', 117, 247,
'Feb 2017', 117, 249,
'Mar 2017', 117, 250,
'Apr 2017', 118, 249,
'May 2017', 118, 249,
'Jun 2017', 118, 245,
'Jul 2017', 109, 226,
'Aug 2017', 109, 226,
'Sep 2017', 28, 56, # Sourceforge must have finally dropped the people who didn't reconfirm their mailing list subscriptions!
'Oct 2017', 30, 63,
'Nov 2017', 32, 70,
'Dec 2017', 34, 73,
'Jan 2018', 35, 77,
'Feb 2018', 37, 82,
'Apr 2018', 40, 87,
'May 2018', 40, 89,
'Jun 2018', 38, 95,
'Jul 2018', 39, 99,
'Aug 2018', 39, 98,
'Sep 2018', 41, 100,
'Oct 2018', 41, 103,
'Nov 2018', 41, 103,
'Dec 2018', 41, 106,
'Jan 2019', 41, 104,
'Feb 2019', 41, 107,
'Mar 2019', 42, 108,
'Apr 2019', 42, 107,
'May 2019', 43, 106,
'May 2019', 43, 107,
'Jun 2019', 43, 109,
'Jul 2019', 42, 107,
'Aug 2019', 42, 109,
'Oct 2019', 43, 109,
'Nov 2019', 43, 111,
'Dec 2019', 43, 110,
'Jan 2020', 43, 109,
'Feb 2020', 44, 114,
'Mar 2020', 44, 113,
'Apr 2020', 44, 113,
'May 2020', 44, 113,
'Jun 2020', 44, 113,
'Jul 2020', 44, 113,
'Aug 2020', 44, 113,
'Sep 2020', 45, 116,
'Oct 2020', 45, 118,
]
# Strip out the dates from membership_data
date_strings = membership_data[0::3]
# Convert date strings into numbers
date_nums = []
for d in date_strings:
date_nums.append(date2num(datetime.strptime(d, '%b %Y')))
# Strip out the number of libmesh-devel subscribers from membership_data
devel_count = membership_data[1::3]
# Strip out the number of libmesh-users subscribers from membership_data
users_count = membership_data[2::3]
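# For example, the first two records above are 'Jan 2010', 75, 143, 'Jul 2011', 92, 185,
# so the [0::3] slice yields the date strings, [1::3] the libmesh-devel counts and
# [2::3] the libmesh-users counts.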
# Get a reference to the figure
fig = plt.figure()
# 111 is equivalent to Matlab's subplot(1,1,1) command
ax = fig.add_subplot(111)
# The colors used come from sns.color_palette("muted").as_hex() They
# are the "same basic order of hues as the default matplotlib color
# cycle but more attractive colors."
muted_dark_blue = u'#4878cf'
muted_green = u'#6acc65'
muted_red = u'#d65f5f'
muted_purple = u'#b47cc7'
muted_yellow = u'#c4ad66'
muted_light_blue = u'#77bedb'
# Choose colors from the list above.
primary = muted_dark_blue
secondary = muted_light_blue
# Plot libmesh-users mailing list membership over time
ax.plot(date_nums, users_count, color=primary, marker='s', linestyle='--', label='libmesh-users')
# Plot libmesh-devel mailing list membership over time
ax.plot(date_nums, devel_count, color=secondary, marker='o', linestyle='-', label='libmesh-devel')
# Add a legend to the plot.
plt.legend(loc='upper left')
# Create title
fig.suptitle('LibMesh Mailing List Membership Size')
# Set up xticks and xticklabels
N = len(devel_count)
xtick_indexes = [0, 1, N-1]
xticks = []
xticklabels = []
for index in xtick_indexes:
xticks.append(date_nums[index])
xticklabels.append(date_strings[index])
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels)
# Save as PDF
plt.savefig('libmesh_mailinglists_membership.pdf')
# libmesh-devel
# https://sourceforge.net/p/libmesh/mailman/libmesh-devel/
# jan feb mar apr may jun jul aug sep oct nov dec
devel_data = [
'2003', 4, 1, 9, 2, 7, 1, 1, 4, 12, 8, 3, 4,
'2004', 1, 21, 31, 10, 12, 15, 4, 6, 5, 11, 43, 13,
'2005', 25, 12, 49, 19, 104, 60, 10, 42, 15, 12, 6, 4,
'2006', 1, 6, 31, 17, 5, 95, 38, 44, 6, 8, 21, 0,
'2007', 5, 46, 9, 23, 17, 51, 41, 4, 28, 71, 193, 20,
'2008', 46, 46, 18, 38, 14, 107, 50, 115, 84, 96, 105, 34,
'2009', 89, 93, 119, 73, 39, 51, 27, 8, 91, 90, 77, 67,
'2010', 24, 36, 98, 45, 25, 60, 17, 36, 48, 45, 65, 39,
'2011', 26, 48, 151, 108, 61, 108, 27, 50, 43, 43, 27, 37,
'2012', 56, 120, 72, 57, 82, 66, 51, 75, 166, 232, 284, 105, # Dec 10, 2012 libmesh moved to github
'2013', 168, 151, 30, 145, 26, 53, 76, 33, 23, 72, 125, 38,
'2014', 47, 62, 27, 8, 12, 2, 22, 22, 0, 17, 20, 12,
'2015', 25, 2, 16, 13, 21, 5, 1, 8, 9, 30, 8, 0,
'2016', 16, 31, 43, 18, 21, 11, 17, 26, 4, 16, 5, 6,
'2017', 1, 2, 5, 4, 1, 11, 5, 0, 3, 1, 7, 0,
'2018', 8, 8, 1, 0, 5, 11, 0, 51, 3, 0, 0, 0,
'2019', 2, 0, 3, 7, 2, 0, 6, 0, 0, 4, 0, 0,
'2020', 0, 0, 0, 0, 1, 0, 0, 0, 0,
]
# libmesh-users starts in Sept 2003!
# https://sourceforge.net/p/libmesh/mailman/libmesh-users/
# jan feb mar apr may jun jul aug sep oct nov dec
users_data = [
'2003', 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 27, 31,
'2004', 6, 15, 33, 10, 46, 11, 21, 15, 13, 23, 1, 8,
'2005', 27, 57, 86, 23, 37, 34, 24, 17, 50, 24, 10, 60,
'2006', 47, 46, 127, 19, 26, 62, 47, 51, 61, 42, 50, 33,
'2007', 60, 55, 77, 102, 82, 102, 169, 117, 80, 37, 51, 43,
'2008', 71, 94, 98, 125, 54, 119, 60, 111, 118, 125, 119, 94,
'2009', 109, 38, 93, 88, 29, 57, 53, 48, 68, 151, 23, 35,
'2010', 84, 60, 184, 112, 60, 90, 23, 70, 119, 27, 47, 54,
'2011', 22, 19, 92, 93, 35, 91, 32, 61, 7, 69, 81, 23,
'2012', 64, 95, 35, 36, 63, 98, 70, 171, 149, 64, 67, 126, # Dec 10, 2012 libmesh moved to github
'2013', 108, 104, 171, 133, 108, 100, 93, 126, 74, 59, 145, 93,
'2014', 38, 45, 26, 41, 125, 70, 61, 66, 60, 110, 27, 30,
'2015', 43, 67, 71, 92, 39, 15, 46, 63, 84, 82, 69, 45,
'2016', 92, 91, 148, 43, 58, 117, 92, 140, 49, 33, 85, 40,
'2017', 41, 36, 49, 41, 73, 51, 12, 69, 26, 43, 75, 23,
'2018', 86, 36, 50, 28, 53, 65, 26, 43, 32, 28, 52, 17,
'2019', 39, 26, 71, 30, 73, 18, 5, 10, 8, 24, 12, 34,
'2020', 17, 10, 6, 4, 15, 3, 8, 15, 6,
]
# Make plot of monthly data
fig.clf()
# Use a smaller font size on these plots since they are... smaller.
by_month_fontsize = 7
month_names = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
# Strip out the year strings
year_strings = devel_data[0::13]
for i in range(0, 12):
# Strip out the devel_data for the current month. Note that the
# stride is 13 since the year string also appears in each row.
devel_data_current_month = devel_data[i+1::13]
# Strip out the users_data for the current month
users_data_current_month = users_data[i+1::13]
# Get the combined number of messages
combined_data_current_month = np.add(devel_data_current_month, users_data_current_month)
# Get reference to the ith axes on a 3x4 grid. Note that the plot
# numbering starts at 1, similarly to Matlab.
ax = fig.add_subplot(3, 4, i+1)
# The width of the bars to use in the bar chart
width=.8
# Make an x-axis to plot against
N = len(devel_data_current_month)
x = np.linspace(1, N, N)
# Plot the summed data
ax.bar(x, combined_data_current_month, width, color=primary)
# Plot only the libmesh-devel data
ax.bar(x, devel_data_current_month, width, color=secondary)
# Set up the xticks and labels
xticks = [1, N]
xticklabels = [year_strings[0], year_strings[N-1]]
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels, fontsize=by_month_fontsize)
# Set month name as subplot title
ax.set_title(month_names[i] + ' (max ' + str(max(combined_data_current_month)) + ')', fontsize=by_month_fontsize)
# Set an empty set of ticks for the y-axis to turn it off. This
# is necessary to declutter the figure.
ax.get_yaxis().set_ticks([])
# We need to leave a bit more room between the subplots to balance the
# font size we can use with the amount of space available on the
# figure.
# wspace = the amount of width reserved for blank space between subplots
# hspace = the amount of height reserved for white space between subplots
plt.subplots_adjust(wspace=.2, hspace=.4)
# Save as PDF
plt.savefig('libmesh_mailinglists_by_month.pdf')
# Make plot of data from all time
fig.clf()
# Strip out all the list entries which are numbers. Not sure if this
# is the best or most Python way to do this, but it works...
devel_numbers = [x for x in devel_data if (isinstance(x, str)==False)]
users_numbers = [x for x in users_data if (isinstance(x, str)==False)]
# Get the combined number of messages
combined_devel_users_number = np.add(devel_numbers, users_numbers)
# 111 is equivalent to Matlab's subplot(1,1,1) command
ax = fig.add_subplot(111)
# Make an x-axis to plot against
N = len(combined_devel_users_number)
x = np.linspace(1, N, N)
# Plot the combined data
ax.bar(x, combined_devel_users_number, width, color=primary, label='libmesh-users')
# Plot the libmesh-devel data alone
ax.bar(x, devel_numbers, width, color=secondary, label='libmesh-devel')
# Set bi-yearly xticklabels
year_labels = ['2003', '2005', '2007', '2009', '2011', '2013', '2015', '2017', '2019']
# Set up the corresponding tick locations. This starting point was chosen by
# trial and error because it lined up the tick marks fairly well, but I don't
# understand the logic behind it.
xticks = [.55]
for i in range(1, len(year_labels)):
xticks.append(xticks[i-1] + 24) # 2 years = 24 months
# Center the ticks slightly
xticks = [x+width/2. for x in xticks]
ax.set_xticks(xticks)
ax.set_xticklabels(year_labels)
# Add a legend to the plot.
plt.legend(loc='upper left')
# Set the xlimits
plt.xlim(0, N+2);
# Save as PDF
plt.savefig('libmesh_mailinglists.pdf')
# Local Variables:
# python-indent: 2
# End:
|
lgpl-2.1
|
ecederstrand/django
|
tests/distinct_on_fields/tests.py
|
263
|
5996
|
from __future__ import unicode_literals
from django.db.models import Max
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import str_prefix
from .models import Celebrity, Fan, Staff, StaffTag, Tag
@skipUnlessDBFeature('can_distinct_on_fields')
@skipUnlessDBFeature('supports_nullable_unique_constraints')
class DistinctOnTests(TestCase):
def setUp(self):
t1 = Tag.objects.create(name='t1')
Tag.objects.create(name='t2', parent=t1)
t3 = Tag.objects.create(name='t3', parent=t1)
Tag.objects.create(name='t4', parent=t3)
Tag.objects.create(name='t5', parent=t3)
self.p1_o1 = Staff.objects.create(id=1, name="p1", organisation="o1")
self.p2_o1 = Staff.objects.create(id=2, name="p2", organisation="o1")
self.p3_o1 = Staff.objects.create(id=3, name="p3", organisation="o1")
self.p1_o2 = Staff.objects.create(id=4, name="p1", organisation="o2")
self.p1_o1.coworkers.add(self.p2_o1, self.p3_o1)
StaffTag.objects.create(staff=self.p1_o1, tag=t1)
StaffTag.objects.create(staff=self.p1_o1, tag=t1)
celeb1 = Celebrity.objects.create(name="c1")
celeb2 = Celebrity.objects.create(name="c2")
self.fan1 = Fan.objects.create(fan_of=celeb1)
self.fan2 = Fan.objects.create(fan_of=celeb1)
self.fan3 = Fan.objects.create(fan_of=celeb2)
def test_basic_distinct_on(self):
"""QuerySet.distinct('field', ...) works"""
# (qset, expected) tuples
qsets = (
(
Staff.objects.distinct().order_by('name'),
['<Staff: p1>', '<Staff: p1>', '<Staff: p2>', '<Staff: p3>'],
),
(
Staff.objects.distinct('name').order_by('name'),
['<Staff: p1>', '<Staff: p2>', '<Staff: p3>'],
),
(
Staff.objects.distinct('organisation').order_by('organisation', 'name'),
['<Staff: p1>', '<Staff: p1>'],
),
(
Staff.objects.distinct('name', 'organisation').order_by('name', 'organisation'),
['<Staff: p1>', '<Staff: p1>', '<Staff: p2>', '<Staff: p3>'],
),
(
Celebrity.objects.filter(fan__in=[self.fan1, self.fan2, self.fan3]).distinct('name').order_by('name'),
['<Celebrity: c1>', '<Celebrity: c2>'],
),
# Does combining querysets work?
(
(Celebrity.objects.filter(fan__in=[self.fan1, self.fan2]).
distinct('name').order_by('name') |
Celebrity.objects.filter(fan__in=[self.fan3]).
distinct('name').order_by('name')),
['<Celebrity: c1>', '<Celebrity: c2>'],
),
(
StaffTag.objects.distinct('staff', 'tag'),
['<StaffTag: t1 -> p1>'],
),
(
Tag.objects.order_by('parent__pk', 'pk').distinct('parent'),
['<Tag: t2>', '<Tag: t4>', '<Tag: t1>'],
),
(
StaffTag.objects.select_related('staff').distinct('staff__name').order_by('staff__name'),
['<StaffTag: t1 -> p1>'],
),
# Fetch the alphabetically first coworker for each worker
(
(Staff.objects.distinct('id').order_by('id', 'coworkers__name').
values_list('id', 'coworkers__name')),
[str_prefix("(1, %(_)s'p2')"), str_prefix("(2, %(_)s'p1')"),
str_prefix("(3, %(_)s'p1')"), "(4, None)"]
),
)
for qset, expected in qsets:
self.assertQuerysetEqual(qset, expected)
self.assertEqual(qset.count(), len(expected))
# Combining queries with different distinct_fields is not allowed.
base_qs = Celebrity.objects.all()
self.assertRaisesMessage(
AssertionError,
"Cannot combine queries with different distinct fields.",
lambda: (base_qs.distinct('id') & base_qs.distinct('name'))
)
# Test join unreffing
c1 = Celebrity.objects.distinct('greatest_fan__id', 'greatest_fan__fan_of')
self.assertIn('OUTER JOIN', str(c1.query))
c2 = c1.distinct('pk')
self.assertNotIn('OUTER JOIN', str(c2.query))
def test_distinct_not_implemented_checks(self):
# distinct + annotate not allowed
with self.assertRaises(NotImplementedError):
Celebrity.objects.annotate(Max('id')).distinct('id')[0]
with self.assertRaises(NotImplementedError):
Celebrity.objects.distinct('id').annotate(Max('id'))[0]
# However this check is done only when the query executes, so you
# can use distinct() to remove the fields before execution.
Celebrity.objects.distinct('id').annotate(Max('id')).distinct()[0]
# distinct + aggregate not allowed
with self.assertRaises(NotImplementedError):
Celebrity.objects.distinct('id').aggregate(Max('id'))
def test_distinct_on_in_ordered_subquery(self):
qs = Staff.objects.distinct('name').order_by('name', 'id')
qs = Staff.objects.filter(pk__in=qs).order_by('name')
self.assertQuerysetEqual(
qs, [self.p1_o1, self.p2_o1, self.p3_o1],
lambda x: x
)
qs = Staff.objects.distinct('name').order_by('name', '-id')
qs = Staff.objects.filter(pk__in=qs).order_by('name')
self.assertQuerysetEqual(
qs, [self.p1_o2, self.p2_o1, self.p3_o1],
lambda x: x
)
def test_distinct_on_get_ordering_preserved(self):
"""
Ordering shouldn't be cleared when distinct on fields are specified.
refs #25081
"""
staff = Staff.objects.distinct('name').order_by('name', '-organisation').get(name='p1')
self.assertEqual(staff.organisation, 'o2')
|
bsd-3-clause
|
paxapy/zulip
|
docs/conf.py
|
9
|
9881
|
# -*- coding: utf-8 -*-
#
# zulip-contributor-docs documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 17 16:24:04 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
if False: from typing import Any, Dict, List, Optional
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [] # type: List[str]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Zulip'
copyright = u'2015-2016, The Zulip Team'
author = u'The Zulip Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.4'
# The full version, including alpha/beta/rc tags.
release = '1.4.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None # type: Optional[str]
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Read The Docs can't import sphinx_rtd_theme, so don't import it there.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'zulip-contributor-docsdoc'
def setup(app):
# type: (Any) -> None
# overrides for wide tables in RTD theme
app.add_stylesheet('theme_overrides.css') # path relative to _static
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
} # type: Dict[str, str]
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'zulip-contributor-docs.tex', u'Zulip Documentation',
u'The Zulip Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'zulip-contributor-docs', u'Zulip Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'zulip-contributor-docs', u'Zulip Documentation',
author, 'zulip-contributor-docs', 'Documentation for contributing to Zulip.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
|
apache-2.0
|
aviciimaxwell/odoo
|
addons/account/wizard/account_subscription_generate.py
|
347
|
2254
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_subscription_generate(osv.osv_memory):
_name = "account.subscription.generate"
_description = "Subscription Compute"
_columns = {
'date': fields.date('Generate Entries Before', required=True),
}
_defaults = {
'date': lambda *a: time.strftime('%Y-%m-%d'),
}
def action_generate(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
sub_line_obj = self.pool.get('account.subscription.line')
moves_created=[]
for data in self.read(cr, uid, ids, context=context):
line_ids = sub_line_obj.search(cr, uid, [('date', '<', data['date']), ('move_id', '=', False)], context=context)
moves = sub_line_obj.move_create(cr, uid, line_ids, context=context)
moves_created.extend(moves)
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_move_line_form')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
result['domain'] = str([('id','in',moves_created)])
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
tchaly-bethmaure/Emotes
|
script/tools.py
|
2
|
4321
|
import sys
import json
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def lst_weight_to_freq(list_weight):
sigma = sum(list_weight)
lst_freq = []
for w in list_weight:
lst_freq.append(float(w) / sigma)
return lst_freq
def esperence(list_freq, list_val):
if len(list_freq) != len(list_val):
print("Length of the frequences' list must be the same as values' list.");exit()
# Formula : E[X] = (x1*freq1 + ... + xn*freqn ) / (freq1 + ... + freqn), we call (freq1 + ... + freqn) = omega
sigma = 0
i = 0
omega = sum(list_freq)
for val_i in list_val:
sigma += list_freq[i] * val_i
i += 1
return sigma / omega
def std_dev(list_freq, list_val, mean):
if len(list_freq) != len(list_val):
print("Length of the frequences' list must be the same as values' list.");exit()
# Formula : STDDEV[X] = sqrt( sum on i of { p_i * (x_i - x_bar)**2 } )
from math import sqrt
result = 0
i = 0
for val_i in list_val:
result += list_freq[i]*(val_i - mean)**2
i += 1
return sqrt(result)
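# Quick worked example (hypothetical numbers): lst_weight_to_freq([1.0, 1.0, 2.0]) gives
# [0.25, 0.25, 0.5]; esperence([0.25, 0.25, 0.5], [2, 4, 6]) returns 4.5, and
# std_dev([0.25, 0.25, 0.5], [2, 4, 6], 4.5) returns sqrt(2.75), roughly 1.66.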
def get_round_precision(number):
return len(str(int(1/number))) - 1
def m_replace(tab, replaceBy, strToReplace):
for val in tab:
strToReplace = strToReplace.replace(val,replaceBy)
return strToReplace
def str_table_to_list(str_table):
# example of a string table : '[Hello, World]'
# neither eval() nor list() could do the job, so here is a function that does
str_table = m_replace(["[","]"], "", str_table)
l = []
for value in str_table.split(","):
if value.isdigit():
l.append(eval(value))
else:
l.append(value)
return l
# Read a csv-like file, ignoring blank lines and lines commented out with #
def read_specific_file(file_name, file_type="none"):
f = open(file_name, 'r')
tab = []
# loading data
for line in f:
if line[0] != "#" and line and line not in ['\n', '\r\n']:
# turn this into a proper dispatch if more than two file types are needed.
if file_type == "conf":
tab.append( (line.replace("\n","")).split(' ') )
if file_type == "json":
tab.append(json.loads(line))
if file_type == "csv":
tab.append( (line.rstrip("\n")).split(';') )
if file_type == "none":
tab.append((line.replace("\n","")))
f.close();
return tab
class pyMap:
def __init__(self):
self.indices = []
self.values = []
def add_i(self, index):
if index not in self.indices:
# add our index value
self.indices.append(index)
def add_v(self, index, value):
self.add_i(index)
i = self.indices.index(index)
# create a list at slot i if it does not exist yet
if i >= len(self.values):
self.values.append([])
# add the value ...
self.values[i].append(value)
def delete_i(self, index):
# look for the index in indices
if index in self.indices:
# if found, delete it and the array of values associated with it
i = self.indices.index(index)
self.indices.remove(index)
self.values.pop(i) # we drop the array
return True
return False
def delete_v(self, index, value):
# is index contained in indices?
if index in self.indices:
i = self.indices.index(index)
# is value in the array associated with index?
if value in self.values[i]:
self.values[i].remove(value) # we remove the value
return True
return False
def get_values(self, index):
if index in self.indices:
i = self.indices.index(index)
return self.values[i]
return []
def get_index_of_value(self, value):
find = False
i = 0
j = -1
while not find and len(self.values) > i:
if value in self.values[i]:
j = i
find = True
i+=1
return j
def get_key_of_value(self, value):
return self.indices[self.get_index_of_value(value)]
def get_index_of_value_in_list(self, value):
find = False
i = 0
j = -1
while not find and len(self.values) > i:
if value in self.values[i]:
j = self.values[i].index(value)
find = True
i+=1
return j
def display(self):
print "Keys: " + str(self.indices) + "\n Values: " + str(self.values)
def test_pyMap():
m = pyMap()
m.add_i("hello")
m.add_v("hello","hi")
m.add_v("hello","bonjour")
m.add_v("hello","allo")
m.add_v("hello","bonjour")
print(m.get_key_of_value("allo"))
m.display()
m.delete_v("hello","hi")
m.display()
m.delete_i("hello")
m.display()
if __name__ == '__main__':
test_pyMap()
|
gpl-2.0
|
j-marjanovic/myhdl
|
myhdl/test/conversion/toVerilog/test_hec.py
|
2
|
4388
|
from __future__ import absolute_import
import os
path = os.path
import unittest
from random import randrange
from myhdl import *
from .util import setupCosimulation
COSET = 0x55
def calculateHecRef(header):
""" Return hec for an ATM header.
Reference version.
The hec polynomial is 1 + x + x**2 + x**8.
"""
hec = intbv(0)
for bit in header[32:]:
hec[8:] = concat(hec[7:2],
bit ^ hec[1] ^ hec[7],
bit ^ hec[0] ^ hec[7],
bit ^ hec[7]
)
return hec ^ COSET
def calculateHecFunc(header):
""" Return hec for an ATM header.
Translatable version.
The hec polynomial is 1 + x + x**2 + x**8.
"""
h = intbv(0)[8:]
for i in downrange(len(header)):
bit = header[i]
h[:] = concat(h[7:2],
bit ^ h[1] ^ h[7],
bit ^ h[0] ^ h[7],
bit ^ h[7]
)
h ^= COSET
return h
def calculateHecTask(hec, header):
""" Calculate hec for an ATM header.
Translatable version.
The hec polynomial is 1 + x + x**2 + x**8.
"""
h = intbv(0)[8:]
for i in downrange(len(header)):
bit = header[i]
h[:] = concat(h[7:2],
bit ^ h[1] ^ h[7],
bit ^ h[0] ^ h[7],
bit ^ h[7]
)
h ^= COSET
hec[:] = h
def HecCalculatorPlain(hec, header):
""" Hec calculation module.
Plain version.
"""
@instance
def logic():
h = intbv(0)[8:]
while 1:
yield header
h[:] = 0
for i in downrange(len(header)):
bit = header[i]
h[:] = concat(h[7:2],
bit ^ h[1] ^ h[7],
bit ^ h[0] ^ h[7],
bit ^ h[7]
)
hec.next = h ^ COSET
return logic
def HecCalculatorFunc(hec, header=1):
""" Hec calculation module.
Version with function call.
"""
@instance
def logic():
h = intbv(0)[8:]
while 1:
yield header
hec.next = calculateHecFunc(header=header)
return logic
def HecCalculatorTask(hec, header):
""" Hec calculation module.
Version with task call.
"""
@instance
def logic():
h = intbv(0)[8:]
while 1:
yield header
calculateHecTask(h, header)
hec.next = h
return logic
def HecCalculatorTask2(hec, header):
""" Hec calculation module.
Version with task call.
"""
@instance
def logic():
h = intbv(0)[8:]
while 1:
yield header
calculateHecTask(header=header, hec=h)
hec.next = h
return logic
def HecCalculator_v(name, hec, header):
return setupCosimulation(**locals())
headers = [ 0x00000000,
0x01234567,
0xbac6f4ca
]
headers.extend([randrange(2**32-1) for i in range(10)])
class TestHec(unittest.TestCase):
def bench(self, HecCalculator):
hec = Signal(intbv(0)[8:])
hec_v = Signal(intbv(0)[8:])
header = Signal(intbv(-1)[32:])
heccalc_inst = toVerilog(HecCalculator, hec, header)
# heccalc_inst = HecCalculator(hec, header)
heccalc_v_inst = HecCalculator_v(HecCalculator.__name__, hec_v, header)
def stimulus():
for h in headers:
header.next = h
yield delay(10)
hec_ref = calculateHecRef(header)
# print "hec: %s hec_v: %s" % (hex(hec), hex(hec_v))
self.assertEqual(hec, hec_ref)
self.assertEqual(hec, hec_v)
return stimulus(), heccalc_inst, heccalc_v_inst
def testPlain(self):
sim = self.bench(HecCalculatorPlain)
Simulation(sim).run()
def testFunc(self):
sim = self.bench(HecCalculatorFunc)
Simulation(sim).run()
def testTask(self):
sim = self.bench(HecCalculatorTask)
Simulation(sim).run()
def testTask2(self):
sim = self.bench(HecCalculatorTask2)
Simulation(sim).run()
if __name__ == '__main__':
unittest.main()
|
lgpl-2.1
|
nharraud/b2share
|
invenio/legacy/registry.py
|
18
|
2353
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import os
import inspect
from flask import current_app
from flask_registry import RegistryProxy, ImportPathRegistry, \
ModuleAutoDiscoveryRegistry
from invenio.ext.registry import ModuleAutoDiscoverySubRegistry
from invenio.utils.datastructures import LazyDict
legacy_modules = RegistryProxy('legacy', ImportPathRegistry,
initial=['invenio.legacy.*'])
webadmin_proxy = RegistryProxy('legacy.webadmin', \
ModuleAutoDiscoverySubRegistry, 'web.admin',
registry_namespace=legacy_modules)
def _admin_handler_name(name):
parts = name.split('.')
return '%s/%s' % (parts[2], parts[5])
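# Illustrative mapping (hypothetical module name): a module named
# 'invenio.legacy.websearch.web.admin.websearchadmin' is keyed as
# 'websearch/websearchadmin' -- parts[2] is the legacy package and parts[5]
# the admin handler module.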
webadmin = LazyDict(lambda: dict((_admin_handler_name(module.__name__), module)
for module in webadmin_proxy))
webinterface_proxy = RegistryProxy(
'legacy.webinterface', ModuleAutoDiscoveryRegistry, 'webinterface',
registry_namespace=legacy_modules)
def _webinterface(module):
from invenio.ext.legacy.handler import WebInterfaceDirectory
parts = module.__name__.split('.')
for value in dir(module):
webinterface = getattr(module, value)
if inspect.isclass(webinterface) and \
issubclass(webinterface, WebInterfaceDirectory) and \
webinterface.__module__ == module.__name__:
yield webinterface.__name__, webinterface
def _webinterfaces(modules):
for module in modules:
for value in _webinterface(module):
yield value
webinterfaces = LazyDict(lambda: dict(_webinterfaces(webinterface_proxy)))
|
gpl-2.0
|
PetePriority/home-assistant
|
homeassistant/components/automation/state.py
|
2
|
2698
|
"""
Offer state listening automation rules.
For more details about this automation rule, please refer to the documentation
at https://home-assistant.io/docs/automation/trigger/#state-trigger
"""
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import MATCH_ALL, CONF_PLATFORM, CONF_FOR
from homeassistant.helpers.event import (
async_track_state_change, async_track_same_state)
import homeassistant.helpers.config_validation as cv
CONF_ENTITY_ID = 'entity_id'
CONF_FROM = 'from'
CONF_TO = 'to'
TRIGGER_SCHEMA = vol.All(vol.Schema({
vol.Required(CONF_PLATFORM): 'state',
vol.Required(CONF_ENTITY_ID): cv.entity_ids,
# These are str on purpose. Want to catch YAML conversions
vol.Optional(CONF_FROM): str,
vol.Optional(CONF_TO): str,
vol.Optional(CONF_FOR): vol.All(cv.time_period, cv.positive_timedelta),
}), cv.key_dependency(CONF_FOR, CONF_TO))
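# Example trigger configuration accepted by TRIGGER_SCHEMA (entity id and
# values are illustrative only):
#
#   trigger:
#     platform: state
#     entity_id: light.kitchen
#     from: 'off'
#     to: 'on'
#     for: '00:01:00'   # CONF_FOR requires CONF_TO (see key_dependency above)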
async def async_trigger(hass, config, action, automation_info):
"""Listen for state changes based on configuration."""
entity_id = config.get(CONF_ENTITY_ID)
from_state = config.get(CONF_FROM, MATCH_ALL)
to_state = config.get(CONF_TO, MATCH_ALL)
time_delta = config.get(CONF_FOR)
match_all = (from_state == MATCH_ALL and to_state == MATCH_ALL)
unsub_track_same = {}
@callback
def state_automation_listener(entity, from_s, to_s):
"""Listen for state changes and calls action."""
@callback
def call_action():
"""Call action with right context."""
hass.async_run_job(action({
'trigger': {
'platform': 'state',
'entity_id': entity,
'from_state': from_s,
'to_state': to_s,
'for': time_delta,
}
}, context=to_s.context))
# Ignore changes to state attributes if from/to is in use
if (not match_all and from_s is not None and to_s is not None and
from_s.state == to_s.state):
return
if not time_delta:
call_action()
return
unsub_track_same[entity] = async_track_same_state(
hass, time_delta, call_action,
lambda _, _2, to_state: to_state.state == to_s.state,
entity_ids=entity_id)
unsub = async_track_state_change(
hass, entity_id, state_automation_listener, from_state, to_state)
@callback
def async_remove():
"""Remove state listeners async."""
unsub()
for async_remove in unsub_track_same.values():
async_remove()
unsub_track_same.clear()
return async_remove
|
apache-2.0
|
engagespark/django-waffle
|
waffle/south_migrations/0008_auto__add_field_flag_languages.py
|
21
|
6752
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Flag.languages'
db.add_column('waffle_flag', 'languages', self.gf('django.db.models.fields.TextField')(default='', blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Flag.languages'
db.delete_column('waffle_flag', 'languages')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'waffle.flag': {
'Meta': {'object_name': 'Flag'},
'authenticated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'everyone': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'languages': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '1', 'blank': 'True'}),
'rollout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'superusers': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'testing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
},
'waffle.sample': {
'Meta': {'object_name': 'Sample'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '1'})
},
'waffle.switch': {
'Meta': {'object_name': 'Switch'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'})
}
}
complete_apps = ['waffle']
|
bsd-3-clause
|
pratapvardhan/scikit-learn
|
sklearn/svm/setup.py
|
321
|
3157
|
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
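# Illustrative build invocation (run from the scikit-learn source tree; the
# exact workflow may differ):
#   python setup.py build_ext --inplace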
|
bsd-3-clause
|
caesar2164/edx-platform
|
cms/djangoapps/contentstore/management/commands/export.py
|
8
|
1908
|
"""
Script for exporting courseware from Mongo to a tar.gz file
"""
import os
from django.core.management.base import BaseCommand, CommandError
from xmodule.modulestore.xml_exporter import export_course_to_xml
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.keys import CourseKey
from xmodule.contentstore.django import contentstore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locations import SlashSeparatedCourseKey
class Command(BaseCommand):
"""
Export the specified course into a directory. Output will need to be tar zcf'ed.
"""
help = 'Export the specified course into a directory'
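# Illustrative invocation (course key and output path are hypothetical):
#   ./manage.py cms export course-v1:Org+Course+Run /tmp/course_export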
def add_arguments(self, parser):
parser.add_argument('course_id')
parser.add_argument('output_path')
def handle(self, *args, **options):
"""
Given a course id (old or new style) and an output_path folder, export the
corresponding course from mongo and put it directly in the folder.
"""
try:
course_key = CourseKey.from_string(options['course_id'])
except InvalidKeyError:
try:
course_key = SlashSeparatedCourseKey.from_deprecated_string(options['course_id'])
except InvalidKeyError:
raise CommandError("Invalid course_key: '%s'." % options['course_id'])
if not modulestore().get_course(course_key):
raise CommandError("Course with %s key not found." % options['course_id'])
output_path = options['output_path']
print "Exporting course id = {0} to {1}".format(course_key, output_path)
if not output_path.endswith('/'):
output_path += '/'
root_dir = os.path.dirname(output_path)
course_dir = os.path.splitext(os.path.basename(output_path))[0]
export_course_to_xml(modulestore(), contentstore(), course_key, root_dir, course_dir)
|
agpl-3.0
|
jhutar/spacewalk
|
proxy/proxy/rhnAuthCacheClient.py
|
2
|
6945
|
# rhnAuthCacheClient.py
#-------------------------------------------------------------------------------
# Implements a client-side 'remote shelf' caching object used for
# authentication token caching.
# (Client, meaning, a client to the authCache daemon)
#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#-------------------------------------------------------------------------------
## language imports
import socket
import sys
from xmlrpclib import Fault
## local imports
from spacewalk.common.rhnLog import log_debug, log_error
from spacewalk.common.rhnTB import Traceback
from spacewalk.common.rhnException import rhnFault
from spacewalk.common.rhnTranslate import _
from rhnAuthProtocol import CommunicationError, send, recv
#
# Protocol description:
# 1. Send the size of the data as a long (4 bytes), in network order
# 2. Send the data
#
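# Illustrative sketch of that framing (assumes a connected socket `sock` and a
# payload string `data`; not part of this module):
#
#   import struct
#   sock.sendall(struct.pack('!L', len(data)))   # 4-byte length, network order
#   sock.sendall(data)                           # then the payload itself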
# Shamelessly stolen from xmlrpclib.xmlrpc
class _Method:
""" Bind XML-RPC to an RPC Server
Some magic to bind an XML-RPC method to an RPC server.
Supports "nested" methods (e.g. examples.getStateName).
"""
# pylint: disable=R0903
def __init__(self, msend, name):
self.__send = msend
self.__name = name
def __getattr__(self, name):
return _Method(self.__send, "%s.%s" % (self.__name, name))
def __call__(self, *args):
return self.__send(self.__name, args)
def __str__(self):
return "<_Method instance at %s>" % id(self)
__repr__ = __str__
class Shelf:
""" Client authenication temp. db.
Main class that the client side (client to the caching daemon) has to
instantiate to expose the proper API. Basically, the API is a dictionary.
"""
# pylint: disable=R0903
def __init__(self, server_addr):
log_debug(6, server_addr)
self.serverAddr = server_addr
def __request(self, methodname, params):
# pylint: disable=R0915
log_debug(6, methodname, params)
# Init the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect(self.serverAddr)
except socket.error, e:
sock.close()
methodname = None
log_error("Error connecting to the auth cache: %s" % str(e))
Traceback("Shelf.__request", extra="""
Error connecting to the authentication cache daemon.
Make sure it is started on %s""" % str(self.serverAddr))
# FIXME: PROBLEM: this rhnFault will never reach the client
raise rhnFault(1000,
_("Spacewalk Proxy error (issues connecting to auth cache). "
"Please contact your system administrator")), None, sys.exc_info()[2]
wfile = sock.makefile("w")
try:
send(wfile, methodname, None, *params)
except CommunicationError:
wfile.close()
sock.close()
Traceback("Shelf.__request",
extra="Encountered a CommunicationError")
raise
except socket.error, e:
wfile.close()
sock.close()
log_error("Error communicating to the auth cache: %s" % str(e))
Traceback("Shelf.__request", extra="""\
Error sending to the authentication cache daemon.
Make sure the authentication cache daemon is started""")
# FIXME: PROBLEM: this rhnFault will never reach the client
raise rhnFault(1000,
_("Spacewalk Proxy error (issues connecting to auth cache). "
"Please contact your system administrator")), None, sys.exc_info()[2]
wfile.close()
rfile = sock.makefile("r")
try:
params, methodname = recv(rfile)
except CommunicationError, e:
log_error(e.faultString)
rfile.close()
sock.close()
log_error("Error communicating to the auth cache: %s" % str(e))
Traceback("Shelf.__request", extra="""\
Error receiving from the authentication cache daemon.
Make sure the authentication cache daemon is started""")
# FIXME: PROBLEM: this rhnFault will never reach the client
raise rhnFault(1000,
_("Spacewalk Proxy error (issues communicating to auth cache). "
"Please contact your system administrator")), None, sys.exc_info()[2]
except Fault, e:
rfile.close()
sock.close()
# If e.faultCode is 0, it's another exception
if e.faultCode != 0:
# Treat is as a regular xmlrpc fault
raise
_dict = e.faultString
if not isinstance(_dict, type({})):
# Not the expected type
raise
if not _dict.has_key('name'):
# Doesn't look like a marshalled exception
raise
name = _dict['name']
args = _dict.get('args')
# Look up the exception
if not hasattr(__builtins__, name):
# Unknown exception name
raise
# Instantiate the exception object
import new
_dict = {'args': args}
raise new.instance(getattr(__builtins__, name), _dict), None, sys.exc_info()[2]
return params[0]
def __getattr__(self, name):
log_debug(6, name)
return _Method(self.__request, name)
def __str__(self):
return "<Remote-Shelf instance at %s>" % id(self)
#-------------------------------------------------------------------------------
# test code
# pylint: disable=E0012, C0411, C0413, E1136, C0412
if __name__ == '__main__':
from spacewalk.common.rhnConfig import initCFG
initCFG("proxy.broker")
s = Shelf(('localhost', 9999))
s['1234'] = [1, 2, 3, 4, None, None]
s['blah'] = 'testing 1 2 3'
print 'Cached object s["1234"] = %s' % str(s['1234'])
print 'Cached object s["blah"] = %s' % str(s['blah'])
print s.has_key("asdfrasdf")
# print
# print 'And this will bomb (attempt to get non-existent data):'
# s["DOESN'T EXIST!!!"]
#-------------------------------------------------------------------------------
|
gpl-2.0
|
abaditsegay/arangodb
|
3rdParty/V8-4.3.61/tools/testrunner/objects/testcase.py
|
22
|
3600
|
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from . import output
class TestCase(object):
def __init__(self, suite, path, flags=None, dependency=None):
self.suite = suite # TestSuite object
self.path = path # string, e.g. 'div-mod', 'test-api/foo'
self.flags = flags or [] # list of strings, flags specific to this test
self.dependency = dependency # |path| for testcase that must be run first
self.outcomes = None
self.output = None
self.id = None # int, used to map result back to TestCase instance
self.duration = None # assigned during execution
self.run = 1 # The nth time this test is executed.
def CopyAddingFlags(self, flags):
copy = TestCase(self.suite, self.path, self.flags + flags, self.dependency)
copy.outcomes = self.outcomes
return copy
def PackTask(self):
"""
Extracts those parts of this object that are required to run the test
and returns them as a JSON serializable object.
"""
assert self.id is not None
return [self.suitename(), self.path, self.flags,
self.dependency, list(self.outcomes or []), self.id]
@staticmethod
def UnpackTask(task):
"""Creates a new TestCase object based on packed task data."""
# For the order of the fields, refer to PackTask() above.
test = TestCase(str(task[0]), task[1], task[2], task[3])
test.outcomes = set(task[4])
test.id = task[5]
test.run = 1
return test
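# Illustrative round trip (hypothetical): packed = case.PackTask() can be sent
# to another process and rebuilt there with TestCase.UnpackTask(packed); the
# suite travels as its name until SetSuiteObject() restores the object.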
def SetSuiteObject(self, suites):
self.suite = suites[self.suite]
def PackResult(self):
"""Serializes the output of the TestCase after it has run."""
self.suite.StripOutputForTransmit(self)
return [self.id, self.output.Pack(), self.duration]
def MergeResult(self, result):
"""Applies the contents of a Result to this object."""
assert result[0] == self.id
self.output = output.Output.Unpack(result[1])
self.duration = result[2]
def suitename(self):
return self.suite.name
def GetLabel(self):
return self.suitename() + "/" + self.suite.CommonTestName(self)
|
apache-2.0
|
uberlaggydarwin/htc-desire-eye-kernel
|
tools/perf/scripts/python/failed-syscalls-by-pid.py
|
11180
|
2058
|
# failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
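# Note: the try/except TypeError above is the usual autodict counting idiom --
# the first access to a missing leaf returns an empty dict, so adding 1 raises
# TypeError and the counter is initialised to 1 instead.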
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
|
gpl-2.0
|
sergiohgz/incubator-airflow
|
airflow/ti_deps/deps/dag_unpaused_dep.py
|
20
|
1224
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.db import provide_session
class DagUnpausedDep(BaseTIDep):
NAME = "Dag Not Paused"
IGNOREABLE = True
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
if ti.task.dag.is_paused:
yield self._failing_status(
reason="Task's DAG '{0}' is paused.".format(ti.dag_id))
|
apache-2.0
|
dhocker/MPD
|
python/build/verify.py
|
22
|
1150
|
import hashlib
def feed_file(h, f):
"""Feed data read from an open file into the hashlib instance."""
while True:
data = f.read(65536)
if len(data) == 0:
# end of file
break
h.update(data)
def feed_file_path(h, path):
"""Feed data read from a file (to be opened by this function) into the hashlib instance."""
with open(path, 'rb') as f:
feed_file(h, f)
def file_digest(algorithm, path):
"""Calculate the digest of a file and return it in hexadecimal notation."""
h = algorithm()
feed_file_path(h, path)
return h.hexdigest()
def guess_digest_algorithm(digest):
l = len(digest)
if l == 32:
return hashlib.md5
elif l == 40:
return hashlib.sha1
elif l == 64:
return hashlib.sha256
else:
return None
def verify_file_digest(path, expected_digest):
"""Verify the digest of a file, and return True if the digest matches with the given expected digest."""
algorithm = guess_digest_algorithm(expected_digest)
assert(algorithm is not None)
return file_digest(algorithm, path) == expected_digest
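# Illustrative usage (file name and digest are hypothetical):
#   verify_file_digest('mpd-0.21.tar.xz', '<64 hex characters>')
# The algorithm is guessed from the digest length: 32 -> MD5, 40 -> SHA-1,
# 64 -> SHA-256.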
|
gpl-2.0
|
winklerand/pandas
|
pandas/tests/series/test_timeseries.py
|
1
|
32325
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
import numpy as np
from datetime import datetime, timedelta, time
import pandas as pd
import pandas.util.testing as tm
from pandas._libs.tslib import iNaT
from pandas.compat import lrange, StringIO, product
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.tseries.offsets import BDay, BMonthEnd
from pandas import (Index, Series, date_range, NaT, concat, DataFrame,
Timestamp, to_datetime, offsets,
timedelta_range)
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal, _skip_if_has_locale)
from pandas.tests.series.common import TestData
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
def assert_range_equal(left, right):
assert (left.equals(right))
assert (left.freq == right.freq)
assert (left.tz == right.tz)
class TestTimeSeries(TestData):
def test_shift(self):
shifted = self.ts.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, self.ts.index)
tm.assert_index_equal(unshifted.index, self.ts.index)
tm.assert_numpy_array_equal(unshifted.valid().values,
self.ts.values[:-1])
offset = BDay()
shifted = self.ts.shift(1, freq=offset)
unshifted = shifted.shift(-1, freq=offset)
assert_series_equal(unshifted, self.ts)
unshifted = self.ts.shift(0, freq=offset)
assert_series_equal(unshifted, self.ts)
shifted = self.ts.shift(1, freq='B')
unshifted = shifted.shift(-1, freq='B')
assert_series_equal(unshifted, self.ts)
# corner case
unshifted = self.ts.shift(0)
assert_series_equal(unshifted, self.ts)
# Shifting with PeriodIndex
ps = tm.makePeriodSeries()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_index_equal(shifted.index, ps.index)
tm.assert_index_equal(unshifted.index, ps.index)
tm.assert_numpy_array_equal(unshifted.valid().values, ps.values[:-1])
shifted2 = ps.shift(1, 'B')
shifted3 = ps.shift(1, BDay())
assert_series_equal(shifted2, shifted3)
assert_series_equal(ps, shifted2.shift(-1, 'B'))
pytest.raises(ValueError, ps.shift, freq='D')
# legacy support
shifted4 = ps.shift(1, freq='B')
assert_series_equal(shifted2, shifted4)
shifted5 = ps.shift(1, freq=BDay())
assert_series_equal(shifted5, shifted4)
# 32-bit taking
# GH 8129
index = date_range('2000-01-01', periods=5)
for dtype in ['int32', 'int64']:
s1 = Series(np.arange(5, dtype=dtype), index=index)
p = s1.iloc[1]
result = s1.shift(periods=p)
expected = Series([np.nan, 0, 1, 2, 3], index=index)
assert_series_equal(result, expected)
# xref 8260
# with tz
s = Series(date_range('2000-01-01 09:00:00', periods=5,
tz='US/Eastern'), name='foo')
result = s - s.shift()
exp = Series(TimedeltaIndex(['NaT'] + ['1 days'] * 4), name='foo')
assert_series_equal(result, exp)
# incompat tz
s2 = Series(date_range('2000-01-01 09:00:00', periods=5,
tz='CET'), name='foo')
pytest.raises(ValueError, lambda: s - s2)
def test_shift2(self):
ts = Series(np.random.randn(5),
index=date_range('1/1/2000', periods=5, freq='H'))
result = ts.shift(1, freq='5T')
exp_index = ts.index.shift(1, freq='5T')
tm.assert_index_equal(result.index, exp_index)
# GH #1063, multiple of same base
result = ts.shift(1, freq='4H')
exp_index = ts.index + offsets.Hour(4)
tm.assert_index_equal(result.index, exp_index)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
pytest.raises(ValueError, idx.shift, 1)
def test_shift_dst(self):
# GH 13926
dates = date_range('2016-11-06', freq='H', periods=10, tz='US/Eastern')
s = Series(dates)
res = s.shift(0)
tm.assert_series_equal(res, s)
assert res.dtype == 'datetime64[ns, US/Eastern]'
res = s.shift(1)
exp_vals = [NaT] + dates.asobject.values.tolist()[:9]
exp = Series(exp_vals)
tm.assert_series_equal(res, exp)
assert res.dtype == 'datetime64[ns, US/Eastern]'
res = s.shift(-2)
exp_vals = dates.asobject.values.tolist()[2:] + [NaT, NaT]
exp = Series(exp_vals)
tm.assert_series_equal(res, exp)
assert res.dtype == 'datetime64[ns, US/Eastern]'
for ex in [10, -10, 20, -20]:
res = s.shift(ex)
exp = Series([NaT] * 10, dtype='datetime64[ns, US/Eastern]')
tm.assert_series_equal(res, exp)
assert res.dtype == 'datetime64[ns, US/Eastern]'
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodSeries()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_series_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_series_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=BDay())
assert_series_equal(shifted, shifted3)
pytest.raises(ValueError, ps.tshift, freq='M')
# DatetimeIndex
shifted = self.ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_series_equal(self.ts, unshifted)
shifted2 = self.ts.tshift(freq=self.ts.index.freq)
assert_series_equal(shifted, shifted2)
inferred_ts = Series(self.ts.values, Index(np.asarray(self.ts.index)),
name='ts')
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_series_equal(shifted, self.ts.tshift(1))
assert_series_equal(unshifted, inferred_ts)
no_freq = self.ts[[0, 5, 7]]
pytest.raises(ValueError, no_freq.tshift)
def test_truncate(self):
offset = BDay()
ts = self.ts[::3]
start, end = self.ts.index[3], self.ts.index[6]
start_missing, end_missing = self.ts.index[2], self.ts.index[7]
# neither specified
truncated = ts.truncate()
assert_series_equal(truncated, ts)
# both specified
expected = ts[1:3]
truncated = ts.truncate(start, end)
assert_series_equal(truncated, expected)
truncated = ts.truncate(start_missing, end_missing)
assert_series_equal(truncated, expected)
# start specified
expected = ts[1:]
truncated = ts.truncate(before=start)
assert_series_equal(truncated, expected)
truncated = ts.truncate(before=start_missing)
assert_series_equal(truncated, expected)
# end specified
expected = ts[:3]
truncated = ts.truncate(after=end)
assert_series_equal(truncated, expected)
truncated = ts.truncate(after=end_missing)
assert_series_equal(truncated, expected)
# corner case, empty series returned
truncated = ts.truncate(after=self.ts.index[0] - offset)
assert (len(truncated) == 0)
truncated = ts.truncate(before=self.ts.index[-1] + offset)
assert (len(truncated) == 0)
pytest.raises(ValueError, ts.truncate,
before=self.ts.index[-1] + offset,
after=self.ts.index[0] - offset)
def test_truncate_nonsortedindex(self):
# GH 17935
s = pd.Series(['a', 'b', 'c', 'd', 'e'],
index=[5, 3, 2, 9, 0])
with tm.assert_raises_regex(ValueError,
'truncate requires a sorted index'):
s.truncate(before=3, after=9)
rng = pd.date_range('2011-01-01', '2012-01-01', freq='W')
ts = pd.Series(np.random.randn(len(rng)), index=rng)
with tm.assert_raises_regex(ValueError,
'truncate requires a sorted index'):
ts.sort_values(ascending=False).truncate(before='2011-11',
after='2011-12')
def test_asfreq(self):
ts = Series([0., 1., 2.], index=[datetime(2009, 10, 30), datetime(
2009, 11, 30), datetime(2009, 12, 31)])
daily_ts = ts.asfreq('B')
monthly_ts = daily_ts.asfreq('BM')
tm.assert_series_equal(monthly_ts, ts)
daily_ts = ts.asfreq('B', method='pad')
monthly_ts = daily_ts.asfreq('BM')
tm.assert_series_equal(monthly_ts, ts)
daily_ts = ts.asfreq(BDay())
monthly_ts = daily_ts.asfreq(BMonthEnd())
tm.assert_series_equal(monthly_ts, ts)
result = ts[:0].asfreq('M')
assert len(result) == 0
assert result is not ts
daily_ts = ts.asfreq('D', fill_value=-1)
result = daily_ts.value_counts().sort_index()
expected = Series([60, 1, 1, 1],
index=[-1.0, 2.0, 1.0, 0.0]).sort_index()
tm.assert_series_equal(result, expected)
def test_asfreq_datetimeindex_empty_series(self):
# GH 14320
expected = Series(index=pd.DatetimeIndex(
["2016-09-29 11:00"])).asfreq('H')
result = Series(index=pd.DatetimeIndex(["2016-09-29 11:00"]),
data=[3]).asfreq('H')
tm.assert_index_equal(expected.index, result.index)
def test_diff(self):
# Just run the function
self.ts.diff()
# int dtype
a = 10000000000000000
b = a + 1
s = Series([a, b])
rs = s.diff()
assert rs[1] == 1
# neg n
rs = self.ts.diff(-1)
xp = self.ts - self.ts.shift(-1)
assert_series_equal(rs, xp)
# 0
rs = self.ts.diff(0)
xp = self.ts - self.ts
assert_series_equal(rs, xp)
# datetime diff (GH3100)
s = Series(date_range('20130102', periods=5))
rs = s - s.shift(1)
xp = s.diff()
assert_series_equal(rs, xp)
# timedelta diff
nrs = rs - rs.shift(1)
nxp = xp.diff()
assert_series_equal(nrs, nxp)
# with tz
s = Series(
date_range('2000-01-01 09:00:00', periods=5,
tz='US/Eastern'), name='foo')
result = s.diff()
assert_series_equal(result, Series(
TimedeltaIndex(['NaT'] + ['1 days'] * 4), name='foo'))
def test_pct_change(self):
rs = self.ts.pct_change(fill_method=None)
assert_series_equal(rs, self.ts / self.ts.shift(1) - 1)
rs = self.ts.pct_change(2)
filled = self.ts.fillna(method='pad')
assert_series_equal(rs, filled / filled.shift(2) - 1)
rs = self.ts.pct_change(fill_method='bfill', limit=1)
filled = self.ts.fillna(method='bfill', limit=1)
assert_series_equal(rs, filled / filled.shift(1) - 1)
rs = self.ts.pct_change(freq='5D')
filled = self.ts.fillna(method='pad')
assert_series_equal(rs, filled / filled.shift(freq='5D') - 1)
def test_pct_change_shift_over_nas(self):
s = Series([1., 1.5, np.nan, 2.5, 3.])
chg = s.pct_change()
expected = Series([np.nan, 0.5, np.nan, 2.5 / 1.5 - 1, .2])
assert_series_equal(chg, expected)
def test_autocorr(self):
# Just run the function
corr1 = self.ts.autocorr()
# Now run it with the lag parameter
corr2 = self.ts.autocorr(lag=1)
# corr() with lag needs Series of at least length 2
if len(self.ts) <= 2:
assert np.isnan(corr1)
assert np.isnan(corr2)
else:
assert corr1 == corr2
# Choose a random lag between 1 and length of Series - 2
# and compare the result with the Series corr() function
n = 1 + np.random.randint(max(1, len(self.ts) - 2))
corr1 = self.ts.corr(self.ts.shift(n))
corr2 = self.ts.autocorr(lag=n)
# corr() with lag needs Series of at least length 2
if len(self.ts) <= 2:
assert np.isnan(corr1)
assert np.isnan(corr2)
else:
assert corr1 == corr2
def test_first_last_valid(self):
ts = self.ts.copy()
ts[:5] = np.NaN
index = ts.first_valid_index()
assert index == ts.index[5]
ts[-5:] = np.NaN
index = ts.last_valid_index()
assert index == ts.index[-6]
ts[:] = np.nan
assert ts.last_valid_index() is None
assert ts.first_valid_index() is None
ser = Series([], index=[])
assert ser.last_valid_index() is None
assert ser.first_valid_index() is None
# GH12800
empty = Series()
assert empty.last_valid_index() is None
assert empty.first_valid_index() is None
def test_mpl_compat_hack(self):
result = self.ts[:, np.newaxis]
expected = self.ts.values[:, np.newaxis]
assert_almost_equal(result, expected)
def test_timeseries_coercion(self):
idx = tm.makeDateIndex(10000)
ser = Series(np.random.randn(len(idx)), idx.astype(object))
assert ser.index.is_all_dates
assert isinstance(ser.index, DatetimeIndex)
def test_empty_series_ops(self):
# see issue #13844
a = Series(dtype='M8[ns]')
b = Series(dtype='m8[ns]')
assert_series_equal(a, a + b)
assert_series_equal(a, a - b)
assert_series_equal(a, b + a)
pytest.raises(TypeError, lambda x, y: x - y, b, a)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
assert expected.freq is not None
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
assert masked.freq is None
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([epoch + t for t in range(20)])
result = to_datetime(s, unit='s')
expected = Series([Timestamp('2013-06-09 02:42:28') + timedelta(
seconds=t) for t in range(20)])
assert_series_equal(result, expected)
s = Series([epoch + t for t in range(20)]).astype(float)
result = to_datetime(s, unit='s')
expected = Series([Timestamp('2013-06-09 02:42:28') + timedelta(
seconds=t) for t in range(20)])
assert_series_equal(result, expected)
s = Series([epoch + t for t in range(20)] + [iNaT])
result = to_datetime(s, unit='s')
expected = Series([Timestamp('2013-06-09 02:42:28') + timedelta(
seconds=t) for t in range(20)] + [NaT])
assert_series_equal(result, expected)
s = Series([epoch + t for t in range(20)] + [iNaT]).astype(float)
result = to_datetime(s, unit='s')
expected = Series([Timestamp('2013-06-09 02:42:28') + timedelta(
seconds=t) for t in range(20)] + [NaT])
assert_series_equal(result, expected)
# GH13834
s = Series([epoch + t for t in np.arange(0, 2, .25)] +
[iNaT]).astype(float)
result = to_datetime(s, unit='s')
expected = Series([Timestamp('2013-06-09 02:42:28') + timedelta(
seconds=t) for t in np.arange(0, 2, .25)] + [NaT])
assert_series_equal(result, expected)
s = concat([Series([epoch + t for t in range(20)]
).astype(float), Series([np.nan])],
ignore_index=True)
result = to_datetime(s, unit='s')
expected = Series([Timestamp('2013-06-09 02:42:28') + timedelta(
seconds=t) for t in range(20)] + [NaT])
assert_series_equal(result, expected)
result = to_datetime([1, 2, 'NaT', pd.NaT, np.nan], unit='D')
expected = DatetimeIndex([Timestamp('1970-01-02'),
Timestamp('1970-01-03')] + ['NaT'] * 3)
tm.assert_index_equal(result, expected)
with pytest.raises(ValueError):
to_datetime([1, 2, 'foo'], unit='D')
with pytest.raises(ValueError):
to_datetime([1, 2, 111111111], unit='D')
# coerce we can process
expected = DatetimeIndex([Timestamp('1970-01-02'),
Timestamp('1970-01-03')] + ['NaT'] * 1)
result = to_datetime([1, 2, 'foo'], unit='D', errors='coerce')
tm.assert_index_equal(result, expected)
result = to_datetime([1, 2, 111111111], unit='D', errors='coerce')
tm.assert_index_equal(result, expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s')
dates = np.asarray(rng)
series = Series(dates)
assert np.issubdtype(series.dtype, np.dtype('M8[ns]'))
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00.000000\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
assert result == expected
def test_asfreq_keep_index_name(self):
# GH #9854
index_name = 'bar'
index = pd.date_range('20130101', periods=20, name=index_name)
df = pd.DataFrame([x for x in range(20)], columns=['foo'], index=index)
assert index_name == df.index.name
assert index_name == df.asfreq('10D').index.name
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
tm.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
assert len(result) == 20
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
assert len(result) == 10
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
assert len(result) == 20
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
assert len(result) == 10
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
assert (rs.index.hour == rng[1].hour).all()
assert (rs.index.minute == rng[1].minute).all()
assert (rs.index.second == rng[1].second).all()
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.loc[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.loc['1/4/2000':]
result = chunk.loc[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
assert len(rs) == 0
def test_between(self):
series = Series(date_range('1/1/2000', periods=10))
left, right = series[[2, 7]]
result = series.between(left, right)
expected = (series >= left) & (series <= right)
assert_series_equal(result, expected)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert t >= stime
else:
assert t > stime
if inc_end:
assert t <= etime
else:
assert t < etime
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert (t >= stime) or (t <= etime)
else:
assert (t > stime) or (t <= etime)
if inc_end:
assert (t <= etime) or (t >= stime)
else:
assert (t < etime) or (t >= stime)
def test_between_time_types(self):
# GH11818
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
pytest.raises(ValueError, rng.indexer_between_time,
datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5))
frame = DataFrame({'A': 0}, index=rng)
pytest.raises(ValueError, frame.between_time,
datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5))
series = Series(0, index=rng)
pytest.raises(ValueError, series.between_time,
datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5))
def test_between_time_formats(self):
# GH11818
_skip_if_has_locale()
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
strings = [("2:00", "2:30"), ("0200", "0230"), ("2:00am", "2:30am"),
("0200am", "0230am"), ("2:00:00", "2:30:00"),
("020000", "023000"), ("2:00:00am", "2:30:00am"),
("020000am", "023000am")]
expected_length = 28
for time_string in strings:
assert len(ts.between_time(*time_string)) == expected_length
def test_to_period(self):
from pandas.core.indexes.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
exp.index = exp.index.asfreq('M')
tm.assert_index_equal(pts.index, exp.index.asfreq('M'))
assert_series_equal(pts, exp)
# GH 7606 without freq
idx = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'])
exp_idx = pd.PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'], freq='D')
s = Series(np.random.randn(4), index=idx)
expected = s.copy()
expected.index = exp_idx
assert_series_equal(s.to_period(), expected)
df = DataFrame(np.random.randn(4, 4), index=idx, columns=idx)
expected = df.copy()
expected.index = exp_idx
assert_frame_equal(df.to_period(), expected)
expected = df.copy()
expected.columns = exp_idx
assert_frame_equal(df.to_period(axis=1), expected)
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
assert '2000-01-01' in result
def test_series_map_box_timedelta(self):
# GH 11349
s = Series(timedelta_range('1 day 1 s', periods=5, freq='h'))
def f(x):
return x.total_seconds()
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_asfreq_resample_set_correct_freq(self):
# GH5613
# we test if .asfreq() and .resample() set the correct value for .freq
df = pd.DataFrame({'date': ["2012-01-01", "2012-01-02", "2012-01-03"],
'col': [1, 2, 3]})
df = df.set_index(pd.to_datetime(df.date))
# testing the settings before calling .asfreq() and .resample()
assert df.index.freq is None
assert df.index.inferred_freq == 'D'
# does .asfreq() set .freq correctly?
assert df.asfreq('D').index.freq == 'D'
# does .resample() set .freq correctly?
assert df.resample('D').asfreq().index.freq == 'D'
def test_pickle(self):
# GH4606
p = tm.round_trip_pickle(NaT)
assert p is NaT
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = tm.round_trip_pickle(idx)
assert idx_p[0] == idx[0]
assert idx_p[1] is NaT
assert idx_p[2] == idx[2]
# GH11002
# don't infer freq
idx = date_range('1750-1-1', '2050-1-1', freq='7D')
idx_p = tm.round_trip_pickle(idx)
tm.assert_index_equal(idx, idx_p)
def test_setops_preserve_freq(self):
for tz in [None, 'Asia/Tokyo', 'US/Eastern']:
rng = date_range('1/1/2000', '1/1/2002', name='idx', tz=tz)
result = rng[:50].union(rng[50:100])
assert result.name == rng.name
assert result.freq == rng.freq
assert result.tz == rng.tz
result = rng[:50].union(rng[30:100])
assert result.name == rng.name
assert result.freq == rng.freq
assert result.tz == rng.tz
result = rng[:50].union(rng[60:100])
assert result.name == rng.name
assert result.freq is None
assert result.tz == rng.tz
result = rng[:50].intersection(rng[25:75])
assert result.name == rng.name
assert result.freqstr == 'D'
assert result.tz == rng.tz
nofreq = DatetimeIndex(list(rng[25:75]), name='other')
result = rng[:50].union(nofreq)
assert result.name is None
assert result.freq == rng.freq
assert result.tz == rng.tz
result = rng[:50].intersection(nofreq)
assert result.name is None
assert result.freq == rng.freq
assert result.tz == rng.tz
def test_min_max(self):
rng = date_range('1/1/2000', '12/31/2000')
rng2 = rng.take(np.random.permutation(len(rng)))
the_min = rng2.min()
the_max = rng2.max()
assert isinstance(the_min, Timestamp)
assert isinstance(the_max, Timestamp)
assert the_min == rng[0]
assert the_max == rng[-1]
assert rng.min() == rng[0]
assert rng.max() == rng[-1]
def test_min_max_series(self):
rng = date_range('1/1/2000', periods=10, freq='4h')
lvls = ['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C', 'C']
df = DataFrame({'TS': rng, 'V': np.random.randn(len(rng)), 'L': lvls})
result = df.TS.max()
exp = Timestamp(df.TS.iat[-1])
assert isinstance(result, Timestamp)
assert result == exp
result = df.TS.min()
exp = Timestamp(df.TS.iat[0])
assert isinstance(result, Timestamp)
assert result == exp
def test_from_M8_structured(self):
dates = [(datetime(2012, 9, 9, 0, 0), datetime(2012, 9, 8, 15, 10))]
arr = np.array(dates,
dtype=[('Date', 'M8[us]'), ('Forecasting', 'M8[us]')])
df = DataFrame(arr)
assert df['Date'][0] == dates[0][0]
assert df['Forecasting'][0] == dates[0][1]
s = Series(arr['Date'])
assert isinstance(s[0], Timestamp)
assert s[0] == dates[0][0]
with pytest.warns(FutureWarning):
s = Series.from_array(arr['Date'], Index([0]))
assert s[0] == dates[0][0]
def test_get_level_values_box(self):
from pandas import MultiIndex
dates = date_range('1/1/2000', periods=4)
levels = [dates, [0, 1]]
labels = [[0, 0, 1, 1, 2, 2, 3, 3], [0, 1, 0, 1, 0, 1, 0, 1]]
index = MultiIndex(levels=levels, labels=labels)
assert isinstance(index.get_level_values(0)[0], Timestamp)
|
bsd-3-clause
|
garvitr/sympy
|
sympy/polys/polyquinticconst.py
|
117
|
96143
|
"""
Solving solvable quintics - An implementation of DS Dummit's paper
Paper :
http://www.ams.org/journals/mcom/1991-57-195/S0025-5718-1991-1079014-X/S0025-5718-1991-1079014-X.pdf
Mathematica notebook:
http://www.emba.uvm.edu/~ddummit/quintics/quintics.nb
"""
from __future__ import print_function, division
from sympy.core import S, Symbol
from sympy.core.numbers import I
from sympy.polys.polytools import Poly
from sympy.core.evalf import N
from sympy.functions import sqrt
from sympy.utilities import public
x = Symbol('x')
@public
class PolyQuintic(object):
"""Special functions for solvable quintics"""
def __init__(self, poly):
_, _, self.p, self.q, self.r, self.s = poly.all_coeffs()
self.zeta1 = S(-1)/4 + (sqrt(5)/4) + I*sqrt((sqrt(5)/8) + S(5)/8)
self.zeta2 = (-sqrt(5)/4) - S(1)/4 + I*sqrt((-sqrt(5)/8) + S(5)/8)
self.zeta3 = (-sqrt(5)/4) - S(1)/4 - I*sqrt((-sqrt(5)/8) + S(5)/8)
self.zeta4 = S(-1)/4 + (sqrt(5)/4) - I*sqrt((sqrt(5)/8) + S(5)/8)
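# Illustrative example: for Poly(x**5 - 5*x + 12, x), all_coeffs() gives
# [1, 0, 0, 0, -5, 12], so p = 0, q = 0, r = -5, s = 12 (the quintic is
# assumed monic with no x**4 term).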
@property
def f20(self):
p, q, r, s = self.p, self.q, self.r, self.s
f20 = q**8 - 13*p*q**6*r + p**5*q**2*r**2 + 65*p**2*q**4*r**2 - 4*p**6*r**3 - 128*p**3*q**2*r**3 + 17*q**4*r**3 + 48*p**4*r**4 - 16*p*q**2*r**4 - 192*p**2*r**5 + 256*r**6 - 4*p**5*q**3*s - 12*p**2*q**5*s + 18*p**6*q*r*s + 12*p**3*q**3*r*s - 124*q**5*r*s + 196*p**4*q*r**2*s + 590*p*q**3*r**2*s - 160*p**2*q*r**3*s - 1600*q*r**4*s - 27*p**7*s**2 - 150*p**4*q**2*s**2 - 125*p*q**4*s**2 - 99*p**5*r*s**2 - 725*p**2*q**2*r*s**2 + 1200*p**3*r**2*s**2 + 3250*q**2*r**2*s**2 - 2000*p*r**3*s**2 - 1250*p*q*r*s**3 + 3125*p**2*s**4 - 9375*r*s**4-(2*p*q**6 - 19*p**2*q**4*r + 51*p**3*q**2*r**2 - 3*q**4*r**2 - 32*p**4*r**3 - 76*p*q**2*r**3 + 256*p**2*r**4 - 512*r**5 + 31*p**3*q**3*s + 58*q**5*s - 117*p**4*q*r*s - 105*p*q**3*r*s - 260*p**2*q*r**2*s + 2400*q*r**3*s + 108*p**5*s**2 + 325*p**2*q**2*s**2 - 525*p**3*r*s**2 - 2750*q**2*r*s**2 + 500*p*r**2*s**2 - 625*p*q*s**3 + 3125*s**4)*x+(p**2*q**4 - 6*p**3*q**2*r - 8*q**4*r + 9*p**4*r**2 + 76*p*q**2*r**2 - 136*p**2*r**3 + 400*r**4 - 50*p*q**3*s + 90*p**2*q*r*s - 1400*q*r**2*s + 625*q**2*s**2 + 500*p*r*s**2)*x**2-(2*q**4 - 21*p*q**2*r + 40*p**2*r**2 - 160*r**3 + 15*p**2*q*s + 400*q*r*s - 125*p*s**2)*x**3+(2*p*q**2 - 6*p**2*r + 40*r**2 - 50*q*s)*x**4 + 8*r*x**5 + x**6
return Poly(f20, x)
@property
def b(self):
p, q, r, s = self.p, self.q, self.r, self.s
b = ( [], [0,0,0,0,0,0], [0,0,0,0,0,0], [0,0,0,0,0,0], [0,0,0,0,0,0],)
b[1][5] = 100*p**7*q**7 + 2175*p**4*q**9 + 10500*p*q**11 - 1100*p**8*q**5*r - 27975*p**5*q**7*r - 152950*p**2*q**9*r + 4125*p**9*q**3*r**2 + 128875*p**6*q**5*r**2 + 830525*p**3*q**7*r**2 - 59450*q**9*r**2 - 5400*p**10*q*r**3 - 243800*p**7*q**3*r**3 - 2082650*p**4*q**5*r**3 + 333925*p*q**7*r**3 + 139200*p**8*q*r**4 + 2406000*p**5*q**3*r**4 + 122600*p**2*q**5*r**4 - 1254400*p**6*q*r**5 - 3776000*p**3*q**3*r**5 - 1832000*q**5*r**5 + 4736000*p**4*q*r**6 + 6720000*p*q**3*r**6 - 6400000*p**2*q*r**7 + 900*p**9*q**4*s + 37400*p**6*q**6*s + 281625*p**3*q**8*s + 435000*q**10*s - 6750*p**10*q**2*r*s - 322300*p**7*q**4*r*s - 2718575*p**4*q**6*r*s - 4214250*p*q**8*r*s + 16200*p**11*r**2*s + 859275*p**8*q**2*r**2*s + 8925475*p**5*q**4*r**2*s + 14427875*p**2*q**6*r**2*s - 453600*p**9*r**3*s - 10038400*p**6*q**2*r**3*s - 17397500*p**3*q**4*r**3*s + 11333125*q**6*r**3*s + 4451200*p**7*r**4*s + 15850000*p**4*q**2*r**4*s - 34000000*p*q**4*r**4*s - 17984000*p**5*r**5*s + 10000000*p**2*q**2*r**5*s + 25600000*p**3*r**6*s + 8000000*q**2*r**6*s - 6075*p**11*q*s**2 + 83250*p**8*q**3*s**2 + 1282500*p**5*q**5*s**2 + 2862500*p**2*q**7*s**2 - 724275*p**9*q*r*s**2 - 9807250*p**6*q**3*r*s**2 - 28374375*p**3*q**5*r*s**2 - 22212500*q**7*r*s**2 + 8982000*p**7*q*r**2*s**2 + 39600000*p**4*q**3*r**2*s**2 + 61746875*p*q**5*r**2*s**2 + 1010000*p**5*q*r**3*s**2 + 1000000*p**2*q**3*r**3*s**2 - 78000000*p**3*q*r**4*s**2 - 30000000*q**3*r**4*s**2 - 80000000*p*q*r**5*s**2 + 759375*p**10*s**3 + 9787500*p**7*q**2*s**3 + 39062500*p**4*q**4*s**3 + 52343750*p*q**6*s**3 - 12301875*p**8*r*s**3 - 98175000*p**5*q**2*r*s**3 - 225078125*p**2*q**4*r*s**3 + 54900000*p**6*r**2*s**3 + 310000000*p**3*q**2*r**2*s**3 + 7890625*q**4*r**2*s**3 - 51250000*p**4*r**3*s**3 + 420000000*p*q**2*r**3*s**3 - 110000000*p**2*r**4*s**3 + 200000000*r**5*s**3 - 2109375*p**6*q*s**4 + 21093750*p**3*q**3*s**4 + 89843750*q**5*s**4 - 182343750*p**4*q*r*s**4 - 733203125*p*q**3*r*s**4 + 196875000*p**2*q*r**2*s**4 - 1125000000*q*r**3*s**4 + 158203125*p**5*s**5 + 566406250*p**2*q**2*s**5 - 101562500*p**3*r*s**5 + 1669921875*q**2*r*s**5 - 1250000000*p*r**2*s**5 + 1220703125*p*q*s**6 - 6103515625*s**7
b[1][4] = -1000*p**5*q**7 - 7250*p**2*q**9 + 10800*p**6*q**5*r + 96900*p**3*q**7*r + 52500*q**9*r - 37400*p**7*q**3*r**2 - 470850*p**4*q**5*r**2 - 640600*p*q**7*r**2 + 39600*p**8*q*r**3 + 983600*p**5*q**3*r**3 + 2848100*p**2*q**5*r**3 - 814400*p**6*q*r**4 - 6076000*p**3*q**3*r**4 - 2308000*q**5*r**4 + 5024000*p**4*q*r**5 + 9680000*p*q**3*r**5 - 9600000*p**2*q*r**6 - 13800*p**7*q**4*s - 94650*p**4*q**6*s + 26500*p*q**8*s + 86400*p**8*q**2*r*s + 816500*p**5*q**4*r*s + 257500*p**2*q**6*r*s - 91800*p**9*r**2*s - 1853700*p**6*q**2*r**2*s - 630000*p**3*q**4*r**2*s + 8971250*q**6*r**2*s + 2071200*p**7*r**3*s + 7240000*p**4*q**2*r**3*s - 29375000*p*q**4*r**3*s - 14416000*p**5*r**4*s + 5200000*p**2*q**2*r**4*s + 30400000*p**3*r**5*s + 12000000*q**2*r**5*s - 64800*p**9*q*s**2 - 567000*p**6*q**3*s**2 - 1655000*p**3*q**5*s**2 - 6987500*q**7*s**2 - 337500*p**7*q*r*s**2 - 8462500*p**4*q**3*r*s**2 + 5812500*p*q**5*r*s**2 + 24930000*p**5*q*r**2*s**2 + 69125000*p**2*q**3*r**2*s**2 - 103500000*p**3*q*r**3*s**2 - 30000000*q**3*r**3*s**2 - 90000000*p*q*r**4*s**2 + 708750*p**8*s**3 + 5400000*p**5*q**2*s**3 - 8906250*p**2*q**4*s**3 - 18562500*p**6*r*s**3 + 625000*p**3*q**2*r*s**3 - 29687500*q**4*r*s**3 + 75000000*p**4*r**2*s**3 + 416250000*p*q**2*r**2*s**3 - 60000000*p**2*r**3*s**3 + 300000000*r**4*s**3 - 71718750*p**4*q*s**4 - 189062500*p*q**3*s**4 - 210937500*p**2*q*r*s**4 - 1187500000*q*r**2*s**4 + 187500000*p**3*s**5 + 800781250*q**2*s**5 + 390625000*p*r*s**5
b[1][3] = 500*p**6*q**5 + 6350*p**3*q**7 + 19800*q**9 - 3750*p**7*q**3*r - 65100*p**4*q**5*r - 264950*p*q**7*r + 6750*p**8*q*r**2 + 209050*p**5*q**3*r**2 + 1217250*p**2*q**5*r**2 - 219000*p**6*q*r**3 - 2510000*p**3*q**3*r**3 - 1098500*q**5*r**3 + 2068000*p**4*q*r**4 + 5060000*p*q**3*r**4 - 5200000*p**2*q*r**5 + 6750*p**8*q**2*s + 96350*p**5*q**4*s + 346000*p**2*q**6*s - 20250*p**9*r*s - 459900*p**6*q**2*r*s - 1828750*p**3*q**4*r*s + 2930000*q**6*r*s + 594000*p**7*r**2*s + 4301250*p**4*q**2*r**2*s - 10906250*p*q**4*r**2*s - 5252000*p**5*r**3*s + 1450000*p**2*q**2*r**3*s + 12800000*p**3*r**4*s + 6500000*q**2*r**4*s - 74250*p**7*q*s**2 - 1418750*p**4*q**3*s**2 - 5956250*p*q**5*s**2 + 4297500*p**5*q*r*s**2 + 29906250*p**2*q**3*r*s**2 - 31500000*p**3*q*r**2*s**2 - 12500000*q**3*r**2*s**2 - 35000000*p*q*r**3*s**2 - 1350000*p**6*s**3 - 6093750*p**3*q**2*s**3 - 17500000*q**4*s**3 + 7031250*p**4*r*s**3 + 127812500*p*q**2*r*s**3 - 18750000*p**2*r**2*s**3 + 162500000*r**3*s**3 - 107812500*p**2*q*s**4 - 460937500*q*r*s**4 + 214843750*p*s**5
b[1][2] = -1950*p**4*q**5 - 14100*p*q**7 + 14350*p**5*q**3*r + 125600*p**2*q**5*r - 27900*p**6*q*r**2 - 402250*p**3*q**3*r**2 - 288250*q**5*r**2 + 436000*p**4*q*r**3 + 1345000*p*q**3*r**3 - 1400000*p**2*q*r**4 - 9450*p**6*q**2*s + 1250*p**3*q**4*s + 465000*q**6*s + 49950*p**7*r*s + 302500*p**4*q**2*r*s - 1718750*p*q**4*r*s - 834000*p**5*r**2*s - 437500*p**2*q**2*r**2*s + 3100000*p**3*r**3*s + 1750000*q**2*r**3*s + 292500*p**5*q*s**2 + 1937500*p**2*q**3*s**2 - 3343750*p**3*q*r*s**2 - 1875000*q**3*r*s**2 - 8125000*p*q*r**2*s**2 + 1406250*p**4*s**3 + 12343750*p*q**2*s**3 - 5312500*p**2*r*s**3 + 43750000*r**2*s**3 - 74218750*q*s**4
b[1][1] = 300*p**5*q**3 + 2150*p**2*q**5 - 1350*p**6*q*r - 21500*p**3*q**3*r - 61500*q**5*r + 42000*p**4*q*r**2 + 290000*p*q**3*r**2 - 300000*p**2*q*r**3 + 4050*p**7*s + 45000*p**4*q**2*s + 125000*p*q**4*s - 108000*p**5*r*s - 643750*p**2*q**2*r*s + 700000*p**3*r**2*s + 375000*q**2*r**2*s + 93750*p**3*q*s**2 + 312500*q**3*s**2 - 1875000*p*q*r*s**2 + 1406250*p**2*s**3 + 9375000*r*s**3
b[1][0] = -1250*p**3*q**3 - 9000*q**5 + 4500*p**4*q*r + 46250*p*q**3*r - 50000*p**2*q*r**2 - 6750*p**5*s - 43750*p**2*q**2*s + 75000*p**3*r*s + 62500*q**2*r*s - 156250*p*q*s**2 + 1562500*s**3
b[2][5] = 200*p**6*q**11 - 250*p**3*q**13 - 10800*q**15 - 3900*p**7*q**9*r - 3325*p**4*q**11*r + 181800*p*q**13*r + 26950*p**8*q**7*r**2 + 69625*p**5*q**9*r**2 - 1214450*p**2*q**11*r**2 - 78725*p**9*q**5*r**3 - 368675*p**6*q**7*r**3 + 4166325*p**3*q**9*r**3 + 1131100*q**11*r**3 + 73400*p**10*q**3*r**4 + 661950*p**7*q**5*r**4 - 9151950*p**4*q**7*r**4 - 16633075*p*q**9*r**4 + 36000*p**11*q*r**5 + 135600*p**8*q**3*r**5 + 17321400*p**5*q**5*r**5 + 85338300*p**2*q**7*r**5 - 832000*p**9*q*r**6 - 21379200*p**6*q**3*r**6 - 176044000*p**3*q**5*r**6 - 1410000*q**7*r**6 + 6528000*p**7*q*r**7 + 129664000*p**4*q**3*r**7 + 47344000*p*q**5*r**7 - 21504000*p**5*q*r**8 - 115200000*p**2*q**3*r**8 + 25600000*p**3*q*r**9 + 64000000*q**3*r**9 + 15700*p**8*q**8*s + 120525*p**5*q**10*s + 113250*p**2*q**12*s - 196900*p**9*q**6*r*s - 1776925*p**6*q**8*r*s - 3062475*p**3*q**10*r*s - 4153500*q**12*r*s + 857925*p**10*q**4*r**2*s + 10562775*p**7*q**6*r**2*s + 34866250*p**4*q**8*r**2*s + 73486750*p*q**10*r**2*s - 1333800*p**11*q**2*r**3*s - 29212625*p**8*q**4*r**3*s - 168729675*p**5*q**6*r**3*s - 427230750*p**2*q**8*r**3*s + 108000*p**12*r**4*s + 30384200*p**9*q**2*r**4*s + 324535100*p**6*q**4*r**4*s + 952666750*p**3*q**6*r**4*s - 38076875*q**8*r**4*s - 4296000*p**10*r**5*s - 213606400*p**7*q**2*r**5*s - 842060000*p**4*q**4*r**5*s - 95285000*p*q**6*r**5*s + 61184000*p**8*r**6*s + 567520000*p**5*q**2*r**6*s + 547000000*p**2*q**4*r**6*s - 390912000*p**6*r**7*s - 812800000*p**3*q**2*r**7*s - 924000000*q**4*r**7*s + 1152000000*p**4*r**8*s + 800000000*p*q**2*r**8*s - 1280000000*p**2*r**9*s + 141750*p**10*q**5*s**2 - 31500*p**7*q**7*s**2 - 11325000*p**4*q**9*s**2 - 31687500*p*q**11*s**2 - 1293975*p**11*q**3*r*s**2 - 4803800*p**8*q**5*r*s**2 + 71398250*p**5*q**7*r*s**2 + 227625000*p**2*q**9*r*s**2 + 3256200*p**12*q*r**2*s**2 + 43870125*p**9*q**3*r**2*s**2 + 64581500*p**6*q**5*r**2*s**2 + 56090625*p**3*q**7*r**2*s**2 + 260218750*q**9*r**2*s**2 - 74610000*p**10*q*r**3*s**2 - 662186500*p**7*q**3*r**3*s**2 - 1987747500*p**4*q**5*r**3*s**2 - 811928125*p*q**7*r**3*s**2 + 471286000*p**8*q*r**4*s**2 + 2106040000*p**5*q**3*r**4*s**2 + 792687500*p**2*q**5*r**4*s**2 - 135120000*p**6*q*r**5*s**2 + 2479000000*p**3*q**3*r**5*s**2 + 5242250000*q**5*r**5*s**2 - 6400000000*p**4*q*r**6*s**2 - 8620000000*p*q**3*r**6*s**2 + 13280000000*p**2*q*r**7*s**2 + 1600000000*q*r**8*s**2 + 273375*p**12*q**2*s**3 - 13612500*p**9*q**4*s**3 - 177250000*p**6*q**6*s**3 - 511015625*p**3*q**8*s**3 - 320937500*q**10*s**3 - 2770200*p**13*r*s**3 + 12595500*p**10*q**2*r*s**3 + 543950000*p**7*q**4*r*s**3 + 1612281250*p**4*q**6*r*s**3 + 968125000*p*q**8*r*s**3 + 77031000*p**11*r**2*s**3 + 373218750*p**8*q**2*r**2*s**3 + 1839765625*p**5*q**4*r**2*s**3 + 1818515625*p**2*q**6*r**2*s**3 - 776745000*p**9*r**3*s**3 - 6861075000*p**6*q**2*r**3*s**3 - 20014531250*p**3*q**4*r**3*s**3 - 13747812500*q**6*r**3*s**3 + 3768000000*p**7*r**4*s**3 + 35365000000*p**4*q**2*r**4*s**3 + 34441875000*p*q**4*r**4*s**3 - 9628000000*p**5*r**5*s**3 - 63230000000*p**2*q**2*r**5*s**3 + 13600000000*p**3*r**6*s**3 - 15000000000*q**2*r**6*s**3 - 10400000000*p*r**7*s**3 - 45562500*p**11*q*s**4 - 525937500*p**8*q**3*s**4 - 1364218750*p**5*q**5*s**4 - 1382812500*p**2*q**7*s**4 + 572062500*p**9*q*r*s**4 + 2473515625*p**6*q**3*r*s**4 + 13192187500*p**3*q**5*r*s**4 + 12703125000*q**7*r*s**4 - 451406250*p**7*q*r**2*s**4 - 18153906250*p**4*q**3*r**2*s**4 - 36908203125*p*q**5*r**2*s**4 - 9069375000*p**5*q*r**3*s**4 + 79957812500*p**2*q**3*r**3*s**4 + 5512500000*p**3*q*r**4*s**4 + 50656250000*q**3*r**4*s**4 + 
74750000000*p*q*r**5*s**4 + 56953125*p**10*s**5 + 1381640625*p**7*q**2*s**5 - 781250000*p**4*q**4*s**5 + 878906250*p*q**6*s**5 - 2655703125*p**8*r*s**5 - 3223046875*p**5*q**2*r*s**5 - 35117187500*p**2*q**4*r*s**5 + 26573437500*p**6*r**2*s**5 + 14785156250*p**3*q**2*r**2*s**5 - 52050781250*q**4*r**2*s**5 - 103062500000*p**4*r**3*s**5 - 281796875000*p*q**2*r**3*s**5 + 146875000000*p**2*r**4*s**5 - 37500000000*r**5*s**5 - 8789062500*p**6*q*s**6 - 3906250000*p**3*q**3*s**6 + 1464843750*q**5*s**6 + 102929687500*p**4*q*r*s**6 + 297119140625*p*q**3*r*s**6 - 217773437500*p**2*q*r**2*s**6 + 167968750000*q*r**3*s**6 + 10986328125*p**5*s**7 + 98876953125*p**2*q**2*s**7 - 188964843750*p**3*r*s**7 - 278320312500*q**2*r*s**7 + 517578125000*p*r**2*s**7 - 610351562500*p*q*s**8 + 762939453125*s**9
b[2][4] = -200*p**7*q**9 + 1850*p**4*q**11 + 21600*p*q**13 + 3200*p**8*q**7*r - 19200*p**5*q**9*r - 316350*p**2*q**11*r - 19050*p**9*q**5*r**2 + 37400*p**6*q**7*r**2 + 1759250*p**3*q**9*r**2 + 440100*q**11*r**2 + 48750*p**10*q**3*r**3 + 190200*p**7*q**5*r**3 - 4604200*p**4*q**7*r**3 - 6072800*p*q**9*r**3 - 43200*p**11*q*r**4 - 834500*p**8*q**3*r**4 + 4916000*p**5*q**5*r**4 + 27926850*p**2*q**7*r**4 + 969600*p**9*q*r**5 + 2467200*p**6*q**3*r**5 - 45393200*p**3*q**5*r**5 - 5399500*q**7*r**5 - 7283200*p**7*q*r**6 + 10536000*p**4*q**3*r**6 + 41656000*p*q**5*r**6 + 22784000*p**5*q*r**7 - 35200000*p**2*q**3*r**7 - 25600000*p**3*q*r**8 + 96000000*q**3*r**8 - 3000*p**9*q**6*s + 40400*p**6*q**8*s + 136550*p**3*q**10*s - 1647000*q**12*s + 40500*p**10*q**4*r*s - 173600*p**7*q**6*r*s - 126500*p**4*q**8*r*s + 23969250*p*q**10*r*s - 153900*p**11*q**2*r**2*s - 486150*p**8*q**4*r**2*s - 4115800*p**5*q**6*r**2*s - 112653250*p**2*q**8*r**2*s + 129600*p**12*r**3*s + 2683350*p**9*q**2*r**3*s + 10906650*p**6*q**4*r**3*s + 187289500*p**3*q**6*r**3*s + 44098750*q**8*r**3*s - 4384800*p**10*r**4*s - 35660800*p**7*q**2*r**4*s - 175420000*p**4*q**4*r**4*s - 426538750*p*q**6*r**4*s + 60857600*p**8*r**5*s + 349436000*p**5*q**2*r**5*s + 900600000*p**2*q**4*r**5*s - 429568000*p**6*r**6*s - 1511200000*p**3*q**2*r**6*s - 1286000000*q**4*r**6*s + 1472000000*p**4*r**7*s + 1440000000*p*q**2*r**7*s - 1920000000*p**2*r**8*s - 36450*p**11*q**3*s**2 - 188100*p**8*q**5*s**2 - 5504750*p**5*q**7*s**2 - 37968750*p**2*q**9*s**2 + 255150*p**12*q*r*s**2 + 2754000*p**9*q**3*r*s**2 + 49196500*p**6*q**5*r*s**2 + 323587500*p**3*q**7*r*s**2 - 83250000*q**9*r*s**2 - 465750*p**10*q*r**2*s**2 - 31881500*p**7*q**3*r**2*s**2 - 415585000*p**4*q**5*r**2*s**2 + 1054775000*p*q**7*r**2*s**2 - 96823500*p**8*q*r**3*s**2 - 701490000*p**5*q**3*r**3*s**2 - 2953531250*p**2*q**5*r**3*s**2 + 1454560000*p**6*q*r**4*s**2 + 7670500000*p**3*q**3*r**4*s**2 + 5661062500*q**5*r**4*s**2 - 7785000000*p**4*q*r**5*s**2 - 9450000000*p*q**3*r**5*s**2 + 14000000000*p**2*q*r**6*s**2 + 2400000000*q*r**7*s**2 - 437400*p**13*s**3 - 10145250*p**10*q**2*s**3 - 121912500*p**7*q**4*s**3 - 576531250*p**4*q**6*s**3 - 528593750*p*q**8*s**3 + 12939750*p**11*r*s**3 + 313368750*p**8*q**2*r*s**3 + 2171812500*p**5*q**4*r*s**3 + 2381718750*p**2*q**6*r*s**3 - 124638750*p**9*r**2*s**3 - 3001575000*p**6*q**2*r**2*s**3 - 12259375000*p**3*q**4*r**2*s**3 - 9985312500*q**6*r**2*s**3 + 384000000*p**7*r**3*s**3 + 13997500000*p**4*q**2*r**3*s**3 + 20749531250*p*q**4*r**3*s**3 - 553500000*p**5*r**4*s**3 - 41835000000*p**2*q**2*r**4*s**3 + 5420000000*p**3*r**5*s**3 - 16300000000*q**2*r**5*s**3 - 17600000000*p*r**6*s**3 - 7593750*p**9*q*s**4 + 289218750*p**6*q**3*s**4 + 3591406250*p**3*q**5*s**4 + 5992187500*q**7*s**4 + 658125000*p**7*q*r*s**4 - 269531250*p**4*q**3*r*s**4 - 15882812500*p*q**5*r*s**4 - 4785000000*p**5*q*r**2*s**4 + 54375781250*p**2*q**3*r**2*s**4 - 5668750000*p**3*q*r**3*s**4 + 35867187500*q**3*r**3*s**4 + 113875000000*p*q*r**4*s**4 - 544218750*p**8*s**5 - 5407031250*p**5*q**2*s**5 - 14277343750*p**2*q**4*s**5 + 5421093750*p**6*r*s**5 - 24941406250*p**3*q**2*r*s**5 - 25488281250*q**4*r*s**5 - 11500000000*p**4*r**2*s**5 - 231894531250*p*q**2*r**2*s**5 - 6250000000*p**2*r**3*s**5 - 43750000000*r**4*s**5 + 35449218750*p**4*q*s**6 + 137695312500*p*q**3*s**6 + 34667968750*p**2*q*r*s**6 + 202148437500*q*r**2*s**6 - 33691406250*p**3*s**7 - 214843750000*q**2*s**7 - 31738281250*p*r*s**7
b[2][3] = -800*p**5*q**9 - 5400*p**2*q**11 + 5800*p**6*q**7*r + 48750*p**3*q**9*r + 16200*q**11*r - 3000*p**7*q**5*r**2 - 108350*p**4*q**7*r**2 - 263250*p*q**9*r**2 - 60700*p**8*q**3*r**3 - 386250*p**5*q**5*r**3 + 253100*p**2*q**7*r**3 + 127800*p**9*q*r**4 + 2326700*p**6*q**3*r**4 + 6565550*p**3*q**5*r**4 - 705750*q**7*r**4 - 2903200*p**7*q*r**5 - 21218000*p**4*q**3*r**5 + 1057000*p*q**5*r**5 + 20368000*p**5*q*r**6 + 33000000*p**2*q**3*r**6 - 43200000*p**3*q*r**7 + 52000000*q**3*r**7 + 6200*p**7*q**6*s + 188250*p**4*q**8*s + 931500*p*q**10*s - 73800*p**8*q**4*r*s - 1466850*p**5*q**6*r*s - 6894000*p**2*q**8*r*s + 315900*p**9*q**2*r**2*s + 4547000*p**6*q**4*r**2*s + 20362500*p**3*q**6*r**2*s + 15018750*q**8*r**2*s - 653400*p**10*r**3*s - 13897550*p**7*q**2*r**3*s - 76757500*p**4*q**4*r**3*s - 124207500*p*q**6*r**3*s + 18567600*p**8*r**4*s + 175911000*p**5*q**2*r**4*s + 253787500*p**2*q**4*r**4*s - 183816000*p**6*r**5*s - 706900000*p**3*q**2*r**5*s - 665750000*q**4*r**5*s + 740000000*p**4*r**6*s + 890000000*p*q**2*r**6*s - 1040000000*p**2*r**7*s - 763000*p**6*q**5*s**2 - 12375000*p**3*q**7*s**2 - 40500000*q**9*s**2 + 364500*p**10*q*r*s**2 + 15537000*p**7*q**3*r*s**2 + 154392500*p**4*q**5*r*s**2 + 372206250*p*q**7*r*s**2 - 25481250*p**8*q*r**2*s**2 - 386300000*p**5*q**3*r**2*s**2 - 996343750*p**2*q**5*r**2*s**2 + 459872500*p**6*q*r**3*s**2 + 2943937500*p**3*q**3*r**3*s**2 + 2437781250*q**5*r**3*s**2 - 2883750000*p**4*q*r**4*s**2 - 4343750000*p*q**3*r**4*s**2 + 5495000000*p**2*q*r**5*s**2 + 1300000000*q*r**6*s**2 - 364500*p**11*s**3 - 13668750*p**8*q**2*s**3 - 113406250*p**5*q**4*s**3 - 159062500*p**2*q**6*s**3 + 13972500*p**9*r*s**3 + 61537500*p**6*q**2*r*s**3 - 1622656250*p**3*q**4*r*s**3 - 2720625000*q**6*r*s**3 - 201656250*p**7*r**2*s**3 + 1949687500*p**4*q**2*r**2*s**3 + 4979687500*p*q**4*r**2*s**3 + 497125000*p**5*r**3*s**3 - 11150625000*p**2*q**2*r**3*s**3 + 2982500000*p**3*r**4*s**3 - 6612500000*q**2*r**4*s**3 - 10450000000*p*r**5*s**3 + 126562500*p**7*q*s**4 + 1443750000*p**4*q**3*s**4 + 281250000*p*q**5*s**4 - 1648125000*p**5*q*r*s**4 + 11271093750*p**2*q**3*r*s**4 - 4785156250*p**3*q*r**2*s**4 + 8808593750*q**3*r**2*s**4 + 52390625000*p*q*r**3*s**4 - 611718750*p**6*s**5 - 13027343750*p**3*q**2*s**5 - 1464843750*q**4*s**5 + 6492187500*p**4*r*s**5 - 65351562500*p*q**2*r*s**5 - 13476562500*p**2*r**2*s**5 - 24218750000*r**3*s**5 + 41992187500*p**2*q*s**6 + 69824218750*q*r*s**6 - 34179687500*p*s**7
b[2][2] = -1000*p**6*q**7 - 5150*p**3*q**9 + 10800*q**11 + 11000*p**7*q**5*r + 66450*p**4*q**7*r - 127800*p*q**9*r - 41250*p**8*q**3*r**2 - 368400*p**5*q**5*r**2 + 204200*p**2*q**7*r**2 + 54000*p**9*q*r**3 + 1040950*p**6*q**3*r**3 + 2096500*p**3*q**5*r**3 + 200000*q**7*r**3 - 1140000*p**7*q*r**4 - 7691000*p**4*q**3*r**4 - 2281000*p*q**5*r**4 + 7296000*p**5*q*r**5 + 13300000*p**2*q**3*r**5 - 14400000*p**3*q*r**6 + 14000000*q**3*r**6 - 9000*p**8*q**4*s + 52100*p**5*q**6*s + 710250*p**2*q**8*s + 67500*p**9*q**2*r*s - 256100*p**6*q**4*r*s - 5753000*p**3*q**6*r*s + 292500*q**8*r*s - 162000*p**10*r**2*s - 1432350*p**7*q**2*r**2*s + 5410000*p**4*q**4*r**2*s - 7408750*p*q**6*r**2*s + 4401000*p**8*r**3*s + 24185000*p**5*q**2*r**3*s + 20781250*p**2*q**4*r**3*s - 43012000*p**6*r**4*s - 146300000*p**3*q**2*r**4*s - 165875000*q**4*r**4*s + 182000000*p**4*r**5*s + 250000000*p*q**2*r**5*s - 280000000*p**2*r**6*s + 60750*p**10*q*s**2 + 2414250*p**7*q**3*s**2 + 15770000*p**4*q**5*s**2 + 15825000*p*q**7*s**2 - 6021000*p**8*q*r*s**2 - 62252500*p**5*q**3*r*s**2 - 74718750*p**2*q**5*r*s**2 + 90888750*p**6*q*r**2*s**2 + 471312500*p**3*q**3*r**2*s**2 + 525875000*q**5*r**2*s**2 - 539375000*p**4*q*r**3*s**2 - 1030000000*p*q**3*r**3*s**2 + 1142500000*p**2*q*r**4*s**2 + 350000000*q*r**5*s**2 - 303750*p**9*s**3 - 35943750*p**6*q**2*s**3 - 331875000*p**3*q**4*s**3 - 505937500*q**6*s**3 + 8437500*p**7*r*s**3 + 530781250*p**4*q**2*r*s**3 + 1150312500*p*q**4*r*s**3 - 154500000*p**5*r**2*s**3 - 2059062500*p**2*q**2*r**2*s**3 + 1150000000*p**3*r**3*s**3 - 1343750000*q**2*r**3*s**3 - 2900000000*p*r**4*s**3 + 30937500*p**5*q*s**4 + 1166406250*p**2*q**3*s**4 - 1496875000*p**3*q*r*s**4 + 1296875000*q**3*r*s**4 + 10640625000*p*q*r**2*s**4 - 281250000*p**4*s**5 - 9746093750*p*q**2*s**5 + 1269531250*p**2*r*s**5 - 7421875000*r**2*s**5 + 15625000000*q*s**6
b[2][1] = -1600*p**4*q**7 - 10800*p*q**9 + 9800*p**5*q**5*r + 80550*p**2*q**7*r - 4600*p**6*q**3*r**2 - 112700*p**3*q**5*r**2 + 40500*q**7*r**2 - 34200*p**7*q*r**3 - 279500*p**4*q**3*r**3 - 665750*p*q**5*r**3 + 632000*p**5*q*r**4 + 3200000*p**2*q**3*r**4 - 2800000*p**3*q*r**5 + 3000000*q**3*r**5 - 18600*p**6*q**4*s - 51750*p**3*q**6*s + 405000*q**8*s + 21600*p**7*q**2*r*s - 122500*p**4*q**4*r*s - 2891250*p*q**6*r*s + 156600*p**8*r**2*s + 1569750*p**5*q**2*r**2*s + 6943750*p**2*q**4*r**2*s - 3774000*p**6*r**3*s - 27100000*p**3*q**2*r**3*s - 30187500*q**4*r**3*s + 28000000*p**4*r**4*s + 52500000*p*q**2*r**4*s - 60000000*p**2*r**5*s - 81000*p**8*q*s**2 - 240000*p**5*q**3*s**2 + 937500*p**2*q**5*s**2 + 3273750*p**6*q*r*s**2 + 30406250*p**3*q**3*r*s**2 + 55687500*q**5*r*s**2 - 42187500*p**4*q*r**2*s**2 - 112812500*p*q**3*r**2*s**2 + 152500000*p**2*q*r**3*s**2 + 75000000*q*r**4*s**2 - 4218750*p**4*q**2*s**3 + 15156250*p*q**4*s**3 + 5906250*p**5*r*s**3 - 206562500*p**2*q**2*r*s**3 + 107500000*p**3*r**2*s**3 - 159375000*q**2*r**2*s**3 - 612500000*p*r**3*s**3 + 135937500*p**3*q*s**4 + 46875000*q**3*s**4 + 1175781250*p*q*r*s**4 - 292968750*p**2*s**5 - 1367187500*r*s**5
b[2][0] = -800*p**5*q**5 - 5400*p**2*q**7 + 6000*p**6*q**3*r + 51700*p**3*q**5*r + 27000*q**7*r - 10800*p**7*q*r**2 - 163250*p**4*q**3*r**2 - 285750*p*q**5*r**2 + 192000*p**5*q*r**3 + 1000000*p**2*q**3*r**3 - 800000*p**3*q*r**4 + 500000*q**3*r**4 - 10800*p**7*q**2*s - 57500*p**4*q**4*s + 67500*p*q**6*s + 32400*p**8*r*s + 279000*p**5*q**2*r*s - 131250*p**2*q**4*r*s - 729000*p**6*r**2*s - 4100000*p**3*q**2*r**2*s - 5343750*q**4*r**2*s + 5000000*p**4*r**3*s + 10000000*p*q**2*r**3*s - 10000000*p**2*r**4*s + 641250*p**6*q*s**2 + 5812500*p**3*q**3*s**2 + 10125000*q**5*s**2 - 7031250*p**4*q*r*s**2 - 20625000*p*q**3*r*s**2 + 17500000*p**2*q*r**2*s**2 + 12500000*q*r**3*s**2 - 843750*p**5*s**3 - 19375000*p**2*q**2*s**3 + 30000000*p**3*r*s**3 - 20312500*q**2*r*s**3 - 112500000*p*r**2*s**3 + 183593750*p*q*s**4 - 292968750*s**5
b[3][5] = 500*p**11*q**6 + 9875*p**8*q**8 + 42625*p**5*q**10 - 35000*p**2*q**12 - 4500*p**12*q**4*r - 108375*p**9*q**6*r - 516750*p**6*q**8*r + 1110500*p**3*q**10*r + 2730000*q**12*r + 10125*p**13*q**2*r**2 + 358250*p**10*q**4*r**2 + 1908625*p**7*q**6*r**2 - 11744250*p**4*q**8*r**2 - 43383250*p*q**10*r**2 - 313875*p**11*q**2*r**3 - 2074875*p**8*q**4*r**3 + 52094750*p**5*q**6*r**3 + 264567500*p**2*q**8*r**3 + 796125*p**9*q**2*r**4 - 92486250*p**6*q**4*r**4 - 757957500*p**3*q**6*r**4 - 29354375*q**8*r**4 + 60970000*p**7*q**2*r**5 + 1112462500*p**4*q**4*r**5 + 571094375*p*q**6*r**5 - 685290000*p**5*q**2*r**6 - 2037800000*p**2*q**4*r**6 + 2279600000*p**3*q**2*r**7 + 849000000*q**4*r**7 - 1480000000*p*q**2*r**8 + 13500*p**13*q**3*s + 363000*p**10*q**5*s + 2861250*p**7*q**7*s + 8493750*p**4*q**9*s + 17031250*p*q**11*s - 60750*p**14*q*r*s - 2319750*p**11*q**3*r*s - 22674250*p**8*q**5*r*s - 74368750*p**5*q**7*r*s - 170578125*p**2*q**9*r*s + 2760750*p**12*q*r**2*s + 46719000*p**9*q**3*r**2*s + 163356375*p**6*q**5*r**2*s + 360295625*p**3*q**7*r**2*s - 195990625*q**9*r**2*s - 37341750*p**10*q*r**3*s - 194739375*p**7*q**3*r**3*s - 105463125*p**4*q**5*r**3*s - 415825000*p*q**7*r**3*s + 90180000*p**8*q*r**4*s - 990552500*p**5*q**3*r**4*s + 3519212500*p**2*q**5*r**4*s + 1112220000*p**6*q*r**5*s - 4508750000*p**3*q**3*r**5*s - 8159500000*q**5*r**5*s - 4356000000*p**4*q*r**6*s + 14615000000*p*q**3*r**6*s - 2160000000*p**2*q*r**7*s + 91125*p**15*s**2 + 3290625*p**12*q**2*s**2 + 35100000*p**9*q**4*s**2 + 175406250*p**6*q**6*s**2 + 629062500*p**3*q**8*s**2 + 910937500*q**10*s**2 - 5710500*p**13*r*s**2 - 100423125*p**10*q**2*r*s**2 - 604743750*p**7*q**4*r*s**2 - 2954843750*p**4*q**6*r*s**2 - 4587578125*p*q**8*r*s**2 + 116194500*p**11*r**2*s**2 + 1280716250*p**8*q**2*r**2*s**2 + 7401190625*p**5*q**4*r**2*s**2 + 11619937500*p**2*q**6*r**2*s**2 - 952173125*p**9*r**3*s**2 - 6519712500*p**6*q**2*r**3*s**2 - 10238593750*p**3*q**4*r**3*s**2 + 29984609375*q**6*r**3*s**2 + 2558300000*p**7*r**4*s**2 + 16225000000*p**4*q**2*r**4*s**2 - 64994140625*p*q**4*r**4*s**2 + 4202250000*p**5*r**5*s**2 + 46925000000*p**2*q**2*r**5*s**2 - 28950000000*p**3*r**6*s**2 - 1000000000*q**2*r**6*s**2 + 37000000000*p*r**7*s**2 - 48093750*p**11*q*s**3 - 673359375*p**8*q**3*s**3 - 2170312500*p**5*q**5*s**3 - 2466796875*p**2*q**7*s**3 + 647578125*p**9*q*r*s**3 + 597031250*p**6*q**3*r*s**3 - 7542578125*p**3*q**5*r*s**3 - 41125000000*q**7*r*s**3 - 2175828125*p**7*q*r**2*s**3 - 7101562500*p**4*q**3*r**2*s**3 + 100596875000*p*q**5*r**2*s**3 - 8984687500*p**5*q*r**3*s**3 - 120070312500*p**2*q**3*r**3*s**3 + 57343750000*p**3*q*r**4*s**3 + 9500000000*q**3*r**4*s**3 - 342875000000*p*q*r**5*s**3 + 400781250*p**10*s**4 + 8531250000*p**7*q**2*s**4 + 34033203125*p**4*q**4*s**4 + 42724609375*p*q**6*s**4 - 6289453125*p**8*r*s**4 - 24037109375*p**5*q**2*r*s**4 - 62626953125*p**2*q**4*r*s**4 + 17299218750*p**6*r**2*s**4 + 108357421875*p**3*q**2*r**2*s**4 - 55380859375*q**4*r**2*s**4 + 105648437500*p**4*r**3*s**4 + 1204228515625*p*q**2*r**3*s**4 - 365000000000*p**2*r**4*s**4 + 184375000000*r**5*s**4 - 32080078125*p**6*q*s**5 - 98144531250*p**3*q**3*s**5 + 93994140625*q**5*s**5 - 178955078125*p**4*q*r*s**5 - 1299804687500*p*q**3*r*s**5 + 332421875000*p**2*q*r**2*s**5 - 1195312500000*q*r**3*s**5 + 72021484375*p**5*s**6 + 323486328125*p**2*q**2*s**6 + 682373046875*p**3*r*s**6 + 2447509765625*q**2*r*s**6 - 3011474609375*p*r**2*s**6 + 3051757812500*p*q*s**7 - 7629394531250*s**8
b[3][4] = 1500*p**9*q**6 + 69625*p**6*q**8 + 590375*p**3*q**10 + 1035000*q**12 - 13500*p**10*q**4*r - 760625*p**7*q**6*r - 7904500*p**4*q**8*r - 18169250*p*q**10*r + 30375*p**11*q**2*r**2 + 2628625*p**8*q**4*r**2 + 37879000*p**5*q**6*r**2 + 121367500*p**2*q**8*r**2 - 2699250*p**9*q**2*r**3 - 76776875*p**6*q**4*r**3 - 403583125*p**3*q**6*r**3 - 78865625*q**8*r**3 + 60907500*p**7*q**2*r**4 + 735291250*p**4*q**4*r**4 + 781142500*p*q**6*r**4 - 558270000*p**5*q**2*r**5 - 2150725000*p**2*q**4*r**5 + 2015400000*p**3*q**2*r**6 + 1181000000*q**4*r**6 - 2220000000*p*q**2*r**7 + 40500*p**11*q**3*s + 1376500*p**8*q**5*s + 9953125*p**5*q**7*s + 9765625*p**2*q**9*s - 182250*p**12*q*r*s - 8859000*p**9*q**3*r*s - 82854500*p**6*q**5*r*s - 71511250*p**3*q**7*r*s + 273631250*q**9*r*s + 10233000*p**10*q*r**2*s + 179627500*p**7*q**3*r**2*s + 25164375*p**4*q**5*r**2*s - 2927290625*p*q**7*r**2*s - 171305000*p**8*q*r**3*s - 544768750*p**5*q**3*r**3*s + 7583437500*p**2*q**5*r**3*s + 1139860000*p**6*q*r**4*s - 6489375000*p**3*q**3*r**4*s - 9625375000*q**5*r**4*s - 1838000000*p**4*q*r**5*s + 19835000000*p*q**3*r**5*s - 3240000000*p**2*q*r**6*s + 273375*p**13*s**2 + 9753750*p**10*q**2*s**2 + 82575000*p**7*q**4*s**2 + 202265625*p**4*q**6*s**2 + 556093750*p*q**8*s**2 - 11552625*p**11*r*s**2 - 115813125*p**8*q**2*r*s**2 + 630590625*p**5*q**4*r*s**2 + 1347015625*p**2*q**6*r*s**2 + 157578750*p**9*r**2*s**2 - 689206250*p**6*q**2*r**2*s**2 - 4299609375*p**3*q**4*r**2*s**2 + 23896171875*q**6*r**2*s**2 - 1022437500*p**7*r**3*s**2 + 6648125000*p**4*q**2*r**3*s**2 - 52895312500*p*q**4*r**3*s**2 + 4401750000*p**5*r**4*s**2 + 26500000000*p**2*q**2*r**4*s**2 - 22125000000*p**3*r**5*s**2 - 1500000000*q**2*r**5*s**2 + 55500000000*p*r**6*s**2 - 137109375*p**9*q*s**3 - 1955937500*p**6*q**3*s**3 - 6790234375*p**3*q**5*s**3 - 16996093750*q**7*s**3 + 2146218750*p**7*q*r*s**3 + 6570312500*p**4*q**3*r*s**3 + 39918750000*p*q**5*r*s**3 - 7673281250*p**5*q*r**2*s**3 - 52000000000*p**2*q**3*r**2*s**3 + 50796875000*p**3*q*r**3*s**3 + 18750000000*q**3*r**3*s**3 - 399875000000*p*q*r**4*s**3 + 780468750*p**8*s**4 + 14455078125*p**5*q**2*s**4 + 10048828125*p**2*q**4*s**4 - 15113671875*p**6*r*s**4 + 39298828125*p**3*q**2*r*s**4 - 52138671875*q**4*r*s**4 + 45964843750*p**4*r**2*s**4 + 914414062500*p*q**2*r**2*s**4 + 1953125000*p**2*r**3*s**4 + 334375000000*r**4*s**4 - 149169921875*p**4*q*s**5 - 459716796875*p*q**3*s**5 - 325585937500*p**2*q*r*s**5 - 1462890625000*q*r**2*s**5 + 296630859375*p**3*s**6 + 1324462890625*q**2*s**6 + 307617187500*p*r*s**6
b[3][3] = -20750*p**7*q**6 - 290125*p**4*q**8 - 993000*p*q**10 + 146125*p**8*q**4*r + 2721500*p**5*q**6*r + 11833750*p**2*q**8*r - 237375*p**9*q**2*r**2 - 8167500*p**6*q**4*r**2 - 54605625*p**3*q**6*r**2 - 23802500*q**8*r**2 + 8927500*p**7*q**2*r**3 + 131184375*p**4*q**4*r**3 + 254695000*p*q**6*r**3 - 121561250*p**5*q**2*r**4 - 728003125*p**2*q**4*r**4 + 702550000*p**3*q**2*r**5 + 597312500*q**4*r**5 - 1202500000*p*q**2*r**6 - 194625*p**9*q**3*s - 1568875*p**6*q**5*s + 9685625*p**3*q**7*s + 74662500*q**9*s + 327375*p**10*q*r*s + 1280000*p**7*q**3*r*s - 123703750*p**4*q**5*r*s - 850121875*p*q**7*r*s - 7436250*p**8*q*r**2*s + 164820000*p**5*q**3*r**2*s + 2336659375*p**2*q**5*r**2*s + 32202500*p**6*q*r**3*s - 2429765625*p**3*q**3*r**3*s - 4318609375*q**5*r**3*s + 148000000*p**4*q*r**4*s + 9902812500*p*q**3*r**4*s - 1755000000*p**2*q*r**5*s + 1154250*p**11*s**2 + 36821250*p**8*q**2*s**2 + 372825000*p**5*q**4*s**2 + 1170921875*p**2*q**6*s**2 - 38913750*p**9*r*s**2 - 797071875*p**6*q**2*r*s**2 - 2848984375*p**3*q**4*r*s**2 + 7651406250*q**6*r*s**2 + 415068750*p**7*r**2*s**2 + 3151328125*p**4*q**2*r**2*s**2 - 17696875000*p*q**4*r**2*s**2 - 725968750*p**5*r**3*s**2 + 5295312500*p**2*q**2*r**3*s**2 - 8581250000*p**3*r**4*s**2 - 812500000*q**2*r**4*s**2 + 30062500000*p*r**5*s**2 - 110109375*p**7*q*s**3 - 1976562500*p**4*q**3*s**3 - 6329296875*p*q**5*s**3 + 2256328125*p**5*q*r*s**3 + 8554687500*p**2*q**3*r*s**3 + 12947265625*p**3*q*r**2*s**3 + 7984375000*q**3*r**2*s**3 - 167039062500*p*q*r**3*s**3 + 1181250000*p**6*s**4 + 17873046875*p**3*q**2*s**4 - 20449218750*q**4*s**4 - 16265625000*p**4*r*s**4 + 260869140625*p*q**2*r*s**4 + 21025390625*p**2*r**2*s**4 + 207617187500*r**3*s**4 - 207177734375*p**2*q*s**5 - 615478515625*q*r*s**5 + 301513671875*p*s**6
b[3][2] = 53125*p**5*q**6 + 425000*p**2*q**8 - 394375*p**6*q**4*r - 4301875*p**3*q**6*r - 3225000*q**8*r + 851250*p**7*q**2*r**2 + 16910625*p**4*q**4*r**2 + 44210000*p*q**6*r**2 - 20474375*p**5*q**2*r**3 - 147190625*p**2*q**4*r**3 + 163975000*p**3*q**2*r**4 + 156812500*q**4*r**4 - 323750000*p*q**2*r**5 - 99375*p**7*q**3*s - 6395000*p**4*q**5*s - 49243750*p*q**7*s - 1164375*p**8*q*r*s + 4465625*p**5*q**3*r*s + 205546875*p**2*q**5*r*s + 12163750*p**6*q*r**2*s - 315546875*p**3*q**3*r**2*s - 946453125*q**5*r**2*s - 23500000*p**4*q*r**3*s + 2313437500*p*q**3*r**3*s - 472500000*p**2*q*r**4*s + 1316250*p**9*s**2 + 22715625*p**6*q**2*s**2 + 206953125*p**3*q**4*s**2 + 1220000000*q**6*s**2 - 20953125*p**7*r*s**2 - 277656250*p**4*q**2*r*s**2 - 3317187500*p*q**4*r*s**2 + 293734375*p**5*r**2*s**2 + 1351562500*p**2*q**2*r**2*s**2 - 2278125000*p**3*r**3*s**2 - 218750000*q**2*r**3*s**2 + 8093750000*p*r**4*s**2 - 9609375*p**5*q*s**3 + 240234375*p**2*q**3*s**3 + 2310546875*p**3*q*r*s**3 + 1171875000*q**3*r*s**3 - 33460937500*p*q*r**2*s**3 + 2185546875*p**4*s**4 + 32578125000*p*q**2*s**4 - 8544921875*p**2*r*s**4 + 58398437500*r**2*s**4 - 114013671875*q*s**5
b[3][1] = -16250*p**6*q**4 - 191875*p**3*q**6 - 495000*q**8 + 73125*p**7*q**2*r + 1437500*p**4*q**4*r + 5866250*p*q**6*r - 2043125*p**5*q**2*r**2 - 17218750*p**2*q**4*r**2 + 19106250*p**3*q**2*r**3 + 34015625*q**4*r**3 - 69375000*p*q**2*r**4 - 219375*p**8*q*s - 2846250*p**5*q**3*s - 8021875*p**2*q**5*s + 3420000*p**6*q*r*s - 1640625*p**3*q**3*r*s - 152468750*q**5*r*s + 3062500*p**4*q*r**2*s + 381171875*p*q**3*r**2*s - 101250000*p**2*q*r**3*s + 2784375*p**7*s**2 + 43515625*p**4*q**2*s**2 + 115625000*p*q**4*s**2 - 48140625*p**5*r*s**2 - 307421875*p**2*q**2*r*s**2 - 25781250*p**3*r**2*s**2 - 46875000*q**2*r**2*s**2 + 1734375000*p*r**3*s**2 - 128906250*p**3*q*s**3 + 339843750*q**3*s**3 - 4583984375*p*q*r*s**3 + 2236328125*p**2*s**4 + 12255859375*r*s**4
b[3][0] = 31875*p**4*q**4 + 255000*p*q**6 - 82500*p**5*q**2*r - 1106250*p**2*q**4*r + 1653125*p**3*q**2*r**2 + 5187500*q**4*r**2 - 11562500*p*q**2*r**3 - 118125*p**6*q*s - 3593750*p**3*q**3*s - 23812500*q**5*s + 4656250*p**4*q*r*s + 67109375*p*q**3*r*s - 16875000*p**2*q*r**2*s - 984375*p**5*s**2 - 19531250*p**2*q**2*s**2 - 37890625*p**3*r*s**2 - 7812500*q**2*r*s**2 + 289062500*p*r**2*s**2 - 529296875*p*q*s**3 + 2343750000*s**4
b[4][5] = 600*p**10*q**10 + 13850*p**7*q**12 + 106150*p**4*q**14 + 270000*p*q**16 - 9300*p**11*q**8*r - 234075*p**8*q**10*r - 1942825*p**5*q**12*r - 5319900*p**2*q**14*r + 52050*p**12*q**6*r**2 + 1481025*p**9*q**8*r**2 + 13594450*p**6*q**10*r**2 + 40062750*p**3*q**12*r**2 - 3569400*q**14*r**2 - 122175*p**13*q**4*r**3 - 4260350*p**10*q**6*r**3 - 45052375*p**7*q**8*r**3 - 142634900*p**4*q**10*r**3 + 54186350*p*q**12*r**3 + 97200*p**14*q**2*r**4 + 5284225*p**11*q**4*r**4 + 70389525*p**8*q**6*r**4 + 232732850*p**5*q**8*r**4 - 318849400*p**2*q**10*r**4 - 2046000*p**12*q**2*r**5 - 43874125*p**9*q**4*r**5 - 107411850*p**6*q**6*r**5 + 948310700*p**3*q**8*r**5 - 34763575*q**10*r**5 + 5915600*p**10*q**2*r**6 - 115887800*p**7*q**4*r**6 - 1649542400*p**4*q**6*r**6 + 224468875*p*q**8*r**6 + 120252800*p**8*q**2*r**7 + 1779902000*p**5*q**4*r**7 - 288250000*p**2*q**6*r**7 - 915200000*p**6*q**2*r**8 - 1164000000*p**3*q**4*r**8 - 444200000*q**6*r**8 + 2502400000*p**4*q**2*r**9 + 1984000000*p*q**4*r**9 - 2880000000*p**2*q**2*r**10 + 20700*p**12*q**7*s + 551475*p**9*q**9*s + 5194875*p**6*q**11*s + 18985000*p**3*q**13*s + 16875000*q**15*s - 218700*p**13*q**5*r*s - 6606475*p**10*q**7*r*s - 69770850*p**7*q**9*r*s - 285325500*p**4*q**11*r*s - 292005000*p*q**13*r*s + 694575*p**14*q**3*r**2*s + 26187750*p**11*q**5*r**2*s + 328992825*p**8*q**7*r**2*s + 1573292400*p**5*q**9*r**2*s + 1930043875*p**2*q**11*r**2*s - 583200*p**15*q*r**3*s - 37263225*p**12*q**3*r**3*s - 638579425*p**9*q**5*r**3*s - 3920212225*p**6*q**7*r**3*s - 6327336875*p**3*q**9*r**3*s + 440969375*q**11*r**3*s + 13446000*p**13*q*r**4*s + 462330325*p**10*q**3*r**4*s + 4509088275*p**7*q**5*r**4*s + 11709795625*p**4*q**7*r**4*s - 3579565625*p*q**9*r**4*s - 85033600*p**11*q*r**5*s - 2136801600*p**8*q**3*r**5*s - 12221575800*p**5*q**5*r**5*s + 9431044375*p**2*q**7*r**5*s + 10643200*p**9*q*r**6*s + 4565594000*p**6*q**3*r**6*s - 1778590000*p**3*q**5*r**6*s + 4842175000*q**7*r**6*s + 712320000*p**7*q*r**7*s - 16182000000*p**4*q**3*r**7*s - 21918000000*p*q**5*r**7*s - 742400000*p**5*q*r**8*s + 31040000000*p**2*q**3*r**8*s + 1280000000*p**3*q*r**9*s + 4800000000*q**3*r**9*s + 230850*p**14*q**4*s**2 + 7373250*p**11*q**6*s**2 + 85045625*p**8*q**8*s**2 + 399140625*p**5*q**10*s**2 + 565031250*p**2*q**12*s**2 - 1257525*p**15*q**2*r*s**2 - 52728975*p**12*q**4*r*s**2 - 743466375*p**9*q**6*r*s**2 - 4144915000*p**6*q**8*r*s**2 - 7102690625*p**3*q**10*r*s**2 - 1389937500*q**12*r*s**2 + 874800*p**16*r**2*s**2 + 89851275*p**13*q**2*r**2*s**2 + 1897236775*p**10*q**4*r**2*s**2 + 14144163000*p**7*q**6*r**2*s**2 + 31942921875*p**4*q**8*r**2*s**2 + 13305118750*p*q**10*r**2*s**2 - 23004000*p**14*r**3*s**2 - 1450715475*p**11*q**2*r**3*s**2 - 19427105000*p**8*q**4*r**3*s**2 - 70634028750*p**5*q**6*r**3*s**2 - 47854218750*p**2*q**8*r**3*s**2 + 204710400*p**12*r**4*s**2 + 10875135000*p**9*q**2*r**4*s**2 + 83618806250*p**6*q**4*r**4*s**2 + 62744500000*p**3*q**6*r**4*s**2 - 19806718750*q**8*r**4*s**2 - 757094800*p**10*r**5*s**2 - 37718030000*p**7*q**2*r**5*s**2 - 22479500000*p**4*q**4*r**5*s**2 + 91556093750*p*q**6*r**5*s**2 + 2306320000*p**8*r**6*s**2 + 55539600000*p**5*q**2*r**6*s**2 - 112851250000*p**2*q**4*r**6*s**2 - 10720000000*p**6*r**7*s**2 - 64720000000*p**3*q**2*r**7*s**2 - 59925000000*q**4*r**7*s**2 + 28000000000*p**4*r**8*s**2 + 28000000000*p*q**2*r**8*s**2 - 24000000000*p**2*r**9*s**2 + 820125*p**16*q*s**3 + 36804375*p**13*q**3*s**3 + 552225000*p**10*q**5*s**3 + 3357593750*p**7*q**7*s**3 + 7146562500*p**4*q**9*s**3 + 3851562500*p*q**11*s**3 - 92400750*p**14*q*r*s**3 - 
2350175625*p**11*q**3*r*s**3 - 19470640625*p**8*q**5*r*s**3 - 52820593750*p**5*q**7*r*s**3 - 45447734375*p**2*q**9*r*s**3 + 1824363000*p**12*q*r**2*s**3 + 31435234375*p**9*q**3*r**2*s**3 + 141717537500*p**6*q**5*r**2*s**3 + 228370781250*p**3*q**7*r**2*s**3 + 34610078125*q**9*r**2*s**3 - 17591825625*p**10*q*r**3*s**3 - 188927187500*p**7*q**3*r**3*s**3 - 502088984375*p**4*q**5*r**3*s**3 - 187849296875*p*q**7*r**3*s**3 + 75577750000*p**8*q*r**4*s**3 + 342800000000*p**5*q**3*r**4*s**3 + 295384296875*p**2*q**5*r**4*s**3 - 107681250000*p**6*q*r**5*s**3 + 53330000000*p**3*q**3*r**5*s**3 + 271586875000*q**5*r**5*s**3 - 26410000000*p**4*q*r**6*s**3 - 188200000000*p*q**3*r**6*s**3 + 92000000000*p**2*q*r**7*s**3 + 120000000000*q*r**8*s**3 + 47840625*p**15*s**4 + 1150453125*p**12*q**2*s**4 + 9229453125*p**9*q**4*s**4 + 24954687500*p**6*q**6*s**4 + 22978515625*p**3*q**8*s**4 + 1367187500*q**10*s**4 - 1193737500*p**13*r*s**4 - 20817843750*p**10*q**2*r*s**4 - 98640000000*p**7*q**4*r*s**4 - 225767187500*p**4*q**6*r*s**4 - 74707031250*p*q**8*r*s**4 + 13431318750*p**11*r**2*s**4 + 188709843750*p**8*q**2*r**2*s**4 + 875157656250*p**5*q**4*r**2*s**4 + 593812890625*p**2*q**6*r**2*s**4 - 69869296875*p**9*r**3*s**4 - 854811093750*p**6*q**2*r**3*s**4 - 1730658203125*p**3*q**4*r**3*s**4 - 570867187500*q**6*r**3*s**4 + 162075625000*p**7*r**4*s**4 + 1536375000000*p**4*q**2*r**4*s**4 + 765156250000*p*q**4*r**4*s**4 - 165988750000*p**5*r**5*s**4 - 728968750000*p**2*q**2*r**5*s**4 + 121500000000*p**3*r**6*s**4 - 1039375000000*q**2*r**6*s**4 - 100000000000*p*r**7*s**4 - 379687500*p**11*q*s**5 - 11607421875*p**8*q**3*s**5 - 20830078125*p**5*q**5*s**5 - 33691406250*p**2*q**7*s**5 - 41491406250*p**9*q*r*s**5 - 419054687500*p**6*q**3*r*s**5 - 129511718750*p**3*q**5*r*s**5 + 311767578125*q**7*r*s**5 + 620116015625*p**7*q*r**2*s**5 + 1154687500000*p**4*q**3*r**2*s**5 + 36455078125*p*q**5*r**2*s**5 - 2265953125000*p**5*q*r**3*s**5 - 1509521484375*p**2*q**3*r**3*s**5 + 2530468750000*p**3*q*r**4*s**5 + 3259765625000*q**3*r**4*s**5 + 93750000000*p*q*r**5*s**5 + 23730468750*p**10*s**6 + 243603515625*p**7*q**2*s**6 + 341552734375*p**4*q**4*s**6 - 12207031250*p*q**6*s**6 - 357099609375*p**8*r*s**6 - 298193359375*p**5*q**2*r*s**6 + 406738281250*p**2*q**4*r*s**6 + 1615683593750*p**6*r**2*s**6 + 558593750000*p**3*q**2*r**2*s**6 - 2811035156250*q**4*r**2*s**6 - 2960937500000*p**4*r**3*s**6 - 3802246093750*p*q**2*r**3*s**6 + 2347656250000*p**2*r**4*s**6 - 671875000000*r**5*s**6 - 651855468750*p**6*q*s**7 - 1458740234375*p**3*q**3*s**7 - 152587890625*q**5*s**7 + 1628417968750*p**4*q*r*s**7 + 3948974609375*p*q**3*r*s**7 - 916748046875*p**2*q*r**2*s**7 + 1611328125000*q*r**3*s**7 + 640869140625*p**5*s**8 + 1068115234375*p**2*q**2*s**8 - 2044677734375*p**3*r*s**8 - 3204345703125*q**2*r*s**8 + 1739501953125*p*r**2*s**8
b[4][4] = -600*p**11*q**8 - 14050*p**8*q**10 - 109100*p**5*q**12 - 280800*p**2*q**14 + 7200*p**12*q**6*r + 188700*p**9*q**8*r + 1621725*p**6*q**10*r + 4577075*p**3*q**12*r + 5400*q**14*r - 28350*p**13*q**4*r**2 - 910600*p**10*q**6*r**2 - 9237975*p**7*q**8*r**2 - 30718900*p**4*q**10*r**2 - 5575950*p*q**12*r**2 + 36450*p**14*q**2*r**3 + 1848125*p**11*q**4*r**3 + 25137775*p**8*q**6*r**3 + 109591450*p**5*q**8*r**3 + 70627650*p**2*q**10*r**3 - 1317150*p**12*q**2*r**4 - 32857100*p**9*q**4*r**4 - 219125575*p**6*q**6*r**4 - 327565875*p**3*q**8*r**4 - 13011875*q**10*r**4 + 16484150*p**10*q**2*r**5 + 222242250*p**7*q**4*r**5 + 642173750*p**4*q**6*r**5 + 101263750*p*q**8*r**5 - 79345000*p**8*q**2*r**6 - 433180000*p**5*q**4*r**6 - 93731250*p**2*q**6*r**6 - 74300000*p**6*q**2*r**7 - 1057900000*p**3*q**4*r**7 - 591175000*q**6*r**7 + 1891600000*p**4*q**2*r**8 + 2796000000*p*q**4*r**8 - 4320000000*p**2*q**2*r**9 - 16200*p**13*q**5*s - 359500*p**10*q**7*s - 2603825*p**7*q**9*s - 4590375*p**4*q**11*s + 12352500*p*q**13*s + 121500*p**14*q**3*r*s + 3227400*p**11*q**5*r*s + 27301725*p**8*q**7*r*s + 59480975*p**5*q**9*r*s - 137308875*p**2*q**11*r*s - 218700*p**15*q*r**2*s - 8903925*p**12*q**3*r**2*s - 100918225*p**9*q**5*r**2*s - 325291300*p**6*q**7*r**2*s + 365705000*p**3*q**9*r**2*s + 94342500*q**11*r**2*s + 7632900*p**13*q*r**3*s + 162995400*p**10*q**3*r**3*s + 974558975*p**7*q**5*r**3*s + 930991250*p**4*q**7*r**3*s - 495368750*p*q**9*r**3*s - 97344900*p**11*q*r**4*s - 1406739250*p**8*q**3*r**4*s - 5572526250*p**5*q**5*r**4*s - 1903987500*p**2*q**7*r**4*s + 678550000*p**9*q*r**5*s + 8176215000*p**6*q**3*r**5*s + 18082050000*p**3*q**5*r**5*s + 5435843750*q**7*r**5*s - 2979800000*p**7*q*r**6*s - 29163500000*p**4*q**3*r**6*s - 27417500000*p*q**5*r**6*s + 6282400000*p**5*q*r**7*s + 48690000000*p**2*q**3*r**7*s - 2880000000*p**3*q*r**8*s + 7200000000*q**3*r**8*s - 109350*p**15*q**2*s**2 - 2405700*p**12*q**4*s**2 - 16125250*p**9*q**6*s**2 - 4930000*p**6*q**8*s**2 + 201150000*p**3*q**10*s**2 - 243000000*q**12*s**2 + 328050*p**16*r*s**2 + 10552275*p**13*q**2*r*s**2 + 88019100*p**10*q**4*r*s**2 - 4208625*p**7*q**6*r*s**2 - 1920390625*p**4*q**8*r*s**2 + 1759537500*p*q**10*r*s**2 - 11955600*p**14*r**2*s**2 - 196375050*p**11*q**2*r**2*s**2 - 555196250*p**8*q**4*r**2*s**2 + 4213270000*p**5*q**6*r**2*s**2 - 157468750*p**2*q**8*r**2*s**2 + 162656100*p**12*r**3*s**2 + 1880870000*p**9*q**2*r**3*s**2 + 753684375*p**6*q**4*r**3*s**2 - 25423062500*p**3*q**6*r**3*s**2 - 14142031250*q**8*r**3*s**2 - 1251948750*p**10*r**4*s**2 - 12524475000*p**7*q**2*r**4*s**2 + 18067656250*p**4*q**4*r**4*s**2 + 60531875000*p*q**6*r**4*s**2 + 6827725000*p**8*r**5*s**2 + 57157000000*p**5*q**2*r**5*s**2 - 75844531250*p**2*q**4*r**5*s**2 - 24452500000*p**6*r**6*s**2 - 144950000000*p**3*q**2*r**6*s**2 - 82109375000*q**4*r**6*s**2 + 46950000000*p**4*r**7*s**2 + 60000000000*p*q**2*r**7*s**2 - 36000000000*p**2*r**8*s**2 + 1549125*p**14*q*s**3 + 51873750*p**11*q**3*s**3 + 599781250*p**8*q**5*s**3 + 2421156250*p**5*q**7*s**3 - 1693515625*p**2*q**9*s**3 - 104884875*p**12*q*r*s**3 - 1937437500*p**9*q**3*r*s**3 - 11461053125*p**6*q**5*r*s**3 + 10299375000*p**3*q**7*r*s**3 + 10551250000*q**9*r*s**3 + 1336263750*p**10*q*r**2*s**3 + 23737250000*p**7*q**3*r**2*s**3 + 57136718750*p**4*q**5*r**2*s**3 - 8288906250*p*q**7*r**2*s**3 - 10907218750*p**8*q*r**3*s**3 - 160615000000*p**5*q**3*r**3*s**3 - 111134687500*p**2*q**5*r**3*s**3 + 46743125000*p**6*q*r**4*s**3 + 570509375000*p**3*q**3*r**4*s**3 + 274839843750*q**5*r**4*s**3 - 73312500000*p**4*q*r**5*s**3 - 
145437500000*p*q**3*r**5*s**3 + 8750000000*p**2*q*r**6*s**3 + 180000000000*q*r**7*s**3 + 15946875*p**13*s**4 + 1265625*p**10*q**2*s**4 - 3282343750*p**7*q**4*s**4 - 38241406250*p**4*q**6*s**4 - 40136718750*p*q**8*s**4 - 113146875*p**11*r*s**4 - 2302734375*p**8*q**2*r*s**4 + 68450156250*p**5*q**4*r*s**4 + 177376562500*p**2*q**6*r*s**4 + 3164062500*p**9*r**2*s**4 + 14392890625*p**6*q**2*r**2*s**4 - 543781250000*p**3*q**4*r**2*s**4 - 319769531250*q**6*r**2*s**4 - 21048281250*p**7*r**3*s**4 - 240687500000*p**4*q**2*r**3*s**4 - 228164062500*p*q**4*r**3*s**4 + 23062500000*p**5*r**4*s**4 + 300410156250*p**2*q**2*r**4*s**4 + 93437500000*p**3*r**5*s**4 - 1141015625000*q**2*r**5*s**4 - 187500000000*p*r**6*s**4 + 1761328125*p**9*q*s**5 - 3177734375*p**6*q**3*s**5 + 60019531250*p**3*q**5*s**5 + 108398437500*q**7*s**5 + 24106640625*p**7*q*r*s**5 + 429589843750*p**4*q**3*r*s**5 + 410371093750*p*q**5*r*s**5 - 23582031250*p**5*q*r**2*s**5 + 202441406250*p**2*q**3*r**2*s**5 - 383203125000*p**3*q*r**3*s**5 + 2232910156250*q**3*r**3*s**5 + 1500000000000*p*q*r**4*s**5 - 13710937500*p**8*s**6 - 202832031250*p**5*q**2*s**6 - 531738281250*p**2*q**4*s**6 + 73330078125*p**6*r*s**6 - 3906250000*p**3*q**2*r*s**6 - 1275878906250*q**4*r*s**6 - 121093750000*p**4*r**2*s**6 - 3308593750000*p*q**2*r**2*s**6 + 18066406250*p**2*r**3*s**6 - 244140625000*r**4*s**6 + 327148437500*p**4*q*s**7 + 1672363281250*p*q**3*s**7 + 446777343750*p**2*q*r*s**7 + 1232910156250*q*r**2*s**7 - 274658203125*p**3*s**8 - 1068115234375*q**2*s**8 - 61035156250*p*r*s**8
b[4][3] = 200*p**9*q**8 + 7550*p**6*q**10 + 78650*p**3*q**12 + 248400*q**14 - 4800*p**10*q**6*r - 164300*p**7*q**8*r - 1709575*p**4*q**10*r - 5566500*p*q**12*r + 31050*p**11*q**4*r**2 + 1116175*p**8*q**6*r**2 + 12674650*p**5*q**8*r**2 + 45333850*p**2*q**10*r**2 - 60750*p**12*q**2*r**3 - 2872725*p**9*q**4*r**3 - 40403050*p**6*q**6*r**3 - 173564375*p**3*q**8*r**3 - 11242250*q**10*r**3 + 2174100*p**10*q**2*r**4 + 54010000*p**7*q**4*r**4 + 331074875*p**4*q**6*r**4 + 114173750*p*q**8*r**4 - 24858500*p**8*q**2*r**5 - 300875000*p**5*q**4*r**5 - 319430625*p**2*q**6*r**5 + 69810000*p**6*q**2*r**6 - 23900000*p**3*q**4*r**6 - 294662500*q**6*r**6 + 524200000*p**4*q**2*r**7 + 1432000000*p*q**4*r**7 - 2340000000*p**2*q**2*r**8 + 5400*p**11*q**5*s + 310400*p**8*q**7*s + 3591725*p**5*q**9*s + 11556750*p**2*q**11*s - 105300*p**12*q**3*r*s - 4234650*p**9*q**5*r*s - 49928875*p**6*q**7*r*s - 174078125*p**3*q**9*r*s + 18000000*q**11*r*s + 364500*p**13*q*r**2*s + 15763050*p**10*q**3*r**2*s + 220187400*p**7*q**5*r**2*s + 929609375*p**4*q**7*r**2*s - 43653125*p*q**9*r**2*s - 13427100*p**11*q*r**3*s - 346066250*p**8*q**3*r**3*s - 2287673375*p**5*q**5*r**3*s - 1403903125*p**2*q**7*r**3*s + 184586000*p**9*q*r**4*s + 2983460000*p**6*q**3*r**4*s + 8725818750*p**3*q**5*r**4*s + 2527734375*q**7*r**4*s - 1284480000*p**7*q*r**5*s - 13138250000*p**4*q**3*r**5*s - 14001625000*p*q**5*r**5*s + 4224800000*p**5*q*r**6*s + 27460000000*p**2*q**3*r**6*s - 3760000000*p**3*q*r**7*s + 3900000000*q**3*r**7*s + 36450*p**13*q**2*s**2 + 2765475*p**10*q**4*s**2 + 34027625*p**7*q**6*s**2 + 97375000*p**4*q**8*s**2 - 88275000*p*q**10*s**2 - 546750*p**14*r*s**2 - 21961125*p**11*q**2*r*s**2 - 273059375*p**8*q**4*r*s**2 - 761562500*p**5*q**6*r*s**2 + 1869656250*p**2*q**8*r*s**2 + 20545650*p**12*r**2*s**2 + 473934375*p**9*q**2*r**2*s**2 + 1758053125*p**6*q**4*r**2*s**2 - 8743359375*p**3*q**6*r**2*s**2 - 4154375000*q**8*r**2*s**2 - 296559000*p**10*r**3*s**2 - 4065056250*p**7*q**2*r**3*s**2 - 186328125*p**4*q**4*r**3*s**2 + 19419453125*p*q**6*r**3*s**2 + 2326262500*p**8*r**4*s**2 + 21189375000*p**5*q**2*r**4*s**2 - 26301953125*p**2*q**4*r**4*s**2 - 10513250000*p**6*r**5*s**2 - 69937500000*p**3*q**2*r**5*s**2 - 42257812500*q**4*r**5*s**2 + 23375000000*p**4*r**6*s**2 + 40750000000*p*q**2*r**6*s**2 - 19500000000*p**2*r**7*s**2 + 4009500*p**12*q*s**3 + 36140625*p**9*q**3*s**3 - 335459375*p**6*q**5*s**3 - 2695312500*p**3*q**7*s**3 - 1486250000*q**9*s**3 + 102515625*p**10*q*r*s**3 + 4006812500*p**7*q**3*r*s**3 + 27589609375*p**4*q**5*r*s**3 + 20195312500*p*q**7*r*s**3 - 2792812500*p**8*q*r**2*s**3 - 44115156250*p**5*q**3*r**2*s**3 - 72609453125*p**2*q**5*r**2*s**3 + 18752500000*p**6*q*r**3*s**3 + 218140625000*p**3*q**3*r**3*s**3 + 109940234375*q**5*r**3*s**3 - 21893750000*p**4*q*r**4*s**3 - 65187500000*p*q**3*r**4*s**3 - 31000000000*p**2*q*r**5*s**3 + 97500000000*q*r**6*s**3 - 86568750*p**11*s**4 - 1955390625*p**8*q**2*s**4 - 8960781250*p**5*q**4*s**4 - 1357812500*p**2*q**6*s**4 + 1657968750*p**9*r*s**4 + 10467187500*p**6*q**2*r*s**4 - 55292968750*p**3*q**4*r*s**4 - 60683593750*q**6*r*s**4 - 11473593750*p**7*r**2*s**4 - 123281250000*p**4*q**2*r**2*s**4 - 164912109375*p*q**4*r**2*s**4 + 13150000000*p**5*r**3*s**4 + 190751953125*p**2*q**2*r**3*s**4 + 61875000000*p**3*r**4*s**4 - 467773437500*q**2*r**4*s**4 - 118750000000*p*r**5*s**4 + 7583203125*p**7*q*s**5 + 54638671875*p**4*q**3*s**5 + 39423828125*p*q**5*s**5 + 32392578125*p**5*q*r*s**5 + 278515625000*p**2*q**3*r*s**5 - 298339843750*p**3*q*r**2*s**5 + 560791015625*q**3*r**2*s**5 + 
720703125000*p*q*r**3*s**5 - 19687500000*p**6*s**6 - 159667968750*p**3*q**2*s**6 - 72265625000*q**4*s**6 + 116699218750*p**4*r*s**6 - 924072265625*p*q**2*r*s**6 - 156005859375*p**2*r**2*s**6 - 112304687500*r**3*s**6 + 349121093750*p**2*q*s**7 + 396728515625*q*r*s**7 - 213623046875*p*s**8
b[4][2] = -600*p**10*q**6 - 18450*p**7*q**8 - 174000*p**4*q**10 - 518400*p*q**12 + 5400*p**11*q**4*r + 197550*p**8*q**6*r + 2147775*p**5*q**8*r + 7219800*p**2*q**10*r - 12150*p**12*q**2*r**2 - 662200*p**9*q**4*r**2 - 9274775*p**6*q**6*r**2 - 38330625*p**3*q**8*r**2 - 5508000*q**10*r**2 + 656550*p**10*q**2*r**3 + 16233750*p**7*q**4*r**3 + 97335875*p**4*q**6*r**3 + 58271250*p*q**8*r**3 - 9845500*p**8*q**2*r**4 - 119464375*p**5*q**4*r**4 - 194431875*p**2*q**6*r**4 + 49465000*p**6*q**2*r**5 + 166000000*p**3*q**4*r**5 - 80793750*q**6*r**5 + 54400000*p**4*q**2*r**6 + 377750000*p*q**4*r**6 - 630000000*p**2*q**2*r**7 - 16200*p**12*q**3*s - 459300*p**9*q**5*s - 4207225*p**6*q**7*s - 10827500*p**3*q**9*s + 13635000*q**11*s + 72900*p**13*q*r*s + 2877300*p**10*q**3*r*s + 33239700*p**7*q**5*r*s + 107080625*p**4*q**7*r*s - 114975000*p*q**9*r*s - 3601800*p**11*q*r**2*s - 75214375*p**8*q**3*r**2*s - 387073250*p**5*q**5*r**2*s + 55540625*p**2*q**7*r**2*s + 53793000*p**9*q*r**3*s + 687176875*p**6*q**3*r**3*s + 1670018750*p**3*q**5*r**3*s + 665234375*q**7*r**3*s - 391570000*p**7*q*r**4*s - 3420125000*p**4*q**3*r**4*s - 3609625000*p*q**5*r**4*s + 1365600000*p**5*q*r**5*s + 7236250000*p**2*q**3*r**5*s - 1220000000*p**3*q*r**6*s + 1050000000*q**3*r**6*s - 109350*p**14*s**2 - 3065850*p**11*q**2*s**2 - 26908125*p**8*q**4*s**2 - 44606875*p**5*q**6*s**2 + 269812500*p**2*q**8*s**2 + 5200200*p**12*r*s**2 + 81826875*p**9*q**2*r*s**2 + 155378125*p**6*q**4*r*s**2 - 1936203125*p**3*q**6*r*s**2 - 998437500*q**8*r*s**2 - 77145750*p**10*r**2*s**2 - 745528125*p**7*q**2*r**2*s**2 + 683437500*p**4*q**4*r**2*s**2 + 4083359375*p*q**6*r**2*s**2 + 593287500*p**8*r**3*s**2 + 4799375000*p**5*q**2*r**3*s**2 - 4167578125*p**2*q**4*r**3*s**2 - 2731125000*p**6*r**4*s**2 - 18668750000*p**3*q**2*r**4*s**2 - 10480468750*q**4*r**4*s**2 + 6200000000*p**4*r**5*s**2 + 11750000000*p*q**2*r**5*s**2 - 5250000000*p**2*r**6*s**2 + 26527500*p**10*q*s**3 + 526031250*p**7*q**3*s**3 + 3160703125*p**4*q**5*s**3 + 2650312500*p*q**7*s**3 - 448031250*p**8*q*r*s**3 - 6682968750*p**5*q**3*r*s**3 - 11642812500*p**2*q**5*r*s**3 + 2553203125*p**6*q*r**2*s**3 + 37234375000*p**3*q**3*r**2*s**3 + 21871484375*q**5*r**2*s**3 + 2803125000*p**4*q*r**3*s**3 - 10796875000*p*q**3*r**3*s**3 - 16656250000*p**2*q*r**4*s**3 + 26250000000*q*r**5*s**3 - 75937500*p**9*s**4 - 704062500*p**6*q**2*s**4 - 8363281250*p**3*q**4*s**4 - 10398437500*q**6*s**4 + 197578125*p**7*r*s**4 - 16441406250*p**4*q**2*r*s**4 - 24277343750*p*q**4*r*s**4 - 5716015625*p**5*r**2*s**4 + 31728515625*p**2*q**2*r**2*s**4 + 27031250000*p**3*r**3*s**4 - 92285156250*q**2*r**3*s**4 - 33593750000*p*r**4*s**4 + 10394531250*p**5*q*s**5 + 38037109375*p**2*q**3*s**5 - 48144531250*p**3*q*r*s**5 + 74462890625*q**3*r*s**5 + 121093750000*p*q*r**2*s**5 - 2197265625*p**4*s**6 - 92529296875*p*q**2*s**6 + 15380859375*p**2*r*s**6 - 31738281250*r**2*s**6 + 54931640625*q*s**7
b[4][1] = 200*p**8*q**6 + 2950*p**5*q**8 + 10800*p**2*q**10 - 1800*p**9*q**4*r - 49650*p**6*q**6*r - 403375*p**3*q**8*r - 999000*q**10*r + 4050*p**10*q**2*r**2 + 236625*p**7*q**4*r**2 + 3109500*p**4*q**6*r**2 + 11463750*p*q**8*r**2 - 331500*p**8*q**2*r**3 - 7818125*p**5*q**4*r**3 - 41411250*p**2*q**6*r**3 + 4782500*p**6*q**2*r**4 + 47475000*p**3*q**4*r**4 - 16728125*q**6*r**4 - 8700000*p**4*q**2*r**5 + 81750000*p*q**4*r**5 - 135000000*p**2*q**2*r**6 + 5400*p**10*q**3*s + 144200*p**7*q**5*s + 939375*p**4*q**7*s + 1012500*p*q**9*s - 24300*p**11*q*r*s - 1169250*p**8*q**3*r*s - 14027250*p**5*q**5*r*s - 44446875*p**2*q**7*r*s + 2011500*p**9*q*r**2*s + 49330625*p**6*q**3*r**2*s + 272009375*p**3*q**5*r**2*s + 104062500*q**7*r**2*s - 34660000*p**7*q*r**3*s - 455062500*p**4*q**3*r**3*s - 625906250*p*q**5*r**3*s + 210200000*p**5*q*r**4*s + 1298750000*p**2*q**3*r**4*s - 240000000*p**3*q*r**5*s + 225000000*q**3*r**5*s + 36450*p**12*s**2 + 1231875*p**9*q**2*s**2 + 10712500*p**6*q**4*s**2 + 21718750*p**3*q**6*s**2 + 16875000*q**8*s**2 - 2814750*p**10*r*s**2 - 67612500*p**7*q**2*r*s**2 - 345156250*p**4*q**4*r*s**2 - 283125000*p*q**6*r*s**2 + 51300000*p**8*r**2*s**2 + 734531250*p**5*q**2*r**2*s**2 + 1267187500*p**2*q**4*r**2*s**2 - 384312500*p**6*r**3*s**2 - 3912500000*p**3*q**2*r**3*s**2 - 1822265625*q**4*r**3*s**2 + 1112500000*p**4*r**4*s**2 + 2437500000*p*q**2*r**4*s**2 - 1125000000*p**2*r**5*s**2 - 72578125*p**5*q**3*s**3 - 189296875*p**2*q**5*s**3 + 127265625*p**6*q*r*s**3 + 1415625000*p**3*q**3*r*s**3 + 1229687500*q**5*r*s**3 + 1448437500*p**4*q*r**2*s**3 + 2218750000*p*q**3*r**2*s**3 - 4031250000*p**2*q*r**3*s**3 + 5625000000*q*r**4*s**3 - 132890625*p**7*s**4 - 529296875*p**4*q**2*s**4 - 175781250*p*q**4*s**4 - 401953125*p**5*r*s**4 - 4482421875*p**2*q**2*r*s**4 + 4140625000*p**3*r**2*s**4 - 10498046875*q**2*r**2*s**4 - 7031250000*p*r**3*s**4 + 1220703125*p**3*q*s**5 + 1953125000*q**3*s**5 + 14160156250*p*q*r*s**5 - 1708984375*p**2*s**6 - 3662109375*r*s**6
b[4][0] = -4600*p**6*q**6 - 67850*p**3*q**8 - 248400*q**10 + 38900*p**7*q**4*r + 679575*p**4*q**6*r + 2866500*p*q**8*r - 81900*p**8*q**2*r**2 - 2009750*p**5*q**4*r**2 - 10783750*p**2*q**6*r**2 + 1478750*p**6*q**2*r**3 + 14165625*p**3*q**4*r**3 - 2743750*q**6*r**3 - 5450000*p**4*q**2*r**4 + 12687500*p*q**4*r**4 - 22500000*p**2*q**2*r**5 - 101700*p**8*q**3*s - 1700975*p**5*q**5*s - 7061250*p**2*q**7*s + 423900*p**9*q*r*s + 9292375*p**6*q**3*r*s + 50438750*p**3*q**5*r*s + 20475000*q**7*r*s - 7852500*p**7*q*r**2*s - 87765625*p**4*q**3*r**2*s - 121609375*p*q**5*r**2*s + 47700000*p**5*q*r**3*s + 264687500*p**2*q**3*r**3*s - 65000000*p**3*q*r**4*s + 37500000*q**3*r**4*s - 534600*p**10*s**2 - 10344375*p**7*q**2*s**2 - 54859375*p**4*q**4*s**2 - 40312500*p*q**6*s**2 + 10158750*p**8*r*s**2 + 117778125*p**5*q**2*r*s**2 + 192421875*p**2*q**4*r*s**2 - 70593750*p**6*r**2*s**2 - 685312500*p**3*q**2*r**2*s**2 - 334375000*q**4*r**2*s**2 + 193750000*p**4*r**3*s**2 + 500000000*p*q**2*r**3*s**2 - 187500000*p**2*r**4*s**2 + 8437500*p**6*q*s**3 + 159218750*p**3*q**3*s**3 + 220625000*q**5*s**3 + 353828125*p**4*q*r*s**3 + 412500000*p*q**3*r*s**3 - 1023437500*p**2*q*r**2*s**3 + 937500000*q*r**3*s**3 - 206015625*p**5*s**4 - 701171875*p**2*q**2*s**4 + 998046875*p**3*r*s**4 - 1308593750*q**2*r*s**4 - 1367187500*p*r**2*s**4 + 1708984375*p*q*s**5 - 976562500*s**6
return b
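
# A further list of six coefficients, filled from o[5] downwards; each
# assigned entry is again a polynomial in p, q, r, s.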
@property
def o(self):
p, q, r, s = self.p, self.q, self.r, self.s
o = [0]*6
o[5] = -1600*p**10*q**10 - 23600*p**7*q**12 - 86400*p**4*q**14 + 24800*p**11*q**8*r + 419200*p**8*q**10*r + 1850450*p**5*q**12*r + 896400*p**2*q**14*r - 138800*p**12*q**6*r**2 - 2921900*p**9*q**8*r**2 - 17295200*p**6*q**10*r**2 - 27127750*p**3*q**12*r**2 - 26076600*q**14*r**2 + 325800*p**13*q**4*r**3 + 9993850*p**10*q**6*r**3 + 88010500*p**7*q**8*r**3 + 274047650*p**4*q**10*r**3 + 410171400*p*q**12*r**3 - 259200*p**14*q**2*r**4 - 17147100*p**11*q**4*r**4 - 254289150*p**8*q**6*r**4 - 1318548225*p**5*q**8*r**4 - 2633598475*p**2*q**10*r**4 + 12636000*p**12*q**2*r**5 + 388911000*p**9*q**4*r**5 + 3269704725*p**6*q**6*r**5 + 8791192300*p**3*q**8*r**5 + 93560575*q**10*r**5 - 228361600*p**10*q**2*r**6 - 3951199200*p**7*q**4*r**6 - 16276981100*p**4*q**6*r**6 - 1597227000*p*q**8*r**6 + 1947899200*p**8*q**2*r**7 + 17037648000*p**5*q**4*r**7 + 8919740000*p**2*q**6*r**7 - 7672160000*p**6*q**2*r**8 - 15496000000*p**3*q**4*r**8 + 4224000000*q**6*r**8 + 9968000000*p**4*q**2*r**9 - 8640000000*p*q**4*r**9 + 4800000000*p**2*q**2*r**10 - 55200*p**12*q**7*s - 685600*p**9*q**9*s + 1028250*p**6*q**11*s + 37650000*p**3*q**13*s + 111375000*q**15*s + 583200*p**13*q**5*r*s + 9075600*p**10*q**7*r*s - 883150*p**7*q**9*r*s - 506830750*p**4*q**11*r*s - 1793137500*p*q**13*r*s - 1852200*p**14*q**3*r**2*s - 41435250*p**11*q**5*r**2*s - 80566700*p**8*q**7*r**2*s + 2485673600*p**5*q**9*r**2*s + 11442286125*p**2*q**11*r**2*s + 1555200*p**15*q*r**3*s + 80846100*p**12*q**3*r**3*s + 564906800*p**9*q**5*r**3*s - 4493012400*p**6*q**7*r**3*s - 35492391250*p**3*q**9*r**3*s - 789931875*q**11*r**3*s - 71766000*p**13*q*r**4*s - 1551149200*p**10*q**3*r**4*s - 1773437900*p**7*q**5*r**4*s + 51957593125*p**4*q**7*r**4*s + 14964765625*p*q**9*r**4*s + 1231569600*p**11*q*r**5*s + 12042977600*p**8*q**3*r**5*s - 27151011200*p**5*q**5*r**5*s - 88080610000*p**2*q**7*r**5*s - 9912995200*p**9*q*r**6*s - 29448104000*p**6*q**3*r**6*s + 144954840000*p**3*q**5*r**6*s - 44601300000*q**7*r**6*s + 35453760000*p**7*q*r**7*s - 63264000000*p**4*q**3*r**7*s + 60544000000*p*q**5*r**7*s - 30048000000*p**5*q*r**8*s + 37040000000*p**2*q**3*r**8*s - 60800000000*p**3*q*r**9*s - 48000000000*q**3*r**9*s - 615600*p**14*q**4*s**2 - 10524500*p**11*q**6*s**2 - 33831250*p**8*q**8*s**2 + 222806250*p**5*q**10*s**2 + 1099687500*p**2*q**12*s**2 + 3353400*p**15*q**2*r*s**2 + 74269350*p**12*q**4*r*s**2 + 276445750*p**9*q**6*r*s**2 - 2618600000*p**6*q**8*r*s**2 - 14473243750*p**3*q**10*r*s**2 + 1383750000*q**12*r*s**2 - 2332800*p**16*r**2*s**2 - 132750900*p**13*q**2*r**2*s**2 - 900775150*p**10*q**4*r**2*s**2 + 8249244500*p**7*q**6*r**2*s**2 + 59525796875*p**4*q**8*r**2*s**2 - 40292868750*p*q**10*r**2*s**2 + 128304000*p**14*r**3*s**2 + 3160232100*p**11*q**2*r**3*s**2 + 8329580000*p**8*q**4*r**3*s**2 - 45558458750*p**5*q**6*r**3*s**2 + 297252890625*p**2*q**8*r**3*s**2 - 2769854400*p**12*r**4*s**2 - 37065970000*p**9*q**2*r**4*s**2 - 90812546875*p**6*q**4*r**4*s**2 - 627902000000*p**3*q**6*r**4*s**2 + 181347421875*q**8*r**4*s**2 + 30946932800*p**10*r**5*s**2 + 249954680000*p**7*q**2*r**5*s**2 + 802954812500*p**4*q**4*r**5*s**2 - 80900000000*p*q**6*r**5*s**2 - 192137320000*p**8*r**6*s**2 - 932641600000*p**5*q**2*r**6*s**2 - 943242500000*p**2*q**4*r**6*s**2 + 658412000000*p**6*r**7*s**2 + 1930720000000*p**3*q**2*r**7*s**2 + 593800000000*q**4*r**7*s**2 - 1162800000000*p**4*r**8*s**2 - 280000000000*p*q**2*r**8*s**2 + 840000000000*p**2*r**9*s**2 - 2187000*p**16*q*s**3 - 47418750*p**13*q**3*s**3 - 180618750*p**10*q**5*s**3 + 2231250000*p**7*q**7*s**3 + 17857734375*p**4*q**9*s**3 + 
29882812500*p*q**11*s**3 + 24664500*p**14*q*r*s**3 - 853368750*p**11*q**3*r*s**3 - 25939693750*p**8*q**5*r*s**3 - 177541562500*p**5*q**7*r*s**3 - 297978828125*p**2*q**9*r*s**3 - 153468000*p**12*q*r**2*s**3 + 30188125000*p**9*q**3*r**2*s**3 + 344049821875*p**6*q**5*r**2*s**3 + 534026875000*p**3*q**7*r**2*s**3 - 340726484375*q**9*r**2*s**3 - 9056190000*p**10*q*r**3*s**3 - 322314687500*p**7*q**3*r**3*s**3 - 769632109375*p**4*q**5*r**3*s**3 - 83276875000*p*q**7*r**3*s**3 + 164061000000*p**8*q*r**4*s**3 + 1381358750000*p**5*q**3*r**4*s**3 + 3088020000000*p**2*q**5*r**4*s**3 - 1267655000000*p**6*q*r**5*s**3 - 7642630000000*p**3*q**3*r**5*s**3 - 2759877500000*q**5*r**5*s**3 + 4597760000000*p**4*q*r**6*s**3 + 1846200000000*p*q**3*r**6*s**3 - 7006000000000*p**2*q*r**7*s**3 - 1200000000000*q*r**8*s**3 + 18225000*p**15*s**4 + 1328906250*p**12*q**2*s**4 + 24729140625*p**9*q**4*s**4 + 169467187500*p**6*q**6*s**4 + 413281250000*p**3*q**8*s**4 + 223828125000*q**10*s**4 + 710775000*p**13*r*s**4 - 18611015625*p**10*q**2*r*s**4 - 314344375000*p**7*q**4*r*s**4 - 828439843750*p**4*q**6*r*s**4 + 460937500000*p*q**8*r*s**4 - 25674975000*p**11*r**2*s**4 - 52223515625*p**8*q**2*r**2*s**4 - 387160000000*p**5*q**4*r**2*s**4 - 4733680078125*p**2*q**6*r**2*s**4 + 343911875000*p**9*r**3*s**4 + 3328658359375*p**6*q**2*r**3*s**4 + 16532406250000*p**3*q**4*r**3*s**4 + 5980613281250*q**6*r**3*s**4 - 2295497500000*p**7*r**4*s**4 - 14809820312500*p**4*q**2*r**4*s**4 - 6491406250000*p*q**4*r**4*s**4 + 7768470000000*p**5*r**5*s**4 + 34192562500000*p**2*q**2*r**5*s**4 - 11859000000000*p**3*r**6*s**4 + 10530000000000*q**2*r**6*s**4 + 6000000000000*p*r**7*s**4 + 11453906250*p**11*q*s**5 + 149765625000*p**8*q**3*s**5 + 545537109375*p**5*q**5*s**5 + 527343750000*p**2*q**7*s**5 - 371313281250*p**9*q*r*s**5 - 3461455078125*p**6*q**3*r*s**5 - 7920878906250*p**3*q**5*r*s**5 - 4747314453125*q**7*r*s**5 + 2417815625000*p**7*q*r**2*s**5 + 5465576171875*p**4*q**3*r**2*s**5 + 5937128906250*p*q**5*r**2*s**5 - 10661156250000*p**5*q*r**3*s**5 - 63574218750000*p**2*q**3*r**3*s**5 + 24059375000000*p**3*q*r**4*s**5 - 33023437500000*q**3*r**4*s**5 - 43125000000000*p*q*r**5*s**5 + 94394531250*p**10*s**6 + 1097167968750*p**7*q**2*s**6 + 2829833984375*p**4*q**4*s**6 - 1525878906250*p*q**6*s**6 + 2724609375*p**8*r*s**6 + 13998535156250*p**5*q**2*r*s**6 + 57094482421875*p**2*q**4*r*s**6 - 8512509765625*p**6*r**2*s**6 - 37941406250000*p**3*q**2*r**2*s**6 + 33191894531250*q**4*r**2*s**6 + 50534179687500*p**4*r**3*s**6 + 156656250000000*p*q**2*r**3*s**6 - 85023437500000*p**2*r**4*s**6 + 10125000000000*r**5*s**6 - 2717285156250*p**6*q*s**7 - 11352539062500*p**3*q**3*s**7 - 2593994140625*q**5*s**7 - 47154541015625*p**4*q*r*s**7 - 160644531250000*p*q**3*r*s**7 + 142500000000000*p**2*q*r**2*s**7 - 26757812500000*q*r**3*s**7 - 4364013671875*p**5*s**8 - 94604492187500*p**2*q**2*s**8 + 114379882812500*p**3*r*s**8 + 51116943359375*q**2*r*s**8 - 346435546875000*p*r**2*s**8 + 476837158203125*p*q*s**9 - 476837158203125*s**10
o[4] = 1600*p**11*q**8 + 20800*p**8*q**10 + 45100*p**5*q**12 - 151200*p**2*q**14 - 19200*p**12*q**6*r - 293200*p**9*q**8*r - 794600*p**6*q**10*r + 2634675*p**3*q**12*r + 2640600*q**14*r + 75600*p**13*q**4*r**2 + 1529100*p**10*q**6*r**2 + 6233350*p**7*q**8*r**2 - 12013350*p**4*q**10*r**2 - 29069550*p*q**12*r**2 - 97200*p**14*q**2*r**3 - 3562500*p**11*q**4*r**3 - 26984900*p**8*q**6*r**3 - 15900325*p**5*q**8*r**3 + 76267100*p**2*q**10*r**3 + 3272400*p**12*q**2*r**4 + 59486850*p**9*q**4*r**4 + 221270075*p**6*q**6*r**4 + 74065250*p**3*q**8*r**4 - 300564375*q**10*r**4 - 45569400*p**10*q**2*r**5 - 438666000*p**7*q**4*r**5 - 444821250*p**4*q**6*r**5 + 2448256250*p*q**8*r**5 + 290640000*p**8*q**2*r**6 + 855850000*p**5*q**4*r**6 - 5741875000*p**2*q**6*r**6 - 644000000*p**6*q**2*r**7 + 5574000000*p**3*q**4*r**7 + 4643000000*q**6*r**7 - 1696000000*p**4*q**2*r**8 - 12660000000*p*q**4*r**8 + 7200000000*p**2*q**2*r**9 + 43200*p**13*q**5*s + 572000*p**10*q**7*s - 59800*p**7*q**9*s - 24174625*p**4*q**11*s - 74587500*p*q**13*s - 324000*p**14*q**3*r*s - 5531400*p**11*q**5*r*s - 3712100*p**8*q**7*r*s + 293009275*p**5*q**9*r*s + 1115548875*p**2*q**11*r*s + 583200*p**15*q*r**2*s + 18343800*p**12*q**3*r**2*s + 77911100*p**9*q**5*r**2*s - 957488825*p**6*q**7*r**2*s - 5449661250*p**3*q**9*r**2*s + 960120000*q**11*r**2*s - 23684400*p**13*q*r**3*s - 373761900*p**10*q**3*r**3*s - 27944975*p**7*q**5*r**3*s + 10375740625*p**4*q**7*r**3*s - 4649093750*p*q**9*r**3*s + 395816400*p**11*q*r**4*s + 2910968000*p**8*q**3*r**4*s - 9126162500*p**5*q**5*r**4*s - 11696118750*p**2*q**7*r**4*s - 3028640000*p**9*q*r**5*s - 3251550000*p**6*q**3*r**5*s + 47914250000*p**3*q**5*r**5*s - 30255625000*q**7*r**5*s + 9304000000*p**7*q*r**6*s - 42970000000*p**4*q**3*r**6*s + 31475000000*p*q**5*r**6*s + 2176000000*p**5*q*r**7*s + 62100000000*p**2*q**3*r**7*s - 43200000000*p**3*q*r**8*s - 72000000000*q**3*r**8*s + 291600*p**15*q**2*s**2 + 2702700*p**12*q**4*s**2 - 38692250*p**9*q**6*s**2 - 538903125*p**6*q**8*s**2 - 1613112500*p**3*q**10*s**2 + 320625000*q**12*s**2 - 874800*p**16*r*s**2 - 14166900*p**13*q**2*r*s**2 + 193284900*p**10*q**4*r*s**2 + 3688520500*p**7*q**6*r*s**2 + 11613390625*p**4*q**8*r*s**2 - 15609881250*p*q**10*r*s**2 + 44031600*p**14*r**2*s**2 + 482345550*p**11*q**2*r**2*s**2 - 2020881875*p**8*q**4*r**2*s**2 - 7407026250*p**5*q**6*r**2*s**2 + 136175750000*p**2*q**8*r**2*s**2 - 1000884600*p**12*r**3*s**2 - 8888950000*p**9*q**2*r**3*s**2 - 30101703125*p**6*q**4*r**3*s**2 - 319761000000*p**3*q**6*r**3*s**2 + 51519218750*q**8*r**3*s**2 + 12622395000*p**10*r**4*s**2 + 97032450000*p**7*q**2*r**4*s**2 + 469929218750*p**4*q**4*r**4*s**2 + 291342187500*p*q**6*r**4*s**2 - 96382000000*p**8*r**5*s**2 - 598070000000*p**5*q**2*r**5*s**2 - 1165021875000*p**2*q**4*r**5*s**2 + 446500000000*p**6*r**6*s**2 + 1651500000000*p**3*q**2*r**6*s**2 + 789375000000*q**4*r**6*s**2 - 1152000000000*p**4*r**7*s**2 - 600000000000*p*q**2*r**7*s**2 + 1260000000000*p**2*r**8*s**2 - 24786000*p**14*q*s**3 - 660487500*p**11*q**3*s**3 - 5886356250*p**8*q**5*s**3 - 18137187500*p**5*q**7*s**3 - 5120546875*p**2*q**9*s**3 + 827658000*p**12*q*r*s**3 + 13343062500*p**9*q**3*r*s**3 + 39782068750*p**6*q**5*r*s**3 - 111288437500*p**3*q**7*r*s**3 - 15438750000*q**9*r*s**3 - 14540782500*p**10*q*r**2*s**3 - 135889750000*p**7*q**3*r**2*s**3 - 176892578125*p**4*q**5*r**2*s**3 - 934462656250*p*q**7*r**2*s**3 + 171669250000*p**8*q*r**3*s**3 + 1164538125000*p**5*q**3*r**3*s**3 + 3192346406250*p**2*q**5*r**3*s**3 - 1295476250000*p**6*q*r**4*s**3 - 6540712500000*p**3*q**3*r**4*s**3 - 
2957828125000*q**5*r**4*s**3 + 5366750000000*p**4*q*r**5*s**3 + 3165000000000*p*q**3*r**5*s**3 - 8862500000000*p**2*q*r**6*s**3 - 1800000000000*q*r**7*s**3 + 236925000*p**13*s**4 + 8895234375*p**10*q**2*s**4 + 106180781250*p**7*q**4*s**4 + 474221875000*p**4*q**6*s**4 + 616210937500*p*q**8*s**4 - 6995868750*p**11*r*s**4 - 184190625000*p**8*q**2*r*s**4 - 1299254453125*p**5*q**4*r*s**4 - 2475458593750*p**2*q**6*r*s**4 + 63049218750*p**9*r**2*s**4 + 1646791484375*p**6*q**2*r**2*s**4 + 9086886718750*p**3*q**4*r**2*s**4 + 4673421875000*q**6*r**2*s**4 - 215665000000*p**7*r**3*s**4 - 7864589843750*p**4*q**2*r**3*s**4 - 5987890625000*p*q**4*r**3*s**4 + 594843750000*p**5*r**4*s**4 + 27791171875000*p**2*q**2*r**4*s**4 - 3881250000000*p**3*r**5*s**4 + 12203125000000*q**2*r**5*s**4 + 10312500000000*p*r**6*s**4 - 34720312500*p**9*q*s**5 - 545126953125*p**6*q**3*s**5 - 2176425781250*p**3*q**5*s**5 - 2792968750000*q**7*s**5 - 1395703125*p**7*q*r*s**5 - 1957568359375*p**4*q**3*r*s**5 + 5122636718750*p*q**5*r*s**5 + 858210937500*p**5*q*r**2*s**5 - 42050097656250*p**2*q**3*r**2*s**5 + 7088281250000*p**3*q*r**3*s**5 - 25974609375000*q**3*r**3*s**5 - 69296875000000*p*q*r**4*s**5 + 384697265625*p**8*s**6 + 6403320312500*p**5*q**2*s**6 + 16742675781250*p**2*q**4*s**6 - 3467080078125*p**6*r*s**6 + 11009765625000*p**3*q**2*r*s**6 + 16451660156250*q**4*r*s**6 + 6979003906250*p**4*r**2*s**6 + 145403320312500*p*q**2*r**2*s**6 + 4076171875000*p**2*r**3*s**6 + 22265625000000*r**4*s**6 - 21915283203125*p**4*q*s**7 - 86608886718750*p*q**3*s**7 - 22785644531250*p**2*q*r*s**7 - 103466796875000*q*r**2*s**7 + 18798828125000*p**3*s**8 + 106048583984375*q**2*s**8 + 17761230468750*p*r*s**8
o[3] = 2800*p**9*q**8 + 55700*p**6*q**10 + 363600*p**3*q**12 + 777600*q**14 - 27200*p**10*q**6*r - 700200*p**7*q**8*r - 5726550*p**4*q**10*r - 15066000*p*q**12*r + 74700*p**11*q**4*r**2 + 2859575*p**8*q**6*r**2 + 31175725*p**5*q**8*r**2 + 103147650*p**2*q**10*r**2 - 40500*p**12*q**2*r**3 - 4274400*p**9*q**4*r**3 - 76065825*p**6*q**6*r**3 - 365623750*p**3*q**8*r**3 - 132264000*q**10*r**3 + 2192400*p**10*q**2*r**4 + 92562500*p**7*q**4*r**4 + 799193875*p**4*q**6*r**4 + 1188193125*p*q**8*r**4 - 41231500*p**8*q**2*r**5 - 914210000*p**5*q**4*r**5 - 3318853125*p**2*q**6*r**5 + 398850000*p**6*q**2*r**6 + 3944000000*p**3*q**4*r**6 + 2211312500*q**6*r**6 - 1817000000*p**4*q**2*r**7 - 6720000000*p*q**4*r**7 + 3900000000*p**2*q**2*r**8 + 75600*p**11*q**5*s + 1823100*p**8*q**7*s + 14534150*p**5*q**9*s + 38265750*p**2*q**11*s - 394200*p**12*q**3*r*s - 11453850*p**9*q**5*r*s - 101213000*p**6*q**7*r*s - 223565625*p**3*q**9*r*s + 415125000*q**11*r*s + 243000*p**13*q*r**2*s + 13654575*p**10*q**3*r**2*s + 163811725*p**7*q**5*r**2*s + 173461250*p**4*q**7*r**2*s - 3008671875*p*q**9*r**2*s - 2016900*p**11*q*r**3*s - 86576250*p**8*q**3*r**3*s - 324146625*p**5*q**5*r**3*s + 3378506250*p**2*q**7*r**3*s - 89211000*p**9*q*r**4*s - 55207500*p**6*q**3*r**4*s + 1493950000*p**3*q**5*r**4*s - 12573609375*q**7*r**4*s + 1140100000*p**7*q*r**5*s + 42500000*p**4*q**3*r**5*s + 21511250000*p*q**5*r**5*s - 4058000000*p**5*q*r**6*s + 6725000000*p**2*q**3*r**6*s - 1400000000*p**3*q*r**7*s - 39000000000*q**3*r**7*s + 510300*p**13*q**2*s**2 + 4814775*p**10*q**4*s**2 - 70265125*p**7*q**6*s**2 - 1016484375*p**4*q**8*s**2 - 3221100000*p*q**10*s**2 - 364500*p**14*r*s**2 + 30314250*p**11*q**2*r*s**2 + 1106765625*p**8*q**4*r*s**2 + 10984203125*p**5*q**6*r*s**2 + 33905812500*p**2*q**8*r*s**2 - 37980900*p**12*r**2*s**2 - 2142905625*p**9*q**2*r**2*s**2 - 26896125000*p**6*q**4*r**2*s**2 - 95551328125*p**3*q**6*r**2*s**2 + 11320312500*q**8*r**2*s**2 + 1743781500*p**10*r**3*s**2 + 35432262500*p**7*q**2*r**3*s**2 + 177855859375*p**4*q**4*r**3*s**2 + 121260546875*p*q**6*r**3*s**2 - 25943162500*p**8*r**4*s**2 - 249165500000*p**5*q**2*r**4*s**2 - 461739453125*p**2*q**4*r**4*s**2 + 177823750000*p**6*r**5*s**2 + 726225000000*p**3*q**2*r**5*s**2 + 404195312500*q**4*r**5*s**2 - 565875000000*p**4*r**6*s**2 - 407500000000*p*q**2*r**6*s**2 + 682500000000*p**2*r**7*s**2 - 59140125*p**12*q*s**3 - 1290515625*p**9*q**3*s**3 - 8785071875*p**6*q**5*s**3 - 15588281250*p**3*q**7*s**3 + 17505000000*q**9*s**3 + 896062500*p**10*q*r*s**3 + 2589750000*p**7*q**3*r*s**3 - 82700156250*p**4*q**5*r*s**3 - 347683593750*p*q**7*r*s**3 + 17022656250*p**8*q*r**2*s**3 + 320923593750*p**5*q**3*r**2*s**3 + 1042116875000*p**2*q**5*r**2*s**3 - 353262812500*p**6*q*r**3*s**3 - 2212664062500*p**3*q**3*r**3*s**3 - 1252408984375*q**5*r**3*s**3 + 1967362500000*p**4*q*r**4*s**3 + 1583343750000*p*q**3*r**4*s**3 - 3560625000000*p**2*q*r**5*s**3 - 975000000000*q*r**6*s**3 + 462459375*p**11*s**4 + 14210859375*p**8*q**2*s**4 + 99521718750*p**5*q**4*s**4 + 114955468750*p**2*q**6*s**4 - 17720859375*p**9*r*s**4 - 100320703125*p**6*q**2*r*s**4 + 1021943359375*p**3*q**4*r*s**4 + 1193203125000*q**6*r*s**4 + 171371250000*p**7*r**2*s**4 - 1113390625000*p**4*q**2*r**2*s**4 - 1211474609375*p*q**4*r**2*s**4 - 274056250000*p**5*r**3*s**4 + 8285166015625*p**2*q**2*r**3*s**4 - 2079375000000*p**3*r**4*s**4 + 5137304687500*q**2*r**4*s**4 + 6187500000000*p*r**5*s**4 - 135675000000*p**7*q*s**5 - 1275244140625*p**4*q**3*s**5 - 28388671875*p*q**5*s**5 + 1015166015625*p**5*q*r*s**5 - 
10584423828125*p**2*q**3*r*s**5 + 3559570312500*p**3*q*r**2*s**5 - 6929931640625*q**3*r**2*s**5 - 32304687500000*p*q*r**3*s**5 + 430576171875*p**6*s**6 + 9397949218750*p**3*q**2*s**6 + 575195312500*q**4*s**6 - 4086425781250*p**4*r*s**6 + 42183837890625*p*q**2*r*s**6 + 8156494140625*p**2*r**2*s**6 + 12612304687500*r**3*s**6 - 25513916015625*p**2*q*s**7 - 37017822265625*q*r*s**7 + 18981933593750*p*s**8
o[2] = 1600*p**10*q**6 + 9200*p**7*q**8 - 126000*p**4*q**10 - 777600*p*q**12 - 14400*p**11*q**4*r - 119300*p**8*q**6*r + 1203225*p**5*q**8*r + 9412200*p**2*q**10*r + 32400*p**12*q**2*r**2 + 417950*p**9*q**4*r**2 - 4543725*p**6*q**6*r**2 - 49008125*p**3*q**8*r**2 - 24192000*q**10*r**2 - 292050*p**10*q**2*r**3 + 8760000*p**7*q**4*r**3 + 137506625*p**4*q**6*r**3 + 225438750*p*q**8*r**3 - 4213250*p**8*q**2*r**4 - 173595625*p**5*q**4*r**4 - 653003125*p**2*q**6*r**4 + 82575000*p**6*q**2*r**5 + 838125000*p**3*q**4*r**5 + 578562500*q**6*r**5 - 421500000*p**4*q**2*r**6 - 1796250000*p*q**4*r**6 + 1050000000*p**2*q**2*r**7 + 43200*p**12*q**3*s + 807300*p**9*q**5*s + 5328225*p**6*q**7*s + 16946250*p**3*q**9*s + 29565000*q**11*s - 194400*p**13*q*r*s - 5505300*p**10*q**3*r*s - 49886700*p**7*q**5*r*s - 178821875*p**4*q**7*r*s - 222750000*p*q**9*r*s + 6814800*p**11*q*r**2*s + 120525625*p**8*q**3*r**2*s + 526694500*p**5*q**5*r**2*s + 84065625*p**2*q**7*r**2*s - 123670500*p**9*q*r**3*s - 1106731875*p**6*q**3*r**3*s - 669556250*p**3*q**5*r**3*s - 2869265625*q**7*r**3*s + 1004350000*p**7*q*r**4*s + 3384375000*p**4*q**3*r**4*s + 5665625000*p*q**5*r**4*s - 3411000000*p**5*q*r**5*s - 418750000*p**2*q**3*r**5*s + 1700000000*p**3*q*r**6*s - 10500000000*q**3*r**6*s + 291600*p**14*s**2 + 9829350*p**11*q**2*s**2 + 114151875*p**8*q**4*s**2 + 522169375*p**5*q**6*s**2 + 716906250*p**2*q**8*s**2 - 18625950*p**12*r*s**2 - 387703125*p**9*q**2*r*s**2 - 2056109375*p**6*q**4*r*s**2 - 760203125*p**3*q**6*r*s**2 + 3071250000*q**8*r*s**2 + 512419500*p**10*r**2*s**2 + 5859053125*p**7*q**2*r**2*s**2 + 12154062500*p**4*q**4*r**2*s**2 + 15931640625*p*q**6*r**2*s**2 - 6598393750*p**8*r**3*s**2 - 43549625000*p**5*q**2*r**3*s**2 - 82011328125*p**2*q**4*r**3*s**2 + 43538125000*p**6*r**4*s**2 + 160831250000*p**3*q**2*r**4*s**2 + 99070312500*q**4*r**4*s**2 - 141812500000*p**4*r**5*s**2 - 117500000000*p*q**2*r**5*s**2 + 183750000000*p**2*r**6*s**2 - 154608750*p**10*q*s**3 - 3309468750*p**7*q**3*s**3 - 20834140625*p**4*q**5*s**3 - 34731562500*p*q**7*s**3 + 5970375000*p**8*q*r*s**3 + 68533281250*p**5*q**3*r*s**3 + 142698281250*p**2*q**5*r*s**3 - 74509140625*p**6*q*r**2*s**3 - 389148437500*p**3*q**3*r**2*s**3 - 270937890625*q**5*r**2*s**3 + 366696875000*p**4*q*r**3*s**3 + 400031250000*p*q**3*r**3*s**3 - 735156250000*p**2*q*r**4*s**3 - 262500000000*q*r**5*s**3 + 371250000*p**9*s**4 + 21315000000*p**6*q**2*s**4 + 179515625000*p**3*q**4*s**4 + 238406250000*q**6*s**4 - 9071015625*p**7*r*s**4 - 268945312500*p**4*q**2*r*s**4 - 379785156250*p*q**4*r*s**4 + 140262890625*p**5*r**2*s**4 + 1486259765625*p**2*q**2*r**2*s**4 - 806484375000*p**3*r**3*s**4 + 1066210937500*q**2*r**3*s**4 + 1722656250000*p*r**4*s**4 - 125648437500*p**5*q*s**5 - 1236279296875*p**2*q**3*s**5 + 1267871093750*p**3*q*r*s**5 - 1044677734375*q**3*r*s**5 - 6630859375000*p*q*r**2*s**5 + 160888671875*p**4*s**6 + 6352294921875*p*q**2*s**6 - 708740234375*p**2*r*s**6 + 3901367187500*r**2*s**6 - 8050537109375*q*s**7
o[1] = 2800*p**8*q**6 + 41300*p**5*q**8 + 151200*p**2*q**10 - 25200*p**9*q**4*r - 542600*p**6*q**6*r - 3397875*p**3*q**8*r - 5751000*q**10*r + 56700*p**10*q**2*r**2 + 1972125*p**7*q**4*r**2 + 18624250*p**4*q**6*r**2 + 50253750*p*q**8*r**2 - 1701000*p**8*q**2*r**3 - 32630625*p**5*q**4*r**3 - 139868750*p**2*q**6*r**3 + 18162500*p**6*q**2*r**4 + 177125000*p**3*q**4*r**4 + 121734375*q**6*r**4 - 100500000*p**4*q**2*r**5 - 386250000*p*q**4*r**5 + 225000000*p**2*q**2*r**6 + 75600*p**10*q**3*s + 1708800*p**7*q**5*s + 12836875*p**4*q**7*s + 32062500*p*q**9*s - 340200*p**11*q*r*s - 10185750*p**8*q**3*r*s - 97502750*p**5*q**5*r*s - 301640625*p**2*q**7*r*s + 7168500*p**9*q*r**2*s + 135960625*p**6*q**3*r**2*s + 587471875*p**3*q**5*r**2*s - 384750000*q**7*r**2*s - 29325000*p**7*q*r**3*s - 320625000*p**4*q**3*r**3*s + 523437500*p*q**5*r**3*s - 42000000*p**5*q*r**4*s + 343750000*p**2*q**3*r**4*s + 150000000*p**3*q*r**5*s - 2250000000*q**3*r**5*s + 510300*p**12*s**2 + 12808125*p**9*q**2*s**2 + 107062500*p**6*q**4*s**2 + 270312500*p**3*q**6*s**2 - 168750000*q**8*s**2 - 2551500*p**10*r*s**2 - 5062500*p**7*q**2*r*s**2 + 712343750*p**4*q**4*r*s**2 + 4788281250*p*q**6*r*s**2 - 256837500*p**8*r**2*s**2 - 3574812500*p**5*q**2*r**2*s**2 - 14967968750*p**2*q**4*r**2*s**2 + 4040937500*p**6*r**3*s**2 + 26400000000*p**3*q**2*r**3*s**2 + 17083984375*q**4*r**3*s**2 - 21812500000*p**4*r**4*s**2 - 24375000000*p*q**2*r**4*s**2 + 39375000000*p**2*r**5*s**2 - 127265625*p**5*q**3*s**3 - 680234375*p**2*q**5*s**3 - 2048203125*p**6*q*r*s**3 - 18794531250*p**3*q**3*r*s**3 - 25050000000*q**5*r*s**3 + 26621875000*p**4*q*r**2*s**3 + 37007812500*p*q**3*r**2*s**3 - 105468750000*p**2*q*r**3*s**3 - 56250000000*q*r**4*s**3 + 1124296875*p**7*s**4 + 9251953125*p**4*q**2*s**4 - 8007812500*p*q**4*s**4 - 4004296875*p**5*r*s**4 + 179931640625*p**2*q**2*r*s**4 - 75703125000*p**3*r**2*s**4 + 133447265625*q**2*r**2*s**4 + 363281250000*p*r**3*s**4 - 91552734375*p**3*q*s**5 - 19531250000*q**3*s**5 - 751953125000*p*q*r*s**5 + 157958984375*p**2*s**6 + 748291015625*r*s**6
o[0] = -14400*p**6*q**6 - 212400*p**3*q**8 - 777600*q**10 + 92100*p**7*q**4*r + 1689675*p**4*q**6*r + 7371000*p*q**8*r - 122850*p**8*q**2*r**2 - 3735250*p**5*q**4*r**2 - 22432500*p**2*q**6*r**2 + 2298750*p**6*q**2*r**3 + 29390625*p**3*q**4*r**3 + 18000000*q**6*r**3 - 17750000*p**4*q**2*r**4 - 62812500*p*q**4*r**4 + 37500000*p**2*q**2*r**5 - 51300*p**8*q**3*s - 768025*p**5*q**5*s - 2801250*p**2*q**7*s - 275400*p**9*q*r*s - 5479875*p**6*q**3*r*s - 35538750*p**3*q**5*r*s - 68850000*q**7*r*s + 12757500*p**7*q*r**2*s + 133640625*p**4*q**3*r**2*s + 222609375*p*q**5*r**2*s - 108500000*p**5*q*r**3*s - 290312500*p**2*q**3*r**3*s + 275000000*p**3*q*r**4*s - 375000000*q**3*r**4*s + 1931850*p**10*s**2 + 40213125*p**7*q**2*s**2 + 253921875*p**4*q**4*s**2 + 464062500*p*q**6*s**2 - 71077500*p**8*r*s**2 - 818746875*p**5*q**2*r*s**2 - 1882265625*p**2*q**4*r*s**2 + 826031250*p**6*r**2*s**2 + 4369687500*p**3*q**2*r**2*s**2 + 3107812500*q**4*r**2*s**2 - 3943750000*p**4*r**3*s**2 - 5000000000*p*q**2*r**3*s**2 + 6562500000*p**2*r**4*s**2 - 295312500*p**6*q*s**3 - 2938906250*p**3*q**3*s**3 - 4848750000*q**5*s**3 + 3791484375*p**4*q*r*s**3 + 7556250000*p*q**3*r*s**3 - 11960937500*p**2*q*r**2*s**3 - 9375000000*q*r**3*s**3 + 1668515625*p**5*s**4 + 20447265625*p**2*q**2*s**4 - 21955078125*p**3*r*s**4 + 18984375000*q**2*r*s**4 + 67382812500*p*r**2*s**4 - 120849609375*p*q*s**5 + 157226562500*s**6
return o
@property
def a(self):
p, q, r, s = self.p, self.q, self.r, self.s
a = [0]*6
a[5] = -100*p**7*q**7 - 2175*p**4*q**9 - 10500*p*q**11 + 1100*p**8*q**5*r + 27975*p**5*q**7*r + 152950*p**2*q**9*r - 4125*p**9*q**3*r**2 - 128875*p**6*q**5*r**2 - 830525*p**3*q**7*r**2 + 59450*q**9*r**2 + 5400*p**10*q*r**3 + 243800*p**7*q**3*r**3 + 2082650*p**4*q**5*r**3 - 333925*p*q**7*r**3 - 139200*p**8*q*r**4 - 2406000*p**5*q**3*r**4 - 122600*p**2*q**5*r**4 + 1254400*p**6*q*r**5 + 3776000*p**3*q**3*r**5 + 1832000*q**5*r**5 - 4736000*p**4*q*r**6 - 6720000*p*q**3*r**6 + 6400000*p**2*q*r**7 - 900*p**9*q**4*s - 37400*p**6*q**6*s - 281625*p**3*q**8*s - 435000*q**10*s + 6750*p**10*q**2*r*s + 322300*p**7*q**4*r*s + 2718575*p**4*q**6*r*s + 4214250*p*q**8*r*s - 16200*p**11*r**2*s - 859275*p**8*q**2*r**2*s - 8925475*p**5*q**4*r**2*s - 14427875*p**2*q**6*r**2*s + 453600*p**9*r**3*s + 10038400*p**6*q**2*r**3*s + 17397500*p**3*q**4*r**3*s - 11333125*q**6*r**3*s - 4451200*p**7*r**4*s - 15850000*p**4*q**2*r**4*s + 34000000*p*q**4*r**4*s + 17984000*p**5*r**5*s - 10000000*p**2*q**2*r**5*s - 25600000*p**3*r**6*s - 8000000*q**2*r**6*s + 6075*p**11*q*s**2 - 83250*p**8*q**3*s**2 - 1282500*p**5*q**5*s**2 - 2862500*p**2*q**7*s**2 + 724275*p**9*q*r*s**2 + 9807250*p**6*q**3*r*s**2 + 28374375*p**3*q**5*r*s**2 + 22212500*q**7*r*s**2 - 8982000*p**7*q*r**2*s**2 - 39600000*p**4*q**3*r**2*s**2 - 61746875*p*q**5*r**2*s**2 - 1010000*p**5*q*r**3*s**2 - 1000000*p**2*q**3*r**3*s**2 + 78000000*p**3*q*r**4*s**2 + 30000000*q**3*r**4*s**2 + 80000000*p*q*r**5*s**2 - 759375*p**10*s**3 - 9787500*p**7*q**2*s**3 - 39062500*p**4*q**4*s**3 - 52343750*p*q**6*s**3 + 12301875*p**8*r*s**3 + 98175000*p**5*q**2*r*s**3 + 225078125*p**2*q**4*r*s**3 - 54900000*p**6*r**2*s**3 - 310000000*p**3*q**2*r**2*s**3 - 7890625*q**4*r**2*s**3 + 51250000*p**4*r**3*s**3 - 420000000*p*q**2*r**3*s**3 + 110000000*p**2*r**4*s**3 - 200000000*r**5*s**3 + 2109375*p**6*q*s**4 - 21093750*p**3*q**3*s**4 - 89843750*q**5*s**4 + 182343750*p**4*q*r*s**4 + 733203125*p*q**3*r*s**4 - 196875000*p**2*q*r**2*s**4 + 1125000000*q*r**3*s**4 - 158203125*p**5*s**5 - 566406250*p**2*q**2*s**5 + 101562500*p**3*r*s**5 - 1669921875*q**2*r*s**5 + 1250000000*p*r**2*s**5 - 1220703125*p*q*s**6 + 6103515625*s**7
a[4] = 1000*p**5*q**7 + 7250*p**2*q**9 - 10800*p**6*q**5*r - 96900*p**3*q**7*r - 52500*q**9*r + 37400*p**7*q**3*r**2 + 470850*p**4*q**5*r**2 + 640600*p*q**7*r**2 - 39600*p**8*q*r**3 - 983600*p**5*q**3*r**3 - 2848100*p**2*q**5*r**3 + 814400*p**6*q*r**4 + 6076000*p**3*q**3*r**4 + 2308000*q**5*r**4 - 5024000*p**4*q*r**5 - 9680000*p*q**3*r**5 + 9600000*p**2*q*r**6 + 13800*p**7*q**4*s + 94650*p**4*q**6*s - 26500*p*q**8*s - 86400*p**8*q**2*r*s - 816500*p**5*q**4*r*s - 257500*p**2*q**6*r*s + 91800*p**9*r**2*s + 1853700*p**6*q**2*r**2*s + 630000*p**3*q**4*r**2*s - 8971250*q**6*r**2*s - 2071200*p**7*r**3*s - 7240000*p**4*q**2*r**3*s + 29375000*p*q**4*r**3*s + 14416000*p**5*r**4*s - 5200000*p**2*q**2*r**4*s - 30400000*p**3*r**5*s - 12000000*q**2*r**5*s + 64800*p**9*q*s**2 + 567000*p**6*q**3*s**2 + 1655000*p**3*q**5*s**2 + 6987500*q**7*s**2 + 337500*p**7*q*r*s**2 + 8462500*p**4*q**3*r*s**2 - 5812500*p*q**5*r*s**2 - 24930000*p**5*q*r**2*s**2 - 69125000*p**2*q**3*r**2*s**2 + 103500000*p**3*q*r**3*s**2 + 30000000*q**3*r**3*s**2 + 90000000*p*q*r**4*s**2 - 708750*p**8*s**3 - 5400000*p**5*q**2*s**3 + 8906250*p**2*q**4*s**3 + 18562500*p**6*r*s**3 - 625000*p**3*q**2*r*s**3 + 29687500*q**4*r*s**3 - 75000000*p**4*r**2*s**3 - 416250000*p*q**2*r**2*s**3 + 60000000*p**2*r**3*s**3 - 300000000*r**4*s**3 + 71718750*p**4*q*s**4 + 189062500*p*q**3*s**4 + 210937500*p**2*q*r*s**4 + 1187500000*q*r**2*s**4 - 187500000*p**3*s**5 - 800781250*q**2*s**5 - 390625000*p*r*s**5
a[3] = -500*p**6*q**5 - 6350*p**3*q**7 - 19800*q**9 + 3750*p**7*q**3*r + 65100*p**4*q**5*r + 264950*p*q**7*r - 6750*p**8*q*r**2 - 209050*p**5*q**3*r**2 - 1217250*p**2*q**5*r**2 + 219000*p**6*q*r**3 + 2510000*p**3*q**3*r**3 + 1098500*q**5*r**3 - 2068000*p**4*q*r**4 - 5060000*p*q**3*r**4 + 5200000*p**2*q*r**5 - 6750*p**8*q**2*s - 96350*p**5*q**4*s - 346000*p**2*q**6*s + 20250*p**9*r*s + 459900*p**6*q**2*r*s + 1828750*p**3*q**4*r*s - 2930000*q**6*r*s - 594000*p**7*r**2*s - 4301250*p**4*q**2*r**2*s + 10906250*p*q**4*r**2*s + 5252000*p**5*r**3*s - 1450000*p**2*q**2*r**3*s - 12800000*p**3*r**4*s - 6500000*q**2*r**4*s + 74250*p**7*q*s**2 + 1418750*p**4*q**3*s**2 + 5956250*p*q**5*s**2 - 4297500*p**5*q*r*s**2 - 29906250*p**2*q**3*r*s**2 + 31500000*p**3*q*r**2*s**2 + 12500000*q**3*r**2*s**2 + 35000000*p*q*r**3*s**2 + 1350000*p**6*s**3 + 6093750*p**3*q**2*s**3 + 17500000*q**4*s**3 - 7031250*p**4*r*s**3 - 127812500*p*q**2*r*s**3 + 18750000*p**2*r**2*s**3 - 162500000*r**3*s**3 + 107812500*p**2*q*s**4 + 460937500*q*r*s**4 - 214843750*p*s**5
a[2] = 1950*p**4*q**5 + 14100*p*q**7 - 14350*p**5*q**3*r - 125600*p**2*q**5*r + 27900*p**6*q*r**2 + 402250*p**3*q**3*r**2 + 288250*q**5*r**2 - 436000*p**4*q*r**3 - 1345000*p*q**3*r**3 + 1400000*p**2*q*r**4 + 9450*p**6*q**2*s - 1250*p**3*q**4*s - 465000*q**6*s - 49950*p**7*r*s - 302500*p**4*q**2*r*s + 1718750*p*q**4*r*s + 834000*p**5*r**2*s + 437500*p**2*q**2*r**2*s - 3100000*p**3*r**3*s - 1750000*q**2*r**3*s - 292500*p**5*q*s**2 - 1937500*p**2*q**3*s**2 + 3343750*p**3*q*r*s**2 + 1875000*q**3*r*s**2 + 8125000*p*q*r**2*s**2 - 1406250*p**4*s**3 - 12343750*p*q**2*s**3 + 5312500*p**2*r*s**3 - 43750000*r**2*s**3 + 74218750*q*s**4
a[1] = -300*p**5*q**3 - 2150*p**2*q**5 + 1350*p**6*q*r + 21500*p**3*q**3*r + 61500*q**5*r - 42000*p**4*q*r**2 - 290000*p*q**3*r**2 + 300000*p**2*q*r**3 - 4050*p**7*s - 45000*p**4*q**2*s - 125000*p*q**4*s + 108000*p**5*r*s + 643750*p**2*q**2*r*s - 700000*p**3*r**2*s - 375000*q**2*r**2*s - 93750*p**3*q*s**2 - 312500*q**3*s**2 + 1875000*p*q*r*s**2 - 1406250*p**2*s**3 - 9375000*r*s**3
a[0] = 1250*p**3*q**3 + 9000*q**5 - 4500*p**4*q*r - 46250*p*q**3*r + 50000*p**2*q*r**2 + 6750*p**5*s + 43750*p**2*q**2*s - 75000*p**3*r*s - 62500*q**2*r*s + 156250*p*q*s**2 - 1562500*s**3
return a
@property
def c(self):
p, q, r, s = self.p, self.q, self.r, self.s
c = [0]*6
c[5] = -40*p**5*q**11 - 270*p**2*q**13 + 700*p**6*q**9*r + 5165*p**3*q**11*r + 540*q**13*r - 4230*p**7*q**7*r**2 - 31845*p**4*q**9*r**2 + 20880*p*q**11*r**2 + 9645*p**8*q**5*r**3 + 57615*p**5*q**7*r**3 - 358255*p**2*q**9*r**3 - 1880*p**9*q**3*r**4 + 114020*p**6*q**5*r**4 + 2012190*p**3*q**7*r**4 - 26855*q**9*r**4 - 14400*p**10*q*r**5 - 470400*p**7*q**3*r**5 - 5088640*p**4*q**5*r**5 + 920*p*q**7*r**5 + 332800*p**8*q*r**6 + 5797120*p**5*q**3*r**6 + 1608000*p**2*q**5*r**6 - 2611200*p**6*q*r**7 - 7424000*p**3*q**3*r**7 - 2323200*q**5*r**7 + 8601600*p**4*q*r**8 + 9472000*p*q**3*r**8 - 10240000*p**2*q*r**9 - 3060*p**7*q**8*s - 39085*p**4*q**10*s - 132300*p*q**12*s + 36580*p**8*q**6*r*s + 520185*p**5*q**8*r*s + 1969860*p**2*q**10*r*s - 144045*p**9*q**4*r**2*s - 2438425*p**6*q**6*r**2*s - 10809475*p**3*q**8*r**2*s + 518850*q**10*r**2*s + 182520*p**10*q**2*r**3*s + 4533930*p**7*q**4*r**3*s + 26196770*p**4*q**6*r**3*s - 4542325*p*q**8*r**3*s + 21600*p**11*r**4*s - 2208080*p**8*q**2*r**4*s - 24787960*p**5*q**4*r**4*s + 10813900*p**2*q**6*r**4*s - 499200*p**9*r**5*s + 3827840*p**6*q**2*r**5*s + 9596000*p**3*q**4*r**5*s + 22662000*q**6*r**5*s + 3916800*p**7*r**6*s - 29952000*p**4*q**2*r**6*s - 90800000*p*q**4*r**6*s - 12902400*p**5*r**7*s + 87040000*p**2*q**2*r**7*s + 15360000*p**3*r**8*s + 12800000*q**2*r**8*s - 38070*p**9*q**5*s**2 - 566700*p**6*q**7*s**2 - 2574375*p**3*q**9*s**2 - 1822500*q**11*s**2 + 292815*p**10*q**3*r*s**2 + 5170280*p**7*q**5*r*s**2 + 27918125*p**4*q**7*r*s**2 + 21997500*p*q**9*r*s**2 - 573480*p**11*q*r**2*s**2 - 14566350*p**8*q**3*r**2*s**2 - 104851575*p**5*q**5*r**2*s**2 - 96448750*p**2*q**7*r**2*s**2 + 11001240*p**9*q*r**3*s**2 + 147798600*p**6*q**3*r**3*s**2 + 158632750*p**3*q**5*r**3*s**2 - 78222500*q**7*r**3*s**2 - 62819200*p**7*q*r**4*s**2 - 136160000*p**4*q**3*r**4*s**2 + 317555000*p*q**5*r**4*s**2 + 160224000*p**5*q*r**5*s**2 - 267600000*p**2*q**3*r**5*s**2 - 153600000*p**3*q*r**6*s**2 - 120000000*q**3*r**6*s**2 - 32000000*p*q*r**7*s**2 - 127575*p**11*q**2*s**3 - 2148750*p**8*q**4*s**3 - 13652500*p**5*q**6*s**3 - 19531250*p**2*q**8*s**3 + 495720*p**12*r*s**3 + 11856375*p**9*q**2*r*s**3 + 107807500*p**6*q**4*r*s**3 + 222334375*p**3*q**6*r*s**3 + 105062500*q**8*r*s**3 - 11566800*p**10*r**2*s**3 - 216787500*p**7*q**2*r**2*s**3 - 633437500*p**4*q**4*r**2*s**3 - 504484375*p*q**6*r**2*s**3 + 90918000*p**8*r**3*s**3 + 567080000*p**5*q**2*r**3*s**3 + 692937500*p**2*q**4*r**3*s**3 - 326640000*p**6*r**4*s**3 - 339000000*p**3*q**2*r**4*s**3 + 369250000*q**4*r**4*s**3 + 560000000*p**4*r**5*s**3 + 508000000*p*q**2*r**5*s**3 - 480000000*p**2*r**6*s**3 + 320000000*r**7*s**3 - 455625*p**10*q*s**4 - 27562500*p**7*q**3*s**4 - 120593750*p**4*q**5*s**4 - 60312500*p*q**7*s**4 + 110615625*p**8*q*r*s**4 + 662984375*p**5*q**3*r*s**4 + 528515625*p**2*q**5*r*s**4 - 541687500*p**6*q*r**2*s**4 - 1262343750*p**3*q**3*r**2*s**4 - 466406250*q**5*r**2*s**4 + 633000000*p**4*q*r**3*s**4 - 1264375000*p*q**3*r**3*s**4 + 1085000000*p**2*q*r**4*s**4 - 2700000000*q*r**5*s**4 - 68343750*p**9*s**5 - 478828125*p**6*q**2*s**5 - 355468750*p**3*q**4*s**5 - 11718750*q**6*s**5 + 718031250*p**7*r*s**5 + 1658593750*p**4*q**2*r*s**5 + 2212890625*p*q**4*r*s**5 - 2855625000*p**5*r**2*s**5 - 4273437500*p**2*q**2*r**2*s**5 + 4537500000*p**3*r**3*s**5 + 8031250000*q**2*r**3*s**5 - 1750000000*p*r**4*s**5 + 1353515625*p**5*q*s**6 + 1562500000*p**2*q**3*s**6 - 3964843750*p**3*q*r*s**6 - 7226562500*q**3*r*s**6 + 1953125000*p*q*r**2*s**6 - 1757812500*p**4*s**7 - 3173828125*p*q**2*s**7 + 6445312500*p**2*r*s**7 - 
3906250000*r**2*s**7 + 6103515625*q*s**8
c[4] = 40*p**6*q**9 + 110*p**3*q**11 - 1080*q**13 - 560*p**7*q**7*r - 1780*p**4*q**9*r + 17370*p*q**11*r + 2850*p**8*q**5*r**2 + 10520*p**5*q**7*r**2 - 115910*p**2*q**9*r**2 - 6090*p**9*q**3*r**3 - 25330*p**6*q**5*r**3 + 448740*p**3*q**7*r**3 + 128230*q**9*r**3 + 4320*p**10*q*r**4 + 16960*p**7*q**3*r**4 - 1143600*p**4*q**5*r**4 - 1410310*p*q**7*r**4 + 3840*p**8*q*r**5 + 1744480*p**5*q**3*r**5 + 5619520*p**2*q**5*r**5 - 1198080*p**6*q*r**6 - 10579200*p**3*q**3*r**6 - 2940800*q**5*r**6 + 8294400*p**4*q*r**7 + 13568000*p*q**3*r**7 - 15360000*p**2*q*r**8 + 840*p**8*q**6*s + 7580*p**5*q**8*s + 24420*p**2*q**10*s - 8100*p**9*q**4*r*s - 94100*p**6*q**6*r*s - 473000*p**3*q**8*r*s - 473400*q**10*r*s + 22680*p**10*q**2*r**2*s + 374370*p**7*q**4*r**2*s + 2888020*p**4*q**6*r**2*s + 5561050*p*q**8*r**2*s - 12960*p**11*r**3*s - 485820*p**8*q**2*r**3*s - 6723440*p**5*q**4*r**3*s - 23561400*p**2*q**6*r**3*s + 190080*p**9*r**4*s + 5894880*p**6*q**2*r**4*s + 50882000*p**3*q**4*r**4*s + 22411500*q**6*r**4*s - 258560*p**7*r**5*s - 46248000*p**4*q**2*r**5*s - 103800000*p*q**4*r**5*s - 3737600*p**5*r**6*s + 119680000*p**2*q**2*r**6*s + 10240000*p**3*r**7*s + 19200000*q**2*r**7*s + 7290*p**10*q**3*s**2 + 117360*p**7*q**5*s**2 + 691250*p**4*q**7*s**2 - 198750*p*q**9*s**2 - 36450*p**11*q*r*s**2 - 854550*p**8*q**3*r*s**2 - 7340700*p**5*q**5*r*s**2 - 2028750*p**2*q**7*r*s**2 + 995490*p**9*q*r**2*s**2 + 18896600*p**6*q**3*r**2*s**2 + 5026500*p**3*q**5*r**2*s**2 - 52272500*q**7*r**2*s**2 - 16636800*p**7*q*r**3*s**2 - 43200000*p**4*q**3*r**3*s**2 + 223426250*p*q**5*r**3*s**2 + 112068000*p**5*q*r**4*s**2 - 177000000*p**2*q**3*r**4*s**2 - 244000000*p**3*q*r**5*s**2 - 156000000*q**3*r**5*s**2 + 43740*p**12*s**3 + 1032750*p**9*q**2*s**3 + 8602500*p**6*q**4*s**3 + 15606250*p**3*q**6*s**3 + 39625000*q**8*s**3 - 1603800*p**10*r*s**3 - 26932500*p**7*q**2*r*s**3 - 19562500*p**4*q**4*r*s**3 - 152000000*p*q**6*r*s**3 + 25555500*p**8*r**2*s**3 + 16230000*p**5*q**2*r**2*s**3 + 42187500*p**2*q**4*r**2*s**3 - 165660000*p**6*r**3*s**3 + 373500000*p**3*q**2*r**3*s**3 + 332937500*q**4*r**3*s**3 + 465000000*p**4*r**4*s**3 + 586000000*p*q**2*r**4*s**3 - 592000000*p**2*r**5*s**3 + 480000000*r**6*s**3 - 1518750*p**8*q*s**4 - 62531250*p**5*q**3*s**4 + 7656250*p**2*q**5*s**4 + 184781250*p**6*q*r*s**4 - 15781250*p**3*q**3*r*s**4 - 135156250*q**5*r*s**4 - 1148250000*p**4*q*r**2*s**4 - 2121406250*p*q**3*r**2*s**4 + 1990000000*p**2*q*r**3*s**4 - 3150000000*q*r**4*s**4 - 2531250*p**7*s**5 + 660937500*p**4*q**2*s**5 + 1339843750*p*q**4*s**5 - 33750000*p**5*r*s**5 - 679687500*p**2*q**2*r*s**5 + 6250000*p**3*r**2*s**5 + 6195312500*q**2*r**2*s**5 + 1125000000*p*r**3*s**5 - 996093750*p**3*q*s**6 - 3125000000*q**3*s**6 - 3222656250*p*q*r*s**6 + 1171875000*p**2*s**7 + 976562500*r*s**7
c[3] = 80*p**4*q**9 + 540*p*q**11 - 600*p**5*q**7*r - 4770*p**2*q**9*r + 1230*p**6*q**5*r**2 + 20900*p**3*q**7*r**2 + 47250*q**9*r**2 - 710*p**7*q**3*r**3 - 84950*p**4*q**5*r**3 - 526310*p*q**7*r**3 + 720*p**8*q*r**4 + 216280*p**5*q**3*r**4 + 2068020*p**2*q**5*r**4 - 198080*p**6*q*r**5 - 3703200*p**3*q**3*r**5 - 1423600*q**5*r**5 + 2860800*p**4*q*r**6 + 7056000*p*q**3*r**6 - 8320000*p**2*q*r**7 - 2720*p**6*q**6*s - 46350*p**3*q**8*s - 178200*q**10*s + 25740*p**7*q**4*r*s + 489490*p**4*q**6*r*s + 2152350*p*q**8*r*s - 61560*p**8*q**2*r**2*s - 1568150*p**5*q**4*r**2*s - 9060500*p**2*q**6*r**2*s + 24840*p**9*r**3*s + 1692380*p**6*q**2*r**3*s + 18098250*p**3*q**4*r**3*s + 9387750*q**6*r**3*s - 382560*p**7*r**4*s - 16818000*p**4*q**2*r**4*s - 49325000*p*q**4*r**4*s + 1212800*p**5*r**5*s + 64840000*p**2*q**2*r**5*s - 320000*p**3*r**6*s + 10400000*q**2*r**6*s - 36450*p**8*q**3*s**2 - 588350*p**5*q**5*s**2 - 2156250*p**2*q**7*s**2 + 123930*p**9*q*r*s**2 + 2879700*p**6*q**3*r*s**2 + 12548000*p**3*q**5*r*s**2 - 14445000*q**7*r*s**2 - 3233250*p**7*q*r**2*s**2 - 28485000*p**4*q**3*r**2*s**2 + 72231250*p*q**5*r**2*s**2 + 32093000*p**5*q*r**3*s**2 - 61275000*p**2*q**3*r**3*s**2 - 107500000*p**3*q*r**4*s**2 - 78500000*q**3*r**4*s**2 + 22000000*p*q*r**5*s**2 - 72900*p**10*s**3 - 1215000*p**7*q**2*s**3 - 2937500*p**4*q**4*s**3 + 9156250*p*q**6*s**3 + 2612250*p**8*r*s**3 + 16560000*p**5*q**2*r*s**3 - 75468750*p**2*q**4*r*s**3 - 32737500*p**6*r**2*s**3 + 169062500*p**3*q**2*r**2*s**3 + 121718750*q**4*r**2*s**3 + 160250000*p**4*r**3*s**3 + 219750000*p*q**2*r**3*s**3 - 317000000*p**2*r**4*s**3 + 260000000*r**5*s**3 + 2531250*p**6*q*s**4 + 22500000*p**3*q**3*s**4 + 39843750*q**5*s**4 - 266343750*p**4*q*r*s**4 - 776406250*p*q**3*r*s**4 + 789062500*p**2*q*r**2*s**4 - 1368750000*q*r**3*s**4 + 67500000*p**5*s**5 + 441406250*p**2*q**2*s**5 - 311718750*p**3*r*s**5 + 1785156250*q**2*r*s**5 + 546875000*p*r**2*s**5 - 1269531250*p*q*s**6 + 488281250*s**7
c[2] = 120*p**5*q**7 + 810*p**2*q**9 - 1280*p**6*q**5*r - 9160*p**3*q**7*r + 3780*q**9*r + 4530*p**7*q**3*r**2 + 36640*p**4*q**5*r**2 - 45270*p*q**7*r**2 - 5400*p**8*q*r**3 - 60920*p**5*q**3*r**3 + 200050*p**2*q**5*r**3 + 31200*p**6*q*r**4 - 476000*p**3*q**3*r**4 - 378200*q**5*r**4 + 521600*p**4*q*r**5 + 1872000*p*q**3*r**5 - 2240000*p**2*q*r**6 + 1440*p**7*q**4*s + 15310*p**4*q**6*s + 59400*p*q**8*s - 9180*p**8*q**2*r*s - 115240*p**5*q**4*r*s - 589650*p**2*q**6*r*s + 16200*p**9*r**2*s + 316710*p**6*q**2*r**2*s + 2547750*p**3*q**4*r**2*s + 2178000*q**6*r**2*s - 259200*p**7*r**3*s - 4123000*p**4*q**2*r**3*s - 11700000*p*q**4*r**3*s + 937600*p**5*r**4*s + 16340000*p**2*q**2*r**4*s - 640000*p**3*r**5*s + 2800000*q**2*r**5*s - 2430*p**9*q*s**2 - 54450*p**6*q**3*s**2 - 285500*p**3*q**5*s**2 - 2767500*q**7*s**2 + 43200*p**7*q*r*s**2 - 916250*p**4*q**3*r*s**2 + 14482500*p*q**5*r*s**2 + 4806000*p**5*q*r**2*s**2 - 13212500*p**2*q**3*r**2*s**2 - 25400000*p**3*q*r**3*s**2 - 18750000*q**3*r**3*s**2 + 8000000*p*q*r**4*s**2 + 121500*p**8*s**3 + 2058750*p**5*q**2*s**3 - 6656250*p**2*q**4*s**3 - 6716250*p**6*r*s**3 + 24125000*p**3*q**2*r*s**3 + 23875000*q**4*r*s**3 + 43125000*p**4*r**2*s**3 + 45750000*p*q**2*r**2*s**3 - 87500000*p**2*r**3*s**3 + 70000000*r**4*s**3 - 44437500*p**4*q*s**4 - 107968750*p*q**3*s**4 + 159531250*p**2*q*r*s**4 - 284375000*q*r**2*s**4 + 7031250*p**3*s**5 + 265625000*q**2*s**5 + 31250000*p*r*s**5
c[1] = 160*p**3*q**7 + 1080*q**9 - 1080*p**4*q**5*r - 8730*p*q**7*r + 1510*p**5*q**3*r**2 + 20420*p**2*q**5*r**2 + 720*p**6*q*r**3 - 23200*p**3*q**3*r**3 - 79900*q**5*r**3 + 35200*p**4*q*r**4 + 404000*p*q**3*r**4 - 480000*p**2*q*r**5 + 960*p**5*q**4*s + 2850*p**2*q**6*s + 540*p**6*q**2*r*s + 63500*p**3*q**4*r*s + 319500*q**6*r*s - 7560*p**7*r**2*s - 253500*p**4*q**2*r**2*s - 1806250*p*q**4*r**2*s + 91200*p**5*r**3*s + 2600000*p**2*q**2*r**3*s - 80000*p**3*r**4*s + 600000*q**2*r**4*s - 4050*p**7*q*s**2 - 120000*p**4*q**3*s**2 - 273750*p*q**5*s**2 + 425250*p**5*q*r*s**2 + 2325000*p**2*q**3*r*s**2 - 5400000*p**3*q*r**2*s**2 - 2875000*q**3*r**2*s**2 + 1500000*p*q*r**3*s**2 - 303750*p**6*s**3 - 843750*p**3*q**2*s**3 - 812500*q**4*s**3 + 5062500*p**4*r*s**3 + 13312500*p*q**2*r*s**3 - 14500000*p**2*r**2*s**3 + 15000000*r**3*s**3 - 3750000*p**2*q*s**4 - 35937500*q*r*s**4 + 11718750*p*s**5
c[0] = 80*p**4*q**5 + 540*p*q**7 - 600*p**5*q**3*r - 4770*p**2*q**5*r + 1080*p**6*q*r**2 + 11200*p**3*q**3*r**2 - 12150*q**5*r**2 - 4800*p**4*q*r**3 + 64000*p*q**3*r**3 - 80000*p**2*q*r**4 + 1080*p**6*q**2*s + 13250*p**3*q**4*s + 54000*q**6*s - 3240*p**7*r*s - 56250*p**4*q**2*r*s - 337500*p*q**4*r*s + 43200*p**5*r**2*s + 560000*p**2*q**2*r**2*s - 80000*p**3*r**3*s + 100000*q**2*r**3*s + 6750*p**5*q*s**2 + 225000*p**2*q**3*s**2 - 900000*p**3*q*r*s**2 - 562500*q**3*r*s**2 + 500000*p*q*r**2*s**2 + 843750*p**4*s**3 + 1937500*p*q**2*s**3 - 3000000*p**2*r*s**3 + 2500000*r**2*s**3 - 5468750*q*s**4
return c
@property
def F(self):
p, q, r, s = self.p, self.q, self.r, self.s
F = 4*p**6*q**6 + 59*p**3*q**8 + 216*q**10 - 36*p**7*q**4*r - 623*p**4*q**6*r - 2610*p*q**8*r + 81*p**8*q**2*r**2 + 2015*p**5*q**4*r**2 + 10825*p**2*q**6*r**2 - 1800*p**6*q**2*r**3 - 17500*p**3*q**4*r**3 + 625*q**6*r**3 + 10000*p**4*q**2*r**4 + 108*p**8*q**3*s + 1584*p**5*q**5*s + 5700*p**2*q**7*s - 486*p**9*q*r*s - 9720*p**6*q**3*r*s - 45050*p**3*q**5*r*s - 9000*q**7*r*s + 10800*p**7*q*r**2*s + 92500*p**4*q**3*r**2*s + 32500*p*q**5*r**2*s - 60000*p**5*q*r**3*s - 50000*p**2*q**3*r**3*s + 729*p**10*s**2 + 12150*p**7*q**2*s**2 + 60000*p**4*q**4*s**2 + 93750*p*q**6*s**2 - 18225*p**8*r*s**2 - 175500*p**5*q**2*r*s**2 - 478125*p**2*q**4*r*s**2 + 135000*p**6*r**2*s**2 + 850000*p**3*q**2*r**2*s**2 + 15625*q**4*r**2*s**2 - 250000*p**4*r**3*s**2 + 225000*p**3*q**3*s**3 + 175000*q**5*s**3 - 1012500*p**4*q*r*s**3 - 1187500*p*q**3*r*s**3 + 1250000*p**2*q*r**2*s**3 + 928125*p**5*s**4 + 1875000*p**2*q**2*s**4 - 2812500*p**3*r*s**4 - 390625*q**2*r*s**4 - 9765625*s**6
return F
def l0(self, theta):
p, q, r, s, F = self.p, self.q, self.r, self.s, self.F
a = self.a
l0 = Poly(a, x).eval(theta)/F
return l0
def T(self, theta, d):
p, q, r, s, F = self.p, self.q, self.r, self.s, self.F
T = [0]*5
b = self.b
# Note that the order of sublists of the b's has been reversed compared to the paper
T[1] = -Poly(b[1], x).eval(theta)/(2*F)
T[2] = Poly(b[2], x).eval(theta)/(2*d*F)
T[3] = Poly(b[3], x).eval(theta)/(2*F)
T[4] = Poly(b[4], x).eval(theta)/(2*d*F)
return T
def order(self, theta, d):
p, q, r, s, F = self.p, self.q, self.r, self.s, self.F
o = self.o
order = Poly(o, x).eval(theta)/(d*F)
return N(order)
def uv(self, theta, d):
c = self.c
u = S(-25*self.q/2)
v = Poly(c, x).eval(theta)/(2*d*self.F)
return N(u), N(v)
@property
def zeta(self):
return [self.zeta1, self.zeta2, self.zeta3, self.zeta4]
|
bsd-3-clause
|
batermj/algorithm-challenger
|
code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/distutils/command/bdist_dumb.py
|
12
|
4913
|
"""distutils.command.bdist_dumb
Implements the Distutils 'bdist_dumb' command (create a "dumb" built
distribution -- i.e., just an archive to be unpacked under $prefix or
$exec_prefix)."""
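# Example invocation (illustrative only; project layout and target directory
# are hypothetical):
#   python setup.py bdist_dumb --format=gztar --dist-dir=dist
# This builds the project, performs a pseudo-installation and archives the
# result under dist/.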
import os
from distutils.core import Command
from distutils.util import get_platform
from distutils.dir_util import remove_tree, ensure_relative
from distutils.errors import *
from distutils.sysconfig import get_python_version
from distutils import log
class bdist_dumb(Command):
description = "create a \"dumb\" built distribution"
user_options = [('bdist-dir=', 'd',
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('format=', 'f',
"archive format to create (tar, gztar, bztar, xztar, "
"ztar, zip)"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('relative', None,
"build the archive using relative paths"
"(default: false)"),
('owner=', 'u',
"Owner name used when creating a tar file"
" [default: current user]"),
('group=', 'g',
"Group name used when creating a tar file"
" [default: current group]"),
]
boolean_options = ['keep-temp', 'skip-build', 'relative']
default_format = { 'posix': 'gztar',
'nt': 'zip' }
def initialize_options(self):
self.bdist_dir = None
self.plat_name = None
self.format = None
self.keep_temp = 0
self.dist_dir = None
self.skip_build = None
self.relative = 0
self.owner = None
self.group = None
def finalize_options(self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'dumb')
if self.format is None:
try:
self.format = self.default_format[os.name]
except KeyError:
raise DistutilsPlatformError(
"don't know how to create dumb built distributions "
"on platform %s" % os.name)
self.set_undefined_options('bdist',
('dist_dir', 'dist_dir'),
('plat_name', 'plat_name'),
('skip_build', 'skip_build'))
def run(self):
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install', reinit_subcommands=1)
install.root = self.bdist_dir
install.skip_build = self.skip_build
install.warn_dir = 0
log.info("installing to %s" % self.bdist_dir)
self.run_command('install')
# And make an archive relative to the root of the
# pseudo-installation tree.
archive_basename = "%s.%s" % (self.distribution.get_fullname(),
self.plat_name)
pseudoinstall_root = os.path.join(self.dist_dir, archive_basename)
if not self.relative:
archive_root = self.bdist_dir
else:
if (self.distribution.has_ext_modules() and
(install.install_base != install.install_platbase)):
raise DistutilsPlatformError(
"can't make a dumb built distribution where "
"base and platbase are different (%s, %s)"
% (repr(install.install_base),
repr(install.install_platbase)))
else:
archive_root = os.path.join(self.bdist_dir,
ensure_relative(install.install_base))
# Make the archive
filename = self.make_archive(pseudoinstall_root,
self.format, root_dir=archive_root,
owner=self.owner, group=self.group)
if self.distribution.has_ext_modules():
pyversion = get_python_version()
else:
pyversion = 'any'
self.distribution.dist_files.append(('bdist_dumb', pyversion,
filename))
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
|
apache-2.0
|
kbrebanov/ansible
|
lib/ansible/module_utils/vmware.py
|
2
|
28470
|
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import atexit
import os
import ssl
import time
try:
# requests is required for exception handling of the ConnectionError
import requests
from pyVim import connect
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
from ansible.module_utils._text import to_text
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.six import integer_types, iteritems, string_types
class TaskError(Exception):
pass
def wait_for_task(task):
while True:
if task.info.state == vim.TaskInfo.State.success:
return True, task.info.result
if task.info.state == vim.TaskInfo.State.error:
try:
raise TaskError(task.info.error)
except AttributeError:
raise TaskError("An unknown error has occurred")
if task.info.state == vim.TaskInfo.State.running:
time.sleep(15)
if task.info.state == vim.TaskInfo.State.queued:
time.sleep(15)
def find_obj(content, vimtype, name, first=True):
container = content.viewManager.CreateContainerView(container=content.rootFolder, recursive=True, type=vimtype)
obj_list = container.view
container.Destroy()
# Backward compatible with former get_obj() function
if name is None:
if obj_list:
return obj_list[0]
return None
# Select the first match
if first is True:
for obj in obj_list:
if obj.name == name:
return obj
# If no object found, return None
return None
# Return all matching objects if needed
return [obj for obj in obj_list if obj.name == name]
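# Example usage (illustrative only; the object names are hypothetical):
#   host = find_obj(content, [vim.HostSystem], 'esxi01')                 # first host named 'esxi01'
#   any_dc = find_obj(content, [vim.Datacenter], None)                   # first object of that type
#   matches = find_obj(content, [vim.VirtualMachine], 'web', first=False)  # list of all name matches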
def find_dvspg_by_name(dv_switch, portgroup_name):
portgroups = dv_switch.portgroup
for pg in portgroups:
if pg.name == portgroup_name:
return pg
return None
def find_entity_child_by_path(content, entityRootFolder, path):
entity = entityRootFolder
searchIndex = content.searchIndex
paths = path.split("/")
try:
for path in paths:
entity = searchIndex.FindChild(entity, path)
if entity.name == paths[-1]:
return entity
except:
pass
return None
# Maintain for legacy, or remove with 2.1 ?
# Should be replaced with find_cluster_by_name
def find_cluster_by_name_datacenter(datacenter, cluster_name):
host_folder = datacenter.hostFolder
for folder in host_folder.childEntity:
if folder.name == cluster_name:
return folder
return None
def find_cluster_by_name(content, cluster_name, datacenter=None):
if datacenter:
folder = datacenter.hostFolder
else:
folder = content.rootFolder
clusters = get_all_objs(content, [vim.ClusterComputeResource], folder)
for cluster in clusters:
if cluster.name == cluster_name:
return cluster
return None
def find_datacenter_by_name(content, datacenter_name):
datacenters = get_all_objs(content, [vim.Datacenter])
for dc in datacenters:
if dc.name == datacenter_name:
return dc
return None
def find_datastore_by_name(content, datastore_name):
datastores = get_all_objs(content, [vim.Datastore])
for ds in datastores:
if ds.name == datastore_name:
return ds
return None
def find_dvs_by_name(content, switch_name):
# https://github.com/vmware/govmomi/issues/879
# https://github.com/ansible/ansible/pull/31798#issuecomment-336936222
try:
vmware_distributed_switches = get_all_objs(content, [vim.dvs.VmwareDistributedVirtualSwitch])
except IndexError:
vmware_distributed_switches = get_all_objs(content, [vim.DistributedVirtualSwitch])
for dvs in vmware_distributed_switches:
if dvs.name == switch_name:
return dvs
return None
def find_hostsystem_by_name(content, hostname):
host_system = get_all_objs(content, [vim.HostSystem])
for host in host_system:
if host.name == hostname:
return host
return None
def find_vm_by_id(content, vm_id, vm_id_type="vm_name", datacenter=None, cluster=None, folder=None, match_first=False):
""" UUID is unique to a VM, every other id returns the first match. """
si = content.searchIndex
vm = None
if vm_id_type == 'dns_name':
vm = si.FindByDnsName(datacenter=datacenter, dnsName=vm_id, vmSearch=True)
elif vm_id_type == 'uuid':
# Search By BIOS UUID rather than instance UUID
vm = si.FindByUuid(datacenter=datacenter, instanceUuid=False, uuid=vm_id, vmSearch=True)
elif vm_id_type == 'ip':
vm = si.FindByIp(datacenter=datacenter, ip=vm_id, vmSearch=True)
elif vm_id_type == 'vm_name':
folder = None
if cluster:
folder = cluster
elif datacenter:
folder = datacenter.hostFolder
vm = find_vm_by_name(content, vm_id, folder)
elif vm_id_type == 'inventory_path':
searchpath = folder
# get all objects for this path
f_obj = si.FindByInventoryPath(searchpath)
if f_obj:
if isinstance(f_obj, vim.Datacenter):
f_obj = f_obj.vmFolder
for c_obj in f_obj.childEntity:
if not isinstance(c_obj, vim.VirtualMachine):
continue
if c_obj.name == vm_id:
vm = c_obj
if match_first:
break
return vm
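# Example usage (illustrative only; vm_uuid, 'web01' and dc are hypothetical):
#   vm = find_vm_by_id(content, vm_id=vm_uuid, vm_id_type='uuid')
#   vm = find_vm_by_id(content, vm_id='web01', vm_id_type='vm_name', datacenter=dc)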
def find_vm_by_name(content, vm_name, folder=None, recurse=True):
vms = get_all_objs(content, [vim.VirtualMachine], folder, recurse=recurse)
for vm in vms:
if vm.name == vm_name:
return vm
return None
def find_host_portgroup_by_name(host, portgroup_name):
for portgroup in host.config.network.portgroup:
if portgroup.spec.name == portgroup_name:
return portgroup
return None
def compile_folder_path_for_object(vobj):
""" make a /vm/foo/bar/baz like folder path for an object """
paths = []
if isinstance(vobj, vim.Folder):
paths.append(vobj.name)
thisobj = vobj
while hasattr(thisobj, 'parent'):
thisobj = thisobj.parent
try:
moid = thisobj._moId
except AttributeError:
moid = None
if moid in ['group-d1', 'ha-folder-root']:
break
if isinstance(thisobj, vim.Folder):
paths.append(thisobj.name)
paths.reverse()
return '/' + '/'.join(paths)
def _get_vm_prop(vm, attributes):
"""Safely get a property or return None"""
result = vm
for attribute in attributes:
try:
result = getattr(result, attribute)
except (AttributeError, IndexError):
return None
return result
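# Example usage (illustrative only): returns None instead of raising when any
# attribute in the chain is missing, e.g.
#   tools_version = _get_vm_prop(vm, ('guest', 'toolsVersion'))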
def gather_vm_facts(content, vm):
""" Gather facts from vim.VirtualMachine object. """
facts = {
'module_hw': True,
'hw_name': vm.config.name,
'hw_power_status': vm.summary.runtime.powerState,
'hw_guest_full_name': vm.summary.guest.guestFullName,
'hw_guest_id': vm.summary.guest.guestId,
'hw_product_uuid': vm.config.uuid,
'hw_processor_count': vm.config.hardware.numCPU,
'hw_cores_per_socket': vm.config.hardware.numCoresPerSocket,
'hw_memtotal_mb': vm.config.hardware.memoryMB,
'hw_interfaces': [],
'hw_datastores': [],
'hw_files': [],
'hw_esxi_host': None,
'hw_guest_ha_state': None,
'hw_is_template': vm.config.template,
'hw_folder': None,
'guest_tools_status': _get_vm_prop(vm, ('guest', 'toolsRunningStatus')),
'guest_tools_version': _get_vm_prop(vm, ('guest', 'toolsVersion')),
'guest_question': vm.summary.runtime.question,
'guest_consolidation_needed': vm.summary.runtime.consolidationNeeded,
'ipv4': None,
'ipv6': None,
'annotation': vm.config.annotation,
'customvalues': {},
'snapshots': [],
'current_snapshot': None,
}
# facts that may or may not exist
if vm.summary.runtime.host:
host = vm.summary.runtime.host
facts['hw_esxi_host'] = host.summary.config.name
if vm.summary.runtime.dasVmProtection:
facts['hw_guest_ha_state'] = vm.summary.runtime.dasVmProtection.dasProtected
datastores = vm.datastore
for ds in datastores:
facts['hw_datastores'].append(ds.info.name)
try:
files = vm.config.files
layout = vm.layout
if files:
facts['hw_files'] = [files.vmPathName]
for item in layout.snapshot:
for snap in item.snapshotFile:
facts['hw_files'].append(files.snapshotDirectory + snap)
for item in layout.configFile:
facts['hw_files'].append(os.path.dirname(files.vmPathName) + '/' + item)
for item in vm.layout.logFile:
facts['hw_files'].append(files.logDirectory + item)
for item in vm.layout.disk:
for disk in item.diskFile:
facts['hw_files'].append(disk)
except:
pass
folder = vm.parent
if folder:
foldername = folder.name
fp = folder.parent
# climb back up the tree to find our path, stop before the root folder
while fp is not None and fp.name is not None and fp != content.rootFolder:
foldername = fp.name + '/' + foldername
try:
fp = fp.parent
except:
break
foldername = '/' + foldername
facts['hw_folder'] = foldername
cfm = content.customFieldsManager
# Resolve custom values
for value_obj in vm.summary.customValue:
kn = value_obj.key
if cfm is not None and cfm.field:
for f in cfm.field:
if f.key == value_obj.key:
kn = f.name
# Exit the loop immediately, we found it
break
facts['customvalues'][kn] = value_obj.value
net_dict = {}
vmnet = _get_vm_prop(vm, ('guest', 'net'))
if vmnet:
for device in vmnet:
net_dict[device.macAddress] = list(device.ipAddress)
for k, v in iteritems(net_dict):
for ipaddress in v:
if ipaddress:
if '::' in ipaddress:
facts['ipv6'] = ipaddress
else:
facts['ipv4'] = ipaddress
ethernet_idx = 0
for idx, entry in enumerate(vm.config.hardware.device):
if not hasattr(entry, 'macAddress'):
continue
if entry.macAddress:
mac_addr = entry.macAddress
mac_addr_dash = mac_addr.replace(':', '-')
else:
mac_addr = mac_addr_dash = None
factname = 'hw_eth' + str(ethernet_idx)
facts[factname] = {
'addresstype': entry.addressType,
'label': entry.deviceInfo.label,
'macaddress': mac_addr,
'ipaddresses': net_dict.get(entry.macAddress, None),
'macaddress_dash': mac_addr_dash,
'summary': entry.deviceInfo.summary,
}
facts['hw_interfaces'].append('eth' + str(ethernet_idx))
ethernet_idx += 1
snapshot_facts = list_snapshots(vm)
if 'snapshots' in snapshot_facts:
facts['snapshots'] = snapshot_facts['snapshots']
facts['current_snapshot'] = snapshot_facts['current_snapshot']
return facts
def deserialize_snapshot_obj(obj):
return {'id': obj.id,
'name': obj.name,
'description': obj.description,
'creation_time': obj.createTime,
'state': obj.state}
def list_snapshots_recursively(snapshots):
snapshot_data = []
for snapshot in snapshots:
snapshot_data.append(deserialize_snapshot_obj(snapshot))
snapshot_data = snapshot_data + list_snapshots_recursively(snapshot.childSnapshotList)
return snapshot_data
def get_current_snap_obj(snapshots, snapob):
snap_obj = []
for snapshot in snapshots:
if snapshot.snapshot == snapob:
snap_obj.append(snapshot)
snap_obj = snap_obj + get_current_snap_obj(snapshot.childSnapshotList, snapob)
return snap_obj
def list_snapshots(vm):
result = {}
snapshot = _get_vm_prop(vm, ('snapshot',))
if not snapshot:
return result
if vm.snapshot is None:
return result
result['snapshots'] = list_snapshots_recursively(vm.snapshot.rootSnapshotList)
current_snapref = vm.snapshot.currentSnapshot
current_snap_obj = get_current_snap_obj(vm.snapshot.rootSnapshotList, current_snapref)
result['current_snapshot'] = deserialize_snapshot_obj(current_snap_obj[0])
return result
def vmware_argument_spec():
return dict(
hostname=dict(type='str', required=True),
username=dict(type='str', aliases=['user', 'admin'], required=True),
password=dict(type='str', aliases=['pass', 'pwd'], required=True, no_log=True),
validate_certs=dict(type='bool', required=False, default=True),
)
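# Typical module boilerplate built on this spec (illustrative only; the extra
# 'datacenter' option is an assumption, AnsibleModule comes from
# ansible.module_utils.basic):
#   argument_spec = vmware_argument_spec()
#   argument_spec.update(datacenter=dict(type='str', required=True))
#   module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
#   content = connect_to_api(module)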
def connect_to_api(module, disconnect_atexit=True):
hostname = module.params['hostname']
username = module.params['username']
password = module.params['password']
validate_certs = module.params['validate_certs']
if validate_certs and not hasattr(ssl, 'SSLContext'):
module.fail_json(msg='pyVim does not support changing verification mode with python < 2.7.9. Either update '
'python or use validate_certs=false')
ssl_context = None
if not validate_certs:
ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ssl_context.verify_mode = ssl.CERT_NONE
service_instance = None
try:
service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password, sslContext=ssl_context)
except vim.fault.InvalidLogin as e:
module.fail_json(msg="Unable to log on to vCenter or ESXi API at %s as %s: %s" % (hostname, username, e.msg))
except vim.fault.NoPermission as e:
module.fail_json(msg="User %s does not have required permission"
" to log on to vCenter or ESXi API at %s: %s" % (username, hostname, e.msg))
except (requests.ConnectionError, ssl.SSLError) as e:
module.fail_json(msg="Unable to connect to vCenter or ESXi API at %s on TCP/443: %s" % (hostname, e))
except vmodl.fault.InvalidRequest as e:
# Request is malformed
module.fail_json(msg="Failed to get a response from server %s as "
"request is malformed: %s" % (hostname, e.msg))
except Exception as e:
module.fail_json(msg="Unknown error while connecting to vCenter or ESXi API at %s: %s" % (hostname, e))
if service_instance is None:
module.fail_json(msg="Unknown error while connecting to vCenter or ESXi API at %s" % hostname)
# Disabling atexit should be used in special cases only, such as an IP change
# of the ESXi host, which invalidates the connection anyway.
# Disabling it also significantly speeds up the return of the module.
if disconnect_atexit:
atexit.register(connect.Disconnect, service_instance)
return service_instance.RetrieveContent()
def get_all_objs(content, vimtype, folder=None, recurse=True):
if not folder:
folder = content.rootFolder
obj = {}
container = content.viewManager.CreateContainerView(folder, vimtype, recurse)
for managed_object_ref in container.view:
obj.update({managed_object_ref: managed_object_ref.name})
return obj
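# Example usage (illustrative only): the result maps managed object references
# to their names, e.g.
#   vms = get_all_objs(content, [vim.VirtualMachine])
#   vm_names = list(vms.values())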
def fetch_file_from_guest(module, content, vm, username, password, src, dest):
""" Use VMWare's filemanager api to fetch a file over http """
result = {'failed': False}
tools_status = vm.guest.toolsStatus
if tools_status == 'toolsNotInstalled' or tools_status == 'toolsNotRunning':
result['failed'] = True
result['msg'] = "VMwareTools is not installed or is not running in the guest"
return result
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
creds = vim.vm.guest.NamePasswordAuthentication(
username=username, password=password
)
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/FileManager/FileTransferInformation.rst
fti = content.guestOperationsManager.fileManager. \
InitiateFileTransferFromGuest(vm, creds, src)
result['size'] = fti.size
result['url'] = fti.url
# Use module_utils to fetch the remote url returned from the api
rsp, info = fetch_url(module, fti.url, use_proxy=False,
force=True, last_mod_time=None,
timeout=10, headers=None)
# save all of the transfer data
for k, v in iteritems(info):
result[k] = v
# exit early if xfer failed
if info['status'] != 200:
result['failed'] = True
return result
# attempt to read the content and write it
try:
with open(dest, 'wb') as f:
f.write(rsp.read())
except Exception as e:
result['failed'] = True
result['msg'] = str(e)
return result
def push_file_to_guest(module, content, vm, username, password, src, dest, overwrite=True):
""" Use VMWare's filemanager api to fetch a file over http """
result = {'failed': False}
tools_status = vm.guest.toolsStatus
if tools_status == 'toolsNotInstalled' or tools_status == 'toolsNotRunning':
result['failed'] = True
result['msg'] = "VMwareTools is not installed or is not running in the guest"
return result
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
creds = vim.vm.guest.NamePasswordAuthentication(
username=username, password=password
)
# the api requires a filesize in bytes
fdata = None
try:
# filesize = os.path.getsize(src)
filesize = os.stat(src).st_size
with open(src, 'rb') as f:
fdata = f.read()
result['local_filesize'] = filesize
except Exception as e:
result['failed'] = True
result['msg'] = "Unable to read src file: %s" % str(e)
return result
# https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.vm.guest.FileManager.html#initiateFileTransferToGuest
file_attribute = vim.vm.guest.FileManager.FileAttributes()
url = content.guestOperationsManager.fileManager. \
InitiateFileTransferToGuest(vm, creds, dest, file_attribute,
filesize, overwrite)
# PUT the filedata to the url ...
rsp, info = fetch_url(module, url, method="put", data=fdata,
use_proxy=False, force=True, last_mod_time=None,
timeout=10, headers=None)
result['msg'] = str(rsp.read())
# save all of the transfer data
for k, v in iteritems(info):
result[k] = v
return result
def run_command_in_guest(content, vm, username, password, program_path, program_args, program_cwd, program_env):
result = {'failed': False}
tools_status = vm.guest.toolsStatus
if (tools_status == 'toolsNotInstalled' or
tools_status == 'toolsNotRunning'):
result['failed'] = True
result['msg'] = "VMwareTools is not installed or is not running in the guest"
return result
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
creds = vim.vm.guest.NamePasswordAuthentication(
username=username, password=password
)
try:
# https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/ProcessManager.rst
pm = content.guestOperationsManager.processManager
# https://www.vmware.com/support/developer/converter-sdk/conv51_apireference/vim.vm.guest.ProcessManager.ProgramSpec.html
ps = vim.vm.guest.ProcessManager.ProgramSpec(
# programPath=program,
# arguments=args
programPath=program_path,
arguments=program_args,
workingDirectory=program_cwd,
)
res = pm.StartProgramInGuest(vm, creds, ps)
result['pid'] = res
pdata = pm.ListProcessesInGuest(vm, creds, [res])
# wait for pid to finish
while not pdata[0].endTime:
time.sleep(1)
pdata = pm.ListProcessesInGuest(vm, creds, [res])
result['owner'] = pdata[0].owner
result['startTime'] = pdata[0].startTime.isoformat()
result['endTime'] = pdata[0].endTime.isoformat()
result['exitCode'] = pdata[0].exitCode
if result['exitCode'] != 0:
result['failed'] = True
result['msg'] = "program exited non-zero"
else:
result['msg'] = "program completed successfully"
except Exception as e:
result['msg'] = str(e)
result['failed'] = True
return result
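# Example usage (illustrative only; credentials and program are hypothetical):
#   result = run_command_in_guest(content, vm, 'root', 'secret',
#                                 '/bin/touch', '/tmp/testfile', '/tmp', None)
#   # on success, result contains 'pid', 'exitCode', 'startTime' and 'endTime'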
def serialize_spec(clonespec):
"""Serialize a clonespec or a relocation spec"""
data = {}
attrs = dir(clonespec)
attrs = [x for x in attrs if not x.startswith('_')]
for x in attrs:
xo = getattr(clonespec, x)
if callable(xo):
continue
xt = type(xo)
if xo is None:
data[x] = None
elif isinstance(xo, vim.vm.ConfigSpec):
data[x] = serialize_spec(xo)
elif isinstance(xo, vim.vm.RelocateSpec):
data[x] = serialize_spec(xo)
elif isinstance(xo, vim.vm.device.VirtualDisk):
data[x] = serialize_spec(xo)
elif isinstance(xo, vim.vm.device.VirtualDeviceSpec.FileOperation):
data[x] = to_text(xo)
elif isinstance(xo, vim.Description):
data[x] = {
'dynamicProperty': serialize_spec(xo.dynamicProperty),
'dynamicType': serialize_spec(xo.dynamicType),
'label': serialize_spec(xo.label),
'summary': serialize_spec(xo.summary),
}
elif hasattr(xo, 'name'):
data[x] = to_text(xo) + ':' + to_text(xo.name)
elif isinstance(xo, vim.vm.ProfileSpec):
pass
elif issubclass(xt, list):
data[x] = []
for xe in xo:
data[x].append(serialize_spec(xe))
elif issubclass(xt, string_types + integer_types + (float, bool)):
if issubclass(xt, integer_types):
data[x] = int(xo)
else:
data[x] = to_text(xo)
elif issubclass(xt, bool):
data[x] = xo
elif issubclass(xt, dict):
data[to_text(x)] = {}
for k, v in xo.items():
k = to_text(k)
data[x][k] = serialize_spec(v)
else:
data[x] = str(xt)
return data
def find_host_by_cluster_datacenter(module, content, datacenter_name, cluster_name, host_name):
dc = find_datacenter_by_name(content, datacenter_name)
if dc is None:
module.fail_json(msg="Unable to find datacenter with name %s" % datacenter_name)
cluster = find_cluster_by_name(content, cluster_name, datacenter=dc)
if cluster is None:
module.fail_json(msg="Unable to find cluster with name %s" % cluster_name)
for host in cluster.host:
if host.name == host_name:
return host, cluster
return None, cluster
def set_vm_power_state(content, vm, state, force):
"""
Set the power status for a VM determined by the current and
requested states. force allows the change even when the VM is in a
transitional or suspended power state.
"""
facts = gather_vm_facts(content, vm)
expected_state = state.replace('_', '').replace('-', '').lower()
current_state = facts['hw_power_status'].lower()
result = dict(
changed=False,
failed=False,
)
# Need Force
if not force and current_state not in ['poweredon', 'poweredoff']:
result['failed'] = True
result['msg'] = "Virtual Machine is in %s power state. Force is required!" % current_state
return result
# State is not already true
if current_state != expected_state:
task = None
try:
if expected_state == 'poweredoff':
task = vm.PowerOff()
elif expected_state == 'poweredon':
task = vm.PowerOn()
elif expected_state == 'restarted':
if current_state in ('poweredon', 'poweringon', 'resetting', 'poweredoff'):
task = vm.Reset()
else:
result['failed'] = True
result['msg'] = "Cannot restart virtual machine in the current state %s" % current_state
elif expected_state == 'suspended':
if current_state in ('poweredon', 'poweringon'):
task = vm.Suspend()
else:
result['failed'] = True
result['msg'] = 'Cannot suspend virtual machine in the current state %s' % current_state
elif expected_state in ['shutdownguest', 'rebootguest']:
if current_state == 'poweredon':
if vm.guest.toolsRunningStatus == 'guestToolsRunning':
if expected_state == 'shutdownguest':
task = vm.ShutdownGuest()
else:
task = vm.RebootGuest()
# Set result['changed'] immediately because
# shutdown and reboot return None.
result['changed'] = True
else:
result['failed'] = True
result['msg'] = "VMware tools should be installed for guest shutdown/reboot"
else:
result['failed'] = True
result['msg'] = "Virtual machine %s must be in poweredon state for guest shutdown/reboot" % vm.name
else:
result['failed'] = True
result['msg'] = "Unsupported expected state provided: %s" % expected_state
except Exception as e:
result['failed'] = True
result['msg'] = to_text(e)
if task:
wait_for_task(task)
if task.info.state == 'error':
result['failed'] = True
result['msg'] = task.info.error.msg
else:
result['changed'] = True
# need to get new metadata if changed
if result['changed']:
result['instance'] = gather_vm_facts(content, vm)
return result
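# Illustrative sketch (hypothetical helper, not part of the original module):
# one way set_vm_power_state() might be driven from an Ansible module, assuming
# `module`, `content` and `vm` come from the usual connect_to_api() /
# find_vm_by_id() plumbing defined elsewhere in this file.
def _example_power_off(module, content, vm):
    # Request a forced power-off and report the outcome back to Ansible.
    result = set_vm_power_state(content, vm, state='powered-off', force=True)
    if result.get('failed'):
        module.fail_json(**result)
    module.exit_json(**result)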
class PyVmomi(object):
def __init__(self, module):
if not HAS_PYVMOMI:
module.fail_json(msg='PyVmomi Python module required. Install using "pip install PyVmomi"')
self.module = module
self.params = module.params
self.si = None
self.current_vm_obj = None
self.content = connect_to_api(self.module)
def get_vm(self):
vm = None
match_first = (self.params['name_match'] == 'first')
if self.params['uuid']:
vm = find_vm_by_id(self.content, vm_id=self.params['uuid'], vm_id_type="uuid")
elif self.params['folder'] and self.params['name']:
vm = find_vm_by_id(self.content, vm_id=self.params['name'], vm_id_type="inventory_path",
folder=self.params['folder'], match_first=match_first)
if vm:
self.current_vm_obj = vm
return vm
def gather_facts(self, vm):
return gather_vm_facts(self.content, vm)
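# Illustrative sketch (hypothetical helper, not part of the original module):
# how the PyVmomi wrapper might be used from a module's main() entry point,
# assuming an AnsibleModule whose params include uuid/name/folder/name_match.
def _example_gather_facts(module):
    pyv = PyVmomi(module)
    vm = pyv.get_vm()
    if vm is None:
        module.fail_json(msg="Unable to find the requested virtual machine")
    module.exit_json(changed=False, instance=pyv.gather_facts(vm))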
|
gpl-3.0
|
ktosiek/spacewalk
|
backend/server/test/unit-test/rhnSQL/test_rhnServerGroup.py
|
3
|
2625
|
#!/usr/bin/python
#
# Copyright (c) 2008--2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
#
#
import os
import sys
import unittest
from spacewalk.server import rhnSQL
sys.path.insert(
0,
os.path.abspath(os.path.dirname(os.path.abspath(__file__) + "/../../../attic/"))
)
import rhnServerGroup
import misc_functions
DB_SETTINGS = misc_functions.db_settings("oracle")
class Tests(unittest.TestCase):
def setUp(self):
rhnSQL.initDB(
backend = "oracle",
username = DB_SETTINGS["user"],
password = DB_SETTINGS["password"],
database = DB_SETTINGS["database"]
)
rhnSQL.clear_log_id()
def tearDown(self):
# Roll back any unsaved data
rhnSQL.rollback()
def test_new_org_1(self):
org_id = misc_functions.create_new_org()
h = rhnSQL.prepare("select id from web_customer where id = :id")
h.execute(id=org_id)
row = h.fetchone_dict()
self.assertNotEqual(row, None)
self.assertEqual(row['id'], org_id)
def test_new_server_group_new_org_1(self):
org_id = misc_functions.create_new_org()
params = misc_functions.build_server_group_params(org_id=org_id)
misc_functions.create_server_group(params)
s = misc_functions.fetch_server_group(params['org_id'], params['name'])
self.assertEqual(s.get_name(), params['name'])
self.assertEqual(s.get_description(), params['description'])
self.assertEqual(s.get_max_members(), params['max_members'])
def test_exception_user_missing_1(self):
params = misc_functions.build_server_group_params(org_id="no such user")
self.assertRaises(rhnServerGroup.InvalidUserError,
misc_functions.create_server_group, params)
def test_exception_org_missing_1(self):
params = misc_functions.build_server_group_params(org_id=-1)
self.assertRaises(rhnServerGroup.InvalidOrgError,
misc_functions.create_server_group, params)
if __name__ == '__main__':
sys.exit(unittest.main() or 0)
|
gpl-2.0
|
evro/CouchPotatoServer
|
libs/subliminal/language.py
|
107
|
54658
|
# -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <[email protected]>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from .utils import to_unicode
import re
import logging
logger = logging.getLogger(__name__)
COUNTRIES = [('AF', 'AFG', '004', u'Afghanistan'),
('AX', 'ALA', '248', u'Åland Islands'),
('AL', 'ALB', '008', u'Albania'),
('DZ', 'DZA', '012', u'Algeria'),
('AS', 'ASM', '016', u'American Samoa'),
('AD', 'AND', '020', u'Andorra'),
('AO', 'AGO', '024', u'Angola'),
('AI', 'AIA', '660', u'Anguilla'),
('AQ', 'ATA', '010', u'Antarctica'),
('AG', 'ATG', '028', u'Antigua and Barbuda'),
('AR', 'ARG', '032', u'Argentina'),
('AM', 'ARM', '051', u'Armenia'),
('AW', 'ABW', '533', u'Aruba'),
('AU', 'AUS', '036', u'Australia'),
('AT', 'AUT', '040', u'Austria'),
('AZ', 'AZE', '031', u'Azerbaijan'),
('BS', 'BHS', '044', u'Bahamas'),
('BH', 'BHR', '048', u'Bahrain'),
('BD', 'BGD', '050', u'Bangladesh'),
('BB', 'BRB', '052', u'Barbados'),
('BY', 'BLR', '112', u'Belarus'),
('BE', 'BEL', '056', u'Belgium'),
('BZ', 'BLZ', '084', u'Belize'),
('BJ', 'BEN', '204', u'Benin'),
('BM', 'BMU', '060', u'Bermuda'),
('BT', 'BTN', '064', u'Bhutan'),
('BO', 'BOL', '068', u'Bolivia, Plurinational State of'),
('BQ', 'BES', '535', u'Bonaire, Sint Eustatius and Saba'),
('BA', 'BIH', '070', u'Bosnia and Herzegovina'),
('BW', 'BWA', '072', u'Botswana'),
('BV', 'BVT', '074', u'Bouvet Island'),
('BR', 'BRA', '076', u'Brazil'),
('IO', 'IOT', '086', u'British Indian Ocean Territory'),
('BN', 'BRN', '096', u'Brunei Darussalam'),
('BG', 'BGR', '100', u'Bulgaria'),
('BF', 'BFA', '854', u'Burkina Faso'),
('BI', 'BDI', '108', u'Burundi'),
('KH', 'KHM', '116', u'Cambodia'),
('CM', 'CMR', '120', u'Cameroon'),
('CA', 'CAN', '124', u'Canada'),
('CV', 'CPV', '132', u'Cape Verde'),
('KY', 'CYM', '136', u'Cayman Islands'),
('CF', 'CAF', '140', u'Central African Republic'),
('TD', 'TCD', '148', u'Chad'),
('CL', 'CHL', '152', u'Chile'),
('CN', 'CHN', '156', u'China'),
('CX', 'CXR', '162', u'Christmas Island'),
('CC', 'CCK', '166', u'Cocos (Keeling) Islands'),
('CO', 'COL', '170', u'Colombia'),
('KM', 'COM', '174', u'Comoros'),
('CG', 'COG', '178', u'Congo'),
('CD', 'COD', '180', u'Congo, The Democratic Republic of the'),
('CK', 'COK', '184', u'Cook Islands'),
('CR', 'CRI', '188', u'Costa Rica'),
('CI', 'CIV', '384', u'Côte d\'Ivoire'),
('HR', 'HRV', '191', u'Croatia'),
('CU', 'CUB', '192', u'Cuba'),
('CW', 'CUW', '531', u'Curaçao'),
('CY', 'CYP', '196', u'Cyprus'),
('CZ', 'CZE', '203', u'Czech Republic'),
('DK', 'DNK', '208', u'Denmark'),
('DJ', 'DJI', '262', u'Djibouti'),
('DM', 'DMA', '212', u'Dominica'),
('DO', 'DOM', '214', u'Dominican Republic'),
('EC', 'ECU', '218', u'Ecuador'),
('EG', 'EGY', '818', u'Egypt'),
('SV', 'SLV', '222', u'El Salvador'),
('GQ', 'GNQ', '226', u'Equatorial Guinea'),
('ER', 'ERI', '232', u'Eritrea'),
('EE', 'EST', '233', u'Estonia'),
('ET', 'ETH', '231', u'Ethiopia'),
('FK', 'FLK', '238', u'Falkland Islands (Malvinas)'),
('FO', 'FRO', '234', u'Faroe Islands'),
('FJ', 'FJI', '242', u'Fiji'),
('FI', 'FIN', '246', u'Finland'),
('FR', 'FRA', '250', u'France'),
('GF', 'GUF', '254', u'French Guiana'),
('PF', 'PYF', '258', u'French Polynesia'),
('TF', 'ATF', '260', u'French Southern Territories'),
('GA', 'GAB', '266', u'Gabon'),
('GM', 'GMB', '270', u'Gambia'),
('GE', 'GEO', '268', u'Georgia'),
('DE', 'DEU', '276', u'Germany'),
('GH', 'GHA', '288', u'Ghana'),
('GI', 'GIB', '292', u'Gibraltar'),
('GR', 'GRC', '300', u'Greece'),
('GL', 'GRL', '304', u'Greenland'),
('GD', 'GRD', '308', u'Grenada'),
('GP', 'GLP', '312', u'Guadeloupe'),
('GU', 'GUM', '316', u'Guam'),
('GT', 'GTM', '320', u'Guatemala'),
('GG', 'GGY', '831', u'Guernsey'),
('GN', 'GIN', '324', u'Guinea'),
('GW', 'GNB', '624', u'Guinea-Bissau'),
('GY', 'GUY', '328', u'Guyana'),
('HT', 'HTI', '332', u'Haiti'),
('HM', 'HMD', '334', u'Heard Island and McDonald Islands'),
('VA', 'VAT', '336', u'Holy See (Vatican City State)'),
('HN', 'HND', '340', u'Honduras'),
('HK', 'HKG', '344', u'Hong Kong'),
('HU', 'HUN', '348', u'Hungary'),
('IS', 'ISL', '352', u'Iceland'),
('IN', 'IND', '356', u'India'),
('ID', 'IDN', '360', u'Indonesia'),
('IR', 'IRN', '364', u'Iran, Islamic Republic of'),
('IQ', 'IRQ', '368', u'Iraq'),
('IE', 'IRL', '372', u'Ireland'),
('IM', 'IMN', '833', u'Isle of Man'),
('IL', 'ISR', '376', u'Israel'),
('IT', 'ITA', '380', u'Italy'),
('JM', 'JAM', '388', u'Jamaica'),
('JP', 'JPN', '392', u'Japan'),
('JE', 'JEY', '832', u'Jersey'),
('JO', 'JOR', '400', u'Jordan'),
('KZ', 'KAZ', '398', u'Kazakhstan'),
('KE', 'KEN', '404', u'Kenya'),
('KI', 'KIR', '296', u'Kiribati'),
('KP', 'PRK', '408', u'Korea, Democratic People\'s Republic of'),
('KR', 'KOR', '410', u'Korea, Republic of'),
('KW', 'KWT', '414', u'Kuwait'),
('KG', 'KGZ', '417', u'Kyrgyzstan'),
('LA', 'LAO', '418', u'Lao People\'s Democratic Republic'),
('LV', 'LVA', '428', u'Latvia'),
('LB', 'LBN', '422', u'Lebanon'),
('LS', 'LSO', '426', u'Lesotho'),
('LR', 'LBR', '430', u'Liberia'),
('LY', 'LBY', '434', u'Libya'),
('LI', 'LIE', '438', u'Liechtenstein'),
('LT', 'LTU', '440', u'Lithuania'),
('LU', 'LUX', '442', u'Luxembourg'),
('MO', 'MAC', '446', u'Macao'),
('MK', 'MKD', '807', u'Macedonia, Republic of'),
('MG', 'MDG', '450', u'Madagascar'),
('MW', 'MWI', '454', u'Malawi'),
('MY', 'MYS', '458', u'Malaysia'),
('MV', 'MDV', '462', u'Maldives'),
('ML', 'MLI', '466', u'Mali'),
('MT', 'MLT', '470', u'Malta'),
('MH', 'MHL', '584', u'Marshall Islands'),
('MQ', 'MTQ', '474', u'Martinique'),
('MR', 'MRT', '478', u'Mauritania'),
('MU', 'MUS', '480', u'Mauritius'),
('YT', 'MYT', '175', u'Mayotte'),
('MX', 'MEX', '484', u'Mexico'),
('FM', 'FSM', '583', u'Micronesia, Federated States of'),
('MD', 'MDA', '498', u'Moldova, Republic of'),
('MC', 'MCO', '492', u'Monaco'),
('MN', 'MNG', '496', u'Mongolia'),
('ME', 'MNE', '499', u'Montenegro'),
('MS', 'MSR', '500', u'Montserrat'),
('MA', 'MAR', '504', u'Morocco'),
('MZ', 'MOZ', '508', u'Mozambique'),
('MM', 'MMR', '104', u'Myanmar'),
('NA', 'NAM', '516', u'Namibia'),
('NR', 'NRU', '520', u'Nauru'),
('NP', 'NPL', '524', u'Nepal'),
('NL', 'NLD', '528', u'Netherlands'),
('NC', 'NCL', '540', u'New Caledonia'),
('NZ', 'NZL', '554', u'New Zealand'),
('NI', 'NIC', '558', u'Nicaragua'),
('NE', 'NER', '562', u'Niger'),
('NG', 'NGA', '566', u'Nigeria'),
('NU', 'NIU', '570', u'Niue'),
('NF', 'NFK', '574', u'Norfolk Island'),
('MP', 'MNP', '580', u'Northern Mariana Islands'),
('NO', 'NOR', '578', u'Norway'),
('OM', 'OMN', '512', u'Oman'),
('PK', 'PAK', '586', u'Pakistan'),
('PW', 'PLW', '585', u'Palau'),
('PS', 'PSE', '275', u'Palestinian Territory, Occupied'),
('PA', 'PAN', '591', u'Panama'),
('PG', 'PNG', '598', u'Papua New Guinea'),
('PY', 'PRY', '600', u'Paraguay'),
('PE', 'PER', '604', u'Peru'),
('PH', 'PHL', '608', u'Philippines'),
('PN', 'PCN', '612', u'Pitcairn'),
('PL', 'POL', '616', u'Poland'),
('PT', 'PRT', '620', u'Portugal'),
('PR', 'PRI', '630', u'Puerto Rico'),
('QA', 'QAT', '634', u'Qatar'),
('RE', 'REU', '638', u'Réunion'),
('RO', 'ROU', '642', u'Romania'),
('RU', 'RUS', '643', u'Russian Federation'),
('RW', 'RWA', '646', u'Rwanda'),
('BL', 'BLM', '652', u'Saint Barthélemy'),
('SH', 'SHN', '654', u'Saint Helena, Ascension and Tristan da Cunha'),
('KN', 'KNA', '659', u'Saint Kitts and Nevis'),
('LC', 'LCA', '662', u'Saint Lucia'),
('MF', 'MAF', '663', u'Saint Martin (French part)'),
('PM', 'SPM', '666', u'Saint Pierre and Miquelon'),
('VC', 'VCT', '670', u'Saint Vincent and the Grenadines'),
('WS', 'WSM', '882', u'Samoa'),
('SM', 'SMR', '674', u'San Marino'),
('ST', 'STP', '678', u'Sao Tome and Principe'),
('SA', 'SAU', '682', u'Saudi Arabia'),
('SN', 'SEN', '686', u'Senegal'),
('RS', 'SRB', '688', u'Serbia'),
('SC', 'SYC', '690', u'Seychelles'),
('SL', 'SLE', '694', u'Sierra Leone'),
('SG', 'SGP', '702', u'Singapore'),
('SX', 'SXM', '534', u'Sint Maarten (Dutch part)'),
('SK', 'SVK', '703', u'Slovakia'),
('SI', 'SVN', '705', u'Slovenia'),
('SB', 'SLB', '090', u'Solomon Islands'),
('SO', 'SOM', '706', u'Somalia'),
('ZA', 'ZAF', '710', u'South Africa'),
('GS', 'SGS', '239', u'South Georgia and the South Sandwich Islands'),
('ES', 'ESP', '724', u'Spain'),
('LK', 'LKA', '144', u'Sri Lanka'),
('SD', 'SDN', '729', u'Sudan'),
('SR', 'SUR', '740', u'Suriname'),
('SS', 'SSD', '728', u'South Sudan'),
('SJ', 'SJM', '744', u'Svalbard and Jan Mayen'),
('SZ', 'SWZ', '748', u'Swaziland'),
('SE', 'SWE', '752', u'Sweden'),
('CH', 'CHE', '756', u'Switzerland'),
('SY', 'SYR', '760', u'Syrian Arab Republic'),
('TW', 'TWN', '158', u'Taiwan, Province of China'),
('TJ', 'TJK', '762', u'Tajikistan'),
('TZ', 'TZA', '834', u'Tanzania, United Republic of'),
('TH', 'THA', '764', u'Thailand'),
('TL', 'TLS', '626', u'Timor-Leste'),
('TG', 'TGO', '768', u'Togo'),
('TK', 'TKL', '772', u'Tokelau'),
('TO', 'TON', '776', u'Tonga'),
('TT', 'TTO', '780', u'Trinidad and Tobago'),
('TN', 'TUN', '788', u'Tunisia'),
('TR', 'TUR', '792', u'Turkey'),
('TM', 'TKM', '795', u'Turkmenistan'),
('TC', 'TCA', '796', u'Turks and Caicos Islands'),
('TV', 'TUV', '798', u'Tuvalu'),
('UG', 'UGA', '800', u'Uganda'),
('UA', 'UKR', '804', u'Ukraine'),
('AE', 'ARE', '784', u'United Arab Emirates'),
('GB', 'GBR', '826', u'United Kingdom'),
('US', 'USA', '840', u'United States'),
('UM', 'UMI', '581', u'United States Minor Outlying Islands'),
('UY', 'URY', '858', u'Uruguay'),
('UZ', 'UZB', '860', u'Uzbekistan'),
('VU', 'VUT', '548', u'Vanuatu'),
('VE', 'VEN', '862', u'Venezuela, Bolivarian Republic of'),
('VN', 'VNM', '704', u'Viet Nam'),
('VG', 'VGB', '092', u'Virgin Islands, British'),
('VI', 'VIR', '850', u'Virgin Islands, U.S.'),
('WF', 'WLF', '876', u'Wallis and Futuna'),
('EH', 'ESH', '732', u'Western Sahara'),
('YE', 'YEM', '887', u'Yemen'),
('ZM', 'ZMB', '894', u'Zambia'),
('ZW', 'ZWE', '716', u'Zimbabwe')]
LANGUAGES = [('aar', '', 'aa', u'Afar', u'afar'),
('abk', '', 'ab', u'Abkhazian', u'abkhaze'),
('ace', '', '', u'Achinese', u'aceh'),
('ach', '', '', u'Acoli', u'acoli'),
('ada', '', '', u'Adangme', u'adangme'),
('ady', '', '', u'Adyghe; Adygei', u'adyghé'),
('afa', '', '', u'Afro-Asiatic languages', u'afro-asiatiques, langues'),
('afh', '', '', u'Afrihili', u'afrihili'),
('afr', '', 'af', u'Afrikaans', u'afrikaans'),
('ain', '', '', u'Ainu', u'aïnou'),
('aka', '', 'ak', u'Akan', u'akan'),
('akk', '', '', u'Akkadian', u'akkadien'),
('alb', 'sqi', 'sq', u'Albanian', u'albanais'),
('ale', '', '', u'Aleut', u'aléoute'),
('alg', '', '', u'Algonquian languages', u'algonquines, langues'),
('alt', '', '', u'Southern Altai', u'altai du Sud'),
('amh', '', 'am', u'Amharic', u'amharique'),
('ang', '', '', u'English, Old (ca.450-1100)', u'anglo-saxon (ca.450-1100)'),
('anp', '', '', u'Angika', u'angika'),
('apa', '', '', u'Apache languages', u'apaches, langues'),
('ara', '', 'ar', u'Arabic', u'arabe'),
('arc', '', '', u'Official Aramaic (700-300 BCE); Imperial Aramaic (700-300 BCE)', u'araméen d\'empire (700-300 BCE)'),
('arg', '', 'an', u'Aragonese', u'aragonais'),
('arm', 'hye', 'hy', u'Armenian', u'arménien'),
('arn', '', '', u'Mapudungun; Mapuche', u'mapudungun; mapuche; mapuce'),
('arp', '', '', u'Arapaho', u'arapaho'),
('art', '', '', u'Artificial languages', u'artificielles, langues'),
('arw', '', '', u'Arawak', u'arawak'),
('asm', '', 'as', u'Assamese', u'assamais'),
('ast', '', '', u'Asturian; Bable; Leonese; Asturleonese', u'asturien; bable; léonais; asturoléonais'),
('ath', '', '', u'Athapascan languages', u'athapascanes, langues'),
('aus', '', '', u'Australian languages', u'australiennes, langues'),
('ava', '', 'av', u'Avaric', u'avar'),
('ave', '', 'ae', u'Avestan', u'avestique'),
('awa', '', '', u'Awadhi', u'awadhi'),
('aym', '', 'ay', u'Aymara', u'aymara'),
('aze', '', 'az', u'Azerbaijani', u'azéri'),
('bad', '', '', u'Banda languages', u'banda, langues'),
('bai', '', '', u'Bamileke languages', u'bamiléké, langues'),
('bak', '', 'ba', u'Bashkir', u'bachkir'),
('bal', '', '', u'Baluchi', u'baloutchi'),
('bam', '', 'bm', u'Bambara', u'bambara'),
('ban', '', '', u'Balinese', u'balinais'),
('baq', 'eus', 'eu', u'Basque', u'basque'),
('bas', '', '', u'Basa', u'basa'),
('bat', '', '', u'Baltic languages', u'baltes, langues'),
('bej', '', '', u'Beja; Bedawiyet', u'bedja'),
('bel', '', 'be', u'Belarusian', u'biélorusse'),
('bem', '', '', u'Bemba', u'bemba'),
('ben', '', 'bn', u'Bengali', u'bengali'),
('ber', '', '', u'Berber languages', u'berbères, langues'),
('bho', '', '', u'Bhojpuri', u'bhojpuri'),
('bih', '', 'bh', u'Bihari languages', u'langues biharis'),
('bik', '', '', u'Bikol', u'bikol'),
('bin', '', '', u'Bini; Edo', u'bini; edo'),
('bis', '', 'bi', u'Bislama', u'bichlamar'),
('bla', '', '', u'Siksika', u'blackfoot'),
('bnt', '', '', u'Bantu (Other)', u'bantoues, autres langues'),
('bos', '', 'bs', u'Bosnian', u'bosniaque'),
('bra', '', '', u'Braj', u'braj'),
('bre', '', 'br', u'Breton', u'breton'),
('btk', '', '', u'Batak languages', u'batak, langues'),
('bua', '', '', u'Buriat', u'bouriate'),
('bug', '', '', u'Buginese', u'bugi'),
('bul', '', 'bg', u'Bulgarian', u'bulgare'),
('bur', 'mya', 'my', u'Burmese', u'birman'),
('byn', '', '', u'Blin; Bilin', u'blin; bilen'),
('cad', '', '', u'Caddo', u'caddo'),
('cai', '', '', u'Central American Indian languages', u'amérindiennes de L\'Amérique centrale, langues'),
('car', '', '', u'Galibi Carib', u'karib; galibi; carib'),
('cat', '', 'ca', u'Catalan; Valencian', u'catalan; valencien'),
('cau', '', '', u'Caucasian languages', u'caucasiennes, langues'),
('ceb', '', '', u'Cebuano', u'cebuano'),
('cel', '', '', u'Celtic languages', u'celtiques, langues; celtes, langues'),
('cha', '', 'ch', u'Chamorro', u'chamorro'),
('chb', '', '', u'Chibcha', u'chibcha'),
('che', '', 'ce', u'Chechen', u'tchétchène'),
('chg', '', '', u'Chagatai', u'djaghataï'),
('chi', 'zho', 'zh', u'Chinese', u'chinois'),
('chk', '', '', u'Chuukese', u'chuuk'),
('chm', '', '', u'Mari', u'mari'),
('chn', '', '', u'Chinook jargon', u'chinook, jargon'),
('cho', '', '', u'Choctaw', u'choctaw'),
('chp', '', '', u'Chipewyan; Dene Suline', u'chipewyan'),
('chr', '', '', u'Cherokee', u'cherokee'),
('chu', '', 'cu', u'Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic', u'slavon d\'église; vieux slave; slavon liturgique; vieux bulgare'),
('chv', '', 'cv', u'Chuvash', u'tchouvache'),
('chy', '', '', u'Cheyenne', u'cheyenne'),
('cmc', '', '', u'Chamic languages', u'chames, langues'),
('cop', '', '', u'Coptic', u'copte'),
('cor', '', 'kw', u'Cornish', u'cornique'),
('cos', '', 'co', u'Corsican', u'corse'),
('cpe', '', '', u'Creoles and pidgins, English based', u'créoles et pidgins basés sur l\'anglais'),
('cpf', '', '', u'Creoles and pidgins, French-based ', u'créoles et pidgins basés sur le français'),
('cpp', '', '', u'Creoles and pidgins, Portuguese-based ', u'créoles et pidgins basés sur le portugais'),
('cre', '', 'cr', u'Cree', u'cree'),
('crh', '', '', u'Crimean Tatar; Crimean Turkish', u'tatar de Crimé'),
('crp', '', '', u'Creoles and pidgins ', u'créoles et pidgins'),
('csb', '', '', u'Kashubian', u'kachoube'),
('cus', '', '', u'Cushitic languages', u'couchitiques, langues'),
('cze', 'ces', 'cs', u'Czech', u'tchèque'),
('dak', '', '', u'Dakota', u'dakota'),
('dan', '', 'da', u'Danish', u'danois'),
('dar', '', '', u'Dargwa', u'dargwa'),
('day', '', '', u'Land Dayak languages', u'dayak, langues'),
('del', '', '', u'Delaware', u'delaware'),
('den', '', '', u'Slave (Athapascan)', u'esclave (athapascan)'),
('dgr', '', '', u'Dogrib', u'dogrib'),
('din', '', '', u'Dinka', u'dinka'),
('div', '', 'dv', u'Divehi; Dhivehi; Maldivian', u'maldivien'),
('doi', '', '', u'Dogri', u'dogri'),
('dra', '', '', u'Dravidian languages', u'dravidiennes, langues'),
('dsb', '', '', u'Lower Sorbian', u'bas-sorabe'),
('dua', '', '', u'Duala', u'douala'),
('dum', '', '', u'Dutch, Middle (ca.1050-1350)', u'néerlandais moyen (ca. 1050-1350)'),
('dut', 'nld', 'nl', u'Dutch; Flemish', u'néerlandais; flamand'),
('dyu', '', '', u'Dyula', u'dioula'),
('dzo', '', 'dz', u'Dzongkha', u'dzongkha'),
('efi', '', '', u'Efik', u'efik'),
('egy', '', '', u'Egyptian (Ancient)', u'égyptien'),
('eka', '', '', u'Ekajuk', u'ekajuk'),
('elx', '', '', u'Elamite', u'élamite'),
('eng', '', 'en', u'English', u'anglais'),
('enm', '', '', u'English, Middle (1100-1500)', u'anglais moyen (1100-1500)'),
('epo', '', 'eo', u'Esperanto', u'espéranto'),
('est', '', 'et', u'Estonian', u'estonien'),
('ewe', '', 'ee', u'Ewe', u'éwé'),
('ewo', '', '', u'Ewondo', u'éwondo'),
('fan', '', '', u'Fang', u'fang'),
('fao', '', 'fo', u'Faroese', u'féroïen'),
('fat', '', '', u'Fanti', u'fanti'),
('fij', '', 'fj', u'Fijian', u'fidjien'),
('fil', '', '', u'Filipino; Pilipino', u'filipino; pilipino'),
('fin', '', 'fi', u'Finnish', u'finnois'),
('fiu', '', '', u'Finno-Ugrian languages', u'finno-ougriennes, langues'),
('fon', '', '', u'Fon', u'fon'),
('fre', 'fra', 'fr', u'French', u'français'),
('frm', '', '', u'French, Middle (ca.1400-1600)', u'français moyen (1400-1600)'),
('fro', '', '', u'French, Old (842-ca.1400)', u'français ancien (842-ca.1400)'),
('frr', '', '', u'Northern Frisian', u'frison septentrional'),
('frs', '', '', u'Eastern Frisian', u'frison oriental'),
('fry', '', 'fy', u'Western Frisian', u'frison occidental'),
('ful', '', 'ff', u'Fulah', u'peul'),
('fur', '', '', u'Friulian', u'frioulan'),
('gaa', '', '', u'Ga', u'ga'),
('gay', '', '', u'Gayo', u'gayo'),
('gba', '', '', u'Gbaya', u'gbaya'),
('gem', '', '', u'Germanic languages', u'germaniques, langues'),
('geo', 'kat', 'ka', u'Georgian', u'géorgien'),
('ger', 'deu', 'de', u'German', u'allemand'),
('gez', '', '', u'Geez', u'guèze'),
('gil', '', '', u'Gilbertese', u'kiribati'),
('gla', '', 'gd', u'Gaelic; Scottish Gaelic', u'gaélique; gaélique écossais'),
('gle', '', 'ga', u'Irish', u'irlandais'),
('glg', '', 'gl', u'Galician', u'galicien'),
('glv', '', 'gv', u'Manx', u'manx; mannois'),
('gmh', '', '', u'German, Middle High (ca.1050-1500)', u'allemand, moyen haut (ca. 1050-1500)'),
('goh', '', '', u'German, Old High (ca.750-1050)', u'allemand, vieux haut (ca. 750-1050)'),
('gon', '', '', u'Gondi', u'gond'),
('gor', '', '', u'Gorontalo', u'gorontalo'),
('got', '', '', u'Gothic', u'gothique'),
('grb', '', '', u'Grebo', u'grebo'),
('grc', '', '', u'Greek, Ancient (to 1453)', u'grec ancien (jusqu\'à 1453)'),
('gre', 'ell', 'el', u'Greek, Modern (1453-)', u'grec moderne (après 1453)'),
('grn', '', 'gn', u'Guarani', u'guarani'),
('gsw', '', '', u'Swiss German; Alemannic; Alsatian', u'suisse alémanique; alémanique; alsacien'),
('guj', '', 'gu', u'Gujarati', u'goudjrati'),
('gwi', '', '', u'Gwich\'in', u'gwich\'in'),
('hai', '', '', u'Haida', u'haida'),
('hat', '', 'ht', u'Haitian; Haitian Creole', u'haïtien; créole haïtien'),
('hau', '', 'ha', u'Hausa', u'haoussa'),
('haw', '', '', u'Hawaiian', u'hawaïen'),
('heb', '', 'he', u'Hebrew', u'hébreu'),
('her', '', 'hz', u'Herero', u'herero'),
('hil', '', '', u'Hiligaynon', u'hiligaynon'),
('him', '', '', u'Himachali languages; Western Pahari languages', u'langues himachalis; langues paharis occidentales'),
('hin', '', 'hi', u'Hindi', u'hindi'),
('hit', '', '', u'Hittite', u'hittite'),
('hmn', '', '', u'Hmong; Mong', u'hmong'),
('hmo', '', 'ho', u'Hiri Motu', u'hiri motu'),
('hrv', '', 'hr', u'Croatian', u'croate'),
('hsb', '', '', u'Upper Sorbian', u'haut-sorabe'),
('hun', '', 'hu', u'Hungarian', u'hongrois'),
('hup', '', '', u'Hupa', u'hupa'),
('iba', '', '', u'Iban', u'iban'),
('ibo', '', 'ig', u'Igbo', u'igbo'),
('ice', 'isl', 'is', u'Icelandic', u'islandais'),
('ido', '', 'io', u'Ido', u'ido'),
('iii', '', 'ii', u'Sichuan Yi; Nuosu', u'yi de Sichuan'),
('ijo', '', '', u'Ijo languages', u'ijo, langues'),
('iku', '', 'iu', u'Inuktitut', u'inuktitut'),
('ile', '', 'ie', u'Interlingue; Occidental', u'interlingue'),
('ilo', '', '', u'Iloko', u'ilocano'),
('ina', '', 'ia', u'Interlingua (International Auxiliary Language Association)', u'interlingua (langue auxiliaire internationale)'),
('inc', '', '', u'Indic languages', u'indo-aryennes, langues'),
('ind', '', 'id', u'Indonesian', u'indonésien'),
('ine', '', '', u'Indo-European languages', u'indo-européennes, langues'),
('inh', '', '', u'Ingush', u'ingouche'),
('ipk', '', 'ik', u'Inupiaq', u'inupiaq'),
('ira', '', '', u'Iranian languages', u'iraniennes, langues'),
('iro', '', '', u'Iroquoian languages', u'iroquoises, langues'),
('ita', '', 'it', u'Italian', u'italien'),
('jav', '', 'jv', u'Javanese', u'javanais'),
('jbo', '', '', u'Lojban', u'lojban'),
('jpn', '', 'ja', u'Japanese', u'japonais'),
('jpr', '', '', u'Judeo-Persian', u'judéo-persan'),
('jrb', '', '', u'Judeo-Arabic', u'judéo-arabe'),
('kaa', '', '', u'Kara-Kalpak', u'karakalpak'),
('kab', '', '', u'Kabyle', u'kabyle'),
('kac', '', '', u'Kachin; Jingpho', u'kachin; jingpho'),
('kal', '', 'kl', u'Kalaallisut; Greenlandic', u'groenlandais'),
('kam', '', '', u'Kamba', u'kamba'),
('kan', '', 'kn', u'Kannada', u'kannada'),
('kar', '', '', u'Karen languages', u'karen, langues'),
('kas', '', 'ks', u'Kashmiri', u'kashmiri'),
('kau', '', 'kr', u'Kanuri', u'kanouri'),
('kaw', '', '', u'Kawi', u'kawi'),
('kaz', '', 'kk', u'Kazakh', u'kazakh'),
('kbd', '', '', u'Kabardian', u'kabardien'),
('kha', '', '', u'Khasi', u'khasi'),
('khi', '', '', u'Khoisan languages', u'khoïsan, langues'),
('khm', '', 'km', u'Central Khmer', u'khmer central'),
('kho', '', '', u'Khotanese; Sakan', u'khotanais; sakan'),
('kik', '', 'ki', u'Kikuyu; Gikuyu', u'kikuyu'),
('kin', '', 'rw', u'Kinyarwanda', u'rwanda'),
('kir', '', 'ky', u'Kirghiz; Kyrgyz', u'kirghiz'),
('kmb', '', '', u'Kimbundu', u'kimbundu'),
('kok', '', '', u'Konkani', u'konkani'),
('kom', '', 'kv', u'Komi', u'kom'),
('kon', '', 'kg', u'Kongo', u'kongo'),
('kor', '', 'ko', u'Korean', u'coréen'),
('kos', '', '', u'Kosraean', u'kosrae'),
('kpe', '', '', u'Kpelle', u'kpellé'),
('krc', '', '', u'Karachay-Balkar', u'karatchai balkar'),
('krl', '', '', u'Karelian', u'carélien'),
('kro', '', '', u'Kru languages', u'krou, langues'),
('kru', '', '', u'Kurukh', u'kurukh'),
('kua', '', 'kj', u'Kuanyama; Kwanyama', u'kuanyama; kwanyama'),
('kum', '', '', u'Kumyk', u'koumyk'),
('kur', '', 'ku', u'Kurdish', u'kurde'),
('kut', '', '', u'Kutenai', u'kutenai'),
('lad', '', '', u'Ladino', u'judéo-espagnol'),
('lah', '', '', u'Lahnda', u'lahnda'),
('lam', '', '', u'Lamba', u'lamba'),
('lao', '', 'lo', u'Lao', u'lao'),
('lat', '', 'la', u'Latin', u'latin'),
('lav', '', 'lv', u'Latvian', u'letton'),
('lez', '', '', u'Lezghian', u'lezghien'),
('lim', '', 'li', u'Limburgan; Limburger; Limburgish', u'limbourgeois'),
('lin', '', 'ln', u'Lingala', u'lingala'),
('lit', '', 'lt', u'Lithuanian', u'lituanien'),
('lol', '', '', u'Mongo', u'mongo'),
('loz', '', '', u'Lozi', u'lozi'),
('ltz', '', 'lb', u'Luxembourgish; Letzeburgesch', u'luxembourgeois'),
('lua', '', '', u'Luba-Lulua', u'luba-lulua'),
('lub', '', 'lu', u'Luba-Katanga', u'luba-katanga'),
('lug', '', 'lg', u'Ganda', u'ganda'),
('lui', '', '', u'Luiseno', u'luiseno'),
('lun', '', '', u'Lunda', u'lunda'),
('luo', '', '', u'Luo (Kenya and Tanzania)', u'luo (Kenya et Tanzanie)'),
('lus', '', '', u'Lushai', u'lushai'),
('mac', 'mkd', 'mk', u'Macedonian', u'macédonien'),
('mad', '', '', u'Madurese', u'madourais'),
('mag', '', '', u'Magahi', u'magahi'),
('mah', '', 'mh', u'Marshallese', u'marshall'),
('mai', '', '', u'Maithili', u'maithili'),
('mak', '', '', u'Makasar', u'makassar'),
('mal', '', 'ml', u'Malayalam', u'malayalam'),
('man', '', '', u'Mandingo', u'mandingue'),
('mao', 'mri', 'mi', u'Maori', u'maori'),
('map', '', '', u'Austronesian languages', u'austronésiennes, langues'),
('mar', '', 'mr', u'Marathi', u'marathe'),
('mas', '', '', u'Masai', u'massaï'),
('may', 'msa', 'ms', u'Malay', u'malais'),
('mdf', '', '', u'Moksha', u'moksa'),
('mdr', '', '', u'Mandar', u'mandar'),
('men', '', '', u'Mende', u'mendé'),
('mga', '', '', u'Irish, Middle (900-1200)', u'irlandais moyen (900-1200)'),
('mic', '', '', u'Mi\'kmaq; Micmac', u'mi\'kmaq; micmac'),
('min', '', '', u'Minangkabau', u'minangkabau'),
('mkh', '', '', u'Mon-Khmer languages', u'môn-khmer, langues'),
('mlg', '', 'mg', u'Malagasy', u'malgache'),
('mlt', '', 'mt', u'Maltese', u'maltais'),
('mnc', '', '', u'Manchu', u'mandchou'),
('mni', '', '', u'Manipuri', u'manipuri'),
('mno', '', '', u'Manobo languages', u'manobo, langues'),
('moh', '', '', u'Mohawk', u'mohawk'),
('mon', '', 'mn', u'Mongolian', u'mongol'),
('mos', '', '', u'Mossi', u'moré'),
('mun', '', '', u'Munda languages', u'mounda, langues'),
('mus', '', '', u'Creek', u'muskogee'),
('mwl', '', '', u'Mirandese', u'mirandais'),
('mwr', '', '', u'Marwari', u'marvari'),
('myn', '', '', u'Mayan languages', u'maya, langues'),
('myv', '', '', u'Erzya', u'erza'),
('nah', '', '', u'Nahuatl languages', u'nahuatl, langues'),
('nai', '', '', u'North American Indian languages', u'nord-amérindiennes, langues'),
('nap', '', '', u'Neapolitan', u'napolitain'),
('nau', '', 'na', u'Nauru', u'nauruan'),
('nav', '', 'nv', u'Navajo; Navaho', u'navaho'),
('nbl', '', 'nr', u'Ndebele, South; South Ndebele', u'ndébélé du Sud'),
('nde', '', 'nd', u'Ndebele, North; North Ndebele', u'ndébélé du Nord'),
('ndo', '', 'ng', u'Ndonga', u'ndonga'),
('nds', '', '', u'Low German; Low Saxon; German, Low; Saxon, Low', u'bas allemand; bas saxon; allemand, bas; saxon, bas'),
('nep', '', 'ne', u'Nepali', u'népalais'),
('new', '', '', u'Nepal Bhasa; Newari', u'nepal bhasa; newari'),
('nia', '', '', u'Nias', u'nias'),
('nic', '', '', u'Niger-Kordofanian languages', u'nigéro-kordofaniennes, langues'),
('niu', '', '', u'Niuean', u'niué'),
('nno', '', 'nn', u'Norwegian Nynorsk; Nynorsk, Norwegian', u'norvégien nynorsk; nynorsk, norvégien'),
('nob', '', 'nb', u'Bokmål, Norwegian; Norwegian Bokmål', u'norvégien bokmål'),
('nog', '', '', u'Nogai', u'nogaï; nogay'),
('non', '', '', u'Norse, Old', u'norrois, vieux'),
('nor', '', 'no', u'Norwegian', u'norvégien'),
('nqo', '', '', u'N\'Ko', u'n\'ko'),
('nso', '', '', u'Pedi; Sepedi; Northern Sotho', u'pedi; sepedi; sotho du Nord'),
('nub', '', '', u'Nubian languages', u'nubiennes, langues'),
('nwc', '', '', u'Classical Newari; Old Newari; Classical Nepal Bhasa', u'newari classique'),
('nya', '', 'ny', u'Chichewa; Chewa; Nyanja', u'chichewa; chewa; nyanja'),
('nym', '', '', u'Nyamwezi', u'nyamwezi'),
('nyn', '', '', u'Nyankole', u'nyankolé'),
('nyo', '', '', u'Nyoro', u'nyoro'),
('nzi', '', '', u'Nzima', u'nzema'),
('oci', '', 'oc', u'Occitan (post 1500); Provençal', u'occitan (après 1500); provençal'),
('oji', '', 'oj', u'Ojibwa', u'ojibwa'),
('ori', '', 'or', u'Oriya', u'oriya'),
('orm', '', 'om', u'Oromo', u'galla'),
('osa', '', '', u'Osage', u'osage'),
('oss', '', 'os', u'Ossetian; Ossetic', u'ossète'),
('ota', '', '', u'Turkish, Ottoman (1500-1928)', u'turc ottoman (1500-1928)'),
('oto', '', '', u'Otomian languages', u'otomi, langues'),
('paa', '', '', u'Papuan languages', u'papoues, langues'),
('pag', '', '', u'Pangasinan', u'pangasinan'),
('pal', '', '', u'Pahlavi', u'pahlavi'),
('pam', '', '', u'Pampanga; Kapampangan', u'pampangan'),
('pan', '', 'pa', u'Panjabi; Punjabi', u'pendjabi'),
('pap', '', '', u'Papiamento', u'papiamento'),
('pau', '', '', u'Palauan', u'palau'),
('peo', '', '', u'Persian, Old (ca.600-400 B.C.)', u'perse, vieux (ca. 600-400 av. J.-C.)'),
('per', 'fas', 'fa', u'Persian', u'persan'),
('phi', '', '', u'Philippine languages', u'philippines, langues'),
('phn', '', '', u'Phoenician', u'phénicien'),
('pli', '', 'pi', u'Pali', u'pali'),
('pol', '', 'pl', u'Polish', u'polonais'),
('pon', '', '', u'Pohnpeian', u'pohnpei'),
('por', '', 'pt', u'Portuguese', u'portugais'),
('pra', '', '', u'Prakrit languages', u'prâkrit, langues'),
('pro', '', '', u'Provençal, Old (to 1500)', u'provençal ancien (jusqu\'à 1500)'),
('pus', '', 'ps', u'Pushto; Pashto', u'pachto'),
('que', '', 'qu', u'Quechua', u'quechua'),
('raj', '', '', u'Rajasthani', u'rajasthani'),
('rap', '', '', u'Rapanui', u'rapanui'),
('rar', '', '', u'Rarotongan; Cook Islands Maori', u'rarotonga; maori des îles Cook'),
('roa', '', '', u'Romance languages', u'romanes, langues'),
('roh', '', 'rm', u'Romansh', u'romanche'),
('rom', '', '', u'Romany', u'tsigane'),
('rum', 'ron', 'ro', u'Romanian; Moldavian; Moldovan', u'roumain; moldave'),
('run', '', 'rn', u'Rundi', u'rundi'),
('rup', '', '', u'Aromanian; Arumanian; Macedo-Romanian', u'aroumain; macédo-roumain'),
('rus', '', 'ru', u'Russian', u'russe'),
('sad', '', '', u'Sandawe', u'sandawe'),
('sag', '', 'sg', u'Sango', u'sango'),
('sah', '', '', u'Yakut', u'iakoute'),
('sai', '', '', u'South American Indian (Other)', u'indiennes d\'Amérique du Sud, autres langues'),
('sal', '', '', u'Salishan languages', u'salishennes, langues'),
('sam', '', '', u'Samaritan Aramaic', u'samaritain'),
('san', '', 'sa', u'Sanskrit', u'sanskrit'),
('sas', '', '', u'Sasak', u'sasak'),
('sat', '', '', u'Santali', u'santal'),
('scn', '', '', u'Sicilian', u'sicilien'),
('sco', '', '', u'Scots', u'écossais'),
('sel', '', '', u'Selkup', u'selkoupe'),
('sem', '', '', u'Semitic languages', u'sémitiques, langues'),
('sga', '', '', u'Irish, Old (to 900)', u'irlandais ancien (jusqu\'à 900)'),
('sgn', '', '', u'Sign Languages', u'langues des signes'),
('shn', '', '', u'Shan', u'chan'),
('sid', '', '', u'Sidamo', u'sidamo'),
('sin', '', 'si', u'Sinhala; Sinhalese', u'singhalais'),
('sio', '', '', u'Siouan languages', u'sioux, langues'),
('sit', '', '', u'Sino-Tibetan languages', u'sino-tibétaines, langues'),
('sla', '', '', u'Slavic languages', u'slaves, langues'),
('slo', 'slk', 'sk', u'Slovak', u'slovaque'),
('slv', '', 'sl', u'Slovenian', u'slovène'),
('sma', '', '', u'Southern Sami', u'sami du Sud'),
('sme', '', 'se', u'Northern Sami', u'sami du Nord'),
('smi', '', '', u'Sami languages', u'sames, langues'),
('smj', '', '', u'Lule Sami', u'sami de Lule'),
('smn', '', '', u'Inari Sami', u'sami d\'Inari'),
('smo', '', 'sm', u'Samoan', u'samoan'),
('sms', '', '', u'Skolt Sami', u'sami skolt'),
('sna', '', 'sn', u'Shona', u'shona'),
('snd', '', 'sd', u'Sindhi', u'sindhi'),
('snk', '', '', u'Soninke', u'soninké'),
('sog', '', '', u'Sogdian', u'sogdien'),
('som', '', 'so', u'Somali', u'somali'),
('son', '', '', u'Songhai languages', u'songhai, langues'),
('sot', '', 'st', u'Sotho, Southern', u'sotho du Sud'),
('spa', '', 'es', u'Spanish; Castilian', u'espagnol; castillan'),
('srd', '', 'sc', u'Sardinian', u'sarde'),
('srn', '', '', u'Sranan Tongo', u'sranan tongo'),
('srp', '', 'sr', u'Serbian', u'serbe'),
('srr', '', '', u'Serer', u'sérère'),
('ssa', '', '', u'Nilo-Saharan languages', u'nilo-sahariennes, langues'),
('ssw', '', 'ss', u'Swati', u'swati'),
('suk', '', '', u'Sukuma', u'sukuma'),
('sun', '', 'su', u'Sundanese', u'soundanais'),
('sus', '', '', u'Susu', u'soussou'),
('sux', '', '', u'Sumerian', u'sumérien'),
('swa', '', 'sw', u'Swahili', u'swahili'),
('swe', '', 'sv', u'Swedish', u'suédois'),
('syc', '', '', u'Classical Syriac', u'syriaque classique'),
('syr', '', '', u'Syriac', u'syriaque'),
('tah', '', 'ty', u'Tahitian', u'tahitien'),
('tai', '', '', u'Tai languages', u'tai, langues'),
('tam', '', 'ta', u'Tamil', u'tamoul'),
('tat', '', 'tt', u'Tatar', u'tatar'),
('tel', '', 'te', u'Telugu', u'télougou'),
('tem', '', '', u'Timne', u'temne'),
('ter', '', '', u'Tereno', u'tereno'),
('tet', '', '', u'Tetum', u'tetum'),
('tgk', '', 'tg', u'Tajik', u'tadjik'),
('tgl', '', 'tl', u'Tagalog', u'tagalog'),
('tha', '', 'th', u'Thai', u'thaï'),
('tib', 'bod', 'bo', u'Tibetan', u'tibétain'),
('tig', '', '', u'Tigre', u'tigré'),
('tir', '', 'ti', u'Tigrinya', u'tigrigna'),
('tiv', '', '', u'Tiv', u'tiv'),
('tkl', '', '', u'Tokelau', u'tokelau'),
('tlh', '', '', u'Klingon; tlhIngan-Hol', u'klingon'),
('tli', '', '', u'Tlingit', u'tlingit'),
('tmh', '', '', u'Tamashek', u'tamacheq'),
('tog', '', '', u'Tonga (Nyasa)', u'tonga (Nyasa)'),
('ton', '', 'to', u'Tonga (Tonga Islands)', u'tongan (Îles Tonga)'),
('tpi', '', '', u'Tok Pisin', u'tok pisin'),
('tsi', '', '', u'Tsimshian', u'tsimshian'),
('tsn', '', 'tn', u'Tswana', u'tswana'),
('tso', '', 'ts', u'Tsonga', u'tsonga'),
('tuk', '', 'tk', u'Turkmen', u'turkmène'),
('tum', '', '', u'Tumbuka', u'tumbuka'),
('tup', '', '', u'Tupi languages', u'tupi, langues'),
('tur', '', 'tr', u'Turkish', u'turc'),
('tut', '', '', u'Altaic languages', u'altaïques, langues'),
('tvl', '', '', u'Tuvalu', u'tuvalu'),
('twi', '', 'tw', u'Twi', u'twi'),
('tyv', '', '', u'Tuvinian', u'touva'),
('udm', '', '', u'Udmurt', u'oudmourte'),
('uga', '', '', u'Ugaritic', u'ougaritique'),
('uig', '', 'ug', u'Uighur; Uyghur', u'ouïgour'),
('ukr', '', 'uk', u'Ukrainian', u'ukrainien'),
('umb', '', '', u'Umbundu', u'umbundu'),
('und', '', '', u'Undetermined', u'indéterminée'),
('urd', '', 'ur', u'Urdu', u'ourdou'),
('uzb', '', 'uz', u'Uzbek', u'ouszbek'),
('vai', '', '', u'Vai', u'vaï'),
('ven', '', 've', u'Venda', u'venda'),
('vie', '', 'vi', u'Vietnamese', u'vietnamien'),
('vol', '', 'vo', u'Volapük', u'volapük'),
('vot', '', '', u'Votic', u'vote'),
('wak', '', '', u'Wakashan languages', u'wakashanes, langues'),
('wal', '', '', u'Walamo', u'walamo'),
('war', '', '', u'Waray', u'waray'),
('was', '', '', u'Washo', u'washo'),
('wel', 'cym', 'cy', u'Welsh', u'gallois'),
('wen', '', '', u'Sorbian languages', u'sorabes, langues'),
('wln', '', 'wa', u'Walloon', u'wallon'),
('wol', '', 'wo', u'Wolof', u'wolof'),
('xal', '', '', u'Kalmyk; Oirat', u'kalmouk; oïrat'),
('xho', '', 'xh', u'Xhosa', u'xhosa'),
('yao', '', '', u'Yao', u'yao'),
('yap', '', '', u'Yapese', u'yapois'),
('yid', '', 'yi', u'Yiddish', u'yiddish'),
('yor', '', 'yo', u'Yoruba', u'yoruba'),
('ypk', '', '', u'Yupik languages', u'yupik, langues'),
('zap', '', '', u'Zapotec', u'zapotèque'),
('zbl', '', '', u'Blissymbols; Blissymbolics; Bliss', u'symboles Bliss; Bliss'),
('zen', '', '', u'Zenaga', u'zenaga'),
('zha', '', 'za', u'Zhuang; Chuang', u'zhuang; chuang'),
('znd', '', '', u'Zande languages', u'zandé, langues'),
('zul', '', 'zu', u'Zulu', u'zoulou'),
('zun', '', '', u'Zuni', u'zuni'),
('zza', '', '', u'Zaza; Dimili; Dimli; Kirdki; Kirmanjki; Zazaki', u'zaza; dimili; dimli; kirdki; kirmanjki; zazaki')]
class Country(object):
"""Country according to ISO-3166
:param string country: country name, alpha2 code, alpha3 code or numeric code
:param list countries: all countries
:type countries: see :data:`~subliminal.language.COUNTRIES`
"""
def __init__(self, country, countries=None):
countries = countries or COUNTRIES
country = to_unicode(country.strip().lower())
country_tuple = None
# Try to find the country
if len(country) == 2:
country_tuple = dict((c[0].lower(), c) for c in countries).get(country)
elif len(country) == 3 and not country.isdigit():
country_tuple = dict((c[1].lower(), c) for c in countries).get(country)
elif len(country) == 3 and country.isdigit():
country_tuple = dict((c[2].lower(), c) for c in countries).get(country)
if country_tuple is None:
country_tuple = dict((c[3].lower(), c) for c in countries).get(country)
# Raise ValueError if nothing is found
if country_tuple is None:
raise ValueError('Country %s does not exist' % country)
# Set default attrs
self.alpha2 = country_tuple[0]
self.alpha3 = country_tuple[1]
self.numeric = country_tuple[2]
self.name = country_tuple[3]
def __hash__(self):
return hash(self.alpha3)
def __eq__(self, other):
if isinstance(other, Country):
return self.alpha3 == other.alpha3
return False
def __ne__(self, other):
return not self == other
def __unicode__(self):
return self.name
def __str__(self):
return unicode(self).encode('utf-8')
def __repr__(self):
return 'Country(%s)' % self
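# Illustrative sketch (hypothetical helper, not part of the original module):
# Country accepts an alpha2, alpha3 or numeric code as well as a full name,
# all matched case-insensitively against the COUNTRIES table above.
def _country_examples():
    assert Country('fr').alpha3 == 'FRA'
    assert Country('DEU').alpha2 == 'DE'
    assert Country('076').name == u'Brazil'
    assert Country('japan').numeric == '392'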
class Language(object):
"""Language according to ISO-639
    :param string language: language name (English or French), alpha2 code, alpha3 code, terminologic code or numeric code, optionally with a country
:param country: country of the language
:type country: :class:`Country` or string
:param languages: all languages
:type languages: see :data:`~subliminal.language.LANGUAGES`
:param countries: all countries
:type countries: see :data:`~subliminal.language.COUNTRIES`
:param bool strict: whether to raise a ValueError on unknown language or not
:class:`Language` implements the inclusion test, with the ``in`` keyword::
>>> Language('pt-BR') in Language('pt') # Portuguese (Brazil) is included in Portuguese
True
>>> Language('pt') in Language('pt-BR') # Portuguese is not included in Portuguese (Brazil)
False
"""
with_country_regexps = [re.compile('(.*)\((.*)\)'), re.compile('(.*)[-_](.*)')]
def __init__(self, language, country=None, languages=None, countries=None, strict=True):
languages = languages or LANGUAGES
countries = countries or COUNTRIES
# Get the country
self.country = None
if isinstance(country, Country):
self.country = country
elif isinstance(country, basestring):
try:
self.country = Country(country, countries)
except ValueError:
logger.warning(u'Country %s could not be identified' % country)
if strict:
raise
# Language + Country format
#TODO: Improve this part
if country is None:
for regexp in [r.match(language) for r in self.with_country_regexps]:
if regexp:
language = regexp.group(1)
try:
self.country = Country(regexp.group(2), countries)
except ValueError:
logger.warning(u'Country %s could not be identified' % country)
if strict:
raise
break
# Try to find the language
language = to_unicode(language.strip().lower())
language_tuple = None
if len(language) == 2:
language_tuple = dict((l[2].lower(), l) for l in languages).get(language)
elif len(language) == 3:
language_tuple = dict((l[0].lower(), l) for l in languages).get(language)
if language_tuple is None:
language_tuple = dict((l[1].lower(), l) for l in languages).get(language)
if language_tuple is None:
language_tuple = dict((l[3].split('; ')[0].lower(), l) for l in languages).get(language)
if language_tuple is None:
language_tuple = dict((l[4].split('; ')[0].lower(), l) for l in languages).get(language)
# Raise ValueError if strict or continue with Undetermined
if language_tuple is None:
if strict:
raise ValueError('Language %s does not exist' % language)
language_tuple = dict((l[0].lower(), l) for l in languages).get('und')
# Set attributes
self.alpha2 = language_tuple[2]
self.alpha3 = language_tuple[0]
self.terminologic = language_tuple[1]
self.name = language_tuple[3]
self.french_name = language_tuple[4]
def __hash__(self):
if self.country is None:
return hash(self.alpha3)
return hash(self.alpha3 + self.country.alpha3)
def __eq__(self, other):
if isinstance(other, Language):
return self.alpha3 == other.alpha3 and self.country == other.country
return False
def __contains__(self, item):
if isinstance(item, Language):
if self == item:
return True
if self.country is None:
return self.alpha3 == item.alpha3
return False
def __ne__(self, other):
return not self == other
def __nonzero__(self):
return self.alpha3 != 'und'
def __unicode__(self):
if self.country is None:
return self.name
return '%s (%s)' % (self.name, self.country)
def __str__(self):
return unicode(self).encode('utf-8')
def __repr__(self):
if self.country is None:
return 'Language(%s)' % self.name.encode('utf-8')
return 'Language(%s, country=%s)' % (self.name.encode('utf-8'), self.country)
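# Illustrative sketch (hypothetical helper, not part of the original module):
# Language resolves codes and names, and the "xx-YY" / "xx_YY" forms attach a
# Country to the language, which drives the inclusion test documented above.
def _language_examples():
    assert Language('fr').alpha2 == 'fr'
    assert Language('fre').alpha3 == 'fre'
    assert Language('pt-BR').country == Country('BR')
    assert Language('pt-BR') in Language('pt')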
class language_set(set):
"""Set of :class:`Language` with some specificities.
:param iterable: where to take elements from
:type iterable: iterable of :class:`Languages <Language>` or string
:param languages: all languages
:type languages: see :data:`~subliminal.language.LANGUAGES`
:param bool strict: whether to raise a ValueError on invalid language or not
The following redefinitions are meant to reflect the inclusion logic in :class:`Language`
* Inclusion test, with the ``in`` keyword
* Intersection
    * Subtraction
Here is an illustration of the previous points::
>>> Language('en') in language_set(['en-US', 'en-CA'])
False
>>> Language('en-US') in language_set(['en', 'fr'])
True
>>> language_set(['en']) & language_set(['en-US', 'en-CA'])
language_set([Language(English, country=Canada), Language(English, country=United States)])
>>> language_set(['en-US', 'en-CA', 'fr']) - language_set(['en'])
language_set([Language(French)])
"""
def __init__(self, iterable=None, languages=None, strict=True):
iterable = iterable or []
languages = languages or LANGUAGES
items = []
for i in iterable:
if isinstance(i, Language):
items.append(i)
continue
if isinstance(i, tuple):
items.append(Language(i[0], languages=languages, strict=strict))
continue
items.append(Language(i, languages=languages, strict=strict))
super(language_set, self).__init__(items)
def __contains__(self, item):
for i in self:
if item in i:
return True
return super(language_set, self).__contains__(item)
def __and__(self, other):
results = language_set()
for i in self:
for j in other:
if i in j:
results.add(i)
for i in other:
for j in self:
if i in j:
results.add(i)
return results
def __sub__(self, other):
results = language_set()
for i in self:
if i not in other:
results.add(i)
return results
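# Illustrative sketch (hypothetical helper, not part of the original module):
# intersection and subtraction on language_set follow the Language inclusion
# rules rather than plain equality.
def _language_set_examples():
    assert Language('en-US') in language_set(['en', 'fr'])
    assert language_set(['en-US', 'fr']) - language_set(['en']) == language_set(['fr'])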
class language_list(list):
"""List of :class:`Language` with some specificities.
:param iterable: where to take elements from
:type iterable: iterable of :class:`Languages <Language>` or string
:param languages: all languages
:type languages: see :data:`~subliminal.language.LANGUAGES`
:param bool strict: whether to raise a ValueError on invalid language or not
The following redefinitions are meant to reflect the inclusion logic in :class:`Language`
* Inclusion test, with the ``in`` keyword
* Index
Here is an illustration of the previous points::
>>> Language('en') in language_list(['en-US', 'en-CA'])
False
>>> Language('en-US') in language_list(['en', 'fr-BE'])
True
>>> language_list(['en', 'fr-BE']).index(Language('en-US'))
0
"""
def __init__(self, iterable=None, languages=None, strict=True):
iterable = iterable or []
languages = languages or LANGUAGES
items = []
for i in iterable:
if isinstance(i, Language):
items.append(i)
continue
if isinstance(i, tuple):
items.append(Language(i[0], languages=languages, strict=strict))
continue
items.append(Language(i, languages=languages, strict=strict))
super(language_list, self).__init__(items)
def __contains__(self, item):
for i in self:
if item in i:
return True
return super(language_list, self).__contains__(item)
def index(self, x, strict=False):
if not strict:
for i in range(len(self)):
if x in self[i]:
return i
return super(language_list, self).index(x)
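# Illustrative sketch (hypothetical helper, not part of the original module):
# language_list.index() honours inclusion by default and only falls back to
# exact list matching when strict=True.
def _language_list_examples():
    languages = language_list(['en', 'fr-BE'])
    assert Language('en-US') in languages
    assert languages.index(Language('en-US')) == 0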
|
gpl-3.0
|
pferreir/indico-backup
|
indico/MaKaC/common/utils.py
|
1
|
21170
|
# -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2013 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico;if not, see <http://www.gnu.org/licenses/>.
# stdlib imports
import time, re, os
from random import randint
from datetime import datetime, date, timedelta
# 3rd party imports
from BTrees.OOBTree import OOBTree
from indico.util.date_time import format_datetime, format_date, format_time
# indico legacy imports
from MaKaC.common.timezoneUtils import isSameDay, isToday, getAdjustedDate,\
isTomorrow
from MaKaC.common import info
from MaKaC import errors
# indico imports
from indico.core.config import Config
from indico.core.db import DBMgr
from indico.util.i18n import currentLocale
# backward compatibility
from indico.util.string import truncate
# fcntl is only available for POSIX systems
if os.name == 'posix':
import fcntl
_KEY_DEFAULT_LENGTH = 20
_FAKENAME_SIZEMIN = 5
_FAKENAME_SIZEMAX = 10
def isWeekend(d):
"""
Accepts date or datetime object.
"""
return d.weekday() in [5, 6]
def stringToDate( str ):
months = { "January":1, "February":2, "March":3, "April":4, "May":5, "June":6, "July":7, "August":8, "September":9, "October":10, "November":11, "December":12 }
[ day, month, year ] = str.split("-")
if months.has_key(month):
month = months[month]
else:
month = int(month)
return datetime(int(year),month,int(day))
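# Illustrative sketch (hypothetical helper, not part of the original module):
# stringToDate() expects a "day-MonthName-year" string but also tolerates a
# numeric month.
def _stringToDateExamples():
    assert stringToDate("17-July-2009") == datetime(2009, 7, 17)
    assert stringToDate("17-07-2009") == datetime(2009, 7, 17)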
def getTextColorFromBackgroundColor(bgcolor):
    #Returns black if the average of the RGB values
    #is greater than 128 (light background), and white otherwise.
if len(bgcolor.strip())==7:# remove "#" before the color code
bgcolor=bgcolor[1:]
if len(bgcolor)==6:
try:
avg=int((int(bgcolor[0:2], 16)+int(bgcolor[2:4], 16)+int(bgcolor[4:6], 16))/3)
if avg>128:
return "#000000"
else:
return "#FFFFFF"
except ValueError:
pass
return "#202020"
charRplace = [
[u'\u2019', u"'"],
[u'\u0153', u"oe"],
[u'\u2026', u"..."],
[u'\u2013', u"-"],
[u'\u2018', u"'"]
]
def utf8Tolatin1(text):
t = text.decode("utf8")
for i in charRplace:
t = t.replace(i[0], i[1])
return t.encode("latin1",'replace')
def utf8rep(text):
# \x -> _x keeps windows systems satisfied
return text.decode('utf-8').encode('unicode_escape').replace('\\x','_x')
def sortUsersByName(x,y):
return cmp(x.getFamilyName().lower(),y.getFamilyName().lower())
def sortUsersByFirstName(x,y):
return cmp(x.getFirstName().lower(),y.getFirstName().lower())
def sortUsersByAffiliation(x,y):
return cmp(x.getAffiliation().lower(),y.getAffiliation().lower())
def sortUsersByEmail(x,y):
return cmp(x.getEmail().lower(),y.getEmail().lower())
def sortGroupsByName(x,y):
return cmp(x.getName().lower(),y.getName().lower())
def sortDomainsByName(x,y):
return cmp(x.getName().lower(),y.getName().lower())
def sortFilesByName(x,y):
return cmp(x.getName().lower(),y.getName().lower())
def sortContributionByDate(x,y):
return cmp(x.getStartDate(),y.getStartDate())
def sortSlotByDate(x,y):
return cmp(x.getStartDate(),y.getStartDate())
def sortCategoryByTitle(x,y):
return cmp(x.getTitle().lower(),y.getTitle().lower())
def sortPrincipalsByName(x,y):
from MaKaC.user import Group
firstNamex, firstNamey = "", ""
if x is None:
namex = ""
elif isinstance(x, Group):
namex = x.getName()
else:
namex = x.getFamilyName()
firstNamex = x.getFirstName()
if y is None:
namey = ""
elif isinstance(y, Group):
namey = y.getName()
else:
namey = y.getFamilyName()
firstNamey = y.getFirstName()
cmpRes = cmp(namex.lower(),namey.lower())
if cmpRes == 0:
cmpRes = cmp(firstNamex.lower(),firstNamey.lower())
return cmpRes
def validMail(emailstr, allowMultiple=True):
"""
    Check the validity of an email address or series of email addresses
- emailstr: a string representing a single email address or several
email addresses separated by separators
Returns True if the email/emails is/are valid.
"""
# Convert the separators into valid ones. For now only, mix of whitespaces,
# semi-colons and commas are handled and replaced by commas. This way the
# method only checks the validity of the email addresses without taking
# care of the separators
emails = setValidEmailSeparators(emailstr)
# Creates a list of emails
emaillist = emails.split(",")
if not allowMultiple and len(emaillist) > 1:
return False
# Checks the validity of each email in the list
if emaillist != None or emaillist != []:
for em in emaillist:
if re.search(r"^[-a-zA-Z0-9!#$%&'*+/=?\^_`{|}~]+(?:.[-a-zA-Z0-9!#$%&'*+/=?^_`{|}~]+)*@(?:[a-zA-Z0-9](?:[-a-zA-Z0-9]*[a-zA-Z0-9])?.)+[a-zA-Z0-9](?:[-a-zA-Z0-9]*[a-zA-Z0-9])?$",
em) == None:
# if re.search("^[a-zA-Z][\w\.-]*[a-zA-Z0-9]@[a-zA-Z0-9][\w\.-]*[a-zA-Z0-9]\.[a-zA-Z][a-zA-Z\.]*[a-zA-Z]$",
# em) == None:
return False
return True
def setValidEmailSeparators(emailstr):
"""
Replace occurrences of separators in a string of email addresses by
occurrences of "," in order to get a string of emails valid with the
html 'a' tag. Separators that could be replaced are semi-colons,
whitespaces and mixes of the previous two along with commas. This allows
the handling of multiple email addresses.
- emailstr: the string of emails in which we want to convert the separators
into commas
"""
    # remove occurrences of separators at the beginning and at the end of
    # the string
emails = re.subn(r"(?:^[ ;,]+)|(?:[ ;,]+$)", "", emailstr)[0]
# return the string obtained after replacing the separators
return re.subn(r"[ ;,]+", ",", emails)[0]
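# Illustrative sketch (hypothetical helper, not part of the original module):
# separators are normalised to commas before each address is validated.
def _emailExamples():
    assert setValidEmailSeparators("alice@example.org; bob@example.org") == "alice@example.org,bob@example.org"
    assert validMail("alice@example.org, bob@example.org") is True
    assert validMail("alice@example.org bob@example.org", allowMultiple=False) is False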
def validIP(ip):
"""
Quick and dirty IP address validation
(not exact, but enough)
"""
expr = r'(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)'
return re.match(expr, ip) != None
def isStringHTML(s):
if type(s) == str:
tags = [ "<p>", "<p ", "<br", "<li>" ]
for tag in tags:
if s.lower().find(tag) != -1:
return True
return False
def getEmailList(stri):
emailList = []
for email in stri.split(",") :
email = email.strip()
if email!="" and email.rfind(".", email.find("@") )>0 and email not in emailList :
emailList.append(email)
return emailList
def dictionaryToString(dico):
""" Convert the given dictionary to a string, which is returned.
Useful for HTML attributes (e.g. name="a" value="x" ...).
"""
#check params
if not isinstance(dico, dict): return ""
#converting
attrString = " "
for item in dico.items():
if len(item)==2: #it should always be 2 (Better to prevent than to heal!)
attrName= str(item[0])
attrVal = str(item[1])
attrVal = attrVal.replace('"', "'") #remove double quotes : " -> '
attrString += """%s="%s" """%(attrName,attrVal)
return attrString
def dictionaryToTupleList(dic):
return [(k,v) for (k,v) in dic.iteritems()]
def removeQuotes(myString):
"""encode/replace problematics quotes."""
#Note: Use ’ because ' can be problematic with context help!!!
    #Note: \xe2\x80\x99 = ’
return str(myString).strip().replace('"', """).replace("'", "’").replace("$-1������", "’").replace("\xe2\x80\x99", "’")
def putbackQuotes(myString):
"""cancel (almost all) effects of function removeQuotes()."""
return str(myString).strip().replace(""", '"').replace("’", "'")
def newFakeName(minSize=_FAKENAME_SIZEMIN, maxSize=_FAKENAME_SIZEMAX):
"""Give randomly a fake name. Useful when we want to make people anonymous..."""
#check
try:
minSize = int(minSize)
except:
minSize = _FAKENAME_SIZEMIN
try:
maxSize = int(maxSize)
except:
maxSize = _FAKENAME_SIZEMAX
#next
length = randint(minSize,maxSize)
uppers = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
vowels = "aeiouy"
consonants = "bcdfghjklmnpqrstvwxz"
upperIndexMax = len(uppers)-1
vowelIndexMax = len(vowels)-1
consonantIndexMax = len(consonants)-1
#first capital letter
fakename = uppers[ randint(0,upperIndexMax) ]
#following lowercase letters
isVowel = fakename.lower() in vowels
for i in range(1, length):
if isVowel:
fakename += consonants[ randint(0,consonantIndexMax) ]
isVowel = False
else:
fakename += vowels[ randint(0,vowelIndexMax) ]
isVowel = True
return fakename
def newKey(length=_KEY_DEFAULT_LENGTH):
"""returns a new crypted key of given length."""
#check
try:
length = int(length)
except:
length = _KEY_DEFAULT_LENGTH
#next
table = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
key = ""
indexMax = len(table)-1
for i in range(length):
key += table[ randint(0,indexMax) ]
return key
def nodeValue(node):
"""given a leaf node, returns its value."""
from xml.dom.minidom import Element,Text
if isinstance(node,Text):
return node.data
elif isinstance(node,Element) and node.firstChild!=None and isinstance(node.firstChild,Text):
return node.firstChild.data.encode('utf-8')
return ""
def _bool(val):
"""same as bool(), but returns False when you give "False"."""
if str(val).strip() == "False" :
return False
else: return bool(val)
def _int(val):
    """same as int(), but returns 0 when you give "" or None."""
    if str(val).strip()=="" or val==None :
        return 0
    else:
        return int(val)
def _positiveInt(val):
"""same as _int(), but returns 0 when you give a negative int."""
val = _int(val)
if val<0:
return 0
else:
return val
def _negativeInt(val):
"""same as _int(), but returns 0 when you give a positive int."""
val = _int(val)
if val>0:
return 0
else:
return val
def encodeUnicode(text, sourceEncoding = "utf-8"):
try:
tmp = str(text).decode( sourceEncoding )
except:
try:
tmp = str(text).decode( 'iso-8859-1' )
except:
return ""
return tmp.encode('utf-8')
def unicodeLength(s, encoding = 'utf-8'):
""" Returns the length of the string s as an unicode object.
The conversion is done in the encoding supplied.
Example: the word 'niño' has a length of 4 as a unicode object, but 5 as a strig in utf-8
because the 'ñ' character uses 2 bytes.
"""
return len(s.decode(encoding, 'replace'))
def unicodeSlice(s, start, end, encoding = 'utf-8'):
""" Returns a slice of the string s, based on its encoding.
    Example: unicodeSlice('ññññ', 0, 2, 'utf-8') will return 'ññ' instead of 'ñ' even if each 'ñ' occupies 2 bytes.
"""
return s.decode(encoding, 'replace')[start:end]
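# Illustrative sketch (hypothetical helper, not part of the original module):
# byte length and unicode length differ for multi-byte characters, which is
# exactly what the two helpers above account for (Python 2 semantics).
def _unicodeExamples():
    assert len('ni\xc3\xb1o') == 5             # utf-8 byte string
    assert unicodeLength('ni\xc3\xb1o') == 4   # u'niño' has 4 characters
    assert unicodeSlice('ni\xc3\xb1o', 0, 3) == u'ni\xf1'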
def formatDateTime(dateTime, showWeek=False, format=None, locale=None, server_tz=False):
week = "EEEE" if showWeek else ""
if not format:
return format_datetime(dateTime, week+'d/M/yyyy H:mm', locale=locale, server_tz=server_tz)
else:
return format_datetime(dateTime, format, locale=locale, server_tz=server_tz)
def formatDate(date, showWeek=False, format=None, locale=None):
week = ""
if showWeek:
week = "EEE "
if not format:
return format_date(date, week+'d/M/yyyy', locale=locale)
else:
return format_date(date, format, locale=locale)
def formatTime(tm, format=None, locale=None, server_tz=False):
if not format:
return format_time(tm, 'H:mm', locale=locale, server_tz=server_tz)
else:
return format_time(tm, format, locale=locale, server_tz=server_tz)
def parseDate(dateStr, format='%d/%m/%Y'):
t=time.strptime(dateStr, format)
return datetime(t.tm_year,t.tm_mon, t.tm_mday).date()
def prettyDuration(duration):
"""Return duration 01:05 in a pretty format 1h05'"""
hours = duration.seconds/60/60
minutes = duration.seconds/60%60
if hours:
return "%sh%s'" % (hours, minutes)
else:
return "%s'" % minutes
def formatDuration(duration, units = 'minutes', truncate = True):
""" Formats a duration (a timedelta object)
"""
seconds = duration.days * 86400 + duration.seconds
if units == 'seconds':
result = seconds
elif units == 'minutes':
result = seconds / 60
elif units == 'hours':
result = seconds / 3600
elif units == 'days':
result = seconds / 86400
elif units == 'hours_minutes':
#truncate has no effect here
minutes = int(seconds / 60) % 60
hours = int(seconds / 3600)
return str(hours) + 'h' + str(minutes).zfill(2) + 'm'
elif units == '(hours)_minutes':
#truncate has no effect here
minutes = int(seconds / 60) % 60
hours = int(seconds / 3600)
if hours:
return str(hours) + 'h' + str(minutes).zfill(2) + 'm'
else:
return str(minutes) + 'm'
else:
raise Exception("Unknown duration unit: " + str(units))
if truncate:
return int(result)
else:
return result
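# Illustrative sketch (not part of the original module): formatDuration() on a
# timedelta of 1 day, 2 hours and 30 minutes with a few of the supported units.
def _exampleFormatDuration():
    from datetime import timedelta
    d = timedelta(days=1, hours=2, minutes=30)   # 95400 seconds in total
    assert formatDuration(d, 'minutes') == 1590
    assert formatDuration(d, 'hours') == 26          # truncated
    assert formatDuration(d, 'hours_minutes') == '26h30m'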
def formatTwoDates(date1, date2, tz = None, useToday = False, useTomorrow = False, dayFormat = None, capitalize = True, showWeek = False):
""" Formats two dates, such as an event start and end date, taking into account if they happen the same day
(given a timezone).
-date1 and date2 have to be timezone-aware.
-If no tz argument is provided, tz will be the timezone of date1.
tz can be a string or a timezone "object"
-dayFormat and showWeek are passed to formatDate function, so they behave the same way as in that function
-capitalize: capitalize week days AND first letter of sentence if there is one
Examples: 17/07/2009 from 08:00 to 18:00 (default args, 2 dates in same day)
from 17/07/2009 at 08:00 to 19/07/2009 at 14:00 (default args, 2 dates in different day)
Fri 17/07/2009 from 08:00 to 18:00 (showWeek = True, default args, 2 dates in same day)
today from 10:00 to 11:00 (useToday = True, default args, 2 dates in same day and it happens to be today)
"""
if not tz:
tz = date1.tzinfo
date1 = getAdjustedDate(date1, tz = tz)
date2 = getAdjustedDate(date2, tz = tz)
sameDay = isSameDay(date1, date2, tz)
date1text = ''
date2text = ''
if useToday:
if isToday(date1, tz):
date1text = "today"
if isToday(date2, tz):
date2text = "today"
if useTomorrow:
if isTomorrow(date1, tz):
date1text = "isTomorrow"
if isTomorrow(date2, tz):
date2text = "isTomorrow"
if not date1text:
date1text = formatDate(date1.date(), showWeek, dayFormat)
if capitalize:
date1text = date1text.capitalize()
if not date2text:
date2text = formatDate(date2.date(), showWeek, dayFormat)
if capitalize:
date2text = date2text.capitalize()
time1text = formatTime(date1.time())
time2text = formatTime(date2.time())
if sameDay:
result = date1text + ' from ' + time1text + ' to ' + time2text
else:
if capitalize:
fromText = 'From '
else:
fromText = 'from '
result = fromText + date1text + ' at ' + time1text + ' to ' + date2text + ' at ' + time2text
return result
def parseTime(timeStr, format='%H:%M'):
t=time.strptime(timeStr, format)
return datetime(t.tm_year,t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min).time()
def parseDateTime(dateTimeStr):
t=time.strptime(dateTimeStr, '%d/%m/%Y %H:%M')
return datetime(t.tm_year,t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min)
def normalizeToList(l):
if type(l) != list:
l=[l]
return l
def getHierarchicalId(obj):
"""
Gets the ID of a Conference, Contribution or Subcontribution,
    in a hierarchical manner
"""
from MaKaC import conference
ret = obj.getId()
if isinstance(obj,conference.Contribution):
ret="%s.%s"%(obj.getConference().getId(),ret)
elif isinstance(obj, conference.SubContribution):
ret="%s.%s.%s"%(obj.getConference().getId(), obj.getContribution().getId(), ret)
#elif isinstance(obj, conference.DeletedObject):
# ret=obj.getId().replace(':','.')
elif isinstance(obj, conference.Session):
ret="%s.s%s"%(obj.getConference().getId(), ret)
elif isinstance(obj, conference.SessionSlot):
ret="%s.s%s.%s"%(obj.getConference().getId(), obj.getSession().getId(), ret)
return ret
def resolveHierarchicalId(objId):
"""
Gets an object from its Id (unless it doesn't exist,
    in which case it returns None)
"""
from MaKaC.conference import ConferenceHolder
m = re.match(r'(\w+)(?:\.(s?)(\w+))?(?:\.(\w+))?', objId)
# If the expression doesn't match at all, return
if not m or not m.groups()[0]:
return None
try:
m = m.groups()
conference = ConferenceHolder().getById(m[0])
if m[1]:
# session id specified - session or slot
session = conference.getSessionById(m[2])
if m[3]:
# session slot: 1234.s12.1
return session.getSlotById(m[3])
else:
# session: 1234.s12
return session
else:
if m[2]:
# second token is not a session id
# (either contribution or subcontribution)
contribution = conference.getContributionById(m[2])
if m[3]:
# subcontribution: 1234.12.1
return contribution.getSubContributionById(m[3])
else:
# contribution: 1234.12
return contribution
else:
                # there's no second token
# it's definitely a conference
return conference
except errors.MaKaCError:
return None
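# Illustrative sketch (not part of the original module): how the regular
# expression in resolveHierarchicalId() splits the IDs that
# getHierarchicalId() produces.
def _exampleHierarchicalIds():
    import re
    pattern = r'(\w+)(?:\.(s?)(\w+))?(?:\.(\w+))?'
    assert re.match(pattern, '1234').groups() == ('1234', None, None, None)      # conference
    assert re.match(pattern, '1234.12').groups() == ('1234', '', '12', None)     # contribution
    assert re.match(pattern, '1234.12.1').groups() == ('1234', '', '12', '1')    # subcontribution
    assert re.match(pattern, '1234.s12.1').groups() == ('1234', 's', '12', '1')  # session slot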
class OSSpecific(object):
"""
Namespace for OS Specific operations:
- file locking
"""
@classmethod
def _lockFilePosix(cls, f, lockType):
"""
Locks file f with lock type lockType
"""
fcntl.flock(f, lockType)
@classmethod
def _lockFileOthers(cls, f, lockType):
"""
Win32/others file locking could be implemented here
"""
pass
@classmethod
def lockFile(cls, f, lockType):
"""
API method - locks a file
f - file handler
lockType - string: LOCK_EX | LOCK_UN | LOCK_SH
"""
cls._lockFile(f, cls._lockTranslationTable[lockType])
# Check OS and choose correct locking method
if os.name == 'posix':
_lockFile = _lockFilePosix
_lockTranslationTable = {
'LOCK_EX': fcntl.LOCK_EX,
'LOCK_UN': fcntl.LOCK_UN,
'LOCK_SH': fcntl.LOCK_SH
}
else:
_lockFile = _lockFileOthers
_lockTranslationTable = {
'LOCK_EX': None,
'LOCK_UN': None,
'LOCK_SH': None
}
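# Illustrative sketch (not part of the original module): taking and releasing
# an exclusive lock around a write. On POSIX this maps to fcntl.flock(); on
# other platforms the current implementation is a no-op. The path is only an
# example.
def _exampleFileLock(path='/tmp/example.lock'):
    f = open(path, 'w')
    try:
        OSSpecific.lockFile(f, 'LOCK_EX')
        f.write('critical section\n')
    finally:
        OSSpecific.lockFile(f, 'LOCK_UN')
        f.close()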
def getProtectionText(target):
if target.hasAnyProtection():
if target.isItselfProtected():
return "protected_own", None
elif target.hasProtectedOwner():
return "protected_parent", None
elif target.getDomainList() != []:
return "domain", list(x.getName() for x in target.getDomainList())
else:
return getProtectionText(target.getOwner())
return "", None
def getReportNumberItems(obj):
rns = obj.getReportNumberHolder().listReportNumbers()
reportCodes = []
for rn in rns:
key = rn[0]
if key in Config.getInstance().getReportNumberSystems().keys():
number = rn[1]
reportNumberId="s%sr%s"%(key, number)
name = Config.getInstance().getReportNumberSystems()[key]["name"]
reportCodes.append({"id" : reportNumberId, "number": number, "system": key, "name": name})
return reportCodes
|
gpl-3.0
|
GustavoHennig/ansible
|
lib/ansible/modules/cloud/openstack/os_keystone_domain_facts.py
|
49
|
4121
|
#!/usr/bin/python
# Copyright (c) 2016 Hewlett-Packard Enterprise Corporation
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_keystone_domain_facts
short_description: Retrieve facts about one or more OpenStack domains
extends_documentation_fragment: openstack
version_added: "2.1"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
description:
    - Retrieve facts about one or more OpenStack domains
requirements:
- "python >= 2.6"
- "shade"
options:
name:
description:
- Name or ID of the domain
        required: false
filters:
description:
- A dictionary of meta data to use for further filtering. Elements of
this dictionary may be additional dictionaries.
required: false
default: None
availability_zone:
description:
        - Ignored. Present for backwards compatibility
required: false
'''
EXAMPLES = '''
# Gather facts about all previously created domains
- os_keystone_domain_facts:
cloud: awesomecloud
- debug:
var: openstack_domains
# Gather facts about a previously created domain by name
- os_keystone_domain_facts:
cloud: awesomecloud
name: demodomain
- debug:
var: openstack_domains
# Gather facts about a previously created domain with filter
- os_keystone_domain_facts:
cloud: awesomecloud
name: demodomain
filters:
enabled: False
- debug:
var: openstack_domains
'''
RETURN = '''
openstack_domains:
description: has all the OpenStack facts about domains
returned: always, but can be null
type: complex
contains:
id:
description: Unique UUID.
returned: success
type: string
name:
description: Name given to the domain.
returned: success
type: string
description:
description: Description of the domain.
returned: success
type: string
enabled:
description: Flag to indicate if the domain is enabled.
returned: success
type: bool
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=False, default=None),
filters=dict(required=False, type='dict', default=None),
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[
['name', 'filters'],
]
)
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
name = module.params['name']
filters = module.params['filters']
opcloud = shade.operator_cloud(**module.params)
if name:
# Let's suppose user is passing domain ID
try:
                domains = opcloud.get_domain(name)
except:
domains = opcloud.search_domains(filters={'name': name})
else:
domains = opcloud.search_domains(filters)
module.exit_json(changed=False, ansible_facts=dict(
openstack_domains=domains))
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
diwer/sublimeconfig
|
Packages/Package Control/package_control/downloaders/background_downloader.py
|
9
|
1574
|
import threading
class BackgroundDownloader(threading.Thread):
"""
Downloads information from one or more URLs in the background.
Normal usage is to use one BackgroundDownloader per domain name.
:param settings:
A dict containing at least the following fields:
`cache_length`,
`debug`,
`timeout`,
`user_agent`,
`http_proxy`,
`https_proxy`,
`proxy_username`,
`proxy_password`
:param providers:
An array of providers that can download the URLs
"""
def __init__(self, settings, providers):
self.settings = settings
self.urls = []
self.providers = providers
self.used_providers = {}
threading.Thread.__init__(self)
def add_url(self, url):
"""
Adds a URL to the list to download
:param url:
The URL to download info about
"""
self.urls.append(url)
def get_provider(self, url):
"""
Returns the provider for the URL specified
:param url:
The URL to return the provider for
:return:
The provider object for the URL
"""
return self.used_providers[url]
    def run(self):
        for url in self.urls:
            provider = None
            for provider_class in self.providers:
                if provider_class.match_url(url):
                    provider = provider_class(url, self.settings)
                    break
            if not provider:
                # No provider matched this URL; skip it instead of reusing a
                # provider left over from a previous iteration.
                continue
            provider.prefetch()
            self.used_providers[url] = provider
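# Illustrative sketch (not part of the original module): typical use is one
# BackgroundDownloader per domain, fed with URLs, started, joined, and then
# queried for the providers it used. The provider class and URL below are
# hypothetical placeholders.
#
#   downloader = BackgroundDownloader(settings, [SomeRepositoryProvider])
#   downloader.add_url('https://example.com/packages.json')
#   downloader.start()   # inherited from threading.Thread; calls run()
#   downloader.join()
#   provider = downloader.get_provider('https://example.com/packages.json')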
|
mit
|
jeongarmy/TizenRT
|
external/protobuf/python/google/protobuf/message.py
|
43
|
11454
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# TODO(robinson): We should just make these methods all "pure-virtual" and move
# all implementation out, into reflection.py for now.
"""Contains an abstract base class for protocol messages."""
__author__ = '[email protected] (Will Robinson)'
class Error(Exception): pass
class DecodeError(Error): pass
class EncodeError(Error): pass
class Message(object):
"""Abstract base class for protocol messages.
Protocol message classes are almost always generated by the protocol
compiler. These generated types subclass Message and implement the methods
shown below.
TODO(robinson): Link to an HTML document here.
TODO(robinson): Document that instances of this class will also
have an Extensions attribute with __getitem__ and __setitem__.
Again, not sure how to best convey this.
TODO(robinson): Document that the class must also have a static
RegisterExtension(extension_field) method.
Not sure how to best express at this point.
"""
# TODO(robinson): Document these fields and methods.
__slots__ = []
DESCRIPTOR = None
def __deepcopy__(self, memo=None):
clone = type(self)()
clone.MergeFrom(self)
return clone
def __eq__(self, other_msg):
"""Recursively compares two messages by value and structure."""
raise NotImplementedError
def __ne__(self, other_msg):
# Can't just say self != other_msg, since that would infinitely recurse. :)
return not self == other_msg
def __hash__(self):
raise TypeError('unhashable object')
def __str__(self):
"""Outputs a human-readable representation of the message."""
raise NotImplementedError
def __unicode__(self):
"""Outputs a human-readable representation of the message."""
raise NotImplementedError
def MergeFrom(self, other_msg):
"""Merges the contents of the specified message into current message.
This method merges the contents of the specified message into the current
message. Singular fields that are set in the specified message overwrite
the corresponding fields in the current message. Repeated fields are
appended. Singular sub-messages and groups are recursively merged.
Args:
other_msg: Message to merge into the current message.
"""
raise NotImplementedError
def CopyFrom(self, other_msg):
"""Copies the content of the specified message into the current message.
The method clears the current message and then merges the specified
message using MergeFrom.
Args:
other_msg: Message to copy into the current one.
"""
if self is other_msg:
return
self.Clear()
self.MergeFrom(other_msg)
def Clear(self):
"""Clears all data that was set in the message."""
raise NotImplementedError
def SetInParent(self):
"""Mark this as present in the parent.
This normally happens automatically when you assign a field of a
sub-message, but sometimes you want to make the sub-message
present while keeping it empty. If you find yourself using this,
you may want to reconsider your design."""
raise NotImplementedError
def IsInitialized(self):
"""Checks if the message is initialized.
Returns:
The method returns True if the message is initialized (i.e. all of its
required fields are set).
"""
raise NotImplementedError
# TODO(robinson): MergeFromString() should probably return None and be
# implemented in terms of a helper that returns the # of bytes read. Our
# deserialization routines would use the helper when recursively
# deserializing, but the end user would almost always just want the no-return
# MergeFromString().
def MergeFromString(self, serialized):
"""Merges serialized protocol buffer data into this message.
When we find a field in |serialized| that is already present
in this message:
- If it's a "repeated" field, we append to the end of our list.
- Else, if it's a scalar, we overwrite our field.
- Else, (it's a nonrepeated composite), we recursively merge
into the existing composite.
TODO(robinson): Document handling of unknown fields.
Args:
serialized: Any object that allows us to call buffer(serialized)
to access a string of bytes using the buffer interface.
TODO(robinson): When we switch to a helper, this will return None.
Returns:
The number of bytes read from |serialized|.
For non-group messages, this will always be len(serialized),
but for messages which are actually groups, this will
generally be less than len(serialized), since we must
stop when we reach an END_GROUP tag. Note that if
we *do* stop because of an END_GROUP tag, the number
of bytes returned does not include the bytes
for the END_GROUP tag information.
"""
raise NotImplementedError
def ParseFromString(self, serialized):
"""Parse serialized protocol buffer data into this message.
Like MergeFromString(), except we clear the object first and
do not return the value that MergeFromString returns.
"""
self.Clear()
self.MergeFromString(serialized)
def SerializeToString(self, **kwargs):
"""Serializes the protocol message to a binary string.
Arguments:
**kwargs: Keyword arguments to the serialize method, accepts
the following keyword args:
deterministic: If true, requests deterministic serialization of the
protobuf, with predictable ordering of map keys.
Returns:
A binary string representation of the message if all of the required
fields in the message are set (i.e. the message is initialized).
Raises:
message.EncodeError if the message isn't initialized.
"""
raise NotImplementedError
def SerializePartialToString(self, **kwargs):
"""Serializes the protocol message to a binary string.
This method is similar to SerializeToString but doesn't check if the
message is initialized.
Arguments:
**kwargs: Keyword arguments to the serialize method, accepts
the following keyword args:
deterministic: If true, requests deterministic serialization of the
protobuf, with predictable ordering of map keys.
Returns:
A string representation of the partial message.
"""
raise NotImplementedError
# TODO(robinson): Decide whether we like these better
# than auto-generated has_foo() and clear_foo() methods
# on the instances themselves. This way is less consistent
# with C++, but it makes reflection-type access easier and
# reduces the number of magically autogenerated things.
#
# TODO(robinson): Be sure to document (and test) exactly
# which field names are accepted here. Are we case-sensitive?
# What do we do with fields that share names with Python keywords
# like 'lambda' and 'yield'?
#
# nnorwitz says:
# """
# Typically (in python), an underscore is appended to names that are
# keywords. So they would become lambda_ or yield_.
# """
def ListFields(self):
"""Returns a list of (FieldDescriptor, value) tuples for all
fields in the message which are not empty. A message field is
non-empty if HasField() would return true. A singular primitive field
is non-empty if HasField() would return true in proto2 or it is non zero
in proto3. A repeated field is non-empty if it contains at least one
element. The fields are ordered by field number"""
raise NotImplementedError
def HasField(self, field_name):
"""Checks if a certain field is set for the message, or if any field inside
a oneof group is set. Note that if the field_name is not defined in the
message descriptor, ValueError will be raised."""
raise NotImplementedError
def ClearField(self, field_name):
"""Clears the contents of a given field, or the field set inside a oneof
group. If the name neither refers to a defined field or oneof group,
ValueError is raised."""
raise NotImplementedError
def WhichOneof(self, oneof_group):
"""Returns the name of the field that is set inside a oneof group, or
None if no field is set. If no group with the given name exists, ValueError
will be raised."""
raise NotImplementedError
def HasExtension(self, extension_handle):
raise NotImplementedError
def ClearExtension(self, extension_handle):
raise NotImplementedError
def DiscardUnknownFields(self):
raise NotImplementedError
def ByteSize(self):
"""Returns the serialized size of this message.
Recursively calls ByteSize() on all contained messages.
"""
raise NotImplementedError
def _SetListener(self, message_listener):
"""Internal method used by the protocol message implementation.
Clients should not call this directly.
Sets a listener that this message will call on certain state transitions.
The purpose of this method is to register back-edges from children to
parents at runtime, for the purpose of setting "has" bits and
byte-size-dirty bits in the parent and ancestor objects whenever a child or
descendant object is modified.
If the client wants to disconnect this Message from the object tree, she
explicitly sets callback to None.
If message_listener is None, unregisters any existing listener. Otherwise,
message_listener must implement the MessageListener interface in
internal/message_listener.py, and we discard any listener registered
via a previous _SetListener() call.
"""
raise NotImplementedError
def __getstate__(self):
"""Support the pickle protocol."""
return dict(serialized=self.SerializePartialToString())
def __setstate__(self, state):
"""Support the pickle protocol."""
self.__init__()
self.ParseFromString(state['serialized'])
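# Illustrative sketch (not part of the original module): concrete Message
# subclasses are emitted by the protocol compiler; the abstract methods above
# define the contract they fulfil. With a hypothetical generated class
# MyMessage, typical usage looks like:
#
#   msg = MyMessage()
#   msg.ParseFromString(data)           # Clear(), then merge serialized bytes
#   copy = MyMessage()
#   copy.CopyFrom(msg)                  # Clear() followed by MergeFrom()
#   payload = msg.SerializeToString()   # raises EncodeError if uninitialized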
|
apache-2.0
|
kuiwei/kuiwei
|
common/djangoapps/third_party_auth/provider.py
|
29
|
8241
|
"""Third-party auth provider definitions.
Loaded by Django's settings mechanism. Consequently, this module must not
invoke the Django armature.
"""
from social.backends import google, linkedin
_DEFAULT_ICON_CLASS = 'icon-signin'
class BaseProvider(object):
"""Abstract base class for third-party auth providers.
All providers must subclass BaseProvider -- otherwise, they cannot be put
in the provider Registry.
"""
# Class. The provider's backing social.backends.base.BaseAuth child.
BACKEND_CLASS = None
# String. Name of the FontAwesome glyph to use for sign in buttons (or the
# name of a user-supplied custom glyph that is present at runtime).
ICON_CLASS = _DEFAULT_ICON_CLASS
# String. User-facing name of the provider. Must be unique across all
# enabled providers. Will be presented in the UI.
NAME = None
# Dict of string -> object. Settings that will be merged into Django's
# settings instance. In most cases the value will be None, since real
# values are merged from .json files (foo.auth.json; foo.env.json) onto the
# settings instance during application initialization.
SETTINGS = {}
@classmethod
def get_authentication_backend(cls):
"""Gets associated Django settings.AUTHENTICATION_BACKEND string."""
return '%s.%s' % (cls.BACKEND_CLASS.__module__, cls.BACKEND_CLASS.__name__)
@classmethod
def get_email(cls, unused_provider_details):
"""Gets user's email address.
Provider responses can contain arbitrary data. This method can be
overridden to extract an email address from the provider details
extracted by the social_details pipeline step.
Args:
unused_provider_details: dict of string -> string. Data about the
user passed back by the provider.
Returns:
String or None. The user's email address, if any.
"""
return None
@classmethod
def get_name(cls, unused_provider_details):
"""Gets user's name.
Provider responses can contain arbitrary data. This method can be
overridden to extract a full name for a user from the provider details
extracted by the social_details pipeline step.
Args:
unused_provider_details: dict of string -> string. Data about the
user passed back by the provider.
Returns:
String or None. The user's full name, if any.
"""
return None
@classmethod
def get_register_form_data(cls, pipeline_kwargs):
"""Gets dict of data to display on the register form.
common.djangoapps.student.views.register_user uses this to populate the
new account creation form with values supplied by the user's chosen
provider, preventing duplicate data entry.
Args:
pipeline_kwargs: dict of string -> object. Keyword arguments
accumulated by the pipeline thus far.
Returns:
Dict of string -> string. Keys are names of form fields; values are
values for that field. Where there is no value, the empty string
must be used.
"""
# Details about the user sent back from the provider.
details = pipeline_kwargs.get('details')
# Get the username separately to take advantage of the de-duping logic
# built into the pipeline. The provider cannot de-dupe because it can't
# check the state of taken usernames in our system. Note that there is
# technically a data race between the creation of this value and the
# creation of the user object, so it is still possible for users to get
# an error on submit.
suggested_username = pipeline_kwargs.get('username')
return {
'email': cls.get_email(details) or '',
'name': cls.get_name(details) or '',
'username': suggested_username,
}
@classmethod
def merge_onto(cls, settings):
"""Merge class-level settings onto a django settings module."""
for key, value in cls.SETTINGS.iteritems():
setattr(settings, key, value)
class GoogleOauth2(BaseProvider):
"""Provider for Google's Oauth2 auth system."""
BACKEND_CLASS = google.GoogleOAuth2
ICON_CLASS = 'icon-google-plus'
NAME = 'Google'
SETTINGS = {
'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY': None,
'SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET': None,
}
@classmethod
def get_email(cls, provider_details):
return provider_details.get('email')
@classmethod
def get_name(cls, provider_details):
return provider_details.get('fullname')
class LinkedInOauth2(BaseProvider):
"""Provider for LinkedIn's Oauth2 auth system."""
BACKEND_CLASS = linkedin.LinkedinOAuth2
ICON_CLASS = 'icon-linkedin'
NAME = 'LinkedIn'
SETTINGS = {
'SOCIAL_AUTH_LINKEDIN_OAUTH2_KEY': None,
'SOCIAL_AUTH_LINKEDIN_OAUTH2_SECRET': None,
}
@classmethod
def get_email(cls, provider_details):
return provider_details.get('email')
@classmethod
def get_name(cls, provider_details):
return provider_details.get('fullname')
class Registry(object):
"""Singleton registry of third-party auth providers.
Providers must subclass BaseProvider in order to be usable in the registry.
"""
_CONFIGURED = False
_ENABLED = {}
@classmethod
def _check_configured(cls):
"""Ensures registry is configured."""
if not cls._CONFIGURED:
raise RuntimeError('Registry not configured')
@classmethod
def _get_all(cls):
"""Gets all provider implementations loaded into the Python runtime."""
        # BaseProvider does so have __subclasses__. pylint: disable-msg=no-member
return {klass.NAME: klass for klass in BaseProvider.__subclasses__()}
@classmethod
def _enable(cls, provider):
"""Enables a single provider."""
if provider.NAME in cls._ENABLED:
raise ValueError('Provider %s already enabled' % provider.NAME)
cls._ENABLED[provider.NAME] = provider
@classmethod
def configure_once(cls, provider_names):
"""Configures providers.
Args:
provider_names: list of string. The providers to configure.
Raises:
ValueError: if the registry has already been configured, or if any
of the passed provider_names does not have a corresponding
BaseProvider child implementation.
"""
if cls._CONFIGURED:
raise ValueError('Provider registry already configured')
# Flip the bit eagerly -- configure() should not be re-callable if one
# _enable call fails.
cls._CONFIGURED = True
for name in provider_names:
all_providers = cls._get_all()
if name not in all_providers:
raise ValueError('No implementation found for provider ' + name)
cls._enable(all_providers.get(name))
@classmethod
def enabled(cls):
"""Returns list of enabled providers."""
cls._check_configured()
return sorted(cls._ENABLED.values(), key=lambda provider: provider.NAME)
@classmethod
def get(cls, provider_name):
"""Gets provider named provider_name string if enabled, else None."""
cls._check_configured()
return cls._ENABLED.get(provider_name)
@classmethod
def get_by_backend_name(cls, backend_name):
"""Gets provider (or None) by backend name.
Args:
backend_name: string. The python-social-auth
backends.base.BaseAuth.name (for example, 'google-oauth2') to
try and get a provider for.
Raises:
RuntimeError: if the registry has not been configured.
"""
cls._check_configured()
for enabled in cls._ENABLED.values():
if enabled.BACKEND_CLASS.name == backend_name:
return enabled
@classmethod
def _reset(cls):
"""Returns the registry to an unconfigured state; for tests only."""
cls._CONFIGURED = False
cls._ENABLED = {}
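# Illustrative sketch (not part of the original module): the registry is
# configured once at startup and then queried by provider name or by
# python-social-auth backend name.
#
#   Registry.configure_once(['Google', 'LinkedIn'])
#   Registry.get('Google')                         # -> GoogleOauth2
#   Registry.get_by_backend_name('google-oauth2')  # -> GoogleOauth2
#   [provider.NAME for provider in Registry.enabled()]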
|
agpl-3.0
|