repo_name | path | copies | size | content | license
---|---|---|---|---|---|
Samuc/Proyecto-IV
|
lib/python2.7/site-packages/setuptools/tests/test_sdist.py
|
332
|
17816
|
# -*- coding: utf-8 -*-
"""sdist tests"""
import locale
import os
import shutil
import sys
import tempfile
import unittest
import unicodedata
import re
from setuptools.tests import environment, test_svn
from setuptools.tests.py26compat import skipIf
from setuptools.compat import StringIO, unicode
from setuptools.command.sdist import sdist, walk_revctrl
from setuptools.command.egg_info import manifest_maker
from setuptools.dist import Distribution
from setuptools import svn_utils
SETUP_ATTRS = {
'name': 'sdist_test',
'version': '0.0',
'packages': ['sdist_test'],
'package_data': {'sdist_test': ['*.txt']}
}
SETUP_PY = """\
from setuptools import setup
setup(**%r)
""" % SETUP_ATTRS
if sys.version_info >= (3,):
LATIN1_FILENAME = 'smörbröd.py'.encode('latin-1')
else:
LATIN1_FILENAME = 'sm\xf6rbr\xf6d.py'
# Cannot use context manager because of Python 2.4
def quiet():
global old_stdout, old_stderr
old_stdout, old_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = StringIO(), StringIO()
def unquiet():
sys.stdout, sys.stderr = old_stdout, old_stderr
# Fake byte literals for Python <= 2.5
def b(s, encoding='utf-8'):
if sys.version_info >= (3,):
return s.encode(encoding)
return s
# Convert to POSIX path
def posix(path):
if sys.version_info >= (3,) and not isinstance(path, str):
return path.replace(os.sep.encode('ascii'), b('/'))
else:
return path.replace(os.sep, '/')
# HFS Plus uses decomposed UTF-8
def decompose(path):
if isinstance(path, unicode):
return unicodedata.normalize('NFD', path)
try:
path = path.decode('utf-8')
path = unicodedata.normalize('NFD', path)
path = path.encode('utf-8')
except UnicodeError:
pass # Not UTF-8
return path
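# Illustrative note (not in the original source): NFD splits a precomposed
# character into its base letter plus a combining mark, which is how HFS Plus
# stores names on disk, e.g.
# decompose(u'sm\xf6rbr\xf6d.py') == u'smo\u0308rbro\u0308d.py'.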
class TestSdistTest(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
f = open(os.path.join(self.temp_dir, 'setup.py'), 'w')
f.write(SETUP_PY)
f.close()
# Set up the rest of the test package
test_pkg = os.path.join(self.temp_dir, 'sdist_test')
os.mkdir(test_pkg)
# *.rst was not included in package_data, so c.rst should not be
# automatically added to the manifest when not under version control
for fname in ['__init__.py', 'a.txt', 'b.txt', 'c.rst']:
# Just touch the files; their contents are irrelevant
open(os.path.join(test_pkg, fname), 'w').close()
self.old_cwd = os.getcwd()
os.chdir(self.temp_dir)
def tearDown(self):
os.chdir(self.old_cwd)
shutil.rmtree(self.temp_dir)
def test_package_data_in_sdist(self):
"""Regression test for pull request #4: ensures that files listed in
package_data are included in the manifest even if they're not added to
version control.
"""
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
# squelch output
quiet()
try:
cmd.run()
finally:
unquiet()
manifest = cmd.filelist.files
self.assertTrue(os.path.join('sdist_test', 'a.txt') in manifest)
self.assertTrue(os.path.join('sdist_test', 'b.txt') in manifest)
self.assertTrue(os.path.join('sdist_test', 'c.rst') not in manifest)
def test_manifest_is_written_with_utf8_encoding(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
mm = manifest_maker(dist)
mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
os.mkdir('sdist_test.egg-info')
# UTF-8 filename
filename = os.path.join('sdist_test', 'smörbröd.py')
# Add UTF-8 filename and write manifest
quiet()
try:
mm.run()
mm.filelist.files.append(filename)
mm.write_manifest()
finally:
unquiet()
manifest = open(mm.manifest, 'rbU')
contents = manifest.read()
manifest.close()
# The manifest should be UTF-8 encoded
try:
u_contents = contents.decode('UTF-8')
except UnicodeDecodeError:
e = sys.exc_info()[1]
self.fail(e)
# The manifest should contain the UTF-8 filename
if sys.version_info >= (3,):
self.assertTrue(posix(filename) in u_contents)
else:
self.assertTrue(posix(filename) in contents)
# Python 3 only
if sys.version_info >= (3,):
def test_write_manifest_allows_utf8_filenames(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
mm = manifest_maker(dist)
mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
os.mkdir('sdist_test.egg-info')
# UTF-8 filename
filename = os.path.join(b('sdist_test'), b('smörbröd.py'))
# Add filename and write manifest
quiet()
try:
mm.run()
u_filename = filename.decode('utf-8')
mm.filelist.files.append(u_filename)
# Re-write manifest
mm.write_manifest()
finally:
unquiet()
manifest = open(mm.manifest, 'rbU')
contents = manifest.read()
manifest.close()
# The manifest should be UTF-8 encoded
try:
contents.decode('UTF-8')
except UnicodeDecodeError:
e = sys.exc_info()[1]
self.fail(e)
# The manifest should contain the UTF-8 filename
self.assertTrue(posix(filename) in contents)
# The filelist should have been updated as well
self.assertTrue(u_filename in mm.filelist.files)
def test_write_manifest_skips_non_utf8_filenames(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
mm = manifest_maker(dist)
mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
os.mkdir('sdist_test.egg-info')
# Latin-1 filename
filename = os.path.join(b('sdist_test'), LATIN1_FILENAME)
# Add filename with surrogates and write manifest
quiet()
try:
mm.run()
u_filename = filename.decode('utf-8', 'surrogateescape')
mm.filelist.files.append(u_filename)
# Re-write manifest
mm.write_manifest()
finally:
unquiet()
manifest = open(mm.manifest, 'rbU')
contents = manifest.read()
manifest.close()
# The manifest should be UTF-8 encoded
try:
contents.decode('UTF-8')
except UnicodeDecodeError:
e = sys.exc_info()[1]
self.fail(e)
# The Latin-1 filename should have been skipped
self.assertFalse(posix(filename) in contents)
# The filelist should have been updated as well
self.assertFalse(u_filename in mm.filelist.files)
def test_manifest_is_read_with_utf8_encoding(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
# Create manifest
quiet()
try:
cmd.run()
finally:
unquiet()
# Add UTF-8 filename to manifest
filename = os.path.join(b('sdist_test'), b('smörbröd.py'))
cmd.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
manifest = open(cmd.manifest, 'ab')
manifest.write(b('\n')+filename)
manifest.close()
# The file must exist to be included in the filelist
open(filename, 'w').close()
# Re-read manifest
cmd.filelist.files = []
quiet()
try:
cmd.read_manifest()
finally:
unquiet()
# The filelist should contain the UTF-8 filename
if sys.version_info >= (3,):
filename = filename.decode('utf-8')
self.assertTrue(filename in cmd.filelist.files)
# Python 3 only
if sys.version_info >= (3,):
def test_read_manifest_skips_non_utf8_filenames(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
# Create manifest
quiet()
try:
cmd.run()
finally:
unquiet()
# Add Latin-1 filename to manifest
filename = os.path.join(b('sdist_test'), LATIN1_FILENAME)
cmd.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
manifest = open(cmd.manifest, 'ab')
manifest.write(b('\n')+filename)
manifest.close()
# The file must exist to be included in the filelist
open(filename, 'w').close()
# Re-read manifest
cmd.filelist.files = []
quiet()
try:
try:
cmd.read_manifest()
except UnicodeDecodeError:
e = sys.exc_info()[1]
self.fail(e)
finally:
unquiet()
# The Latin-1 filename should have been skipped
filename = filename.decode('latin-1')
self.assertFalse(filename in cmd.filelist.files)
    @skipIf(sys.version_info >= (3,) and locale.getpreferredencoding() != 'UTF-8',
            'The test fails if the locale is not UTF-8, but the manifest is recorded correctly')
def test_sdist_with_utf8_encoded_filename(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
# UTF-8 filename
filename = os.path.join(b('sdist_test'), b('smörbröd.py'))
open(filename, 'w').close()
quiet()
try:
cmd.run()
finally:
unquiet()
if sys.platform == 'darwin':
filename = decompose(filename)
if sys.version_info >= (3,):
fs_enc = sys.getfilesystemencoding()
if sys.platform == 'win32':
if fs_enc == 'cp1252':
# Python 3 mangles the UTF-8 filename
filename = filename.decode('cp1252')
self.assertTrue(filename in cmd.filelist.files)
else:
filename = filename.decode('mbcs')
self.assertTrue(filename in cmd.filelist.files)
else:
filename = filename.decode('utf-8')
self.assertTrue(filename in cmd.filelist.files)
else:
self.assertTrue(filename in cmd.filelist.files)
def test_sdist_with_latin1_encoded_filename(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
# Latin-1 filename
filename = os.path.join(b('sdist_test'), LATIN1_FILENAME)
open(filename, 'w').close()
self.assertTrue(os.path.isfile(filename))
quiet()
try:
cmd.run()
finally:
unquiet()
if sys.version_info >= (3,):
            # Not all Windows systems have a default FS encoding of cp1252.
            if sys.platform == 'win32':
                # Latin-1 is similar to Windows-1252, but on an mbcs
                # filesystem the filename is not stored in Latin-1 encoding.
fs_enc = sys.getfilesystemencoding()
if fs_enc == 'mbcs':
filename = filename.decode('mbcs')
else:
filename = filename.decode('latin-1')
self.assertTrue(filename in cmd.filelist.files)
else:
# The Latin-1 filename should have been skipped
filename = filename.decode('latin-1')
self.assertFalse(filename in cmd.filelist.files)
else:
# No conversion takes place under Python 2 and the file
# is included. We shall keep it that way for BBB.
self.assertTrue(filename in cmd.filelist.files)
class TestDummyOutput(environment.ZippedEnvironment):
def setUp(self):
self.datafile = os.path.join('setuptools', 'tests',
'svn_data', "dummy.zip")
self.dataname = "dummy"
super(TestDummyOutput, self).setUp()
def _run(self):
code, data = environment.run_setup_py(["sdist"],
pypath=self.old_cwd,
data_stream=0)
if code:
info = "DIR: " + os.path.abspath('.')
info += "\n SDIST RETURNED: %i\n\n" % code
info += data
raise AssertionError(info)
datalines = data.splitlines()
possible = (
"running sdist",
"running egg_info",
"creating dummy\.egg-info",
"writing dummy\.egg-info",
"writing top-level names to dummy\.egg-info",
"writing dependency_links to dummy\.egg-info",
"writing manifest file 'dummy\.egg-info",
"reading manifest file 'dummy\.egg-info",
"reading manifest template 'MANIFEST\.in'",
"writing manifest file 'dummy\.egg-info",
"creating dummy-0.1.1",
"making hard links in dummy-0\.1\.1",
"copying files to dummy-0\.1\.1",
"copying \S+ -> dummy-0\.1\.1",
"copying dummy",
"copying dummy\.egg-info",
"hard linking \S+ -> dummy-0\.1\.1",
"hard linking dummy",
"hard linking dummy\.egg-info",
"Writing dummy-0\.1\.1",
"creating dist",
"creating 'dist",
"Creating tar archive",
"running check",
"adding 'dummy-0\.1\.1",
"tar .+ dist/dummy-0\.1\.1\.tar dummy-0\.1\.1",
"gzip .+ dist/dummy-0\.1\.1\.tar",
"removing 'dummy-0\.1\.1' \\(and everything under it\\)",
)
print(" DIR: " + os.path.abspath('.'))
for line in datalines:
found = False
for pattern in possible:
if re.match(pattern, line):
print(" READ: " + line)
found = True
break
if not found:
raise AssertionError("Unexpexected: %s\n-in-\n%s"
% (line, data))
return data
def test_sources(self):
self._run()
class TestSvn(environment.ZippedEnvironment):
def setUp(self):
version = svn_utils.SvnInfo.get_svn_version()
if not version: # None or Empty
return
self.base_version = tuple([int(x) for x in version.split('.')][:2])
if not self.base_version:
raise ValueError('No SVN tools installed')
elif self.base_version < (1, 3):
raise ValueError('Insufficient SVN Version %s' % version)
        elif self.base_version >= (1, 9):
            # Fall back to the newest fixture data available (1.8).
            self.base_version = (1, 8)
self.dataname = "svn%i%i_example" % self.base_version
self.datafile = os.path.join('setuptools', 'tests',
'svn_data', self.dataname + ".zip")
super(TestSvn, self).setUp()
    @skipIf(not test_svn._svn_check, "No SVN to test in the first place")
def test_walksvn(self):
if self.base_version >= (1, 6):
folder2 = 'third party2'
folder3 = 'third party3'
else:
folder2 = 'third_party2'
folder3 = 'third_party3'
        # TODO: is this right?
expected = set([
os.path.join('a file'),
os.path.join(folder2, 'Changes.txt'),
os.path.join(folder2, 'MD5SUMS'),
os.path.join(folder2, 'README.txt'),
os.path.join(folder3, 'Changes.txt'),
os.path.join(folder3, 'MD5SUMS'),
os.path.join(folder3, 'README.txt'),
os.path.join(folder3, 'TODO.txt'),
os.path.join(folder3, 'fin'),
os.path.join('third_party', 'README.txt'),
os.path.join('folder', folder2, 'Changes.txt'),
os.path.join('folder', folder2, 'MD5SUMS'),
os.path.join('folder', folder2, 'WatashiNiYomimasu.txt'),
os.path.join('folder', folder3, 'Changes.txt'),
os.path.join('folder', folder3, 'fin'),
os.path.join('folder', folder3, 'MD5SUMS'),
os.path.join('folder', folder3, 'oops'),
os.path.join('folder', folder3, 'WatashiNiYomimasu.txt'),
os.path.join('folder', folder3, 'ZuMachen.txt'),
os.path.join('folder', 'third_party', 'WatashiNiYomimasu.txt'),
os.path.join('folder', 'lalala.txt'),
os.path.join('folder', 'quest.txt'),
# The example will have a deleted file
# (or should) but shouldn't return it
])
self.assertEqual(set(x for x in walk_revctrl()), expected)
def test_suite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
|
gpl-2.0
|
wkh124/wkh124
|
rna_seq_average_by_gene.py
|
1
|
1793
|
#!/usr/bin/env python
"""Average RNA-Seq data by unique genes usign a column with unique gene
identifiers from a tab separated file with the data of one sequence per line.
Usage:
./rna_seq_average_by_gene.py input_file id_column name_col data_column
id_column: Number of the column containing the identifiers of the unique genes.
When multiple lines share the same identifier, their data are combined
name_col: Number of the column containing the sequence names.
data_column: Number of the first column containing the data. All
subsequent columns are expected to contain data.
"""
# Importing modules
import sys
# Parsing user input
try:
input_file = sys.argv[1]
id_col = int(sys.argv[2]) - 1
name_col = int(sys.argv[3]) - 1
data_col = int(sys.argv[4]) - 1
except (IndexError, ValueError):
print __doc__
sys.exit(1)
# Main
if __name__ == '__main__':
data = [x.strip().split("\t") for x in open(input_file).readlines() if x.strip() != ""]
unique_genes = list(set([x[id_col] for x in data]))
with open(input_file + ".unique", "w") as f1:
with open(input_file + ".groups", "w") as f2:
for gene in unique_genes:
#print "---------" + gene + "---------"
gene_names = [x[name_col] for x in data if x[id_col] == gene]
gene_data = [x[data_col:] for x in data if x[id_col] == gene]
nseq = len(gene_data)
nind = len(gene_data[0])
avg = [0] * nind
for i in xrange(nind):
ind_data = [int(x[i]) for x in gene_data]
avg[i] = sum(ind_data) / float(nseq)
f1.write(gene + "\t" + "\t".join([str(x) for x in avg]) + "\n")
f2.write(gene + "\t" + "\t".join(gene_names) + "\n")
|
gpl-3.0
|
eunchong/build
|
third_party/buildbot_8_4p1/buildbot/test/fake/state.py
|
8
|
1027
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
class State(object):
"""
A simple class you can use to keep track of state throughout
a test. Just assign whatever you want to its attributes. Its
constructor provides a shortcut to setting initial values for
attributes
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
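if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of Buildbot): seed initial
    # attribute values via the constructor, then mutate them during a test.
    s = State(attempts=0, succeeded=False)
    s.attempts += 1
    s.succeeded = True
    print(s.attempts, s.succeeded)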
|
bsd-3-clause
|
XXMrHyde/android_external_chromium_org
|
third_party/tlslite/tlslite/VerifierDB.py
|
359
|
3104
|
"""Class for storing SRP password verifiers."""
from utils.cryptomath import *
from utils.compat import *
import mathtls
from BaseDB import BaseDB
class VerifierDB(BaseDB):
"""This class represent an in-memory or on-disk database of SRP
password verifiers.
A VerifierDB can be passed to a server handshake to authenticate
a client based on one of the verifiers.
This class is thread-safe.
"""
def __init__(self, filename=None):
"""Create a new VerifierDB instance.
@type filename: str
@param filename: Filename for an on-disk database, or None for
an in-memory database. If the filename already exists, follow
this with a call to open(). To create a new on-disk database,
follow this with a call to create().
"""
BaseDB.__init__(self, filename, "verifier")
def _getItem(self, username, valueStr):
(N, g, salt, verifier) = valueStr.split(" ")
N = base64ToNumber(N)
g = base64ToNumber(g)
salt = base64ToString(salt)
verifier = base64ToNumber(verifier)
return (N, g, salt, verifier)
def __setitem__(self, username, verifierEntry):
"""Add a verifier entry to the database.
@type username: str
@param username: The username to associate the verifier with.
Must be less than 256 characters in length. Must not already
be in the database.
@type verifierEntry: tuple
@param verifierEntry: The verifier entry to add. Use
L{tlslite.VerifierDB.VerifierDB.makeVerifier} to create a
verifier entry.
"""
BaseDB.__setitem__(self, username, verifierEntry)
def _setItem(self, username, value):
if len(username)>=256:
raise ValueError("username too long")
N, g, salt, verifier = value
N = numberToBase64(N)
g = numberToBase64(g)
salt = stringToBase64(salt)
verifier = numberToBase64(verifier)
valueStr = " ".join( (N, g, salt, verifier) )
return valueStr
def _checkItem(self, value, username, param):
(N, g, salt, verifier) = value
x = mathtls.makeX(salt, username, param)
v = powMod(g, x, N)
return (verifier == v)
def makeVerifier(username, password, bits):
"""Create a verifier entry which can be stored in a VerifierDB.
@type username: str
@param username: The username for this verifier. Must be less
than 256 characters in length.
@type password: str
@param password: The password for this verifier.
@type bits: int
        @param bits: This value specifies which SRP group parameters
to use. It must be one of (1024, 1536, 2048, 3072, 4096, 6144,
8192). Larger values are more secure but slower. 2048 is a
good compromise between safety and speed.
@rtype: tuple
@return: A tuple which may be stored in a VerifierDB.
"""
return mathtls.makeVerifier(username, password, bits)
makeVerifier = staticmethod(makeVerifier)
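if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of tlslite): build an
    # in-memory database (filename=None) and store one verifier entry;
    # 2048-bit group parameters follow the makeVerifier docstring's advice.
    db = VerifierDB()
    db.create()
    db["alice"] = VerifierDB.makeVerifier("alice", "password", 2048)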
|
bsd-3-clause
|
MikeAmy/django-webtest
|
django_webtest/middleware.py
|
28
|
2546
|
# -*- coding: utf-8 -*-
from django.contrib.auth.middleware import RemoteUserMiddleware
from django.core.exceptions import ImproperlyConfigured
from django.contrib import auth
class WebtestUserMiddleware(RemoteUserMiddleware):
"""
Middleware for utilizing django-webtest simplified auth
('user' arg for self.app.post and self.app.get).
Mostly copied from RemoteUserMiddleware, but the auth backend is changed
(by changing ``auth.authenticate`` arguments) in order to keep
RemoteUser backend untouched during django-webtest auth.
"""
header = "WEBTEST_USER"
def process_request(self, request):
# AuthenticationMiddleware is required so that request.user exists.
if not hasattr(request, 'user'):
raise ImproperlyConfigured(
"The django-webtest auth middleware requires the "
"'django.contrib.auth.middleware.AuthenticationMiddleware' "
"to be installed. Add it to your MIDDLEWARE_CLASSES setting "
"or disable django-webtest auth support "
"by setting 'setup_auth' property of your WebTest subclass "
"to False."
)
try:
username = request.META[self.header]
except KeyError:
# If specified header doesn't exist then return (leaving
# request.user set to AnonymousUser by the
# AuthenticationMiddleware).
return
# If the user is already authenticated and that user is the user we are
# getting passed in the headers, then the correct user is already
# persisted in the session and we don't need to continue.
if request.user.is_authenticated():
if hasattr(request.user, "get_username"):
authenticated_username = request.user.get_username()
else:
authenticated_username = request.user.username
if authenticated_username == self.clean_username(username, request):
return
# We are seeing this user for the first time in this session, attempt
# to authenticate the user.
user = auth.authenticate(django_webtest_user=username)
if user:
# User is valid. Set request.user and persist user in the session
# by logging the user in.
request.user = user
auth.login(request, user)
class DisableCSRFCheckMiddleware(object):
def process_request(self, request):
request._dont_enforce_csrf_checks = True
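# Illustrative settings sketch (an assumption, not taken from this module):
# WebtestUserMiddleware expects to sit after Django's own
# AuthenticationMiddleware in the middleware list, e.g.
#
#     MIDDLEWARE_CLASSES = [
#         'django.contrib.auth.middleware.AuthenticationMiddleware',
#         'django_webtest.middleware.WebtestUserMiddleware',
#     ]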
|
mit
|
cynngah/uofthacksIV
|
generate-jobs/lib/python2.7/site-packages/requests/_internal_utils.py
|
414
|
1096
|
# -*- coding: utf-8 -*-
"""
requests._internal_utils
~~~~~~~~~~~~~~
Provides utility functions that are consumed internally by Requests
which depend on extremely few external helpers (such as compat)
"""
from .compat import is_py2, builtin_str, str
def to_native_string(string, encoding='ascii'):
"""Given a string object, regardless of type, returns a representation of
that string in the native string type, encoding and decoding where
necessary. This assumes ASCII unless told otherwise.
"""
if isinstance(string, builtin_str):
out = string
else:
if is_py2:
out = string.encode(encoding)
else:
out = string.decode(encoding)
return out
def unicode_is_ascii(u_string):
"""Determine if unicode string only contains ASCII characters.
:param str u_string: unicode string to check. Must be unicode
and not Python 2 `str`.
:rtype: bool
"""
assert isinstance(u_string, str)
try:
u_string.encode('ascii')
return True
except UnicodeEncodeError:
return False
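if __name__ == '__main__':
    # Illustrative checks (not part of Requests): pure-ASCII text passes,
    # anything outside ASCII does not.
    assert unicode_is_ascii(u'hello')
    assert not unicode_is_ascii(u'sm\xf6rbr\xf6d')
    print(to_native_string(u'hello'))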
|
mit
|
5hawnknight/selenium
|
py/test/selenium/webdriver/common/click_tests.py
|
39
|
1423
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
@pytest.fixture(autouse=True)
def loadPage(pages):
pages.load("clicks.html")
def testCanClickOnALinkThatOverflowsAndFollowIt(driver):
driver.find_element(By.ID, "overflowLink").click()
WebDriverWait(driver, 3).until(EC.title_is("XHTML Test Page"))
def testClickingALinkMadeUpOfNumbersIsHandledCorrectly(driver):
driver.find_element(By.LINK_TEXT, "333333").click()
WebDriverWait(driver, 3).until(EC.title_is("XHTML Test Page"))
|
apache-2.0
|
doot/CouchPotatoServer
|
libs/guessit/hash_mpc.py
|
150
|
1884
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2011 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
import struct
import os
def hash_file(filename):
"""This function is taken from:
http://trac.opensubtitles.org/projects/opensubtitles/wiki/HashSourceCodes
and is licensed under the GPL."""
longlongformat = 'q' # long long
bytesize = struct.calcsize(longlongformat)
f = open(filename, "rb")
filesize = os.path.getsize(filename)
hash_value = filesize
if filesize < 65536 * 2:
raise Exception("SizeError: size is %d, should be > 132K..." % filesize)
    for x in range(65536 // bytesize):
buf = f.read(bytesize)
(l_value,) = struct.unpack(longlongformat, buf)
hash_value += l_value
hash_value = hash_value & 0xFFFFFFFFFFFFFFFF #to remain as 64bit number
f.seek(max(0, filesize - 65536), 0)
    for x in range(65536 // bytesize):
buf = f.read(bytesize)
(l_value,) = struct.unpack(longlongformat, buf)
hash_value += l_value
hash_value = hash_value & 0xFFFFFFFFFFFFFFFF
f.close()
return "%016x" % hash_value
|
gpl-3.0
|
grzezlo/NVDARemote
|
addon/globalPlugins/remoteClient/server.py
|
5
|
5723
|
import json
import os
import select
import socket
import ssl
import sys
import time
class Server:
PING_TIME: int = 300
running: bool = False
port: int
password: str
def __init__(self, port, password, bind_host='', bind_host6='[::]'):
self.port = port
self.password = password
#Maps client sockets to clients
self.clients = {}
self.client_sockets = []
self.running = False
self.server_socket = self.create_server_socket(socket.AF_INET, socket.SOCK_STREAM, bind_addr=(bind_host, self.port))
self.server_socket6 = self.create_server_socket(socket.AF_INET6, socket.SOCK_STREAM, bind_addr=(bind_host6, self.port))
def create_server_socket(self, family, type, bind_addr):
server_socket = socket.socket(family, type)
certfile = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'server.pem')
server_socket = ssl.wrap_socket(server_socket, certfile=certfile)
server_socket.bind(bind_addr)
server_socket.listen(5)
return server_socket
def run(self):
self.running = True
self.last_ping_time = time.time()
while self.running:
r, w, e = select.select(self.client_sockets+[self.server_socket, self.server_socket6], [], self.client_sockets, 60)
if not self.running:
break
for sock in r:
if sock is self.server_socket or sock is self.server_socket6:
self.accept_new_connection(sock)
continue
self.clients[sock].handle_data()
if time.time() - self.last_ping_time >= self.PING_TIME:
for client in self.clients.values():
if client.authenticated:
client.send(type='ping')
self.last_ping_time = time.time()
def accept_new_connection(self, sock):
try:
client_sock, addr = sock.accept()
except (ssl.SSLError, socket.error, OSError):
return
client_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
client = Client(server=self, socket=client_sock)
self.add_client(client)
def add_client(self, client):
self.clients[client.socket] = client
self.client_sockets.append(client.socket)
def remove_client(self, client):
del self.clients[client.socket]
self.client_sockets.remove(client.socket)
def client_disconnected(self, client):
self.remove_client(client)
if client.authenticated:
client.send_to_others(type='client_left', user_id=client.id, client=client.as_dict())
def close(self):
self.running = False
self.server_socket.close()
self.server_socket6.close()
class Client:
id: int = 0
def __init__(self, server, socket):
self.server = server
self.socket = socket
self.buffer = b''
self.authenticated = False
self.id = Client.id + 1
self.connection_type = None
self.protocol_version = 1
Client.id += 1
def handle_data(self):
sock_data: bytes = b''
try:
            # 16384 is 2^14; self.socket is an SSL-wrapped socket.
# Perhaps this value was chosen as the largest value that could be received [1] to avoid having to loop
# until a new line is reached.
# However, the Python docs [2] say:
# "For best match with hardware and network realities, the value of bufsize should be a relatively
# small power of 2, for example, 4096."
# This should probably be changed in the future.
# See also transport.py handle_server_data in class TCPTransport.
# [1] https://stackoverflow.com/a/24870153/
# [2] https://docs.python.org/3.7/library/socket.html#socket.socket.recv
buffSize = 16384
sock_data = self.socket.recv(buffSize)
        except Exception:
self.close()
return
if not sock_data: #Disconnect
self.close()
return
data = self.buffer + sock_data
if b'\n' not in data:
self.buffer = data
return
self.buffer = b""
while b'\n' in data:
line, sep, data = data.partition(b'\n')
try:
self.parse(line)
except ValueError:
self.close()
return
self.buffer += data
def parse(self, line):
parsed = json.loads(line)
if 'type' not in parsed:
return
if self.authenticated:
self.send_to_others(**parsed)
return
fn = 'do_'+parsed['type']
if hasattr(self, fn):
getattr(self, fn)(parsed)
def as_dict(self):
return dict(id=self.id, connection_type=self.connection_type)
def do_join(self, obj):
password = obj.get('channel', None)
if password != self.server.password:
self.send(type='error', message='incorrect_password')
self.close()
return
self.connection_type = obj.get('connection_type')
self.authenticated = True
clients = []
client_ids = []
for c in list(self.server.clients.values()):
if c is self or not c.authenticated:
continue
clients.append(c.as_dict())
client_ids.append(c.id)
self.send(type='channel_joined', channel=self.server.password, user_ids=client_ids, clients=clients)
self.send_to_others(type='client_joined', user_id=self.id, client=self.as_dict())
def do_protocol_version(self, obj):
version = obj.get('version')
if not version:
return
self.protocol_version = version
def close(self):
self.socket.close()
self.server.client_disconnected(self)
def send(self, type, origin=None, clients=None, client=None, **kwargs):
msg = dict(type=type, **kwargs)
if self.protocol_version > 1:
if origin:
msg['origin'] = origin
if clients:
msg['clients'] = clients
if client:
msg['client'] = client
msgstr = json.dumps(msg)+'\n'
try:
self.socket.sendall(msgstr.encode('UTF-8'))
        except Exception:
self.close()
def send_to_others(self, origin=None, **obj):
if origin is None:
origin = self.id
for c in self.server.clients.values():
if c is not self and c.authenticated:
c.send(origin=origin, **obj)
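if __name__ == '__main__':
    # Illustrative only: each protocol message is one JSON object terminated
    # by a newline; the 'channel' field of a join message carries the
    # password checked in Client.do_join above.
    print(json.dumps(dict(type='join', channel='secret',
                          connection_type='master')) + '\n')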
|
gpl-2.0
|
CartoDB/crankshaft
|
release/python/0.8.1/crankshaft/test/test_clustering_getis.py
|
11
|
2568
|
import unittest
import numpy as np
from helper import fixture_file
from crankshaft.clustering import Getis
import crankshaft.pysal_utils as pu
from crankshaft import random_seeds
import json
from crankshaft.analysis_data_provider import AnalysisDataProvider
# Fixture files produced as follows
#
# import pysal as ps
# import numpy as np
# import random
#
# # setup variables
# f = ps.open(ps.examples.get_path("stl_hom.dbf"))
# y = np.array(f.by_col['HR8893'])
# w_queen = ps.queen_from_shapefile(ps.examples.get_path("stl_hom.shp"))
#
# out_queen = [{"id": index + 1,
# "neighbors": [x+1 for x in w_queen.neighbors[index]],
# "value": val} for index, val in enumerate(y)]
#
# with open('neighbors_queen_getis.json', 'w') as f:
# f.write(str(out_queen))
#
# random.seed(1234)
# np.random.seed(1234)
# lgstar_queen = ps.esda.getisord.G_Local(y, w_queen, star=True,
# permutations=999)
#
# with open('getis_queen.json', 'w') as f:
# f.write(str(zip(lgstar_queen.z_sim,
# lgstar_queen.p_sim, lgstar_queen.p_z_sim)))
class FakeDataProvider(AnalysisDataProvider):
def __init__(self, mock_data):
self.mock_result = mock_data
def get_getis(self, w_type, param):
return self.mock_result
class GetisTest(unittest.TestCase):
"""Testing class for Getis-Ord's G* funtion
This test replicates the work done in PySAL documentation:
https://pysal.readthedocs.io/en/v1.11.0/users/tutorials/autocorrelation.html#local-g-and-g
"""
def setUp(self):
# load raw data for analysis
self.neighbors_data = json.loads(
open(fixture_file('neighbors_getis.json')).read())
# load pre-computed/known values
self.getis_data = json.loads(
open(fixture_file('getis.json')).read())
def test_getis_ord(self):
"""Test Getis-Ord's G*"""
data = [{'id': d['id'],
'attr1': d['value'],
'neighbors': d['neighbors']} for d in self.neighbors_data]
random_seeds.set_random_seeds(1234)
getis = Getis(FakeDataProvider(data))
result = getis.getis_ord('subquery', 'value',
'queen', None, 999, 'the_geom',
'cartodb_id')
result = [(row[0], row[1]) for row in result]
expected = np.array(self.getis_data)[:, 0:2]
for ([res_z, res_p], [exp_z, exp_p]) in zip(result, expected):
self.assertAlmostEqual(res_z, exp_z, delta=1e-2)
|
bsd-3-clause
|
tmpgit/intellij-community
|
python/testData/inspections/PyPropertyDefinitionInspection25/src/prop_test.py
|
48
|
1700
|
class A(object):
def __init__(self, bar):
self._x = 1 ; self._bar = bar
def __getX(self):
return self._x
def __setX(self, x):
self._x = x
def __delX(self):
pass
x1 = property(__getX, __setX, __delX, "doc of x1")
x2 = property(__setX) # should return
x3 = property(__getX, __getX) # should not return
x4 = property(__getX, fdel=__getX) # should not return
x5 = property(__getX, doc=123) # bad doc
x6 = property(lambda self: self._x)
x7 = property(lambda self: self._x, lambda self: self._x) # setter should not return
@property
def foo(self):
return self._x
@foo.setter # ignored in 2.5
def foo(self, x):
self._x = x
@foo.deleter # ignored in 2.5
def foo(self):
pass
@property
def boo(self):
return self._x
@boo.setter
def boo1(self, x): # ignored in 2.5
self._x = x
@boo.deleter
    def boo2(self): # ignored in 2.5
pass
@property
def moo(self): # should return
pass
@moo.setter
def foo(self, x):
return 1 # ignored in 2.5
@foo.deleter
def foo(self):
return self._x # ignored in 2.5
@qoo.setter # unknown qoo is reported in ref inspection
def qoo(self, v):
self._x = v
@property
def bar(self):
return None
class Ghostbusters(object):
def __call__(self):
return "Who do you call?"
gb = Ghostbusters()
class B(object):
x = property(gb) # pass
y = property(Ghostbusters()) # pass
z = property(Ghostbusters) # pass
class Eternal(object):
def give(self):
while True:
yield 1
def giveAndTake(self):
x = 1
while True:
x = (yield x)
one = property(give) # should pass
anything = property(giveAndTake) # should pass
|
apache-2.0
|
yongtang/tensorflow
|
tensorflow/python/keras/layers/kernelized_test.py
|
6
|
16816
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for kernelized.py."""
import functools
import math
import os
import shutil
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras import backend as keras_backend
from tensorflow.python.keras import combinations
from tensorflow.python.keras import initializers
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import input_layer
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import kernelized as kernel_layers
from tensorflow.python.keras.saving import save
from tensorflow.python.keras.utils import kernelized_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
def _exact_gaussian(stddev):
return functools.partial(
kernelized_utils.exact_gaussian_kernel, stddev=stddev)
def _exact_laplacian(stddev):
return functools.partial(
kernelized_utils.exact_laplacian_kernel, stddev=stddev)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class RandomFourierFeaturesTest(test.TestCase, parameterized.TestCase):
def _assert_all_close(self, expected, actual, atol=0.001):
if not context.executing_eagerly():
with self.cached_session() as sess:
keras_backend._initialize_variables(sess)
self.assertAllClose(expected, actual, atol=atol)
else:
self.assertAllClose(expected, actual, atol=atol)
@testing_utils.run_v2_only
def test_state_saving_and_loading(self):
with self.cached_session():
input_data = np.random.random((1, 2))
rff_layer = kernel_layers.RandomFourierFeatures(output_dim=10, scale=3.0)
inputs = input_layer.Input((2,))
outputs = rff_layer(inputs)
model = training.Model(inputs, outputs)
output_data = model.predict(input_data)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
saved_model_dir = os.path.join(temp_dir, 'rff_model')
model.save(saved_model_dir)
new_model = save.load_model(saved_model_dir)
new_output_data = new_model.predict(input_data)
self.assertAllClose(output_data, new_output_data, atol=1e-4)
def test_invalid_output_dim(self):
with self.assertRaisesRegex(
ValueError, r'`output_dim` should be a positive integer. Given: -3.'):
_ = kernel_layers.RandomFourierFeatures(output_dim=-3, scale=2.0)
def test_unsupported_kernel_type(self):
with self.assertRaisesRegex(
ValueError, r'Unsupported kernel type: \'unsupported_kernel\'.'):
_ = kernel_layers.RandomFourierFeatures(
3, 'unsupported_kernel', stddev=2.0)
def test_invalid_scale(self):
with self.assertRaisesRegex(
ValueError,
r'When provided, `scale` should be a positive float. Given: 0.0.'):
_ = kernel_layers.RandomFourierFeatures(output_dim=10, scale=0.0)
def test_invalid_input_shape(self):
inputs = random_ops.random_uniform((3, 2, 4), seed=1)
rff_layer = kernel_layers.RandomFourierFeatures(output_dim=10, scale=3.0)
with self.assertRaisesRegex(
ValueError,
r'The rank of the input tensor should be 2. Got 3 instead.'):
_ = rff_layer(inputs)
@parameterized.named_parameters(
('gaussian', 'gaussian', 10.0, False),
('random', init_ops.random_uniform_initializer, 1.0, True))
def test_random_features_properties(self, initializer, scale, trainable):
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=10,
kernel_initializer=initializer,
scale=scale,
trainable=trainable)
self.assertEqual(rff_layer.output_dim, 10)
self.assertEqual(rff_layer.kernel_initializer, initializer)
self.assertEqual(rff_layer.scale, scale)
self.assertEqual(rff_layer.trainable, trainable)
@parameterized.named_parameters(('gaussian', 'gaussian', False),
('laplacian', 'laplacian', True),
('other', init_ops.ones_initializer, True))
def test_call(self, initializer, trainable):
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=10,
kernel_initializer=initializer,
scale=1.0,
trainable=trainable,
name='random_fourier_features')
inputs = random_ops.random_uniform((3, 2), seed=1)
outputs = rff_layer(inputs)
self.assertListEqual([3, 10], outputs.shape.as_list())
num_trainable_vars = 1 if trainable else 0
self.assertLen(rff_layer.non_trainable_variables, 3 - num_trainable_vars)
@test_util.assert_no_new_pyobjects_executing_eagerly
def test_no_eager_Leak(self):
# Tests that repeatedly constructing and building a Layer does not leak
# Python objects.
inputs = random_ops.random_uniform((5, 4), seed=1)
kernel_layers.RandomFourierFeatures(output_dim=4, name='rff')(inputs)
kernel_layers.RandomFourierFeatures(output_dim=10, scale=2.0)(inputs)
def test_output_shape(self):
inputs = random_ops.random_uniform((3, 2), seed=1)
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=7, name='random_fourier_features', trainable=True)
outputs = rff_layer(inputs)
self.assertEqual([3, 7], outputs.shape.as_list())
@parameterized.named_parameters(
('gaussian', 'gaussian'), ('laplacian', 'laplacian'),
('other', init_ops.random_uniform_initializer))
def test_call_on_placeholder(self, initializer):
with ops.Graph().as_default():
inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None])
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=5,
kernel_initializer=initializer,
name='random_fourier_features')
with self.assertRaisesRegex(
ValueError, r'The last dimension of the inputs to '
'`RandomFourierFeatures` should be defined. Found `None`.'):
rff_layer(inputs)
inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[2, None])
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=5,
kernel_initializer=initializer,
name='random_fourier_features')
with self.assertRaisesRegex(
ValueError, r'The last dimension of the inputs to '
'`RandomFourierFeatures` should be defined. Found `None`.'):
rff_layer(inputs)
inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 3])
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=5, name='random_fourier_features')
rff_layer(inputs)
@parameterized.named_parameters(('gaussian', 10, 'gaussian', 2.0),
('laplacian', 5, 'laplacian', None),
('other', 10, init_ops.ones_initializer, 1.0))
def test_compute_output_shape(self, output_dim, initializer, scale):
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim, initializer, scale=scale, name='rff')
with self.assertRaises(ValueError):
rff_layer.compute_output_shape(tensor_shape.TensorShape(None))
with self.assertRaises(ValueError):
rff_layer.compute_output_shape(tensor_shape.TensorShape([]))
with self.assertRaises(ValueError):
rff_layer.compute_output_shape(tensor_shape.TensorShape([3]))
with self.assertRaises(ValueError):
rff_layer.compute_output_shape(tensor_shape.TensorShape([3, 2, 3]))
with self.assertRaisesRegex(
ValueError, r'The innermost dimension of input shape must be defined.'):
rff_layer.compute_output_shape(tensor_shape.TensorShape([3, None]))
self.assertEqual([None, output_dim],
rff_layer.compute_output_shape((None, 3)).as_list())
self.assertEqual([None, output_dim],
rff_layer.compute_output_shape(
tensor_shape.TensorShape([None, 2])).as_list())
self.assertEqual([4, output_dim],
rff_layer.compute_output_shape((4, 1)).as_list())
@parameterized.named_parameters(
('gaussian', 10, 'gaussian', 3.0, False),
('laplacian', 5, 'laplacian', 5.5, True),
('other', 7, init_ops.random_uniform_initializer(), None, True))
def test_get_config(self, output_dim, initializer, scale, trainable):
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim,
initializer,
scale=scale,
trainable=trainable,
name='random_fourier_features',
)
expected_initializer = initializer
if not isinstance(initializer, str):
expected_initializer = initializers.serialize(initializer)
expected_dtype = (
'float32' if base_layer_utils.v2_dtype_behavior_enabled() else None)
expected_config = {
'output_dim': output_dim,
'kernel_initializer': expected_initializer,
'scale': scale,
'name': 'random_fourier_features',
'trainable': trainable,
'dtype': expected_dtype,
}
self.assertLen(expected_config, len(rff_layer.get_config()))
self.assertSameElements(
list(expected_config.items()), list(rff_layer.get_config().items()))
@parameterized.named_parameters(
('gaussian', 5, 'gaussian', None, True),
('laplacian', 5, 'laplacian', 5.5, False),
('other', 7, init_ops.ones_initializer(), 2.0, True))
def test_from_config(self, output_dim, initializer, scale, trainable):
model_config = {
'output_dim': output_dim,
'kernel_initializer': initializer,
'scale': scale,
'trainable': trainable,
'name': 'random_fourier_features',
}
rff_layer = kernel_layers.RandomFourierFeatures.from_config(model_config)
self.assertEqual(rff_layer.output_dim, output_dim)
self.assertEqual(rff_layer.kernel_initializer, initializer)
self.assertEqual(rff_layer.scale, scale)
self.assertEqual(rff_layer.trainable, trainable)
inputs = random_ops.random_uniform((3, 2), seed=1)
outputs = rff_layer(inputs)
self.assertListEqual([3, output_dim], outputs.shape.as_list())
num_trainable_vars = 1 if trainable else 0
self.assertLen(rff_layer.trainable_variables, num_trainable_vars)
if trainable:
self.assertEqual('random_fourier_features/kernel_scale:0',
rff_layer.trainable_variables[0].name)
self.assertLen(rff_layer.non_trainable_variables, 3 - num_trainable_vars)
@parameterized.named_parameters(
('gaussian', 10, 'gaussian', 3.0, True),
('laplacian', 5, 'laplacian', 5.5, False),
('other', 10, init_ops.random_uniform_initializer(), None, True))
def test_same_random_features_params_reused(self, output_dim, initializer,
scale, trainable):
"""Applying the layer on the same input twice gives the same output."""
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=output_dim,
kernel_initializer=initializer,
scale=scale,
trainable=trainable,
name='random_fourier_features')
inputs = constant_op.constant(
np.random.uniform(low=-1.0, high=1.0, size=(2, 4)))
output1 = rff_layer(inputs)
output2 = rff_layer(inputs)
self._assert_all_close(output1, output2)
@parameterized.named_parameters(
('gaussian', 'gaussian', 5.0), ('laplacian', 'laplacian', 3.0),
('other', init_ops.random_uniform_initializer(), 5.0))
def test_different_params_similar_approximation(self, initializer, scale):
random_seed.set_random_seed(12345)
rff_layer1 = kernel_layers.RandomFourierFeatures(
output_dim=3000,
kernel_initializer=initializer,
scale=scale,
name='rff1')
rff_layer2 = kernel_layers.RandomFourierFeatures(
output_dim=2000,
kernel_initializer=initializer,
scale=scale,
name='rff2')
# Two distinct inputs.
x = constant_op.constant([[1.0, -1.0, 0.5]])
y = constant_op.constant([[-1.0, 1.0, 1.0]])
# Apply both layers to both inputs.
output_x1 = math.sqrt(2.0 / 3000.0) * rff_layer1(x)
output_y1 = math.sqrt(2.0 / 3000.0) * rff_layer1(y)
output_x2 = math.sqrt(2.0 / 2000.0) * rff_layer2(x)
output_y2 = math.sqrt(2.0 / 2000.0) * rff_layer2(y)
# Compute the inner products of the outputs (on inputs x and y) for both
# layers. For any fixed random features layer rff_layer, and inputs x, y,
# rff_layer(x)^T * rff_layer(y) ~= K(x,y) up to a normalization factor.
approx_kernel1 = kernelized_utils.inner_product(output_x1, output_y1)
approx_kernel2 = kernelized_utils.inner_product(output_x2, output_y2)
self._assert_all_close(approx_kernel1, approx_kernel2, atol=0.08)
@parameterized.named_parameters(
('gaussian', 'gaussian', 5.0, _exact_gaussian(stddev=5.0)),
('laplacian', 'laplacian', 20.0, _exact_laplacian(stddev=20.0)))
def test_bad_kernel_approximation(self, initializer, scale, exact_kernel_fn):
"""Approximation is bad when output dimension is small."""
# Two distinct inputs.
x = constant_op.constant([[1.0, -1.0, 0.5]])
y = constant_op.constant([[-1.0, 1.0, 1.0]])
small_output_dim = 10
random_seed.set_random_seed(1234)
# Initialize layer.
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=small_output_dim,
kernel_initializer=initializer,
scale=scale,
name='random_fourier_features')
# Apply layer to both inputs.
output_x = math.sqrt(2.0 / small_output_dim) * rff_layer(x)
output_y = math.sqrt(2.0 / small_output_dim) * rff_layer(y)
# The inner products of the outputs (on inputs x and y) approximates the
# real value of the RBF kernel but poorly since the output dimension of the
# layer is small.
exact_kernel_value = exact_kernel_fn(x, y)
approx_kernel_value = kernelized_utils.inner_product(output_x, output_y)
abs_error = math_ops.abs(exact_kernel_value - approx_kernel_value)
if not context.executing_eagerly():
with self.cached_session() as sess:
keras_backend._initialize_variables(sess)
abs_error_eval = sess.run([abs_error])
self.assertGreater(abs_error_eval[0][0], 0.05)
self.assertLess(abs_error_eval[0][0], 0.5)
else:
self.assertGreater(abs_error, 0.05)
self.assertLess(abs_error, 0.5)
@parameterized.named_parameters(
('gaussian', 'gaussian', 5.0, _exact_gaussian(stddev=5.0)),
('laplacian', 'laplacian', 10.0, _exact_laplacian(stddev=10.0)))
def test_good_kernel_approximation_multiple_inputs(self, initializer, scale,
exact_kernel_fn):
# Parameters.
input_dim = 5
output_dim = 2000
x_rows = 20
y_rows = 30
x = constant_op.constant(
np.random.uniform(size=(x_rows, input_dim)), dtype=dtypes.float32)
y = constant_op.constant(
np.random.uniform(size=(y_rows, input_dim)), dtype=dtypes.float32)
random_seed.set_random_seed(1234)
rff_layer = kernel_layers.RandomFourierFeatures(
output_dim=output_dim,
kernel_initializer=initializer,
scale=scale,
name='random_fourier_features')
# The shapes of output_x and output_y are (x_rows, output_dim) and
# (y_rows, output_dim) respectively.
output_x = math.sqrt(2.0 / output_dim) * rff_layer(x)
output_y = math.sqrt(2.0 / output_dim) * rff_layer(y)
approx_kernel_matrix = kernelized_utils.inner_product(output_x, output_y)
exact_kernel_matrix = exact_kernel_fn(x, y)
self._assert_all_close(approx_kernel_matrix, exact_kernel_matrix, atol=0.05)
if __name__ == '__main__':
test.main()
|
apache-2.0
|
icereval/osf.io
|
framework/flask/__init__.py
|
19
|
2935
|
# -*- coding: utf-8 -*-
from flask import (Flask, request, jsonify, render_template, # noqa
render_template_string, Blueprint, send_file, abort, make_response,
redirect as flask_redirect, url_for, send_from_directory, current_app
)
import furl
from website import settings
# Create app
app = Flask(
__name__,
static_folder=settings.STATIC_FOLDER,
static_url_path=settings.STATIC_URL_PATH,
)
# Pull debug mode from settings
app.debug = settings.DEBUG_MODE
app.config['SENTRY_TAGS'] = {'App': 'web'}
app.config['SENTRY_RELEASE'] = settings.VERSION
def rm_handler(app, handler_name, func, key=None):
"""Remove a handler from an application.
:param app: Flask app
:param handler_name: Name of handler type, e.g. 'before_request'
    :param func: Handler function to remove
:param key: Blueprint name
"""
handler_funcs_name = '{0}_funcs'.format(handler_name)
handler_funcs = getattr(app, handler_funcs_name)
try:
handler_funcs.get(key, []).remove(func)
except ValueError:
pass
def rm_handlers(app, handlers, key=None):
"""Remove multiple handlers from an application.
:param app: Flask application
:param handlers: Mapping from handler names to handler functions
"""
for handler_name, func in handlers.iteritems():
rm_handler(app, handler_name, func, key=key)
# Set up static routing for addons
def add_handler(app, handler_name, func, key=None):
"""Add handler to Flask application if handler has not already been added.
Used to avoid attaching the same handlers more than once, e.g. when setting
up multiple applications during testing.
:param app: Flask app
:param handler_name: Name of handler type, e.g. 'before_request'
:param func: Handler function to attach
:param key: Blueprint name
"""
handler_adder = getattr(app, handler_name)
handler_funcs_name = '{0}_funcs'.format(handler_name)
handler_funcs = getattr(app, handler_funcs_name)
if func not in handler_funcs.get(key, []):
handler_adder(func)
def add_handlers(app, handlers, key=None):
"""Add multiple handlers to application.
:param app: Flask application
:param handlers: Mapping from handler names to handler functions
"""
for handler_name, func in handlers.iteritems():
add_handler(app, handler_name, func, key=key)
def redirect(location, code=302):
"""Redirect the client to a desired location. Behaves the same
as Flask's :func:`flask.redirect` function with an awareness of
OSF view-only links.
IMPORTANT: This function should always be used instead of
flask.redirect to ensure the correct behavior of view-only
links.
"""
view_only = request.args.get('view_only', '')
if view_only:
url = furl.furl(location)
url.args['view_only'] = view_only
location = url.url
return flask_redirect(location, code=code)
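if __name__ == '__main__':
    # Illustrative only (requires the OSF settings environment): furl
    # propagates the view_only query parameter the same way redirect() does.
    url = furl.furl('/project/abc123/')
    url.args['view_only'] = 'somekey'
    print(url.url)  # /project/abc123/?view_only=somekey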
|
apache-2.0
|
ajdavis/tornado
|
tornado/test/http1connection_test.py
|
29
|
1995
|
from __future__ import absolute_import, division, print_function
import socket
from tornado.http1connection import HTTP1Connection
from tornado.httputil import HTTPMessageDelegate
from tornado.iostream import IOStream
from tornado.locks import Event
from tornado.netutil import add_accept_handler
from tornado.testing import AsyncTestCase, bind_unused_port, gen_test
class HTTP1ConnectionTest(AsyncTestCase):
def setUp(self):
super(HTTP1ConnectionTest, self).setUp()
self.asyncSetUp()
@gen_test
def asyncSetUp(self):
listener, port = bind_unused_port()
event = Event()
def accept_callback(conn, addr):
self.server_stream = IOStream(conn)
self.addCleanup(self.server_stream.close)
event.set()
add_accept_handler(listener, accept_callback)
self.client_stream = IOStream(socket.socket())
self.addCleanup(self.client_stream.close)
yield [self.client_stream.connect(('127.0.0.1', port)),
event.wait()]
self.io_loop.remove_handler(listener)
listener.close()
@gen_test
def test_http10_no_content_length(self):
# Regression test for a bug in which can_keep_alive would crash
# for an HTTP/1.0 (not 1.1) response with no content-length.
conn = HTTP1Connection(self.client_stream, True)
self.server_stream.write(b"HTTP/1.0 200 Not Modified\r\n\r\nhello")
self.server_stream.close()
event = Event()
test = self
body = []
class Delegate(HTTPMessageDelegate):
def headers_received(self, start_line, headers):
test.code = start_line.code
def data_received(self, data):
body.append(data)
def finish(self):
event.set()
yield conn.read_response(Delegate())
yield event.wait()
self.assertEqual(self.code, 200)
self.assertEqual(b''.join(body), b'hello')
|
apache-2.0
|
yewang15215/django
|
django/conf/locale/cy/formats.py
|
504
|
1822
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y' # '25 Hydref 2006'
TIME_FORMAT = 'P' # '2:30 y.b.'
DATETIME_FORMAT = 'j F Y, P' # '25 Hydref 2006, 2:30 y.b.'
YEAR_MONTH_FORMAT = 'F Y' # 'Hydref 2006'
MONTH_DAY_FORMAT = 'j F' # '25 Hydref'
SHORT_DATE_FORMAT = 'd/m/Y' # '25/10/2006'
SHORT_DATETIME_FORMAT = 'd/m/Y P' # '25/10/2006 2:30 y.b.'
FIRST_DAY_OF_WEEK = 1 # 'Dydd Llun'
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
|
bsd-3-clause
|
bxshi/gem5
|
src/mem/MemObject.py
|
69
|
1734
|
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ron Dreslinski
from ClockedObject import ClockedObject
class MemObject(ClockedObject):
type = 'MemObject'
abstract = True
cxx_header = "mem/mem_object.hh"
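As a usage sketch, concrete memory objects subclass MemObject in the same declarative style, overriding the type tag and the C++ header; the subclass below is hypothetical, not a real gem5 model:

    class MyMemObject(MemObject):
        # Hypothetical concrete model following the pattern above.
        type = 'MyMemObject'
        cxx_header = "mem/my_mem_object.hh"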
|
bsd-3-clause
|
dexterx17/nodoSocket
|
clients/Python-2.7.6/Lib/json/tests/test_pass1.py
|
108
|
1841
|
from json.tests import PyTest, CTest
# from http://json.org/JSON_checker/test/pass1.json
JSON = r'''
[
"JSON Test Pattern pass1",
{"object with 1 member":["array with 1 element"]},
{},
[],
-42,
true,
false,
null,
{
"integer": 1234567890,
"real": -9876.543210,
"e": 0.123456789e-12,
"E": 1.234567890E+34,
"": 23456789012E66,
"zero": 0,
"one": 1,
"space": " ",
"quote": "\"",
"backslash": "\\",
"controls": "\b\f\n\r\t",
"slash": "/ & \/",
"alpha": "abcdefghijklmnopqrstuvwyz",
"ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ",
"digit": "0123456789",
"0123456789": "digit",
"special": "`1~!@#$%^&*()_+-={':[,]}|;.</>?",
"hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A",
"true": true,
"false": false,
"null": null,
"array":[ ],
"object":{ },
"address": "50 St. James Street",
"url": "http://www.JSON.org/",
"comment": "// /* <!-- --",
"# -- --> */": " ",
" s p a c e d " :[1,2 , 3
,
4 , 5 , 6 ,7 ],"compact":[1,2,3,4,5,6,7],
"jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}",
"quotes": "" \u0022 %22 0x22 034 "",
"\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?"
: "A key can be any string"
},
0.5 ,98.6
,
99.44
,
1066,
1e1,
0.1e1,
1e-1,
1e00,2e+00,2e-00
,"rosebud"]
'''
class TestPass1(object):
def test_parse(self):
# test in/out equivalence and parsing
res = self.loads(JSON)
out = self.dumps(res)
self.assertEqual(res, self.loads(out))
class TestPyPass1(TestPass1, PyTest): pass
class TestCPass1(TestPass1, CTest): pass
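Outside the unittest harness, the same in/out equivalence check can be reproduced directly with the standard library json module:

    import json

    res = json.loads(JSON)           # parse the pass1 document above
    out = json.dumps(res)            # serialize it back to a string
    assert res == json.loads(out)    # the round trip preserves the value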
|
mit
|
morreene/tradenews
|
venv/Lib/encodings/iso8859_1.py
|
266
|
13176
|
""" Python Character Mapping Codec iso8859_1 generated from 'MAPPINGS/ISO8859/8859-1.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-1',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\xa5' # 0xA5 -> YEN SIGN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\xaf' # 0xAF -> MACRON
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xb8' # 0xB8 -> CEDILLA
'\xb9' # 0xB9 -> SUPERSCRIPT ONE
'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
'\xbf' # 0xBF -> INVERTED QUESTION MARK
'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic)
'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic)
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic)
'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic)
'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
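As a usage sketch, once the encodings package registers this module the table-driven codec behaves like any other text codec, and the same tables can also be driven directly through the codecs APIs:

    import codecs

    assert 'café'.encode('iso8859-1') == b'caf\xe9'
    assert b'caf\xe9'.decode('iso8859-1') == 'café'
    # Driving the table directly, as Codec.decode does above:
    assert codecs.charmap_decode(b'\xe9', 'strict', decoding_table)[0] == 'é'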
|
bsd-3-clause
|
fedora-infra/fedimg
|
tests/test_uploader.py
|
1
|
3280
|
# This file is part of fedimg.
# Copyright (C) 2014-2018 Red Hat, Inc.
#
# fedimg is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# fedimg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with fedimg; if not, see http://www.gnu.org/licenses,
# or write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: David Gay <[email protected]>
# Sayan Chowdhury <[email protected]>
import mock
import multiprocessing.pool
import unittest
import fedimg.uploader
class TestUploader(unittest.TestCase):
@mock.patch('fedimg.uploader.ec2main', return_value=[])
@mock.patch('fedimg.uploader.ec2copy')
@mock.patch('fedimg.uploader.ACTIVE_SERVICES', return_value=['hp'])
def test_inactive_aws(self, active_services, ec2copy, ec2main):
thread_pool = multiprocessing.pool.ThreadPool(processes=1)
fedimg.uploader.upload(
thread_pool,
['http://kojipkgs.fedoraproject.org/compose/Fedora-Cloud-27-20180317.0/compose/CloudImages/x86_64/images/Fedora-Cloud-Base-27-20180317.0.x86_64.raw.xz'],
compose_id='Fedora-Cloud-27-20180317.0',
push_notifications=True,
)
self.assertIs(ec2main.called, False)
self.assertIs(ec2copy.called, False)
@mock.patch('fedimg.uploader.ec2main', return_value=[])
@mock.patch('fedimg.uploader.ec2copy')
def test_active_aws_no_images(self, ec2copy, ec2main):
thread_pool = multiprocessing.pool.ThreadPool(processes=1)
fedimg.uploader.upload(
thread_pool,
['http://kojipkgs.fedoraproject.org/compose/Fedora-Cloud-27-20180317.0/compose/CloudImages/x86_64/images/Fedora-Cloud-Base-27-20180317.0.x86_64.raw.xz'],
compose_id='Fedora-Cloud-27-20180317.0',
push_notifications=True,
)
self.assertIs(ec2main.called, True)
self.assertIs(ec2copy.called, False)
@mock.patch('fedimg.uploader.ec2main')
@mock.patch('fedimg.uploader.ec2copy')
def test_active_aws_with_images(self, ec2copy, ec2main):
thread_pool = multiprocessing.pool.ThreadPool(processes=1)
ec2main.return_value = [{
'image_id': 'i-abc1234',
'is_image_public': True,
'snapshot_id': 'snap-abc1234',
'is_snapshot_public': True,
'regions': 'us-east-1'
}]
fedimg.uploader.upload(
thread_pool,
['http://kojipkgs.fedoraproject.org/compose/Fedora-Cloud-27-20180317.0/compose/CloudImages/x86_64/images/Fedora-Cloud-Base-27-20180317.0.x86_64.raw.xz'],
compose_id='Fedora-Cloud-27-20180317.0',
push_notifications=True,
)
self.assertIs(ec2main.called, True)
self.assertIs(ec2copy.called, True)
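One subtlety in the tests above: stacked mock.patch decorators are applied bottom-up, so the mock objects arrive as arguments in the reverse of the decorator order (ec2main sits at the top of the stack and therefore comes last in each signature). A standalone sketch of the rule, using stdlib targets purely for illustration:

    import mock

    @mock.patch('os.getcwd')   # outermost decorator -> last argument
    @mock.patch('os.listdir')  # innermost decorator -> first argument
    def check(listdir_mock, getcwd_mock):
        import os
        os.listdir('.')
        assert listdir_mock.called and not getcwd_mock.called

    check()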
|
agpl-3.0
|
sabi0/intellij-community
|
python/helpers/py2only/docutils/parsers/rst/directives/__init__.py
|
104
|
13607
|
# $Id: __init__.py 7621 2013-03-04 13:20:49Z milde $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
This package contains directive implementation modules.
"""
__docformat__ = 'reStructuredText'
import re
import codecs
import sys
from docutils import nodes
from docutils.parsers.rst.languages import en as _fallback_language_module
if sys.version_info < (2,5):
from docutils._compat import __import__
_directive_registry = {
'attention': ('admonitions', 'Attention'),
'caution': ('admonitions', 'Caution'),
'code': ('body', 'CodeBlock'),
'danger': ('admonitions', 'Danger'),
'error': ('admonitions', 'Error'),
'important': ('admonitions', 'Important'),
'note': ('admonitions', 'Note'),
'tip': ('admonitions', 'Tip'),
'hint': ('admonitions', 'Hint'),
'warning': ('admonitions', 'Warning'),
'admonition': ('admonitions', 'Admonition'),
'sidebar': ('body', 'Sidebar'),
'topic': ('body', 'Topic'),
'line-block': ('body', 'LineBlock'),
'parsed-literal': ('body', 'ParsedLiteral'),
'math': ('body', 'MathBlock'),
'rubric': ('body', 'Rubric'),
'epigraph': ('body', 'Epigraph'),
'highlights': ('body', 'Highlights'),
'pull-quote': ('body', 'PullQuote'),
'compound': ('body', 'Compound'),
'container': ('body', 'Container'),
#'questions': ('body', 'question_list'),
'table': ('tables', 'RSTTable'),
'csv-table': ('tables', 'CSVTable'),
'list-table': ('tables', 'ListTable'),
'image': ('images', 'Image'),
'figure': ('images', 'Figure'),
'contents': ('parts', 'Contents'),
'sectnum': ('parts', 'Sectnum'),
'header': ('parts', 'Header'),
'footer': ('parts', 'Footer'),
#'footnotes': ('parts', 'footnotes'),
#'citations': ('parts', 'citations'),
'target-notes': ('references', 'TargetNotes'),
'meta': ('html', 'Meta'),
#'imagemap': ('html', 'imagemap'),
'raw': ('misc', 'Raw'),
'include': ('misc', 'Include'),
'replace': ('misc', 'Replace'),
'unicode': ('misc', 'Unicode'),
'class': ('misc', 'Class'),
'role': ('misc', 'Role'),
'default-role': ('misc', 'DefaultRole'),
'title': ('misc', 'Title'),
'date': ('misc', 'Date'),
'restructuredtext-test-directive': ('misc', 'TestDirective'),}
"""Mapping of directive name to (module name, class name). The
directive name is canonical & must be lowercase. Language-dependent
names are defined in the ``language`` subpackage."""
_directives = {}
"""Cache of imported directives."""
def directive(directive_name, language_module, document):
"""
Locate and return a directive function from its language-dependent name.
If not found in the current language, check English. Return None if the
named directive cannot be found.
"""
normname = directive_name.lower()
messages = []
msg_text = []
if normname in _directives:
return _directives[normname], messages
canonicalname = None
try:
canonicalname = language_module.directives[normname]
except AttributeError, error:
msg_text.append('Problem retrieving directive entry from language '
'module %r: %s.' % (language_module, error))
except KeyError:
msg_text.append('No directive entry for "%s" in module "%s".'
% (directive_name, language_module.__name__))
if not canonicalname:
try:
canonicalname = _fallback_language_module.directives[normname]
msg_text.append('Using English fallback for directive "%s".'
% directive_name)
except KeyError:
msg_text.append('Trying "%s" as canonical directive name.'
% directive_name)
# The canonical name should be an English name, but just in case:
canonicalname = normname
if msg_text:
message = document.reporter.info(
'\n'.join(msg_text), line=document.current_line)
messages.append(message)
try:
modulename, classname = _directive_registry[canonicalname]
except KeyError:
# Error handling done by caller.
return None, messages
try:
module = __import__(modulename, globals(), locals(), level=1)
except ImportError, detail:
messages.append(document.reporter.error(
'Error importing directive module "%s" (directive "%s"):\n%s'
% (modulename, directive_name, detail),
line=document.current_line))
return None, messages
try:
directive = getattr(module, classname)
_directives[normname] = directive
except AttributeError:
messages.append(document.reporter.error(
'No directive class "%s" in module "%s" (directive "%s").'
% (classname, modulename, directive_name),
line=document.current_line))
return None, messages
return directive, messages
def register_directive(name, directive):
"""
Register a nonstandard application-defined directive function.
Language lookups are not needed for such functions.
"""
_directives[name] = directive
def flag(argument):
"""
Check for a valid flag option (no argument) and return ``None``.
(Directive option conversion function.)
Raise ``ValueError`` if an argument is found.
"""
if argument and argument.strip():
raise ValueError('no argument is allowed; "%s" supplied' % argument)
else:
return None
def unchanged_required(argument):
"""
Return the argument text, unchanged.
(Directive option conversion function.)
Raise ``ValueError`` if no argument is found.
"""
if argument is None:
raise ValueError('argument required but none supplied')
else:
return argument # unchanged!
def unchanged(argument):
"""
Return the argument text, unchanged.
(Directive option conversion function.)
No argument implies empty string ("").
"""
if argument is None:
return u''
else:
return argument # unchanged!
def path(argument):
"""
Return the path argument unwrapped (with newlines removed).
(Directive option conversion function.)
Raise ``ValueError`` if no argument is found.
"""
if argument is None:
raise ValueError('argument required but none supplied')
else:
path = ''.join([s.strip() for s in argument.splitlines()])
return path
def uri(argument):
"""
Return the URI argument with whitespace removed.
(Directive option conversion function.)
Raise ``ValueError`` if no argument is found.
"""
if argument is None:
raise ValueError('argument required but none supplied')
else:
uri = ''.join(argument.split())
return uri
def nonnegative_int(argument):
"""
Check for a nonnegative integer argument; raise ``ValueError`` if not.
(Directive option conversion function.)
"""
value = int(argument)
if value < 0:
raise ValueError('negative value; must be positive or zero')
return value
def percentage(argument):
"""
Check for an integer percentage value with optional percent sign.
"""
try:
argument = argument.rstrip(' %')
except AttributeError:
pass
return nonnegative_int(argument)
length_units = ['em', 'ex', 'px', 'in', 'cm', 'mm', 'pt', 'pc']
def get_measure(argument, units):
"""
Check for a positive argument of one of the units and return a
normalized string of the form "<value><unit>" (without space in
between).
To be called from directive option conversion functions.
"""
match = re.match(r'^([0-9.]+) *(%s)$' % '|'.join(units), argument)
try:
float(match.group(1))
except (AttributeError, ValueError):
raise ValueError(
'not a positive measure of one of the following units:\n%s'
% ' '.join(['"%s"' % i for i in units]))
return match.group(1) + match.group(2)
def length_or_unitless(argument):
return get_measure(argument, length_units + [''])
def length_or_percentage_or_unitless(argument, default=''):
"""
Return normalized string of a length or percentage unit.
Add <default> if there is no unit. Raise ValueError if the argument is not
a positive measure of one of the valid CSS units (or without unit).
>>> length_or_percentage_or_unitless('3 pt')
'3pt'
>>> length_or_percentage_or_unitless('3%', 'em')
'3%'
>>> length_or_percentage_or_unitless('3')
'3'
>>> length_or_percentage_or_unitless('3', 'px')
'3px'
"""
try:
return get_measure(argument, length_units + ['%'])
except ValueError:
try:
return get_measure(argument, ['']) + default
except ValueError:
# raise ValueError with list of valid units:
return get_measure(argument, length_units + ['%'])
def class_option(argument):
"""
Convert the argument into a list of ID-compatible strings and return it.
(Directive option conversion function.)
Raise ``ValueError`` if no argument is found.
"""
if argument is None:
raise ValueError('argument required but none supplied')
names = argument.split()
class_names = []
for name in names:
class_name = nodes.make_id(name)
if not class_name:
raise ValueError('cannot make "%s" into a class name' % name)
class_names.append(class_name)
return class_names
unicode_pattern = re.compile(
r'(?:0x|x|\\x|U\+?|\\u)([0-9a-f]+)$|&#x([0-9a-f]+);$', re.IGNORECASE)
def unicode_code(code):
r"""
Convert a Unicode character code to a Unicode character.
(Directive option conversion function.)
Codes may be decimal numbers, hexadecimal numbers (prefixed by ``0x``,
``x``, ``\x``, ``U+``, ``u``, or ``\u``; e.g. ``U+262E``), or XML-style
numeric character entities (e.g. ``☮``). Other text remains as-is.
Raise ValueError for illegal Unicode code values.
"""
try:
if code.isdigit(): # decimal number
return unichr(int(code))
else:
match = unicode_pattern.match(code)
if match: # hex number
value = match.group(1) or match.group(2)
return unichr(int(value, 16))
else: # other text
return code
except OverflowError, detail:
raise ValueError('code too large (%s)' % detail)
def single_char_or_unicode(argument):
"""
    A single character is returned as-is. Unicode character codes are
converted as in `unicode_code`. (Directive option conversion function.)
"""
char = unicode_code(argument)
if len(char) > 1:
raise ValueError('%r invalid; must be a single character or '
'a Unicode code' % char)
return char
def single_char_or_whitespace_or_unicode(argument):
"""
As with `single_char_or_unicode`, but "tab" and "space" are also supported.
(Directive option conversion function.)
"""
if argument == 'tab':
char = '\t'
elif argument == 'space':
char = ' '
else:
char = single_char_or_unicode(argument)
return char
def positive_int(argument):
"""
Converts the argument into an integer. Raises ValueError for negative,
zero, or non-integer values. (Directive option conversion function.)
"""
value = int(argument)
if value < 1:
raise ValueError('negative or zero value; must be positive')
return value
def positive_int_list(argument):
"""
Converts a space- or comma-separated list of values into a Python list
of integers.
(Directive option conversion function.)
Raises ValueError for non-positive-integer values.
"""
if ',' in argument:
entries = argument.split(',')
else:
entries = argument.split()
return [positive_int(entry) for entry in entries]
def encoding(argument):
"""
    Verifies the encoding argument by lookup.
(Directive option conversion function.)
Raises ValueError for unknown encodings.
"""
try:
codecs.lookup(argument)
except LookupError:
raise ValueError('unknown encoding: "%s"' % argument)
return argument
def choice(argument, values):
"""
Directive option utility function, supplied to enable options whose
argument must be a member of a finite set of possible values (must be
lower case). A custom conversion function must be written to use it. For
example::
from docutils.parsers.rst import directives
def yesno(argument):
return directives.choice(argument, ('yes', 'no'))
Raise ``ValueError`` if no argument is found or if the argument's value is
not valid (not an entry in the supplied list).
"""
try:
value = argument.lower().strip()
except AttributeError:
raise ValueError('must supply an argument; choose from %s'
% format_values(values))
if value in values:
return value
else:
raise ValueError('"%s" unknown; choose from %s'
% (argument, format_values(values)))
def format_values(values):
return '%s, or "%s"' % (', '.join(['"%s"' % s for s in values[:-1]]),
values[-1])
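As a usage sketch, the conversion functions above are normally wired into a directive's option_spec; the directive below is hypothetical, but Directive, register_directive, and the converters are the real docutils APIs:

    from docutils import nodes
    from docutils.parsers.rst import Directive, directives

    class HighlightBox(Directive):  # hypothetical example directive
        has_content = True
        option_spec = {
            'width': directives.length_or_percentage_or_unitless,
            'classes': directives.class_option,
            'align': lambda arg: directives.choice(arg, ('left', 'right')),
        }

        def run(self):
            return [nodes.container(classes=self.options.get('classes', []))]

    directives.register_directive('highlight-box', HighlightBox)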
|
apache-2.0
|
EricNeedham/assignment-1
|
venv/lib/python2.7/site-packages/sqlalchemy/pool.py
|
22
|
43867
|
# sqlalchemy/pool.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Connection pooling for DB-API connections.
Provides a number of connection pool implementations for a variety of
usage scenarios and thread behavior requirements imposed by the
application, DB-API or database itself.
Also provides a DB-API 2.0 connection proxying mechanism allowing
regular DB-API connect() methods to be transparently managed by a
SQLAlchemy connection pool.
"""
import time
import traceback
import weakref
from . import exc, log, event, interfaces, util
from .util import queue as sqla_queue
from .util import threading, memoized_property, \
chop_traceback
from collections import deque
proxies = {}
def manage(module, **params):
"""Return a proxy for a DB-API module that automatically
pools connections.
Given a DB-API 2.0 module and pool management parameters, returns
a proxy for the module that will automatically pool connections,
creating new connection pools for each distinct set of connection
arguments sent to the decorated module's connect() function.
:param module: a DB-API 2.0 database module
:param poolclass: the class used by the pool module to provide
pooling. Defaults to :class:`.QueuePool`.
:param \*\*params: will be passed through to *poolclass*
"""
try:
return proxies[module]
except KeyError:
return proxies.setdefault(module, _DBProxy(module, **params))
def clear_managers():
"""Remove all current DB-API 2.0 managers.
All pools and connections are disposed.
"""
for manager in proxies.values():
manager.close()
proxies.clear()
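# Usage sketch (not part of this module): manage() wraps a DB-API module so
# that its connect() calls are pooled transparently, and clear_managers()
# disposes everything at shutdown.
#
#     import sqlite3
#     from sqlalchemy import pool
#
#     sqlite3 = pool.manage(sqlite3, poolclass=pool.QueuePool)
#     conn = sqlite3.connect(':memory:')   # checked out from a managed pool
#     conn.close()                         # returned to the pool, not closed
#     pool.clear_managers()                # dispose all managed pools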
reset_rollback = util.symbol('reset_rollback')
reset_commit = util.symbol('reset_commit')
reset_none = util.symbol('reset_none')
class _ConnDialect(object):
"""partial implementation of :class:`.Dialect`
which provides DBAPI connection methods.
When a :class:`.Pool` is combined with an :class:`.Engine`,
the :class:`.Engine` replaces this with its own
:class:`.Dialect`.
"""
def do_rollback(self, dbapi_connection):
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
dbapi_connection.commit()
def do_close(self, dbapi_connection):
dbapi_connection.close()
class Pool(log.Identified):
"""Abstract base class for connection pools."""
_dialect = _ConnDialect()
def __init__(self,
creator, recycle=-1, echo=None,
use_threadlocal=False,
logging_name=None,
reset_on_return=True,
listeners=None,
events=None,
_dispatch=None,
_dialect=None):
"""
Construct a Pool.
:param creator: a callable function that returns a DB-API
connection object. The function will be called with
parameters.
:param recycle: If set to a value other than -1, the number of seconds between
connection recycling, which means upon checkout, if this
timeout is surpassed the connection will be closed and
replaced with a newly opened connection. Defaults to -1.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param echo: If True, connections being pulled and retrieved
from the pool will be logged to the standard output, as well
as pool sizing information. Echoing can also be achieved by
enabling logging for the "sqlalchemy.pool"
namespace. Defaults to False.
:param use_threadlocal: If set to True, repeated calls to
:meth:`connect` within the same application thread will be
guaranteed to return the same connection object, if one has
already been retrieved from the pool and has not been
returned yet. Offers a slight performance advantage at the
cost of individual transactions by default. The
:meth:`.Pool.unique_connection` method is provided to return
a consistently unique connection to bypass this behavior
when the flag is set.
.. warning:: The :paramref:`.Pool.use_threadlocal` flag
**does not affect the behavior** of :meth:`.Engine.connect`.
:meth:`.Engine.connect` makes use of the
:meth:`.Pool.unique_connection` method which **does not use thread
local context**. To produce a :class:`.Connection` which refers
to the :meth:`.Pool.connect` method, use
:meth:`.Engine.contextual_connect`.
Note that other SQLAlchemy connectivity systems such as
:meth:`.Engine.execute` as well as the orm
:class:`.Session` make use of
:meth:`.Engine.contextual_connect` internally, so these functions
are compatible with the :paramref:`.Pool.use_threadlocal` setting.
.. seealso::
:ref:`threadlocal_strategy` - contains detail on the
"threadlocal" engine strategy, which provides a more comprehensive
approach to "threadlocal" connectivity for the specific
use case of using :class:`.Engine` and :class:`.Connection` objects
directly.
:param reset_on_return: Determine steps to take on
connections as they are returned to the pool.
reset_on_return can have any of these values:
* ``"rollback"`` - call rollback() on the connection,
to release locks and transaction resources.
This is the default value. The vast majority
of use cases should leave this value set.
* ``True`` - same as 'rollback', this is here for
backwards compatibility.
* ``"commit"`` - call commit() on the connection,
to release locks and transaction resources.
A commit here may be desirable for databases that
cache query plans if a commit is emitted,
such as Microsoft SQL Server. However, this
value is more dangerous than 'rollback' because
any data changes present on the transaction
are committed unconditionally.
* ``None`` - don't do anything on the connection.
This setting should only be made on a database
that has no transaction support at all,
namely MySQL MyISAM. By not doing anything,
performance can be improved. This
setting should **never be selected** for a
database that supports transactions,
as it will lead to deadlocks and stale
state.
* ``False`` - same as None, this is here for
backwards compatibility.
.. versionchanged:: 0.7.6
:paramref:`.Pool.reset_on_return` accepts ``"rollback"``
and ``"commit"`` arguments.
:param events: a list of 2-tuples, each of the form
``(callable, target)`` which will be passed to :func:`.event.listen`
upon construction. Provided here so that event listeners
can be assigned via :func:`.create_engine` before dialect-level
listeners are applied.
:param listeners: Deprecated. A list of
:class:`~sqlalchemy.interfaces.PoolListener`-like objects or
dictionaries of callables that receive events when DB-API
connections are created, checked out and checked in to the
pool. This has been superseded by
:func:`~sqlalchemy.event.listen`.
"""
if logging_name:
self.logging_name = self._orig_logging_name = logging_name
else:
self._orig_logging_name = None
log.instance_logger(self, echoflag=echo)
self._threadconns = threading.local()
self._creator = creator
self._recycle = recycle
self._invalidate_time = 0
self._use_threadlocal = use_threadlocal
if reset_on_return in ('rollback', True, reset_rollback):
self._reset_on_return = reset_rollback
elif reset_on_return in (None, False, reset_none):
self._reset_on_return = reset_none
elif reset_on_return in ('commit', reset_commit):
self._reset_on_return = reset_commit
else:
raise exc.ArgumentError(
"Invalid value for 'reset_on_return': %r"
% reset_on_return)
self.echo = echo
if _dispatch:
self.dispatch._update(_dispatch, only_propagate=False)
if _dialect:
self._dialect = _dialect
if events:
for fn, target in events:
event.listen(self, target, fn)
if listeners:
util.warn_deprecated(
"The 'listeners' argument to Pool (and "
"create_engine()) is deprecated. Use event.listen().")
for l in listeners:
self.add_listener(l)
def _close_connection(self, connection):
self.logger.debug("Closing connection %r", connection)
try:
self._dialect.do_close(connection)
except (SystemExit, KeyboardInterrupt):
raise
except:
self.logger.error("Exception closing connection %r",
connection, exc_info=True)
@util.deprecated(
2.7, "Pool.add_listener is deprecated. Use event.listen()")
def add_listener(self, listener):
"""Add a :class:`.PoolListener`-like object to this pool.
``listener`` may be an object that implements some or all of
PoolListener, or a dictionary of callables containing implementations
of some or all of the named methods in PoolListener.
"""
interfaces.PoolListener._adapt_listener(self, listener)
def unique_connection(self):
"""Produce a DBAPI connection that is not referenced by any
thread-local context.
This method is equivalent to :meth:`.Pool.connect` when the
:paramref:`.Pool.use_threadlocal` flag is not set to True.
When :paramref:`.Pool.use_threadlocal` is True, the
:meth:`.Pool.unique_connection` method provides a means of bypassing
the threadlocal context.
"""
return _ConnectionFairy._checkout(self)
def _create_connection(self):
"""Called by subclasses to create a new ConnectionRecord."""
return _ConnectionRecord(self)
def _invalidate(self, connection, exception=None):
"""Mark all connections established within the generation
of the given connection as invalidated.
If this pool's last invalidate time is before when the given
connection was created, update the timestamp to the current time. Otherwise,
no action is performed.
Connections with a start time prior to this pool's invalidation
time will be recycled upon next checkout.
"""
rec = getattr(connection, "_connection_record", None)
if not rec or self._invalidate_time < rec.starttime:
self._invalidate_time = time.time()
if getattr(connection, 'is_valid', False):
connection.invalidate(exception)
def recreate(self):
"""Return a new :class:`.Pool`, of the same class as this one
and configured with identical creation arguments.
This method is used in conjunction with :meth:`dispose`
to close out an entire :class:`.Pool` and create a new one in
its place.
"""
raise NotImplementedError()
def dispose(self):
"""Dispose of this pool.
This method leaves the possibility of checked-out connections
remaining open, as it only affects connections that are
idle in the pool.
See also the :meth:`Pool.recreate` method.
"""
raise NotImplementedError()
def connect(self):
"""Return a DBAPI connection from the pool.
The connection is instrumented such that when its
``close()`` method is called, the connection will be returned to
the pool.
"""
if not self._use_threadlocal:
return _ConnectionFairy._checkout(self)
try:
rec = self._threadconns.current()
except AttributeError:
pass
else:
if rec is not None:
return rec._checkout_existing()
return _ConnectionFairy._checkout(self, self._threadconns)
def _return_conn(self, record):
"""Given a _ConnectionRecord, return it to the :class:`.Pool`.
This method is called when an instrumented DBAPI connection
has its ``close()`` method called.
"""
if self._use_threadlocal:
try:
del self._threadconns.current
except AttributeError:
pass
self._do_return_conn(record)
def _do_get(self):
"""Implementation for :meth:`get`, supplied by subclasses."""
raise NotImplementedError()
def _do_return_conn(self, conn):
"""Implementation for :meth:`return_conn`, supplied by subclasses."""
raise NotImplementedError()
def status(self):
raise NotImplementedError()
class _ConnectionRecord(object):
"""Internal object which maintains an individual DBAPI connection
referenced by a :class:`.Pool`.
The :class:`._ConnectionRecord` object always exists for any particular
DBAPI connection whether or not that DBAPI connection has been
"checked out". This is in contrast to the :class:`._ConnectionFairy`
which is only a public facade to the DBAPI connection while it is checked
out.
A :class:`._ConnectionRecord` may exist for a span longer than that
of a single DBAPI connection. For example, if the
:meth:`._ConnectionRecord.invalidate`
method is called, the DBAPI connection associated with this
:class:`._ConnectionRecord`
will be discarded, but the :class:`._ConnectionRecord` may be used again,
in which case a new DBAPI connection is produced when the :class:`.Pool`
next uses this record.
The :class:`._ConnectionRecord` is delivered along with connection
pool events, including :meth:`.PoolEvents.connect` and
:meth:`.PoolEvents.checkout`, however :class:`._ConnectionRecord` still
remains an internal object whose API and internals may change.
.. seealso::
:class:`._ConnectionFairy`
"""
def __init__(self, pool):
self.__pool = pool
self.connection = self.__connect()
self.finalize_callback = deque()
pool.dispatch.first_connect.\
for_modify(pool.dispatch).\
exec_once(self.connection, self)
pool.dispatch.connect(self.connection, self)
connection = None
"""A reference to the actual DBAPI connection being tracked.
May be ``None`` if this :class:`._ConnectionRecord` has been marked
as invalidated; a new DBAPI connection may replace it if the owning
pool calls upon this :class:`._ConnectionRecord` to reconnect.
"""
@util.memoized_property
def info(self):
"""The ``.info`` dictionary associated with the DBAPI connection.
This dictionary is shared among the :attr:`._ConnectionFairy.info`
and :attr:`.Connection.info` accessors.
"""
return {}
@classmethod
def checkout(cls, pool):
rec = pool._do_get()
try:
dbapi_connection = rec.get_connection()
except:
rec.checkin()
raise
echo = pool._should_log_debug()
fairy = _ConnectionFairy(dbapi_connection, rec, echo)
rec.fairy_ref = weakref.ref(
fairy,
lambda ref: _finalize_fairy and
_finalize_fairy(
dbapi_connection,
rec, pool, ref, echo)
)
_refs.add(rec)
if echo:
pool.logger.debug("Connection %r checked out from pool",
dbapi_connection)
return fairy
def checkin(self):
self.fairy_ref = None
connection = self.connection
pool = self.__pool
while self.finalize_callback:
finalizer = self.finalize_callback.pop()
finalizer(connection)
if pool.dispatch.checkin:
pool.dispatch.checkin(connection, self)
pool._return_conn(self)
def close(self):
if self.connection is not None:
self.__close()
def invalidate(self, e=None):
"""Invalidate the DBAPI connection held by this :class:`._ConnectionRecord`.
This method is called for all connection invalidations, including
when the :meth:`._ConnectionFairy.invalidate` or
:meth:`.Connection.invalidate` methods are called, as well as when any
so-called "automatic invalidation" condition occurs.
.. seealso::
:ref:`pool_connection_invalidation`
"""
# already invalidated
if self.connection is None:
return
self.__pool.dispatch.invalidate(self.connection, self, e)
if e is not None:
self.__pool.logger.info(
"Invalidate connection %r (reason: %s:%s)",
self.connection, e.__class__.__name__, e)
else:
self.__pool.logger.info(
"Invalidate connection %r", self.connection)
self.__close()
self.connection = None
def get_connection(self):
recycle = False
if self.connection is None:
self.connection = self.__connect()
self.info.clear()
if self.__pool.dispatch.connect:
self.__pool.dispatch.connect(self.connection, self)
elif self.__pool._recycle > -1 and \
time.time() - self.starttime > self.__pool._recycle:
self.__pool.logger.info(
"Connection %r exceeded timeout; recycling",
self.connection)
recycle = True
elif self.__pool._invalidate_time > self.starttime:
self.__pool.logger.info(
"Connection %r invalidated due to pool invalidation; " +
"recycling",
self.connection
)
recycle = True
if recycle:
self.__close()
self.connection = self.__connect()
self.info.clear()
if self.__pool.dispatch.connect:
self.__pool.dispatch.connect(self.connection, self)
return self.connection
def __close(self):
self.__pool._close_connection(self.connection)
def __connect(self):
try:
self.starttime = time.time()
connection = self.__pool._creator()
self.__pool.logger.debug("Created new connection %r", connection)
return connection
except Exception as e:
self.__pool.logger.debug("Error on connect(): %s", e)
raise
def _finalize_fairy(connection, connection_record,
pool, ref, echo, fairy=None):
"""Cleanup for a :class:`._ConnectionFairy` whether or not it's already
been garbage collected.
"""
_refs.discard(connection_record)
if ref is not None and \
connection_record.fairy_ref is not ref:
return
if connection is not None:
if connection_record and echo:
pool.logger.debug("Connection %r being returned to pool",
connection)
try:
fairy = fairy or _ConnectionFairy(
connection, connection_record, echo)
assert fairy.connection is connection
fairy._reset(pool)
# Immediately close detached instances
if not connection_record:
pool._close_connection(connection)
except Exception as e:
pool.logger.error(
"Exception during reset or similar", exc_info=True)
if connection_record:
connection_record.invalidate(e=e)
if isinstance(e, (SystemExit, KeyboardInterrupt)):
raise
if connection_record:
connection_record.checkin()
_refs = set()
class _ConnectionFairy(object):
"""Proxies a DBAPI connection and provides return-on-dereference
support.
This is an internal object used by the :class:`.Pool` implementation
to provide context management to a DBAPI connection delivered by
that :class:`.Pool`.
The name "fairy" is inspired by the fact that the
:class:`._ConnectionFairy` object's lifespan is transitory, as it lasts
only for the length of a specific DBAPI connection being checked out from
the pool, and additionally that as a transparent proxy, it is mostly
invisible.
.. seealso::
:class:`._ConnectionRecord`
"""
def __init__(self, dbapi_connection, connection_record, echo):
self.connection = dbapi_connection
self._connection_record = connection_record
self._echo = echo
connection = None
"""A reference to the actual DBAPI connection being tracked."""
_connection_record = None
"""A reference to the :class:`._ConnectionRecord` object associated
with the DBAPI connection.
This is currently an internal accessor which is subject to change.
"""
_reset_agent = None
"""Refer to an object with a ``.commit()`` and ``.rollback()`` method;
if non-None, the "reset-on-return" feature will call upon this object
rather than directly against the dialect-level do_rollback() and
do_commit() methods.
In practice, a :class:`.Connection` assigns a :class:`.Transaction` object
to this variable when one is in scope so that the :class:`.Transaction`
takes the job of committing or rolling back on return if
:meth:`.Connection.close` is called while the :class:`.Transaction`
still exists.
This is essentially an "event handler" of sorts but is simplified as an
instance variable both for performance/simplicity as well as that there
can only be one "reset agent" at a time.
"""
@classmethod
def _checkout(cls, pool, threadconns=None, fairy=None):
if not fairy:
fairy = _ConnectionRecord.checkout(pool)
fairy._pool = pool
fairy._counter = 0
if threadconns is not None:
threadconns.current = weakref.ref(fairy)
if fairy.connection is None:
raise exc.InvalidRequestError("This connection is closed")
fairy._counter += 1
if not pool.dispatch.checkout or fairy._counter != 1:
return fairy
# Pool listeners can trigger a reconnection on checkout
attempts = 2
while attempts > 0:
try:
pool.dispatch.checkout(fairy.connection,
fairy._connection_record,
fairy)
return fairy
except exc.DisconnectionError as e:
pool.logger.info(
"Disconnection detected on checkout: %s", e)
fairy._connection_record.invalidate(e)
fairy.connection = fairy._connection_record.get_connection()
attempts -= 1
pool.logger.info("Reconnection attempts exhausted on checkout")
fairy.invalidate()
raise exc.InvalidRequestError("This connection is closed")
def _checkout_existing(self):
return _ConnectionFairy._checkout(self._pool, fairy=self)
def _checkin(self):
_finalize_fairy(self.connection, self._connection_record,
self._pool, None, self._echo, fairy=self)
self.connection = None
self._connection_record = None
_close = _checkin
def _reset(self, pool):
if pool.dispatch.reset:
pool.dispatch.reset(self, self._connection_record)
if pool._reset_on_return is reset_rollback:
if self._echo:
pool.logger.debug("Connection %s rollback-on-return%s",
self.connection,
", via agent"
if self._reset_agent else "")
if self._reset_agent:
self._reset_agent.rollback()
else:
pool._dialect.do_rollback(self)
elif pool._reset_on_return is reset_commit:
if self._echo:
pool.logger.debug("Connection %s commit-on-return%s",
self.connection,
", via agent"
if self._reset_agent else "")
if self._reset_agent:
self._reset_agent.commit()
else:
pool._dialect.do_commit(self)
@property
def _logger(self):
return self._pool.logger
@property
def is_valid(self):
"""Return True if this :class:`._ConnectionFairy` still refers
to an active DBAPI connection."""
return self.connection is not None
@util.memoized_property
def info(self):
"""Info dictionary associated with the underlying DBAPI connection
referred to by this :class:`.ConnectionFairy`, allowing user-defined
data to be associated with the connection.
The data here will follow along with the DBAPI connection including
after it is returned to the connection pool and used again
in subsequent instances of :class:`._ConnectionFairy`. It is shared
with the :attr:`._ConnectionRecord.info` and :attr:`.Connection.info`
accessors.
"""
return self._connection_record.info
def invalidate(self, e=None):
"""Mark this connection as invalidated.
This method can be called directly, and is also called as a result
of the :meth:`.Connection.invalidate` method. When invoked,
the DBAPI connection is immediately closed and discarded from
further use by the pool. The invalidation mechanism proceeds
via the :meth:`._ConnectionRecord.invalidate` internal method.
.. seealso::
:ref:`pool_connection_invalidation`
"""
if self.connection is None:
util.warn("Can't invalidate an already-closed connection.")
return
if self._connection_record:
self._connection_record.invalidate(e=e)
self.connection = None
self._checkin()
def cursor(self, *args, **kwargs):
"""Return a new DBAPI cursor for the underlying connection.
This method is a proxy for the ``connection.cursor()`` DBAPI
method.
"""
return self.connection.cursor(*args, **kwargs)
def __getattr__(self, key):
return getattr(self.connection, key)
def detach(self):
"""Separate this connection from its Pool.
This means that the connection will no longer be returned to the
pool when closed, and will instead be literally closed. The
containing ConnectionRecord is separated from the DB-API connection,
and will create a new connection when next used.
Note that any overall connection limiting constraints imposed by a
Pool implementation may be violated after a detach, as the detached
connection is removed from the pool's knowledge and control.
"""
if self._connection_record is not None:
_refs.remove(self._connection_record)
self._connection_record.fairy_ref = None
self._connection_record.connection = None
# TODO: should this be _return_conn?
self._pool._do_return_conn(self._connection_record)
self.info = self.info.copy()
self._connection_record = None
def close(self):
self._counter -= 1
if self._counter == 0:
self._checkin()
class SingletonThreadPool(Pool):
"""A Pool that maintains one connection per thread.
Maintains one connection for each thread, never moving a connection to a
thread other than the one in which it was created.
Options are the same as those of :class:`.Pool`, as well as:
:param pool_size: The number of threads in which to maintain connections
at once. Defaults to five.
:class:`.SingletonThreadPool` is used by the SQLite dialect
automatically when a memory-based database is used.
See :ref:`sqlite_toplevel`.
"""
def __init__(self, creator, pool_size=5, **kw):
kw['use_threadlocal'] = True
Pool.__init__(self, creator, **kw)
self._conn = threading.local()
self._all_conns = set()
self.size = pool_size
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(self._creator,
pool_size=self.size,
recycle=self._recycle,
echo=self.echo,
logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
_dispatch=self.dispatch,
_dialect=self._dialect)
def dispose(self):
"""Dispose of this pool."""
for conn in self._all_conns:
try:
conn.close()
except (SystemExit, KeyboardInterrupt):
raise
except:
# pysqlite won't even let you close a conn from a thread
# that didn't create it
pass
self._all_conns.clear()
def _cleanup(self):
while len(self._all_conns) >= self.size:
c = self._all_conns.pop()
c.close()
def status(self):
return "SingletonThreadPool id:%d size: %d" % \
(id(self), len(self._all_conns))
def _do_return_conn(self, conn):
pass
def _do_get(self):
try:
c = self._conn.current()
if c:
return c
except AttributeError:
pass
c = self._create_connection()
self._conn.current = weakref.ref(c)
if len(self._all_conns) >= self.size:
self._cleanup()
self._all_conns.add(c)
return c
class QueuePool(Pool):
"""A :class:`.Pool` that imposes a limit on the number of open connections.
:class:`.QueuePool` is the default pooling implementation used for
all :class:`.Engine` objects, unless the SQLite dialect is in use.
"""
def __init__(self, creator, pool_size=5, max_overflow=10, timeout=30,
**kw):
"""
Construct a QueuePool.
:param creator: a callable function that returns a DB-API
connection object, same as that of :paramref:`.Pool.creator`.
:param pool_size: The size of the pool to be maintained,
defaults to 5. This is the largest number of connections that
will be kept persistently in the pool. Note that the pool
begins with no connections; once this number of connections
is requested, that number of connections will remain.
``pool_size`` can be set to 0 to indicate no size limit; to
disable pooling, use a :class:`~sqlalchemy.pool.NullPool`
instead.
:param max_overflow: The maximum overflow size of the
pool. When the number of checked-out connections reaches the
size set in pool_size, additional connections will be
returned up to this limit. When those additional connections
are returned to the pool, they are disconnected and
discarded. It follows then that the total number of
simultaneous connections the pool will allow is pool_size +
`max_overflow`, and the total number of "sleeping"
connections the pool will allow is pool_size. `max_overflow`
can be set to -1 to indicate no overflow limit; no limit
will be placed on the total number of concurrent
connections. Defaults to 10.
:param timeout: The number of seconds to wait before giving up
on returning a connection. Defaults to 30.
:param \**kw: Other keyword arguments including
:paramref:`.Pool.recycle`, :paramref:`.Pool.echo`,
:paramref:`.Pool.reset_on_return` and others are passed to the
:class:`.Pool` constructor.
"""
Pool.__init__(self, creator, **kw)
self._pool = sqla_queue.Queue(pool_size)
self._overflow = 0 - pool_size
self._max_overflow = max_overflow
self._timeout = timeout
self._overflow_lock = threading.Lock()
def _do_return_conn(self, conn):
try:
self._pool.put(conn, False)
except sqla_queue.Full:
try:
conn.close()
finally:
self._dec_overflow()
def _do_get(self):
use_overflow = self._max_overflow > -1
try:
wait = use_overflow and self._overflow >= self._max_overflow
return self._pool.get(wait, self._timeout)
except sqla_queue.Empty:
if use_overflow and self._overflow >= self._max_overflow:
if not wait:
return self._do_get()
else:
raise exc.TimeoutError(
"QueuePool limit of size %d overflow %d reached, "
"connection timed out, timeout %d" %
(self.size(), self.overflow(), self._timeout))
if self._inc_overflow():
try:
return self._create_connection()
except:
self._dec_overflow()
raise
else:
return self._do_get()
def _inc_overflow(self):
if self._max_overflow == -1:
self._overflow += 1
return True
with self._overflow_lock:
if self._overflow < self._max_overflow:
self._overflow += 1
return True
else:
return False
def _dec_overflow(self):
if self._max_overflow == -1:
self._overflow -= 1
return True
with self._overflow_lock:
self._overflow -= 1
return True
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(self._creator, pool_size=self._pool.maxsize,
max_overflow=self._max_overflow,
timeout=self._timeout,
recycle=self._recycle, echo=self.echo,
logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
_dispatch=self.dispatch,
_dialect=self._dialect)
def dispose(self):
while True:
try:
conn = self._pool.get(False)
conn.close()
except sqla_queue.Empty:
break
self._overflow = 0 - self.size()
self.logger.info("Pool disposed. %s", self.status())
def status(self):
return "Pool size: %d Connections in pool: %d "\
"Current Overflow: %d Current Checked out "\
"connections: %d" % (self.size(),
self.checkedin(),
self.overflow(),
self.checkedout())
def size(self):
return self._pool.maxsize
def checkedin(self):
return self._pool.qsize()
def overflow(self):
return self._overflow
def checkedout(self):
return self._pool.maxsize - self._pool.qsize() + self._overflow
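# Usage sketch (not part of this module): a QueuePool built around a plain
# creator callable, per the constructor docstring above.
#
#     import sqlite3
#     p = QueuePool(lambda: sqlite3.connect(':memory:'),
#                   pool_size=5, max_overflow=10, timeout=30)
#     c = p.connect()    # a DBAPI connection proxied by a _ConnectionFairy
#     c.close()          # checkin: the connection goes back to the queue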
class NullPool(Pool):
"""A Pool which does not pool connections.
Instead it literally opens and closes the underlying DB-API connection
for each connection open and close.
Reconnect-related functions such as ``recycle`` and connection
invalidation are not supported by this Pool implementation, since
no connections are held persistently.
.. versionchanged:: 0.7
:class:`.NullPool` is used by the SQLite dialect automatically
when a file-based database is used. See :ref:`sqlite_toplevel`.
"""
def status(self):
return "NullPool"
def _do_return_conn(self, conn):
conn.close()
def _do_get(self):
return self._create_connection()
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(self._creator,
recycle=self._recycle,
echo=self.echo,
logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
_dispatch=self.dispatch,
_dialect=self._dialect)
def dispose(self):
pass
class StaticPool(Pool):
"""A Pool of exactly one connection, used for all requests.
Reconnect-related functions such as ``recycle`` and connection
invalidation (which is also used to support auto-reconnect) are not
currently supported by this Pool implementation but may be implemented
in a future release.
"""
@memoized_property
def _conn(self):
return self._creator()
@memoized_property
def connection(self):
return _ConnectionRecord(self)
def status(self):
return "StaticPool"
def dispose(self):
if '_conn' in self.__dict__:
self._conn.close()
self._conn = None
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(creator=self._creator,
recycle=self._recycle,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
echo=self.echo,
logging_name=self._orig_logging_name,
_dispatch=self.dispatch,
_dialect=self._dialect)
def _create_connection(self):
return self._conn
def _do_return_conn(self, conn):
pass
def _do_get(self):
return self.connection
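# Illustrative sketch (not part of the original module): StaticPool memoizes
# a single raw connection and hands it back for every request.
def _example_static_pool():
    import sqlite3
    pool = StaticPool(lambda: sqlite3.connect(':memory:'))
    # _create_connection always returns the same memoized DB-API connection.
    assert pool._create_connection() is pool._create_connection()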
class AssertionPool(Pool):
"""A :class:`.Pool` that allows at most one checked out connection at
any given time.
This will raise an exception if more than one connection is checked out
at a time. Useful for debugging code that is using more connections
than desired.
.. versionchanged:: 0.7
:class:`.AssertionPool` also logs a traceback of where
the original connection was checked out, and reports
this in the assertion error raised.
"""
def __init__(self, *args, **kw):
self._conn = None
self._checked_out = False
self._store_traceback = kw.pop('store_traceback', True)
self._checkout_traceback = None
Pool.__init__(self, *args, **kw)
def status(self):
return "AssertionPool"
def _do_return_conn(self, conn):
if not self._checked_out:
raise AssertionError("connection is not checked out")
self._checked_out = False
assert conn is self._conn
def dispose(self):
self._checked_out = False
if self._conn:
self._conn.close()
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(self._creator, echo=self.echo,
logging_name=self._orig_logging_name,
_dispatch=self.dispatch,
_dialect=self._dialect)
def _do_get(self):
if self._checked_out:
if self._checkout_traceback:
suffix = ' at:\n%s' % ''.join(
chop_traceback(self._checkout_traceback))
else:
suffix = ''
raise AssertionError("connection is already checked out" + suffix)
if not self._conn:
self._conn = self._create_connection()
self._checked_out = True
if self._store_traceback:
self._checkout_traceback = traceback.format_stack()
return self._conn
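# Illustrative sketch (not part of the original module): AssertionPool raises
# as soon as a second connection is checked out before the first returns.
def _example_assertion_pool():
    import sqlite3
    pool = AssertionPool(lambda: sqlite3.connect(':memory:'))
    first = pool.connect()
    try:
        pool.connect()              # second checkout -> AssertionError
    except AssertionError:
        pass
    first.close()                   # check the connection back in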
class _DBProxy(object):
"""Layers connection pooling behavior on top of a standard DB-API module.
Proxies a DB-API 2.0 connect() call to a connection pool keyed to the
specific connect parameters. Other functions and attributes are delegated
to the underlying DB-API module.
"""
def __init__(self, module, poolclass=QueuePool, **kw):
"""Initializes a new proxy.
module
a DB-API 2.0 module
poolclass
a Pool class, defaulting to QueuePool
Other parameters are sent to the Pool object's constructor.
"""
self.module = module
self.kw = kw
self.poolclass = poolclass
self.pools = {}
self._create_pool_mutex = threading.Lock()
def close(self):
for key in list(self.pools):
del self.pools[key]
def __del__(self):
self.close()
def __getattr__(self, key):
return getattr(self.module, key)
def get_pool(self, *args, **kw):
key = self._serialize(*args, **kw)
try:
return self.pools[key]
except KeyError:
self._create_pool_mutex.acquire()
try:
if key not in self.pools:
kw.pop('sa_pool_key', None)
pool = self.poolclass(
lambda: self.module.connect(*args, **kw), **self.kw)
self.pools[key] = pool
return pool
else:
return self.pools[key]
finally:
self._create_pool_mutex.release()
def connect(self, *args, **kw):
"""Activate a connection to the database.
Connect to the database using this DBProxy's module and the given
connect arguments. If the arguments match an existing pool, the
connection will be returned from the pool's current thread-local
connection instance, or if there is no thread-local connection
instance it will be checked out from the set of pooled connections.
If the pool has no available connections and allows new connections
to be created, a new database connection will be made.
"""
return self.get_pool(*args, **kw).connect()
def dispose(self, *args, **kw):
"""Dispose the pool referenced by the given connect arguments."""
key = self._serialize(*args, **kw)
try:
del self.pools[key]
except KeyError:
pass
def _serialize(self, *args, **kw):
if "sa_pool_key" in kw:
return kw['sa_pool_key']
return tuple(
list(args) +
[(k, kw[k]) for k in sorted(kw)]
)
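# Illustrative sketch (not part of the original module): wrapping a DB-API
# module so that identical connect() arguments share one pool. The sqlite3
# module and the arguments are assumptions for demonstration.
def _example_dbproxy():
    import sqlite3
    proxy = _DBProxy(sqlite3, poolclass=QueuePool, pool_size=2)
    c1 = proxy.connect(':memory:')  # creates a pool keyed on (':memory:',)
    c2 = proxy.connect(':memory:')  # same key, so the same pool is reused
    c1.close()
    c2.close()
    proxy.dispose(':memory:')       # discard the pool for that key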
|
mit
|
surligas/cs436-gnuradio
|
gr-blocks/python/blocks/qa_message.py
|
47
|
4794
|
#!/usr/bin/env python
#
# Copyright 2004,2010,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import time
from gnuradio import gr, gr_unittest, blocks
import pmt
def all_counts():
return (gr.block_ncurrently_allocated(),
gr.block_detail_ncurrently_allocated(),
gr.buffer_ncurrently_allocated(),
gr.buffer_reader_ncurrently_allocated(),
gr.message_ncurrently_allocated())
class test_message(gr_unittest.TestCase):
def setUp(self):
self.msgq = gr.msg_queue()
def tearDown(self):
self.msgq = None
def leak_check(self, fct):
begin = all_counts()
fct()
# tear down early so we can check for leaks
self.tearDown()
end = all_counts()
self.assertEqual(begin, end)
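    # Usage pattern (illustrative, mirrors the tests below): pass the test
    # body in as a callable so allocation counts can be compared before and
    # after it runs:
    #   def test_200(self):
    #       self.leak_check(self.body_200)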
def test_100(self):
msg = gr.message(0, 1.5, 2.3)
self.assertEquals(0, msg.type())
self.assertAlmostEqual(1.5, msg.arg1())
self.assertAlmostEqual(2.3, msg.arg2())
self.assertEquals(0, msg.length())
def test_101(self):
s = 'This is a test'
msg = gr.message_from_string(s)
self.assertEquals(s, msg.to_string())
def test_200(self):
self.leak_check(self.body_200)
def body_200(self):
self.msgq.insert_tail(gr.message(0))
self.assertEquals(1, self.msgq.count())
self.msgq.insert_tail(gr.message(1))
self.assertEquals(2, self.msgq.count())
msg0 = self.msgq.delete_head()
self.assertEquals(0, msg0.type())
msg1 = self.msgq.delete_head()
self.assertEquals(1, msg1.type())
self.assertEquals(0, self.msgq.count())
def test_201(self):
self.leak_check(self.body_201)
def body_201(self):
self.msgq.insert_tail(gr.message(0))
self.assertEquals(1, self.msgq.count())
self.msgq.insert_tail(gr.message(1))
self.assertEquals(2, self.msgq.count())
def test_202(self):
self.leak_check(self.body_202)
def body_202(self):
# global msg
msg = gr.message(666)
def test_300(self):
input_data = (0,1,2,3,4,5,6,7,8,9)
src = blocks.vector_source_b(input_data)
dst = blocks.vector_sink_b()
tb = gr.top_block()
tb.connect(src, dst)
tb.run()
self.assertEquals(input_data, dst.data())
def test_301(self):
# Use itemsize, limit constructor
src = blocks.message_source(gr.sizeof_char)
dst = blocks.vector_sink_b()
tb = gr.top_block()
tb.connect(src, dst)
src.msgq().insert_tail(gr.message_from_string('01234'))
src.msgq().insert_tail(gr.message_from_string('5'))
src.msgq().insert_tail(gr.message_from_string(''))
src.msgq().insert_tail(gr.message_from_string('6789'))
src.msgq().insert_tail(gr.message(1)) # send EOF
tb.run()
self.assertEquals(tuple(map(ord, '0123456789')), dst.data())
def test_302(self):
# Use itemsize, msgq constructor
msgq = gr.msg_queue()
src = blocks.message_source(gr.sizeof_char, msgq)
dst = blocks.vector_sink_b()
tb = gr.top_block()
tb.connect(src, dst)
src.msgq().insert_tail(gr.message_from_string('01234'))
src.msgq().insert_tail(gr.message_from_string('5'))
src.msgq().insert_tail(gr.message_from_string(''))
src.msgq().insert_tail(gr.message_from_string('6789'))
src.msgq().insert_tail(gr.message(1)) # send EOF
tb.run()
self.assertEquals(tuple(map(ord, '0123456789')), dst.data())
def test_debug_401(self):
msg = pmt.intern("TESTING")
src = blocks.message_strobe(msg, 500)
snk = blocks.message_debug()
tb = gr.top_block()
tb.msg_connect(src, "strobe", snk, "store")
tb.start()
time.sleep(1)
tb.stop()
tb.wait()
rec_msg = snk.get_message(0)
self.assertTrue(pmt.eqv(rec_msg, msg))
if __name__ == '__main__':
gr_unittest.run(test_message, "test_message.xml")
|
gpl-3.0
|
switowski/invenio
|
invenio/ext/fixtures/__init__.py
|
17
|
2454
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Fixtures extension."""
from __future__ import print_function
from .registry import fixtures
def load_fixtures(sender, yes_i_know=False, drop=True, **kwargs):
"""Load fixtures.
    Loads the registered fixture classes into the database. Names of the
    fixture classes must end with the 'Data' suffix and match a model class
    name (e.g. 'UserData' for the model 'User').
    :param sender: the command that triggered this signal handler
    :param yes_i_know: accepted from the signal; not used by this function
    :param drop: accepted from the signal; not used by this function
    """
from invenio.ext.sqlalchemy import db, models
from fixture import SQLAlchemyFixture
# Load SQLAlchemy models.
list(models)
models = dict((m.__name__ + 'Data', m) for m in db.Model.__subclasses__())
missing = set(fixtures.keys()) - set(models.keys())
if len(missing):
raise Exception(
'Cannot match models for the following fixtures classes {0}'.format(
missing
))
print(">>> There are", len(fixtures.keys()), "tables to be loaded.")
SQLAlchemyFixture(
env=models, engine=db.metadata.bind, session=db.session
).data(*fixtures.values()).setup()
db.session.commit()
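# Illustrative sketch (not part of Invenio), assuming the `fixture` package's
# DataSet convention: a class named `UserData` is matched to a model class
# named `User`, and each inner class is one row. All names are hypothetical.
def _example_fixture_class():
    from fixture import DataSet

    class UserData(DataSet):        # 'Data' suffix maps this to model `User`
        class demo_user:            # one row of fixture data
            id = 1
            nickname = 'demo'
    return UserData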
def fixture_dump(sender, **kwargs):
"""Dump fixtures."""
print('ERROR: This feature is not implemented inside fixtures.')
def setup_app(app):
"""Set up the extension for the given app."""
# Subscribe to database post create command
from invenio.base import signals
from invenio.base.scripts.database import create, recreate, dump
signals.post_command.connect(load_fixtures, sender=create)
signals.post_command.connect(load_fixtures, sender=recreate)
signals.post_command.connect(fixture_dump, sender=dump)
|
gpl-2.0
|
AbdealiJK/coala
|
coalib/results/Diff.py
|
6
|
8963
|
import copy
import difflib
from coalib.results.LineDiff import LineDiff
from coalib.results.SourceRange import SourceRange
class ConflictError(Exception):
pass
class Diff:
"""
A Diff result represents a difference for one file.
"""
def __init__(self, file_list):
"""
Creates an empty diff for the given file.
:param file_list: The original (unmodified) file as a list of its
lines.
"""
self._changes = {}
self._file = file_list
@classmethod
def from_string_arrays(cls, file_array_1, file_array_2):
"""
Creates a Diff object from two arrays containing strings.
If this Diff is applied to the original array, the second array will be
created.
:param file_array_1: Original array
:param file_array_2: Array to compare
"""
result = cls(file_array_1)
matcher = difflib.SequenceMatcher(None, file_array_1, file_array_2)
        # We use this because it's faster (it's a generator) and doesn't
        # yield as much useless information as get_opcodes.
for change_group in matcher.get_grouped_opcodes(1):
for (tag,
a_index_1,
a_index_2,
b_index_1,
b_index_2) in change_group:
if tag == "delete":
for index in range(a_index_1+1, a_index_2+1):
result.delete_line(index)
elif tag == "insert":
                    # We add after a line, they add before it, so don't add 1 here
result.add_lines(a_index_1,
file_array_2[b_index_1:b_index_2])
elif tag == "replace":
result.change_line(a_index_1+1,
file_array_1[a_index_1],
file_array_2[b_index_1])
result.add_lines(a_index_1+1,
file_array_2[b_index_1+1:b_index_2])
for index in range(a_index_1+2, a_index_2+1):
result.delete_line(index)
return result
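    # Illustrative usage (not part of the original file): applying the Diff
    # built from two arrays reproduces the second array, e.g.
    #   diff = Diff.from_string_arrays(['a\n', 'b\n'], ['a\n', 'B\n'])
    #   assert diff.modified == ['a\n', 'B\n']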
@classmethod
def from_clang_fixit(cls, fixit, file):
"""
Creates a Diff object from a given clang fixit and the file contents.
:param fixit: A cindex.Fixit object.
:param file: A list of lines in the file to apply the fixit to.
:return: The corresponding Diff object.
"""
oldvalue = '\n'.join(file[fixit.range.start.line-1:
fixit.range.end.line])
endindex = fixit.range.end.column - len(file[fixit.range.end.line-1])-1
newvalue = (oldvalue[:fixit.range.start.column-1] +
fixit.value +
oldvalue[endindex:])
new_file = (file[:fixit.range.start.line-1] +
newvalue.splitlines(True) +
file[fixit.range.end.line:])
return cls.from_string_arrays(file, new_file)
def _get_change(self, line_nr, min_line=1):
if not isinstance(line_nr, int):
raise TypeError("line_nr needs to be an integer.")
if line_nr < min_line:
raise ValueError("The given line number is not allowed.")
return self._changes.get(line_nr, LineDiff())
def __len__(self):
return len(self._changes)
@property
def original(self):
"""
Retrieves the original file.
"""
return self._file
@property
def modified(self):
"""
Calculates the modified file, after applying the Diff to the original.
"""
result = []
current_line = 0
# Note that line_nr counts from _1_ although 0 is possible when
# inserting lines before everything
for line_nr in sorted(self._changes):
result.extend(self._file[current_line:max(line_nr-1, 0)])
linediff = self._changes[line_nr]
if not linediff.delete and not linediff.change and line_nr > 0:
result.append(self._file[line_nr-1])
elif linediff.change:
result.append(linediff.change[1])
if linediff.add_after:
result.extend(linediff.add_after)
current_line = line_nr
result.extend(self._file[current_line:])
return result
@property
def unified_diff(self):
"""
Generates a unified diff corresponding to this patch.
Note that the unified diff is not deterministic and thus not suitable
for equality comparison.
"""
return ''.join(difflib.unified_diff(self.original, self.modified))
def __json__(self):
"""
Override JSON export, using the unified diff is the easiest thing for
the users.
"""
return self.unified_diff
def affected_code(self, filename):
"""
Creates a list of SourceRange objects which point to the related code.
        Changes on consecutive lines will be put into one SourceRange.
:param filename: The filename to associate the SourceRange's to.
:return: A list of all related SourceRange objects.
"""
return list(diff.range(filename) for diff in self.split_diff())
def split_diff(self):
"""
        Splits this diff into small pieces, such that consecutively altered
        lines stay together in one diff. All subdiffs will be yielded.
"""
last_line = -1
this_diff = Diff(self._file)
for line in sorted(self._changes.keys()):
if line != last_line + 1 and len(this_diff._changes) > 0:
yield this_diff
this_diff = Diff(self._file)
last_line = line
this_diff._changes[line] = self._changes[line]
if len(this_diff._changes) > 0:
yield this_diff
def range(self, filename):
"""
Calculates a SourceRange spanning over the whole Diff. If something is
added after the 0th line (i.e. before the first line) the first line
will be included in the SourceRange.
:param filename: The filename to associate the SourceRange with.
:return: A SourceRange object.
"""
start = min(self._changes.keys())
end = max(self._changes.keys())
return SourceRange.from_values(filename,
start_line=max(1, start),
end_line=max(1, end))
def __add__(self, other):
"""
Adds another diff to this one. Will throw an exception if this is not
possible. (This will *not* be done in place.)
"""
if not isinstance(other, Diff):
raise TypeError("Only diffs can be added to a diff.")
result = copy.deepcopy(self)
for line_nr in other._changes:
change = other._changes[line_nr]
if change.delete is True:
result.delete_line(line_nr)
if change.add_after is not False:
result.add_lines(line_nr, change.add_after)
if change.change is not False:
result.change_line(line_nr, change.change[0], change.change[1])
return result
def delete_line(self, line_nr):
"""
Mark the given line nr as deleted. The first line is line number 1.
"""
linediff = self._get_change(line_nr)
linediff.delete = True
self._changes[line_nr] = linediff
def add_lines(self, line_nr_before, lines):
"""
Adds lines after the given line number.
:param line_nr_before: Line number of the line before the additions.
Use 0 for insert lines before everything.
:param lines: A list of lines to add.
"""
if lines == []:
return # No action
linediff = self._get_change(line_nr_before, min_line=0)
if linediff.add_after is not False:
raise ConflictError("Cannot add lines after the given line since "
"there are already lines.")
linediff.add_after = lines
self._changes[line_nr_before] = linediff
def change_line(self, line_nr, original_line, replacement):
"""
Changes the given line with the given line number. The replacement will
be there instead.
"""
linediff = self._get_change(line_nr)
if linediff.change is not False:
raise ConflictError("An already changed line cannot be changed.")
linediff.change = (original_line, replacement)
self._changes[line_nr] = linediff
def __eq__(self, other):
return ((self._file == other._file) and
(self.modified == other.modified))
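# Illustrative sketch (not part of the original file): composing a Diff by
# hand with delete_line, add_lines, and change_line; the file content is
# invented for demonstration.
def _example_manual_diff():
    diff = Diff(['one\n', 'two\n', 'three\n'])
    diff.delete_line(2)                         # drop 'two\n'
    diff.add_lines(0, ['zero\n'])               # insert before everything
    diff.change_line(3, 'three\n', 'THREE\n')   # replace line 3
    assert diff.modified == ['zero\n', 'one\n', 'THREE\n']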
|
agpl-3.0
|
lowtalker/splunk-sdk-python
|
splunklib/binding.py
|
1
|
54532
|
# Copyright 2011-2014 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The **splunklib.binding** module provides a low-level binding interface to the
`Splunk REST API <http://docs.splunk.com/Documentation/Splunk/latest/RESTAPI/RESTcontents>`_.
This module handles the wire details of calling the REST API, such as
authentication tokens, prefix paths, URL encoding, and so on. Actual path
segments, ``GET`` and ``POST`` arguments, and the parsing of responses is left
to the user.
If you want a friendlier interface to the Splunk REST API, use the
:mod:`splunklib.client` module.
"""
import httplib
import logging
import socket
import ssl
import urllib
import io
import sys
import Cookie
from datetime import datetime
from functools import wraps
from StringIO import StringIO
from contextlib import contextmanager
from xml.etree.ElementTree import XML
try:
from xml.etree.ElementTree import ParseError
except ImportError, e:
from xml.parsers.expat import ExpatError as ParseError
from data import record
__all__ = [
"AuthenticationError",
"connect",
"Context",
"handler",
"HTTPError"
]
# If you change these, update the docstring
# on _authority as well.
DEFAULT_HOST = "localhost"
DEFAULT_PORT = "8089"
DEFAULT_SCHEME = "https"
def _log_duration(f):
@wraps(f)
def new_f(*args, **kwargs):
start_time = datetime.now()
val = f(*args, **kwargs)
end_time = datetime.now()
logging.debug("Operation took %s", end_time-start_time)
return val
return new_f
def _parse_cookies(cookie_str, dictionary):
"""Tries to parse any key-value pairs of cookies in a string,
    then updates the dictionary with any key-value pairs found.
**Example**::
dictionary = {}
_parse_cookies('my=value', dictionary)
# Now the following is True
dictionary['my'] == 'value'
:param cookie_str: A string containing "key=value" pairs from an HTTP "Set-Cookie" header.
:type cookie_str: ``str``
:param dictionary: A dictionary to update with any found key-value pairs.
:type dictionary: ``dict``
"""
parsed_cookie = Cookie.SimpleCookie(cookie_str)
for cookie in parsed_cookie.values():
dictionary[cookie.key] = cookie.coded_value
def _make_cookie_header(cookies):
"""
Takes a list of 2-tuples of key-value pairs of
cookies, and returns a valid HTTP ``Cookie``
header.
**Example**::
header = _make_cookie_header([("key", "value"), ("key_2", "value_2")])
# Now the following is True
header == "key=value; key_2=value_2"
:param cookies: A list of 2-tuples of cookie key-value pairs.
:type cookies: ``list`` of 2-tuples
    :return: An HTTP header cookie string.
:rtype: ``str``
"""
return "; ".join("%s=%s" % (key, value) for key, value in cookies)
# Singleton values to eschew None
class _NoAuthenticationToken(object):
"""The value stored in a :class:`Context` or :class:`splunklib.client.Service`
class that is not logged in.
If a ``Context`` or ``Service`` object is created without an authentication
token, and there has not yet been a call to the ``login`` method, the token
field of the ``Context`` or ``Service`` object is set to
``_NoAuthenticationToken``.
Likewise, after a ``Context`` or ``Service`` object has been logged out, the
token is set to this value again.
"""
pass
class UrlEncoded(str):
"""This class marks URL-encoded strings.
It should be considered an SDK-private implementation detail.
Manually tracking whether strings are URL encoded can be difficult. Avoid
calling ``urllib.quote`` to replace special characters with escapes. When
you receive a URL-encoded string, *do* use ``urllib.unquote`` to replace
escapes with single characters. Then, wrap any string you want to use as a
URL in ``UrlEncoded``. Note that because the ``UrlEncoded`` class is
idempotent, making multiple calls to it is OK.
``UrlEncoded`` objects are identical to ``str`` objects (including being
equal if their contents are equal) except when passed to ``UrlEncoded``
again.
``UrlEncoded`` removes the ``str`` type support for interpolating values
with ``%`` (doing that raises a ``TypeError``). There is no reliable way to
encode values this way, so instead, interpolate into a string, quoting by
    hand, and call ``UrlEncoded`` with ``skip_encode=True``.
**Example**::
import urllib
UrlEncoded('%s://%s' % (scheme, urllib.quote(host)), skip_encode=True)
If you append ``str`` strings and ``UrlEncoded`` strings, the result is also
URL encoded.
**Example**::
UrlEncoded('ab c') + 'de f' == UrlEncoded('ab cde f')
'ab c' + UrlEncoded('de f') == UrlEncoded('ab cde f')
"""
def __new__(self, val='', skip_encode=False, encode_slash=False):
if isinstance(val, UrlEncoded):
# Don't urllib.quote something already URL encoded.
return val
elif skip_encode:
return str.__new__(self, val)
elif encode_slash:
return str.__new__(self, urllib.quote_plus(val))
else:
# When subclassing str, just call str's __new__ method
# with your class and the value you want to have in the
# new string.
return str.__new__(self, urllib.quote(val))
def __add__(self, other):
"""self + other
If *other* is not a ``UrlEncoded``, URL encode it before
adding it.
"""
if isinstance(other, UrlEncoded):
return UrlEncoded(str.__add__(self, other), skip_encode=True)
else:
return UrlEncoded(str.__add__(self, urllib.quote(other)), skip_encode=True)
def __radd__(self, other):
"""other + self
        If *other* is not a ``UrlEncoded``, URL encode it before
adding it.
"""
if isinstance(other, UrlEncoded):
return UrlEncoded(str.__radd__(self, other), skip_encode=True)
else:
return UrlEncoded(str.__add__(urllib.quote(other), self), skip_encode=True)
def __mod__(self, fields):
"""Interpolation into ``UrlEncoded``s is disabled.
        If you try to write ``UrlEncoded("%s") % "abc"``, you will get a
        ``TypeError``.
"""
raise TypeError("Cannot interpolate into a UrlEncoded object.")
def __repr__(self):
return "UrlEncoded(%s)" % repr(urllib.unquote(str(self)))
@contextmanager
def _handle_auth_error(msg):
"""Handle reraising HTTP authentication errors as something clearer.
If an ``HTTPError`` is raised with status 401 (access denied) in
the body of this context manager, reraise it as an
``AuthenticationError`` instead, with *msg* as its message.
This function adds no round trips to the server.
:param msg: The message to be raised in ``AuthenticationError``.
:type msg: ``str``
**Example**::
with _handle_auth_error("Your login failed."):
... # make an HTTP request
"""
try:
yield
except HTTPError as he:
if he.status == 401:
raise AuthenticationError(msg, he)
else:
raise
def _authentication(request_fun):
"""Decorator to handle autologin and authentication errors.
*request_fun* is a function taking no arguments that needs to
be run with this ``Context`` logged into Splunk.
``_authentication``'s behavior depends on whether the
``autologin`` field of ``Context`` is set to ``True`` or
``False``. If it's ``False``, then ``_authentication``
aborts if the ``Context`` is not logged in, and raises an
``AuthenticationError`` if an ``HTTPError`` of status 401 is
raised in *request_fun*. If it's ``True``, then
``_authentication`` will try at all sensible places to
log in before issuing the request.
If ``autologin`` is ``False``, ``_authentication`` makes
one roundtrip to the server if the ``Context`` is logged in,
or zero if it is not. If ``autologin`` is ``True``, it's less
deterministic, and may make at most three roundtrips (though
that would be a truly pathological case).
:param request_fun: A function of no arguments encapsulating
the request to make to the server.
**Example**::
import splunklib.binding as binding
c = binding.connect(..., autologin=True)
c.logout()
def f():
c.get("/services")
return 42
print _authentication(f)
"""
@wraps(request_fun)
def wrapper(self, *args, **kwargs):
if self.token is _NoAuthenticationToken and \
not self.has_cookies():
# Not yet logged in.
if self.autologin and self.username and self.password:
# This will throw an uncaught
# AuthenticationError if it fails.
self.login()
else:
# Try the request anyway without authentication.
# Most requests will fail. Some will succeed, such as
# 'GET server/info'.
with _handle_auth_error("Request aborted: not logged in."):
return request_fun(self, *args, **kwargs)
try:
# Issue the request
return request_fun(self, *args, **kwargs)
except HTTPError as he:
if he.status == 401 and self.autologin:
# Authentication failed. Try logging in, and then
# rerunning the request. If either step fails, throw
# an AuthenticationError and give up.
with _handle_auth_error("Autologin failed."):
self.login()
with _handle_auth_error(
"Autologin succeeded, but there was an auth error on "
"next request. Something is very wrong."):
return request_fun(self, *args, **kwargs)
elif he.status == 401 and not self.autologin:
raise AuthenticationError(
"Request failed: Session is not logged in.", he)
else:
raise
return wrapper
def _authority(scheme=DEFAULT_SCHEME, host=DEFAULT_HOST, port=DEFAULT_PORT):
"""Construct a URL authority from the given *scheme*, *host*, and *port*.
Named in accordance with RFC2396_, which defines URLs as::
<scheme>://<authority><path>?<query>
.. _RFC2396: http://www.ietf.org/rfc/rfc2396.txt
    So ``https://localhost:8000/a/b/c?boris=hilda`` would be parsed as::
scheme := https
authority := localhost:8000
path := /a/b/c
query := boris=hilda
:param scheme: URL scheme (the default is "https")
:type scheme: "http" or "https"
:param host: The host name (the default is "localhost")
:type host: string
:param port: The port number (the default is 8089)
:type port: integer
:return: The URL authority.
:rtype: UrlEncoded (subclass of ``str``)
**Example**::
_authority() == "https://localhost:8089"
_authority(host="splunk.utopia.net") == "https://splunk.utopia.net:8089"
_authority(host="2001:0db8:85a3:0000:0000:8a2e:0370:7334") == \
"https://[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:8089"
_authority(scheme="http", host="splunk.utopia.net", port="471") == \
"http://splunk.utopia.net:471"
"""
if ':' in host:
# IPv6 addresses must be enclosed in [ ] in order to be well
# formed.
host = '[' + host + ']'
return UrlEncoded("%s://%s:%s" % (scheme, host, port), skip_encode=True)
# kwargs: sharing, owner, app
def namespace(sharing=None, owner=None, app=None, **kwargs):
"""This function constructs a Splunk namespace.
Every Splunk resource belongs to a namespace. The namespace is specified by
the pair of values ``owner`` and ``app`` and is governed by a ``sharing`` mode.
The possible values for ``sharing`` are: "user", "app", "global" and "system",
which map to the following combinations of ``owner`` and ``app`` values:
"user" => {owner}, {app}
"app" => nobody, {app}
"global" => nobody, {app}
"system" => nobody, system
"nobody" is a special user name that basically means no user, and "system"
is the name reserved for system resources.
"-" is a wildcard that can be used for both ``owner`` and ``app`` values and
refers to all users and all apps, respectively.
In general, when you specify a namespace you can specify any combination of
these three values and the library will reconcile the triple, overriding the
provided values as appropriate.
Finally, if no namespacing is specified the library will make use of the
``/services`` branch of the REST API, which provides a namespaced view of
    Splunk resources equivalent to using ``owner={currentUser}`` and
``app={defaultApp}``.
The ``namespace`` function returns a representation of the namespace from
reconciling the values you provide. It ignores any keyword arguments other
than ``owner``, ``app``, and ``sharing``, so you can provide ``dicts`` of
configuration information without first having to extract individual keys.
:param sharing: The sharing mode (the default is "user").
:type sharing: "system", "global", "app", or "user"
:param owner: The owner context (the default is "None").
:type owner: ``string``
:param app: The app context (the default is "None").
:type app: ``string``
:returns: A :class:`splunklib.data.Record` containing the reconciled
namespace.
**Example**::
import splunklib.binding as binding
n = binding.namespace(sharing="user", owner="boris", app="search")
n = binding.namespace(sharing="global", app="search")
"""
if sharing in ["system"]:
return record({'sharing': sharing, 'owner': "nobody", 'app': "system" })
if sharing in ["global", "app"]:
return record({'sharing': sharing, 'owner': "nobody", 'app': app})
if sharing in ["user", None]:
return record({'sharing': sharing, 'owner': owner, 'app': app})
raise ValueError("Invalid value for argument: 'sharing'")
class Context(object):
"""This class represents a context that encapsulates a splunkd connection.
The ``Context`` class encapsulates the details of HTTP requests,
authentication, a default namespace, and URL prefixes to simplify access to
the REST API.
After creating a ``Context`` object, you must call its :meth:`login`
method before you can issue requests to splunkd. Or, use the :func:`connect`
function to create an already-authenticated ``Context`` object. You can
provide a session token explicitly (the same token can be shared by multiple
``Context`` objects) to provide authentication.
:param host: The host name (the default is "localhost").
:type host: ``string``
:param port: The port number (the default is 8089).
:type port: ``integer``
:param scheme: The scheme for accessing the service (the default is "https").
:type scheme: "https" or "http"
:param sharing: The sharing mode for the namespace (the default is "user").
:type sharing: "global", "system", "app", or "user"
:param owner: The owner context of the namespace (optional, the default is "None").
:type owner: ``string``
:param app: The app context of the namespace (optional, the default is "None").
:type app: ``string``
:param token: A session token. When provided, you don't need to call :meth:`login`.
:type token: ``string``
:param cookie: A session cookie. When provided, you don't need to call :meth:`login`.
This parameter is only supported for Splunk 6.2+.
:type cookie: ``string``
:param username: The Splunk account username, which is used to
authenticate the Splunk instance.
:type username: ``string``
:param password: The password for the Splunk account.
:type password: ``string``
:param handler: The HTTP request handler (optional).
:returns: A ``Context`` instance.
**Example**::
import splunklib.binding as binding
c = binding.Context(username="boris", password="natasha", ...)
c.login()
# Or equivalently
c = binding.connect(username="boris", password="natasha")
# Or if you already have a session token
c = binding.Context(token="atg232342aa34324a")
# Or if you already have a valid cookie
c = binding.Context(cookie="splunkd_8089=...")
"""
def __init__(self, handler=None, **kwargs):
self.http = HttpLib(handler)
self.token = kwargs.get("token", _NoAuthenticationToken)
if self.token is None: # In case someone explicitly passes token=None
self.token = _NoAuthenticationToken
self.scheme = kwargs.get("scheme", DEFAULT_SCHEME)
self.host = kwargs.get("host", DEFAULT_HOST)
self.port = int(kwargs.get("port", DEFAULT_PORT))
self.authority = _authority(self.scheme, self.host, self.port)
self.namespace = namespace(**kwargs)
self.username = kwargs.get("username", "")
self.password = kwargs.get("password", "")
self.autologin = kwargs.get("autologin", False)
# Store any cookies in the self.http._cookies dict
if kwargs.has_key("cookie") and kwargs['cookie'] not in [None, _NoAuthenticationToken]:
_parse_cookies(kwargs["cookie"], self.http._cookies)
def get_cookies(self):
"""Gets the dictionary of cookies from the ``HttpLib`` member of this instance.
:return: Dictionary of cookies stored on the ``self.http``.
:rtype: ``dict``
"""
return self.http._cookies
def has_cookies(self):
"""Returns true if the ``HttpLib`` member of this instance has at least
one cookie stored.
:return: ``True`` if there is at least one cookie, else ``False``
:rtype: ``bool``
"""
return len(self.get_cookies()) > 0
# Shared per-context request headers
@property
def _auth_headers(self):
"""Headers required to authenticate a request.
        Assumes your ``Context`` already has an authentication token or
cookie, either provided explicitly or obtained by logging
into the Splunk instance.
:returns: A list of 2-tuples containing key and value
"""
if self.has_cookies():
return [("Cookie", _make_cookie_header(self.get_cookies().items()))]
elif self.token is _NoAuthenticationToken:
return []
else:
# Ensure the token is properly formatted
if self.token.startswith('Splunk '):
token = self.token
else:
token = 'Splunk %s' % self.token
return [("Authorization", token)]
def connect(self):
"""Returns an open connection (socket) to the Splunk instance.
This method is used for writing bulk events to an index or similar tasks
where the overhead of opening a connection multiple times would be
prohibitive.
:returns: A socket.
**Example**::
import splunklib.binding as binding
c = binding.connect(...)
socket = c.connect()
socket.write("POST %s HTTP/1.1\\r\\n" % "some/path/to/post/to")
socket.write("Host: %s:%s\\r\\n" % (c.host, c.port))
socket.write("Accept-Encoding: identity\\r\\n")
socket.write("Authorization: %s\\r\\n" % c.token)
socket.write("X-Splunk-Input-Mode: Streaming\\r\\n")
socket.write("\\r\\n")
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.scheme == "https":
sock = ssl.wrap_socket(sock)
sock.connect((socket.gethostbyname(self.host), self.port))
return sock
@_authentication
@_log_duration
def delete(self, path_segment, owner=None, app=None, sharing=None, **query):
"""Performs a DELETE operation at the REST path segment with the given
namespace and query.
This method is named to match the HTTP method. ``delete`` makes at least
one round trip to the server, one additional round trip for each 303
status returned, and at most two additional round trips if
the ``autologin`` field of :func:`connect` is set to ``True``.
If *owner*, *app*, and *sharing* are omitted, this method uses the
default :class:`Context` namespace. All other keyword arguments are
included in the URL as query parameters.
:raises AuthenticationError: Raised when the ``Context`` object is not
logged in.
        :raises HTTPError: Raised when an error occurs in a DELETE operation
            on *path_segment*.
:param path_segment: A REST path segment.
:type path_segment: ``string``
:param owner: The owner context of the namespace (optional).
:type owner: ``string``
:param app: The app context of the namespace (optional).
:type app: ``string``
:param sharing: The sharing mode of the namespace (optional).
:type sharing: ``string``
:param query: All other keyword arguments, which are used as query
parameters.
:type query: ``string``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
**Example**::
c = binding.connect(...)
c.delete('saved/searches/boris') == \\
{'body': ...a response reader object...,
'headers': [('content-length', '1786'),
('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'),
('server', 'Splunkd'),
('connection', 'close'),
('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'),
('date', 'Fri, 11 May 2012 16:53:06 GMT'),
('content-type', 'text/xml; charset=utf-8')],
'reason': 'OK',
'status': 200}
            c.delete('nonexistent/path') # raises HTTPError
c.logout()
c.delete('apps/local') # raises AuthenticationError
"""
path = self.authority + self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
logging.debug("DELETE request to %s (body: %s)", path, repr(query))
response = self.http.delete(path, self._auth_headers, **query)
return response
@_authentication
@_log_duration
def get(self, path_segment, owner=None, app=None, sharing=None, **query):
"""Performs a GET operation from the REST path segment with the given
namespace and query.
This method is named to match the HTTP method. ``get`` makes at least
one round trip to the server, one additional round trip for each 303
status returned, and at most two additional round trips if
the ``autologin`` field of :func:`connect` is set to ``True``.
If *owner*, *app*, and *sharing* are omitted, this method uses the
default :class:`Context` namespace. All other keyword arguments are
included in the URL as query parameters.
:raises AuthenticationError: Raised when the ``Context`` object is not
logged in.
:raises HTTPError: Raised when an error occurred in a GET operation from
*path_segment*.
:param path_segment: A REST path segment.
:type path_segment: ``string``
:param owner: The owner context of the namespace (optional).
:type owner: ``string``
:param app: The app context of the namespace (optional).
:type app: ``string``
:param sharing: The sharing mode of the namespace (optional).
:type sharing: ``string``
:param query: All other keyword arguments, which are used as query
parameters.
:type query: ``string``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
**Example**::
c = binding.connect(...)
c.get('apps/local') == \\
{'body': ...a response reader object...,
'headers': [('content-length', '26208'),
('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'),
('server', 'Splunkd'),
('connection', 'close'),
('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'),
('date', 'Fri, 11 May 2012 16:30:35 GMT'),
('content-type', 'text/xml; charset=utf-8')],
'reason': 'OK',
'status': 200}
            c.get('nonexistent/path') # raises HTTPError
c.logout()
c.get('apps/local') # raises AuthenticationError
"""
path = self.authority + self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
logging.debug("GET request to %s (body: %s)", path, repr(query))
response = self.http.get(path, self._auth_headers, **query)
return response
@_authentication
@_log_duration
def post(self, path_segment, owner=None, app=None, sharing=None, headers=None, **query):
"""Performs a POST operation from the REST path segment with the given
namespace and query.
This method is named to match the HTTP method. ``post`` makes at least
one round trip to the server, one additional round trip for each 303
status returned, and at most two additional round trips if
the ``autologin`` field of :func:`connect` is set to ``True``.
If *owner*, *app*, and *sharing* are omitted, this method uses the
default :class:`Context` namespace. All other keyword arguments are
included in the URL as query parameters.
Some of Splunk's endpoints, such as ``receivers/simple`` and
``receivers/stream``, require unstructured data in the POST body
and all metadata passed as GET-style arguments. If you provide
a ``body`` argument to ``post``, it will be used as the POST
body, and all other keyword arguments will be passed as
GET-style arguments in the URL.
:raises AuthenticationError: Raised when the ``Context`` object is not
logged in.
        :raises HTTPError: Raised when an error occurs in a POST operation
            on *path_segment*.
:param path_segment: A REST path segment.
:type path_segment: ``string``
:param owner: The owner context of the namespace (optional).
:type owner: ``string``
:param app: The app context of the namespace (optional).
:type app: ``string``
:param sharing: The sharing mode of the namespace (optional).
:type sharing: ``string``
:param headers: List of extra HTTP headers to send (optional).
:type headers: ``list`` of 2-tuples.
:param query: All other keyword arguments, which are used as query
parameters.
:type query: ``string``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
**Example**::
c = binding.connect(...)
c.post('saved/searches', name='boris',
search='search * earliest=-1m | head 1') == \\
{'body': ...a response reader object...,
'headers': [('content-length', '10455'),
('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'),
('server', 'Splunkd'),
('connection', 'close'),
('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'),
('date', 'Fri, 11 May 2012 16:46:06 GMT'),
('content-type', 'text/xml; charset=utf-8')],
'reason': 'Created',
'status': 201}
            c.post('nonexistent/path') # raises HTTPError
c.logout()
# raises AuthenticationError:
c.post('saved/searches', name='boris',
search='search * earliest=-1m | head 1')
"""
if headers is None:
headers = []
path = self.authority + self._abspath(path_segment, owner=owner, app=app, sharing=sharing)
logging.debug("POST request to %s (body: %s)", path, repr(query))
all_headers = headers + self._auth_headers
response = self.http.post(path, all_headers, **query)
return response
@_authentication
@_log_duration
def request(self, path_segment, method="GET", headers=None, body="",
owner=None, app=None, sharing=None):
"""Issues an arbitrary HTTP request to the REST path segment.
This method is named to match ``httplib.request``. This function
makes a single round trip to the server.
        If *owner*, *app*, and *sharing* are omitted, this method uses the
        default :class:`Context` namespace.
:raises AuthenticationError: Raised when the ``Context`` object is not
logged in.
        :raises HTTPError: Raised when an error occurs during the request to
            *path_segment*.
:param path_segment: A REST path segment.
:type path_segment: ``string``
:param method: The HTTP method to use (optional).
:type method: ``string``
:param headers: List of extra HTTP headers to send (optional).
:type headers: ``list`` of 2-tuples.
:param body: Content of the HTTP request (optional).
:type body: ``string``
:param owner: The owner context of the namespace (optional).
:type owner: ``string``
:param app: The app context of the namespace (optional).
:type app: ``string``
:param sharing: The sharing mode of the namespace (optional).
:type sharing: ``string``
:return: The response from the server.
:rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
and ``status``
**Example**::
c = binding.connect(...)
c.request('saved/searches', method='GET') == \\
{'body': ...a response reader object...,
'headers': [('content-length', '46722'),
('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'),
('server', 'Splunkd'),
('connection', 'close'),
('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'),
('date', 'Fri, 11 May 2012 17:24:19 GMT'),
('content-type', 'text/xml; charset=utf-8')],
'reason': 'OK',
'status': 200}
            c.request('nonexistent/path', method='GET') # raises HTTPError
c.logout()
c.get('apps/local') # raises AuthenticationError
"""
if headers is None:
headers = []
path = self.authority \
+ self._abspath(path_segment, owner=owner,
app=app, sharing=sharing)
all_headers = headers + self._auth_headers
logging.debug("%s request to %s (headers: %s, body: %s)",
method, path, str(all_headers), repr(body))
response = self.http.request(path,
{'method': method,
'headers': all_headers,
'body': body})
return response
def login(self):
"""Logs into the Splunk instance referred to by the :class:`Context`
object.
Unless a ``Context`` is created with an explicit authentication token
(probably obtained by logging in from a different ``Context`` object)
you must call :meth:`login` before you can issue requests.
The authentication token obtained from the server is stored in the
``token`` field of the ``Context`` object.
:raises AuthenticationError: Raised when login fails.
:returns: The ``Context`` object, so you can chain calls.
**Example**::
import splunklib.binding as binding
c = binding.Context(...).login()
# Then issue requests...
"""
if self.has_cookies() and \
(not self.username and not self.password):
# If we were passed session cookie(s), but no username or
# password, then login is a nop, since we're automatically
# logged in.
return
if self.token is not _NoAuthenticationToken and \
(not self.username and not self.password):
# If we were passed a session token, but no username or
# password, then login is a nop, since we're automatically
# logged in.
return
# Only try to get a token and updated cookie if username & password are specified
try:
response = self.http.post(
self.authority + self._abspath("/services/auth/login"),
username=self.username,
password=self.password,
cookie="1") # In Splunk 6.2+, passing "cookie=1" will return the "set-cookie" header
body = response.body.read()
session = XML(body).findtext("./sessionKey")
self.token = "Splunk %s" % session
return self
except HTTPError as he:
if he.status == 401:
raise AuthenticationError("Login failed.", he)
else:
raise
def logout(self):
"""Forgets the current session token, and cookies."""
self.token = _NoAuthenticationToken
self.http._cookies = {}
return self
def _abspath(self, path_segment,
owner=None, app=None, sharing=None):
"""Qualifies *path_segment* into an absolute path for a URL.
If *path_segment* is already absolute, returns it unchanged.
If *path_segment* is relative, then qualifies it with either
the provided namespace arguments or the ``Context``'s default
namespace. Any forbidden characters in *path_segment* are URL
encoded. This function has no network activity.
Named to be consistent with RFC2396_.
.. _RFC2396: http://www.ietf.org/rfc/rfc2396.txt
:param path_segment: A relative or absolute URL path segment.
:type path_segment: ``string``
:param owner, app, sharing: Components of a namespace (defaults
to the ``Context``'s namespace if all
three are omitted)
:type owner, app, sharing: ``string``
:return: A ``UrlEncoded`` (a subclass of ``str``).
:rtype: ``string``
**Example**::
import splunklib.binding as binding
c = binding.connect(owner='boris', app='search', sharing='user')
c._abspath('/a/b/c') == '/a/b/c'
c._abspath('/a/b c/d') == '/a/b%20c/d'
c._abspath('apps/local/search') == \
'/servicesNS/boris/search/apps/local/search'
c._abspath('apps/local/search', sharing='system') == \
'/servicesNS/nobody/system/apps/local/search'
url = c.authority + c._abspath('apps/local/sharing')
"""
skip_encode = isinstance(path_segment, UrlEncoded)
# If path_segment is absolute, escape all forbidden characters
# in it and return it.
if path_segment.startswith('/'):
return UrlEncoded(path_segment, skip_encode=skip_encode)
# path_segment is relative, so we need a namespace to build an
# absolute path.
if owner or app or sharing:
ns = namespace(owner=owner, app=app, sharing=sharing)
else:
ns = self.namespace
        # If no app or owner are specified, then use the /services
        # endpoint. Otherwise, use /servicesNS with the specified
        # namespace. If only one of app and owner is specified, fall
        # back to 'nobody' for the owner and 'system' for the app.
if ns.app is None and ns.owner is None:
return UrlEncoded("/services/%s" % path_segment, skip_encode=skip_encode)
oname = "nobody" if ns.owner is None else ns.owner
aname = "system" if ns.app is None else ns.app
path = UrlEncoded("/servicesNS/%s/%s/%s" % (oname, aname, path_segment),
skip_encode=skip_encode)
return path
def connect(**kwargs):
"""This function returns an authenticated :class:`Context` object.
This function is a shorthand for calling :meth:`Context.login`.
This function makes one round trip to the server.
:param host: The host name (the default is "localhost").
:type host: ``string``
:param port: The port number (the default is 8089).
:type port: ``integer``
:param scheme: The scheme for accessing the service (the default is "https").
:type scheme: "https" or "http"
:param owner: The owner context of the namespace (the default is "None").
:type owner: ``string``
:param app: The app context of the namespace (the default is "None").
:type app: ``string``
:param sharing: The sharing mode for the namespace (the default is "user").
:type sharing: "global", "system", "app", or "user"
:param token: The current session token (optional). Session tokens can be
shared across multiple service instances.
:type token: ``string``
:param cookie: A session cookie. When provided, you don't need to call :meth:`login`.
This parameter is only supported for Splunk 6.2+.
:type cookie: ``string``
:param username: The Splunk account username, which is used to
authenticate the Splunk instance.
:type username: ``string``
:param password: The password for the Splunk account.
:type password: ``string``
:param autologin: When ``True``, automatically tries to log in again if the
session terminates.
:type autologin: ``Boolean``
:return: An initialized :class:`Context` instance.
**Example**::
import splunklib.binding as binding
c = binding.connect(...)
response = c.get("apps/local")
"""
c = Context(**kwargs)
c.login()
return c
# Note: the error response schema supports multiple messages but we only
# return the first, although we do return the body so that an exception
# handler that wants to read multiple messages can do so.
class HTTPError(Exception):
"""This exception is raised for HTTP responses that return an error."""
def __init__(self, response, _message=None):
status = response.status
reason = response.reason
body = response.body.read()
try:
detail = XML(body).findtext("./messages/msg")
except ParseError as err:
detail = body
message = "HTTP %d %s%s" % (
status, reason, "" if detail is None else " -- %s" % detail)
Exception.__init__(self, _message or message)
self.status = status
self.reason = reason
self.headers = response.headers
self.body = body
self._response = response
class AuthenticationError(HTTPError):
"""Raised when a login request to Splunk fails.
If your username was unknown or you provided an incorrect password
in a call to :meth:`Context.login` or :meth:`splunklib.client.Service.login`,
this exception is raised.
"""
def __init__(self, message, cause):
# Put the body back in the response so that HTTPError's constructor can
# read it again.
cause._response.body = StringIO(cause.body)
HTTPError.__init__(self, cause._response, message)
#
# The HTTP interface used by the Splunk binding layer abstracts the underlying
# HTTP library using request & response 'messages' which are implemented as
# dictionaries with the following structure:
#
# # HTTP request message (only method required)
# request {
# method : str,
# headers? : [(str, str)*],
# body? : str,
# }
#
# # HTTP response message (all keys present)
# response {
# status : int,
# reason : str,
# headers : [(str, str)*],
# body : file,
# }
#
# Encode the given kwargs as a query string. This wrapper will also _encode
# a list value as a sequence of assignments to the corresponding arg name,
# for example an argument such as 'foo=[1,2,3]' will be encoded as
# 'foo=1&foo=2&foo=3'.
def _encode(**kwargs):
items = []
for key, value in kwargs.iteritems():
if isinstance(value, list):
items.extend([(key, item) for item in value])
else:
items.append((key, value))
return urllib.urlencode(items)
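# For example (illustrative): _encode(foo=[1, 2], bar='x') yields
# 'foo=1&foo=2&bar=x' (keyword ordering is not guaranteed).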
# Crack the given url into (scheme, host, port, path)
def _spliturl(url):
scheme, opaque = urllib.splittype(url)
netloc, path = urllib.splithost(opaque)
host, port = urllib.splitport(netloc)
    # Strip brackets if it's an IPv6 address
if host.startswith('[') and host.endswith(']'): host = host[1:-1]
if port is None: port = DEFAULT_PORT
return scheme, host, port, path
# Given an HTTP request handler, this wrapper object provides a related
# family of convenience methods built using that handler.
class HttpLib(object):
"""A set of convenient methods for making HTTP calls.
``HttpLib`` provides a general :meth:`request` method, and :meth:`delete`,
:meth:`post`, and :meth:`get` methods for the three HTTP methods that Splunk
uses.
By default, ``HttpLib`` uses Python's built-in ``httplib`` library,
but you can replace it by passing your own handling function to the
constructor for ``HttpLib``.
The handling function should have the type:
        ``handler(url, request_dict) -> response_dict``
    where ``url`` is the URL to make the request to (including any query
    and fragment sections), and ``request_dict`` is a dictionary with the
    following keys:
- method: The method for the request, typically ``GET``, ``POST``, or ``DELETE``.
        - headers: A list of pairs specifying the HTTP headers (for example: ``[('key', value), ...]``).
- body: A string containing the body to send with the request (this string
should default to '').
and ``response_dict`` is a dictionary with the following keys:
- status: An integer containing the HTTP status code (such as 200 or 404).
- reason: The reason phrase, if any, returned by the server.
        - headers: A list of pairs containing the response headers (for example, ``[('key', value), ...]``).
- body: A stream-like object supporting ``read(size=None)`` and ``close()``
methods to get the body of the response.
The response dictionary is returned directly by ``HttpLib``'s methods with
no further processing. By default, ``HttpLib`` calls the :func:`handler` function
to get a handler function.
"""
def __init__(self, custom_handler=None):
self.handler = handler() if custom_handler is None else custom_handler
self._cookies = {}
def delete(self, url, headers=None, **kwargs):
"""Sends a DELETE request to a URL.
:param url: The URL.
:type url: ``string``
        :param headers: A list of pairs specifying the headers for the HTTP
            request (for example, ``[('Content-Type', 'text/cthulhu'), ('Token', 'boris')]``).
:type headers: ``list``
:param kwargs: Additional keyword arguments (optional). These arguments
are interpreted as the query part of the URL. The order of keyword
arguments is not preserved in the request, but the keywords and
their arguments will be URL encoded.
:type kwargs: ``dict``
:returns: A dictionary describing the response (see :class:`HttpLib` for
its structure).
:rtype: ``dict``
"""
if headers is None: headers = []
if kwargs:
# url is already a UrlEncoded. We have to manually declare
# the query to be encoded or it will get automatically URL
# encoded by being appended to url.
url = url + UrlEncoded('?' + _encode(**kwargs), skip_encode=True)
message = {
'method': "DELETE",
'headers': headers,
}
return self.request(url, message)
def get(self, url, headers=None, **kwargs):
"""Sends a GET request to a URL.
:param url: The URL.
:type url: ``string``
        :param headers: A list of pairs specifying the headers for the HTTP
            request (for example, ``[('Content-Type', 'text/cthulhu'), ('Token', 'boris')]``).
:type headers: ``list``
:param kwargs: Additional keyword arguments (optional). These arguments
are interpreted as the query part of the URL. The order of keyword
arguments is not preserved in the request, but the keywords and
their arguments will be URL encoded.
:type kwargs: ``dict``
:returns: A dictionary describing the response (see :class:`HttpLib` for
its structure).
:rtype: ``dict``
"""
if headers is None: headers = []
if kwargs:
# url is already a UrlEncoded. We have to manually declare
# the query to be encoded or it will get automatically URL
# encoded by being appended to url.
url = url + UrlEncoded('?' + _encode(**kwargs), skip_encode=True)
return self.request(url, { 'method': "GET", 'headers': headers })
def post(self, url, headers=None, **kwargs):
"""Sends a POST request to a URL.
:param url: The URL.
:type url: ``string``
        :param headers: A list of pairs specifying the headers for the HTTP
            request (for example, ``[('Content-Type', 'text/cthulhu'), ('Token', 'boris')]``).
:type headers: ``list``
    :param kwargs: Additional keyword arguments (optional). If a ``body``
        keyword argument is supplied, its value is used as the body for the
        request and the remaining keyword arguments are URL encoded into
        the query part of the URL. If there is no ``body`` keyword argument,
        all the keyword arguments are encoded into the body of the request
        in the format ``x-www-form-urlencoded``.
:type kwargs: ``dict``
:returns: A dictionary describing the response (see :class:`HttpLib` for
its structure).
:rtype: ``dict``
"""
if headers is None: headers = []
        headers.append(("Content-Type", "application/x-www-form-urlencoded"))
# We handle GET-style arguments and an unstructured body. This is here
# to support the receivers/stream endpoint.
if 'body' in kwargs:
body = kwargs.pop('body')
if len(kwargs) > 0:
url = url + UrlEncoded('?' + _encode(**kwargs), skip_encode=True)
else:
body = _encode(**kwargs)
message = {
'method': "POST",
'headers': headers,
'body': body
}
return self.request(url, message)
def request(self, url, message, **kwargs):
"""Issues an HTTP request to a URL.
:param url: The URL.
:type url: ``string``
:param message: A dictionary with the format as described in
:class:`HttpLib`.
:type message: ``dict``
:param kwargs: Additional keyword arguments (optional). These arguments
are passed unchanged to the handler.
:type kwargs: ``dict``
:returns: A dictionary describing the response (see :class:`HttpLib` for
its structure).
:rtype: ``dict``
"""
response = self.handler(url, message, **kwargs)
response = record(response)
if 400 <= response.status:
raise HTTPError(response)
# Update the cookie with any HTTP request
# Initially, assume list of 2-tuples
key_value_tuples = response.headers
# If response.headers is a dict, get the key-value pairs as 2-tuples
# this is the case when using urllib2
if isinstance(response.headers, dict):
key_value_tuples = response.headers.items()
for key, value in key_value_tuples:
if key.lower() == "set-cookie":
_parse_cookies(value, self._cookies)
return response
# Converts an httplib response into a file-like object.
class ResponseReader(io.RawIOBase):
"""This class provides a file-like interface for :class:`httplib` responses.
The ``ResponseReader`` class is intended to be a layer to unify the different
types of HTTP libraries used with this SDK. This class also provides a
preview of the stream and a few useful predicates.
"""
# For testing, you can use a StringIO as the argument to
# ``ResponseReader`` instead of an ``httplib.HTTPResponse``. It
# will work equally well.
def __init__(self, response):
self._response = response
self._buffer = ''
def __str__(self):
return self.read()
@property
def empty(self):
"""Indicates whether there is any more data in the response."""
return self.peek(1) == ""
def peek(self, size):
"""Nondestructively retrieves a given number of characters.
The next :meth:`read` operation behaves as though this method was never
called.
:param size: The number of characters to retrieve.
:type size: ``integer``
"""
c = self.read(size)
self._buffer = self._buffer + c
return c
def close(self):
"""Closes this response."""
self._response.close()
def read(self, size = None):
"""Reads a given number of characters from the response.
:param size: The number of characters to read, or "None" to read the
entire response.
:type size: ``integer`` or "None"
"""
r = self._buffer
self._buffer = ''
if size is not None:
size -= len(r)
r = r + self._response.read(size)
return r
def readable(self):
""" Indicates that the response reader is readable."""
return True
def readinto(self, byte_array):
""" Read data into a byte array, upto the size of the byte array.
:param byte_array: A byte array/memory view to pour bytes into.
:type byte_array: ``bytearray`` or ``memoryview``
"""
max_size = len(byte_array)
data = self.read(max_size)
bytes_read = len(data)
byte_array[:bytes_read] = data
return bytes_read
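# Illustrative only: as the comment above notes, ``ResponseReader`` needs no
# more than a ``read``/``close`` pair, so a ``StringIO`` can stand in for an
# ``httplib.HTTPResponse`` during testing:
#
#     from StringIO import StringIO
#     reader = ResponseReader(StringIO("hello world"))
#     assert reader.peek(5) == "hello"      # peek() does not consume
#     assert reader.read() == "hello world"
#     assert reader.empty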
def handler(key_file=None, cert_file=None, timeout=None):
"""This class returns an instance of the default HTTP request handler using
the values you provide.
:param `key_file`: A path to a PEM (Privacy Enhanced Mail) formatted file containing your private key (optional).
:type key_file: ``string``
:param `cert_file`: A path to a PEM (Privacy Enhanced Mail) formatted file containing a certificate chain file (optional).
:type cert_file: ``string``
:param `timeout`: The request time-out period, in seconds (optional).
:type timeout: ``integer`` or "None"
"""
def connect(scheme, host, port):
kwargs = {}
if timeout is not None: kwargs['timeout'] = timeout
if scheme == "http":
return httplib.HTTPConnection(host, port, **kwargs)
if scheme == "https":
if key_file is not None: kwargs['key_file'] = key_file
if cert_file is not None: kwargs['cert_file'] = cert_file
# If running Python 2.7.9+, disable SSL certificate validation
if sys.version_info >= (2,7,9) and key_file is None and cert_file is None:
kwargs['context'] = ssl._create_unverified_context()
return httplib.HTTPSConnection(host, port, **kwargs)
raise ValueError("unsupported scheme: %s" % scheme)
def request(url, message, **kwargs):
scheme, host, port, path = _spliturl(url)
body = message.get("body", "")
head = {
"Content-Length": str(len(body)),
"Host": host,
"User-Agent": "splunk-sdk-python/1.4",
"Accept": "*/*",
} # defaults
for key, value in message["headers"]:
head[key] = value
method = message.get("method", "GET")
connection = connect(scheme, host, port)
try:
connection.request(method, path, body, head)
if timeout is not None:
connection.sock.settimeout(timeout)
response = connection.getresponse()
finally:
connection.close()
return {
"status": response.status,
"reason": response.reason,
"headers": response.getheaders(),
"body": ResponseReader(response),
}
return request
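# Illustrative usage (the URL is hypothetical): build a handler with a
# 30-second timeout and issue requests through HttpLib:
#
#     http = HttpLib(handler(timeout=30))
#     response = http.get("https://localhost:8089/services")
#     print response.status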
|
apache-2.0
|
seleniumbase/SeleniumBase
|
examples/swag_labs_suite.py
|
1
|
3866
|
from parameterized import parameterized
from seleniumbase import BaseCase
class SwagLabsTests(BaseCase):
def login_to_swag_labs(self, username="standard_user"):
""" Login to Swag Labs and verify success. """
url = "https://www.saucedemo.com"
self.open(url)
if username not in self.get_text("#login_credentials"):
self.fail("Invalid user for login: %s" % username)
self.type("#user-name", username)
self.type("#password", "secret_sauce")
self.click('input[type="submit"]')
self.assert_element("#inventory_container")
self.assert_element('div:contains("Sauce Labs Backpack")')
@parameterized.expand(
[
["standard_user"],
["problem_user"],
]
)
def test_swag_labs_basic_flow(self, username):
"""This test checks functional flow of the Swag Labs store.
This test is parameterized on the login user."""
self.login_to_swag_labs(username=username)
if username == "problem_user":
print("\n(This test should fail)")
# Verify that the "Test.allTheThings() T-Shirt" appears on the page
item_name = "Test.allTheThings() T-Shirt"
self.assert_text(item_name)
# Verify that a reverse-alphabetical sort works as expected
self.select_option_by_value("select.product_sort_container", "za")
if item_name not in self.get_text("div.inventory_item"):
self.fail('Sort Failed! Expecting "%s" on top!' % item_name)
# Add the "Test.allTheThings() T-Shirt" to the cart
self.assert_exact_text("ADD TO CART", "button.btn_inventory")
item_price = self.get_text("div.inventory_item_price")
self.click("button.btn_inventory")
self.assert_exact_text("REMOVE", "button.btn_inventory")
self.assert_exact_text("1", "span.shopping_cart_badge")
# Verify your cart
self.click("#shopping_cart_container")
self.assert_element('span:contains("Your Cart")')
self.assert_text(item_name, "div.inventory_item_name")
self.assert_exact_text("1", "div.cart_quantity")
self.assert_exact_text("REMOVE", "button.cart_button")
self.assert_element("button#continue-shopping")
# Checkout - Add info
self.click("button#checkout")
self.assert_element('span:contains("Checkout: Your Information")')
self.assert_element("button#cancel")
self.type("#first-name", "SeleniumBase")
self.type("#last-name", "Rocks")
self.type("#postal-code", "01720")
# Checkout - Overview
self.click("input#continue")
self.assert_element('span:contains("Checkout: Overview")')
self.assert_element("button#cancel")
self.assert_text(item_name, "div.inventory_item_name")
self.assert_text(item_price, "div.inventory_item_price")
self.assert_exact_text("1", "div.cart_quantity")
# Finish Checkout and verify that the cart is now empty
self.click("button#finish")
self.assert_exact_text("THANK YOU FOR YOUR ORDER", "h2")
self.assert_element("img.pony_express")
self.click("#shopping_cart_container")
self.assert_element_absent("div.inventory_item_name")
self.click("button#continue-shopping")
self.assert_element_absent("span.shopping_cart_badge")
def tearDown(self):
self.save_teardown_screenshot()
# Reset App State and Logout if the controls are present
try:
if self.is_element_present("a#reset_sidebar_link"):
self.js_click("a#reset_sidebar_link")
if self.is_element_present("a#logout_sidebar_link"):
self.js_click("a#logout_sidebar_link")
except Exception:
pass
super(SwagLabsTests, self).tearDown()
|
mit
|
ovnicraft/edx-platform
|
common/test/acceptance/pages/studio/settings_group_configurations.py
|
51
|
10070
|
"""
Course Group Configurations page.
"""
from bok_choy.promise import EmptyPromise
from ..common.utils import confirm_prompt
from .course_page import CoursePage
class GroupConfigurationsPage(CoursePage):
"""
Course Group Configurations page.
"""
url_path = "group_configurations"
experiment_groups_css = ".experiment-groups"
content_groups_css = ".content-groups"
def is_browser_on_page(self):
"""
Verify that the browser is on the page and it is not still loading.
"""
EmptyPromise(
lambda: self.q(css='body.view-group-configurations').present,
'On the group configuration page'
).fulfill()
EmptyPromise(
lambda: not self.q(css='span.spin').visible,
'Group Configurations are finished loading'
).fulfill()
return True
@property
def experiment_group_configurations(self):
"""
Return list of the experiment group configurations for the course.
"""
return self._get_groups(self.experiment_groups_css)
@property
def content_groups(self):
"""
Return list of the content groups for the course.
"""
return self._get_groups(self.content_groups_css)
def _get_groups(self, prefix):
"""
Return list of the group-configurations-list-item's of specified type for the course.
"""
css = prefix + ' .wrapper-collection'
return [GroupConfiguration(self, prefix, index) for index in xrange(len(self.q(css=css)))]
def create_experiment_group_configuration(self):
"""
Creates new group configuration.
"""
self.q(css=self.experiment_groups_css + " .new-button").first.click()
def create_first_content_group(self):
"""
Creates new content group when there are none initially defined.
"""
self.q(css=self.content_groups_css + " .new-button").first.click()
def add_content_group(self):
"""
        Creates new content group when at least one already exists.
"""
self.q(css=self.content_groups_css + " .action-add").first.click()
@property
def no_experiment_groups_message_is_present(self):
return self._no_content_message(self.experiment_groups_css).present
@property
def no_content_groups_message_is_present(self):
return self._no_content_message(self.content_groups_css).present
@property
def no_experiment_groups_message_text(self):
return self._no_content_message(self.experiment_groups_css).text[0]
@property
def no_content_groups_message_text(self):
return self._no_content_message(self.content_groups_css).text[0]
def _no_content_message(self, prefix):
"""
Returns the message about "no content" for the specified type.
"""
return self.q(css='.wrapper-content ' + prefix + ' .no-content')
@property
def experiment_group_sections_present(self):
"""
Returns whether or not anything related to content experiments is present.
"""
return self.q(css=self.experiment_groups_css).present or self.q(css=".experiment-groups-doc").present
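# Illustrative usage from a bok-choy acceptance test (the constructor
# arguments follow CoursePage and are shown schematically):
#
#     page = GroupConfigurationsPage(browser, course_org, course_num, course_run)
#     page.visit()
#     page.create_experiment_group_configuration()
#     config = page.experiment_group_configurations[0]
#     config.name = 'My Configuration'
#     config.save()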
class GroupConfiguration(object):
"""
Group Configuration wrapper.
"""
def __init__(self, page, prefix, index):
self.page = page
self.SELECTOR = prefix + ' .wrapper-collection-{}'.format(index)
self.index = index
def get_selector(self, css=''):
return ' '.join([self.SELECTOR, css])
def find_css(self, selector):
"""
Find elements as defined by css locator.
"""
return self.page.q(css=self.get_selector(css=selector))
def toggle(self):
"""
Expand/collapse group configuration.
"""
self.find_css('a.group-toggle').first.click()
@property
def is_expanded(self):
"""
Group configuration usage information is expanded.
"""
return self.find_css('a.group-toggle.hide-groups').present
def add_group(self):
"""
Add new group.
"""
self.find_css('button.action-add-group').first.click()
def get_text(self, css):
"""
        Return the text of the element defined by the css locator.
"""
return self.find_css(css).first.text[0]
def click_outline_anchor(self):
"""
Click on the `Course Outline` link.
"""
self.find_css('p.group-configuration-usage-text a').first.click()
def click_unit_anchor(self, index=0):
"""
Click on the link to the unit.
"""
self.find_css('li.group-configuration-usage-unit a').nth(index).click()
def edit(self):
"""
Open editing view for the group configuration.
"""
self.find_css('.action-edit .edit').first.click()
@property
def delete_button_is_disabled(self):
return self.find_css('.actions .delete.is-disabled').present
@property
def delete_button_is_present(self):
"""
Returns whether or not the delete icon is present.
"""
return self.find_css('.actions .delete').present
def delete(self):
"""
Delete the group configuration.
"""
self.find_css('.actions .delete').first.click()
confirm_prompt(self.page)
def save(self):
"""
Save group configuration.
"""
self.find_css('.action-primary').first.click()
self.page.wait_for_ajax()
def cancel(self):
"""
Cancel group configuration.
"""
self.find_css('.action-secondary').first.click()
@property
def mode(self):
"""
Return group configuration mode.
"""
if self.find_css('.collection-edit').present:
return 'edit'
elif self.find_css('.collection').present:
return 'details'
@property
def id(self):
"""
Return group configuration id.
"""
return self.get_text('.group-configuration-id .group-configuration-value')
@property
def validation_message(self):
"""
Return validation message.
"""
return self.get_text('.message-status.error')
@property
def usages(self):
"""
Return list of usages.
"""
css = '.group-configuration-usage-unit'
return self.find_css(css).text
@property
def name(self):
"""
Return group configuration name.
"""
return self.get_text('.title')
@name.setter
def name(self, value):
"""
Set group configuration name.
"""
self.find_css('.collection-name-input').first.fill(value)
@property
def description(self):
"""
Return group configuration description.
"""
return self.get_text('.group-configuration-description')
@description.setter
def description(self, value):
"""
Set group configuration description.
"""
self.find_css('.group-configuration-description-input').first.fill(value)
@property
def groups(self):
"""
Return list of groups.
"""
def group_selector(group_index):
return self.get_selector('.group-{} '.format(group_index))
return [Group(self.page, group_selector(index)) for index, element in enumerate(self.find_css('.group'))]
@property
def delete_note(self):
"""
Return delete note for the group configuration.
"""
return self.find_css('.wrapper-delete-button').first.attrs('data-tooltip')[0]
@property
def details_error_icon_is_present(self):
return self.find_css('.wrapper-group-configuration-usages .fa-exclamation-circle').present
@property
def details_warning_icon_is_present(self):
return self.find_css('.wrapper-group-configuration-usages .fa-warning').present
@property
def details_message_is_present(self):
return self.find_css('.wrapper-group-configuration-usages .group-configuration-validation-message').present
@property
def details_message_text(self):
return self.find_css('.wrapper-group-configuration-usages .group-configuration-validation-message').text[0]
@property
def edit_warning_icon_is_present(self):
return self.find_css('.wrapper-group-configuration-validation .fa-warning').present
@property
def edit_warning_message_is_present(self):
return self.find_css('.wrapper-group-configuration-validation .group-configuration-validation-text').present
@property
def edit_warning_message_text(self):
return self.find_css('.wrapper-group-configuration-validation .group-configuration-validation-text').text[0]
def __repr__(self):
return "<{}:{}>".format(self.__class__.__name__, self.name)
class Group(object):
"""
Group wrapper.
"""
def __init__(self, page, prefix_selector):
self.page = page
self.prefix = prefix_selector
def find_css(self, selector):
"""
Find elements as defined by css locator.
"""
return self.page.q(css=self.prefix + selector)
@property
def name(self):
"""
        Return the name of the group.
"""
css = '.group-name'
return self.find_css(css).first.text[0]
@name.setter
def name(self, value):
"""
Set the name for the group.
"""
css = '.group-name'
self.find_css(css).first.fill(value)
@property
def allocation(self):
"""
Return allocation for the group.
"""
css = '.group-allocation'
return self.find_css(css).first.text[0]
def remove(self):
"""
Remove the group.
"""
css = '.action-close'
return self.find_css(css).first.click()
def __repr__(self):
return "<{}:{}>".format(self.__class__.__name__, self.name)
|
agpl-3.0
|
namecoin/namecore
|
test/functional/name_pending.py
|
4
|
3956
|
#!/usr/bin/env python3
# Copyright (c) 2015-2019 Daniel Kraft
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# RPC test for name_pending call.
from test_framework.names import NameTestFramework
from test_framework.util import *
class NamePendingTest (NameTestFramework):
def set_test_params (self):
self.setup_name_test ([[]] * 2)
def run_test (self):
node = self.nodes[0]
# Register a name that can then be update'd in the mempool.
newData = node.name_new ("a")
node.generate (10)
self.firstupdateName (0, "a", newData, "old-value-a")
node.generate (10)
# Start a new name registration so we can first_update it.
newData = node.name_new ("b")
node.generate (15)
# Perform the unconfirmed updates. Include a currency transaction
# and a name_new to check that those are not shown.
txa = node.name_update ("a", "value-a")
txb = self.firstupdateName (0, "b", newData, "value-b")
addrOther = self.nodes[1].getnewaddress ()
node.sendtoaddress (addrOther, 1)
newData = node.name_new ("c")
# Check that name_show still returns the old value.
self.checkName (0, "a", "old-value-a", None, False)
# Check sizes of mempool against name_pending.
mempool = node.getrawmempool ()
assert_equal (len (mempool), 4)
pending = node.name_pending ()
assert_equal (len (pending), 2)
# Check result of full name_pending (called above).
for op in pending:
assert op['txid'] in mempool
if op['name'] == 'a':
assert_equal (op['op'], 'name_update')
assert_equal (op['value'], 'value-a')
assert_equal (op['txid'], txa)
elif op['name'] == 'b':
assert_equal (op['op'], 'name_firstupdate')
assert_equal (op['value'], 'value-b')
assert_equal (op['txid'], txb)
else:
assert False
# Check name_pending with name filter that does not match any name.
pending = node.name_pending ('does not exist')
assert_equal (pending, [])
# Check name_pending with name filter.
self.checkPendingName (0, 'a', 'name_update', 'value-a', txa)
# We don't know the golden value for vout, as this is randomised. But we
# can store the output now and then verify it with name_show after the
# update has been mined.
pending = node.name_pending ('a')
assert_equal (len (pending), 1)
pending = pending[0]
assert 'vout' in pending
# Mine a block and check that all mempool is cleared.
node.generate (1)
assert_equal (node.getrawmempool (), [])
assert_equal (node.name_pending (), [])
# Verify vout from before against name_show.
confirmed = node.name_show ('a')
assert_equal (pending['vout'], confirmed['vout'])
# Send a name and check that ismine is handled correctly.
tx = node.name_update ('a', 'sent-a', {"destAddress": addrOther})
self.sync_mempools ()
self.checkPendingName (0, 'a', 'name_update', 'sent-a', tx, False)
self.checkPendingName (1, 'a', 'name_update', 'sent-a', tx, True)
def checkPendingName (self, ind, name, op, value, txid, mine=None):
"""
Call name_pending on a given name and check that the result
matches the expected values.
"""
res = self.nodes[ind].name_pending (name)
assert_equal (len (res), 1)
obj = res[0]
assert_equal (obj['op'], op)
assert_equal (obj['name'], name)
assert_equal (obj['value'], value)
assert_equal (obj['txid'], txid)
assert isinstance (obj['ismine'], bool)
if mine is not None:
assert_equal (obj['ismine'], mine)
# There is no golden value for vout, but we can decode the transaction
# to make sure it is correct.
rawtx = self.nodes[ind].getrawtransaction (txid, 1)
assert 'nameOp' in rawtx['vout'][obj['vout']]['scriptPubKey']
if __name__ == '__main__':
NamePendingTest ().main ()
|
mit
|
onceuponatimeforever/oh-mainline
|
vendor/packages/sphinx/sphinx/ext/graphviz.py
|
15
|
11017
|
# -*- coding: utf-8 -*-
"""
sphinx.ext.graphviz
~~~~~~~~~~~~~~~~~~~
Allow graphviz-formatted graphs to be included in Sphinx-generated
documents inline.
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import codecs
import posixpath
from os import path
from subprocess import Popen, PIPE
try:
from hashlib import sha1 as sha
except ImportError:
from sha import sha
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.errors import SphinxError
from sphinx.locale import _
from sphinx.util.osutil import ensuredir, ENOENT, EPIPE, EINVAL
from sphinx.util.compat import Directive
mapname_re = re.compile(r'<map id="(.*?)"')
class GraphvizError(SphinxError):
category = 'Graphviz error'
class graphviz(nodes.General, nodes.Element):
pass
class Graphviz(Directive):
"""
Directive to insert arbitrary dot markup.
"""
has_content = True
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = False
option_spec = {
'alt': directives.unchanged,
'inline': directives.flag,
'caption': directives.unchanged,
}
def run(self):
if self.arguments:
document = self.state.document
if self.content:
return [document.reporter.warning(
'Graphviz directive cannot have both content and '
'a filename argument', line=self.lineno)]
env = self.state.document.settings.env
rel_filename, filename = env.relfn2path(self.arguments[0])
env.note_dependency(rel_filename)
try:
fp = codecs.open(filename, 'r', 'utf-8')
try:
dotcode = fp.read()
finally:
fp.close()
except (IOError, OSError):
return [document.reporter.warning(
'External Graphviz file %r not found or reading '
'it failed' % filename, line=self.lineno)]
else:
dotcode = '\n'.join(self.content)
if not dotcode.strip():
return [self.state_machine.reporter.warning(
'Ignoring "graphviz" directive without content.',
line=self.lineno)]
node = graphviz()
node['code'] = dotcode
node['options'] = []
if 'alt' in self.options:
node['alt'] = self.options['alt']
if 'caption' in self.options:
node['caption'] = self.options['caption']
node['inline'] = 'inline' in self.options
return [node]
class GraphvizSimple(Directive):
"""
Directive to insert arbitrary dot markup.
"""
has_content = True
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'alt': directives.unchanged,
'inline': directives.flag,
'caption': directives.unchanged,
}
def run(self):
node = graphviz()
node['code'] = '%s %s {\n%s\n}\n' % \
(self.name, self.arguments[0], '\n'.join(self.content))
node['options'] = []
if 'alt' in self.options:
node['alt'] = self.options['alt']
if 'caption' in self.options:
node['caption'] = self.options['caption']
node['inline'] = 'inline' in self.options
return [node]
def render_dot(self, code, options, format, prefix='graphviz'):
"""Render graphviz code into a PNG or PDF output file."""
hashkey = (code + str(options) + \
str(self.builder.config.graphviz_dot) + \
str(self.builder.config.graphviz_dot_args)
).encode('utf-8')
fname = '%s-%s.%s' % (prefix, sha(hashkey).hexdigest(), format)
if hasattr(self.builder, 'imgpath'):
# HTML
relfn = posixpath.join(self.builder.imgpath, fname)
outfn = path.join(self.builder.outdir, '_images', fname)
else:
# LaTeX
relfn = fname
outfn = path.join(self.builder.outdir, fname)
if path.isfile(outfn):
return relfn, outfn
if hasattr(self.builder, '_graphviz_warned_dot') or \
hasattr(self.builder, '_graphviz_warned_ps2pdf'):
return None, None
ensuredir(path.dirname(outfn))
# graphviz expects UTF-8 by default
if isinstance(code, unicode):
code = code.encode('utf-8')
dot_args = [self.builder.config.graphviz_dot]
dot_args.extend(self.builder.config.graphviz_dot_args)
dot_args.extend(options)
dot_args.extend(['-T' + format, '-o' + outfn])
if format == 'png':
dot_args.extend(['-Tcmapx', '-o%s.map' % outfn])
try:
p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE)
except OSError, err:
if err.errno != ENOENT: # No such file or directory
raise
self.builder.warn('dot command %r cannot be run (needed for graphviz '
'output), check the graphviz_dot setting' %
self.builder.config.graphviz_dot)
self.builder._graphviz_warned_dot = True
return None, None
try:
# Graphviz may close standard input when an error occurs,
# resulting in a broken pipe on communicate()
stdout, stderr = p.communicate(code)
except (OSError, IOError), err:
if err.errno not in (EPIPE, EINVAL):
raise
# in this case, read the standard output and standard error streams
# directly, to get the error message(s)
stdout, stderr = p.stdout.read(), p.stderr.read()
p.wait()
if p.returncode != 0:
raise GraphvizError('dot exited with error:\n[stderr]\n%s\n'
'[stdout]\n%s' % (stderr, stdout))
return relfn, outfn
def render_dot_html(self, node, code, options, prefix='graphviz',
imgcls=None, alt=None):
format = self.builder.config.graphviz_output_format
try:
if format not in ('png', 'svg'):
raise GraphvizError("graphviz_output_format must be one of 'png', "
"'svg', but is %r" % format)
fname, outfn = render_dot(self, code, options, format, prefix)
except GraphvizError, exc:
self.builder.warn('dot code %r: ' % code + str(exc))
raise nodes.SkipNode
inline = node.get('inline', False)
if inline:
wrapper = 'span'
else:
wrapper = 'p'
self.body.append(self.starttag(node, wrapper, CLASS='graphviz'))
if fname is None:
self.body.append(self.encode(code))
else:
if alt is None:
alt = node.get('alt', self.encode(code).strip())
imgcss = imgcls and 'class="%s"' % imgcls or ''
if format == 'svg':
svgtag = '<img src="%s" alt="%s" %s/>\n' % (fname, alt, imgcss)
self.body.append(svgtag)
else:
mapfile = open(outfn + '.map', 'rb')
try:
imgmap = mapfile.readlines()
finally:
mapfile.close()
if len(imgmap) == 2:
# nothing in image map (the lines are <map> and </map>)
self.body.append('<img src="%s" alt="%s" %s/>\n' %
(fname, alt, imgcss))
else:
# has a map: get the name of the map and connect the parts
mapname = mapname_re.match(imgmap[0].decode('utf-8')).group(1)
self.body.append('<img src="%s" alt="%s" usemap="#%s" %s/>\n' %
(fname, alt, mapname, imgcss))
self.body.extend([item.decode('utf-8') for item in imgmap])
if node.get('caption') and not inline:
self.body.append('</p>\n<p class="caption">')
self.body.append(self.encode(node['caption']))
self.body.append('</%s>\n' % wrapper)
raise nodes.SkipNode
def html_visit_graphviz(self, node):
render_dot_html(self, node, node['code'], node['options'])
def render_dot_latex(self, node, code, options, prefix='graphviz'):
try:
fname, outfn = render_dot(self, code, options, 'pdf', prefix)
except GraphvizError, exc:
self.builder.warn('dot code %r: ' % code + str(exc))
raise nodes.SkipNode
inline = node.get('inline', False)
if inline:
para_separator = ''
else:
para_separator = '\n'
if fname is not None:
caption = node.get('caption')
# XXX add ids from previous target node
if caption and not inline:
self.body.append('\n\\begin{figure}[h!]')
self.body.append('\n\\begin{center}')
self.body.append('\n\\caption{%s}' % self.encode(caption))
self.body.append('\n\\includegraphics{%s}' % fname)
self.body.append('\n\\end{center}')
self.body.append('\n\\end{figure}\n')
else:
self.body.append('%s\\includegraphics{%s}%s' %
(para_separator, fname, para_separator))
raise nodes.SkipNode
def latex_visit_graphviz(self, node):
render_dot_latex(self, node, node['code'], node['options'])
def render_dot_texinfo(self, node, code, options, prefix='graphviz'):
try:
fname, outfn = render_dot(self, code, options, 'png', prefix)
except GraphvizError, exc:
self.builder.warn('dot code %r: ' % code + str(exc))
raise nodes.SkipNode
if fname is not None:
self.body.append('\n\n@float\n')
caption = node.get('caption')
if caption:
self.body.append('@caption{%s}\n' % self.escape_arg(caption))
self.body.append('@image{%s,,,[graphviz],png}\n'
'@end float\n\n' % fname[:-4])
raise nodes.SkipNode
def texinfo_visit_graphviz(self, node):
render_dot_texinfo(self, node, node['code'], node['options'])
def text_visit_graphviz(self, node):
if 'alt' in node.attributes:
self.add_text(_('[graph: %s]') % node['alt'])
else:
self.add_text(_('[graph]'))
raise nodes.SkipNode
def man_visit_graphviz(self, node):
if 'alt' in node.attributes:
self.body.append(_('[graph: %s]') % node['alt'])
else:
self.body.append(_('[graph]'))
raise nodes.SkipNode
def setup(app):
app.add_node(graphviz,
html=(html_visit_graphviz, None),
latex=(latex_visit_graphviz, None),
texinfo=(texinfo_visit_graphviz, None),
text=(text_visit_graphviz, None),
man=(man_visit_graphviz, None))
app.add_directive('graphviz', Graphviz)
app.add_directive('graph', GraphvizSimple)
app.add_directive('digraph', GraphvizSimple)
app.add_config_value('graphviz_dot', 'dot', 'html')
app.add_config_value('graphviz_dot_args', [], 'html')
app.add_config_value('graphviz_output_format', 'png', 'html')
|
agpl-3.0
|
vagonbar/GNUnetwork
|
gwn/utils/libfsm/fsm.py
|
2
|
14255
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of GNUWiNetwork,
# Copyright (C) 2014 by
# Pablo Belzarena, Gabriel Gomez Sena, Victor Gonzalez Barbone,
# Facultad de Ingenieria, Universidad de la Republica, Uruguay.
#
# GNUWiNetwork is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GNUWiNetwork is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNUWiNetwork. If not, see <http://www.gnu.org/licenses/>.
#
"""Implementation of a Finite State Machine (FSM).
This module implements a Finite State Machine (FSM). In addition to state
this FSM also maintains a user defined "memory". So this FSM can be used as a
Push-down Automata (PDA) since a PDA is a FSM + memory.
The following describes how the FSM works, but you will probably also need to
see the example function to understand how the FSM is used in practice.
You define an FSM by building tables of transitions. For a given input symbol
the process() method uses these tables to decide what action to call and what
the next state will be. The FSM has a table of transitions that associate::
(input_symbol, current_state) --> (action, next_state)
Where "action" is a function you define. The symbols and states can be any
objects. You use the add_transition() and add_transition_list() methods to add
to the transition table. The FSM also has a table of transitions that
associate::
(current_state) --> (action, next_state)
You use the add_transition_any() method to add to this transition table. The
FSM also has one default transition that is not associated with any specific
input_symbol or state. You use the set_default_transition() method to set the
default transition.
When an action function is called it is passed a reference to the FSM. The
action function may then access attributes of the FSM such as input_symbol,
current_state, or "memory". The "memory" attribute can be any object that you
want to pass along to the action functions. It is not used by the FSM itself.
For parsing you would typically pass a list to be used as a stack.
The processing sequence is as follows. The process() method is given an
input_symbol to process. The FSM will search the table of transitions that
associate::
(input_symbol, current_state) --> (action, next_state)
If the pair (input_symbol, current_state) is found then process() will call the
associated action function and then set the current state to the next_state.
If the FSM cannot find a match for (input_symbol, current_state) it will then
search the table of transitions that associate::
(current_state) --> (action, next_state)
If the current_state is found then the process() method will call the
associated action function and then set the current state to the next_state.
Notice that this table lacks an input_symbol. It lets you define transitions
for a current_state and ANY input_symbol. Hence, it is called the "any" table.
Remember, it is always checked after first searching the table for a specific
(input_symbol, current_state).
For the case where the FSM did not match either of the previous two cases the
FSM will try to use the default transition. If the default transition is
defined then the process() method will call the associated action function and
then set the current state to the next_state. This lets you define a default
transition as a catch-all case. You can think of it as an exception handler.
There can be only one default transition.
Finally, if none of the previous cases are defined for an input_symbol and
current_state then the FSM will raise an exception. This may be desirable, but
you can always prevent this just by defining a default transition.
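A minimal sketch of the API (see the RPN example at the bottom of this
module for a complete program)::

    fsm = FSM('INIT', memory=[])
    fsm.set_default_transition(None, 'INIT')
    fsm.add_transition('a', 'INIT', action=None, next_state='GOT_A')
    fsm.process('a')
    print fsm.current_state    # prints 'GOT_A'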
Noah Spurrier 20020822
"""
class ExceptionFSM(Exception):
"""This is the FSM Exception class."""
def __init__(self, value):
self.value = value
def __str__(self):
        return repr(self.value)
class FSM:
"""This is a Finite State Machine (FSM).
"""
def __init__(self, initial_state, memory=None):
"""This creates the FSM. You set the initial state here. The "memory"
attribute is any object that you want to pass along to the action
functions. It is not used by the FSM. For parsing you would typically
pass a list to be used as a stack. """
# Map (input_symbol, current_state) --> (action, next_state).
self.state_transitions = {}
# Map (current_state) --> (action, next_state).
self.state_transitions_any = {}
self.default_transition = None
self.input_symbol = None
self.initial_state = initial_state
self.current_state = self.initial_state
self.next_state = None
self.action = None
self.memory = memory
def reset (self):
"""This sets the current_state to the initial_state and sets
input_symbol to None. The initial state was set by the constructor
__init__(). """
self.current_state = self.initial_state
self.input_symbol = None
def add_transition (self, input_symbol, state, action=None, next_state=None):
"""This adds a transition that associates::
(input_symbol, current_state) --> (action, next_state)
The action may be set to None in which case the process() method will
ignore the action and only set the next_state. The next_state may be
set to None in which case the current state will be unchanged.
You can also set transitions for a list of symbols by using
add_transition_list(). """
if next_state is None:
next_state = state
self.state_transitions[(input_symbol, state)] = (action, next_state)
def add_transition_list (self, list_input_symbols, state, action=None, next_state=None):
"""This adds the same transition for a list of input symbols.
You can pass a list or a string. Note that it is handy to use
string.digits, string.whitespace, string.letters, etc. to add
transitions that match character classes.
The action may be set to None in which case the process() method will
ignore the action and only set the next_state. The next_state may be
set to None in which case the current state will be unchanged. """
if next_state is None:
next_state = state
for input_symbol in list_input_symbols:
self.add_transition (input_symbol, state, action, next_state)
def add_transition_any (self, state, action=None, next_state=None):
"""This adds a transition that associates::
(current_state) --> (action, next_state)
That is, any input symbol will match the current state.
The process() method checks the "any" state associations after it first
checks for an exact match of (input_symbol, current_state).
The action may be set to None in which case the process() method will
ignore the action and only set the next_state. The next_state may be
set to None in which case the current state will be unchanged. """
if next_state is None:
next_state = state
self.state_transitions_any [state] = (action, next_state)
def set_default_transition (self, action, next_state):
"""This sets the default transition. This defines an action and
next_state if the FSM cannot find the input symbol and the current
state in the transition list and if the FSM cannot find the
current_state in the transition_any list. This is useful as a final
fall-through state for catching errors and undefined states.
The default transition can be removed by setting the attribute
default_transition to None. """
self.default_transition = (action, next_state)
def get_transition (self, input_symbol, state):
"""This returns (action, next state) given an input_symbol and state.
This does not modify the FSM state, so calling this method has no side
effects. Normally you do not call this method directly. It is called by
process().
The sequence of steps to check for a defined transition goes from the
most specific to the least specific.
1. Check state_transitions[] that match exactly the tuple,
(input_symbol, state)
2. Check state_transitions_any[] that match (state)
In other words, match a specific state and ANY input_symbol.
3. Check if the default_transition is defined.
This catches any input_symbol and any state.
This is a handler for errors, undefined states, or defaults.
4. No transition was defined. If we get here then raise an exception.
"""
        if (input_symbol, state) in self.state_transitions:
            return self.state_transitions[(input_symbol, state)]
        elif state in self.state_transitions_any:
return self.state_transitions_any[state]
elif self.default_transition is not None:
return self.default_transition
else:
raise ExceptionFSM ('Transition is undefined: (%s, %s).' %
(str(input_symbol), str(state)) )
def process (self, input_symbol):
"""This is the main method that you call to process input. This may
cause the FSM to change state and call an action. This method calls
get_transition() to find the action and next_state associated with the
input_symbol and current_state. If the action is None then the action
is not called and only the current state is changed. This method
processes one complete input symbol. You can process a list of symbols
(or a string) by calling process_list(). """
self.input_symbol = input_symbol
(self.action, self.next_state) = self.get_transition (self.input_symbol, self.current_state)
if self.action is not None:
self.action (self)
self.current_state = self.next_state
self.next_state = None
def process_list (self, input_symbols):
"""This takes a list and sends each element to process(). The list may
be a string or any iterable object. """
for s in input_symbols:
self.process (s)
##############################################################################
# The following is an example that demonstrates the use of the FSM class to
# process an RPN expression. Run this module from the command line. You will
# get a prompt > for input. Enter an RPN Expression. Numbers may be integers.
# Operators are * / + - Use the = sign to evaluate and print the expression.
# For example:
#
# 167 3 2 2 * * * 1 - =
#
# will print:
#
# 2003
##############################################################################
import sys, os, traceback, optparse, time, string
#
# These define the actions.
# Note that "memory" is a list being used as a stack.
#
def BeginBuildNumber (fsm):
fsm.memory.append (fsm.input_symbol)
def BuildNumber (fsm):
s = fsm.memory.pop ()
s = s + fsm.input_symbol
fsm.memory.append (s)
def EndBuildNumber (fsm):
s = fsm.memory.pop ()
fsm.memory.append (int(s))
def DoOperator (fsm):
ar = fsm.memory.pop()
al = fsm.memory.pop()
if fsm.input_symbol == '+':
fsm.memory.append (al + ar)
elif fsm.input_symbol == '-':
fsm.memory.append (al - ar)
elif fsm.input_symbol == '*':
fsm.memory.append (al * ar)
elif fsm.input_symbol == '/':
fsm.memory.append (al / ar)
def DoEqual (fsm):
print str(fsm.memory.pop())
def Error (fsm):
print 'That does not compute.'
print str(fsm.input_symbol)
def main():
"""This is where the example starts and the FSM state transitions are
defined. Note that states are strings (such as 'INIT'). This is not
necessary, but it makes the example easier to read. """
f = FSM ('INIT', []) # "memory" will be used as a stack.
f.set_default_transition (Error, 'INIT')
f.add_transition_any ('INIT', None, 'INIT')
f.add_transition ('=', 'INIT', DoEqual, 'INIT')
f.add_transition_list (string.digits, 'INIT', BeginBuildNumber, 'BUILDING_NUMBER')
f.add_transition_list (string.digits, 'BUILDING_NUMBER', BuildNumber, 'BUILDING_NUMBER')
f.add_transition_list (string.whitespace, 'BUILDING_NUMBER', EndBuildNumber, 'INIT')
f.add_transition_list ('+-*/', 'INIT', DoOperator, 'INIT')
print
print 'Enter an RPN Expression.'
print 'Numbers may be integers. Operators are * / + -'
print 'Use the = sign to evaluate and print the expression.'
print 'For example: '
print ' 167 3 2 2 * * * 1 - ='
inputstr = raw_input ('> ')
f.process_list(inputstr)
if __name__ == '__main__':
try:
start_time = time.time()
parser = optparse.OptionParser(formatter=optparse.TitledHelpFormatter(), usage=globals()['__doc__'], version='$Id$')
parser.add_option ('-v', '--verbose', action='store_true', default=False, help='verbose output')
(options, args) = parser.parse_args()
if options.verbose: print time.asctime()
main()
if options.verbose: print time.asctime()
if options.verbose: print 'TOTAL TIME IN MINUTES:',
if options.verbose: print (time.time() - start_time) / 60.0
sys.exit(0)
except KeyboardInterrupt, e: # Ctrl-C
raise e
except SystemExit, e: # sys.exit()
raise e
except Exception, e:
print 'ERROR, UNEXPECTED EXCEPTION'
print str(e)
traceback.print_exc()
os._exit(1)
|
gpl-3.0
|
google-research/google-research
|
psycholab/examples/prisoners_dilemma.py
|
1
|
3208
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""prisoners dilemma grid game.
this example comes from the games introduced in paper
A Polynomial-time Nash Equilibrium Algorithm for Repeated Stochastic Games
by Enrique Munoz de Cote and Michael L. Littman
"""
import numpy as np
from psycholab import game
from psycholab import visualizer
def create_game():
"""Create the prisoners dilemma game."""
art = ['####d####',
'a A B b',
'#########'
]
item_a = game.Item(color=(0, 254, 254))
item_b = game.Item(color=(254, 254, 0))
item_d = game.Item(color=(0, 254, 254))
items = {'a': item_a, 'b': item_b, 'd': item_d}
player_a = game.Player(color=(0, 100, 254))
player_b = game.Player(color=(254, 100, 0))
players = {'A': player_a, 'B': player_b}
env = game.Game(art, items, players, tabular=True)
env.display()
env.add_reward('A_moves', {'A': -1})
env.add_reward('B_moves', {'B': -1})
env.add_reward('A_collects_a', {'A': 100})
env.add_reward('B_collects_b', {'B': 100})
env.add_reward('A_collects_d', {'A': 100})
env.add_reward('B_collects_d', {'B': 100})
env.add_terminaison('A_collects_d')
env.add_terminaison('B_collects_d')
env.add_terminaison('A_collects_a')
env.add_terminaison('B_collects_b')
# for frame-by-frame visualization:
env = visualizer.Visualizer(env, fps=2, by_episode=False)
# for fast visualization:
# env = visualizer.Visualizer(env, fps=1000, by_episode=True)
return env
def run_game(env, max_step):
"""Runs `max_step` iterations of the game `env` and print players returns."""
obs = env.reset()
# discrete_state converts observations into states
# 'obs' contains all agent x, y positions.
# 'state' is an integer representing the combination of
# all agents x, y positions.
state = env.discrete_state(obs)
transitions = []
returns = 0
episode = 0
for _ in range(max_step):
# Pick a random action for all agents:
actions = np.random.choice(range(env.num_actions), env.num_players)
# Environment step:
obs, rewards, done, info = env.step(actions)
new_state = env.discrete_state(obs)
transitions.append((state, new_state, rewards, actions, done, info))
state = new_state
# Sum rewards:
returns += rewards
if done:
# The last episode is finished:
episode += 1
print('episode', episode, 'returns', returns)
# Reset env for new episode
obs = env.reset()
# state = env.discrete_state(obs)
returns = 0
# Close visualizer:
env.finish()
if __name__ == '__main__':
game_env = create_game()
run_game(game_env, max_step=200000)
|
apache-2.0
|
eltonsantos/django
|
django/contrib/auth/backends.py
|
37
|
5066
|
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
class ModelBackend(object):
"""
Authenticates against django.contrib.auth.models.User.
"""
def authenticate(self, username=None, password=None, **kwargs):
UserModel = get_user_model()
if username is None:
username = kwargs.get(UserModel.USERNAME_FIELD)
try:
user = UserModel._default_manager.get_by_natural_key(username)
if user.check_password(password):
return user
except UserModel.DoesNotExist:
return None
def get_group_permissions(self, user_obj, obj=None):
"""
Returns a set of permission strings that this user has through his/her
groups.
"""
if user_obj.is_anonymous() or obj is not None:
return set()
if not hasattr(user_obj, '_group_perm_cache'):
if user_obj.is_superuser:
perms = Permission.objects.all()
else:
user_groups_field = get_user_model()._meta.get_field('groups')
user_groups_query = 'group__%s' % user_groups_field.related_query_name()
perms = Permission.objects.filter(**{user_groups_query: user_obj})
perms = perms.values_list('content_type__app_label', 'codename').order_by()
user_obj._group_perm_cache = set(["%s.%s" % (ct, name) for ct, name in perms])
return user_obj._group_perm_cache
def get_all_permissions(self, user_obj, obj=None):
if user_obj.is_anonymous() or obj is not None:
return set()
if not hasattr(user_obj, '_perm_cache'):
user_obj._perm_cache = set(["%s.%s" % (p.content_type.app_label, p.codename) for p in user_obj.user_permissions.select_related()])
user_obj._perm_cache.update(self.get_group_permissions(user_obj))
return user_obj._perm_cache
def has_perm(self, user_obj, perm, obj=None):
if not user_obj.is_active:
return False
return perm in self.get_all_permissions(user_obj, obj)
def has_module_perms(self, user_obj, app_label):
"""
Returns True if user_obj has any permissions in the given app_label.
"""
if not user_obj.is_active:
return False
for perm in self.get_all_permissions(user_obj):
if perm[:perm.index('.')] == app_label:
return True
return False
def get_user(self, user_id):
try:
UserModel = get_user_model()
return UserModel._default_manager.get(pk=user_id)
except UserModel.DoesNotExist:
return None
class RemoteUserBackend(ModelBackend):
"""
This backend is to be used in conjunction with the ``RemoteUserMiddleware``
found in the middleware module of this package, and is used when the server
is handling authentication outside of Django.
By default, the ``authenticate`` method creates ``User`` objects for
usernames that don't already exist in the database. Subclasses can disable
this behavior by setting the ``create_unknown_user`` attribute to
``False``.
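    To use it, add the backend to the ``AUTHENTICATION_BACKENDS`` setting,
    for example::

        AUTHENTICATION_BACKENDS = (
            'django.contrib.auth.backends.RemoteUserBackend',
        )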
"""
# Create a User object if not already in the database?
create_unknown_user = True
def authenticate(self, remote_user):
"""
The username passed as ``remote_user`` is considered trusted. This
method simply returns the ``User`` object with the given username,
creating a new ``User`` object if ``create_unknown_user`` is ``True``.
Returns None if ``create_unknown_user`` is ``False`` and a ``User``
object with the given username is not found in the database.
"""
if not remote_user:
return
user = None
username = self.clean_username(remote_user)
UserModel = get_user_model()
# Note that this could be accomplished in one try-except clause, but
# instead we use get_or_create when creating unknown users since it has
# built-in safeguards for multiple threads.
if self.create_unknown_user:
user, created = UserModel.objects.get_or_create(**{
UserModel.USERNAME_FIELD: username
})
if created:
user = self.configure_user(user)
else:
try:
user = UserModel.objects.get_by_natural_key(username)
except UserModel.DoesNotExist:
pass
return user
def clean_username(self, username):
"""
Performs any cleaning on the "username" prior to using it to get or
create the user object. Returns the cleaned username.
By default, returns the username unchanged.
"""
return username
def configure_user(self, user):
"""
Configures a user after creation and returns the updated user.
By default, returns the user unmodified.
"""
return user
|
bsd-3-clause
|
mith1979/ansible_automation
|
applied_python/applied_python/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/_collections.py
|
172
|
6278
|
from collections import Mapping, MutableMapping
try:
from threading import RLock
except ImportError: # Platform-specific: No threads available
class RLock:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
pass
try: # Python 2.7+
from collections import OrderedDict
except ImportError:
from .packages.ordered_dict import OrderedDict
from .packages.six import iterkeys, itervalues
__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict']
_Null = object()
class RecentlyUsedContainer(MutableMapping):
"""
Provides a thread-safe dict-like container which maintains up to
``maxsize`` keys while throwing away the least-recently-used keys beyond
``maxsize``.
:param maxsize:
Maximum number of recent elements to retain.
:param dispose_func:
Every time an item is evicted from the container,
``dispose_func(value)`` is called. Callback which will get called
"""
ContainerCls = OrderedDict
def __init__(self, maxsize=10, dispose_func=None):
self._maxsize = maxsize
self.dispose_func = dispose_func
self._container = self.ContainerCls()
self.lock = RLock()
def __getitem__(self, key):
# Re-insert the item, moving it to the end of the eviction line.
with self.lock:
item = self._container.pop(key)
self._container[key] = item
return item
def __setitem__(self, key, value):
evicted_value = _Null
with self.lock:
# Possibly evict the existing value of 'key'
evicted_value = self._container.get(key, _Null)
self._container[key] = value
# If we didn't evict an existing value, we might have to evict the
# least recently used item from the beginning of the container.
if len(self._container) > self._maxsize:
_key, evicted_value = self._container.popitem(last=False)
if self.dispose_func and evicted_value is not _Null:
self.dispose_func(evicted_value)
def __delitem__(self, key):
with self.lock:
value = self._container.pop(key)
if self.dispose_func:
self.dispose_func(value)
def __len__(self):
with self.lock:
return len(self._container)
def __iter__(self):
raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
def clear(self):
with self.lock:
# Copy pointers to all values, then wipe the mapping
values = list(itervalues(self._container))
self._container.clear()
if self.dispose_func:
for value in values:
self.dispose_func(value)
def keys(self):
with self.lock:
return list(iterkeys(self._container))
class HTTPHeaderDict(MutableMapping):
"""
:param headers:
An iterable of field-value pairs. Must not contain multiple field names
when compared case-insensitively.
:param kwargs:
Additional field-value pairs to pass in to ``dict.update``.
A ``dict`` like container for storing HTTP Headers.
Field names are stored and compared case-insensitively in compliance with
RFC 7230. Iteration provides the first case-sensitive key seen for each
case-insensitive pair.
Using ``__setitem__`` syntax overwrites fields that compare equal
case-insensitively in order to maintain ``dict``'s api. For fields that
compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
in a loop.
If multiple fields that are equal case-insensitively are passed to the
constructor or ``.update``, the behavior is undefined and some will be
lost.
>>> headers = HTTPHeaderDict()
>>> headers.add('Set-Cookie', 'foo=bar')
>>> headers.add('set-cookie', 'baz=quxx')
>>> headers['content-length'] = '7'
>>> headers['SET-cookie']
'foo=bar, baz=quxx'
>>> headers['Content-Length']
'7'
If you want to access the raw headers with their original casing
for debugging purposes you can access the private ``._data`` attribute
which is a normal python ``dict`` that maps the case-insensitive key to a
list of tuples stored as (case-sensitive-original-name, value). Using the
structure from above as our example:
>>> headers._data
{'set-cookie': [('Set-Cookie', 'foo=bar'), ('set-cookie', 'baz=quxx')],
'content-length': [('content-length', '7')]}
"""
def __init__(self, headers=None, **kwargs):
self._data = {}
if headers is None:
headers = {}
self.update(headers, **kwargs)
def add(self, key, value):
"""Adds a (name, value) pair, doesn't overwrite the value if it already
exists.
>>> headers = HTTPHeaderDict(foo='bar')
>>> headers.add('Foo', 'baz')
>>> headers['foo']
'bar, baz'
"""
self._data.setdefault(key.lower(), []).append((key, value))
def getlist(self, key):
"""Returns a list of all the values for the named field. Returns an
empty list if the key doesn't exist."""
return self[key].split(', ') if key in self else []
def copy(self):
h = HTTPHeaderDict()
for key in self._data:
for rawkey, value in self._data[key]:
h.add(rawkey, value)
return h
def __eq__(self, other):
if not isinstance(other, Mapping):
return False
other = HTTPHeaderDict(other)
return dict((k1, self[k1]) for k1 in self._data) == \
dict((k2, other[k2]) for k2 in other._data)
def __getitem__(self, key):
values = self._data[key.lower()]
return ', '.join(value[1] for value in values)
def __setitem__(self, key, value):
self._data[key.lower()] = [(key, value)]
def __delitem__(self, key):
del self._data[key.lower()]
def __len__(self):
return len(self._data)
def __iter__(self):
for headers in itervalues(self._data):
yield headers[0][0]
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, dict(self.items()))
|
apache-2.0
|
pbmanis/acq4
|
acq4/pyqtgraph/reload.py
|
3
|
19191
|
# -*- coding: utf-8 -*-
"""
Magic Reload Library
Luke Campagnola 2010
Python reload function that actually works (the way you expect it to)
- No re-importing necessary
- Modules can be reloaded in any order
- Replaces functions and methods with their updated code
- Changes instances to use updated classes
- Automatically decides which modules to update by comparing file modification times
Does NOT:
- re-initialize existing instances, even if __init__ changes
- update references to any module-level objects
ie, this does not reload correctly:
from module import someObject
print someObject
..but you can use this instead: (this works even for the builtin reload)
import module
print module.someObject
"""
from __future__ import print_function
import inspect, os, sys, gc, traceback, types
from .debug import printExc
try:
from importlib import reload as orig_reload
except ImportError:
orig_reload = reload
py3 = sys.version_info >= (3,)
def reloadAll(prefix=None, debug=False):
"""Automatically reload everything whose __file__ begins with prefix.
- Skips reload if the file has not been updated (if .pyc is newer than .py)
- if prefix is None, checks all loaded modules
"""
failed = []
changed = []
for modName, mod in list(sys.modules.items()): ## don't use iteritems; size may change during reload
if not inspect.ismodule(mod):
continue
if modName == '__main__':
continue
## Ignore if the file name does not start with prefix
if not hasattr(mod, '__file__') or os.path.splitext(mod.__file__)[1] not in ['.py', '.pyc']:
continue
if prefix is not None and mod.__file__[:len(prefix)] != prefix:
continue
## ignore if the .pyc is newer than the .py (or if there is no pyc or py)
py = os.path.splitext(mod.__file__)[0] + '.py'
if not os.path.isfile(py):
# skip modules that lie about their __file__
continue
pyc = py + 'c'
if py not in changed and os.path.isfile(pyc) and os.stat(pyc).st_mtime >= os.stat(py).st_mtime:
#if debug:
#print "Ignoring module %s; unchanged" % str(mod)
continue
            changed.append(py) ## keep track of which modules have changed to ensure that duplicate-import modules get reloaded.
try:
reload(mod, debug=debug)
except:
printExc("Error while reloading module %s, skipping\n" % mod)
failed.append(mod.__name__)
if len(failed) > 0:
raise Exception("Some modules failed to reload: %s" % ', '.join(failed))
def reload(module, debug=False, lists=False, dicts=False):
"""Replacement for the builtin reload function:
- Reloads the module as usual
- Updates all old functions and class methods to use the new code
- Updates all instances of each modified class to use the new class
- Can update lists and dicts, but this is disabled by default
- Requires that class and function names have not changed
"""
if debug:
print("Reloading %s" % str(module))
## make a copy of the old module dictionary, reload, then grab the new module dictionary for comparison
oldDict = module.__dict__.copy()
orig_reload(module)
newDict = module.__dict__
## Allow modules access to the old dictionary after they reload
if hasattr(module, '__reload__'):
module.__reload__(oldDict)
## compare old and new elements from each dict; update where appropriate
for k in oldDict:
old = oldDict[k]
new = newDict.get(k, None)
if old is new or new is None:
continue
if inspect.isclass(old):
if debug:
print(" Updating class %s.%s (0x%x -> 0x%x)" % (module.__name__, k, id(old), id(new)))
updateClass(old, new, debug)
# don't put this inside updateClass because it is reentrant.
new.__previous_reload_version__ = old
elif inspect.isfunction(old):
depth = updateFunction(old, new, debug)
if debug:
extra = ""
if depth > 0:
extra = " (and %d previous versions)" % depth
print(" Updating function %s.%s%s" % (module.__name__, k, extra))
elif lists and isinstance(old, list):
            l = len(old)
old.extend(new)
for i in range(l):
old.pop(0)
        elif dicts and isinstance(old, dict):
            old.update(new)
            for key in list(old):  ## iterate over a copy; deleting while iterating raises in py3
                if key not in new:
                    del old[key]
## For functions:
## 1) update the code and defaults to new versions.
## 2) keep a reference to the previous version so ALL versions get updated for every reload
def updateFunction(old, new, debug, depth=0, visited=None):
#if debug and depth > 0:
#print " -> also updating previous version", old, " -> ", new
old.__code__ = new.__code__
old.__defaults__ = new.__defaults__
    if hasattr(old, '__kwdefaults__'):
old.__kwdefaults__ = new.__kwdefaults__
old.__doc__ = new.__doc__
if visited is None:
visited = []
    if old in visited:
        return depth  ## already visited; avoid infinite recursion on cyclic version chains
visited.append(old)
## finally, update any previous versions still hanging around..
if hasattr(old, '__previous_reload_version__'):
maxDepth = updateFunction(old.__previous_reload_version__, new, debug, depth=depth+1, visited=visited)
else:
maxDepth = depth
## We need to keep a pointer to the previous version so we remember to update BOTH
## when the next reload comes around.
if depth == 0:
new.__previous_reload_version__ = old
return maxDepth
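## Hedged standalone illustration of the core trick above: swapping __code__
## retargets every existing reference to a function.
##
##     def _before(): return 1
##     def _after(): return 2
##     ref = _before
##     _before.__code__ = _after.__code__
##     assert ref() == 2   ## the old reference now runs the new code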
## For classes:
## 1) find all instances of the old class and set instance.__class__ to the new class
## 2) update all old class methods to use code from the new class methods
def updateClass(old, new, debug):
    ## Track down all instances and subclasses of old
refs = gc.get_referrers(old)
for ref in refs:
try:
if isinstance(ref, old) and ref.__class__ is old:
ref.__class__ = new
if debug:
print(" Changed class for %s" % safeStr(ref))
elif inspect.isclass(ref) and issubclass(ref, old) and old in ref.__bases__:
ind = ref.__bases__.index(old)
## Does not work:
#ref.__bases__ = ref.__bases__[:ind] + (new,) + ref.__bases__[ind+1:]
## reason: Even though we change the code on methods, they remain bound
## to their old classes (changing im_class is not allowed). Instead,
## we have to update the __bases__ such that this class will be allowed
## as an argument to older methods.
## This seems to work. Is there any reason not to?
## Note that every time we reload, the class hierarchy becomes more complex.
## (and I presume this may slow things down?)
newBases = ref.__bases__[:ind] + (new,old) + ref.__bases__[ind+1:]
try:
ref.__bases__ = newBases
except TypeError:
print(" Error setting bases for class %s" % ref)
print(" old bases: %s" % repr(ref.__bases__))
print(" new bases: %s" % repr(newBases))
raise
if debug:
print(" Changed superclass for %s" % safeStr(ref))
#else:
#if debug:
#print " Ignoring reference", type(ref)
except Exception:
print("Error updating reference (%s) for class change (%s -> %s)" % (safeStr(ref), safeStr(old), safeStr(new)))
raise
## update all class methods to use new code.
## Generally this is not needed since instances already know about the new class,
## but it fixes a few specific cases (pyqt signals, for one)
for attr in dir(old):
oa = getattr(old, attr)
if (py3 and inspect.isfunction(oa)) or inspect.ismethod(oa):
# note python2 has unbound methods, whereas python3 just uses plain functions
try:
na = getattr(new, attr)
except AttributeError:
if debug:
print(" Skipping method update for %s; new class does not have this attribute" % attr)
continue
ofunc = getattr(oa, '__func__', oa) # in py2 we have to get the __func__ from unbound method,
nfunc = getattr(na, '__func__', na) # in py3 the attribute IS the function
if ofunc is not nfunc:
depth = updateFunction(ofunc, nfunc, debug)
if not hasattr(nfunc, '__previous_reload_method__'):
nfunc.__previous_reload_method__ = oa # important for managing signal connection
#oa.__class__ = new ## bind old method to new class ## not allowed
if debug:
extra = ""
if depth > 0:
extra = " (and %d previous versions)" % depth
print(" Updating method %s%s" % (attr, extra))
## And copy in new functions that didn't exist previously
for attr in dir(new):
if attr == '__previous_reload_version__':
continue
if not hasattr(old, attr):
if debug:
print(" Adding missing attribute %s" % attr)
setattr(old, attr, getattr(new, attr))
## finally, update any previous versions still hanging around..
if hasattr(old, '__previous_reload_version__'):
updateClass(old.__previous_reload_version__, new, debug)
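## Hedged illustration of the __bases__ trick above: inserting the new class
## while *keeping* the old one means methods still bound to the old class
## continue to accept instances of the subclass.
##
##     class OldA(object): pass
##     class NewA(object): pass
##     class Sub(OldA): pass
##     Sub.__bases__ = (NewA, OldA)
##     assert issubclass(Sub, OldA) and issubclass(Sub, NewA)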
## It is possible to build classes for which str(obj) just causes an exception.
## Avoid thusly:
def safeStr(obj):
try:
s = str(obj)
except Exception:
try:
s = repr(obj)
except Exception:
s = "<instance of %s at 0x%x>" % (safeStr(type(obj)), id(obj))
return s
def getPreviousVersion(obj):
"""Return the previous version of *obj*, or None if this object has not
been reloaded.
"""
if isinstance(obj, type) or inspect.isfunction(obj):
return getattr(obj, '__previous_reload_version__', None)
elif inspect.ismethod(obj):
if obj.__self__ is None:
# unbound method
return getattr(obj.__func__, '__previous_reload_method__', None)
else:
oldmethod = getattr(obj.__func__, '__previous_reload_method__', None)
if oldmethod is None:
return None
self = obj.__self__
oldfunc = getattr(oldmethod, '__func__', oldmethod)
if hasattr(oldmethod, 'im_class'):
# python 2
cls = oldmethod.im_class
return types.MethodType(oldfunc, self, cls)
else:
# python 3
return types.MethodType(oldfunc, self)
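## Hedged example (names are illustrative): after one reload of a module ``m``,
##     getPreviousVersion(m.SomeClass)  -> the pre-reload class object
##     getPreviousVersion(m.some_func)  -> the pre-reload function
## and None for anything that has never been reloaded.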
## Tests:
# write modules to disk, import, then re-write and run again
if __name__ == '__main__':
doQtTest = True
try:
from PyQt4 import QtCore
if not hasattr(QtCore, 'Signal'):
QtCore.Signal = QtCore.pyqtSignal
#app = QtGui.QApplication([])
class Btn(QtCore.QObject):
sig = QtCore.Signal()
def emit(self):
self.sig.emit()
btn = Btn()
    except Exception:
        print("Error; skipping Qt tests")
        doQtTest = False
import os
if not os.path.isdir('test1'):
os.mkdir('test1')
    open('test1/__init__.py', 'w').close()
modFile1 = "test1/test1.py"
modCode1 = """
import sys
class A(object):
def __init__(self, msg):
object.__init__(self)
self.msg = msg
def fn(self, pfx = ""):
print(pfx+"A class: %%s %%s" %% (str(self.__class__), str(id(self.__class__))))
print(pfx+" %%s: %d" %% self.msg)
class B(A):
def fn(self, pfx=""):
print(pfx+"B class:", self.__class__, id(self.__class__))
print(pfx+" %%s: %d" %% self.msg)
print(pfx+" calling superclass.. (%%s)" %% id(A) )
A.fn(self, " ")
"""
modFile2 = "test2.py"
modCode2 = """
from test1.test1 import A
from test1.test1 import B
a1 = A("ax1")
b1 = B("bx1")
class C(A):
def __init__(self, msg):
#print "| C init:"
#print "| C.__bases__ = ", map(id, C.__bases__)
#print "| A:", id(A)
#print "| A.__init__ = ", id(A.__init__.im_func), id(A.__init__.im_func.__code__), id(A.__init__.im_class)
A.__init__(self, msg + "(init from C)")
def fn():
print("fn: %s")
"""
open(modFile1, 'w').write(modCode1%(1,1))
open(modFile2, 'w').write(modCode2%"message 1")
import test1.test1 as test1
import test2
print("Test 1 originals:")
A1 = test1.A
B1 = test1.B
a1 = test1.A("a1")
b1 = test1.B("b1")
a1.fn()
b1.fn()
#print "function IDs a1 bound method: %d a1 func: %d a1 class: %d b1 func: %d b1 class: %d" % (id(a1.fn), id(a1.fn.im_func), id(a1.fn.im_class), id(b1.fn.im_func), id(b1.fn.im_class))
from test2 import fn, C
if doQtTest:
print("Button test before:")
btn.sig.connect(fn)
btn.sig.connect(a1.fn)
btn.emit()
#btn.sig.emit()
print("")
#print "a1.fn referrers:", sys.getrefcount(a1.fn.im_func), gc.get_referrers(a1.fn.im_func)
print("Test2 before reload:")
fn()
oldfn = fn
test2.a1.fn()
test2.b1.fn()
c1 = test2.C('c1')
c1.fn()
os.remove(modFile1+'c')
open(modFile1, 'w').write(modCode1%(2,2))
print("\n----RELOAD test1-----\n")
reloadAll(os.path.abspath(__file__)[:10], debug=True)
print("Subclass test:")
c2 = test2.C('c2')
c2.fn()
os.remove(modFile2+'c')
open(modFile2, 'w').write(modCode2%"message 2")
print("\n----RELOAD test2-----\n")
reloadAll(os.path.abspath(__file__)[:10], debug=True)
if doQtTest:
print("Button test after:")
btn.emit()
#btn.sig.emit()
#print "a1.fn referrers:", sys.getrefcount(a1.fn.im_func), gc.get_referrers(a1.fn.im_func)
print("Test2 after reload:")
fn()
test2.a1.fn()
test2.b1.fn()
print("\n==> Test 1 Old instances:")
a1.fn()
b1.fn()
c1.fn()
#print "function IDs a1 bound method: %d a1 func: %d a1 class: %d b1 func: %d b1 class: %d" % (id(a1.fn), id(a1.fn.im_func), id(a1.fn.im_class), id(b1.fn.im_func), id(b1.fn.im_class))
print("\n==> Test 1 New instances:")
a2 = test1.A("a2")
b2 = test1.B("b2")
a2.fn()
b2.fn()
c2 = test2.C('c2')
c2.fn()
#print "function IDs a1 bound method: %d a1 func: %d a1 class: %d b1 func: %d b1 class: %d" % (id(a1.fn), id(a1.fn.im_func), id(a1.fn.im_class), id(b1.fn.im_func), id(b1.fn.im_class))
os.remove(modFile1+'c')
os.remove(modFile2+'c')
open(modFile1, 'w').write(modCode1%(3,3))
open(modFile2, 'w').write(modCode2%"message 3")
print("\n----RELOAD-----\n")
reloadAll(os.path.abspath(__file__)[:10], debug=True)
if doQtTest:
print("Button test after:")
btn.emit()
#btn.sig.emit()
#print "a1.fn referrers:", sys.getrefcount(a1.fn.im_func), gc.get_referrers(a1.fn.im_func)
print("Test2 after reload:")
fn()
test2.a1.fn()
test2.b1.fn()
print("\n==> Test 1 Old instances:")
a1.fn()
b1.fn()
print("function IDs a1 bound method: %d a1 func: %d a1 class: %d b1 func: %d b1 class: %d" % (id(a1.fn), id(a1.fn.__func__), id(a1.fn.__self__.__class__), id(b1.fn.__func__), id(b1.fn.__self__.__class__)))
print("\n==> Test 1 New instances:")
a2 = test1.A("a2")
b2 = test1.B("b2")
a2.fn()
b2.fn()
print("function IDs a1 bound method: %d a1 func: %d a1 class: %d b1 func: %d b1 class: %d" % (id(a1.fn), id(a1.fn.__func__), id(a1.fn.__self__.__class__), id(b1.fn.__func__), id(b1.fn.__self__.__class__)))
os.remove(modFile1)
os.remove(modFile2)
os.remove(modFile1+'c')
os.remove(modFile2+'c')
os.system('rm -r test1')
#
# Failure graveyard ahead:
#
"""Reload Importer:
Hooks into import system to
1) keep a record of module dependencies as they are imported
2) make sure modules are always reloaded in correct order
3) update old classes and functions to use reloaded code"""
#import imp, sys
## python's import hook mechanism doesn't work since we need to be
## informed every time there is an import statement, not just for new imports
#class ReloadImporter:
#def __init__(self):
#self.depth = 0
#def find_module(self, name, path):
#print " "*self.depth + "find: ", name, path
##if name == 'PyQt4' and path is None:
##print "PyQt4 -> PySide"
##self.modData = imp.find_module('PySide')
##return self
##return None ## return none to allow the import to proceed normally; return self to intercept with load_module
#self.modData = imp.find_module(name, path)
#self.depth += 1
##sys.path_importer_cache = {}
#return self
#def load_module(self, name):
#mod = imp.load_module(name, *self.modData)
#self.depth -= 1
#print " "*self.depth + "load: ", name
#return mod
#def pathHook(path):
#print "path hook:", path
#raise ImportError
#sys.path_hooks.append(pathHook)
#sys.meta_path.append(ReloadImporter())
### replace __import__ with a wrapper that tracks module dependencies
#modDeps = {}
#reloadModule = None
#origImport = __builtins__.__import__
#def _import(name, globals=None, locals=None, fromlist=None, level=-1, stack=[]):
### Note that stack behaves as a static variable.
##print " "*len(importStack) + "import %s" % args[0]
#stack.append(set())
#mod = origImport(name, globals, locals, fromlist, level)
#deps = stack.pop()
#if len(stack) > 0:
#stack[-1].add(mod)
#elif reloadModule is not None: ## If this is the top level import AND we're inside a module reload
#modDeps[reloadModule].add(mod)
#if mod in modDeps:
#modDeps[mod] |= deps
#else:
#modDeps[mod] = deps
#return mod
#__builtins__.__import__ = _import
### replace
#origReload = __builtins__.reload
#def _reload(mod):
#reloadModule = mod
#ret = origReload(mod)
#reloadModule = None
#return ret
#__builtins__.reload = _reload
#def reload(mod, visited=None):
#if visited is None:
#visited = set()
#if mod in visited:
#return
#visited.add(mod)
#for dep in modDeps.get(mod, []):
#reload(dep, visited)
#__builtins__.reload(mod)
|
mit
|
chamikaramj/beam
|
sdks/python/apache_beam/utils/pipeline_options_validator_test.py
|
9
|
11571
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the pipeline options validator module."""
import logging
import unittest
from apache_beam.internal import pickler
from apache_beam.utils.pipeline_options import PipelineOptions
from apache_beam.utils.pipeline_options_validator import PipelineOptionsValidator
from hamcrest.core.base_matcher import BaseMatcher
# Mock runners to use for validations.
class MockRunners(object):
class DataflowRunner(object):
pass
class TestDataflowRunner(object):
pass
class OtherRunner(object):
pass
# Matcher that always passes for testing on_success_matcher option
class AlwaysPassMatcher(BaseMatcher):
def _matches(self, item):
return True
class SetupTest(unittest.TestCase):
def check_errors_for_arguments(self, errors, args):
"""Checks that there is exactly one error for each given argument."""
missing = []
remaining = list(errors)
for arg in args:
found = False
for error in remaining:
if arg in error:
remaining.remove(error)
found = True
break
if not found:
missing.append('Missing error for: ' + arg)
# Return missing and remaining (not matched) errors.
return missing + remaining
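  # Hedged worked example of the helper's contract:
  #   check_errors_for_arguments(['Missing project', 'Bad temp_location'],
  #                              ['project', 'temp_location'])  ->  []
  # One error is consumed per argument; unmatched errors and missing
  # arguments are returned, so callers can assertEqual(..., []).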
def test_local_runner(self):
runner = MockRunners.OtherRunner()
options = PipelineOptions([])
validator = PipelineOptionsValidator(options, runner)
errors = validator.validate()
self.assertEqual(len(errors), 0)
def test_missing_required_options(self):
options = PipelineOptions([''])
runner = MockRunners.DataflowRunner()
validator = PipelineOptionsValidator(options, runner)
errors = validator.validate()
self.assertEqual(
self.check_errors_for_arguments(
errors,
['project', 'staging_location', 'temp_location']),
[])
def test_gcs_path(self):
def get_validator(temp_location, staging_location):
options = ['--project=example:example', '--job_name=job']
if temp_location is not None:
options.append('--temp_location=' + temp_location)
if staging_location is not None:
options.append('--staging_location=' + staging_location)
pipeline_options = PipelineOptions(options)
runner = MockRunners.DataflowRunner()
validator = PipelineOptionsValidator(pipeline_options, runner)
return validator
test_cases = [
{'temp_location': None,
'staging_location': 'gs://foo/bar',
'errors': ['temp_location']},
{'temp_location': None,
'staging_location': None,
'errors': ['staging_location', 'temp_location']},
{'temp_location': 'gs://foo/bar',
'staging_location': None,
'errors': []},
{'temp_location': 'gs://foo/bar',
'staging_location': 'gs://ABC/bar',
'errors': ['staging_location']},
{'temp_location': 'gcs:/foo/bar',
'staging_location': 'gs://foo/bar',
'errors': ['temp_location']},
{'temp_location': 'gs:/foo/bar',
'staging_location': 'gs://foo/bar',
'errors': ['temp_location']},
{'temp_location': 'gs://ABC/bar',
'staging_location': 'gs://foo/bar',
'errors': ['temp_location']},
{'temp_location': 'gs://ABC/bar',
'staging_location': 'gs://foo/bar',
'errors': ['temp_location']},
{'temp_location': 'gs://foo',
'staging_location': 'gs://foo/bar',
'errors': ['temp_location']},
{'temp_location': 'gs://foo/',
'staging_location': 'gs://foo/bar',
'errors': []},
{'temp_location': 'gs://foo/bar',
'staging_location': 'gs://foo/bar',
'errors': []},
]
for case in test_cases:
errors = get_validator(case['temp_location'],
case['staging_location']).validate()
self.assertEqual(
self.check_errors_for_arguments(errors, case['errors']), [])
def test_project(self):
def get_validator(project):
options = ['--job_name=job', '--staging_location=gs://foo/bar',
'--temp_location=gs://foo/bar']
if project is not None:
options.append('--project=' + project)
pipeline_options = PipelineOptions(options)
runner = MockRunners.DataflowRunner()
validator = PipelineOptionsValidator(pipeline_options, runner)
return validator
test_cases = [
{'project': None, 'errors': ['project']},
{'project': '12345', 'errors': ['project']},
{'project': 'FOO', 'errors': ['project']},
{'project': 'foo:BAR', 'errors': ['project']},
{'project': 'fo', 'errors': ['project']},
{'project': 'foo', 'errors': []},
{'project': 'foo:bar', 'errors': []},
]
for case in test_cases:
errors = get_validator(case['project']).validate()
self.assertEqual(
self.check_errors_for_arguments(errors, case['errors']), [])
def test_job_name(self):
def get_validator(job_name):
options = ['--project=example:example', '--staging_location=gs://foo/bar',
'--temp_location=gs://foo/bar']
if job_name is not None:
options.append('--job_name=' + job_name)
pipeline_options = PipelineOptions(options)
runner = MockRunners.DataflowRunner()
validator = PipelineOptionsValidator(pipeline_options, runner)
return validator
test_cases = [
{'job_name': None, 'errors': []},
{'job_name': '12345', 'errors': ['job_name']},
{'job_name': 'FOO', 'errors': ['job_name']},
{'job_name': 'foo:bar', 'errors': ['job_name']},
{'job_name': 'fo', 'errors': []},
{'job_name': 'foo', 'errors': []},
]
for case in test_cases:
errors = get_validator(case['job_name']).validate()
self.assertEqual(
self.check_errors_for_arguments(errors, case['errors']), [])
def test_num_workers(self):
def get_validator(num_workers):
options = ['--project=example:example', '--job_name=job',
'--staging_location=gs://foo/bar',
'--temp_location=gs://foo/bar']
if num_workers is not None:
options.append('--num_workers=' + num_workers)
pipeline_options = PipelineOptions(options)
runner = MockRunners.DataflowRunner()
validator = PipelineOptionsValidator(pipeline_options, runner)
return validator
test_cases = [
{'num_workers': None, 'errors': []},
{'num_workers': '1', 'errors': []},
{'num_workers': '0', 'errors': ['num_workers']},
{'num_workers': '-1', 'errors': ['num_workers']},
]
for case in test_cases:
errors = get_validator(case['num_workers']).validate()
self.assertEqual(
self.check_errors_for_arguments(errors, case['errors']), [])
def test_is_service_runner(self):
test_cases = [
{
'runner': MockRunners.OtherRunner(),
'options': [],
'expected': False,
},
{
'runner': MockRunners.OtherRunner(),
'options': ['--dataflow_endpoint=https://dataflow.googleapis.com'],
'expected': False,
},
{
'runner': MockRunners.OtherRunner(),
'options': ['--dataflow_endpoint=https://dataflow.googleapis.com/'],
'expected': False,
},
{
'runner': MockRunners.DataflowRunner(),
'options': ['--dataflow_endpoint=https://another.service.com'],
'expected': False,
},
{
'runner': MockRunners.DataflowRunner(),
'options': ['--dataflow_endpoint=https://another.service.com/'],
'expected': False,
},
{
'runner': MockRunners.DataflowRunner(),
'options': ['--dataflow_endpoint=https://dataflow.googleapis.com'],
'expected': True,
},
{
'runner': MockRunners.DataflowRunner(),
'options': ['--dataflow_endpoint=https://dataflow.googleapis.com/'],
'expected': True,
},
{
'runner': MockRunners.DataflowRunner(),
'options': [],
'expected': True,
},
]
for case in test_cases:
validator = PipelineOptionsValidator(
PipelineOptions(case['options']), case['runner'])
self.assertEqual(validator.is_service_runner(), case['expected'])
def test_dataflow_job_file_and_template_location_mutually_exclusive(self):
runner = MockRunners.OtherRunner()
options = PipelineOptions([
'--template_location', 'abc',
'--dataflow_job_file', 'def'
])
validator = PipelineOptionsValidator(options, runner)
errors = validator.validate()
self.assertTrue(errors)
def test_validate_template_location(self):
runner = MockRunners.OtherRunner()
options = PipelineOptions([
'--template_location', 'abc',
])
validator = PipelineOptionsValidator(options, runner)
errors = validator.validate()
self.assertFalse(errors)
def test_validate_dataflow_job_file(self):
runner = MockRunners.OtherRunner()
options = PipelineOptions([
'--dataflow_job_file', 'abc'
])
validator = PipelineOptionsValidator(options, runner)
errors = validator.validate()
self.assertFalse(errors)
def test_streaming(self):
pipeline_options = PipelineOptions(['--streaming'])
runner = MockRunners.TestDataflowRunner()
validator = PipelineOptionsValidator(pipeline_options, runner)
errors = validator.validate()
self.assertIn('Streaming pipelines are not supported.', errors)
def test_test_matcher(self):
def get_validator(matcher):
options = ['--project=example:example',
'--job_name=job',
'--staging_location=gs://foo/bar',
'--temp_location=gs://foo/bar',]
if matcher:
options.append('--on_success_matcher=' + matcher)
pipeline_options = PipelineOptions(options)
runner = MockRunners.TestDataflowRunner()
return PipelineOptionsValidator(pipeline_options, runner)
test_case = [
{'on_success_matcher': None,
'errors': []},
{'on_success_matcher': pickler.dumps(AlwaysPassMatcher()),
'errors': []},
{'on_success_matcher': 'abc',
'errors': ['on_success_matcher']},
{'on_success_matcher': pickler.dumps(object),
'errors': ['on_success_matcher']},
]
for case in test_case:
errors = get_validator(case['on_success_matcher']).validate()
self.assertEqual(
self.check_errors_for_arguments(errors, case['errors']), [])
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
apache-2.0
|
Aristocles/CouchPotatoServer
|
libs/requests/packages/chardet/escprober.py
|
2936
|
3187
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .escsm import (HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel,
ISO2022KRSMModel)
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .compat import wrap_ord
class EscCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = [
CodingStateMachine(HZSMModel),
CodingStateMachine(ISO2022CNSMModel),
CodingStateMachine(ISO2022JPSMModel),
CodingStateMachine(ISO2022KRSMModel)
]
self.reset()
def reset(self):
CharSetProber.reset(self)
for codingSM in self._mCodingSM:
if not codingSM:
continue
codingSM.active = True
codingSM.reset()
self._mActiveSM = len(self._mCodingSM)
self._mDetectedCharset = None
def get_charset_name(self):
return self._mDetectedCharset
def get_confidence(self):
if self._mDetectedCharset:
return 0.99
else:
return 0.00
def feed(self, aBuf):
for c in aBuf:
# PY3K: aBuf is a byte array, so c is an int, not a byte
for codingSM in self._mCodingSM:
if not codingSM:
continue
if not codingSM.active:
continue
codingState = codingSM.next_state(wrap_ord(c))
if codingState == constants.eError:
codingSM.active = False
self._mActiveSM -= 1
if self._mActiveSM <= 0:
self._mState = constants.eNotMe
return self.get_state()
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
self._mDetectedCharset = codingSM.get_coding_state_machine() # nopep8
return self.get_state()
return self.get_state()
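# Hedged usage sketch: feed() consumes a byte buffer; once one escape state
# machine reaches eItsMe the prober reports that charset with 0.99 confidence.
#
#     prober = EscCharSetProber()
#     prober.feed(data)          # 'data' is an illustrative bytes buffer
#     if prober.get_charset_name():
#         print(prober.get_charset_name(), prober.get_confidence())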
|
gpl-3.0
|
javier3407/Plugin.Video.eljavihay
|
resources/lib/chardet/mbcssm.py
|
982
|
19608
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart, eError, eItsMe
# BIG5
BIG5_cls = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
4,4,4,4,4,4,4,4, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
4,3,3,3,3,3,3,3, # a0 - a7
3,3,3,3,3,3,3,3, # a8 - af
3,3,3,3,3,3,3,3, # b0 - b7
3,3,3,3,3,3,3,3, # b8 - bf
3,3,3,3,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
BIG5_st = (
eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,#08-0f
eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart#10-17
)
Big5CharLenTable = (0, 1, 1, 2, 0)
Big5SMModel = {'classTable': BIG5_cls,
'classFactor': 5,
'stateTable': BIG5_st,
'charLenTable': Big5CharLenTable,
'name': 'Big5'}
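# How these models are consumed (hedged sketch of chardet's
# CodingStateMachine.next_state): classTable maps a byte to a class, and
# stateTable is a flattened (num_states x classFactor) matrix:
#
#     byte_class = model['classTable'][byte]
#     curr_state = model['stateTable'][curr_state * model['classFactor'] + byte_class]
#
# charLenTable[byte_class] gives the byte length of the character whose
# first byte was just seen.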
# CP949
CP949_cls = (
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0, # 00 - 0f
1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1, # 10 - 1f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 20 - 2f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 30 - 3f
1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4, # 40 - 4f
4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 50 - 5f
1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5, # 60 - 6f
5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 70 - 7f
0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 80 - 8f
6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 90 - 9f
6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8, # a0 - af
7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7, # b0 - bf
7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2, # c0 - cf
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # d0 - df
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # e0 - ef
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0, # f0 - ff
)
CP949_st = (
#cls= 0 1 2 3 4 5 6 7 8 9 # previous state =
eError,eStart, 3,eError,eStart,eStart, 4, 5,eError, 6, # eStart
eError,eError,eError,eError,eError,eError,eError,eError,eError,eError, # eError
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe, # eItsMe
eError,eError,eStart,eStart,eError,eError,eError,eStart,eStart,eStart, # 3
eError,eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 4
eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 5
eError,eStart,eStart,eStart,eStart,eError,eError,eStart,eStart,eStart, # 6
)
CP949CharLenTable = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)
CP949SMModel = {'classTable': CP949_cls,
'classFactor': 10,
'stateTable': CP949_st,
'charLenTable': CP949CharLenTable,
'name': 'CP949'}
# EUC-JP
EUCJP_cls = (
4,4,4,4,4,4,4,4, # 00 - 07
4,4,4,4,4,4,5,5, # 08 - 0f
4,4,4,4,4,4,4,4, # 10 - 17
4,4,4,5,4,4,4,4, # 18 - 1f
4,4,4,4,4,4,4,4, # 20 - 27
4,4,4,4,4,4,4,4, # 28 - 2f
4,4,4,4,4,4,4,4, # 30 - 37
4,4,4,4,4,4,4,4, # 38 - 3f
4,4,4,4,4,4,4,4, # 40 - 47
4,4,4,4,4,4,4,4, # 48 - 4f
4,4,4,4,4,4,4,4, # 50 - 57
4,4,4,4,4,4,4,4, # 58 - 5f
4,4,4,4,4,4,4,4, # 60 - 67
4,4,4,4,4,4,4,4, # 68 - 6f
4,4,4,4,4,4,4,4, # 70 - 77
4,4,4,4,4,4,4,4, # 78 - 7f
5,5,5,5,5,5,5,5, # 80 - 87
5,5,5,5,5,5,1,3, # 88 - 8f
5,5,5,5,5,5,5,5, # 90 - 97
5,5,5,5,5,5,5,5, # 98 - 9f
5,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,0,5 # f8 - ff
)
EUCJP_st = (
3, 4, 3, 5,eStart,eError,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eStart,eError,eStart,eError,eError,eError,#10-17
eError,eError,eStart,eError,eError,eError, 3,eError,#18-1f
3,eError,eError,eError,eStart,eStart,eStart,eStart#20-27
)
EUCJPCharLenTable = (2, 2, 2, 3, 1, 0)
EUCJPSMModel = {'classTable': EUCJP_cls,
'classFactor': 6,
'stateTable': EUCJP_st,
'charLenTable': EUCJPCharLenTable,
'name': 'EUC-JP'}
# EUC-KR
EUCKR_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,3,3,3, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,3,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,0 # f8 - ff
)
EUCKR_st = (
eError,eStart, 3,eError,eError,eError,eError,eError,#00-07
eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,eStart #08-0f
)
EUCKRCharLenTable = (0, 1, 2, 0)
EUCKRSMModel = {'classTable': EUCKR_cls,
'classFactor': 4,
'stateTable': EUCKR_st,
'charLenTable': EUCKRCharLenTable,
'name': 'EUC-KR'}
# EUC-TW
EUCTW_cls = (
2,2,2,2,2,2,2,2, # 00 - 07
2,2,2,2,2,2,0,0, # 08 - 0f
2,2,2,2,2,2,2,2, # 10 - 17
2,2,2,0,2,2,2,2, # 18 - 1f
2,2,2,2,2,2,2,2, # 20 - 27
2,2,2,2,2,2,2,2, # 28 - 2f
2,2,2,2,2,2,2,2, # 30 - 37
2,2,2,2,2,2,2,2, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,2, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,6,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,3,4,4,4,4,4,4, # a0 - a7
5,5,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,3,1,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
EUCTW_st = (
eError,eError,eStart, 3, 3, 3, 4,eError,#00-07
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eStart,eError,#10-17
eStart,eStart,eStart,eError,eError,eError,eError,eError,#18-1f
5,eError,eError,eError,eStart,eError,eStart,eStart,#20-27
eStart,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)
EUCTWCharLenTable = (0, 0, 1, 2, 2, 2, 3)
EUCTWSMModel = {'classTable': EUCTW_cls,
'classFactor': 7,
'stateTable': EUCTW_st,
'charLenTable': EUCTWCharLenTable,
'name': 'x-euc-tw'}
# GB2312
GB2312_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
3,3,3,3,3,3,3,3, # 30 - 37
3,3,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,4, # 78 - 7f
5,6,6,6,6,6,6,6, # 80 - 87
6,6,6,6,6,6,6,6, # 88 - 8f
6,6,6,6,6,6,6,6, # 90 - 97
6,6,6,6,6,6,6,6, # 98 - 9f
6,6,6,6,6,6,6,6, # a0 - a7
6,6,6,6,6,6,6,6, # a8 - af
6,6,6,6,6,6,6,6, # b0 - b7
6,6,6,6,6,6,6,6, # b8 - bf
6,6,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
6,6,6,6,6,6,6,6, # e0 - e7
6,6,6,6,6,6,6,6, # e8 - ef
6,6,6,6,6,6,6,6, # f0 - f7
6,6,6,6,6,6,6,0 # f8 - ff
)
GB2312_st = (
eError,eStart,eStart,eStart,eStart,eStart, 3,eError,#00-07
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,#10-17
4,eError,eStart,eStart,eError,eError,eError,eError,#18-1f
eError,eError, 5,eError,eError,eError,eItsMe,eError,#20-27
eError,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)
# To be accurate, the length of class 6 can be either 2 or 4.
# But it is not necessary to discriminate between the two since
# it is used for frequency analysis only, and we are validating
# each code range there as well. So it is safe to set it to be
# 2 here.
GB2312CharLenTable = (0, 1, 1, 1, 1, 1, 2)
GB2312SMModel = {'classTable': GB2312_cls,
'classFactor': 7,
'stateTable': GB2312_st,
'charLenTable': GB2312CharLenTable,
'name': 'GB2312'}
# Shift_JIS
SJIS_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
3,3,3,3,3,3,3,3, # 80 - 87
3,3,3,3,3,3,3,3, # 88 - 8f
3,3,3,3,3,3,3,3, # 90 - 97
3,3,3,3,3,3,3,3, # 98 - 9f
    #0xa0 is illegal in Shift_JIS encoding, but some pages do
    #contain such bytes. We need to be more error-forgiving.
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,4,4,4, # e8 - ef
4,4,4,4,4,4,4,4, # f0 - f7
4,4,4,4,4,0,0,0 # f8 - ff
)
SJIS_st = (
eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eError,eError,eStart,eStart,eStart,eStart #10-17
)
SJISCharLenTable = (0, 1, 1, 2, 0, 0)
SJISSMModel = {'classTable': SJIS_cls,
'classFactor': 6,
'stateTable': SJIS_st,
'charLenTable': SJISCharLenTable,
'name': 'Shift_JIS'}
# UCS2-BE
UCS2BE_cls = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2BE_st = (
5, 7, 7,eError, 4, 3,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe, 6, 6, 6, 6,eError,eError,#10-17
6, 6, 6, 6, 6,eItsMe, 6, 6,#18-1f
6, 6, 6, 6, 5, 7, 7,eError,#20-27
5, 8, 6, 6,eError, 6, 6, 6,#28-2f
6, 6, 6, 6,eError,eError,eStart,eStart #30-37
)
UCS2BECharLenTable = (2, 2, 2, 0, 2, 2)
UCS2BESMModel = {'classTable': UCS2BE_cls,
'classFactor': 6,
'stateTable': UCS2BE_st,
'charLenTable': UCS2BECharLenTable,
'name': 'UTF-16BE'}
# UCS2-LE
UCS2LE_cls = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2LE_st = (
6, 6, 7, 6, 4, 3,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe, 5, 5, 5,eError,eItsMe,eError,#10-17
5, 5, 5,eError, 5,eError, 6, 6,#18-1f
7, 6, 8, 8, 5, 5, 5,eError,#20-27
5, 5, 5,eError,eError,eError, 5, 5,#28-2f
5, 5, 5,eError, 5,eError,eStart,eStart #30-37
)
UCS2LECharLenTable = (2, 2, 2, 2, 2, 2)
UCS2LESMModel = {'classTable': UCS2LE_cls,
'classFactor': 6,
'stateTable': UCS2LE_st,
'charLenTable': UCS2LECharLenTable,
'name': 'UTF-16LE'}
# UTF-8
UTF8_cls = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
2,2,2,2,3,3,3,3, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
5,5,5,5,5,5,5,5, # a0 - a7
5,5,5,5,5,5,5,5, # a8 - af
5,5,5,5,5,5,5,5, # b0 - b7
5,5,5,5,5,5,5,5, # b8 - bf
0,0,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
7,8,8,8,8,8,8,8, # e0 - e7
8,8,8,8,8,9,8,8, # e8 - ef
10,11,11,11,11,11,11,11, # f0 - f7
12,13,13,13,14,15,0,0 # f8 - ff
)
UTF8_st = (
eError,eStart,eError,eError,eError,eError, 12, 10,#00-07
9, 11, 8, 7, 6, 5, 4, 3,#08-0f
eError,eError,eError,eError,eError,eError,eError,eError,#10-17
eError,eError,eError,eError,eError,eError,eError,eError,#18-1f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#20-27
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#28-2f
eError,eError, 5, 5, 5, 5,eError,eError,#30-37
eError,eError,eError,eError,eError,eError,eError,eError,#38-3f
eError,eError,eError, 5, 5, 5,eError,eError,#40-47
eError,eError,eError,eError,eError,eError,eError,eError,#48-4f
eError,eError, 7, 7, 7, 7,eError,eError,#50-57
eError,eError,eError,eError,eError,eError,eError,eError,#58-5f
eError,eError,eError,eError, 7, 7,eError,eError,#60-67
eError,eError,eError,eError,eError,eError,eError,eError,#68-6f
eError,eError, 9, 9, 9, 9,eError,eError,#70-77
eError,eError,eError,eError,eError,eError,eError,eError,#78-7f
eError,eError,eError,eError,eError, 9,eError,eError,#80-87
eError,eError,eError,eError,eError,eError,eError,eError,#88-8f
eError,eError, 12, 12, 12, 12,eError,eError,#90-97
eError,eError,eError,eError,eError,eError,eError,eError,#98-9f
eError,eError,eError,eError,eError, 12,eError,eError,#a0-a7
eError,eError,eError,eError,eError,eError,eError,eError,#a8-af
eError,eError, 12, 12, 12,eError,eError,eError,#b0-b7
eError,eError,eError,eError,eError,eError,eError,eError,#b8-bf
eError,eError,eStart,eStart,eStart,eStart,eError,eError,#c0-c7
eError,eError,eError,eError,eError,eError,eError,eError #c8-cf
)
UTF8CharLenTable = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)
UTF8SMModel = {'classTable': UTF8_cls,
'classFactor': 16,
'stateTable': UTF8_st,
'charLenTable': UTF8CharLenTable,
'name': 'UTF-8'}
# flake8: noqa
|
gpl-3.0
|
servo/servo
|
tests/wpt/web-platform-tests/tools/third_party/pytest/src/_pytest/junitxml.py
|
11
|
25760
|
"""Report test results in JUnit-XML format, for use with Jenkins and build
integration servers.
Based on initial code from Ross Lawley.
Output conforms to
https://github.com/jenkinsci/xunit-plugin/blob/master/src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd
"""
import functools
import os
import platform
import re
import xml.etree.ElementTree as ET
from datetime import datetime
from typing import Callable
from typing import Dict
from typing import List
from typing import Match
from typing import Optional
from typing import Tuple
from typing import Union
import pytest
from _pytest import nodes
from _pytest import timing
from _pytest._code.code import ExceptionRepr
from _pytest._code.code import ReprFileLocation
from _pytest.config import Config
from _pytest.config import filename_arg
from _pytest.config.argparsing import Parser
from _pytest.fixtures import FixtureRequest
from _pytest.reports import TestReport
from _pytest.store import StoreKey
from _pytest.terminal import TerminalReporter
xml_key = StoreKey["LogXML"]()
def bin_xml_escape(arg: object) -> str:
r"""Visually escape invalid XML characters.
For example, transforms
'hello\aworld\b'
into
'hello#x07world#x08'
    Note that the #xABs are *not* XML escapes - missing the ampersand &#.
The idea is to escape visually for the user rather than for XML itself.
"""
def repl(matchobj: Match[str]) -> str:
i = ord(matchobj.group())
if i <= 0xFF:
return "#x%02X" % i
else:
return "#x%04X" % i
# The spec range of valid chars is:
# Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
# For an unknown(?) reason, we disallow #x7F (DEL) as well.
illegal_xml_re = (
"[^\u0009\u000A\u000D\u0020-\u007E\u0080-\uD7FF\uE000-\uFFFD\u10000-\u10FFFF]"
)
return re.sub(illegal_xml_re, repl, str(arg))
def merge_family(left, right) -> None:
result = {}
for kl, vl in left.items():
for kr, vr in right.items():
if not isinstance(vl, list):
raise TypeError(type(vl))
result[kl] = vl + vr
left.update(result)
families = {}
families["_base"] = {"testcase": ["classname", "name"]}
families["_base_legacy"] = {"testcase": ["file", "line", "url"]}
# xUnit 1.x inherits legacy attributes.
families["xunit1"] = families["_base"].copy()
merge_family(families["xunit1"], families["_base_legacy"])
# xUnit 2.x uses strict base attributes.
families["xunit2"] = families["_base"]
class _NodeReporter:
def __init__(self, nodeid: Union[str, TestReport], xml: "LogXML") -> None:
self.id = nodeid
self.xml = xml
self.add_stats = self.xml.add_stats
self.family = self.xml.family
self.duration = 0
self.properties = [] # type: List[Tuple[str, str]]
self.nodes = [] # type: List[ET.Element]
self.attrs = {} # type: Dict[str, str]
def append(self, node: ET.Element) -> None:
self.xml.add_stats(node.tag)
self.nodes.append(node)
def add_property(self, name: str, value: object) -> None:
self.properties.append((str(name), bin_xml_escape(value)))
def add_attribute(self, name: str, value: object) -> None:
self.attrs[str(name)] = bin_xml_escape(value)
def make_properties_node(self) -> Optional[ET.Element]:
"""Return a Junit node containing custom properties, if any."""
if self.properties:
properties = ET.Element("properties")
for name, value in self.properties:
properties.append(ET.Element("property", name=name, value=value))
return properties
return None
def record_testreport(self, testreport: TestReport) -> None:
names = mangle_test_address(testreport.nodeid)
existing_attrs = self.attrs
classnames = names[:-1]
if self.xml.prefix:
classnames.insert(0, self.xml.prefix)
attrs = {
"classname": ".".join(classnames),
"name": bin_xml_escape(names[-1]),
"file": testreport.location[0],
} # type: Dict[str, str]
if testreport.location[1] is not None:
attrs["line"] = str(testreport.location[1])
if hasattr(testreport, "url"):
attrs["url"] = testreport.url
self.attrs = attrs
self.attrs.update(existing_attrs) # Restore any user-defined attributes.
# Preserve legacy testcase behavior.
if self.family == "xunit1":
return
# Filter out attributes not permitted by this test family.
# Including custom attributes because they are not valid here.
temp_attrs = {}
for key in self.attrs.keys():
if key in families[self.family]["testcase"]:
temp_attrs[key] = self.attrs[key]
self.attrs = temp_attrs
def to_xml(self) -> ET.Element:
testcase = ET.Element("testcase", self.attrs, time="%.3f" % self.duration)
properties = self.make_properties_node()
if properties is not None:
testcase.append(properties)
testcase.extend(self.nodes)
return testcase
def _add_simple(self, tag: str, message: str, data: Optional[str] = None) -> None:
node = ET.Element(tag, message=message)
node.text = bin_xml_escape(data)
self.append(node)
def write_captured_output(self, report: TestReport) -> None:
if not self.xml.log_passing_tests and report.passed:
return
content_out = report.capstdout
content_log = report.caplog
content_err = report.capstderr
if self.xml.logging == "no":
return
content_all = ""
if self.xml.logging in ["log", "all"]:
content_all = self._prepare_content(content_log, " Captured Log ")
if self.xml.logging in ["system-out", "out-err", "all"]:
content_all += self._prepare_content(content_out, " Captured Out ")
self._write_content(report, content_all, "system-out")
content_all = ""
if self.xml.logging in ["system-err", "out-err", "all"]:
content_all += self._prepare_content(content_err, " Captured Err ")
self._write_content(report, content_all, "system-err")
content_all = ""
if content_all:
self._write_content(report, content_all, "system-out")
def _prepare_content(self, content: str, header: str) -> str:
return "\n".join([header.center(80, "-"), content, ""])
def _write_content(self, report: TestReport, content: str, jheader: str) -> None:
tag = ET.Element(jheader)
tag.text = bin_xml_escape(content)
self.append(tag)
def append_pass(self, report: TestReport) -> None:
self.add_stats("passed")
def append_failure(self, report: TestReport) -> None:
# msg = str(report.longrepr.reprtraceback.extraline)
if hasattr(report, "wasxfail"):
self._add_simple("skipped", "xfail-marked test passes unexpectedly")
else:
assert report.longrepr is not None
reprcrash = getattr(
report.longrepr, "reprcrash", None
) # type: Optional[ReprFileLocation]
if reprcrash is not None:
message = reprcrash.message
else:
message = str(report.longrepr)
message = bin_xml_escape(message)
self._add_simple("failure", message, str(report.longrepr))
def append_collect_error(self, report: TestReport) -> None:
# msg = str(report.longrepr.reprtraceback.extraline)
assert report.longrepr is not None
self._add_simple("error", "collection failure", str(report.longrepr))
def append_collect_skipped(self, report: TestReport) -> None:
self._add_simple("skipped", "collection skipped", str(report.longrepr))
def append_error(self, report: TestReport) -> None:
assert report.longrepr is not None
reprcrash = getattr(
report.longrepr, "reprcrash", None
) # type: Optional[ReprFileLocation]
if reprcrash is not None:
reason = reprcrash.message
else:
reason = str(report.longrepr)
if report.when == "teardown":
msg = 'failed on teardown with "{}"'.format(reason)
else:
msg = 'failed on setup with "{}"'.format(reason)
self._add_simple("error", msg, str(report.longrepr))
def append_skipped(self, report: TestReport) -> None:
if hasattr(report, "wasxfail"):
xfailreason = report.wasxfail
if xfailreason.startswith("reason: "):
xfailreason = xfailreason[8:]
xfailreason = bin_xml_escape(xfailreason)
skipped = ET.Element("skipped", type="pytest.xfail", message=xfailreason)
self.append(skipped)
else:
assert isinstance(report.longrepr, tuple)
filename, lineno, skipreason = report.longrepr
if skipreason.startswith("Skipped: "):
skipreason = skipreason[9:]
details = "{}:{}: {}".format(filename, lineno, skipreason)
skipped = ET.Element("skipped", type="pytest.skip", message=skipreason)
skipped.text = bin_xml_escape(details)
self.append(skipped)
self.write_captured_output(report)
def finalize(self) -> None:
data = self.to_xml()
self.__dict__.clear()
        # Type ignored because mypy doesn't like overriding a method.
# Also the return value doesn't match...
self.to_xml = lambda: data # type: ignore[assignment]
def _warn_incompatibility_with_xunit2(
request: FixtureRequest, fixture_name: str
) -> None:
"""Emit a PytestWarning about the given fixture being incompatible with newer xunit revisions."""
from _pytest.warning_types import PytestWarning
xml = request.config._store.get(xml_key, None)
if xml is not None and xml.family not in ("xunit1", "legacy"):
request.node.warn(
PytestWarning(
"{fixture_name} is incompatible with junit_family '{family}' (use 'legacy' or 'xunit1')".format(
fixture_name=fixture_name, family=xml.family
)
)
)
@pytest.fixture
def record_property(request: FixtureRequest) -> Callable[[str, object], None]:
"""Add extra properties to the calling test.
User properties become part of the test report and are available to the
configured reporters, like JUnit XML.
The fixture is callable with ``name, value``. The value is automatically
XML-encoded.
Example::
def test_function(record_property):
record_property("example_key", 1)
"""
_warn_incompatibility_with_xunit2(request, "record_property")
def append_property(name: str, value: object) -> None:
request.node.user_properties.append((name, value))
return append_property
@pytest.fixture
def record_xml_attribute(request: FixtureRequest) -> Callable[[str, object], None]:
"""Add extra xml attributes to the tag for the calling test.
The fixture is callable with ``name, value``. The value is
automatically XML-encoded.
"""
from _pytest.warning_types import PytestExperimentalApiWarning
request.node.warn(
PytestExperimentalApiWarning("record_xml_attribute is an experimental feature")
)
_warn_incompatibility_with_xunit2(request, "record_xml_attribute")
# Declare noop
def add_attr_noop(name: str, value: object) -> None:
pass
attr_func = add_attr_noop
xml = request.config._store.get(xml_key, None)
if xml is not None:
node_reporter = xml.node_reporter(request.node.nodeid)
attr_func = node_reporter.add_attribute
return attr_func
def _check_record_param_type(param: str, v: str) -> None:
"""Used by record_testsuite_property to check that the given parameter name is of the proper
type."""
__tracebackhide__ = True
if not isinstance(v, str):
msg = "{param} parameter needs to be a string, but {g} given" # type: ignore[unreachable]
raise TypeError(msg.format(param=param, g=type(v).__name__))
@pytest.fixture(scope="session")
def record_testsuite_property(request: FixtureRequest) -> Callable[[str, object], None]:
"""Record a new ``<property>`` tag as child of the root ``<testsuite>``.
This is suitable to writing global information regarding the entire test
suite, and is compatible with ``xunit2`` JUnit family.
This is a ``session``-scoped fixture which is called with ``(name, value)``. Example:
.. code-block:: python
def test_foo(record_testsuite_property):
record_testsuite_property("ARCH", "PPC")
record_testsuite_property("STORAGE_TYPE", "CEPH")
``name`` must be a string, ``value`` will be converted to a string and properly xml-escaped.
.. warning::
Currently this fixture **does not work** with the
`pytest-xdist <https://github.com/pytest-dev/pytest-xdist>`__ plugin. See issue
`#7767 <https://github.com/pytest-dev/pytest/issues/7767>`__ for details.
"""
__tracebackhide__ = True
def record_func(name: str, value: object) -> None:
"""No-op function in case --junitxml was not passed in the command-line."""
__tracebackhide__ = True
_check_record_param_type("name", name)
xml = request.config._store.get(xml_key, None)
if xml is not None:
record_func = xml.add_global_property # noqa
return record_func
def pytest_addoption(parser: Parser) -> None:
group = parser.getgroup("terminal reporting")
group.addoption(
"--junitxml",
"--junit-xml",
action="store",
dest="xmlpath",
metavar="path",
type=functools.partial(filename_arg, optname="--junitxml"),
default=None,
help="create junit-xml style report file at given path.",
)
group.addoption(
"--junitprefix",
"--junit-prefix",
action="store",
metavar="str",
default=None,
help="prepend prefix to classnames in junit-xml output",
)
parser.addini(
"junit_suite_name", "Test suite name for JUnit report", default="pytest"
)
parser.addini(
"junit_logging",
"Write captured log messages to JUnit report: "
"one of no|log|system-out|system-err|out-err|all",
default="no",
)
parser.addini(
"junit_log_passing_tests",
"Capture log information for passing tests to JUnit report: ",
type="bool",
default=True,
)
parser.addini(
"junit_duration_report",
"Duration time to report: one of total|call",
default="total",
) # choices=['total', 'call'])
parser.addini(
"junit_family",
"Emit XML for schema: one of legacy|xunit1|xunit2",
default="xunit2",
)
def pytest_configure(config: Config) -> None:
xmlpath = config.option.xmlpath
# Prevent opening xmllog on worker nodes (xdist).
if xmlpath and not hasattr(config, "workerinput"):
junit_family = config.getini("junit_family")
config._store[xml_key] = LogXML(
xmlpath,
config.option.junitprefix,
config.getini("junit_suite_name"),
config.getini("junit_logging"),
config.getini("junit_duration_report"),
junit_family,
config.getini("junit_log_passing_tests"),
)
config.pluginmanager.register(config._store[xml_key])
def pytest_unconfigure(config: Config) -> None:
xml = config._store.get(xml_key, None)
if xml:
del config._store[xml_key]
config.pluginmanager.unregister(xml)
def mangle_test_address(address: str) -> List[str]:
path, possible_open_bracket, params = address.partition("[")
names = path.split("::")
try:
names.remove("()")
except ValueError:
pass
# Convert file path to dotted path.
names[0] = names[0].replace(nodes.SEP, ".")
names[0] = re.sub(r"\.py$", "", names[0])
# Put any params back.
names[-1] += possible_open_bracket + params
return names
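# A worked example (sketch): for the address
#   "tests/test_foo.py::TestClass::test_bar[1-2]"
# the parametrization suffix is split off at "[", any "()" placeholder is
# dropped, the file path becomes a dotted module path, and the suffix is
# re-attached, yielding:
#   ["tests.test_foo", "TestClass", "test_bar[1-2]"]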
class LogXML:
def __init__(
self,
logfile,
prefix: Optional[str],
suite_name: str = "pytest",
logging: str = "no",
report_duration: str = "total",
family="xunit1",
log_passing_tests: bool = True,
) -> None:
logfile = os.path.expanduser(os.path.expandvars(logfile))
self.logfile = os.path.normpath(os.path.abspath(logfile))
self.prefix = prefix
self.suite_name = suite_name
self.logging = logging
self.log_passing_tests = log_passing_tests
self.report_duration = report_duration
self.family = family
self.stats = dict.fromkeys(
["error", "passed", "failure", "skipped"], 0
) # type: Dict[str, int]
self.node_reporters = (
{}
) # type: Dict[Tuple[Union[str, TestReport], object], _NodeReporter]
self.node_reporters_ordered = [] # type: List[_NodeReporter]
self.global_properties = [] # type: List[Tuple[str, str]]
# List of reports that failed on call but teardown is pending.
self.open_reports = [] # type: List[TestReport]
self.cnt_double_fail_tests = 0
# Replaces convenience family with real family.
if self.family == "legacy":
self.family = "xunit1"
def finalize(self, report: TestReport) -> None:
nodeid = getattr(report, "nodeid", report)
# Local hack to handle xdist report order.
workernode = getattr(report, "node", None)
reporter = self.node_reporters.pop((nodeid, workernode))
if reporter is not None:
reporter.finalize()
def node_reporter(self, report: Union[TestReport, str]) -> _NodeReporter:
nodeid = getattr(report, "nodeid", report) # type: Union[str, TestReport]
# Local hack to handle xdist report order.
workernode = getattr(report, "node", None)
key = nodeid, workernode
if key in self.node_reporters:
# TODO: breaks for --dist=each
return self.node_reporters[key]
reporter = _NodeReporter(nodeid, self)
self.node_reporters[key] = reporter
self.node_reporters_ordered.append(reporter)
return reporter
def add_stats(self, key: str) -> None:
if key in self.stats:
self.stats[key] += 1
def _opentestcase(self, report: TestReport) -> _NodeReporter:
reporter = self.node_reporter(report)
reporter.record_testreport(report)
return reporter
def pytest_runtest_logreport(self, report: TestReport) -> None:
"""Handle a setup/call/teardown report, generating the appropriate
XML tags as necessary.
Note: due to plugins like xdist, this hook may be called in interlaced
order with reports from other nodes. For example:
Usual call order:
-> setup node1
-> call node1
-> teardown node1
-> setup node2
-> call node2
-> teardown node2
Possible call order in xdist:
-> setup node1
-> call node1
-> setup node2
-> call node2
-> teardown node2
-> teardown node1
"""
close_report = None
if report.passed:
if report.when == "call": # ignore setup/teardown
reporter = self._opentestcase(report)
reporter.append_pass(report)
elif report.failed:
if report.when == "teardown":
                # The following vars are needed when the xdist plugin is used.
report_wid = getattr(report, "worker_id", None)
report_ii = getattr(report, "item_index", None)
close_report = next(
(
rep
for rep in self.open_reports
if (
rep.nodeid == report.nodeid
and getattr(rep, "item_index", None) == report_ii
and getattr(rep, "worker_id", None) == report_wid
)
),
None,
)
if close_report:
                    # We need to open a new testcase when we have a failure in
                    # call and an error in teardown, in order to follow the
                    # junit schema.
self.finalize(close_report)
self.cnt_double_fail_tests += 1
reporter = self._opentestcase(report)
if report.when == "call":
reporter.append_failure(report)
self.open_reports.append(report)
if not self.log_passing_tests:
reporter.write_captured_output(report)
else:
reporter.append_error(report)
elif report.skipped:
reporter = self._opentestcase(report)
reporter.append_skipped(report)
self.update_testcase_duration(report)
if report.when == "teardown":
reporter = self._opentestcase(report)
reporter.write_captured_output(report)
for propname, propvalue in report.user_properties:
reporter.add_property(propname, str(propvalue))
self.finalize(report)
report_wid = getattr(report, "worker_id", None)
report_ii = getattr(report, "item_index", None)
close_report = next(
(
rep
for rep in self.open_reports
if (
rep.nodeid == report.nodeid
and getattr(rep, "item_index", None) == report_ii
and getattr(rep, "worker_id", None) == report_wid
)
),
None,
)
if close_report:
self.open_reports.remove(close_report)
def update_testcase_duration(self, report: TestReport) -> None:
"""Accumulate total duration for nodeid from given report and update
the Junit.testcase with the new total if already created."""
if self.report_duration == "total" or report.when == self.report_duration:
reporter = self.node_reporter(report)
reporter.duration += getattr(report, "duration", 0.0)
def pytest_collectreport(self, report: TestReport) -> None:
if not report.passed:
reporter = self._opentestcase(report)
if report.failed:
reporter.append_collect_error(report)
else:
reporter.append_collect_skipped(report)
def pytest_internalerror(self, excrepr: ExceptionRepr) -> None:
reporter = self.node_reporter("internal")
reporter.attrs.update(classname="pytest", name="internal")
reporter._add_simple("error", "internal error", str(excrepr))
def pytest_sessionstart(self) -> None:
self.suite_start_time = timing.time()
def pytest_sessionfinish(self) -> None:
dirname = os.path.dirname(os.path.abspath(self.logfile))
if not os.path.isdir(dirname):
os.makedirs(dirname)
logfile = open(self.logfile, "w", encoding="utf-8")
suite_stop_time = timing.time()
suite_time_delta = suite_stop_time - self.suite_start_time
numtests = (
self.stats["passed"]
+ self.stats["failure"]
+ self.stats["skipped"]
+ self.stats["error"]
- self.cnt_double_fail_tests
)
logfile.write('<?xml version="1.0" encoding="utf-8"?>')
suite_node = ET.Element(
"testsuite",
name=self.suite_name,
errors=str(self.stats["error"]),
failures=str(self.stats["failure"]),
skipped=str(self.stats["skipped"]),
tests=str(numtests),
time="%.3f" % suite_time_delta,
timestamp=datetime.fromtimestamp(self.suite_start_time).isoformat(),
hostname=platform.node(),
)
global_properties = self._get_global_properties_node()
if global_properties is not None:
suite_node.append(global_properties)
for node_reporter in self.node_reporters_ordered:
suite_node.append(node_reporter.to_xml())
testsuites = ET.Element("testsuites")
testsuites.append(suite_node)
logfile.write(ET.tostring(testsuites, encoding="unicode"))
logfile.close()
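    # Shape of the document written above (a sketch; attribute values are
    # computed in the code):
    #
    #   <?xml version="1.0" encoding="utf-8"?>
    #   <testsuites>
    #     <testsuite name="pytest" errors="0" failures="0" skipped="0"
    #                tests="..." time="..." timestamp="..." hostname="...">
    #       <properties>...</properties>  <!-- only when global properties exist -->
    #       <testcase .../>               <!-- one per node reporter -->
    #     </testsuite>
    #   </testsuites>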
def pytest_terminal_summary(self, terminalreporter: TerminalReporter) -> None:
terminalreporter.write_sep("-", "generated xml file: {}".format(self.logfile))
def add_global_property(self, name: str, value: object) -> None:
__tracebackhide__ = True
_check_record_param_type("name", name)
self.global_properties.append((name, bin_xml_escape(value)))
def _get_global_properties_node(self) -> Optional[ET.Element]:
"""Return a Junit node containing custom properties, if any."""
if self.global_properties:
properties = ET.Element("properties")
for name, value in self.global_properties:
properties.append(ET.Element("property", name=name, value=value))
return properties
return None
|
mpl-2.0
|
blackzw/openwrt_sdk_dev1
|
staging_dir/host/lib/python2.7/test/test__locale.py
|
111
|
5807
|
from test.test_support import run_unittest
from _locale import (setlocale, LC_NUMERIC, localeconv, Error)
try:
from _locale import (RADIXCHAR, THOUSEP, nl_langinfo)
except ImportError:
nl_langinfo = None
import unittest
import sys
from platform import uname
if uname()[0] == "Darwin":
maj, min, mic = [int(part) for part in uname()[2].split(".")]
if (maj, min, mic) < (8, 0, 0):
raise unittest.SkipTest("locale support broken for OS X < 10.4")
candidate_locales = ['es_UY', 'fr_FR', 'fi_FI', 'es_CO', 'pt_PT', 'it_IT',
'et_EE', 'es_PY', 'no_NO', 'nl_NL', 'lv_LV', 'el_GR', 'be_BY', 'fr_BE',
'ro_RO', 'ru_UA', 'ru_RU', 'es_VE', 'ca_ES', 'se_NO', 'es_EC', 'id_ID',
'ka_GE', 'es_CL', 'hu_HU', 'wa_BE', 'lt_LT', 'sl_SI', 'hr_HR', 'es_AR',
'es_ES', 'oc_FR', 'gl_ES', 'bg_BG', 'is_IS', 'mk_MK', 'de_AT', 'pt_BR',
'da_DK', 'nn_NO', 'cs_CZ', 'de_LU', 'es_BO', 'sq_AL', 'sk_SK', 'fr_CH',
'de_DE', 'sr_YU', 'br_FR', 'nl_BE', 'sv_FI', 'pl_PL', 'fr_CA', 'fo_FO',
'bs_BA', 'fr_LU', 'kl_GL', 'fa_IR', 'de_BE', 'sv_SE', 'it_CH', 'uk_UA',
'eu_ES', 'vi_VN', 'af_ZA', 'nb_NO', 'en_DK', 'tg_TJ', 'en_US',
'es_ES.ISO8859-1', 'fr_FR.ISO8859-15', 'ru_RU.KOI8-R', 'ko_KR.eucKR']
# Workaround for MSVC6(debug) crash bug
if "MSC v.1200" in sys.version:
def accept(loc):
a = loc.split(".")
        return not (len(a) == 2 and len(a[-1]) >= 9)
candidate_locales = [loc for loc in candidate_locales if accept(loc)]
# List known locale values to test against when available.
# Dict formatted as ``<locale> : (<decimal_point>, <thousands_sep>)``. If a
# value is not known, use ''.
known_numerics = {'fr_FR' : (',', ''), 'en_US':('.', ',')}
class _LocaleTests(unittest.TestCase):
def setUp(self):
self.oldlocale = setlocale(LC_NUMERIC)
def tearDown(self):
setlocale(LC_NUMERIC, self.oldlocale)
# Want to know what value was calculated, what it was compared against,
# what function was used for the calculation, what type of data was used,
# the locale that was supposedly set, and the actual locale that is set.
lc_numeric_err_msg = "%s != %s (%s for %s; set to %s, using %s)"
def numeric_tester(self, calc_type, calc_value, data_type, used_locale):
"""Compare calculation against known value, if available"""
try:
set_locale = setlocale(LC_NUMERIC)
except Error:
set_locale = "<not able to determine>"
known_value = known_numerics.get(used_locale,
('', ''))[data_type == 'thousands_sep']
if known_value and calc_value:
self.assertEqual(calc_value, known_value,
self.lc_numeric_err_msg % (
calc_value, known_value,
calc_type, data_type, set_locale,
used_locale))
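    # Note on the tuple lookup above: ``data_type == 'thousands_sep'`` is False
    # (index 0) or True (index 1), so it selects either the known decimal point
    # or the known thousands separator from the 2-tuple in known_numerics.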
@unittest.skipUnless(nl_langinfo, "nl_langinfo is not available")
def test_lc_numeric_nl_langinfo(self):
# Test nl_langinfo against known values
for loc in candidate_locales:
try:
setlocale(LC_NUMERIC, loc)
except Error:
continue
for li, lc in ((RADIXCHAR, "decimal_point"),
(THOUSEP, "thousands_sep")):
self.numeric_tester('nl_langinfo', nl_langinfo(li), lc, loc)
def test_lc_numeric_localeconv(self):
# Test localeconv against known values
for loc in candidate_locales:
try:
setlocale(LC_NUMERIC, loc)
except Error:
continue
for lc in ("decimal_point", "thousands_sep"):
self.numeric_tester('localeconv', localeconv()[lc], lc, loc)
@unittest.skipUnless(nl_langinfo, "nl_langinfo is not available")
def test_lc_numeric_basic(self):
# Test nl_langinfo against localeconv
for loc in candidate_locales:
try:
setlocale(LC_NUMERIC, loc)
except Error:
continue
for li, lc in ((RADIXCHAR, "decimal_point"),
(THOUSEP, "thousands_sep")):
nl_radixchar = nl_langinfo(li)
li_radixchar = localeconv()[lc]
try:
set_locale = setlocale(LC_NUMERIC)
except Error:
set_locale = "<not able to determine>"
self.assertEqual(nl_radixchar, li_radixchar,
"%s (nl_langinfo) != %s (localeconv) "
"(set to %s, using %s)" % (
nl_radixchar, li_radixchar,
loc, set_locale))
def test_float_parsing(self):
# Bug #1391872: Test whether float parsing is okay on European
# locales.
for loc in candidate_locales:
try:
setlocale(LC_NUMERIC, loc)
except Error:
continue
# Ignore buggy locale databases. (Mac OS 10.4 and some other BSDs)
if loc == 'eu_ES' and localeconv()['decimal_point'] == "' ":
continue
self.assertEqual(int(eval('3.14') * 100), 314,
"using eval('3.14') failed for %s" % loc)
self.assertEqual(int(float('3.14') * 100), 314,
"using float('3.14') failed for %s" % loc)
if localeconv()['decimal_point'] != '.':
self.assertRaises(ValueError, float,
localeconv()['decimal_point'].join(['1', '23']))
def test_main():
run_unittest(_LocaleTests)
if __name__ == '__main__':
test_main()
|
gpl-2.0
|
zhouzhenghui/python-for-android
|
python3-alpha/extra_modules/gdata/test_data.py
|
103
|
348233
|
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
XML_ENTRY_1 = """<?xml version='1.0'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:g='http://base.google.com/ns/1.0'>
<category scheme="http://base.google.com/categories/itemtypes"
term="products"/>
<id> http://www.google.com/test/id/url </id>
<title type='text'>Testing 2000 series laptop</title>
<content type='xhtml'>
<div xmlns='http://www.w3.org/1999/xhtml'>A Testing Laptop</div>
</content>
<link rel='alternate' type='text/html'
href='http://www.provider-host.com/123456789'/>
<link rel='license'
href='http://creativecommons.org/licenses/by-nc/2.5/rdf'/>
<g:label>Computer</g:label>
<g:label>Laptop</g:label>
<g:label>testing laptop</g:label>
<g:item_type>products</g:item_type>
</entry>"""
TEST_BASE_ENTRY = """<?xml version='1.0'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:g='http://base.google.com/ns/1.0'>
<category scheme="http://base.google.com/categories/itemtypes"
term="products"/>
<title type='text'>Testing 2000 series laptop</title>
<content type='xhtml'>
<div xmlns='http://www.w3.org/1999/xhtml'>A Testing Laptop</div>
</content>
<app:control xmlns:app='http://purl.org/atom/app#'>
<app:draft>yes</app:draft>
<gm:disapproved xmlns:gm='http://base.google.com/ns-metadata/1.0'/>
</app:control>
<link rel='alternate' type='text/html'
href='http://www.provider-host.com/123456789'/>
<g:label>Computer</g:label>
<g:label>Laptop</g:label>
<g:label>testing laptop</g:label>
<g:item_type>products</g:item_type>
</entry>"""
BIG_FEED = """<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<title type="text">dive into mark</title>
<subtitle type="html">
A <em>lot</em> of effort
went into making this effortless
</subtitle>
<updated>2005-07-31T12:29:29Z</updated>
<id>tag:example.org,2003:3</id>
<link rel="alternate" type="text/html"
hreflang="en" href="http://example.org/"/>
<link rel="self" type="application/atom+xml"
href="http://example.org/feed.atom"/>
<rights>Copyright (c) 2003, Mark Pilgrim</rights>
<generator uri="http://www.example.com/" version="1.0">
Example Toolkit
</generator>
<entry>
<title>Atom draft-07 snapshot</title>
<link rel="alternate" type="text/html"
href="http://example.org/2005/04/02/atom"/>
<link rel="enclosure" type="audio/mpeg" length="1337"
href="http://example.org/audio/ph34r_my_podcast.mp3"/>
<id>tag:example.org,2003:3.2397</id>
<updated>2005-07-31T12:29:29Z</updated>
<published>2003-12-13T08:29:29-04:00</published>
<author>
<name>Mark Pilgrim</name>
<uri>http://example.org/</uri>
<email>[email protected]</email>
</author>
<contributor>
<name>Sam Ruby</name>
</contributor>
<contributor>
<name>Joe Gregorio</name>
</contributor>
<content type="xhtml" xml:lang="en"
xml:base="http://diveintomark.org/">
<div xmlns="http://www.w3.org/1999/xhtml">
<p><i>[Update: The Atom draft is finished.]</i></p>
</div>
</content>
</entry>
</feed>
"""
SMALL_FEED = """<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<title>Example Feed</title>
<link href="http://example.org/"/>
<updated>2003-12-13T18:30:02Z</updated>
<author>
<name>John Doe</name>
</author>
<id>urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6</id>
<entry>
<title>Atom-Powered Robots Run Amok</title>
<link href="http://example.org/2003/12/13/atom03"/>
<id>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</id>
<updated>2003-12-13T18:30:02Z</updated>
<summary>Some text.</summary>
</entry>
</feed>
"""
GBASE_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:g='http://base.google.com/ns/1.0' xmlns:batch='http://schemas.google.com/gdata/batch'>
<id>http://www.google.com/base/feeds/snippets</id>
<updated>2007-02-08T23:18:21.935Z</updated>
<title type='text'>Items matching query: digital camera</title>
<link rel='alternate' type='text/html' href='http://base.google.com'>
</link>
<link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml' href='http://www.google.com/base/feeds/snippets'>
</link>
  <link rel='self' type='application/atom+xml' href='http://www.google.com/base/feeds/snippets?start-index=1&amp;max-results=25&amp;bq=digital+camera'>
  </link>
  <link rel='next' type='application/atom+xml' href='http://www.google.com/base/feeds/snippets?start-index=26&amp;max-results=25&amp;bq=digital+camera'>
</link>
<generator version='1.0' uri='http://base.google.com'>GoogleBase </generator>
<openSearch:totalResults>2171885</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<entry>
<id>http://www.google.com/base/feeds/snippets/13246453826751927533</id>
<published>2007-02-08T13:23:27.000Z</published>
<updated>2007-02-08T16:40:57.000Z</updated>
<category scheme='http://base.google.com/categories/itemtypes' term='Products'>
</category>
<title type='text'>Digital Camera Battery Notebook Computer 12v DC Power Cable - 5.5mm x 2.5mm (Center +) Camera Connecting Cables</title>
<content type='html'>Notebook Computer 12v DC Power Cable - 5.5mm x 2.1mm (Center +) This connection cable will allow any Digital Pursuits battery pack to power portable computers that operate with 12v power and have a 2.1mm power connector (center +) Digital ...</content>
    <link rel='alternate' type='text/html' href='http://www.bhphotovideo.com/bnh/controller/home?O=productlist&amp;A=details&amp;Q=&amp;sku=305668&amp;is=REG&amp;kw=DIDCB5092&amp;BI=583'>
</link>
<link rel='self' type='application/atom+xml' href='http://www.google.com/base/feeds/snippets/13246453826751927533'>
</link>
<author>
      <name>B&amp;H Photo-Video</name>
      <email>[email protected]</email>
    </author>
    <g:payment_notes type='text'>PayPal &amp; Bill Me Later credit available online only.</g:payment_notes>
<g:condition type='text'>new</g:condition>
<g:location type='location'>420 9th Ave. 10001</g:location>
<g:id type='text'>305668-REG</g:id>
<g:item_type type='text'>Products</g:item_type>
<g:brand type='text'>Digital Camera Battery</g:brand>
<g:expiration_date type='dateTime'>2007-03-10T13:23:27.000Z</g:expiration_date>
<g:customer_id type='int'>1172711</g:customer_id>
<g:price type='floatUnit'>34.95 usd</g:price>
    <g:product_type type='text'>Digital Photography&gt;Camera Connecting Cables</g:product_type>
<g:item_language type='text'>EN</g:item_language>
<g:manufacturer_id type='text'>DCB5092</g:manufacturer_id>
<g:target_country type='text'>US</g:target_country>
<g:weight type='float'>1.0</g:weight>
    <g:image_link type='url'>http://base.google.com/base_image?q=http%3A%2F%2Fwww.bhphotovideo.com%2Fimages%2Fitems%2F305668.jpg&amp;dhm=ffffffff84c9a95e&amp;size=6</g:image_link>
</entry>
<entry>
<id>http://www.google.com/base/feeds/snippets/10145771037331858608</id>
<published>2007-02-08T13:23:27.000Z</published>
<updated>2007-02-08T16:40:57.000Z</updated>
<category scheme='http://base.google.com/categories/itemtypes' term='Products'>
</category>
<title type='text'>Digital Camera Battery Electronic Device 5v DC Power Cable - 5.5mm x 2.5mm (Center +) Camera Connecting Cables</title>
<content type='html'>Electronic Device 5v DC Power Cable - 5.5mm x 2.5mm (Center +) This connection cable will allow any Digital Pursuits battery pack to power any electronic device that operates with 5v power and has a 2.5mm power connector (center +) Digital ...</content>
    <link rel='alternate' type='text/html' href='http://www.bhphotovideo.com/bnh/controller/home?O=productlist&amp;A=details&amp;Q=&amp;sku=305656&amp;is=REG&amp;kw=DIDCB5108&amp;BI=583'>
</link>
<link rel='self' type='application/atom+xml' href='http://www.google.com/base/feeds/snippets/10145771037331858608'>
</link>
<author>
      <name>B&amp;H Photo-Video</name>
<email>[email protected]</email>
</author>
<g:location type='location'>420 9th Ave. 10001</g:location>
<g:condition type='text'>new</g:condition>
<g:weight type='float'>0.18</g:weight>
<g:target_country type='text'>US</g:target_country>
    <g:product_type type='text'>Digital Photography&gt;Camera Connecting Cables</g:product_type>
    <g:payment_notes type='text'>PayPal &amp; Bill Me Later credit available online only.</g:payment_notes>
<g:id type='text'>305656-REG</g:id>
    <g:image_link type='url'>http://base.google.com/base_image?q=http%3A%2F%2Fwww.bhphotovideo.com%2Fimages%2Fitems%2F305656.jpg&amp;dhm=7315bdc8&amp;size=6</g:image_link>
<g:manufacturer_id type='text'>DCB5108</g:manufacturer_id>
<g:upc type='text'>838098005108</g:upc>
<g:price type='floatUnit'>34.95 usd</g:price>
<g:item_language type='text'>EN</g:item_language>
<g:brand type='text'>Digital Camera Battery</g:brand>
<g:customer_id type='int'>1172711</g:customer_id>
<g:item_type type='text'>Products</g:item_type>
<g:expiration_date type='dateTime'>2007-03-10T13:23:27.000Z</g:expiration_date>
</entry>
<entry>
<id>http://www.google.com/base/feeds/snippets/3128608193804768644</id>
<published>2007-02-08T02:21:27.000Z</published>
<updated>2007-02-08T15:40:13.000Z</updated>
<category scheme='http://base.google.com/categories/itemtypes' term='Products'>
</category>
<title type='text'>Digital Camera Battery Power Cable for Kodak 645 Pro-Back ProBack & DCS-300 Series Camera Connecting Cables</title>
<content type='html'>Camera Connection Cable - to Power Kodak 645 Pro-Back DCS-300 Series Digital Cameras This connection cable will allow any Digital Pursuits battery pack to power the following digital cameras: Kodak DCS Pro Back 645 DCS-300 series Digital Photography ...</content>
    <link rel='alternate' type='text/html' href='http://www.bhphotovideo.com/bnh/controller/home?O=productlist&amp;A=details&amp;Q=&amp;sku=305685&amp;is=REG&amp;kw=DIDCB6006&amp;BI=583'>
</link>
<link rel='self' type='application/atom+xml' href='http://www.google.com/base/feeds/snippets/3128608193804768644'>
</link>
<author>
      <name>B&amp;H Photo-Video</name>
<email>[email protected]</email>
</author>
<g:weight type='float'>0.3</g:weight>
<g:manufacturer_id type='text'>DCB6006</g:manufacturer_id>
    <g:image_link type='url'>http://base.google.com/base_image?q=http%3A%2F%2Fwww.bhphotovideo.com%2Fimages%2Fitems%2F305685.jpg&amp;dhm=72f0ca0a&amp;size=6</g:image_link>
    <g:location type='location'>420 9th Ave. 10001</g:location>
    <g:payment_notes type='text'>PayPal &amp; Bill Me Later credit available online only.</g:payment_notes>
<g:item_type type='text'>Products</g:item_type>
<g:target_country type='text'>US</g:target_country>
<g:accessory_for type='text'>digital kodak camera</g:accessory_for>
<g:brand type='text'>Digital Camera Battery</g:brand>
<g:expiration_date type='dateTime'>2007-03-10T02:21:27.000Z</g:expiration_date>
<g:item_language type='text'>EN</g:item_language>
<g:condition type='text'>new</g:condition>
<g:price type='floatUnit'>34.95 usd</g:price>
<g:customer_id type='int'>1172711</g:customer_id>
    <g:product_type type='text'>Digital Photography&gt;Camera Connecting Cables</g:product_type>
<g:id type='text'>305685-REG</g:id>
</entry>
</feed>"""
EXTENSION_TREE = """<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<g:author xmlns:g="http://www.google.com">
<g:name>John Doe
<g:foo yes="no" up="down">Bar</g:foo>
</g:name>
</g:author>
</feed>
"""
TEST_AUTHOR = """<?xml version="1.0" encoding="utf-8"?>
<author xmlns="http://www.w3.org/2005/Atom">
<name xmlns="http://www.w3.org/2005/Atom">John Doe</name>
<email xmlns="http://www.w3.org/2005/Atom">[email protected]</email>
<uri xmlns="http://www.w3.org/2005/Atom">http://www.google.com</uri>
</author>
"""
TEST_LINK = """<?xml version="1.0" encoding="utf-8"?>
<link xmlns="http://www.w3.org/2005/Atom" href="http://www.google.com"
rel="test rel" foo1="bar" foo2="rab"/>
"""
TEST_GBASE_ATTRIBUTE = """<?xml version="1.0" encoding="utf-8"?>
<g:brand type='text' xmlns:g="http://base.google.com/ns/1.0">Digital Camera Battery</g:brand>
"""
CALENDAR_FEED = """<?xml version='1.0' encoding='utf-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:gd='http://schemas.google.com/g/2005'
xmlns:gCal='http://schemas.google.com/gCal/2005'>
<id>http://www.google.com/calendar/feeds/default</id>
<updated>2007-03-20T22:48:57.833Z</updated>
<title type='text'>GData Ops Demo's Calendar List</title>
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default'></link>
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default'></link>
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<generator version='1.0' uri='http://www.google.com/calendar'>
Google Calendar</generator>
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>
http://www.google.com/calendar/feeds/default/gdata.ops.demo%40gmail.com</id>
<published>2007-03-20T22:48:57.837Z</published>
<updated>2007-03-20T22:48:52.000Z</updated>
<title type='text'>GData Ops Demo</title>
<link rel='alternate' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/gdata.ops.demo%40gmail.com/private/full'>
</link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/gdata.ops.demo%40gmail.com'>
</link>
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<gCal:color value='#2952A3'></gCal:color>
<gCal:accesslevel value='owner'></gCal:accesslevel>
<gCal:hidden value='false'></gCal:hidden>
<gCal:timezone value='America/Los_Angeles'></gCal:timezone>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/jnh21ovnjgfph21h32gvms2758%40group.calendar.google.com</id>
<published>2007-03-20T22:48:57.837Z</published>
<updated>2007-03-20T22:48:53.000Z</updated>
<title type='text'>GData Ops Demo Secondary Calendar</title>
<summary type='text'></summary>
<link rel='alternate' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/jnh21ovnjgfph21h32gvms2758%40group.calendar.google.com/private/full'>
</link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/jnh21ovnjgfph21h32gvms2758%40group.calendar.google.com'>
</link>
<author>
<name>GData Ops Demo Secondary Calendar</name>
</author>
<gCal:color value='#528800'></gCal:color>
<gCal:accesslevel value='owner'></gCal:accesslevel>
<gCal:hidden value='false'></gCal:hidden>
<gCal:timezone value='America/Los_Angeles'></gCal:timezone>
<gd:where valueString=''></gd:where>
</entry>
</feed>
"""
CALENDAR_FULL_EVENT_FEED = """<?xml version='1.0' encoding='utf-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:gd='http://schemas.google.com/g/2005'
xmlns:gCal='http://schemas.google.com/gCal/2005'>
<id>
http://www.google.com/calendar/feeds/default/private/full</id>
<updated>2007-03-20T21:29:57.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>GData Ops Demo</title>
<subtitle type='text'>GData Ops Demo</subtitle>
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full'>
</link>
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full'>
</link>
<link rel='self' type='application/atom+xml'
    href='http://www.google.com/calendar/feeds/default/private/full?updated-min=2001-01-01&amp;max-results=25'>
</link>
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<generator version='1.0' uri='http://www.google.com/calendar'>
Google Calendar</generator>
<openSearch:totalResults>10</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<gCal:timezone value='America/Los_Angeles'></gCal:timezone>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/o99flmgmkfkfrr8u745ghr3100</id>
<published>2007-03-20T21:29:52.000Z</published>
<updated>2007-03-20T21:29:57.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>test deleted</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=bzk5ZmxtZ21rZmtmcnI4dTc0NWdocjMxMDAgZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/o99flmgmkfkfrr8u745ghr3100'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/o99flmgmkfkfrr8u745ghr3100/63310109397'>
</link>
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.canceled'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/o99flmgmkfkfrr8u745ghr3100/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:when startTime='2007-03-23T12:00:00.000-07:00'
endTime='2007-03-23T13:00:00.000-07:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/2qt3ao5hbaq7m9igr5ak9esjo0</id>
<published>2007-03-20T21:26:04.000Z</published>
<updated>2007-03-20T21:28:46.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>Afternoon at Dolores Park with Kim</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=MnF0M2FvNWhiYXE3bTlpZ3I1YWs5ZXNqbzAgZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/2qt3ao5hbaq7m9igr5ak9esjo0'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/2qt3ao5hbaq7m9igr5ak9esjo0/63310109326'>
</link>
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/2qt3ao5hbaq7m9igr5ak9esjo0/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.private'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:who rel='http://schemas.google.com/g/2005#event.organizer'
valueString='GData Ops Demo' email='[email protected]'>
<gd:attendeeStatus value='http://schemas.google.com/g/2005#event.accepted'>
</gd:attendeeStatus>
</gd:who>
<gd:who rel='http://schemas.google.com/g/2005#event.attendee'
valueString='Ryan Boyd (API)' email='[email protected]'>
<gd:attendeeStatus value='http://schemas.google.com/g/2005#event.invited'>
</gd:attendeeStatus>
</gd:who>
<gd:when startTime='2007-03-24T12:00:00.000-07:00'
endTime='2007-03-24T15:00:00.000-07:00'>
<gd:reminder minutes='20'></gd:reminder>
</gd:when>
<gd:where valueString='Dolores Park with Kim'></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/uvsqhg7klnae40v50vihr1pvos</id>
<published>2007-03-20T21:28:37.000Z</published>
<updated>2007-03-20T21:28:37.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>Team meeting</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=dXZzcWhnN2tsbmFlNDB2NTB2aWhyMXB2b3NfMjAwNzAzMjNUMTYwMDAwWiBnZGF0YS5vcHMuZGVtb0Bt'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/uvsqhg7klnae40v50vihr1pvos'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/uvsqhg7klnae40v50vihr1pvos/63310109317'>
</link>
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<gd:recurrence>DTSTART;TZID=America/Los_Angeles:20070323T090000
DTEND;TZID=America/Los_Angeles:20070323T100000
RRULE:FREQ=WEEKLY;BYDAY=FR;UNTIL=20070817T160000Z;WKST=SU
BEGIN:VTIMEZONE TZID:America/Los_Angeles
X-LIC-LOCATION:America/Los_Angeles BEGIN:STANDARD
TZOFFSETFROM:-0700 TZOFFSETTO:-0800 TZNAME:PST
DTSTART:19701025T020000 RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU
END:STANDARD BEGIN:DAYLIGHT TZOFFSETFROM:-0800 TZOFFSETTO:-0700
TZNAME:PDT DTSTART:19700405T020000
RRULE:FREQ=YEARLY;BYMONTH=4;BYDAY=1SU END:DAYLIGHT
END:VTIMEZONE</gd:recurrence>
<gCal:sendEventNotifications value='true'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:visibility value='http://schemas.google.com/g/2005#event.public'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:reminder minutes='10'></gd:reminder>
<gd:where valueString=''></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/st4vk9kiffs6rasrl32e4a7alo</id>
<published>2007-03-20T21:25:46.000Z</published>
<updated>2007-03-20T21:25:46.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>Movie with Kim and danah</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=c3Q0dms5a2lmZnM2cmFzcmwzMmU0YTdhbG8gZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/st4vk9kiffs6rasrl32e4a7alo'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/st4vk9kiffs6rasrl32e4a7alo/63310109146'>
</link>
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/st4vk9kiffs6rasrl32e4a7alo/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:when startTime='2007-03-24T20:00:00.000-07:00'
endTime='2007-03-24T21:00:00.000-07:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/ofl1e45ubtsoh6gtu127cls2oo</id>
<published>2007-03-20T21:24:43.000Z</published>
<updated>2007-03-20T21:25:08.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>Dinner with Kim and Sarah</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=b2ZsMWU0NXVidHNvaDZndHUxMjdjbHMyb28gZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/ofl1e45ubtsoh6gtu127cls2oo'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/ofl1e45ubtsoh6gtu127cls2oo/63310109108'>
</link>
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/ofl1e45ubtsoh6gtu127cls2oo/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:when startTime='2007-03-20T19:00:00.000-07:00'
endTime='2007-03-20T21:30:00.000-07:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/b69s2avfi2joigsclecvjlc91g</id>
<published>2007-03-20T21:24:19.000Z</published>
<updated>2007-03-20T21:25:05.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>Dinner with Jane and John</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=YjY5czJhdmZpMmpvaWdzY2xlY3ZqbGM5MWcgZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/b69s2avfi2joigsclecvjlc91g'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/b69s2avfi2joigsclecvjlc91g/63310109105'>
</link>
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/b69s2avfi2joigsclecvjlc91g/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:when startTime='2007-03-22T17:00:00.000-07:00'
endTime='2007-03-22T19:30:00.000-07:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/u9p66kkiotn8bqh9k7j4rcnjjc</id>
<published>2007-03-20T21:24:33.000Z</published>
<updated>2007-03-20T21:24:33.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>Tennis with Elizabeth</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=dTlwNjZra2lvdG44YnFoOWs3ajRyY25qamMgZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/u9p66kkiotn8bqh9k7j4rcnjjc'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/u9p66kkiotn8bqh9k7j4rcnjjc/63310109073'>
</link>
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/u9p66kkiotn8bqh9k7j4rcnjjc/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:when startTime='2007-03-24T10:00:00.000-07:00'
endTime='2007-03-24T11:00:00.000-07:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/76oj2kceidob3s708tvfnuaq3c</id>
<published>2007-03-20T21:24:00.000Z</published>
<updated>2007-03-20T21:24:00.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>Lunch with Jenn</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=NzZvajJrY2VpZG9iM3M3MDh0dmZudWFxM2MgZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/76oj2kceidob3s708tvfnuaq3c'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/76oj2kceidob3s708tvfnuaq3c/63310109040'>
</link>
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/76oj2kceidob3s708tvfnuaq3c/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:when startTime='2007-03-20T11:30:00.000-07:00'
endTime='2007-03-20T12:30:00.000-07:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/5np9ec8m7uoauk1vedh5mhodco</id>
<published>2007-03-20T07:50:02.000Z</published>
<updated>2007-03-20T20:39:26.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>test entry</title>
<content type='text'>test desc</content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=NW5wOWVjOG03dW9hdWsxdmVkaDVtaG9kY28gZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/5np9ec8m7uoauk1vedh5mhodco'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/5np9ec8m7uoauk1vedh5mhodco/63310106366'>
</link>
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/5np9ec8m7uoauk1vedh5mhodco/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.private'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:who rel='http://schemas.google.com/g/2005#event.attendee'
valueString='Vivian Li' email='[email protected]'>
<gd:attendeeStatus value='http://schemas.google.com/g/2005#event.declined'>
</gd:attendeeStatus>
</gd:who>
<gd:who rel='http://schemas.google.com/g/2005#event.organizer'
valueString='GData Ops Demo' email='[email protected]'>
<gd:attendeeStatus value='http://schemas.google.com/g/2005#event.accepted'>
</gd:attendeeStatus>
</gd:who>
<gd:when startTime='2007-03-21T08:00:00.000-07:00'
endTime='2007-03-21T09:00:00.000-07:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where valueString='anywhere'></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/fu6sl0rqakf3o0a13oo1i1a1mg</id>
<published>2007-02-14T23:23:37.000Z</published>
<updated>2007-02-14T23:25:30.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>test</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=ZnU2c2wwcnFha2YzbzBhMTNvbzFpMWExbWcgZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/fu6sl0rqakf3o0a13oo1i1a1mg'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/fu6sl0rqakf3o0a13oo1i1a1mg/63307178730'>
</link>
<link rel="http://schemas.google.com/gCal/2005/webContent" title="World Cup" href="http://www.google.com/calendar/images/google-holiday.gif" type="image/gif">
<gCal:webContent width="276" height="120" url="http://www.google.com/logos/worldcup06.gif" />
</link>
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/fu6sl0rqakf3o0a13oo1i1a1mg/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:when startTime='2007-02-15T08:30:00.000-08:00'
endTime='2007-02-15T09:30:00.000-08:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/h7a0haa4da8sil3rr19ia6luvc</id>
<published>2007-07-16T22:13:28.000Z</published>
<updated>2007-07-16T22:13:29.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event' />
<title type='text'></title>
<content type='text' />
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=aDdhMGhhYTRkYThzaWwzcnIxOWlhNmx1dmMgZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate' />
<link rel='http://schemas.google.com/gCal/2005/webContent'
type='application/x-google-gadgets+xml'
href='http://gdata.ops.demo.googlepages.com/birthdayicon.gif'
title='Date and Time Gadget'>
<gCal:webContent width='300' height='136'
url='http://google.com/ig/modules/datetime.xml'>
<gCal:webContentGadgetPref name='color' value='green' />
</gCal:webContent>
</link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/h7a0haa4da8sil3rr19ia6luvc' />
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/h7a0haa4da8sil3rr19ia6luvc/63320307209' />
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/h7a0haa4da8sil3rr19ia6luvc/comments' />
</gd:comments>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed' />
<gd:visibility value='http://schemas.google.com/g/2005#event.default' />
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque' />
<gd:when startTime='2007-03-14' endTime='2007-03-15' />
<gd:where />
</entry>
</feed>
"""
CALENDAR_BATCH_REQUEST = """<?xml version='1.0' encoding='utf-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:batch='http://schemas.google.com/gdata/batch'
xmlns:gCal='http://schemas.google.com/gCal/2005'>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<entry>
<batch:id>1</batch:id>
<batch:operation type='insert' />
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<title type='text'>Event inserted via batch</title>
</entry>
<entry>
<batch:id>2</batch:id>
<batch:operation type='query' />
<id>http://www.google.com/calendar/feeds/default/private/full/glcs0kv2qqa0gf52qi1jo018gc</id>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<title type='text'>Event queried via batch</title>
</entry>
<entry>
<batch:id>3</batch:id>
<batch:operation type='update' />
<id>http://www.google.com/calendar/feeds/default/private/full/ujm0go5dtngdkr6u91dcqvj0qs</id>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<title type='text'>Event updated via batch</title>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=dWptMGdvNWR0bmdka3I2dTkxZGNxdmowcXMgaGFyaXNodi50ZXN0QG0' title='alternate' />
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/ujm0go5dtngdkr6u91dcqvj0qs' />
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/ujm0go5dtngdkr6u91dcqvj0qs/63326098791' />
</entry>
<entry>
<batch:id>4</batch:id>
<batch:operation type='delete' />
<id>http://www.google.com/calendar/feeds/default/private/full/d8qbg9egk1n6lhsgq1sjbqffqc</id>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<title type='text'>Event deleted via batch</title>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=ZDhxYmc5ZWdrMW42bGhzZ3Exc2picWZmcWMgaGFyaXNodi50ZXN0QG0' title='alternate' />
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/d8qbg9egk1n6lhsgq1sjbqffqc' />
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/d8qbg9egk1n6lhsgq1sjbqffqc/63326018324' />
</entry>
</feed>
"""
CALENDAR_BATCH_RESPONSE = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:batch='http://schemas.google.com/gdata/batch'
xmlns:gCal='http://schemas.google.com/gCal/2005'>
<id>http://www.google.com/calendar/feeds/default/private/full</id>
<updated>2007-09-21T23:01:00.380Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>Batch Feed</title>
<link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full' />
<link rel='http://schemas.google.com/g/2005#post' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full' />
<link rel='http://schemas.google.com/g/2005#batch' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/batch' />
<entry>
<batch:id>1</batch:id>
<batch:status code='201' reason='Created' />
<batch:operation type='insert' />
<id>http://www.google.com/calendar/feeds/default/private/full/n9ug78gd9tv53ppn4hdjvk68ek</id>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<title type='text'>Event inserted via batch</title>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=bjl1Zzc4Z2Q5dHY1M3BwbjRoZGp2azY4ZWsgaGFyaXNodi50ZXN0QG0' title='alternate' />
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/n9ug78gd9tv53ppn4hdjvk68ek' />
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/n9ug78gd9tv53ppn4hdjvk68ek/63326098860' />
</entry>
<entry>
<batch:id>2</batch:id>
<batch:status code='200' reason='Success' />
<batch:operation type='query' />
<id>http://www.google.com/calendar/feeds/default/private/full/glsc0kv2aqa0ff52qi1jo018gc</id>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<title type='text'>Event queried via batch</title>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=Z2xzYzBrdjJhcWEwZmY1MnFpMWpvMDE4Z2MgaGFyaXNodi50ZXN0QG0' title='alternate' />
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/glsc0kv2aqa0ff52qi1jo018gc' />
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/glsc0kv2aqa0ff52qi1jo018gc/63326098791' />
</entry>
<entry xmlns:gCal='http://schemas.google.com/gCal/2005'>
<batch:id>3</batch:id>
<batch:status code='200' reason='Success' />
<batch:operation type='update' />
<id>http://www.google.com/calendar/feeds/default/private/full/ujm0go5dtngdkr6u91dcqvj0qs</id>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<title type='text'>Event updated via batch</title>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=dWptMGdvNWR0bmdka3I2dTkxZGNxdmowcXMgaGFyaXNodi50ZXN0QG0' title='alternate' />
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/ujm0go5dtngdkr6u91dcqvj0qs' />
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/ujm0go5dtngdkr6u91dcqvj0qs/63326098860' />
</entry>
<entry>
<batch:id>4</batch:id>
<batch:status code='200' reason='Success' />
<batch:operation type='delete' />
<id>http://www.google.com/calendar/feeds/default/private/full/d8qbg9egk1n6lhsgq1sjbqffqc</id>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<title type='text'>Event deleted via batch</title>
<content type='text'>Deleted</content>
</entry>
</feed>
"""
GBASE_ATTRIBUTE_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:gm='http://base.google.com/ns-metadata/1.0'>
<id>http://www.google.com/base/feeds/attributes</id>
<updated>2006-11-01T20:35:59.578Z</updated>
<category scheme='http://base.google.com/categories/itemtypes' term='online jobs'></category>
<category scheme='http://base.google.com/categories/itemtypes' term='jobs'></category>
<title type='text'>Attribute histogram for query: [item type:jobs]</title>
<link rel='alternate' type='text/html' href='http://base.google.com'></link>
  <link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml' href='http://www.google.com/base/feeds/attributes'></link>
<link rel='self' type='application/atom+xml' href='http://www.google.com/base/feeds/attributes/-/jobs'></link>
<generator version='1.0' uri='http://base.google.com'>GoogleBase</generator>
<openSearch:totalResults>16</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>16</openSearch:itemsPerPage>
<entry>
<id>http://www.google.com/base/feeds/attributes/job+industry%28text%29N%5Bitem+type%3Ajobs%5D</id>
<updated>2006-11-01T20:36:00.100Z</updated>
<title type='text'>job industry(text)</title>
<content type='text'>Attribute"job industry" of type text.
</content>
    <link rel='self' type='application/atom+xml' href='http://www.google.com/base/feeds/attributes/job+industry%28text%29N%5Bitem+type%3Ajobs%5D'></link>
<gm:attribute name='job industry' type='text' count='4416629'>
<gm:value count='380772'>it internet</gm:value>
<gm:value count='261565'>healthcare</gm:value>
<gm:value count='142018'>information technology</gm:value>
<gm:value count='124622'>accounting</gm:value>
<gm:value count='111311'>clerical and administrative</gm:value>
<gm:value count='82928'>other</gm:value>
<gm:value count='77620'>sales and sales management</gm:value>
<gm:value count='68764'>information systems</gm:value>
<gm:value count='65859'>engineering and architecture</gm:value>
<gm:value count='64757'>sales</gm:value>
</gm:attribute>
</entry>
</feed>
"""
GBASE_ATTRIBUTE_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:gm='http://base.google.com/ns-metadata/1.0'>
<id>http://www.google.com/base/feeds/attributes/job+industry%28text%29N%5Bitem+type%3Ajobs%5D</id>
<updated>2006-11-01T20:36:00.100Z</updated>
<title type='text'>job industry(text)</title>
<content type='text'>Attribute"job industry" of type text.
</content>
<link rel='self' type='application/atom+xml' href='http://www.google.com/base/feeds/attributes/job+industry%28text%29N%5Bitem+type%3Ajobs%5D'></link>
<gm:attribute name='job industry' type='text' count='4416629'>
<gm:value count='380772'>it internet</gm:value>
<gm:value count='261565'>healthcare</gm:value>
<gm:value count='142018'>information technology</gm:value>
<gm:value count='124622'>accounting</gm:value>
<gm:value count='111311'>clerical and administrative</gm:value>
<gm:value count='82928'>other</gm:value>
<gm:value count='77620'>sales and sales management</gm:value>
<gm:value count='68764'>information systems</gm:value>
<gm:value count='65859'>engineering and architecture</gm:value>
<gm:value count='64757'>sales</gm:value>
</gm:attribute>
</entry>
"""
GBASE_LOCALES_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:gm='http://base.google.com/ns-metadata/1.0'>
<id>http://www.google.com/base/feeds/locales/</id>
<updated>2006-06-13T18:11:40.120Z</updated>
<title type="text">Locales</title>
<link rel="alternate" type="text/html" href="http://base.google.com"/>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml"
href="http://www.google.com/base/feeds/locales/"/>
<link rel="self" type="application/atom+xml" href="http://www.google.com/base/feeds/locales/"/>
<author>
<name>Google Inc.</name>
<email>[email protected]</email>
</author>
<generator version="1.0" uri="http://base.google.com">GoogleBase</generator>
<openSearch:totalResults>3</openSearch:totalResults>
<openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<entry>
<id>http://www.google.com/base/feeds/locales/en_US</id>
<updated>2006-03-27T22:27:36.658Z</updated>
<category scheme="http://base.google.com/categories/locales" term="en_US"/>
<title type="text">en_US</title>
<content type="text">en_US</content>
<link rel="self" type="application/atom+xml"
href="http://www.google.com/base/feeds/locales/en_US"></link>
<link rel="related" type="application/atom+xml"
href="http://www.google.com/base/feeds/itemtypes/en_US" title="Item types in en_US"/>
</entry>
<entry>
<id>http://www.google.com/base/feeds/locales/en_GB</id>
<updated>2006-06-13T18:14:18.601Z</updated>
<category scheme="http://base.google.com/categories/locales" term="en_GB"/>
<title type="text">en_GB</title>
<content type="text">en_GB</content>
<link rel="related" type="application/atom+xml"
href="http://www.google.com/base/feeds/itemtypes/en_GB" title="Item types in en_GB"/>
<link rel="self" type="application/atom+xml"
href="http://www.google.com/base/feeds/locales/en_GB"/>
</entry>
<entry>
<id>http://www.google.com/base/feeds/locales/de_DE</id>
<updated>2006-06-13T18:14:18.601Z</updated>
<category scheme="http://base.google.com/categories/locales" term="de_DE"/>
<title type="text">de_DE</title>
<content type="text">de_DE</content>
<link rel="related" type="application/atom+xml"
href="http://www.google.com/base/feeds/itemtypes/de_DE" title="Item types in de_DE"/>
<link rel="self" type="application/atom+xml"
href="http://www.google.com/base/feeds/locales/de_DE"/>
</entry>
</feed>"""
GBASE_STRING_ENCODING_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns='http://www.w3.org/2005/Atom' xmlns:gm='http://base.google.com/ns-metadata/1.0'
xmlns:g='http://base.google.com/ns/1.0' xmlns:batch='http://schemas.google.com/gdata/batch'>
<id>http://www.google.com/base/feeds/snippets/17495780256183230088</id>
<published>2007-12-09T03:13:07.000Z</published>
<updated>2008-01-07T03:26:46.000Z</updated>
<category scheme='http://base.google.com/categories/itemtypes' term='Products'/>
<title type='text'>Digital Camera Cord Fits SONY Cybershot DSC-R1 S40</title>
<content type='html'>SONY \xC2\xB7 Cybershot Digital Camera Usb Cable DESCRIPTION
This is a 2.5 USB 2.0 A to Mini B (5 Pin) high quality digital camera
cable used for connecting your Sony Digital Cameras and Camcoders. Backward
Compatible with USB 2.0, 1.0 and 1.1. Fully ...</content>
<link rel='alternate' type='text/html'
href='http://adfarm.mediaplex.com/ad/ck/711-5256-8196-2?loc=http%3A%2F%2Fcgi.ebay.com%2FDigital-Camera-Cord-Fits-SONY-Cybershot-DSC-R1-S40_W0QQitemZ270195049057QQcmdZViewItem'/>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/base/feeds/snippets/17495780256183230088'/>
<author>
<name>eBay</name>
</author>
<g:item_type type='text'>Products</g:item_type>
<g:item_language type='text'>EN</g:item_language>
<g:target_country type='text'>US</g:target_country>
<g:price type='floatUnit'>0.99 usd</g:price>
<g:image_link type='url'>http://thumbs.ebaystatic.com/pict/270195049057_1.jpg</g:image_link>
<g:category type='text'>Cameras &amp; Photo>Digital Camera Accessories>Cables</g:category>
<g:category type='text'>Cords &amp; Connectors>USB Cables>For Other Brands</g:category>
<g:customer_id type='int'>11729</g:customer_id>
<g:id type='text'>270195049057</g:id>
<g:expiration_date type='dateTime'>2008-02-06T03:26:46Z</g:expiration_date>
</entry>"""
RECURRENCE_EXCEPTION_ENTRY = """<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:gd='http://schemas.google.com/g/2005'
xmlns:gCal='http://schemas.google.com/gCal/2005'>
<id>
http://www.google.com/calendar/feeds/default/private/composite/i7lgfj69mjqjgnodklif3vbm7g</id>
<published>2007-04-05T21:51:49.000Z</published>
<updated>2007-04-05T21:51:49.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>testDavid</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=aTdsZ2ZqNjltanFqZ25vZGtsaWYzdmJtN2dfMjAwNzA0MDNUMTgwMDAwWiBnZGF0YS5vcHMudGVzdEBt'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/composite/i7lgfj69mjqjgnodklif3vbm7g'>
</link>
<author>
<name>gdata ops</name>
<email>[email protected]</email>
</author>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gCal:sendEventNotifications value='true'>
</gCal:sendEventNotifications>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:recurrence>DTSTART;TZID=America/Anchorage:20070403T100000
DTEND;TZID=America/Anchorage:20070403T110000
RRULE:FREQ=DAILY;UNTIL=20070408T180000Z;WKST=SU
EXDATE;TZID=America/Anchorage:20070407T100000
EXDATE;TZID=America/Anchorage:20070405T100000
EXDATE;TZID=America/Anchorage:20070404T100000 BEGIN:VTIMEZONE
TZID:America/Anchorage X-LIC-LOCATION:America/Anchorage
BEGIN:STANDARD TZOFFSETFROM:-0800 TZOFFSETTO:-0900 TZNAME:AKST
DTSTART:19701025T020000 RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU
END:STANDARD BEGIN:DAYLIGHT TZOFFSETFROM:-0900 TZOFFSETTO:-0800
TZNAME:AKDT DTSTART:19700405T020000
RRULE:FREQ=YEARLY;BYMONTH=4;BYDAY=1SU END:DAYLIGHT
END:VTIMEZONE</gd:recurrence>
<gd:where valueString=''></gd:where>
<gd:reminder minutes='10'></gd:reminder>
<gd:recurrenceException specialized='true'>
<gd:entryLink>
<entry>
<id>i7lgfj69mjqjgnodklif3vbm7g_20070407T180000Z</id>
<published>2007-04-05T21:51:49.000Z</published>
<updated>2007-04-05T21:52:58.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>testDavid</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=aTdsZ2ZqNjltanFqZ25vZGtsaWYzdmJtN2dfMjAwNzA0MDdUMTgwMDAwWiBnZGF0YS5vcHMudGVzdEBt'
title='alternate'></link>
<author>
<name>gdata ops</name>
<email>[email protected]</email>
</author>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gd:originalEvent id='i7lgfj69mjqjgnodklif3vbm7g'
href='http://www.google.com/calendar/feeds/default/private/composite/i7lgfj69mjqjgnodklif3vbm7g'>
<gd:when startTime='2007-04-07T13:00:00.000-05:00'>
</gd:when>
</gd:originalEvent>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.canceled'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/i7lgfj69mjqjgnodklif3vbm7g_20070407T180000Z/comments'>
<feed>
<updated>2007-04-05T21:54:09.285Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#message'>
</category>
<title type='text'>Comments for: testDavid</title>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/feeds/default/private/full/i7lgfj69mjqjgnodklif3vbm7g_20070407T180000Z/comments'
title='alternate'></link>
</feed>
</gd:feedLink>
</gd:comments>
<gd:when startTime='2007-04-07T13:00:00.000-05:00'
endTime='2007-04-07T14:00:00.000-05:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where valueString=''></gd:where>
</entry>
</gd:entryLink>
</gd:recurrenceException>
</entry>"""
NICK_ENTRY = """<?xml version="1.0" encoding="UTF-8"?>
<atom:entry xmlns:atom="http://www.w3.org/2005/Atom"
xmlns:apps="http://schemas.google.com/apps/2006"
xmlns:gd="http://schemas.google.com/g/2005">
<atom:id>https://apps-apis.google.com/a/feeds/example.com/nickname/2.0/Foo</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#nickname'/>
<atom:title type="text">Foo</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/example.com/nickname/2.0/Foo"/>
<atom:link rel="edit" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/example.com/nickname/2.0/Foo"/>
<apps:nickname name="Foo"/>
<apps:login userName="TestUser"/>
</atom:entry>"""
NICK_FEED = """<?xml version="1.0" encoding="UTF-8"?>
<atom:feed xmlns:atom="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:apps="http://schemas.google.com/apps/2006">
<atom:id>
http://apps-apis.google.com/a/feeds/example.com/nickname/2.0
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#nickname'/>
<atom:title type="text">Nicknames for user SusanJones</atom:title>
<atom:link rel='http://schemas.google.com/g/2005#feed'
type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/nickname/2.0"/>
<atom:link rel='http://schemas.google.com/g/2005#post'
type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/nickname/2.0"/>
<atom:link rel="self" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/nickname/2.0?username=TestUser"/>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>2</openSearch:itemsPerPage>
<atom:entry>
<atom:id>
http://apps-apis.google.com/a/feeds/example.com/nickname/2.0/Foo
</atom:id>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#nickname'/>
<atom:title type="text">Foo</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/nickname/2.0/Foo"/>
<atom:link rel="edit" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/nickname/2.0/Foo"/>
<apps:nickname name="Foo"/>
<apps:login userName="TestUser"/>
</atom:entry>
<atom:entry>
<atom:id>
http://apps-apis.google.com/a/feeds/example.com/nickname/2.0/suse
</atom:id>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#nickname'/>
<atom:title type="text">suse</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/nickname/2.0/Bar"/>
<atom:link rel="edit" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/nickname/2.0/Bar"/>
<apps:nickname name="Bar"/>
<apps:login userName="TestUser"/>
</atom:entry>
</atom:feed>"""
USER_ENTRY = """<?xml version="1.0" encoding="UTF-8"?>
<atom:entry xmlns:atom="http://www.w3.org/2005/Atom"
xmlns:apps="http://schemas.google.com/apps/2006"
xmlns:gd="http://schemas.google.com/g/2005">
<atom:id>https://apps-apis.google.com/a/feeds/example.com/user/2.0/TestUser</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#user'/>
<atom:title type="text">TestUser</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/example.com/user/2.0/TestUser"/>
<atom:link rel="edit" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/example.com/user/2.0/TestUser"/>
<apps:login userName="TestUser" password="password" suspended="false"
ipWhitelisted='false' hashFunctionName="SHA-1"/>
<apps:name familyName="Test" givenName="User"/>
<apps:quota limit="1024"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#user.nicknames'
href="https://apps-apis.google.com/a/feeds/example.com/nickname/2.0?username=Test-3121"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#user.emailLists'
href="https://apps-apis.google.com/a/feeds/example.com/emailList/[email protected]"/>
</atom:entry>"""
USER_FEED = """<?xml version="1.0" encoding="UTF-8"?>
<atom:feed xmlns:atom="http://www.w3.org/2005/Atom"
xmlns:apps="http://schemas.google.com/apps/2006"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:gd="http://schemas.google.com/g/2005">
<atom:id>
http://apps-apis.google.com/a/feeds/example.com/user/2.0
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#user'/>
<atom:title type="text">Users</atom:title>
<atom:link rel="next" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/user/2.0?startUsername=john"/>
<atom:link rel='http://schemas.google.com/g/2005#feed'
type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/user/2.0"/>
<atom:link rel='http://schemas.google.com/g/2005#post'
type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/user/2.0"/>
<atom:link rel="self" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/user/2.0"/>
<openSearch:startIndex>1</openSearch:startIndex>
<atom:entry>
<atom:id>
http://apps-apis.google.com/a/feeds/example.com/user/2.0/TestUser
</atom:id>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#user'/>
<atom:title type="text">TestUser</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/user/2.0/TestUser"/>
<atom:link rel="edit" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/user/2.0/TestUser"/>
<gd:who rel='http://schemas.google.com/apps/2006#user.recipient'
email="[email protected]"/>
<apps:login userName="TestUser" suspended="false"/>
<apps:quota limit="2048"/>
<apps:name familyName="Test" givenName="User"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#user.nicknames'
href="http://apps-apis.google.com/a/feeds/example.com/nickname/2.0?username=TestUser"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#user.emailLists'
href="http://apps-apis.google.com/a/feeds/example.com/emailList/[email protected]"/>
</atom:entry>
<atom:entry>
<atom:id>
http://apps-apis.google.com/a/feeds/example.com/user/2.0/JohnSmith
</atom:id>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#user'/>
<atom:title type="text">JohnSmith</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/user/2.0/JohnSmith"/>
<atom:link rel="edit" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/user/2.0/JohnSmith"/>
<gd:who rel='http://schemas.google.com/apps/2006#user.recipient'
email="[email protected]"/>
<apps:login userName="JohnSmith" suspended="false"/>
<apps:quota limit="2048"/>
<apps:name familyName="Smith" givenName="John"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#user.nicknames'
href="http://apps-apis.google.com/a/feeds/example.com/nickname/2.0?username=JohnSmith"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#user.emailLists'
href="http://apps-apis.google.com/a/feeds/example.com/emailList/[email protected]"/>
</atom:entry>
</atom:feed>"""
EMAIL_LIST_ENTRY = """<?xml version="1.0" encoding="UTF-8"?>
<atom:entry xmlns:atom="http://www.w3.org/2005/Atom"
xmlns:apps="http://schemas.google.com/apps/2006"
xmlns:gd="http://schemas.google.com/g/2005">
<atom:id>
https://apps-apis.google.com/a/feeds/example.com/emailList/2.0/testlist
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#emailList'/>
<atom:title type="text">testlist</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/example.com/emailList/2.0/testlist"/>
<atom:link rel="edit" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/example.com/emailList/2.0/testlist"/>
<apps:emailList name="testlist"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#emailList.recipients'
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/testlist/recipient/"/>
</atom:entry>"""
EMAIL_LIST_FEED = """<?xml version="1.0" encoding="UTF-8"?>
<atom:feed xmlns:atom="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:apps="http://schemas.google.com/apps/2006"
xmlns:gd="http://schemas.google.com/g/2005">
<atom:id>
http://apps-apis.google.com/a/feeds/example.com/emailList/2.0
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#emailList'/>
<atom:title type="text">EmailLists</atom:title>
<atom:link rel="next" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0?startEmailListName=john"/>
<atom:link rel='http://schemas.google.com/g/2005#feed'
type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0"/>
<atom:link rel='http://schemas.google.com/g/2005#post'
type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0"/>
<atom:link rel="self" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0"/>
<openSearch:startIndex>1</openSearch:startIndex>
<atom:entry>
<atom:id>
http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#emailList'/>
<atom:title type="text">us-sales</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales"/>
<atom:link rel="edit" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales"/>
<apps:emailList name="us-sales"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#emailList.recipients'
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/"/>
</atom:entry>
<atom:entry>
<atom:id>
http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-eng
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#emailList'/>
<atom:title type="text">us-eng</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-eng"/>
<atom:link rel="edit" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-eng"/>
<apps:emailList name="us-eng"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#emailList.recipients'
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-eng/recipient/"/>
</atom:entry>
</atom:feed>"""
EMAIL_LIST_RECIPIENT_ENTRY = """<?xml version="1.0" encoding="UTF-8"?>
<atom:entry xmlns:atom="http://www.w3.org/2005/Atom"
xmlns:apps="http://schemas.google.com/apps/2006"
xmlns:gd="http://schemas.google.com/g/2005">
<atom:id>https://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/TestUser%40example.com</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#emailList.recipient'/>
<atom:title type="text">TestUser</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/TestUser%40example.com"/>
<atom:link rel="edit" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/TestUser%40example.com"/>
<gd:who email="[email protected]"/>
</atom:entry>"""
EMAIL_LIST_RECIPIENT_FEED = """<?xml version="1.0" encoding="UTF-8"?>
<atom:feed xmlns:atom="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:gd="http://schemas.google.com/g/2005">
<atom:id>
http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#emailList.recipient'/>
<atom:title type="text">Recipients for email list us-sales</atom:title>
<atom:link rel="next" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/[email protected]"/>
<atom:link rel='http://schemas.google.com/g/2005#feed'
type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient"/>
<atom:link rel='http://schemas.google.com/g/2005#post'
type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient"/>
<atom:link rel="self" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient"/>
<openSearch:startIndex>1</openSearch:startIndex>
<atom:entry>
<atom:id>
http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/joe%40example.com
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#emailList.recipient'/>
<atom:title type="text">[email protected]</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/joe%40example.com"/>
<atom:link rel="edit" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/joe%40example.com"/>
<gd:who email="[email protected]"/>
</atom:entry>
<atom:entry>
<atom:id>
http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/susan%40example.com
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#emailList.recipient'/>
<atom:title type="text">[email protected]</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/susan%40example.com"/>
<atom:link rel="edit" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/susan%40example.com"/>
<gd:who email="[email protected]"/>
</atom:entry>
</atom:feed>"""
ACL_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:gAcl='http://schemas.google.com/acl/2007'>
<id>http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full</id>
<updated>2007-04-21T00:52:04.000Z</updated>
<title type='text'>Elizabeth Bennet's access control list</title>
<link rel='http://schemas.google.com/acl/2007#controlledObject'
type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/private/full'>
</link>
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full'>
</link>
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full'>
</link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full'>
</link>
<generator version='1.0'
uri='http://www.google.com/calendar'>Google Calendar</generator>
<openSearch:totalResults>2</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/user%3Aliz%40gmail.com</id>
<updated>2007-04-21T00:52:04.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/acl/2007#accessRule'>
</category>
<title type='text'>owner</title>
<content type='text'></content>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/user%3Aliz%40gmail.com'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/user%3Aliz%40gmail.com'>
</link>
<author>
<name>Elizabeth Bennet</name>
<email>[email protected]</email>
</author>
<gAcl:scope type='user' value='[email protected]'></gAcl:scope>
<gAcl:role value='http://schemas.google.com/gCal/2005#owner'>
</gAcl:role>
</entry>
<entry>
<id>http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/default</id>
<updated>2007-04-21T00:52:04.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/acl/2007#accessRule'>
</category>
<title type='text'>read</title>
<content type='text'></content>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/default'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/default'>
</link>
<author>
<name>Elizabeth Bennet</name>
<email>[email protected]</email>
</author>
<gAcl:scope type='default'></gAcl:scope>
<gAcl:role value='http://schemas.google.com/gCal/2005#read'>
</gAcl:role>
</entry>
</feed>"""
ACL_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:gd='http://schemas.google.com/g/2005' xmlns:gCal='http://schemas.google.com/gCal/2005' xmlns:gAcl='http://schemas.google.com/acl/2007'>
<id>http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/user%3Aliz%40gmail.com</id>
<updated>2007-04-21T00:52:04.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/acl/2007#accessRule'>
</category>
<title type='text'>owner</title>
<content type='text'></content>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/user%3Aliz%40gmail.com'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/user%3Aliz%40gmail.com'>
</link>
<author>
<name>Elizabeth Bennet</name>
<email>[email protected]</email>
</author>
<gAcl:scope type='user' value='[email protected]'></gAcl:scope>
<gAcl:role value='http://schemas.google.com/gCal/2005#owner'>
</gAcl:role>
</entry>"""
DOCUMENT_LIST_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<ns0:feed xmlns:ns0="http://www.w3.org/2005/Atom" xmlns:ns2="http://schemas.google.com/g/2005" xmlns:ns3="http://schemas.google.com/docs/2007"><ns1:totalResults
xmlns:ns1="http://a9.com/-/spec/opensearchrss/1.0/">2</ns1:totalResults><ns1:startIndex
xmlns:ns1="http://a9.com/-/spec/opensearchrss/1.0/">1</ns1:startIndex><ns0:entry><ns0:content
src="https://foo.com/fm?fmcmd=102&key=supercalifragilisticexpeadocious"
type="text/html"
/><ns0:author><ns0:name>test.user</ns0:name><ns0:email>[email protected]</ns0:email></ns0:author><ns0:category
label="spreadsheet" scheme="http://schemas.google.com/g/2005#kind"
term="http://schemas.google.com/docs/2007#spreadsheet"
/><ns0:id>https://docs.google.com/feeds/documents/private/full/spreadsheet%3Asupercalifragilisticexpeadocious</ns0:id><ns0:link
href="https://foo.com/ccc?key=supercalifragilisticexpeadocious" rel="alternate"
type="text/html" /><ns0:link
href="https://foo.com/feeds/worksheets/supercalifragilisticexpeadocious/private/full"
rel="http://schemas.google.com/spreadsheets/2006#worksheetsfeed"
type="application/atom+xml" /><ns0:link
href="https://docs.google.com/feeds/documents/private/full/spreadsheet%3Asupercalifragilisticexpeadocious"
rel="self" type="application/atom+xml" /><ns0:title type="text">Test Spreadsheet</ns0:title><ns0:updated>2007-07-03T18:03:32.045Z</ns0:updated>
<ns2:feedLink href="http://docs.google.com/feeds/acl/private/full/spreadsheet%3Afoofoofoo" rel="http://schemas.google.com/acl/2007#accessControlList"/>
<ns2:resourceId>document:dfrkj84g_3348jbxpxcd</ns2:resourceId>
<ns2:lastModifiedBy>
<ns0:name>test.user</ns0:name>
<ns0:email>[email protected]</ns0:email>
</ns2:lastModifiedBy>
<ns2:lastViewed>2009-03-05T07:48:21.493Z</ns2:lastViewed>
<ns3:writersCanInvite value='true'/>
</ns0:entry><ns0:entry><ns0:content
src="http://docs.google.com/RawDocContents?action=fetch&docID=gr00vy"
type="text/html"
/><ns0:author><ns0:name>test.user</ns0:name><ns0:email>[email protected]</ns0:email></ns0:author><ns0:category
label="document" scheme="http://schemas.google.com/g/2005#kind"
term="http://schemas.google.com/docs/2007#document"
/><ns0:id>http://docs.google.com/feeds/documents/private/full/document%3Agr00vy</ns0:id><ns0:link
href="http://foobar.com/Doc?id=gr00vy" rel="alternate" type="text/html"
/><ns0:link
href="http://docs.google.com/feeds/documents/private/full/document%3Agr00vy"
rel="self" type="application/atom+xml" /><ns0:title type="text">Test Document</ns0:title><ns0:updated>2007-07-03T18:02:50.338Z</ns0:updated>
<ns2:feedLink href="http://docs.google.com/feeds/acl/private/full/document%3Afoofoofoo" rel="http://schemas.google.com/acl/2007#accessControlList"/>
<ns2:lastModifiedBy>
<ns0:name>test.user</ns0:name>
<ns0:email>[email protected]</ns0:email>
</ns2:lastModifiedBy>
<ns3:writersCanInvite value='false'/>
<ns2:lastViewed>2009-03-05T07:48:21.493Z</ns2:lastViewed>
</ns0:entry><ns0:id>http://docs.google.com/feeds/documents/private/full</ns0:id><ns0:link
href="http://docs.google.com" rel="alternate" type="text/html" /><ns0:link
href="http://docs.google.com/feeds/documents/private/full"
rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml"
/><ns0:link href="http://docs.google.com/feeds/documents/private/full"
rel="http://schemas.google.com/g/2005#post" type="application/atom+xml"
/><ns0:link href="http://docs.google.com/feeds/documents/private/full"
rel="self" type="application/atom+xml" /><ns0:title type="text">Available
Documents -
[email protected]</ns0:title><ns0:updated>2007-07-09T23:07:21.898Z</ns0:updated>
</ns0:feed>
"""
DOCUMENT_LIST_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>
<ns0:entry xmlns:ns0="http://www.w3.org/2005/Atom" xmlns:ns1="http://schemas.google.com/g/2005" xmlns:ns2="http://schemas.google.com/docs/2007"><ns0:content
src="https://foo.com/fm?fmcmd=102&key=supercalifragilisticexpealidocious" type="text/html"/>
<ns0:author><ns0:name>test.user</ns0:name><ns0:email>[email protected]</ns0:email></ns0:author>
<ns0:category label="spreadsheet" scheme="http://schemas.google.com/g/2005#kind"
term="http://schemas.google.com/docs/2007#spreadsheet"
/><ns0:id>https://docs.google.com/feeds/documents/private/full/spreadsheet%3Asupercalifragilisticexpealidocious</ns0:id>
<ns0:link href="https://foo.com/ccc?key=supercalifragilisticexpealidocious"
rel="alternate" type="text/html" /><ns0:link
href="https://foo.com/feeds/worksheets/supercalifragilisticexpealidocious/private/full"
rel="http://schemas.google.com/spreadsheets/2006#worksheetsfeed"
type="application/atom+xml" /><ns0:link
href="https://docs.google.com/feeds/documents/private/full/spreadsheet%3Asupercalifragilisticexpealidocious"
rel="self" type="application/atom+xml" />
<ns0:title type="text">Test Spreadsheet</ns0:title><ns0:updated>2007-07-03T18:03:32.045Z</ns0:updated>
<ns1:resourceId>spreadsheet:supercalifragilisticexpealidocious</ns1:resourceId>
<ns1:lastModifiedBy>
<ns0:name>test.user</ns0:name>
<ns0:email>[email protected]</ns0:email>
</ns1:lastModifiedBy>
<ns1:lastViewed>2009-03-05T07:48:21.493Z</ns1:lastViewed>
<ns2:writersCanInvite value='true'/>
</ns0:entry>
"""
DOCUMENT_LIST_ENTRY_V3 = """<?xml version='1.0' encoding='UTF-8'?>
<ns0:entry xmlns:ns0="http://www.w3.org/2005/Atom" xmlns:ns1="http://schemas.google.com/g/2005" xmlns:ns2="http://schemas.google.com/docs/2007"><ns0:content
src="https://foo.com/fm?fmcmd=102&key=supercalifragilisticexpealidocious" type="text/html"/>
<ns0:author><ns0:name>test.user</ns0:name><ns0:email>[email protected]</ns0:email></ns0:author>
<ns0:category label="spreadsheet" scheme="http://schemas.google.com/g/2005#kind"
term="http://schemas.google.com/docs/2007#spreadsheet"
/><ns0:id>https://docs.google.com/feeds/documents/private/full/spreadsheet%3Asupercalifragilisticexpealidocious</ns0:id>
<ns0:link href="https://foo.com/ccc?key=supercalifragilisticexpealidocious"
rel="alternate" type="text/html" /><ns0:link
href="https://foo.com/feeds/worksheets/supercalifragilisticexpealidocious/private/full"
rel="http://schemas.google.com/spreadsheets/2006#worksheetsfeed"
type="application/atom+xml" /><ns0:link
href="https://docs.google.com/feeds/documents/private/full/spreadsheet%3Asupercalifragilisticexpealidocious"
rel="self" type="application/atom+xml" />
<ns0:link rel="http://schemas.google.com/docs/2007#parent" type="application/atom+xml"
href="http://docs.google.com/feeds/default/private/full/folder%3A12345" title="AFolderName" />
<ns0:title type="text">Test Spreadsheet</ns0:title><ns0:updated>2007-07-03T18:03:32.045Z</ns0:updated>
<ns1:resourceId>spreadsheet:supercalifragilisticexpealidocious</ns1:resourceId>
<ns1:lastModifiedBy>
<ns0:name>test.user</ns0:name>
<ns0:email>[email protected]</ns0:email>
</ns1:lastModifiedBy>
<ns1:lastViewed>2009-03-05T07:48:21.493Z</ns1:lastViewed>
<ns2:writersCanInvite value='true'/>
<ns1:quotaBytesUsed>1000</ns1:quotaBytesUsed>
<ns1:feedLink rel="http://schemas.google.com/acl/2007#accessControlList" href="https://docs.google.com/feeds/default/private/full/spreadsheet%3Asupercalifragilisticexpealidocious/acl" />
<ns1:feedLink rel="http://schemas.google.com/docs/2007/revisions" href="https://docs.google.com/feeds/default/private/full/spreadsheet%3Asupercalifragilisticexpealidocious/revisions" />
<ns0:category scheme="http://schemas.google.com/g/2005/labels" term="http://schemas.google.com/g/2005/labels#starred" label="starred"/>
<ns0:category scheme="http://schemas.google.com/g/2005/labels" term="http://schemas.google.com/g/2005/labels#viewed" label="viewed"/>
<ns0:category scheme="http://schemas.google.com/g/2005/labels" term="http://schemas.google.com/g/2005/labels#hidden" label="hidden"/>
<ns0:category scheme="http://schemas.google.com/g/2005/labels" term="http://schemas.google.com/g/2005/labels#trashed" label="trashed"/>
<ns0:category scheme="http://schemas.google.com/g/2005/labels" term="http://schemas.google.com/g/2005/labels#mine" label="mine"/>
<ns0:category scheme="http://schemas.google.com/g/2005/labels" term="http://schemas.google.com/g/2005/labels#private" label="private"/>
<ns0:category scheme="http://schemas.google.com/g/2005/labels" term="http://schemas.google.com/g/2005/labels#shared-with-domain" label="shared-with-domain"/>
<ns0:category scheme="http://schemas.google.com/g/2005/labels" term="http://schemas.google.com/g/2005/labels#restricted-download" label="restricted-download"/>
</ns0:entry>
"""
DOCUMENT_LIST_ACL_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns="http://www.w3.org/2005/Atom"
xmlns:gAcl='http://schemas.google.com/acl/2007'>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/acl/2007#accessRule'/>
<gAcl:role value='writer'/>
<gAcl:scope type='user' value='[email protected]'/>
</entry>"""
DOCUMENT_LIST_ACL_WITHKEY_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns="http://www.w3.org/2005/Atom"
xmlns:gAcl='http://schemas.google.com/acl/2007'>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/acl/2007#accessRule'/>
<gAcl:withKey key='somekey'><gAcl:role value='writer' /></gAcl:withKey>
<gAcl:scope type='domain' value='example.com' />
</entry>"""
DOCUMENT_LIST_ACL_ADDITIONAL_ROLE_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns="http://www.w3.org/2005/Atom"
xmlns:gAcl='http://schemas.google.com/acl/2007'>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/acl/2007#accessRule'/>
<gAcl:additionalRole value='commenter' />
<gAcl:withKey key='somekey'>
<gAcl:role value='writer' />
<gAcl:additionalRole value='commenter' />
</gAcl:withKey>
<gAcl:scope type='domain' value='example.com' />
</entry>"""
DOCUMENT_LIST_ACL_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:gAcl="http://schemas.google.com/acl/2007"
xmlns:batch="http://schemas.google.com/gdata/batch">
<id>http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8feTQYCgZpwUQ</id>
<updated>2009-02-22T03:48:25.895Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/acl/2007#accessRule"/>
<title type="text">Document Permissions</title>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8feTQYCgZpwUQ"/>
<link rel="http://schemas.google.com/g/2005#post" type="application/atom+xml" href="http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8feTQYCgZpwUQ"/>
<link rel="http://schemas.google.com/g/2005#batch" type="application/atom+xml" href="http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8feTQYCgZpwUQ/batch"/>
<link rel="self" type="application/atom+xml" href="http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8feTQYCgZpwUQ"/>
<openSearch:totalResults>2</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8feTQp4pwUwUQ/user%3Auser%40gmail.com</id>
<updated>2009-02-22T03:48:25.896Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/acl/2007#accessRule"/>
<title type="text">Document Permission - [email protected]</title>
<link rel="self" type="application/atom+xml" href="http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8feTQp4pwUwUQ/user%3Auser%40gmail.com"/>
<link rel="edit" type="application/atom+xml" href="http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8feTQp4pwUwUQ/user%3Auser%40gmail.com"/>
<gAcl:role value="owner"/>
<gAcl:scope type="user" value="[email protected]"/>
</entry>
<entry>
<id>http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8fCgZp4pwUwUQ/user%3Auser2%40google.com</id>
<updated>2009-02-22T03:48:26.257Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/acl/2007#accessRule"/>
<title type="text">Document Permission - [email protected]</title>
<link rel="self" type="application/atom+xml" href="http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8feTQYCgZp4pwUwUQ/user%3Auser2%40google.com"/>
<link rel="edit" type="application/atom+xml" href="http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8feTQYCgZp4pwUwUQ/user%3Auser2%40google.com"/>
<gAcl:role value="writer"/>
<gAcl:scope type="domain" value="google.com"/>
</entry>
</feed>"""
DOCUMENT_LIST_REVISION_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/"
xmlns:batch="http://schemas.google.com/gdata/batch" xmlns:gd="http://schemas.google.com/g/2005"
xmlns:docs="http://schemas.google.com/docs/2007"
gd:etag="W/"CE4HQX08cCt7ImA9WxNTFEU."">
<id>https://docs.google.com/feeds/default/private/full/resource_id/revisions</id>
<updated>2009-08-17T04:22:10.378Z</updated>
<title>Document Revisions</title>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml"
href="https://docs.google.com/feeds/default/private/full/resource_id/revisions"/>
<link rel="http://schemas.google.com/g/2005#batch" type="application/atom+xml"
href="https://docs.google.com/feeds/default/private/full/resource_id/revisions/batch"/>
<link rel="self" type="application/atom+xml"
href="https://docs.google.com/feeds/default/private/full/resource_id/revisions"/>
<openSearch:totalResults>6</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>https://docs.google.com/feeds/id/resource_id/revisions/2</id>
<updated>2009-08-17T04:22:10.440Z</updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">2009-08-14T07:11:34.197Z</app:edited>
<title>Revision 2</title>
<content type="text/html" src="https://docs.google.com/feeds/download/documents/Export?docId=doc_id&revision=2"/>
<link rel="alternate" type="text/html"
href="https://docs.google.com/Doc?id=doc_id&revision=2"/>
<link rel="self" type="application/atom+xml"
href="https://docs.google.com/feeds/default/private/full/resource_id/revisions/2"/>
<link rel='http://schemas.google.com/docs/2007#publish' type='text/html' href='https://docs.google.com/View?docid=dfr4&amp;pageview=1&amp;hgd=1'/>
<author>
<name>another_user</name>
<email>[email protected]</email>
</author>
<docs:publish value="true"/>
<docs:publishAuto value="true"/>
<docs:publishOutsideDomain value="false"/>
</entry>
</feed>
"""
DOCUMENT_LIST_METADATA = """
<entry xmlns="http://www.w3.org/2005/Atom"
xmlns:docs="http://schemas.google.com/docs/2007"
xmlns:gd="http://schemas.google.com/g/2005"
xmlns:gAcl="http://schemas.google.com/acl/2007"
gd:etag="W/"AkYNRnc_eSt7ImA9WxBUFks."">
<docs:additionalRoleInfo kind='document'>
<docs:additionalRoleSet primaryRole='reader'>
<gAcl:additionalRole value='commenter' />
</docs:additionalRoleSet>
</docs:additionalRoleInfo>
</entry>
"""
BATCH_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns="http://www.w3.org/2005/Atom"
xmlns:batch="http://schemas.google.com/gdata/batch"
xmlns:g="http://base.google.com/ns/1.0">
<id>http://www.google.com/base/feeds/items/2173859253842813008</id>
<published>2006-07-11T14:51:43.560Z</published>
<updated>2006-07-11T14:51:43.560Z</updated>
<title type="text">title</title>
<content type="html">content</content>
<link rel="self"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items/2173859253842813008"/>
<link rel="edit"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items/2173859253842813008"/>
<g:item_type>recipes</g:item_type>
<batch:operation type="insert"/>
<batch:id>itemB</batch:id>
<batch:status code="201" reason="Created"/>
</entry>"""
BATCH_FEED_REQUEST = """<?xml version="1.0" encoding="UTF-8"?>
<feed
xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:g="http://base.google.com/ns/1.0"
xmlns:batch="http://schemas.google.com/gdata/batch">
<title type="text">My Batch Feed</title>
<entry>
<id>http://www.google.com/base/feeds/items/13308004346459454600</id>
<batch:operation type="delete"/>
</entry>
<entry>
<id>http://www.google.com/base/feeds/items/17437536661927313949</id>
<batch:operation type="delete"/>
</entry>
<entry>
<title type="text">...</title>
<content type="html">...</content>
<batch:id>itemA</batch:id>
<batch:operation type="insert"/>
<g:item_type>recipes</g:item_type>
</entry>
<entry>
<title type="text">...</title>
<content type="html">...</content>
<batch:id>itemB</batch:id>
<batch:operation type="insert"/>
<g:item_type>recipes</g:item_type>
</entry>
</feed>"""
BATCH_FEED_RESULT = """<?xml version="1.0" encoding="UTF-8"?>
<feed
xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:g="http://base.google.com/ns/1.0"
xmlns:batch="http://schemas.google.com/gdata/batch">
<id>http://www.google.com/base/feeds/items</id>
<updated>2006-07-11T14:51:42.894Z</updated>
<title type="text">My Batch</title>
<link rel="http://schemas.google.com/g/2005#feed"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items"/>
<link rel="http://schemas.google.com/g/2005#post"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items"/>
<link rel=" http://schemas.google.com/g/2005#batch"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items/batch"/>
<entry>
<id>http://www.google.com/base/feeds/items/2173859253842813008</id>
<published>2006-07-11T14:51:43.560Z</published>
<updated>2006-07-11T14:51:43.560Z</updated>
<title type="text">...</title>
<content type="html">...</content>
<link rel="self"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items/2173859253842813008"/>
<link rel="edit"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items/2173859253842813008"/>
<g:item_type>recipes</g:item_type>
<batch:operation type="insert"/>
<batch:id>itemB</batch:id>
<batch:status code="201" reason="Created"/>
</entry>
<entry>
<id>http://www.google.com/base/feeds/items/11974645606383737963</id>
<published>2006-07-11T14:51:43.247Z</published>
<updated>2006-07-11T14:51:43.247Z</updated>
<title type="text">...</title>
<content type="html">...</content>
<link rel="self"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items/11974645606383737963"/>
<link rel="edit"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items/11974645606383737963"/>
<g:item_type>recipes</g:item_type>
<batch:operation type="insert"/>
<batch:id>itemA</batch:id>
<batch:status code="201" reason="Created"/>
</entry>
<entry>
<id>http://www.google.com/base/feeds/items/13308004346459454600</id>
<updated>2006-07-11T14:51:42.894Z</updated>
<title type="text">Error</title>
<content type="text">Bad request</content>
<batch:status code="404"
reason="Bad request"
content-type="application/xml">
<errors>
<error type="request" reason="Cannot find item"/>
</errors>
</batch:status>
</entry>
<entry>
<id>http://www.google.com/base/feeds/items/17437536661927313949</id>
<updated>2006-07-11T14:51:43.246Z</updated>
<content type="text">Deleted</content>
<batch:operation type="delete"/>
<batch:status code="200" reason="Success"/>
</entry>
</feed>"""
ALBUM_FEED = """<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/" xmlns:exif="http://schemas.google.com/photos/exif/2007" xmlns:geo="http://www.w3.org/2003/01/geo/wgs84_pos#" xmlns:gml="http://www.opengis.net/gml" xmlns:georss="http://www.georss.org/georss" xmlns:photo="http://www.pheed.com/pheed/" xmlns:media="http://search.yahoo.com/mrss/" xmlns:batch="http://schemas.google.com/gdata/batch" xmlns:gphoto="http://schemas.google.com/photos/2007">
<id>http://picasaweb.google.com/data/feed/api/user/sample.user/albumid/1</id>
<updated>2007-09-21T18:23:05.000Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/photos/2007#album"/>
<title type="text">Test</title>
<subtitle type="text"/>
<rights type="text">public</rights>
<icon>http://lh6.google.com/sample.user/Rt8WNoDZEJE/AAAAAAAAABk/HQGlDhpIgWo/s160-c/Test.jpg</icon>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="http://picasaweb.google.com/data/feed/api/user/sample.user/albumid/1"/>
<link rel="alternate" type="text/html" href="http://picasaweb.google.com/sample.user/Test"/>
<link rel="http://schemas.google.com/photos/2007#slideshow" type="application/x-shockwave-flash" href="http://picasaweb.google.com/s/c/bin/slideshow.swf?host=picasaweb.google.com&RGB=0x000000&feed=http%3A%2F%2Fpicasaweb.google.com%2Fdata%2Ffeed%2Fapi%2Fuser%2Fsample.user%2Falbumid%2F1%3Falt%3Drss"/>
<link rel="self" type="application/atom+xml" href="http://picasaweb.google.com/data/feed/api/user/sample.user/albumid/1?start-index=1&max-results=500&kind=photo%2Ctag"/>
<author>
<name>sample</name>
<uri>http://picasaweb.google.com/sample.user</uri>
</author>
<generator version="1.00" uri="http://picasaweb.google.com/">Picasaweb</generator> <openSearch:totalResults>4</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>500</openSearch:itemsPerPage>
<gphoto:id>1</gphoto:id>
<gphoto:name>Test</gphoto:name>
<gphoto:location/>
<gphoto:access>public</gphoto:access> <gphoto:timestamp>1188975600000</gphoto:timestamp>
<gphoto:numphotos>2</gphoto:numphotos>
<gphoto:user>sample.user</gphoto:user>
<gphoto:nickname>sample</gphoto:nickname>
<gphoto:commentingEnabled>true</gphoto:commentingEnabled>
<gphoto:commentCount>0</gphoto:commentCount>
<entry> <id>http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/photoid/2</id>
<published>2007-09-05T20:49:23.000Z</published>
<updated>2007-09-21T18:23:05.000Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/photos/2007#photo"/>
<title type="text">Aqua Blue.jpg</title>
<summary type="text">Blue</summary>
<content type="image/jpeg" src="http://lh4.google.com/sample.user/Rt8WU4DZEKI/AAAAAAAAABY/IVgLqmnzJII/Aqua%20Blue.jpg"/> <link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="http://picasaweb.google.com/data/feed/api/user/sample.user/albumid/1/photoid/2"/>
<link rel="alternate" type="text/html" href="http://picasaweb.google.com/sample.user/Test/photo#2"/>
<link rel="self" type="application/atom+xml" href="http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/photoid/2"/>
<gphoto:id>2</gphoto:id>
<gphoto:version>1190398985145172</gphoto:version>
<gphoto:position>0.0</gphoto:position>
<gphoto:albumid>1</gphoto:albumid> <gphoto:width>2560</gphoto:width>
<gphoto:height>1600</gphoto:height>
<gphoto:size>883405</gphoto:size>
<gphoto:client/>
<gphoto:checksum/>
<gphoto:timestamp>1189025362000</gphoto:timestamp>
<exif:tags> <exif:flash>true</exif:flash>
<exif:imageUniqueID>c041ce17aaa637eb656c81d9cf526c24</exif:imageUniqueID>
</exif:tags>
<gphoto:commentingEnabled>true</gphoto:commentingEnabled>
<gphoto:commentCount>1</gphoto:commentCount>
<media:group>
<media:title type="plain">Aqua Blue.jpg</media:title> <media:description type="plain">Blue</media:description>
<media:keywords>tag, test</media:keywords>
<media:content url="http://lh4.google.com/sample.user/Rt8WU4DZEKI/AAAAAAAAABY/IVgLqmnzJII/Aqua%20Blue.jpg" height="1600" width="2560" type="image/jpeg" medium="image"/>
<media:thumbnail url="http://lh4.google.com/sample.user/Rt8WU4DZEKI/AAAAAAAAABY/IVgLqmnzJII/s72/Aqua%20Blue.jpg" height="45" width="72"/>
<media:thumbnail url="http://lh4.google.com/sample.user/Rt8WU4DZEKI/AAAAAAAAABY/IVgLqmnzJII/s144/Aqua%20Blue.jpg" height="90" width="144"/>
<media:thumbnail url="http://lh4.google.com/sample.user/Rt8WU4DZEKI/AAAAAAAAABY/IVgLqmnzJII/s288/Aqua%20Blue.jpg" height="180" width="288"/>
<media:credit>sample</media:credit>
</media:group>
</entry>
<entry>
<id>http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/photoid/3</id>
<published>2007-09-05T20:49:24.000Z</published>
<updated>2007-09-21T18:19:38.000Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/photos/2007#photo"/>
<title type="text">Aqua Graphite.jpg</title>
<summary type="text">Gray</summary>
<content type="image/jpeg" src="http://lh5.google.com/sample.user/Rt8WVIDZELI/AAAAAAAAABg/d7e0i7gvhNU/Aqua%20Graphite.jpg"/>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="http://picasaweb.google.com/data/feed/api/user/sample.user/albumid/1/photoid/3"/>
<link rel="alternate" type="text/html" href="http://picasaweb.google.com/sample.user/Test/photo#3"/>
<link rel="self" type="application/atom+xml" href="http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/photoid/3"/>
<gphoto:id>3</gphoto:id>
<gphoto:version>1190398778006402</gphoto:version>
<gphoto:position>1.0</gphoto:position>
<gphoto:albumid>1</gphoto:albumid>
<gphoto:width>2560</gphoto:width>
<gphoto:height>1600</gphoto:height>
<gphoto:size>798334</gphoto:size>
<gphoto:client/>
<gphoto:checksum/>
<gphoto:timestamp>1189025363000</gphoto:timestamp>
<exif:tags>
<exif:flash>true</exif:flash>
<exif:imageUniqueID>a5ce2e36b9df7d3cb081511c72e73926</exif:imageUniqueID>
</exif:tags>
<gphoto:commentingEnabled>true</gphoto:commentingEnabled>
<gphoto:commentCount>0</gphoto:commentCount>
<media:group>
<media:title type="plain">Aqua Graphite.jpg</media:title>
<media:description type="plain">Gray</media:description>
<media:keywords/>
<media:content url="http://lh5.google.com/sample.user/Rt8WVIDZELI/AAAAAAAAABg/d7e0i7gvhNU/Aqua%20Graphite.jpg" height="1600" width="2560" type="image/jpeg" medium="image"/>
<media:thumbnail url="http://lh5.google.com/sample.user/Rt8WVIDZELI/AAAAAAAAABg/d7e0i7gvhNU/s72/Aqua%20Graphite.jpg" height="45" width="72"/>
<media:thumbnail url="http://lh5.google.com/sample.user/Rt8WVIDZELI/AAAAAAAAABg/d7e0i7gvhNU/s144/Aqua%20Graphite.jpg" height="90" width="144"/>
<media:thumbnail url="http://lh5.google.com/sample.user/Rt8WVIDZELI/AAAAAAAAABg/d7e0i7gvhNU/s288/Aqua%20Graphite.jpg" height="180" width="288"/>
<media:credit>sample</media:credit>
</media:group>
</entry>
<entry>
<id>http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/tag/tag</id>
<updated>2007-09-05T20:49:24.000Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/photos/2007#tag"/>
<title type="text">tag</title>
<summary type="text">tag</summary>
<link rel="alternate" type="text/html" href="http://picasaweb.google.com/lh/searchbrowse?q=tag&psc=G&uname=sample.user&filter=0"/>
<link rel="self" type="application/atom+xml" href="http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/tag/tag"/>
<author>
<name>sample</name>
<uri>http://picasaweb.google.com/sample.user</uri>
</author>
</entry>
<entry>
<id>http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/tag/test</id>
<updated>2007-09-05T20:49:24.000Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/photos/2007#tag"/>
<title type="text">test</title>
<summary type="text">test</summary>
<link rel="alternate" type="text/html" href="http://picasaweb.google.com/lh/searchbrowse?q=test&psc=G&uname=sample.user&filter=0"/>
<link rel="self" type="application/atom+xml" href="http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/tag/test"/>
<author>
<name>sample</name>
<uri>http://picasaweb.google.com/sample.user</uri>
</author>
</entry>
</feed>"""
CODE_SEARCH_FEED = """<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:opensearch="http://a9.com/-/spec/opensearchrss/1.0/" xmlns:gcs="http://schemas.google.com/codesearch/2006" xml:base="http://www.google.com">
<id>http://www.google.com/codesearch/feeds/search?q=malloc</id>
<updated>2007-12-19T16:08:04Z</updated>
<title type="text">Google Code Search</title>
<generator version="1.0" uri="http://www.google.com/codesearch">Google Code Search</generator>
<opensearch:totalResults>2530000</opensearch:totalResults>
<opensearch:startIndex>1</opensearch:startIndex>
<author>
<name>Google Code Search</name>
<uri>http://www.google.com/codesearch</uri>
</author>
<link rel="http://schemas.google.com/g/2006#feed" type="application/atom+xml" href="http://schemas.google.com/codesearch/2006"/>
<link rel="self" type="application/atom+xml" href="http://www.google.com/codesearch/feeds/search?q=malloc"/>
<link rel="next" type="application/atom+xml" href="http://www.google.com/codesearch/feeds/search?q=malloc&start-index=11"/>
<link rel="alternate" type="text/html" href="http://www.google.com/codesearch?q=malloc"/>
<entry><id>http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:LDjwp-Iqc7U:84hEYaYsZk8:xDGReDhvNi0&amp;sa=N&amp;ct=rx&amp;cd=1&amp;cs_p=http://www.gnu.org&amp;cs_f=software/autoconf/manual/autoconf-2.60/autoconf.html-002&amp;cs_p=http://www.gnu.org&amp;cs_f=software/autoconf/manual/autoconf-2.60/autoconf.html-002#first</id><updated>2007-12-19T16:08:04Z</updated><author><name>Code owned by external author.</name></author><title type="text">software/autoconf/manual/autoconf-2.60/autoconf.html</title><link rel="alternate" type="text/html" href="http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:LDjwp-Iqc7U:84hEYaYsZk8:xDGReDhvNi0&amp;sa=N&amp;ct=rx&amp;cd=1&amp;cs_p=http://www.gnu.org&amp;cs_f=software/autoconf/manual/autoconf-2.60/autoconf.html-002&amp;cs_p=http://www.gnu.org&amp;cs_f=software/autoconf/manual/autoconf-2.60/autoconf.html-002#first"/><gcs:package name="http://www.gnu.org" uri="http://www.gnu.org"></gcs:package><gcs:file name="software/autoconf/manual/autoconf-2.60/autoconf.html-002"></gcs:file><content type="text/html"><pre> 8: void *<b>malloc</b> ();
</pre></content><gcs:match lineNumber="4" type="text/html"><pre> #undef <b>malloc</b>
</pre></gcs:match><gcs:match lineNumber="8" type="text/html"><pre> void *<b>malloc</b> ();
</pre></gcs:match><gcs:match lineNumber="14" type="text/html"><pre> rpl_<b>malloc</b> (size_t n)
</pre></gcs:match><gcs:match lineNumber="18" type="text/html"><pre> return <b>malloc</b> (n);
</pre></gcs:match></entry>
<entry><id>http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:h4hfh-fV-jI:niBq_bwWZNs:H0OhClf0HWQ&amp;sa=N&amp;ct=rx&amp;cd=2&amp;cs_p=ftp://ftp.gnu.org/gnu/guile/guile-1.6.8.tar.gz&amp;cs_f=guile-1.6.8/libguile/mallocs.c&amp;cs_p=ftp://ftp.gnu.org/gnu/guile/guile-1.6.8.tar.gz&amp;cs_f=guile-1.6.8/libguile/mallocs.c#first</id><updated>2007-12-19T16:08:04Z</updated><author><name>Code owned by external author.</name></author><title type="text">guile-1.6.8/libguile/mallocs.c</title><link rel="alternate" type="text/html" href="http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:h4hfh-fV-jI:niBq_bwWZNs:H0OhClf0HWQ&amp;sa=N&amp;ct=rx&amp;cd=2&amp;cs_p=ftp://ftp.gnu.org/gnu/guile/guile-1.6.8.tar.gz&amp;cs_f=guile-1.6.8/libguile/mallocs.c&amp;cs_p=ftp://ftp.gnu.org/gnu/guile/guile-1.6.8.tar.gz&amp;cs_f=guile-1.6.8/libguile/mallocs.c#first"/><gcs:package name="ftp://ftp.gnu.org/gnu/guile/guile-1.6.8.tar.gz" uri="ftp://ftp.gnu.org/gnu/guile/guile-1.6.8.tar.gz"></gcs:package><gcs:file name="guile-1.6.8/libguile/mallocs.c"></gcs:file><content type="text/html"><pre> 86: {
scm_t_bits mem = n ? (scm_t_bits) <b>malloc</b> (n) : 0;
if (n &amp;&amp; !mem)
</pre></content><gcs:match lineNumber="54" type="text/html"><pre>#include &lt;<b>malloc</b>.h&gt;
</pre></gcs:match><gcs:match lineNumber="62" type="text/html"><pre>scm_t_bits scm_tc16_<b>malloc</b>;
</pre></gcs:match><gcs:match lineNumber="66" type="text/html"><pre><b>malloc</b>_free (SCM ptr)
</pre></gcs:match><gcs:match lineNumber="75" type="text/html"><pre><b>malloc</b>_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
</pre></gcs:match><gcs:match lineNumber="77" type="text/html"><pre> scm_puts(&quot;#&lt;<b>malloc</b> &quot;, port);
</pre></gcs:match><gcs:match lineNumber="87" type="text/html"><pre> scm_t_bits mem = n ? (scm_t_bits) <b>malloc</b> (n) : 0;
</pre></gcs:match><gcs:match lineNumber="90" type="text/html"><pre> SCM_RETURN_NEWSMOB (scm_tc16_<b>malloc</b>, mem);
</pre></gcs:match><gcs:match lineNumber="98" type="text/html"><pre> scm_tc16_<b>malloc</b> = scm_make_smob_type (&quot;<b>malloc</b>&quot;, 0);
</pre></gcs:match><gcs:match lineNumber="99" type="text/html"><pre> scm_set_smob_free (scm_tc16_<b>malloc</b>, <b>malloc</b>_free);
</pre></gcs:match><rights>GPL</rights></entry>
<entry><id>http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:9wyZUG-N_30:7_dFxoC1ZrY:C0_iYbFj90M&amp;sa=N&amp;ct=rx&amp;cd=3&amp;cs_p=http://ftp.gnu.org/gnu/bash/bash-3.0.tar.gz&amp;cs_f=bash-3.0/lib/malloc/alloca.c&amp;cs_p=http://ftp.gnu.org/gnu/bash/bash-3.0.tar.gz&amp;cs_f=bash-3.0/lib/malloc/alloca.c#first</id><updated>2007-12-19T16:08:04Z</updated><author><name>Code owned by external author.</name></author><title type="text">bash-3.0/lib/malloc/alloca.c</title><link rel="alternate" type="text/html" href="http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:9wyZUG-N_30:7_dFxoC1ZrY:C0_iYbFj90M&amp;sa=N&amp;ct=rx&amp;cd=3&amp;cs_p=http://ftp.gnu.org/gnu/bash/bash-3.0.tar.gz&amp;cs_f=bash-3.0/lib/malloc/alloca.c&amp;cs_p=http://ftp.gnu.org/gnu/bash/bash-3.0.tar.gz&amp;cs_f=bash-3.0/lib/malloc/alloca.c#first"/><gcs:package name="http://ftp.gnu.org/gnu/bash/bash-3.0.tar.gz" uri="http://ftp.gnu.org/gnu/bash/bash-3.0.tar.gz"></gcs:package><gcs:file name="bash-3.0/lib/malloc/alloca.c"></gcs:file><content type="text/html"><pre> 78: #ifndef emacs
#define <b>malloc</b> x<b>malloc</b>
extern pointer x<b>malloc</b> ();
</pre></content><gcs:match lineNumber="69" type="text/html"><pre> <b>malloc</b>. The Emacs executable needs alloca to call x<b>malloc</b>, because
</pre></gcs:match><gcs:match lineNumber="70" type="text/html"><pre> ordinary <b>malloc</b> isn&#39;t protected from input signals. On the other
</pre></gcs:match><gcs:match lineNumber="71" type="text/html"><pre> hand, the utilities in lib-src need alloca to call <b>malloc</b>; some of
</pre></gcs:match><gcs:match lineNumber="72" type="text/html"><pre> them are very simple, and don&#39;t have an x<b>malloc</b> routine.
</pre></gcs:match><gcs:match lineNumber="76" type="text/html"><pre> Callers below should use <b>malloc</b>. */
</pre></gcs:match><gcs:match lineNumber="79" type="text/html"><pre>#define <b>malloc</b> x<b>malloc</b>
</pre></gcs:match><gcs:match lineNumber="80" type="text/html"><pre>extern pointer x<b>malloc</b> ();
</pre></gcs:match><gcs:match lineNumber="132" type="text/html"><pre> It is very important that sizeof(header) agree with <b>malloc</b>
</pre></gcs:match><gcs:match lineNumber="198" type="text/html"><pre> register pointer new = <b>malloc</b> (sizeof (header) + size);
</pre></gcs:match><rights>GPL</rights></entry>
<entry><id>http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:uhVCKyPcT6k:8juMxxzmUJw:H7_IDsTB2L4&amp;sa=N&amp;ct=rx&amp;cd=4&amp;cs_p=http://ftp.mozilla.org/pub/mozilla.org/mozilla/releases/mozilla1.7b/src/mozilla-source-1.7b-source.tar.bz2&amp;cs_f=mozilla/xpcom/build/malloc.c&amp;cs_p=http://ftp.mozilla.org/pub/mozilla.org/mozilla/releases/mozilla1.7b/src/mozilla-source-1.7b-source.tar.bz2&amp;cs_f=mozilla/xpcom/build/malloc.c#first</id><updated>2007-12-19T16:08:04Z</updated><author><name>Code owned by external author.</name></author><title type="text">mozilla/xpcom/build/malloc.c</title><link rel="alternate" type="text/html" href="http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:uhVCKyPcT6k:8juMxxzmUJw:H7_IDsTB2L4&amp;sa=N&amp;ct=rx&amp;cd=4&amp;cs_p=http://ftp.mozilla.org/pub/mozilla.org/mozilla/releases/mozilla1.7b/src/mozilla-source-1.7b-source.tar.bz2&amp;cs_f=mozilla/xpcom/build/malloc.c&amp;cs_p=http://ftp.mozilla.org/pub/mozilla.org/mozilla/releases/mozilla1.7b/src/mozilla-source-1.7b-source.tar.bz2&amp;cs_f=mozilla/xpcom/build/malloc.c#first"/><gcs:package name="http://ftp.mozilla.org/pub/mozilla.org/mozilla/releases/mozilla1.7b/src/mozilla-source-1.7b-source.tar.bz2" uri="http://ftp.mozilla.org/pub/mozilla.org/mozilla/releases/mozilla1.7b/src/mozilla-source-1.7b-source.tar.bz2"></gcs:package><gcs:file name="mozilla/xpcom/build/malloc.c"></gcs:file><content type="text/html"><pre> 54: http://gee.cs.oswego.edu/dl/html/<b>malloc</b>.html
You may already by default be using a c library containing a <b>malloc</b>
</pre></content><gcs:match lineNumber="4" type="text/html"><pre>/* ---------- To make a <b>malloc</b>.h, start cutting here ------------ */
</pre></gcs:match><gcs:match lineNumber="22" type="text/html"><pre> Note: There may be an updated version of this <b>malloc</b> obtainable at
</pre></gcs:match><gcs:match lineNumber="23" type="text/html"><pre> ftp://gee.cs.oswego.edu/pub/misc/<b>malloc</b>.c
</pre></gcs:match><gcs:match lineNumber="34" type="text/html"><pre>* Why use this <b>malloc</b>?
</pre></gcs:match><gcs:match lineNumber="37" type="text/html"><pre> most tunable <b>malloc</b> ever written. However it is among the fastest
</pre></gcs:match><gcs:match lineNumber="40" type="text/html"><pre> allocator for <b>malloc</b>-intensive programs.
</pre></gcs:match><gcs:match lineNumber="54" type="text/html"><pre> http://gee.cs.oswego.edu/dl/html/<b>malloc</b>.html
</pre></gcs:match><gcs:match lineNumber="56" type="text/html"><pre> You may already by default be using a c library containing a <b>malloc</b>
</pre></gcs:match><gcs:match lineNumber="57" type="text/html"><pre> that is somehow based on some version of this <b>malloc</b> (for example in
</pre></gcs:match><rights>Mozilla</rights></entry>
<entry><id>http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:4n1P2HVOISs:Ybbpph0wR2M:OhIN_sDrG0U&amp;sa=N&amp;ct=rx&amp;cd=5&amp;cs_p=http://regexps.srparish.net/src/hackerlab/hackerlab-1.0pre2.tar.gz&amp;cs_f=hackerlab-1.0pre2/src/hackerlab/tests/mem-tests/unit-must-malloc.sh&amp;cs_p=http://regexps.srparish.net/src/hackerlab/hackerlab-1.0pre2.tar.gz&amp;cs_f=hackerlab-1.0pre2/src/hackerlab/tests/mem-tests/unit-must-malloc.sh#first</id><updated>2007-12-19T16:08:04Z</updated><author><name>Code owned by external author.</name></author><title type="text">hackerlab-1.0pre2/src/hackerlab/tests/mem-tests/unit-must-malloc.sh</title><link rel="alternate" type="text/html" href="http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:4n1P2HVOISs:Ybbpph0wR2M:OhIN_sDrG0U&amp;sa=N&amp;ct=rx&amp;cd=5&amp;cs_p=http://regexps.srparish.net/src/hackerlab/hackerlab-1.0pre2.tar.gz&amp;cs_f=hackerlab-1.0pre2/src/hackerlab/tests/mem-tests/unit-must-malloc.sh&amp;cs_p=http://regexps.srparish.net/src/hackerlab/hackerlab-1.0pre2.tar.gz&amp;cs_f=hackerlab-1.0pre2/src/hackerlab/tests/mem-tests/unit-must-malloc.sh#first"/><gcs:package name="http://regexps.srparish.net/src/hackerlab/hackerlab-1.0pre2.tar.gz" uri="http://regexps.srparish.net/src/hackerlab/hackerlab-1.0pre2.tar.gz"></gcs:package><gcs:file name="hackerlab-1.0pre2/src/hackerlab/tests/mem-tests/unit-must-malloc.sh"></gcs:file><content type="text/html"><pre> 11: echo ================ unit-must-<b>malloc</b> tests ================
./unit-must-<b>malloc</b>
echo ...passed
</pre></content><gcs:match lineNumber="2" type="text/html"><pre># tag: Tom Lord Tue Dec 4 14:54:29 2001 (mem-tests/unit-must-<b>malloc</b>.sh)
</pre></gcs:match><gcs:match lineNumber="11" type="text/html"><pre>echo ================ unit-must-<b>malloc</b> tests ================
</pre></gcs:match><gcs:match lineNumber="12" type="text/html"><pre>./unit-must-<b>malloc</b>
</pre></gcs:match><rights>GPL</rights></entry>
<entry><id>http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:GzkwiWG266M:ykuz3bG00ws:2sTvVSif08g&amp;sa=N&amp;ct=rx&amp;cd=6&amp;cs_p=http://ftp.gnu.org/gnu/tar/tar-1.14.tar.bz2&amp;cs_f=tar-1.14/lib/malloc.c&amp;cs_p=http://ftp.gnu.org/gnu/tar/tar-1.14.tar.bz2&amp;cs_f=tar-1.14/lib/malloc.c#first</id><updated>2007-12-19T16:08:04Z</updated><author><name>Code owned by external author.</name></author><title type="text">tar-1.14/lib/malloc.c</title><link rel="alternate" type="text/html" href="http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:GzkwiWG266M:ykuz3bG00ws:2sTvVSif08g&amp;sa=N&amp;ct=rx&amp;cd=6&amp;cs_p=http://ftp.gnu.org/gnu/tar/tar-1.14.tar.bz2&amp;cs_f=tar-1.14/lib/malloc.c&amp;cs_p=http://ftp.gnu.org/gnu/tar/tar-1.14.tar.bz2&amp;cs_f=tar-1.14/lib/malloc.c#first"/><gcs:package name="http://ftp.gnu.org/gnu/tar/tar-1.14.tar.bz2" uri="http://ftp.gnu.org/gnu/tar/tar-1.14.tar.bz2"></gcs:package><gcs:file name="tar-1.14/lib/malloc.c"></gcs:file><content type="text/html"><pre> 22: #endif
#undef <b>malloc</b>
</pre></content><gcs:match lineNumber="1" type="text/html"><pre>/* Work around bug on some systems where <b>malloc</b> (0) fails.
</pre></gcs:match><gcs:match lineNumber="23" type="text/html"><pre>#undef <b>malloc</b>
</pre></gcs:match><gcs:match lineNumber="31" type="text/html"><pre>rpl_<b>malloc</b> (size_t n)
</pre></gcs:match><gcs:match lineNumber="35" type="text/html"><pre> return <b>malloc</b> (n);
</pre></gcs:match><rights>GPL</rights></entry>
<entry><id>http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:o_TFIeBY6dY:ktI_dt8wPao:AI03BD1Dz0Y&amp;sa=N&amp;ct=rx&amp;cd=7&amp;cs_p=http://ftp.gnu.org/gnu/tar/tar-1.16.1.tar.gz&amp;cs_f=tar-1.16.1/lib/malloc.c&amp;cs_p=http://ftp.gnu.org/gnu/tar/tar-1.16.1.tar.gz&amp;cs_f=tar-1.16.1/lib/malloc.c#first</id><updated>2007-12-19T16:08:04Z</updated><author><name>Code owned by external author.</name></author><title type="text">tar-1.16.1/lib/malloc.c</title><link rel="alternate" type="text/html" href="http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:o_TFIeBY6dY:ktI_dt8wPao:AI03BD1Dz0Y&amp;sa=N&amp;ct=rx&amp;cd=7&amp;cs_p=http://ftp.gnu.org/gnu/tar/tar-1.16.1.tar.gz&amp;cs_f=tar-1.16.1/lib/malloc.c&amp;cs_p=http://ftp.gnu.org/gnu/tar/tar-1.16.1.tar.gz&amp;cs_f=tar-1.16.1/lib/malloc.c#first"/><gcs:package name="http://ftp.gnu.org/gnu/tar/tar-1.16.1.tar.gz" uri="http://ftp.gnu.org/gnu/tar/tar-1.16.1.tar.gz"></gcs:package><gcs:file name="tar-1.16.1/lib/malloc.c"></gcs:file><content type="text/html"><pre> 21: #include &lt;config.h&gt;
#undef <b>malloc</b>
</pre></content><gcs:match lineNumber="1" type="text/html"><pre>/* <b>malloc</b>() function that is glibc compatible.
</pre></gcs:match><gcs:match lineNumber="22" type="text/html"><pre>#undef <b>malloc</b>
</pre></gcs:match><gcs:match lineNumber="30" type="text/html"><pre>rpl_<b>malloc</b> (size_t n)
</pre></gcs:match><gcs:match lineNumber="34" type="text/html"><pre> return <b>malloc</b> (n);
</pre></gcs:match><rights>GPL</rights></entry>
<entry><id>http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:_ibw-VLkMoI:jBOtIJSmFd4:-0NUEVeCwfY&amp;sa=N&amp;ct=rx&amp;cd=8&amp;cs_p=http://freshmeat.net/redir/uclibc/20616/url_bz2/uClibc-0.9.28.1.tar.bz2&amp;cs_f=uClibc-0.9.29/include/malloc.h&amp;cs_p=http://freshmeat.net/redir/uclibc/20616/url_bz2/uClibc-0.9.28.1.tar.bz2&amp;cs_f=uClibc-0.9.29/include/malloc.h#first</id><updated>2007-12-19T16:08:04Z</updated><author><name>Code owned by external author.</name></author><title type="text">uClibc-0.9.29/include/malloc.h</title><link rel="alternate" type="text/html" href="http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:_ibw-VLkMoI:jBOtIJSmFd4:-0NUEVeCwfY&amp;sa=N&amp;ct=rx&amp;cd=8&amp;cs_p=http://freshmeat.net/redir/uclibc/20616/url_bz2/uClibc-0.9.28.1.tar.bz2&amp;cs_f=uClibc-0.9.29/include/malloc.h&amp;cs_p=http://freshmeat.net/redir/uclibc/20616/url_bz2/uClibc-0.9.28.1.tar.bz2&amp;cs_f=uClibc-0.9.29/include/malloc.h#first"/><gcs:package name="http://freshmeat.net/redir/uclibc/20616/url_bz2/uClibc-0.9.28.1.tar.bz2" uri="http://freshmeat.net/redir/uclibc/20616/url_bz2/uClibc-0.9.28.1.tar.bz2"></gcs:package><gcs:file name="uClibc-0.9.29/include/malloc.h"></gcs:file><content type="text/html"><pre> 1: /* Prototypes and definition for <b>malloc</b> implementation.
Copyright (C) 1996, 1997, 1999, 2000 Free Software Foundation, Inc.
</pre></content><gcs:match lineNumber="1" type="text/html"><pre>/* Prototypes and definition for <b>malloc</b> implementation.
</pre></gcs:match><gcs:match lineNumber="26" type="text/html"><pre> `pt<b>malloc</b>&#39;, a <b>malloc</b> implementation for multiple threads without
</pre></gcs:match><gcs:match lineNumber="28" type="text/html"><pre> See the files `pt<b>malloc</b>.c&#39; or `COPYRIGHT&#39; for copying conditions.
</pre></gcs:match><gcs:match lineNumber="32" type="text/html"><pre> This work is mainly derived from <b>malloc</b>-2.6.4 by Doug Lea
</pre></gcs:match><gcs:match lineNumber="35" type="text/html"><pre> ftp://g.oswego.edu/pub/misc/<b>malloc</b>.c
</pre></gcs:match><gcs:match lineNumber="40" type="text/html"><pre> `pt<b>malloc</b>.c&#39;.
</pre></gcs:match><gcs:match lineNumber="45" type="text/html"><pre># define __<b>malloc</b>_ptr_t void *
</pre></gcs:match><gcs:match lineNumber="51" type="text/html"><pre># define __<b>malloc</b>_ptr_t char *
</pre></gcs:match><gcs:match lineNumber="56" type="text/html"><pre># define __<b>malloc</b>_size_t size_t
</pre></gcs:match><rights>LGPL</rights></entry>
<entry><id>http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:F6qHcZ9vefo:bTX7o9gKfks:hECF4r_eKC0&amp;sa=N&amp;ct=rx&amp;cd=9&amp;cs_p=http://ftp.gnu.org/gnu/glibc/glibc-2.0.1.tar.gz&amp;cs_f=glibc-2.0.1/hurd/hurdmalloc.h&amp;cs_p=http://ftp.gnu.org/gnu/glibc/glibc-2.0.1.tar.gz&amp;cs_f=glibc-2.0.1/hurd/hurdmalloc.h#first</id><updated>2007-12-19T16:08:04Z</updated><author><name>Code owned by external author.</name></author><title type="text">glibc-2.0.1/hurd/hurdmalloc.h</title><link rel="alternate" type="text/html" href="http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:F6qHcZ9vefo:bTX7o9gKfks:hECF4r_eKC0&amp;sa=N&amp;ct=rx&amp;cd=9&amp;cs_p=http://ftp.gnu.org/gnu/glibc/glibc-2.0.1.tar.gz&amp;cs_f=glibc-2.0.1/hurd/hurdmalloc.h&amp;cs_p=http://ftp.gnu.org/gnu/glibc/glibc-2.0.1.tar.gz&amp;cs_f=glibc-2.0.1/hurd/hurdmalloc.h#first"/><gcs:package name="http://ftp.gnu.org/gnu/glibc/glibc-2.0.1.tar.gz" uri="http://ftp.gnu.org/gnu/glibc/glibc-2.0.1.tar.gz"></gcs:package><gcs:file name="glibc-2.0.1/hurd/hurdmalloc.h"></gcs:file><content type="text/html"><pre> 15: #define <b>malloc</b> _hurd_<b>malloc</b>
#define realloc _hurd_realloc
</pre></content><gcs:match lineNumber="3" type="text/html"><pre> All hurd-internal code which uses <b>malloc</b> et al includes this file so it
</pre></gcs:match><gcs:match lineNumber="4" type="text/html"><pre> will use the internal <b>malloc</b> routines _hurd_{<b>malloc</b>,realloc,free}
</pre></gcs:match><gcs:match lineNumber="7" type="text/html"><pre> of <b>malloc</b> et al is the unixoid one using sbrk.
</pre></gcs:match><gcs:match lineNumber="11" type="text/html"><pre>extern void *_hurd_<b>malloc</b> (size_t);
</pre></gcs:match><gcs:match lineNumber="15" type="text/html"><pre>#define <b>malloc</b> _hurd_<b>malloc</b>
</pre></gcs:match><rights>GPL</rights></entry>
<entry><id>http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:CHUvHYzyLc8:pdcAfzDA6lY:wjofHuNLTHg&amp;sa=N&amp;ct=rx&amp;cd=10&amp;cs_p=ftp://apache.mirrors.pair.com/httpd/httpd-2.2.4.tar.bz2&amp;cs_f=httpd-2.2.4/srclib/apr/include/arch/netware/apr_private.h&amp;cs_p=ftp://apache.mirrors.pair.com/httpd/httpd-2.2.4.tar.bz2&amp;cs_f=httpd-2.2.4/srclib/apr/include/arch/netware/apr_private.h#first</id><updated>2007-12-19T16:08:04Z</updated><author><name>Code owned by external author.</name></author><title type="text">httpd-2.2.4/srclib/apr/include/arch/netware/apr_private.h</title><link rel="alternate" type="text/html" href="http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:CHUvHYzyLc8:pdcAfzDA6lY:wjofHuNLTHg&amp;sa=N&amp;ct=rx&amp;cd=10&amp;cs_p=ftp://apache.mirrors.pair.com/httpd/httpd-2.2.4.tar.bz2&amp;cs_f=httpd-2.2.4/srclib/apr/include/arch/netware/apr_private.h&amp;cs_p=ftp://apache.mirrors.pair.com/httpd/httpd-2.2.4.tar.bz2&amp;cs_f=httpd-2.2.4/srclib/apr/include/arch/netware/apr_private.h#first"/><gcs:package name="ftp://apache.mirrors.pair.com/httpd/httpd-2.2.4.tar.bz2" uri="ftp://apache.mirrors.pair.com/httpd/httpd-2.2.4.tar.bz2"></gcs:package><gcs:file name="httpd-2.2.4/srclib/apr/include/arch/netware/apr_private.h"></gcs:file><content type="text/html"><pre> 173: #undef <b>malloc</b>
#define <b>malloc</b>(x) library_<b>malloc</b>(gLibHandle,x)
</pre></content><gcs:match lineNumber="170" type="text/html"><pre>/* Redefine <b>malloc</b> to use the library <b>malloc</b> call so
</pre></gcs:match><gcs:match lineNumber="173" type="text/html"><pre>#undef <b>malloc</b>
</pre></gcs:match><gcs:match lineNumber="174" type="text/html"><pre>#define <b>malloc</b>(x) library_<b>malloc</b>(gLibHandle,x)
</pre></gcs:match><rights>Apache</rights></entry>
</feed>"""
YOUTUBE_VIDEO_FEED = """<?xml version='1.0' encoding='UTF-8'?><feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:gml='http://www.opengis.net/gml' xmlns:georss='http://www.georss.org/georss' xmlns:media='http://search.yahoo.com/mrss/' xmlns:yt='http://gdata.youtube.com/schemas/2007' xmlns:gd='http://schemas.google.com/g/2005'><id>http://gdata.youtube.com/feeds/api/standardfeeds/top_rated</id><updated>2008-05-14T02:24:07.000-07:00</updated><category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#video'/><title type='text'>Top Rated</title><logo>http://www.youtube.com/img/pic_youtubelogo_123x63.gif</logo><link rel='alternate' type='text/html' href='http://www.youtube.com/browse?s=tr'/><link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/standardfeeds/top_rated'/><link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/standardfeeds/top_rated?start-index=1&max-results=25'/><link rel='next' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/standardfeeds/top_rated?start-index=26&max-results=25'/><author><name>YouTube</name><uri>http://www.youtube.com/</uri></author><generator version='beta' uri='http://gdata.youtube.com/'>YouTube data API</generator><openSearch:totalResults>100</openSearch:totalResults><openSearch:startIndex>1</openSearch:startIndex><openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<entry><id>http://gdata.youtube.com/feeds/api/videos/C71ypXYGho8</id><published>2008-03-20T10:17:27.000-07:00</published><updated>2008-05-14T04:26:37.000-07:00</updated><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='karyn'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='garcia'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='me'/><category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#video'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='boyfriend'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='por'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='te'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='odeio'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='amar'/><category scheme='http://gdata.youtube.com/schemas/2007/categories.cat' term='Music' label='Music'/><title type='text'>Me odeio por te amar - KARYN GARCIA</title><content type='text'>http://www.karyngarcia.com.br</content><link rel='alternate' type='text/html' href='http://www.youtube.com/watch?v=C71ypXYGho8'/><link rel='http://gdata.youtube.com/schemas/2007#video.related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/videos/C71ypXYGho8/related'/><link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/standardfeeds/top_rated/C71ypXYGho8'/><author><name>TvKarynGarcia</name><uri>http://gdata.youtube.com/feeds/api/users/tvkaryngarcia</uri></author><media:group><media:title type='plain'>Me odeio por te amar - KARYN GARCIA</media:title><media:description type='plain'>http://www.karyngarcia.com.br</media:description><media:keywords>amar, boyfriend, garcia, karyn, me, odeio, por, te</media:keywords><yt:duration seconds='203'/><media:category label='Music' scheme='http://gdata.youtube.com/schemas/2007/categories.cat'>Music</media:category><media:category label='test111' scheme='http://gdata.youtube.com/schemas/2007/developertags.cat'>test111</media:category><media:category label='test222' scheme='http://gdata.youtube.com/schemas/2007/developertags.cat'>test222</media:category><media:content url='http://www.youtube.com/v/C71ypXYGho8' type='application/x-shockwave-flash' medium='video' isDefault='true' expression='full' duration='203' yt:format='5'/><media:content url='rtsp://rtsp2.youtube.com/ChoLENy73wIaEQmPhgZ2pXK9CxMYDSANFEgGDA==/0/0/0/video.3gp' type='video/3gpp' medium='video' expression='full' duration='203' yt:format='1'/><media:content url='rtsp://rtsp2.youtube.com/ChoLENy73wIaEQmPhgZ2pXK9CxMYESARFEgGDA==/0/0/0/video.3gp' type='video/3gpp' medium='video' expression='full' duration='203' yt:format='6'/><media:player url='http://www.youtube.com/watch?v=C71ypXYGho8'/><media:thumbnail url='http://img.youtube.com/vi/C71ypXYGho8/2.jpg' height='97' width='130' time='00:01:41.500'/><media:thumbnail url='http://img.youtube.com/vi/C71ypXYGho8/1.jpg' height='97' width='130' time='00:00:50.750'/><media:thumbnail url='http://img.youtube.com/vi/C71ypXYGho8/3.jpg' height='97' width='130' time='00:02:32.250'/><media:thumbnail url='http://img.youtube.com/vi/C71ypXYGho8/0.jpg' height='240' width='320' time='00:01:41.500'/></media:group><yt:statistics viewCount='138864' favoriteCount='2474'/><gd:rating min='1' max='5' numRaters='4626' average='4.95'/><gd:comments><gd:feedLink 
href='http://gdata.youtube.com/feeds/api/videos/C71ypXYGho8/comments' countHint='27'/></gd:comments></entry>
<entry><id>http://gdata.youtube.com/feeds/api/videos/gsVaTyb1tBw</id><published>2008-02-15T04:31:45.000-08:00</published><updated>2008-05-14T05:09:42.000-07:00</updated><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='extreme'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='cam'/><category scheme='http://gdata.youtube.com/schemas/2007/categories.cat' term='Sports' label='Sports'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='alcala'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='kani'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='helmet'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='campillo'/><category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#video'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='pato'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='dirt'/><title type='text'>extreme helmet cam Kani, Keil and Pato</title><content type='text'>trimmed</content><link rel='alternate' type='text/html' href='http://www.youtube.com/watch?v=gsVaTyb1tBw'/><link rel='http://gdata.youtube.com/schemas/2007#video.responses' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/videos/gsVaTyb1tBw/responses'/><link rel='http://gdata.youtube.com/schemas/2007#video.related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/videos/gsVaTyb1tBw/related'/><link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/standardfeeds/recently_featured/gsVaTyb1tBw'/><author><name>peraltamagic</name><uri>http://gdata.youtube.com/feeds/api/users/peraltamagic</uri></author><media:group><media:title type='plain'>extreme helmet cam Kani, Keil and Pato</media:title><media:description type='plain'>trimmed</media:description><media:keywords>alcala, cam, campillo, dirt, extreme, helmet, kani, pato</media:keywords><yt:duration seconds='31'/><media:category label='Sports' scheme='http://gdata.youtube.com/schemas/2007/categories.cat'>Sports</media:category><media:content url='http://www.youtube.com/v/gsVaTyb1tBw' type='application/x-shockwave-flash' medium='video' isDefault='true' expression='full' duration='31' yt:format='5'/><media:content url='rtsp://rtsp2.youtube.com/ChoLENy73wIaEQkctPUmT1rFghMYDSANFEgGDA==/0/0/0/video.3gp' type='video/3gpp' medium='video' expression='full' duration='31' yt:format='1'/><media:content url='rtsp://rtsp2.youtube.com/ChoLENy73wIaEQkctPUmT1rFghMYESARFEgGDA==/0/0/0/video.3gp' type='video/3gpp' medium='video' expression='full' duration='31' yt:format='6'/><media:player url='http://www.youtube.com/watch?v=gsVaTyb1tBw'/><media:thumbnail url='http://img.youtube.com/vi/gsVaTyb1tBw/2.jpg' height='97' width='130' time='00:00:15.500'/><media:thumbnail url='http://img.youtube.com/vi/gsVaTyb1tBw/1.jpg' height='97' width='130' time='00:00:07.750'/><media:thumbnail url='http://img.youtube.com/vi/gsVaTyb1tBw/3.jpg' height='97' width='130' time='00:00:23.250'/><media:thumbnail url='http://img.youtube.com/vi/gsVaTyb1tBw/0.jpg' height='240' width='320' time='00:00:15.500'/></media:group><yt:statistics viewCount='489941' favoriteCount='561'/><gd:rating min='1' max='5' numRaters='1255' average='4.11'/><gd:comments><gd:feedLink href='http://gdata.youtube.com/feeds/api/videos/gsVaTyb1tBw/comments' countHint='1116'/></gd:comments></entry>
</feed>"""
YOUTUBE_ENTRY_PRIVATE = """<?xml version='1.0' encoding='utf-8'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:media='http://search.yahoo.com/mrss/'
xmlns:gd='http://schemas.google.com/g/2005'
xmlns:yt='http://gdata.youtube.com/schemas/2007'
xmlns:gml='http://www.opengis.net/gml'
xmlns:georss='http://www.georss.org/georss'
xmlns:app='http://purl.org/atom/app#'>
<id>http://gdata.youtube.com/feeds/videos/UMFI1hdm96E</id>
<published>2007-01-07T01:50:15.000Z</published>
<updated>2007-01-07T01:50:15.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://gdata.youtube.com/schemas/2007#video' />
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat'
term='barkley' />
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat'
term='singing' />
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat'
term='acoustic' />
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat'
term='cover' />
<category scheme='http://gdata.youtube.com/schemas/2007/categories.cat'
term='Music' label='Music' />
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat'
term='gnarls' />
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat'
term='music' />
<title type='text'>"Crazy (Gnarles Barkley)" - Acoustic Cover</title>
<content type='html'>&lt;div style="color: #000000;font-family:
Arial, Helvetica, sans-serif; font-size:12px; font-size: 12px;
width: 555px;"&gt;&lt;table cellspacing="0" cellpadding="0"
border="0"&gt;&lt;tbody&gt;&lt;tr&gt;&lt;td width="140"
valign="top" rowspan="2"&gt;&lt;div style="border: 1px solid
#999999; margin: 0px 10px 5px 0px;"&gt;&lt;a
href="http://www.youtube.com/watch?v=UMFI1hdm96E"&gt;&lt;img
alt=""
src="http://img.youtube.com/vi/UMFI1hdm96E/2.jpg"&gt;&lt;/a&gt;&lt;/div&gt;&lt;/td&gt;
&lt;td width="256" valign="top"&gt;&lt;div style="font-size:
12px; font-weight: bold;"&gt;&lt;a style="font-size: 15px;
font-weight: bold; font-decoration: none;"
href="http://www.youtube.com/watch?v=UMFI1hdm96E"&gt;&amp;quot;Crazy
(Gnarles Barkley)&amp;quot; - Acoustic Cover&lt;/a&gt;
&lt;br&gt;&lt;/div&gt; &lt;div style="font-size: 12px; margin:
3px 0px;"&gt;&lt;span&gt;Gnarles Barkley acoustic cover
http://www.myspace.com/davidchoimusic&lt;/span&gt;&lt;/div&gt;&lt;/td&gt;
&lt;td style="font-size: 11px; line-height: 1.4em; padding-left:
20px; padding-top: 1px;" width="146"
valign="top"&gt;&lt;div&gt;&lt;span style="color: #666666;
font-size: 11px;"&gt;From:&lt;/span&gt; &lt;a
href="http://www.youtube.com/profile?user=davidchoimusic"&gt;davidchoimusic&lt;/a&gt;&lt;/div&gt;
&lt;div&gt;&lt;span style="color: #666666; font-size:
11px;"&gt;Views:&lt;/span&gt; 113321&lt;/div&gt; &lt;div
style="white-space: nowrap;text-align: left"&gt;&lt;img
style="border: 0px none; margin: 0px; padding: 0px;
vertical-align: middle; font-size: 11px;" align="top" alt=""
src="http://gdata.youtube.com/static/images/icn_star_full_11x11.gif"&gt;
&lt;img style="border: 0px none; margin: 0px; padding: 0px;
vertical-align: middle; font-size: 11px;" align="top" alt=""
src="http://gdata.youtube.com/static/images/icn_star_full_11x11.gif"&gt;
&lt;img style="border: 0px none; margin: 0px; padding: 0px;
vertical-align: middle; font-size: 11px;" align="top" alt=""
src="http://gdata.youtube.com/static/images/icn_star_full_11x11.gif"&gt;
&lt;img style="border: 0px none; margin: 0px; padding: 0px;
vertical-align: middle; font-size: 11px;" align="top" alt=""
src="http://gdata.youtube.com/static/images/icn_star_full_11x11.gif"&gt;
&lt;img style="border: 0px none; margin: 0px; padding: 0px;
vertical-align: middle; font-size: 11px;" align="top" alt=""
src="http://gdata.youtube.com/static/images/icn_star_half_11x11.gif"&gt;&lt;/div&gt;
&lt;div style="font-size: 11px;"&gt;1005 &lt;span style="color:
#666666; font-size:
11px;"&gt;ratings&lt;/span&gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td&gt;&lt;span style="color: #666666; font-size:
11px;"&gt;Time:&lt;/span&gt; &lt;span style="color: #000000;
font-size: 11px; font-weight:
bold;"&gt;04:15&lt;/span&gt;&lt;/td&gt; &lt;td style="font-size:
11px; padding-left: 20px;"&gt;&lt;span style="color: #666666;
font-size: 11px;"&gt;More in&lt;/span&gt; &lt;a
href="http://www.youtube.com/categories_portal?c=10"&gt;Music&lt;/a&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt;&lt;/div&gt;</content>
<link rel='self' type='application/atom+xml'
href='http://gdata.youtube.com/feeds/videos/UMFI1hdm96E' />
<link rel='alternate' type='text/html'
href='http://www.youtube.com/watch?v=UMFI1hdm96E' />
<link rel='http://gdata.youtube.com/schemas/2007#video.responses'
type='application/atom+xml'
href='http://gdata.youtube.com/feeds/videos/UMFI1hdm96E/responses' />
<link rel='http://gdata.youtube.com/schemas/2007#video.related'
type='application/atom+xml'
href='http://gdata.youtube.com/feeds/videos/UMFI1hdm96E/related' />
<author>
<name>davidchoimusic</name>
<uri>http://gdata.youtube.com/feeds/users/davidchoimusic</uri>
</author>
<media:group>
<media:title type='plain'>"Crazy (Gnarles Barkley)" - Acoustic Cover</media:title>
<media:description type='plain'>Gnarles Barkley acoustic cover http://www.myspace.com/davidchoimusic</media:description>
<media:keywords>music, singing, gnarls, barkley, acoustic, cover</media:keywords>
<yt:duration seconds='255' />
<media:category label='Music'
scheme='http://gdata.youtube.com/schemas/2007/categories.cat'>
Music</media:category>
<media:category
scheme='http://gdata.youtube.com/schemas/2007/developertags.cat'>
DeveloperTag1</media:category>
<media:content url='http://www.youtube.com/v/UMFI1hdm96E'
type='application/x-shockwave-flash' medium='video'
isDefault='true' expression='full' duration='255'
yt:format='5' />
<media:player url='http://www.youtube.com/watch?v=UMFI1hdm96E' />
<media:thumbnail url='http://img.youtube.com/vi/UMFI1hdm96E/2.jpg'
height='97' width='130' time='00:02:07.500' />
<media:thumbnail url='http://img.youtube.com/vi/UMFI1hdm96E/1.jpg'
height='97' width='130' time='00:01:03.750' />
<media:thumbnail url='http://img.youtube.com/vi/UMFI1hdm96E/3.jpg'
height='97' width='130' time='00:03:11.250' />
<media:thumbnail url='http://img.youtube.com/vi/UMFI1hdm96E/0.jpg'
height='240' width='320' time='00:02:07.500' />
<yt:private />
</media:group>
<yt:statistics viewCount='113321' />
<gd:rating min='1' max='5' numRaters='1005' average='4.77' />
<georss:where>
<gml:Point>
<gml:pos>37.398529052734375 -122.0635986328125</gml:pos>
</gml:Point>
</georss:where>
<gd:comments>
<gd:feedLink href='http://gdata.youtube.com/feeds/videos/UMFI1hdm96E/comments' />
</gd:comments>
<yt:noembed />
<app:control>
<app:draft>yes</app:draft>
<yt:state
name="rejected"
reasonCode="inappropriate"
helpUrl="http://www.youtube.com/t/community_guidelines">
The content of this video may violate the terms of use.</yt:state>
</app:control>
</entry>"""
YOUTUBE_COMMENT_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'><id>http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments</id><updated>2008-05-19T21:45:45.261Z</updated><category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#comment'/><title type='text'>Comments</title><logo>http://www.youtube.com/img/pic_youtubelogo_123x63.gif</logo><link rel='related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU'/><link rel='alternate' type='text/html' href='http://www.youtube.com/watch?v=2Idhz9ef5oU'/><link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments'/><link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments?start-index=1&amp;max-results=25'/><author><name>YouTube</name><uri>http://www.youtube.com/</uri></author><generator version='beta' uri='http://gdata.youtube.com/'>YouTube data API</generator><openSearch:totalResults>0</openSearch:totalResults><openSearch:startIndex>1</openSearch:startIndex><openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<entry>
<id>http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments/91F809A3DE2EB81B</id>
<published>2008-02-22T15:27:15.000-08:00</published><updated>2008-02-22T15:27:15.000-08:00</updated>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#comment'/>
<title type='text'>test66</title>
<content type='text'>test66</content>
<link rel='related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU'/>
<link rel='alternate' type='text/html' href='http://www.youtube.com/watch?v=2Idhz9ef5oU'/>
<link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments/91F809A3DE2EB81B'/>
<author><name>apitestjhartmann</name><uri>http://gdata.youtube.com/feeds/users/apitestjhartmann</uri></author>
</entry>
<entry>
<id>http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments/A261AEEFD23674AA</id>
<published>2008-02-22T15:27:01.000-08:00</published><updated>2008-02-22T15:27:01.000-08:00</updated>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#comment'/>
<title type='text'>test333</title>
<content type='text'>test333</content>
<link rel='related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU'/>
<link rel='alternate' type='text/html' href='http://www.youtube.com/watch?v=2Idhz9ef5oU'/>
<link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments/A261AEEFD23674AA'/>
<author><name>apitestjhartmann</name><uri>http://gdata.youtube.com/feeds/users/apitestjhartmann</uri></author>
</entry>
<entry>
<id>http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments/0DCF1E3531B3FF85</id>
<published>2008-02-22T15:11:06.000-08:00</published><updated>2008-02-22T15:11:06.000-08:00</updated>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#comment'/>
<title type='text'>test2</title>
<content type='text'>test2</content>
<link rel='related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU'/>
<link rel='alternate' type='text/html' href='http://www.youtube.com/watch?v=2Idhz9ef5oU'/>
<link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments/0DCF1E3531B3FF85'/>
<author><name>apitestjhartmann</name><uri>http://gdata.youtube.com/feeds/users/apitestjhartmann</uri></author>
</entry>
</feed>"""
YOUTUBE_PLAYLIST_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:media='http://search.yahoo.com/mrss/'
xmlns:yt='http://gdata.youtube.com/schemas/2007'
xmlns:gd='http://schemas.google.com/g/2005'>
<id>http://gdata.youtube.com/feeds/users/andyland74/playlists?start-index=1&amp;max-results=25</id>
<updated>2008-02-26T00:26:15.635Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#playlistLink'/>
<title type='text'>andyland74's Playlists</title>
<logo>http://www.youtube.com/img/pic_youtubelogo_123x63.gif</logo>
<link rel='related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/andyland74'/>
<link rel='alternate' type='text/html' href='http://www.youtube.com/profile_play_list?user=andyland74'/>
<link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/andyland74/playlists'/>
<link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/andyland74/playlists?start-index=1&amp;max-results=25'/>
<author>
<name>andyland74</name>
<uri>http://gdata.youtube.com/feeds/users/andyland74</uri>
</author>
<generator version='beta' uri='http://gdata.youtube.com/'>YouTube data API</generator>
<openSearch:totalResults>1</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<entry>
<yt:description>My new playlist Description</yt:description>
<gd:feedLink rel='http://gdata.youtube.com/schemas/2007#playlist' href='http://gdata.youtube.com/feeds/playlists/8BCDD04DE8F771B2'/>
<id>http://gdata.youtube.com/feeds/users/andyland74/playlists/8BCDD04DE8F771B2</id>
<published>2007-11-04T17:30:27.000-08:00</published>
<updated>2008-02-22T09:55:14.000-08:00</updated>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#playlistLink'/>
<title type='text'>My New Playlist Title</title>
<content type='text'>My new playlist Description</content>
<link rel='related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/andyland74'/>
<link rel='alternate' type='text/html' href='http://www.youtube.com/view_play_list?p=8BCDD04DE8F771B2'/>
<link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/andyland74/playlists/8BCDD04DE8F771B2'/>
<author>
<name>andyland74</name>
<uri>http://gdata.youtube.com/feeds/users/andyland74</uri>
</author>
</entry>
</feed>"""
YOUTUBE_PLAYLIST_VIDEO_FEED = """<?xml version='1.0' encoding='UTF-8'?><feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:gml='http://www.opengis.net/gml' xmlns:georss='http://www.georss.org/georss' xmlns:media='http://search.yahoo.com/mrss/' xmlns:yt='http://gdata.youtube.com/schemas/2007' xmlns:gd='http://schemas.google.com/g/2005'><id>http://gdata.youtube.com/feeds/api/playlists/BCB3BB96DF51B505</id><updated>2008-05-16T12:03:17.000-07:00</updated><category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#playlist'/><category scheme='http://gdata.youtube.com/schemas/2007/tags.cat' term='videos'/><category scheme='http://gdata.youtube.com/schemas/2007/tags.cat' term='python'/><title type='text'>Test Playlist</title><subtitle type='text'>Test playlist 1</subtitle><logo>http://www.youtube.com/img/pic_youtubelogo_123x63.gif</logo><link rel='alternate' type='text/html' href='http://www.youtube.com/view_play_list?p=BCB3BB96DF51B505'/><link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/playlists/BCB3BB96DF51B505'/><link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/playlists/BCB3BB96DF51B505?start-index=1&max-results=25'/><author><name>gdpython</name><uri>http://gdata.youtube.com/feeds/api/users/gdpython</uri></author><generator version='beta' uri='http://gdata.youtube.com/'>YouTube data API</generator><openSearch:totalResults>1</openSearch:totalResults><openSearch:startIndex>1</openSearch:startIndex><openSearch:itemsPerPage>25</openSearch:itemsPerPage><media:group><media:title type='plain'>Test Playlist</media:title><media:description type='plain'>Test playlist 1</media:description><media:content url='http://www.youtube.com/ep.swf?id=BCB3BB96DF51B505' type='application/x-shockwave-flash' yt:format='5'/></media:group><entry><id>http://gdata.youtube.com/feeds/api/playlists/BCB3BB96DF51B505/B0F29389E537F888</id><updated>2008-05-16T20:54:08.520Z</updated><category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#playlist'/><title type='text'>Uploading YouTube Videos with the PHP Client Library</title><content type='text'>Jochen Hartmann demonstrates the basics of how to use the PHP Client Library with the YouTube Data API.
PHP Developer's Guide:
http://code.google.com/apis/youtube/developers_guide_php.html
Other documentation:
http://code.google.com/apis/youtube/</content><link rel='alternate' type='text/html' href='http://www.youtube.com/watch?v=iIp7OnHXBlo'/><link rel='http://gdata.youtube.com/schemas/2007#video.responses' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/videos/iIp7OnHXBlo/responses'/><link rel='http://gdata.youtube.com/schemas/2007#video.related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/videos/iIp7OnHXBlo/related'/><link rel='related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/videos/iIp7OnHXBlo'/><link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/playlists/BCB3BB96DF51B505/B0F29389E537F888'/><author><name>GoogleDevelopers</name><uri>http://gdata.youtube.com/feeds/api/users/googledevelopers</uri></author><media:group><media:title type='plain'>Uploading YouTube Videos with the PHP Client Library</media:title><media:description type='plain'>Jochen Hartmann demonstrates the basics of how to use the PHP Client Library with the YouTube Data API.
PHP Developer's Guide:
http://code.google.com/apis/youtube/developers_guide_php.html
Other documentation:
http://code.google.com/apis/youtube/</media:description><media:keywords>api, data, demo, php, screencast, tutorial, uploading, walkthrough, youtube</media:keywords><yt:duration seconds='466'/><media:category label='Education' scheme='http://gdata.youtube.com/schemas/2007/categories.cat'>Education</media:category><media:content url='http://www.youtube.com/v/iIp7OnHXBlo' type='application/x-shockwave-flash' medium='video' isDefault='true' expression='full' duration='466' yt:format='5'/><media:content url='rtsp://rtsp2.youtube.com/ChoLENy73wIaEQlaBtdxOnuKiBMYDSANFEgGDA==/0/0/0/video.3gp' type='video/3gpp' medium='video' expression='full' duration='466' yt:format='1'/><media:content url='rtsp://rtsp2.youtube.com/ChoLENy73wIaEQlaBtdxOnuKiBMYESARFEgGDA==/0/0/0/video.3gp' type='video/3gpp' medium='video' expression='full' duration='466' yt:format='6'/><media:player url='http://www.youtube.com/watch?v=iIp7OnHXBlo'/><media:thumbnail url='http://img.youtube.com/vi/iIp7OnHXBlo/2.jpg' height='97' width='130' time='00:03:53'/><media:thumbnail url='http://img.youtube.com/vi/iIp7OnHXBlo/1.jpg' height='97' width='130' time='00:01:56.500'/><media:thumbnail url='http://img.youtube.com/vi/iIp7OnHXBlo/3.jpg' height='97' width='130' time='00:05:49.500'/><media:thumbnail url='http://img.youtube.com/vi/iIp7OnHXBlo/0.jpg' height='240' width='320' time='00:03:53'/></media:group><yt:statistics viewCount='1550' favoriteCount='5'/><gd:rating min='1' max='5' numRaters='3' average='4.67'/><yt:location>undefined</yt:location><gd:comments><gd:feedLink href='http://gdata.youtube.com/feeds/api/videos/iIp7OnHXBlo/comments' countHint='2'/></gd:comments><yt:position>1</yt:position></entry></feed>"""
YOUTUBE_SUBSCRIPTION_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:media='http://search.yahoo.com/mrss/'
xmlns:yt='http://gdata.youtube.com/schemas/2007'
xmlns:gd='http://schemas.google.com/g/2005'>
<id>http://gdata.youtube.com/feeds/users/andyland74/subscriptions?start-index=1&amp;max-results=25</id>
<updated>2008-02-26T00:26:15.635Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://gdata.youtube.com/schemas/2007#subscription'/>
<title type='text'>andyland74's Subscriptions</title>
<logo>http://www.youtube.com/img/pic_youtubelogo_123x63.gif</logo>
<link rel='related' type='application/atom+xml'
href='http://gdata.youtube.com/feeds/users/andyland74'/>
<link rel='alternate' type='text/html'
href='http://www.youtube.com/profile_subscriptions?user=andyland74'/>
<link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml'
href='http://gdata.youtube.com/feeds/users/andyland74/subscriptions'/>
<link rel='self' type='application/atom+xml'
href='http://gdata.youtube.com/feeds/users/andyland74/subscriptions?start-index=1&amp;max-results=25'/>
<author>
<name>andyland74</name>
<uri>http://gdata.youtube.com/feeds/users/andyland74</uri>
</author>
<generator version='beta' uri='http://gdata.youtube.com/'>YouTube data API</generator>
<openSearch:totalResults>1</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<entry>
<id>http://gdata.youtube.com/feeds/users/andyland74/subscriptions/d411759045e2ad8c</id>
<published>2007-11-04T17:30:27.000-08:00</published>
<updated>2008-02-22T09:55:14.000-08:00</updated>
<category scheme='http://gdata.youtube.com/schemas/2007/subscriptiontypes.cat'
term='channel'/>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://gdata.youtube.com/schemas/2007#subscription'/>
<title type='text'>Videos published by : NBC</title>
<link rel='related' type='application/atom+xml'
href='http://gdata.youtube.com/feeds/users/andyland74'/>
<link rel='alternate' type='text/html'
href='http://www.youtube.com/profile_videos?user=NBC'/>
<link rel='self' type='application/atom+xml'
href='http://gdata.youtube.com/feeds/users/andyland74/subscriptions/d411759045e2ad8c'/>
<author>
<name>andyland74</name>
<uri>http://gdata.youtube.com/feeds/users/andyland74</uri>
</author>
<yt:username>NBC</yt:username>
<gd:feedLink rel='http://gdata.youtube.com/schemas/2007#user.uploads'
href='http://gdata.youtube.com/feeds/api/users/nbc/uploads'/>
</entry>
</feed>"""
YOUTUBE_VIDEO_RESPONSE_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:gml='http://www.opengis.net/gml' xmlns:georss='http://www.georss.org/georss' xmlns:media='http://search.yahoo.com/mrss/' xmlns:yt='http://gdata.youtube.com/schemas/2007' xmlns:gd='http://schemas.google.com/g/2005'>
<id>http://gdata.youtube.com/feeds/videos/2c3q9K4cHzY/responses</id><updated>2008-05-19T22:37:34.076Z</updated><category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#video'/><title type='text'>Videos responses to 'Giant NES controller coffee table'</title><logo>http://www.youtube.com/img/pic_youtubelogo_123x63.gif</logo><link rel='related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/2c3q9K4cHzY'/><link rel='alternate' type='text/html' href='http://www.youtube.com/video_response_view_all?v=2c3q9K4cHzY'/><link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/2c3q9K4cHzY/responses'/><link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/2c3q9K4cHzY/responses?start-index=1&amp;max-results=25'/><author><name>YouTube</name><uri>http://www.youtube.com/</uri></author><generator version='beta' uri='http://gdata.youtube.com/'>YouTube data API</generator><openSearch:totalResults>8</openSearch:totalResults><openSearch:startIndex>1</openSearch:startIndex><openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<entry>
<id>http://gdata.youtube.com/feeds/videos/7b9EnRI9VbY</id><published>2008-03-11T19:08:53.000-07:00</published><updated>2008-05-18T21:33:10.000-07:00</updated>
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='OD'/><category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#video'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='chat'/>
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='Uncle'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='sex'/>
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='catmint'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='kato'/>
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='kissa'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='katt'/>
<category scheme='http://gdata.youtube.com/schemas/2007/categories.cat' term='Animals' label='Pets &amp; Animals'/>
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='kat'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='cat'/>
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='cats'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='kedi'/>
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='gato'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='Brattman'/>
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='drug'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='overdose'/>
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='catnip'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='party'/>
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='Katze'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='gatto'/>
<title type='text'>Catnip Party</title><content type='html'>snipped</content>
<link rel='alternate' type='text/html' href='http://www.youtube.com/watch?v=7b9EnRI9VbY'/>
<link rel='http://gdata.youtube.com/schemas/2007#video.responses' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/7b9EnRI9VbY/responses'/>
<link rel='http://gdata.youtube.com/schemas/2007#video.related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/7b9EnRI9VbY/related'/>
<link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/2c3q9K4cHzY/responses/7b9EnRI9VbY'/>
<author><name>PismoBeach</name><uri>http://gdata.youtube.com/feeds/users/pismobeach</uri></author>
<media:group>
<media:title type='plain'>Catnip Party</media:title>
<media:description type='plain'>Uncle, Hillary, Hankette, and B4 all but overdose on the patio</media:description><media:keywords>Brattman, cat, catmint, catnip, cats, chat, drug, gato, gatto, kat, kato, katt, Katze, kedi, kissa, OD, overdose, party, sex, Uncle</media:keywords>
<yt:duration seconds='139'/>
<media:category label='Pets &amp; Animals' scheme='http://gdata.youtube.com/schemas/2007/categories.cat'>Animals</media:category>
<media:content url='http://www.youtube.com/v/7b9EnRI9VbY' type='application/x-shockwave-flash' medium='video' isDefault='true' expression='full' duration='139' yt:format='5'/>
<media:content url='rtsp://rtsp2.youtube.com/ChoLENy73wIaEQm2VT0SnUS_7RMYDSANFEgGDA==/0/0/0/video.3gp' type='video/3gpp' medium='video' expression='full' duration='139' yt:format='1'/>
<media:content url='rtsp://rtsp2.youtube.com/ChoLENy73wIaEQm2VT0SnUS_7RMYESARFEgGDA==/0/0/0/video.3gp' type='video/3gpp' medium='video' expression='full' duration='139' yt:format='6'/>
<media:player url='http://www.youtube.com/watch?v=7b9EnRI9VbY'/>
<media:thumbnail url='http://img.youtube.com/vi/7b9EnRI9VbY/2.jpg' height='97' width='130' time='00:01:09.500'/>
<media:thumbnail url='http://img.youtube.com/vi/7b9EnRI9VbY/1.jpg' height='97' width='130' time='00:00:34.750'/>
<media:thumbnail url='http://img.youtube.com/vi/7b9EnRI9VbY/3.jpg' height='97' width='130' time='00:01:44.250'/>
<media:thumbnail url='http://img.youtube.com/vi/7b9EnRI9VbY/0.jpg' height='240' width='320' time='00:01:09.500'/>
</media:group>
<yt:statistics viewCount='4235' favoriteCount='3'/>
<gd:rating min='1' max='5' numRaters='24' average='3.54'/>
<gd:comments>
<gd:feedLink href='http://gdata.youtube.com/feeds/videos/7b9EnRI9VbY/comments' countHint='14'/>
</gd:comments>
</entry>
</feed>
"""
YOUTUBE_PROFILE = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:media='http://search.yahoo.com/mrss/'
xmlns:yt='http://gdata.youtube.com/schemas/2007'
xmlns:gd='http://schemas.google.com/g/2005'>
<id>http://gdata.youtube.com/feeds/users/andyland74</id>
<published>2006-10-16T00:09:45.000-07:00</published>
<updated>2008-02-26T11:48:21.000-08:00</updated>
<category scheme='http://gdata.youtube.com/schemas/2007/channeltypes.cat'
term='Standard'/>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://gdata.youtube.com/schemas/2007#userProfile'/>
<title type='text'>andyland74 Channel</title>
<link rel='alternate' type='text/html'
href='http://www.youtube.com/profile?user=andyland74'/>
<link rel='self' type='application/atom+xml'
href='http://gdata.youtube.com/feeds/users/andyland74'/>
<author>
<name>andyland74</name>
<uri>http://gdata.youtube.com/feeds/users/andyland74</uri>
</author>
<yt:age>33</yt:age>
<yt:username>andyland74</yt:username>
<yt:firstName>andy</yt:firstName>
<yt:lastName>example</yt:lastName>
<yt:books>Catch-22</yt:books>
<yt:gender>m</yt:gender>
<yt:company>Google</yt:company>
<yt:hobbies>Testing YouTube APIs</yt:hobbies>
<yt:hometown>Somewhere</yt:hometown>
<yt:location>US</yt:location>
<yt:movies>Aqua Teen Hungerforce</yt:movies>
<yt:music>Elliott Smith</yt:music>
<yt:occupation>Technical Writer</yt:occupation>
<yt:school>University of North Carolina</yt:school>
<media:thumbnail url='http://i.ytimg.com/vi/YFbSxcdOL-w/default.jpg'/>
<yt:statistics viewCount='9' videoWatchCount='21' subscriberCount='1'
lastWebAccess='2008-02-25T16:03:38.000-08:00'/>
<gd:feedLink rel='http://gdata.youtube.com/schemas/2007#user.favorites'
href='http://gdata.youtube.com/feeds/users/andyland74/favorites' countHint='4'/>
<gd:feedLink rel='http://gdata.youtube.com/schemas/2007#user.contacts'
href='http://gdata.youtube.com/feeds/users/andyland74/contacts' countHint='1'/>
<gd:feedLink rel='http://gdata.youtube.com/schemas/2007#user.inbox'
href='http://gdata.youtube.com/feeds/users/andyland74/inbox' countHint='0'/>
<gd:feedLink rel='http://gdata.youtube.com/schemas/2007#user.playlists'
href='http://gdata.youtube.com/feeds/users/andyland74/playlists'/>
<gd:feedLink rel='http://gdata.youtube.com/schemas/2007#user.subscriptions'
href='http://gdata.youtube.com/feeds/users/andyland74/subscriptions' countHint='4'/>
<gd:feedLink rel='http://gdata.youtube.com/schemas/2007#user.uploads'
href='http://gdata.youtube.com/feeds/users/andyland74/uploads' countHint='1'/>
</entry>"""
YOUTUBE_CONTACTS_FEED = """<?xml version='1.0' encoding='UTF-8'?><feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:yt='http://gdata.youtube.com/schemas/2007' xmlns:gd='http://schemas.google.com/g/2005'>
<id>http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts</id><updated>2008-05-16T19:24:34.916Z</updated><category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#friend'/><title type='text'>apitestjhartmann's Contacts</title><logo>http://www.youtube.com/img/pic_youtubelogo_123x63.gif</logo><link rel='alternate' type='text/html' href='http://www.youtube.com/profile_friends?user=apitestjhartmann'/><link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts'/><link rel='http://schemas.google.com/g/2005#post' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts'/><link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts?start-index=1&amp;max-results=25'/><author><name>apitestjhartmann</name><uri>http://gdata.youtube.com/feeds/users/apitestjhartmann</uri></author><generator version='beta' uri='http://gdata.youtube.com/'>YouTube data API</generator><openSearch:totalResults>2</openSearch:totalResults><openSearch:startIndex>1</openSearch:startIndex><openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<entry>
<id>http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts/test89899090</id><published>2008-02-04T11:27:54.000-08:00</published><updated>2008-05-16T19:24:34.916Z</updated><category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#friend'/><title type='text'>test89899090</title><link rel='related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/test89899090'/><link rel='alternate' type='text/html' href='http://www.youtube.com/profile?user=test89899090'/><link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts/test89899090'/><link rel='edit' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts/test89899090'/><author><name>apitestjhartmann</name><uri>http://gdata.youtube.com/feeds/users/apitestjhartmann</uri></author><yt:username>test89899090</yt:username><yt:status>requested</yt:status></entry>
<entry>
<id>http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts/testjfisher</id><published>2008-02-26T14:13:03.000-08:00</published><updated>2008-05-16T19:24:34.916Z</updated><category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#friend'/><title type='text'>testjfisher</title><link rel='related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/testjfisher'/><link rel='alternate' type='text/html' href='http://www.youtube.com/profile?user=testjfisher'/><link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts/testjfisher'/><link rel='edit' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts/testjfisher'/><author><name>apitestjhartmann</name><uri>http://gdata.youtube.com/feeds/users/apitestjhartmann</uri></author><yt:username>testjfisher</yt:username><yt:status>pending</yt:status></entry>
</feed>"""
NEW_CONTACT = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:gd='http://schemas.google.com/g/2005'
xmlns:gContact='http://schemas.google.com/contact/2008'>
<id>http://www.google.com/m8/feeds/contacts/liz%40gmail.com/base/8411573</id>
<updated>2008-02-28T18:47:02.303Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/contact/2008#contact' />
<title type='text'>Fitzgerald</title>
<content type='text'>Notes</content>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/m8/feeds/contacts/liz%40gmail.com/full/8411573' />
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/m8/feeds/contacts/liz%40gmail.com/full/8411573/1204224422303000' />
<gd:email rel='http://schemas.google.com/g/2005#work'
address='[email protected]' />
<gd:email rel='http://schemas.google.com/g/2005#home'
address='[email protected]' />
<gd:phoneNumber rel='http://schemas.google.com/g/2005#work'
primary='true'>(206)555-1212</gd:phoneNumber>
<gd:phoneNumber rel='http://schemas.google.com/g/2005#other'
primary='true'>456-123-2133</gd:phoneNumber>
<gd:phoneNumber rel='http://schemas.google.com/g/2005#home'>(206)555-1213</gd:phoneNumber>
<gd:extendedProperty name="pet" value="hamster" />
<gd:extendedProperty name="cousine">
<italian />
</gd:extendedProperty>
<gContact:groupMembershipInfo deleted="false" href="http://google.com/m8/feeds/groups/liz%40gmail.com/base/270f" />
<gd:im address='[email protected]'
protocol='http://schemas.google.com/g/2005#GOOGLE_TALK'
rel='http://schemas.google.com/g/2005#home' />
<gd:postalAddress rel='http://schemas.google.com/g/2005#work'
primary='true'>1600 Amphitheatre Pkwy Mountain View</gd:postalAddress>
</entry>"""
CONTACTS_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:gd='http://schemas.google.com/g/2005'
xmlns:gContact='http://schemas.google.com/contact/2008'
xmlns:batch='http://schemas.google.com/gdata/batch'>
<id>http://www.google.com/m8/feeds/contacts/liz%40gmail.com/base</id>
<updated>2008-03-05T12:36:38.836Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/contact/2008#contact' />
<title type='text'>Contacts</title>
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='http://www.google.com/m8/feeds/contacts/liz%40gmail.com/full' />
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='http://www.google.com/m8/feeds/contacts/liz%40gmail.com/full' />
<link rel='http://schemas.google.com/g/2005#batch'
type='application/atom+xml'
href='http://www.google.com/m8/feeds/contacts/liz%40gmail.com/full/batch' />
<link rel='self' type='application/atom+xml'
href='http://www.google.com/m8/feeds/contacts/liz%40gmail.com/full?max-results=25' />
<author>
<name>Elizabeth Bennet</name>
<email>[email protected]</email>
</author>
<generator version='1.0' uri='http://www.google.com/m8/feeds/contacts'>
Contacts
</generator>
<openSearch:totalResults>1</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<entry>
<id>
http://www.google.com/m8/feeds/contacts/liz%40gmail.com/base/c9012de
</id>
<updated>2008-03-05T12:36:38.835Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/contact/2008#contact' />
<title type='text'>Fitzgerald</title>
<link rel="http://schemas.google.com/contacts/2008/rel#photo" type="image/*"
href="http://google.com/m8/feeds/photos/media/liz%40gmail.com/c9012de"/>
<link rel="http://schemas.google.com/contacts/2008/rel#edit-photo" type="image/*"
href="http://www.google.com/m8/feeds/photos/media/liz%40gmail.com/c9012de/photo4524"/>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/m8/feeds/contacts/liz%40gmail.com/full/c9012de' />
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/m8/feeds/contacts/liz%40gmail.com/full/c9012de/1204720598835000' />
<gd:phoneNumber rel='http://schemas.google.com/g/2005#home'
primary='true'>
456
</gd:phoneNumber>
<gd:extendedProperty name="pet" value="hamster" />
<gContact:groupMembershipInfo deleted="false" href="http://google.com/m8/feeds/groups/liz%40gmail.com/base/270f" />
</entry>
</feed>"""
CONTACT_GROUPS_FEED = """<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:gContact="http://schemas.google.com/contact/2008"
xmlns:batch="http://schemas.google.com/gdata/batch"
xmlns:gd="http://schemas.google.com/g/2005">
<id>[email protected]</id>
<updated>2008-05-21T21:11:25.237Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/contact/2008#group"/>
<title type="text">Jo's Contact Groups</title>
<link rel="alternate" type="text/html" href="http://www.google.com/"/>
<link rel="http://schemas.google.com/g/2005#feed"
type="application/atom+xml"
href="http://google.m/m8/feeds/groups/jo%40gmail.com/thin"/>
<link rel="http://schemas.google.com/g/2005#post"
type="application/atom+xml"
href="http://google.m/m8/feeds/groups/jo%40gmail.com/thin"/>
<link rel="http://schemas.google.com/g/2005#batch"
type="application/atom+xml"
href="http://googleom/m8/feeds/groups/jo%40gmail.com/thin/batch"/>
<link rel="self"
type="application/atom+xml"
href="http://google.com/m8/feeds/groups/jo%40gmail.com/thin?max-results=25"/>
<author>
<name>Jo Brown</name>
<email>[email protected]</email>
</author>
<generator version="1.0" uri="http://google.com/m8/feeds">Contacts</generator>
<openSearch:totalResults>3</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<entry>
<id>http://google.com/m8/feeds/groups/jo%40gmail.com/base/270f</id>
<updated>2008-05-14T13:10:19.070Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/contact/2008#group"/>
<title type="text">joggers</title>
<content type="text">joggers</content>
<link rel="self" type="application/atom+xml"
href="http://google.com/m8/feeds/groups/jo%40gmail.com/thin/270f"/>
<link rel="edit" type="application/atom+xml"
href="http://google.com/m8/feeds/groups/jo%40gmail.com/thin/270f/1210770619070000"/>
</entry>
</feed>"""
CONTACT_GROUP_ENTRY = """<?xml version="1.0" encoding="UTF-8"?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:gd="http://schemas.google.com/g/2005">
<category scheme="http://schemas.google.com/g/2005#kind"
term="http://schemas.google.com/g/2005#group"/>
<id>http://www.google.com/feeds/groups/jo%40gmail.com/base/1234</id>
<published>2005-01-18T21:00:00Z</published>
<updated>2006-01-01T00:00:00Z</updated>
<title type="text">Salsa group</title>
<content type="text">Salsa group</content>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/m8/feeds/groups/jo%40gmail.com/full/2' />
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/m8/feeds/groups/jo%40gmail.com/full/2/0'/>
<gd:extendedProperty name="more info about the group">
<info>Very nice people.</info>
</gd:extendedProperty>
</entry>"""
CALENDAR_RESOURCE_ENTRY = """<?xml version="1.0"?>
<atom:entry xmlns:atom="http://www.w3.org/2005/Atom" xmlns:apps="http://schemas.google.com/apps/2006">
<apps:property name="resourceId" value="CR-NYC-14-12-BR"/>
<apps:property name="resourceCommonName" value="Boardroom"/>
<apps:property name="resourceDescription" value="This conference room is in New York City, building 14, floor 12, Boardroom"/>
<apps:property name="resourceType" value="CR"/>
</atom:entry>"""
CALENDAR_RESOURCES_FEED = """<?xml version="1.0"?>
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/" xmlns:apps="http://schemas.google.com/apps/2006">
<id>https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com</id>
<updated>2008-10-17T15:29:21.064Z</updated>
<link rel="next" type="application/atom+xml" href="https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com/?start=the next resourceId"/>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com"/>
<link rel="http://schemas.google.com/g/2005#post" type="application/atom+xml" href="https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com"/>
<link rel="self" type="application/atom+xml" href="https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com?start=CR-NYC-14-12-BR"/>
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com/CR-NYC-14-12-BR</id>
<updated>2008-10-17T15:29:21.064Z</updated>
<link rel="self" type="application/atom+xml" href="https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com/CR-NYC-14-12-BR"/>
<link rel="edit" type="application/atom+xml" href="https://apps-apis.google.com/feeds/calendar/resource/2.0/yourdomain.com/CR-NYC-14-12-BR"/>
<apps:property name="resourceId" value="CR-NYC-14-12-BR"/>
<apps:property name="resourceCommonName" value="Boardroom"/>
<apps:property name="resourceEmail" value="[email protected]"/>
<apps:property name="resourceDescription" value="This conference room is in New York City, building 14, floor 12, Boardroom"/>
<apps:property name="resourceType" value="CR"/>
</entry>
<entry>
<id>https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com/?start=(Bike)-London-43-Lobby-Bike-1</id>
<updated>2008-10-17T15:29:21.064Z</updated>
<link rel="self" type="application/atom+xml" href="https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com/(Bike)-London-43-Lobby-Bike-1"/>
<link rel="edit" type="application/atom+xml" href="https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com/(Bike)-London-43-Lobby-Bike-1"/>
<apps:property name="resourceId" value="(Bike)-London-43-Lobby-Bike-1"/>
<apps:property name="resourceCommonName" value="London bike-1"/>
<apps:property name="resourceEmail" value="[email protected]"/>
<apps:property name="resourceDescription" value="Bike is in London at building 43's lobby."/>
<apps:property name="resourceType" value="(Bike)"/>
</entry>
</feed>"""
BLOG_ENTRY = """<entry xmlns='http://www.w3.org/2005/Atom'>
<id>tag:blogger.com,1999:blog-blogID.post-postID</id>
<published>2006-08-02T18:44:43.089-07:00</published>
<updated>2006-11-08T18:10:23.020-08:00</updated>
<title type='text'>Lizzy's Diary</title>
<summary type='html'>Being the journal of Elizabeth Bennet</summary>
<link rel='alternate' type='text/html'
href='http://blogName.blogspot.com/'>
</link>
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='http://blogName.blogspot.com/feeds/posts/default'>
</link>
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='http://www.blogger.com/feeds/blogID/posts/default'>
</link>
<link rel='self' type='application/atom+xml'
href='http://www.blogger.com/feeds/userID/blogs/blogID'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.blogger.com/feeds/userID/blogs/blogID'>
</link>
<author>
<name>Elizabeth Bennet</name>
<email>[email protected]</email>
</author>
</entry>"""
BLOG_POST = """<entry xmlns='http://www.w3.org/2005/Atom'>
<title type='text'>Marriage!</title>
<content type='xhtml'>
<div xmlns="http://www.w3.org/1999/xhtml">
<p>Mr. Darcy has <em>proposed marriage</em> to me!</p>
<p>He is the last man on earth I would ever desire to marry.</p>
<p>Whatever shall I do?</p>
</div>
</content>
<author>
<name>Elizabeth Bennet</name>
<email>[email protected]</email>
</author>
</entry>"""
BLOG_POSTS_FEED = """<feed xmlns='http://www.w3.org/2005/Atom'>
<id>tag:blogger.com,1999:blog-blogID</id>
<updated>2006-11-08T18:10:23.020-08:00</updated>
<title type='text'>Lizzy's Diary</title>
<link rel='alternate' type='text/html'
href='http://blogName.blogspot.com/index.html'>
</link>
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='http://blogName.blogspot.com/feeds/posts/default'>
</link>
<link rel='self' type='application/atom+xml'
href='http://blogName.blogspot.com/feeds/posts/default'>
</link>
<author>
<name>Elizabeth Bennet</name>
<email>[email protected]</email>
</author>
<generator version='7.00' uri='http://www2.blogger.com'>Blogger</generator>
<entry>
<id>tag:blogger.com,1999:blog-blogID.post-postID</id>
<published>2006-11-08T18:10:00.000-08:00</published>
<updated>2006-11-08T18:10:14.954-08:00</updated>
<title type='text'>Quite disagreeable</title>
<content type='html'>&lt;p&gt;I met Mr. Bingley's friend Mr. Darcy
this evening. I found him quite disagreeable.&lt;/p&gt;</content>
<link rel='alternate' type='text/html'
href='http://blogName.blogspot.com/2006/11/quite-disagreeable.html'>
</link>
<link rel='self' type='application/atom+xml'
href='http://blogName.blogspot.com/feeds/posts/default/postID'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.blogger.com/feeds/blogID/posts/default/postID'>
</link>
<author>
<name>Elizabeth Bennet</name>
<email>[email protected]</email>
</author>
</entry>
</feed>"""
BLOG_COMMENTS_FEED = """<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/">
<id>tag:blogger.com,1999:blog-blogID.postpostID..comments</id>
<updated>2007-04-04T21:56:29.803-07:00</updated>
<title type="text">My Blog : Time to relax</title>
<link rel="alternate" type="text/html" href="http://blogName.blogspot.com/2007/04/first-post.html"/>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="http://blogName.blogspot.com/feeds/postID/comments/default"/>
<link rel="self" type="application/atom+xml" href="http://blogName.blogspot.com/feeds/postID/comments/default"/>
<author>
<name>Blog Author name</name>
</author>
<generator version="7.00" uri="http://www2.blogger.com">Blogger</generator>
<openSearch:totalResults>1</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>tag:blogger.com,1999:blog-blogID.post-commentID</id>
<published>2007-04-04T21:56:00.000-07:00</published>
<updated>2007-04-04T21:56:29.803-07:00</updated>
<title type="text">This is my first comment</title>
<content type="html">This is my first comment</content>
<link rel="alternate" type="text/html" href="http://a-blogName.blogspot.com/2007/04/first-post.html#commentID"/>
<link rel="self" type="application/atom+xml" href="http://blogName.blogspot.com/feeds/postID/comments/default/commentID"/>
<link rel="edit" type="application/atom+xml" href="http://www.blogger.com/feeds/blogID/postID/comments/default/commentID"/>
<author>
<name>Blog Author name</name>
</author>
<thr:in-reply-to xmlns:thr='http://purl.org/syndication/thread/1.0'
href='http://blogName.blogspot.com/2007/04/first-post.html'
ref='tag:blogger.com,1999:blog-blogID.post-postID'
source='http://blogName.blogspot.com/feeds/posts/default/postID'
type='text/html' />
</entry>
</feed>"""
SITES_FEED = """<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:gd="http://schemas.google.com/g/2005"
xmlns:wt="http://schemas.google.com/webmasters/tools/2007">
<id>https://www.google.com/webmasters/tools/feeds/sites</id>
<title>Sites</title>
<openSearch:startIndex>1</openSearch:startIndex>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/webmasters/tools/2007#sites-feed" />
<link href="http://www.google.com/webmasters/tools/feeds/sites" rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" />
<link href="http://www.google.com/webmasters/tools/feeds/sites" rel="http://schemas.google.com/g/2005#post" type="application/atom+xml" />
<link href="http://www.google.com/webmasters/tools/feeds/sites" rel="self" type="application/atom+xml" />
<updated>2008-10-02T07:26:51.833Z</updated>
<entry>
<id>http://www.example.com</id>
<title type="text">http://www.example.com</title>
<link href="http://www.google.com/webmasters/tools/feeds/sites/http%3A%2F%2Fwww.example.com%2F" rel="self" type="application/atom+xml"/>
<link href="http://www.google.com/webmasters/tools/feeds/sites/http%3A%2F%2Fwww.example.com%2F" rel="edit" type="application/atom+xml"/>
<content src="http://www.example.com"/>
<updated>2007-11-17T18:27:32.543Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/webmasters/tools/2007#site-info"/>
<gd:entryLink rel="http://schemas.google.com/webmasters/tools/2007#verification"
href="https://www.google.com/webmasters/tools/feeds/http%3A%2F%2Fwww%2Eexample%2Ecom%2F/verification" />
<gd:entryLink rel="http://schemas.google.com/webmasters/tools/2007#sitemaps"
href="https://www.google.com/webmasters/tools/feeds/http%3A%2F%2Fwww%2Eexample%2Ecom%2F/sitemaps" />
<wt:indexed>true</wt:indexed>
<wt:crawled>2008-09-14T08:59:28.000</wt:crawled>
<wt:geolocation>US</wt:geolocation>
<wt:preferred-domain>none</wt:preferred-domain>
<wt:crawl-rate>normal</wt:crawl-rate>
<wt:enhanced-image-search>true</wt:enhanced-image-search>
<wt:verified>false</wt:verified>
<wt:verification-method type="metatag" in-use="false"><meta name="verify-v1" content="a2Ai"/>
</wt:verification-method>
<wt:verification-method type="htmlpage" in-use="false">456456-google.html</wt:verification-method>
</entry>
</feed>"""
SITEMAPS_FEED = """<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:wt="http://schemas.google.com/webmasters/tools/2007">
<id>http://www.example.com</id>
<title type="text">http://www.example.com/</title>
<updated>2006-11-17T18:27:32.543Z</updated>
<link rel="self" type="application/atom+xml"
href="https://www.google.com/webmasters/tools/feeds/http%3A%2F%2Fwww%2Eexample%2Ecom%2F/sitemaps" />
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/webmasters/tools/2007#sitemaps-feed'/>
<wt:sitemap-mobile>
<wt:markup-language>HTML</wt:markup-language>
<wt:markup-language>WAP</wt:markup-language>
</wt:sitemap-mobile>
<wt:sitemap-news>
<wt:publication-label>Value1</wt:publication-label>
<wt:publication-label>Value2</wt:publication-label>
<wt:publication-label>Value3</wt:publication-label>
</wt:sitemap-news>
<entry>
<id>http://www.example.com/sitemap-index.xml</id>
<title type="text">http://www.example.com/sitemap-index.xml</title>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/webmasters/tools/2007#sitemap-regular'/>
<updated>2006-11-17T18:27:32.543Z</updated>
<wt:sitemap-type>WEB</wt:sitemap-type>
<wt:sitemap-status>StatusValue</wt:sitemap-status>
<wt:sitemap-last-downloaded>2006-11-18T19:27:32.543Z</wt:sitemap-last-downloaded>
<wt:sitemap-url-count>102</wt:sitemap-url-count>
</entry>
<entry>
<id>http://www.example.com/mobile/sitemap-index.xml</id>
<title type="text">http://www.example.com/mobile/sitemap-index.xml</title>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/webmasters/tools/2007#sitemap-mobile'/>
<updated>2006-11-17T18:27:32.543Z</updated>
<wt:sitemap-status>StatusValue</wt:sitemap-status>
<wt:sitemap-last-downloaded>2006-11-18T19:27:32.543Z</wt:sitemap-last-downloaded>
<wt:sitemap-url-count>102</wt:sitemap-url-count>
<wt:sitemap-mobile-markup-language>HTML</wt:sitemap-mobile-markup-language>
</entry>
<entry>
<id>http://www.example.com/news/sitemap-index.xml</id>
<title type="text">http://www.example.com/news/sitemap-index.xml</title>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/webmasters/tools/2007#sitemap-news'/>
<updated>2006-11-17T18:27:32.543Z</updated>
<wt:sitemap-status>StatusValue</wt:sitemap-status>
<wt:sitemap-last-downloaded>2006-11-18T19:27:32.543Z</wt:sitemap-last-downloaded>
<wt:sitemap-url-count>102</wt:sitemap-url-count>
<wt:sitemap-news-publication-label>LabelValue</wt:sitemap-news-publication-label>
</entry>
</feed>"""
HEALTH_CCR_NOTICE_PAYLOAD = """<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<Body>
<Problems>
<Problem>
<DateTime>
<Type><Text>Start date</Text></Type>
<ExactDateTime>2007-04-04T07:00:00Z</ExactDateTime>
</DateTime>
<Description>
<Text>Aortic valve disorders</Text>
<Code>
<Value>410.10</Value>
<CodingSystem>ICD9</CodingSystem>
<Version>2004</Version>
</Code>
</Description>
<Status><Text>Active</Text></Status>
</Problem>
</Problems>
</Body>
</ContinuityOfCareRecord>"""
HEALTH_PROFILE_ENTRY_DIGEST = """<?xml version="1.0" encoding="UTF-8"?>
<entry xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/" xmlns:ccr="urn:astm-org:CCR" xmlns:batch="http://schemas.google.com/gdata/batch" xmlns:h9m="http://schemas.google.com/health/metadata">
<id>https://www.google.com/health/feeds/profile/default/vneCn5qdEIY_digest</id>
<updated>2008-09-29T07:52:17.176Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile" />
<link rel="alternate" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default?digest=true" />
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/vneCn5qdEIY_digest" />
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/vneCn5qdEIY_digest" />
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>vneCn5qdEIY</CCRDocumentObjectID>
<Language>
<Text>English</Text>
<Code>
<Value>en</Value>
<CodingSystem>ISO-639-1</CodingSystem>
</Code>
</Language>
<Version>V1.0</Version>
<DateTime>
<ExactDateTime>2008-09-29T07:52:17.176Z</ExactDateTime>
</DateTime>
<Patient>
<ActorID>Google Health Profile</ActorID>
</Patient>
<Body>
<FunctionalStatus>
<Function>
<Type>
<Text>Pregnancy status</Text>
</Type>
<Description>
<Text>Not pregnant</Text>
</Description>
<Status />
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
</Function>
<Function>
<Type>
<Text>Breastfeeding status</Text>
</Type>
<Description>
<Text>Not breastfeeding</Text>
</Description>
<Status />
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
</Function>
</FunctionalStatus>
<Problems>
<Problem>
<CCRDataObjectID>Hn0FE0IlcY-FMFFgSTxkvA/CONDITION/0</CCRDataObjectID>
<DateTime>
<Type>
<Text>Start date</Text>
</Type>
<ExactDateTime>2007-04-04T07:00:00Z</ExactDateTime>
</DateTime>
<Description>
<Text>Aortic valve disorders</Text>
<Code>
<Value>410.10</Value>
<CodingSystem>ICD9</CodingSystem>
<Version>2004</Version>
</Code>
</Description>
<Status>
<Text>Active</Text>
</Status>
<Source>
<Actor>
<ActorID>example.com</ActorID>
<ActorRole>
<Text>Information Provider</Text>
</ActorRole>
</Actor>
</Source>
</Problem>
<Problem>
<Type />
<Description>
<Text>Malaria</Text>
<Code>
<Value>136.9</Value>
<CodingSystem>ICD9_Broader</CodingSystem>
</Code>
<Code>
<Value>084.6</Value>
<CodingSystem>ICD9</CodingSystem>
</Code>
</Description>
<Status>
<Text>ACTIVE</Text>
</Status>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<HealthStatus>
<Description />
</HealthStatus>
</Problem>
</Problems>
<SocialHistory>
<SocialHistoryElement>
<Type>
<Text>Race</Text>
<Code>
<Value>S15814</Value>
<CodingSystem>HL7</CodingSystem>
</Code>
</Type>
<Description>
<Text>White</Text>
</Description>
<Status />
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Episodes>
<Frequency>
<Units />
</Frequency>
</Episodes>
</SocialHistoryElement>
</SocialHistory>
<Alerts>
<Alert>
<Type>
<Text>Allergy</Text>
</Type>
<Description>
<Text>A-Fil</Text>
</Description>
<Status>
<Text>ACTIVE</Text>
</Status>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Reaction>
<Description />
<Severity>
<Text>Severe</Text>
</Severity>
</Reaction>
</Alert>
<Alert>
<Type>
<Text>Allergy</Text>
</Type>
<Description>
<Text>A.E.R Traveler</Text>
</Description>
<Status>
<Text>ACTIVE</Text>
</Status>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Reaction>
<Description />
<Severity>
<Text>Severe</Text>
</Severity>
</Reaction>
</Alert>
</Alerts>
<Medications>
<Medication>
<Type />
<Description />
<Status>
<Text>ACTIVE</Text>
</Status>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Product>
<ProductName>
<Text>A&amp; D</Text>
</ProductName>
<Strength>
<Units />
<StrengthSequencePosition>0</StrengthSequencePosition>
<VariableStrengthModifier />
</Strength>
</Product>
<Directions>
<Direction>
<Description />
<DeliveryMethod />
<Dose>
<Units />
<DoseSequencePosition>0</DoseSequencePosition>
<VariableDoseModifier />
</Dose>
<Route>
<Text>To skin</Text>
<Code>
<Value>C38305</Value>
<CodingSystem>FDA</CodingSystem>
</Code>
<RouteSequencePosition>0</RouteSequencePosition>
<MultipleRouteModifier />
</Route>
</Direction>
</Directions>
<Refills />
</Medication>
<Medication>
<Type />
<Description />
<Status>
<Text>ACTIVE</Text>
</Status>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Product>
<ProductName>
<Text>A-Fil</Text>
</ProductName>
<Strength>
<Units />
<StrengthSequencePosition>0</StrengthSequencePosition>
<VariableStrengthModifier />
</Strength>
</Product>
<Directions>
<Direction>
<Description />
<DeliveryMethod />
<Dose>
<Units />
<DoseSequencePosition>0</DoseSequencePosition>
<VariableDoseModifier />
</Dose>
<Route>
<Text>To skin</Text>
<Code>
<Value>C38305</Value>
<CodingSystem>FDA</CodingSystem>
</Code>
<RouteSequencePosition>0</RouteSequencePosition>
<MultipleRouteModifier />
</Route>
</Direction>
</Directions>
<Refills />
</Medication>
<Medication>
<Type />
<Description />
<Status>
<Text>ACTIVE</Text>
</Status>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Product>
<ProductName>
<Text>Lipitor</Text>
</ProductName>
<Strength>
<Units />
<StrengthSequencePosition>0</StrengthSequencePosition>
<VariableStrengthModifier />
</Strength>
</Product>
<Directions>
<Direction>
<Description />
<DeliveryMethod />
<Dose>
<Units />
<DoseSequencePosition>0</DoseSequencePosition>
<VariableDoseModifier />
</Dose>
<Route>
<Text>By mouth</Text>
<Code>
<Value>C38288</Value>
<CodingSystem>FDA</CodingSystem>
</Code>
<RouteSequencePosition>0</RouteSequencePosition>
<MultipleRouteModifier />
</Route>
</Direction>
</Directions>
<Refills />
</Medication>
</Medications>
<Immunizations>
<Immunization>
<Type />
<Description />
<Status />
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Product>
<ProductName>
<Text>Chickenpox Vaccine</Text>
<Code>
<Value>21</Value>
<CodingSystem>HL7</CodingSystem>
</Code>
</ProductName>
</Product>
<Directions>
<Direction>
<Description />
<DeliveryMethod />
</Direction>
</Directions>
<Refills />
</Immunization>
</Immunizations>
<VitalSigns>
<Result>
<Type />
<Description />
<Status />
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Substance />
<Test>
<Type />
<Description>
<Text>Height</Text>
</Description>
<Status />
<TestResult>
<ResultSequencePosition>0</ResultSequencePosition>
<VariableResultModifier />
<Value>70</Value>
<Units>
<Unit>inches</Unit>
</Units>
</TestResult>
<ConfidenceValue />
</Test>
</Result>
<Result>
<Type />
<Description />
<Status />
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Substance />
<Test>
<Type />
<Description>
<Text>Weight</Text>
</Description>
<Status />
<TestResult>
<ResultSequencePosition>0</ResultSequencePosition>
<VariableResultModifier />
<Value>2480</Value>
<Units>
<Unit>ounces</Unit>
</Units>
</TestResult>
<ConfidenceValue />
</Test>
</Result>
<Result>
<Type />
<Description />
<Status />
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Substance />
<Test>
<Type />
<Description>
<Text>Blood Type</Text>
</Description>
<Status />
<TestResult>
<ResultSequencePosition>0</ResultSequencePosition>
<VariableResultModifier />
<Value>O+</Value>
<Units />
</TestResult>
<ConfidenceValue />
</Test>
</Result>
</VitalSigns>
<Results>
<Result>
<Type />
<Description />
<Status />
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Substance />
<Test>
<DateTime>
<Type>
<Text>Collection start date</Text>
</Type>
<ExactDateTime>2008-09-03</ExactDateTime>
</DateTime>
<Type />
<Description>
<Text>Acetaldehyde - Blood</Text>
</Description>
<Status />
<TestResult>
<ResultSequencePosition>0</ResultSequencePosition>
<VariableResultModifier />
<Units />
</TestResult>
<ConfidenceValue />
</Test>
</Result>
</Results>
<Procedures>
<Procedure>
<Type />
<Description>
<Text>Abdominal Ultrasound</Text>
</Description>
<Status />
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
</Procedure>
<Procedure>
<Type />
<Description>
<Text>Abdominoplasty</Text>
</Description>
<Status />
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
</Procedure>
</Procedures>
</Body>
<Actors>
<Actor>
<ActorObjectID>Google Health Profile</ActorObjectID>
<Person>
<Name>
<BirthName />
<CurrentName />
</Name>
<DateOfBirth>
<Type />
<ExactDateTime>1984-07-22</ExactDateTime>
</DateOfBirth>
<Gender>
<Text>Male</Text>
</Gender>
</Person>
<Status />
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
</Actor>
</Actors>
</ContinuityOfCareRecord>
</entry>"""
HEALTH_PROFILE_FEED = """<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:ccr="urn:astm-org:CCR" xmlns:batch="http://schemas.google.com/gdata/batch" xmlns:h9m="http://schemas.google.com/health/metadata">
<id>https://www.google.com/health/feeds/profile/default</id>
<updated>2008-09-30T01:07:17.888Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<title type="text">Profile Feed</title>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default"/>
<link rel="http://schemas.google.com/g/2005#post" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default"/>
<link rel="http://schemas.google.com/g/2005#batch" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/batch"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default?digest=false"/>
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>https://www.google.com/health/feeds/profile/default/DysasdfARnFAao</id>
<published>2008-09-29T03:12:50.850Z</published>
<updated>2008-09-29T03:12:50.850Z</updated>
<category term="MEDICATION"/>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category scheme="http://schemas.google.com/health/item" term="A& D"/>
<title type="text"/>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/MEDICATION/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DA%26+D"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/DysasdfARnFAao"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/DysasdfARnFAao"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>hiD9sEigSzdk8nNT0evR4g</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body>
<Medications>
<Medication>
<Type/>
<Description/>
<Status>
<Text>ACTIVE</Text>
</Status>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Product>
<ProductName>
<Text>A&amp; D</Text>
</ProductName>
<Strength>
<Units/>
<StrengthSequencePosition>0</StrengthSequencePosition>
<VariableStrengthModifier/>
</Strength>
</Product>
<Directions>
<Direction>
<Description/>
<DeliveryMethod/>
<Dose>
<Units/>
<DoseSequencePosition>0</DoseSequencePosition>
<VariableDoseModifier/>
</Dose>
<Route>
<Text>To skin</Text>
<Code>
<Value>C38305</Value>
<CodingSystem>FDA</CodingSystem>
</Code>
<RouteSequencePosition>0</RouteSequencePosition>
<MultipleRouteModifier/>
</Route>
</Direction>
</Directions>
<Refills/>
</Medication>
</Medications>
</Body>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/7I1WQzZrgp4</id>
<published>2008-09-29T03:27:14.909Z</published>
<updated>2008-09-29T03:27:14.909Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category scheme="http://schemas.google.com/health/item" term="A-Fil"/>
<category term="ALLERGY"/>
<title type="text"/>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DA-Fil/ALLERGY"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/7I1WQzZrgp4"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/7I1WQzZrgp4"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>YOyHDxQUiECCPgnsjV8SlQ</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body>
<Alerts>
<Alert>
<Type>
<Text>Allergy</Text>
</Type>
<Description>
<Text>A-Fil</Text>
</Description>
<Status>
<Text>ACTIVE</Text>
</Status>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Reaction>
<Description/>
<Severity>
<Text>Severe</Text>
</Severity>
</Reaction>
</Alert>
</Alerts>
</Body>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/Dz9wV83sKFg</id>
<published>2008-09-29T03:12:52.166Z</published>
<updated>2008-09-29T03:12:52.167Z</updated>
<category term="MEDICATION"/>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category scheme="http://schemas.google.com/health/item" term="A-Fil"/>
<title type="text"/>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/MEDICATION/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DA-Fil"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/Dz9wV83sKFg"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/Dz9wV83sKFg"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>7w.XFEPeuIYN3Rn32pUiUw</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body>
<Medications>
<Medication>
<Type/>
<Description/>
<Status>
<Text>ACTIVE</Text>
</Status>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Product>
<ProductName>
<Text>A-Fil</Text>
</ProductName>
<Strength>
<Units/>
<StrengthSequencePosition>0</StrengthSequencePosition>
<VariableStrengthModifier/>
</Strength>
</Product>
<Directions>
<Direction>
<Description/>
<DeliveryMethod/>
<Dose>
<Units/>
<DoseSequencePosition>0</DoseSequencePosition>
<VariableDoseModifier/>
</Dose>
<Route>
<Text>To skin</Text>
<Code>
<Value>C38305</Value>
<CodingSystem>FDA</CodingSystem>
</Code>
<RouteSequencePosition>0</RouteSequencePosition>
<MultipleRouteModifier/>
</Route>
</Direction>
</Directions>
<Refills/>
</Medication>
</Medications>
</Body>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/lzsxVzqZUyw</id>
<published>2008-09-29T03:13:07.496Z</published>
<updated>2008-09-29T03:13:07.497Z</updated>
<category scheme="http://schemas.google.com/health/item" term="A.E.R Traveler"/>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category term="ALLERGY"/>
<title type="text"/>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DA.E.R+Traveler/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/ALLERGY"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/lzsxVzqZUyw"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/lzsxVzqZUyw"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>5efFB0J2WgEHNUvk2z3A1A</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body>
<Alerts>
<Alert>
<Type>
<Text>Allergy</Text>
</Type>
<Description>
<Text>A.E.R Traveler</Text>
</Description>
<Status>
<Text>ACTIVE</Text>
</Status>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Reaction>
<Description/>
<Severity>
<Text>Severe</Text>
</Severity>
</Reaction>
</Alert>
</Alerts>
</Body>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/6PvhfKAXyYw</id>
<published>2008-09-29T03:13:02.123Z</published>
<updated>2008-09-29T03:13:02.124Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category term="PROCEDURE"/>
<category scheme="http://schemas.google.com/health/item" term="Abdominal Ultrasound"/>
<title type="text"/>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/PROCEDURE/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DAbdominal+Ultrasound"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/6PvhfKAXyYw"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/6PvhfKAXyYw"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>W3Wbvx_QHwG5pxVchpuF1A</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body>
<Procedures>
<Procedure>
<Type/>
<Description>
<Text>Abdominal Ultrasound</Text>
</Description>
<Status/>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
</Procedure>
</Procedures>
</Body>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/r2zGPGewCeU</id>
<published>2008-09-29T03:13:03.434Z</published>
<updated>2008-09-29T03:13:03.435Z</updated>
<category scheme="http://schemas.google.com/health/item" term="Abdominoplasty"/>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category term="PROCEDURE"/>
<title type="text"/>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DAbdominoplasty/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/PROCEDURE"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/r2zGPGewCeU"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/r2zGPGewCeU"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>OUKgj5X0KMnbkC5sDL.yHA</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body>
<Procedures>
<Procedure>
<Type/>
<Description>
<Text>Abdominoplasty</Text>
</Description>
<Status/>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
</Procedure>
</Procedures>
</Body>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/_cCCbQ0O3ug</id>
<published>2008-09-29T03:13:29.041Z</published>
<updated>2008-09-29T03:13:29.042Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category scheme="http://schemas.google.com/health/item" term="Acetaldehyde - Blood"/>
<category term="LABTEST"/>
<title type="text"/>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DAcetaldehyde+-+Blood/LABTEST"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/_cCCbQ0O3ug"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/_cCCbQ0O3ug"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>YWtomFb8aG.DueZ7z7fyug</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body>
<Results>
<Result>
<Type/>
<Description/>
<Status/>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Substance/>
<Test>
<DateTime>
<Type>
<Text>Collection start date</Text>
</Type>
<ExactDateTime>2008-09-03</ExactDateTime>
</DateTime>
<Type/>
<Description>
<Text>Acetaldehyde - Blood</Text>
</Description>
<Status/>
<TestResult>
<ResultSequencePosition>0</ResultSequencePosition>
<VariableResultModifier/>
<Units/>
</TestResult>
<ConfidenceValue/>
</Test>
</Result>
</Results>
</Body>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/BdyA3iJZyCc</id>
<published>2008-09-29T03:00:45.915Z</published>
<updated>2008-09-29T03:00:45.915Z</updated>
<category scheme="http://schemas.google.com/health/item" term="Aortic valve disorders"/>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category term="CONDITION"/>
<title type="text">Aortic valve disorders</title>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DAortic+valve+disorders/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/CONDITION"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/BdyA3iJZyCc"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/BdyA3iJZyCc"/>
<author>
<name>example.com</name>
<uri>example.com</uri>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>h1ljpoeKJ85li.1FHsG9Gw</CCRDocumentObjectID>
<Body>
<Problems>
<Problem>
<CCRDataObjectID>Hn0FE0IlcY-FMFFgSTxkvA/CONDITION/0</CCRDataObjectID>
<DateTime>
<Type>
<Text>Start date</Text>
</Type>
<ExactDateTime>2007-04-04T07:00:00Z</ExactDateTime>
</DateTime>
<Description>
<Text>Aortic valve disorders</Text>
<Code>
<Value>410.10</Value>
<CodingSystem>ICD9</CodingSystem>
<Version>2004</Version>
</Code>
</Description>
<Status>
<Text>Active</Text>
</Status>
<Source>
<Actor>
<ActorID>example.com</ActorID>
<ActorRole>
<Text>Information Provider</Text>
</ActorRole>
</Actor>
</Source>
</Problem>
</Problems>
</Body>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/Cl.aMWIH5VA</id>
<published>2008-09-29T03:13:34.996Z</published>
<updated>2008-09-29T03:13:34.997Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category scheme="http://schemas.google.com/health/item" term="Chickenpox Vaccine"/>
<category term="IMMUNIZATION"/>
<title type="text"/>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DChickenpox+Vaccine/IMMUNIZATION"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/Cl.aMWIH5VA"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/Cl.aMWIH5VA"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>KlhUqfftgELIitpKbqYalw</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body>
<Immunizations>
<Immunization>
<Type/>
<Description/>
<Status/>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Product>
<ProductName>
<Text>Chickenpox Vaccine</Text>
<Code>
<Value>21</Value>
<CodingSystem>HL7</CodingSystem>
</Code>
</ProductName>
</Product>
<Directions>
<Direction>
<Description/>
<DeliveryMethod/>
</Direction>
</Directions>
<Refills/>
</Immunization>
</Immunizations>
</Body>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/l0a7.FlX3_0</id>
<published>2008-09-29T03:14:47.461Z</published>
<updated>2008-09-29T03:14:47.461Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category term="DEMOGRAPHICS"/>
<category scheme="http://schemas.google.com/health/item" term="Demographics"/>
<title type="text">Demographics</title>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/DEMOGRAPHICS/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DDemographics"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/l0a7.FlX3_0"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/l0a7.FlX3_0"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>U5GDAVOxFbexQw3iyvqPYg</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body/>
<Actors>
<Actor>
<Person>
<Name>
<BirthName/>
<CurrentName/>
</Name>
<DateOfBirth>
<Type/>
<ExactDateTime>1984-07-22</ExactDateTime>
</DateOfBirth>
<Gender>
<Text>Male</Text>
</Gender>
</Person>
<Status/>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
</Actor>
</Actors>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/oIBDdgwFLyo</id>
<published>2008-09-29T03:14:47.690Z</published>
<updated>2008-09-29T03:14:47.691Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category term="DEMOGRAPHICS"/>
<category scheme="http://schemas.google.com/health/item" term="FunctionalStatus"/>
<title type="text">FunctionalStatus</title>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/DEMOGRAPHICS/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DFunctionalStatus"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/oIBDdgwFLyo"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/oIBDdgwFLyo"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>W.EJcnhxb7W5M4eR4Tr1YA</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body>
<FunctionalStatus>
<Function>
<Type>
<Text>Pregnancy status</Text>
</Type>
<Description>
<Text>Not pregnant</Text>
</Description>
<Status/>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
</Function>
<Function>
<Type>
<Text>Breastfeeding status</Text>
</Type>
<Description>
<Text>Not breastfeeding</Text>
</Description>
<Status/>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
</Function>
</FunctionalStatus>
</Body>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/wwljIlXuTVg</id>
<published>2008-09-29T03:26:10.080Z</published>
<updated>2008-09-29T03:26:10.081Z</updated>
<category term="MEDICATION"/>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category scheme="http://schemas.google.com/health/item" term="Lipitor"/>
<title type="text"/>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/MEDICATION/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DLipitor"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/wwljIlXuTVg"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/wwljIlXuTVg"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>OrpghzvvbG_YaO5koqT2ug</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body>
<Medications>
<Medication>
<Type/>
<Description/>
<Status>
<Text>ACTIVE</Text>
</Status>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Product>
<ProductName>
<Text>Lipitor</Text>
</ProductName>
<Strength>
<Units/>
<StrengthSequencePosition>0</StrengthSequencePosition>
<VariableStrengthModifier/>
</Strength>
</Product>
<Directions>
<Direction>
<Description/>
<DeliveryMethod/>
<Dose>
<Units/>
<DoseSequencePosition>0</DoseSequencePosition>
<VariableDoseModifier/>
</Dose>
<Route>
<Text>By mouth</Text>
<Code>
<Value>C38288</Value>
<CodingSystem>FDA</CodingSystem>
</Code>
<RouteSequencePosition>0</RouteSequencePosition>
<MultipleRouteModifier/>
</Route>
</Direction>
</Directions>
<Refills/>
</Medication>
</Medications>
</Body>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/dd09TR12SiY</id>
<published>2008-09-29T07:52:17.175Z</published>
<updated>2008-09-29T07:52:17.176Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category scheme="http://schemas.google.com/health/item" term="Malaria"/>
<category term="CONDITION"/>
<title type="text"/>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DMalaria/CONDITION"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/dd09TR12SiY"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/dd09TR12SiY"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>XF99N6X4lpy.jfPUPLMMSQ</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body>
<Problems>
<Problem>
<Type/>
<Description>
<Text>Malaria</Text>
<Code>
<Value>136.9</Value>
<CodingSystem>ICD9_Broader</CodingSystem>
</Code>
<Code>
<Value>084.6</Value>
<CodingSystem>ICD9</CodingSystem>
</Code>
</Description>
<Status>
<Text>ACTIVE</Text>
</Status>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<HealthStatus>
<Description/>
</HealthStatus>
</Problem>
</Problems>
</Body>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/aS0Cf964DPs</id>
<published>2008-09-29T03:14:47.463Z</published>
<updated>2008-09-29T03:14:47.463Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category term="DEMOGRAPHICS"/>
<category scheme="http://schemas.google.com/health/item" term="SocialHistory (Drinking, Smoking)"/>
<title type="text">SocialHistory (Drinking, Smoking)</title>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/DEMOGRAPHICS/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DSocialHistory+%28Drinking%2C+Smoking%29"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/aS0Cf964DPs"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/aS0Cf964DPs"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>kXylGU5YXLBzriv61xPGZQ</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body>
<SocialHistory>
<SocialHistoryElement>
<Type>
<Text>Race</Text>
<Code>
<Value>S15814</Value>
<CodingSystem>HL7</CodingSystem>
</Code>
</Type>
<Description>
<Text>White</Text>
</Description>
<Status/>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Episodes>
<Frequency>
<Units/>
</Frequency>
</Episodes>
</SocialHistoryElement>
</SocialHistory>
</Body>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/s5lII5xfj_g</id>
<published>2008-09-29T03:14:47.544Z</published>
<updated>2008-09-29T03:14:47.545Z</updated>
<category scheme="http://schemas.google.com/health/item" term="VitalSigns"/>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category term="DEMOGRAPHICS"/>
<title type="text">VitalSigns</title>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DVitalSigns/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/DEMOGRAPHICS"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/s5lII5xfj_g"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/s5lII5xfj_g"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>FTTIiY0TVVj35kZqFFjPjQ</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body>
<VitalSigns>
<Result>
<Type/>
<Description/>
<Status/>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Substance/>
<Test>
<Type/>
<Description>
<Text>Height</Text>
</Description>
<Status/>
<TestResult>
<ResultSequencePosition>0</ResultSequencePosition>
<VariableResultModifier/>
<Value>70</Value>
<Units>
<Unit>inches</Unit>
</Units>
</TestResult>
<ConfidenceValue/>
</Test>
</Result>
<Result>
<Type/>
<Description/>
<Status/>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Substance/>
<Test>
<Type/>
<Description>
<Text>Weight</Text>
</Description>
<Status/>
<TestResult>
<ResultSequencePosition>0</ResultSequencePosition>
<VariableResultModifier/>
<Value>2480</Value>
<Units>
<Unit>ounces</Unit>
</Units>
</TestResult>
<ConfidenceValue/>
</Test>
</Result>
<Result>
<Type/>
<Description/>
<Status/>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Substance/>
<Test>
<Type/>
<Description>
<Text>Blood Type</Text>
</Description>
<Status/>
<TestResult>
<ResultSequencePosition>0</ResultSequencePosition>
<VariableResultModifier/>
<Value>O+</Value>
<Units/>
</TestResult>
<ConfidenceValue/>
</Test>
</Result>
</VitalSigns>
</Body>
</ContinuityOfCareRecord>
</entry>
</feed>"""
HEALTH_PROFILE_LIST_ENTRY = """ <entry xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'>
<id>
https://www.google.com/health/feeds/profile/list/vndCn5sdfwdEIY</id>
<updated>1970-01-01T00:00:00.000Z</updated>
<title type='text'>profile name</title>
<content type='text'>vndCn5sdfwdEIY</content>
<link rel='self' type='application/atom+xml'
href='https://www.google.com/health/feeds/profile/list/vndCn5sdfwdEIY' />
<link rel='edit' type='application/atom+xml'
href='https://www.google.com/health/feeds/profile/list/vndCn5sdfwdEIY' />
<author>
<name>[email protected]</name>
</author>
</entry>"""
BOOK_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>"""\
"""<entry xmlns='http://www.w3.org/2005/Atom' xmlns:gbs='http://schemas.google.com/books/2008' xmlns:dc='http://purl.org/dc/terms' xmlns:gd='http://schemas.google.com/g/2005'>"""\
"""<id>http://www.google.com/books/feeds/volumes/b7GZr5Btp30C</id>"""\
"""<updated>2009-04-24T23:35:16.000Z</updated>"""\
"""<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/books/2008#volume'/>"""\
"""<title type='text'>A theory of justice</title>"""\
"""<link rel='http://schemas.google.com/books/2008/thumbnail' type='image/x-unknown' href='http://bks0.books.google.com/books?id=b7GZr5Btp30C&printsec=frontcover&img=1&zoom=5&sig=ACfU3U121bWZsbjBfVwVRSK2o982jJTd1w&source=gbs_gdata'/>"""\
"""<link rel='http://schemas.google.com/books/2008/info' type='text/html' href='http://books.google.com/books?id=b7GZr5Btp30C&ie=ISO-8859-1&source=gbs_gdata'/>"""\
"""<link rel='http://schemas.google.com/books/2008/annotation' type='application/atom+xml' href='http://www.google.com/books/feeds/users/me/volumes'/>"""\
"""<link rel='alternate' type='text/html' href='http://books.google.com/books?id=b7GZr5Btp30C&ie=ISO-8859-1'/>"""\
"""<link rel='self' type='application/atom+xml' href='http://www.google.com/books/feeds/volumes/b7GZr5Btp30C'/>"""\
"""<gbs:embeddability value='http://schemas.google.com/books/2008#embeddable'/>"""\
"""<gbs:openAccess value='http://schemas.google.com/books/2008#disabled'/>"""\
"""<gd:rating min='1' max='5' average='4.00'/>"""\
"""<gbs:viewability value='http://schemas.google.com/books/2008#view_partial'/>"""\
"""<dc:creator>John Rawls</dc:creator>"""\
"""<dc:date>1999</dc:date>"""\
"""<dc:description>p Since it appeared in 1971, John Rawls's i A Theory of Justice /i has become a classic. The author has now revised the original edition to clear up a number of difficulties he and others have found in the original book. /p p Rawls aims to express an essential part of the common core of the democratic tradition--justice as fairness--and to provide an alternative to utilitarianism, which had dominated the Anglo-Saxon tradition of political thought since the nineteenth century. Rawls substitutes the ideal of the social contract as a more satisfactory account of the basic rights and liberties of citizens as free and equal persons. "Each person," writes Rawls, "possesses an inviolability founded on justice that even the welfare of society as a whole cannot override." Advancing the ideas of Rousseau, Kant, Emerson, and Lincoln, Rawls's theory is as powerful today as it was when first published. /p</dc:description>"""\
"""<dc:format>538 pages</dc:format>"""\
"""<dc:identifier>b7GZr5Btp30C</dc:identifier>"""\
"""<dc:identifier>ISBN:0198250541</dc:identifier>"""\
"""<dc:identifier>ISBN:9780198250548</dc:identifier>"""\
"""<dc:language>en</dc:language>"""\
"""<dc:publisher>Oxford University Press</dc:publisher>"""\
"""<dc:title>A theory of justice</dc:title>"""\
"""</entry>"""
BOOK_FEED = """<?xml version='1.0' encoding='UTF-8'?>"""\
"""<feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:gbs='http://schemas.google.com/books/2008' xmlns:dc='http://purl.org/dc/terms' xmlns:gd='http://schemas.google.com/g/2005'>"""\
"""<id>http://www.google.com/books/feeds/volumes</id>"""\
"""<updated>2009-04-24T23:39:47.000Z</updated>"""\
"""<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/books/2008#volume'/>"""\
"""<title type='text'>Search results for 9780198250548</title>"""\
"""<link rel='alternate' type='text/html' href='http://www.google.com'/>"""\
"""<link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml' href='http://www.google.com/books/feeds/volumes'/>"""\
"""<link rel='self' type='application/atom+xml' href='http://www.google.com/books/feeds/volumes?q=9780198250548'/>"""\
"""<author>"""\
"""<name>Google Books Search</name>"""\
"""<uri>http://www.google.com</uri>"""\
"""</author>"""\
"""<generator version='beta'>Google Book Search data API</generator>"""\
"""<openSearch:totalResults>1</openSearch:totalResults>"""\
"""<openSearch:startIndex>1</openSearch:startIndex>"""\
"""<openSearch:itemsPerPage>20</openSearch:itemsPerPage>"""\
"""<entry>"""\
"""<id>http://www.google.com/books/feeds/volumes/b7GZr5Btp30C</id>"""\
"""<updated>2009-04-24T23:39:47.000Z</updated>"""\
"""<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/books/2008#volume'/>"""\
"""<title type='text'>A theory of justice</title>"""\
"""<link rel='http://schemas.google.com/books/2008/thumbnail' type='image/x-unknown' href='http://bks9.books.google.com/books?id=b7GZr5Btp30C&printsec=frontcover&img=1&zoom=5&sig=ACfU3U121bWZsbjBfVwVRSK2o982jJTd1w&source=gbs_gdata'/>"""\
"""<link rel='http://schemas.google.com/books/2008/info' type='text/html' href='http://books.google.com/books?id=b7GZr5Btp30C&dq=9780198250548&ie=ISO-8859-1&source=gbs_gdata'/>"""\
"""<link rel='http://schemas.google.com/books/2008/preview' type='text/html' href='http://books.google.com/books?id=b7GZr5Btp30C&pg=PA494&dq=9780198250548&ie=ISO-8859-1&source=gbs_gdata'/>"""\
"""<link rel='http://schemas.google.com/books/2008/annotation' type='application/atom+xml' href='http://www.google.com/books/feeds/users/me/volumes'/>"""\
"""<link rel='alternate' type='text/html' href='http://books.google.com/books?id=b7GZr5Btp30C&dq=9780198250548&ie=ISO-8859-1'/>"""\
"""<link rel='self' type='application/atom+xml' href='http://www.google.com/books/feeds/volumes/b7GZr5Btp30C'/>"""\
"""<gbs:embeddability value='http://schemas.google.com/books/2008#embeddable'/>"""\
"""<gbs:openAccess value='http://schemas.google.com/books/2008#disabled'/>"""\
"""<gbs:viewability value='http://schemas.google.com/books/2008#view_partial'/>"""\
"""<dc:creator>John Rawls</dc:creator>"""\
"""<dc:date>1999</dc:date>"""\
"""<dc:description>... 9780198250548 ...</dc:description>"""\
"""<dc:format>538 pages</dc:format>"""\
"""<dc:identifier>b7GZr5Btp30C</dc:identifier>"""\
"""<dc:identifier>ISBN:0198250541</dc:identifier>"""\
"""<dc:identifier>ISBN:9780198250548</dc:identifier>"""\
"""<dc:subject>Law</dc:subject>"""\
"""<dc:title>A theory of justice</dc:title>"""\
"""</entry>"""\
"""</feed>"""
MAP_FEED = """<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/" xmlns:batch="http://schemas.google.com/gdata/batch" xmlns:gd="http://schemas.google.com/g/2005" gd:etag="W/"CkIESHg4eSp7ImA9WxJbF08."">
<id>http://maps.google.com/maps/feeds/maps/208825816854482607313</id>
<updated>2009-07-27T18:48:29.631Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/maps/2008#map"/>
<title>My maps</title>
<link rel="alternate" type="text/html" href="http://maps.google.com/maps/ms?msa=1"/>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="http://maps.google.com/maps/feeds/maps/208825816854482607313/full"/>
<link rel="self" type="application/atom+xml" href="http://maps.google.com/maps/feeds/maps/208825816854482607313/full"/>
<link rel="http://schemas.google.com/g/2005#batch" type="application/atom+xml" href="http://maps.google.com/maps/feeds/maps/208825816854482607313/full/batch"/>
<link rel="http://schemas.google.com/g/2005#post" type="application/atom+xml" href="http://maps.google.com/maps/feeds/maps/208825816854482607313/full"/>
<author>
<name>Roman</name>
</author>
<openSearch:totalResults>1</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>1</openSearch:itemsPerPage>
<entry gd:etag="W/"CkIESHg4eSp7ImA9WxJbF08."">
<id>http://maps.google.com/maps/feeds/maps/208825816854482607313/00046fb45f88fa910bcea</id>
<published>2009-07-27T18:46:34.451Z</published>
<updated>2009-07-27T18:48:29.631Z</updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">2009-07-27T18:48:29.631Z</app:edited>
<app:control xmlns:app="http://www.w3.org/2007/app">
<app:draft>yes</app:draft>
</app:control>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/maps/2008#map"/>
<title>Untitled</title>
<summary/>
<content src="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full"/>
<link rel="self" type="application/atom+xml" href="http://maps.google.com/maps/feeds/maps/208825816854482607313/full/00046fb45f88fa910bcea"/>
<link rel="alternate" type="text/html" href="http://maps.google.com/maps/ms?msa=0&msid=208825816854482607313.00046fb45f88fa910bcea"/>
<link rel="edit" type="application/atom+xml" href="http://maps.google.com/maps/feeds/maps/208825816854482607313/full/00046fb45f88fa910bcea"/>
<author>
<name>Roman</name>
</author>
</entry>
</feed>
"""
MAP_ENTRY = """<?xml version="1.0" encoding="UTF-8"?>
<entry xmlns="http://www.w3.org/2005/Atom" xmlns:batch="http://schemas.google.com/gdata/batch" xmlns:gd="http://schemas.google.com/g/2005" gd:etag="W/"CkIESHg4eSp7ImA9WxJbF08."">
<id>http://maps.google.com/maps/feeds/maps/208825816854482607313/00046fb45f88fa910bcea</id>
<published>2009-07-27T18:46:34.451Z</published>
<updated>2009-07-27T18:48:29.631Z</updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">2009-07-27T18:48:29.631Z</app:edited>
<app:control xmlns:app="http://www.w3.org/2007/app">
<app:draft>yes</app:draft>
</app:control>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/maps/2008#map"/>
<title>Untitled</title>
<summary/>
<content src="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full"/>
<link rel="self" type="application/atom+xml" href="http://maps.google.com/maps/feeds/maps/208825816854482607313/full/00046fb45f88fa910bcea"/>
<link rel="alternate" type="text/html" href="http://maps.google.com/maps/ms?msa=0&msid=208825816854482607313.00046fb45f88fa910bcea"/>
<link rel="edit" type="application/atom+xml" href="http://maps.google.com/maps/feeds/maps/208825816854482607313/full/00046fb45f88fa910bcea"/>
<author>
<name>Roman</name>
</author>
</entry>
"""
MAP_FEATURE_FEED = """<?xml version="1.0" encoding="UTF-8"?>
<atom:feed xmlns="http://www.opengis.net/kml/2.2" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/" xmlns:batch="http://schemas.google.com/gdata/batch" xmlns:gd="http://schemas.google.com/g/2005" gd:etag="W/"CkIESHg4eSp7ImA9WxJbF08."">
<atom:id>http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea</atom:id>
<atom:updated>2009-07-27T18:48:29.631Z</atom:updated>
<atom:category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/maps/2008#feature"/>
<atom:title>Untitled</atom:title>
<atom:link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full"/>
<atom:link rel="self" type="application/atom+xml" href="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full"/>
<atom:link rel="http://schemas.google.com/g/2005#batch" type="application/atom+xml" href="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full/batch"/>
<atom:link rel="http://schemas.google.com/g/2005#post" type="application/atom+xml" href="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full"/>
<openSearch:totalResults>4</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>4</openSearch:itemsPerPage>
<atom:entry gd:etag="W/"CkMBRH44fyp7ImA9WxJbF08."">
<atom:id>http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/00046fb4632573b19e0b7</atom:id>
<atom:published>2009-07-27T18:47:35.037Z</atom:published>
<atom:updated>2009-07-27T18:47:35.037Z</atom:updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">2009-07-27T18:47:35.037Z</app:edited>
<atom:category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/maps/2008#feature"/>
<atom:title>Some feature title</atom:title>
<atom:content type="application/vnd.google-earth.kml+xml">
<Placemark>
<name>Some feature title</name>
<description><![CDATA[<div dir="ltr">Some feature content</div>]]></description>
<Style>
<IconStyle>
<Icon>
<href>http://maps.gstatic.com/intl/en_us/mapfiles/ms/micons/ylw-pushpin.png</href>
</Icon>
</IconStyle>
</Style>
<Point>
<coordinates>-113.818359,41.442726,0.0</coordinates>
</Point>
</Placemark>
</atom:content>
<atom:link rel="self" type="application/atom+xml" href="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full/00046fb4632573b19e0b7"/>
<atom:link rel="edit" type="application/atom+xml" href="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full/00046fb4632573b19e0b7"/>
<atom:author>
<atom:name>Roman</atom:name>
</atom:author>
<atom:contributor>
<atom:name>Roman</atom:name>
</atom:contributor>
</atom:entry>
<atom:entry gd:etag="W/"CkIEQ38zfCp7ImA9WxJbF08."">
<atom:id>http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/00046fb46325e839a11e6</atom:id>
<atom:published>2009-07-27T18:47:35.067Z</atom:published>
<atom:updated>2009-07-27T18:48:22.184Z</atom:updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">2009-07-27T18:48:22.184Z</app:edited>
<atom:category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/maps/2008#feature"/>
<atom:title>A cool poly!</atom:title>
<atom:content type="application/vnd.google-earth.kml+xml">
<Placemark>
<name>A cool poly!</name>
<description><![CDATA[<div dir="ltr">And a description</div>]]></description>
<Style>
<LineStyle>
<color>FF0066FF</color>
<width>3</width>
</LineStyle>
<PolyStyle>
<color>730099FF</color>
<fill>1</fill>
<outline>1</outline>
</PolyStyle>
</Style>
<Polygon>
<outerBoundaryIs>
<LinearRing>
<tessellate>1</tessellate>
<coordinates>-109.775391,47.457809,0.0 -99.755859,51.508742,0.0 -92.900391,48.04871,0.0 -92.8125,44.339565,0.0 -95.273437,44.402392,0.0 -97.207031,46.619261,0.0 -100.898437,46.073231,0.0 -102.480469,43.068888,0.0 -110.742187,45.274886,0.0 -109.775391,47.457809,0.0 </coordinates>
</LinearRing>
</outerBoundaryIs>
</Polygon>
</Placemark>
</atom:content>
<atom:link rel="self" type="application/atom+xml" href="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full/00046fb46325e839a11e6"/>
<atom:link rel="edit" type="application/atom+xml" href="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full/00046fb46325e839a11e6"/>
<atom:author>
<atom:name>Roman</atom:name>
</atom:author>
<atom:contributor>
<atom:name>Roman</atom:name>
</atom:contributor>
</atom:entry>
<atom:entry gd:etag="W/"CkIEQ38yfCp7ImA9WxJbF08."">
<atom:id>http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/00046fb465f5002e56b7a</atom:id>
<atom:published>2009-07-27T18:48:22.194Z</atom:published>
<atom:updated>2009-07-27T18:48:22.194Z</atom:updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">2009-07-27T18:48:22.194Z</app:edited>
<atom:category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/maps/2008#feature"/>
<atom:title>New Mexico</atom:title>
<atom:content type="application/vnd.google-earth.kml+xml">
<Placemark>
<name>New Mexico</name>
<description><![CDATA[<div dir="ltr">Word.</div>]]></description>
<Style>
<LineStyle>
<color>73009900</color>
<width>5</width>
</LineStyle>
</Style>
<LineString>
<tessellate>1</tessellate>
<coordinates>-110.039062,37.788081,0.0 -103.183594,37.926868,0.0 -103.183594,32.472695,0.0 -108.896484,32.026706,0.0 -109.863281,31.203405,0.0 -110.039062,37.788081,0.0 </coordinates>
</LineString>
</Placemark>
</atom:content>
<atom:link rel="self" type="application/atom+xml" href="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full/00046fb465f5002e56b7a"/>
<atom:link rel="edit" type="application/atom+xml" href="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full/00046fb465f5002e56b7a"/>
<atom:author>
<atom:name>Roman</atom:name>
</atom:author>
<atom:contributor>
<atom:name>Roman</atom:name>
</atom:contributor>
</atom:entry>
</atom:feed>
"""
MAP_FEATURE_ENTRY = """<?xml version="1.0" encoding="UTF-8"?>
<atom:entry xmlns="http://www.opengis.net/kml/2.2" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:batch="http://schemas.google.com/gdata/batch" xmlns:gd="http://schemas.google.com/g/2005" gd:etag="W/"CkMBRH44fyp7ImA9WxJbF08."">
<atom:id>http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/00046fb4632573b19e0b7</atom:id>
<atom:published>2009-07-27T18:47:35.037Z</atom:published>
<atom:updated>2009-07-27T18:47:35.037Z</atom:updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">2009-07-27T18:47:35.037Z</app:edited>
<atom:category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/maps/2008#feature"/>
<atom:title>Some feature title</atom:title>
<atom:content type="application/vnd.google-earth.kml+xml">
<Placemark>
<name>Some feature title</name>
<description><![CDATA[<div dir="ltr">Some feature content</div>]]></description>
<Style>
<IconStyle>
<Icon>
<href>http://maps.gstatic.com/intl/en_us/mapfiles/ms/micons/ylw-pushpin.png</href>
</Icon>
</IconStyle>
</Style>
<Point>
<coordinates>-113.818359,41.442726,0.0</coordinates>
</Point>
</Placemark>
</atom:content>
<atom:link rel="self" type="application/atom+xml" href="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full/00046fb4632573b19e0b7"/>
<atom:link rel="edit" type="application/atom+xml" href="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full/00046fb4632573b19e0b7"/>
<atom:author>
<atom:name>Roman</atom:name>
</atom:author>
<atom:contributor>
<atom:name>Roman</atom:name>
</atom:contributor>
</atom:entry>
"""
MAP_FEATURE_KML = """<Placemark>
<name>Some feature title</name>
<description><![CDATA[<div dir="ltr">Some feature content</div>]]></description>
<Style>
<IconStyle>
<Icon>
<href>http://maps.gstatic.com/intl/en_us/mapfiles/ms/micons/ylw-pushpin.png</href>
</Icon>
</IconStyle>
</Style>
<Point>
<coordinates>-113.818359,41.442726,0.0</coordinates>
</Point>
</Placemark>
"""
SITES_LISTPAGE_ENTRY = '''<?xml version="1.0" encoding="UTF-8"?>
<entry xmlns="http://www.w3.org/2005/Atom">
<id>http:///sites.google.com/feeds/content/site/gdatatestsite/1712987567114738703</id>
<updated>2009-06-16T00:37:37.393Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#listpage"/>
<title type="text">ListPagesTitle</title>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
<jot:section xmlns:jot="http://www.google.com/ns/jotspot/srvtmpl/" target="content-1">
<div dir="ltr">stuff go here<div>asdf</div>
<div>sdf</div>
<div>
<br/>
</div>
</div>
</jot:section>
</div>
</content>
<link rel="self" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/1712987567114738703"/>
<link rel="edit" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/1712987567114738703"/>
<author>
<name>Test User</name>
<email>[email protected]</email>
</author>
<gs:worksheet xmlns:gs="http://schemas.google.com/spreadsheets/2006" name="listpage"/>
<gs:header xmlns:gs="http://schemas.google.com/spreadsheets/2006" row="1"/>
<gs:data xmlns:gs="http://schemas.google.com/spreadsheets/2006" startRow="2">
<gs:column index="A" name="Owner"/>
<gs:column index="B" name="Description"/>
<gs:column index="C" name="Resolution"/>
<gs:column index="D" name="Complete"/>
<gs:column index="E" name="MyCo"/>
</gs:data>
<gd:feedLink xmlns:gd="http://schemas.google.com/g/2005" href="http:///sites.google.com/feeds/content/site/gdatatestsite?parent=abc"/>
</entry>'''
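# A Sites comment entry; thr:in-reply-to links it back to the page being
# commented on.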
SITES_COMMENT_ENTRY = '''<?xml version="1.0" encoding="UTF-8"?>
<entry xmlns="http://www.w3.org/2005/Atom">
<id>http://sites.google.com/feeds/content/site/gdatatestsite/abc123</id>
<updated>2009-06-15T18:40:22.407Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#comment"/>
<title type="text"/>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">first comment</div>
</content>
<link rel="http://schemas.google.com/sites/2008#parent" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123parent"/>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="edit" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<author>
<name>Test User</name>
<email>[email protected]</email>
</author>
<thr:in-reply-to xmlns:thr="http://purl.org/syndication/thread/1.0" href="http://sites.google.com/site/gdatatestsite/annoucment/testpost" ref="http://sites.google.com/feeds/content/site/gdatatestsite/abc123" source="http://sites.google.com/feeds/content/site/gdatatestsite" type="text/html"/>
</entry>'''
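# A Sites list-item entry; each gs:field holds one column value of the row.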
SITES_LISTITEM_ENTRY = '''<?xml version="1.0" encoding="UTF-8"?>
<entry xmlns="http://www.w3.org/2005/Atom">
<id>http://sites.google.com/feeds/content/site/gdatatestsite/abc123</id>
<updated>2009-06-16T00:34:55.633Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#listitem"/>
<title type="text"/>
<link rel="http://schemas.google.com/sites/2008#parent" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123def"/>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="edit" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<author>
<name>Test User</name>
<email>[email protected]</email>
</author>
<gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="A" name="Owner">test value</gs:field>
<gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="B" name="Description">test</gs:field>
<gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="C" name="Resolution">90</gs:field>
<gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="D" name="Complete"/>
<gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="E" name="MyCo">2009-05-31</gs:field>
</entry>'''
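# A Sites content feed mixing the kinds above: listpage, filecabinet,
# listitem, attachment, comment, announcement, webpage, and
# announcementspage entries.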
SITES_CONTENT_FEED = '''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/"
xmlns:sites="http://schemas.google.com/sites/2008" xmlns:gs="http://schemas.google.com/spreadsheets/2006"
xmlns:dc="http://purl.org/dc/terms" xmlns:batch="http://schemas.google.com/gdata/batch"
xmlns:gd="http://schemas.google.com/g/2005" xmlns:thr="http://purl.org/syndication/thread/1.0">
<id>http://sites.google.com/feeds/content/site/gdatatestsite</id>
<updated>2009-06-15T21:35:43.282Z</updated>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite"/>
<link rel="http://schemas.google.com/g/2005#post" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite"/>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite"/>
<generator version="1" uri="http://sites.google.com">Google Sites</generator>
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>http:///sites.google.com/feeds/content/site/gdatatestsite/1712987567114738703</id>
<updated>2009-06-16T00:37:37.393Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#listpage"/>
<title type="text">ListPagesTitle</title>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
<jot:section xmlns:jot="http://www.google.com/ns/jotspot/srvtmpl/" target="content-1">
<div dir="ltr">stuff go here<div>asdf</div>
<div>sdf</div>
<div>
<br/>
</div>
</div>
</jot:section>
</div>
</content>
<link rel="alternate" type="text/html" href="http:///sites.google.com/site/gdatatestsite/asdfsdfsdf"/>
<link rel="self" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/1712987567114738703"/>
<link rel="edit" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/1712987567114738703"/>
<link rel="http://schemas.google.com/sites/2008#revision" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/12345"/>
<author>
<name>Test User</name>
<email>[email protected]</email>
</author>
<gs:worksheet xmlns:gs="http://schemas.google.com/spreadsheets/2006" name="listpage"/>
<gs:header xmlns:gs="http://schemas.google.com/spreadsheets/2006" row="1"/>
<gs:data xmlns:gs="http://schemas.google.com/spreadsheets/2006" startRow="2">
<gs:column index="A" name="Owner"/>
<gs:column index="B" name="Description"/>
<gs:column index="C" name="Resolution"/>
<gs:column index="D" name="Complete"/>
<gs:column index="E" name="MyCo"/>
</gs:data>
<sites:revision>2</sites:revision>
<gd:deleted/>
<sites:pageName>home</sites:pageName>
<gd:feedLink xmlns:gd="http://schemas.google.com/g/2005" href="http://sites.google.com/feeds/content/site/gdatatestsite?parent=abc"/>
</entry>
<entry>
<id>http://sites.google.com/feeds/content/site/gdatatestsite/abc123</id>
<updated>2009-06-17T00:40:37.082Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#filecabinet"/>
<title type="text">filecabinet</title>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
<jot:section xmlns:jot="http://www.google.com/ns/jotspot/srvtmpl/" target="content-1">
<div dir="ltr">sdf</div>
</jot:section>
</div>
</content>
<link rel="http://schemas.google.com/sites/2008#parent" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="edit" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="http://schemas.google.com/sites/2008#revision" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<author>
<name>Test User</name>
<email>[email protected]</email>
</author>
<gd:feedLink xmlns:gd="http://schemas.google.com/g/2005" href="http://sites.google.com/feeds/content/site/gdatatestsite?parent=8472761212299270332"/>
</entry>
<entry>
<id>http://sites.google.com/feeds/content/site/gdatatestsite/abc123</id>
<updated>2009-06-16T00:34:55.633Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#listitem"/>
<title type="text"/>
<link rel="http://schemas.google.com/sites/2008#parent" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123def"/>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="edit" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="http://schemas.google.com/sites/2008#revision" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<author>
<name>Test User</name>
<email>[email protected]</email>
</author>
<gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="A" name="Owner">test value</gs:field>
<gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="B" name="Description">test</gs:field>
<gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="C" name="Resolution">90</gs:field>
<gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="D" name="Complete"/>
<gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="E" name="MyCo">2009-05-31</gs:field>
</entry>
<entry>
<id>http://sites.google.com/feeds/content/site/gdatatestsite/abc123</id>
<updated>2009-06-15T18:40:32.922Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#attachment"/>
<title type="text">testFile.ods</title>
<link rel="alternate" type="application/vnd.oasis.opendocument.spreadsheet" href="http://sites.google.com/feeds/SOMELONGURL"/>
<link rel="http://schemas.google.com/sites/2008#parent" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="edit" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="http://schemas.google.com/sites/2008#revision" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<author>
<name>Test User</name>
<email>[email protected]</email>
</author>
<gd:deleted/>
<sites:pageName>something else</sites:pageName>
</entry>
<entry>
<id>http://sites.google.com/feeds/content/site/gdatatestsite/abc123</id>
<updated>2009-06-15T18:40:22.407Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#comment"/>
<title type="text"/>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">first comment</div>
</content>
<link rel="http://schemas.google.com/sites/2008#parent" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="edit" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="http://schemas.google.com/sites/2008#revision" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<author>
<name>Test User</name>
<email>[email protected]</email>
</author>
<thr:in-reply-to xmlns:thr="http://purl.org/syndication/thread/1.0" href="http://sites.google.com/site/gdatatestsite/annoucment/testpost" ref="http://sites.google.com/feeds/content/site/gdatatestsite/abc123" source="http://sites.google.com/feeds/content/site/gdatatestsite" type="text/html"/>
</entry>
<entry>
<id>http://sites.google.com/feeds/content/site/gdatatestsite/abc123</id>
<updated>2009-06-15T18:40:16.388Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#announcement"/>
<title type="text">TestPost</title>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
<jot:section xmlns:jot="http://www.google.com/ns/jotspot/srvtmpl/" target="content-1">
<div dir="ltr">content goes here</div>
</jot:section>
</div>
</content>
<link rel="http://schemas.google.com/sites/2008#parent" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="edit" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="http://schemas.google.com/sites/2008#revision" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<author>
<name>Test User</name>
<email>[email protected]</email>
</author>
</entry>
<entry>
<id>http://sites.google.com/feeds/content/site/gdatatestsite/abc123</id>
<updated>2009-06-12T23:37:59.417Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#webpage"/>
<title type="text">Home</title>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
<jot:section xmlns:jot="http://www.google.com/ns/jotspot/srvtmpl/" target="content-1">
<div dir="ltr">Some Content goes here<div>
<br/>
</div>
<div>
<jot:embed height="300" id="4981865780428052" props="align:left;width:250;maxDepth:6" src="http://www.google.com/chart?SOMELONGURL" style="display: block; text-align: left; " type="toc" width="250"/>
<br/>
</div>
</div>
</jot:section>
</div>
</content>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="edit" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="http://schemas.google.com/sites/2008#revision" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<author>
<name>Test User</name>
<email>[email protected]</email>
</author>
</entry>
<entry>
<id>http://sites.google.com/feeds/content/site/gdatatestsite/2639323850129333500</id>
<updated>2009-06-12T23:32:09.191Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#announcementspage"/>
<title type="text">annoucment</title>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
</div>
</content>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="edit" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="http://schemas.google.com/sites/2008#revision" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<author>
<name>Test User</name>
<email>[email protected]</email>
</author>
<gd:feedLink xmlns:gd="http://schemas.google.com/g/2005" href="http://sites.google.com/feeds/content/site/gdatatestsite?parent=abc123"/>
</entry>
</feed>'''
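# A Sites activity feed recording a deletion and an edit of the same page.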
SITES_ACTIVITY_FEED = '''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/">
<id>http://sites.google.com/feeds/activity/site/siteName</id>
<updated>2009-08-19T05:46:01.503Z</updated>
<title>Activity</title>
<link rel="alternate" type="text/html" href="http://sites.google.com/a/site/siteName/system/app/pages/recentChanges"/>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="http://sites.google.com/feeds/activity/site/siteName"/>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/activity/site/siteName"/>
<generator version="1" uri="http://sites.google.com">Google Sites</generator>
<openSearch:startIndex>1</openSearch:startIndex>
<entry xmlns:gd="http://schemas.google.com/g/2005" gd:etag="W/"DUENSH0zfyl7ImA9WxNTFEs."">
<id>http://sites.google.com/feeds/activity/site/siteName/197441951793148343</id>
<updated>2009-08-17T00:08:19.387Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#deletion" label="deletion"/>
<title>NewWebpage3</title>
<summary type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">User deleted <a href="http://sites.google.com/site/siteName/newwebpage">NewWebpage3</a>
</div>
</summary>
<link rel="http://schemas.google.com/sites/2008#revision" type="application/atom+xml" href="http://sites.google.com/feeds/revision/site/siteName/6397361387376148502"/>
<link rel="http://schemas.google.com/sites/2008#current" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/siteName/6397361387376148502"/>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/activity/site/siteName/197441951793148343"/>
<author>
<name>User</name>
<email>[email protected]</email>
</author>
</entry>
<entry xmlns:gd="http://schemas.google.com/g/2005" gd:etag="W/"DUEMQnk6eSl7ImA9WxNTFEs."">
<id>http://sites.google.com/feeds/activity/site/siteName/7299542210274956360</id>
<updated>2009-08-17T00:08:03.711Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#edit" label="edit"/>
<title>NewWebpage3</title>
<summary type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">User edited <a href="http://sites.google.com/site/siteName/newwebpage">NewWebpage3</a>
</div>
</summary>
<link rel="http://schemas.google.com/sites/2008#revision" type="application/atom+xml" href="http://sites.google.com/feeds/revision/site/siteName/6397361387376148502"/>
<link rel="http://schemas.google.com/sites/2008#current" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/siteName/6397361387376148502"/>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/activity/site/siteName/7299542210274956360"/>
<author>
<name>User</name>
<email>[email protected]</email>
</author>
</entry>
</feed>'''
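# A Sites revision feed containing a single comment revision.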
SITES_REVISION_FEED = '''
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/" xmlns:sites="http://schemas.google.com/sites/2008" xmlns:gs="http://schemas.google.com/spreadsheets/2006" xmlns:dc="http://purl.org/dc/terms" xmlns:batch="http://schemas.google.com/gdata/batch" xmlns:gd="http://schemas.google.com/g/2005" xmlns:thr="http://purl.org/syndication/thread/1.0">
<id>http://sites.google.com/feeds/revision/site/siteName/2947510322163358574</id>
<updated>2009-08-19T06:20:18.151Z</updated>
<title>Revisions</title>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="http://sites.google.com/feeds/revision/2947510322163358574"/>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/revision/site/siteName/2947510322163358574"/>
<generator version="1" uri="http://sites.google.com">Google Sites</generator>
<openSearch:startIndex>1</openSearch:startIndex>
<entry gd:etag="W/"DEQNRXY-fil7ImA9WxNTFkg."">
<id>http://sites.google.com/feeds/revision/site/siteName/2947510322163358574/1</id>
<updated>2009-08-19T04:33:14.856Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#comment" label="comment"/>
<title/>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
<table cellspacing="0" class="sites-layout-name-one-column sites-layout-hbox">
<tbody>
<tr>
<td class="sites-layout-tile sites-tile-name-content-1">testcomment</td>
</tr>
</tbody>
</table>
</div>
</content>
<link rel="http://schemas.google.com/sites/2008#parent" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/siteName/54395424125706119"/>
<link rel="alternate" type="text" href="http://sites.google.com/site/system/app/pages/admin/compare?wuid=wuid%3Agx%3A28e7a9057c581b6e&rev1=1"/>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/revision/site/siteName/2947510322163358574/1"/>
<author>
<name>User</name>
<email>[email protected]</email>
</author>
<thr:in-reply-to href="http://sites.google.com/site/siteName/code/js" ref="http://sites.google.com/feeds/content/site/siteName/54395424125706119" source="http://sites.google.com/feeds/content/google.com/siteName" type="text/html;charset=UTF-8"/>
<sites:revision>1</sites:revision>
</entry>
</feed>'''
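# A Sites site feed listing two sites, each with sites:siteName and
# sites:theme extensions.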
SITES_SITE_FEED = '''
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/" xmlns:gAcl="http://schemas.google.com/acl/2007" xmlns:sites="http://schemas.google.com/sites/2008" xmlns:gs="http://schemas.google.com/spreadsheets/2006" xmlns:dc="http://purl.org/dc/terms" xmlns:batch="http://schemas.google.com/gdata/batch" xmlns:gd="http://schemas.google.com/g/2005" xmlns:thr="http://purl.org/syndication/thread/1.0">
<id>https://sites.google.com/feeds/site/example.com</id>
<updated>2009-12-09T01:05:54.631Z</updated>
<title>Site</title>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="https://sites.google.com/feeds/site/example.com"/>
<link rel="http://schemas.google.com/g/2005#post" type="application/atom+xml" href="https://sites.google.com/feeds/site/example.com"/>
<link rel="self" type="application/atom+xml" href="https://sites.google.com/feeds/site/example.com"/>
<generator version="1" uri="http://sites.google.com">Google Sites</generator>
<openSearch:startIndex>1</openSearch:startIndex>
<entry gd:etag="W/"DkIHQH4_eCl7I2A9WxNaF0Q."">
<id>https://sites.google.com/feeds/site/example.com/new-test-site</id>
<updated>2009-12-02T22:55:31.040Z</updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">2009-12-02T22:55:31.040Z</app:edited>
<title>New Test Site</title>
<summary>A new site to hold memories</summary>
<link rel="alternate" type="text/html" href="http://sites.google.com/a/example.com/new-test-site/"/>
<link rel="http://schemas.google.com/sites/2008#source" type="application/atom+xml" href="http://sites.google.com/feeds/site/example.com/source-site"/>
<link rel="http://schemas.google.com/acl/2007#accessControlList" type="application/atom+xml" href="http://sites.google.com/feeds/acl/site/example.com/new-test-site"/>
<link rel="self" type="application/atom+xml" href="https://sites.google.com/feeds/site/example.com/new-test-site"/>
<link rel="edit" type="application/atom+xml" href="https://sites.google.com/feeds/site/example.com/new-test-site"/>
<sites:siteName>new-test-site</sites:siteName>
<sites:theme>iceberg</sites:theme>
</entry>
<entry gd:etag="W/"CE8MQH48fyl7I2A9WxNaGUo."">
<id>https://sites.google.com/feeds/site/example.com/newautosite2</id>
<updated>2009-12-05T00:28:01.077Z</updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">2009-12-05T00:28:01.077Z</app:edited>
<title>newAutoSite3</title>
<summary>A new site to hold memories2</summary>
<link rel="alternate" type="text/html" href="http://sites.google.com/a/example.com/newautosite2/"/>
<link rel="http://schemas.google.com/acl/2007#accessControlList" type="application/atom+xml" href="http://sites.google.com/feeds/acl/site/examp.e.com/newautosite2"/>
<link rel="self" type="application/atom+xml" href="https://sites.google.com/feeds/site/example.com/newautosite2"/>
<link rel="edit" type="application/atom+xml" href="https://sites.google.com/feeds/site/example.com/newautosite2"/>
<sites:siteName>newautosite2</sites:siteName>
<sites:theme>default</sites:theme>
</entry>
</feed>'''
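# A Sites ACL feed granting the 'owner' role to [email protected].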
SITES_ACL_FEED = '''
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/" xmlns:gAcl="http://schemas.google.com/acl/2007" xmlns:sites="http://schemas.google.com/sites/2008" xmlns:gs="http://schemas.google.com/spreadsheets/2006" xmlns:dc="http://purl.org/dc/terms" xmlns:batch="http://schemas.google.com/gdata/batch" xmlns:gd="http://schemas.google.com/g/2005" xmlns:thr="http://purl.org/syndication/thread/1.0">
<id>https://sites.google.com/feeds/acl/site/example.com/new-test-site</id>
<updated>2009-12-09T01:24:59.080Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/acl/2007#accessRule"/>
<title>Acl</title>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="https://sites.google.com/feeds/acl/site/example.com/new-test-site"/>
<link rel="http://schemas.google.com/g/2005#post" type="application/atom+xml" href="https://sites.google.com/feeds/acl/site/example.com/new-test-site"/>
<link rel="self" type="application/atom+xml" href="https://sites.google.com/feeds/acl/site/example.com/new-test-site"/>
<generator version="1" uri="http://sites.google.com">Google Sites</generator>
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>https://sites.google.com/feeds/acl/site/google.com/new-test-site/user%3Auser%40example.com</id>
<updated>2009-12-09T01:24:59.080Z</updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">2009-12-09T01:24:59.080Z</app:edited>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/acl/2007#accessRule"/>
<link rel="self" type="application/atom+xml" href="https://sites.google.com/feeds/acl/site/example.com/new-test-site/user%3Auser%40example.com"/>
<link rel="edit" type="application/atom+xml" href="https://sites.google.com/feeds/acl/site/example.com/new-test-site/user%3Auser%40example.com"/>
<gAcl:scope type="user" value="[email protected]"/>
<gAcl:role value="owner"/>
</entry>
</feed>'''
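# The legacy (v1) account feed shape, kept alongside the v2 feed below.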
ANALYTICS_ACCOUNT_FEED_old = '''
<feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:dxp='http://schemas.google.com/analytics/2009'>
<id>http://www.google.com/analytics/feeds/accounts/[email protected]</id>
<updated>2009-06-25T03:55:22.000-07:00</updated>
<title type='text'>Profile list for [email protected]</title>
<link rel='self' type='application/atom+xml' href='http://www.google.com/analytics/feeds/accounts/default'/>
<author>
<name>Google Analytics</name>
</author>
<generator version='1.0'>Google Analytics</generator>
<openSearch:totalResults>12</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>12</openSearch:itemsPerPage>
<entry>
<id>http://www.google.com/analytics/feeds/accounts/ga:1174</id>
<updated>2009-06-25T03:55:22.000-07:00</updated>
<title type='text'>www.googlestore.com</title>
<link rel='alternate' type='text/html' href='http://www.google.com/analytics'/>
<dxp:tableId>ga:1174</dxp:tableId>
<dxp:property name='ga:accountId' value='30481'/>
<dxp:property name='ga:accountName' value='Google Store'/>
<dxp:property name='ga:profileId' value='1174'/>
<dxp:property name='ga:webPropertyId' value='UA-30481-1'/>
<dxp:property name='ga:currency' value='USD'/>
<dxp:property name='ga:timezone' value='America/Los_Angeles'/>
</entry>
</feed>'''
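# The v2 account feed, which adds segment, goal, and custom-variable
# extensions to each account entry.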
ANALYTICS_ACCOUNT_FEED = '''
<feed xmlns='http://www.w3.org/2005/Atom' xmlns:dxp='http://schemas.google.com/analytics/2009' xmlns:ga='http://schemas.google.com/ga/2009' xmlns:openSearch='http://a9.com/-/spec/opensearch/1.1/' xmlns:gd='http://schemas.google.com/g/2005' gd:etag='W/"DE8CRH47eCp7I2A9WxNWFU4."' gd:kind='analytics#accounts'>
<id>http://www.google.com/analytics/feeds/accounts/[email protected]</id>
<updated>2009-10-14T09:14:25.000-07:00</updated>
<title>Profile list for [email protected]</title>
<link rel='self' type='application/atom+xml' href='http://www.google.com/analytics/feeds/accounts/default?v=2'/>
<author>
<name>Google Analytics</name>
</author>
<generator version='1.0'>Google Analytics</generator>
<openSearch:totalResults>37</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>37</openSearch:itemsPerPage>
<dxp:segment id='gaid::-11' name='Visits from iPhones'>
<dxp:definition>ga:operatingSystem==iPhone</dxp:definition>
</dxp:segment>
<entry gd:etag='W/"DE8CRH47eCp7I2A9WxNWFU4."' gd:kind='analytics#account'>
<id>http://www.google.com/analytics/feeds/accounts/ga:1174</id>
<updated>2009-10-14T09:14:25.000-07:00</updated>
<title>www.googlestore.com</title>
<link rel='alternate' type='text/html' href='http://www.google.com/analytics'/>
<ga:goal active='true' name='Completing Order' number='1' value='10.0'>
<ga:destination caseSensitive='false' expression='/purchaseComplete.html' matchType='regex' step1Required='false'>
<ga:step name='View Product Categories' number='1' path='/Apps|Accessories|Fun|Kid\+s|Office'/>
<ga:step name='View Product' number='2' path='/Apps|Accessories|Fun|Kid\+s|Office|Wearables'/>
</ga:destination>
</ga:goal>
<ga:goal active='true' name='Browsed my site over 5 minutes' number='6' value='0.0'>
<ga:engagement comparison='>' thresholdValue='300' type='timeOnSite'/>
</ga:goal>
<ga:goal active='true' name='Visited > 4 pages' number='7' value='0.25'>
<ga:engagement comparison='>' thresholdValue='4' type='pagesVisited'/>
</ga:goal>
<ga:customVariable index='1' name='My Custom Variable' scope='3'/>
<ga:customVariable index='2' name='My Seconds Variable' scope='1'/>
<dxp:property name='ga:accountId' value='30481'/>
<dxp:property name='ga:accountName' value='Google Store'/>
<dxp:property name='ga:profileId' value='1174'/>
<dxp:property name='ga:webPropertyId' value='UA-30481-1'/>
<dxp:property name='ga:currency' value='USD'/>
<dxp:property name='ga:timezone' value='America/Los_Angeles'/>
<dxp:tableId>ga:1174</dxp:tableId>
</entry>
</feed>'''
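# An Analytics data feed with aggregate metrics, a data source block, and
# one row entry. A minimal sketch, assuming the dxp: extension namespace
# used below, of reading the aggregates; _aggregate_metrics is an
# illustrative helper, not part of the gdata client.
import xml.etree.ElementTree as ElementTree

_DXP_NS = '{http://schemas.google.com/analytics/2009}'


def _aggregate_metrics(data_feed_xml):
  """Maps metric name to integer value from a data feed's <dxp:aggregates>."""
  aggregates = ElementTree.fromstring(data_feed_xml).find(_DXP_NS + 'aggregates')
  return dict((metric.get('name'), int(metric.get('value')))
              for metric in aggregates.findall(_DXP_NS + 'metric'))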
ANALYTICS_DATA_FEED = '''
<feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:dxp='http://schemas.google.com/analytics/2009'>
<id>http://www.google.com/analytics/feeds/data?ids=ga:1174&amp;dimensions=ga:medium,ga:source&amp;metrics=ga:bounces,ga:visits&amp;filters=ga:medium%3D%3Dreferral&amp;start-date=2008-10-01&amp;end-date=2008-10-31</id>
<updated>2008-10-31T16:59:59.999-07:00</updated>
<title type='text'>Google Analytics Data for Profile 1174</title>
<link rel='self' type='application/atom+xml' href='http://www.google.com/analytics/feeds/data?max-results=5&amp;sort=-ga%3Avisits&amp;end-date=2008-10-31&amp;start-date=2008-10-01&amp;metrics=ga%3Avisits%2Cga%3Abounces&amp;ids=ga%3A1174&amp;dimensions=ga%3Asource%2Cga%3Amedium&amp;filters=ga%3Amedium%3D%3Dreferral'/>
<link rel='next' type='application/atom+xml' href='http://www.google.com/analytics/feeds/data?start-index=6&amp;max-results=5&amp;sort=-ga%3Avisits&amp;end-date=2008-10-31&amp;start-date=2008-10-01&amp;metrics=ga%3Avisits%2Cga%3Abounces&amp;ids=ga%3A1174&amp;dimensions=ga%3Asource%2Cga%3Amedium&amp;filters=ga%3Amedium%3D%3Dreferral'/>
<author>
<name>Google Analytics</name>
</author>
<generator version='1.0'>Google Analytics</generator>
<openSearch:totalResults>6451</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>2</openSearch:itemsPerPage>
<dxp:startDate>2008-10-01</dxp:startDate>
<dxp:endDate>2008-10-31</dxp:endDate>
<dxp:segment id='gaid::-11' name='Visits from iPhones'>
<dxp:definition>ga:operatingSystem==iPhone</dxp:definition>
</dxp:segment>
<dxp:aggregates>
<dxp:metric confidenceInterval='0.0' name='ga:visits' type='integer' value='136540'/>
<dxp:metric confidenceInterval='0.0' name='ga:bounces' type='integer' value='101535'/>
</dxp:aggregates>
<dxp:containsSampledData>true</dxp:containsSampledData>
<dxp:dataSource>
<dxp:tableId>ga:1174</dxp:tableId>
<dxp:tableName>www.googlestore.com</dxp:tableName>
<dxp:property name='ga:profileId' value='1174'/>
<dxp:property name='ga:webPropertyId' value='UA-30481-1'/>
<dxp:property name='ga:accountName' value='Google Store'/>
</dxp:dataSource>
<entry>
    <id>http://www.google.com/analytics/feeds/data?ids=ga:1174&amp;ga:medium=referral&amp;ga:source=blogger.com&amp;filters=ga:medium%3D%3Dreferral&amp;start-date=2008-10-01&amp;end-date=2008-10-31</id>
<updated>2008-10-30T17:00:00.001-07:00</updated>
<title type='text'>ga:source=blogger.com | ga:medium=referral</title>
<link rel='alternate' type='text/html' href='http://www.google.com/analytics'/>
<dxp:dimension name='ga:source' value='blogger.com'/>
<dxp:dimension name='ga:medium' value='referral'/>
<dxp:metric confidenceInterval='0.0' name='ga:visits' type='integer' value='68140'/>
<dxp:metric confidenceInterval='0.0' name='ga:bounces' type='integer' value='61095'/>
</entry>
</feed>'''
ANALYTICS_MGMT_PROFILE_FEED = '''
<feed xmlns='http://www.w3.org/2005/Atom' xmlns:dxp='http://schemas.google.com/analytics/2009' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:gd='http://schemas.google.com/g/2005' gd:kind='analytics#profiles'>
<id>https://www.google.com/analytics/feeds/datasources/ga/accounts/~all/webproperties/~all/profiles</id>
<updated>2010-06-14T22:18:48.676Z</updated>
<title type='text'>Google Analytics Profiles for [email protected]</title>
<link rel='self' type='application/atom+xml' href='https://www.google.com/analytics/feeds/datasources/ga/accounts/~all/webproperties/~all/profiles'/>
<author>
<name>Google Analytics</name>
</author>
<generator version='1.0'>Google Analytics</generator>
<openSearch:totalResults>1</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>1000</openSearch:itemsPerPage>
<entry gd:etag='W/"CkQAQ3Y-fSp7I2A9WxFXGEU."' gd:kind='analytics#profile'>
<id>https://www.google.com/analytics/feeds/datasources/ga/accounts/30481/webproperties/UA-30481-1/profiles/1174</id>
<updated>2010-06-09T05:58:15.436-07:00</updated>
<title type='text'>Google Analytics Profile www.googlestore.com</title>
<link rel='self' type='application/atom+xml' href='https://www.google.com/analytics/feeds/datasources/ga/accounts/30481/webproperties/UA-30481-1/profiles/1174'/>
<link rel='http://schemas.google.com/ga/2009#parent' type='application/atom+xml' href='https://www.google.com/analytics/feeds/datasources/ga/accounts/30481/webproperties/UA-30481-1' gd:targetKind='analytics#webproperty'/>
<link rel='http://schemas.google.com/ga/2009#child' type='application/atom+xml' href='https://www.google.com/analytics/feeds/datasources/ga/accounts/30481/webproperties/UA-30481-1/profiles/1174/goals' gd:targetKind='analytics#goals'/>
<dxp:property name='ga:accountId' value='30481'/>
<dxp:property name='ga:webPropertyId' value='UA-30481-1'/>
<dxp:property name='ga:profileName' value='www.googlestore.com'/>
<dxp:property name='ga:profileId' value='1174'/>
<dxp:property name='dxp:tableId' value='ga:1174'/>
<dxp:property name='ga:currency' value='USD'/>
<dxp:property name='ga:timezone' value='America/Los_Angeles'/>
</entry>
</feed>
'''
ANALYTICS_MGMT_GOAL_FEED = '''
<feed xmlns='http://www.w3.org/2005/Atom' xmlns:dxp='http://schemas.google.com/analytics/2009' xmlns:ga='http://schemas.google.com/ga/2009' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:gd='http://schemas.google.com/g/2005' gd:kind='analytics#goals'>
<id>https://www.google.com/analytics/feeds/datasources/ga/accounts/~all/webproperties/~all/profiles/~all/goals</id>
<updated>2010-06-14T22:21:18.485Z</updated>
<title type='text'>Google Analytics Goals for [email protected]</title>
<link rel='self' type='application/atom+xml' href='https://www.google.com/analytics/feeds/datasources/ga/accounts/~all/webproperties/~all/profiles/~all/goals'/>
<author>
<name>Google Analytics</name>
</author>
<generator version='1.0'>Google Analytics</generator>
<openSearch:totalResults>3</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>1000</openSearch:itemsPerPage>
<entry gd:etag='W/"DUYCQn08fip7I2A9WxBWFUo."' gd:kind='analytics#goal'>
<id>https://www.google.com/analytics/feeds/datasources/ga/accounts/30481/webproperties/UA-30481-1/profiles/1174/goals/1</id>
<updated>2010-02-07T13:12:43.377-08:00</updated>
<title type='text'>Google Analytics Goal 1</title>
<link rel='self' type='application/atom+xml' href='https://www.google.com/analytics/feeds/datasources/ga/accounts/30481/webproperties/UA-30481-1/profiles/1174/goals/1'/>
<link rel='http://schemas.google.com/ga/2009#parent' type='application/atom+xml' href='https://www.google.com/analytics/feeds/datasources/ga/accounts/30481/webproperties/UA-30481-1/profiles/1174' gd:targetKind='analytics#profile'/>
<ga:goal active='true' name='Completing Order' number='1' value='10.0'>
<ga:destination caseSensitive='false' expression='/purchaseComplete.html' matchType='regex' step1Required='false'>
<ga:step name='View Product Categories' number='1' path='/Apps|Accessories'/>
</ga:destination>
</ga:goal>
<dxp:property name='ga:profileId' value='1174'/>
</entry>
<entry gd:etag='W/"DUYCQn08fip7I2A9WxBWFUo."' gd:kind='analytics#goal'>
<id>https://www.google.com/analytics/feeds/datasources/ga/accounts/30481/webproperties/UA-30481-1/profiles/1174/goals/2</id>
<updated>2010-02-07T13:12:43.376-08:00</updated>
<title type='text'>Google Analytics Goal 2</title>
<link rel='self' type='application/atom+xml' href='https://www.google.com/analytics/feeds/datasources/ga/accounts/30481/webproperties/UA-30481-1/profiles/1174/goals/2'/>
<link rel='http://schemas.google.com/ga/2009#parent' type='application/atom+xml' href='https://www.google.com/analytics/feeds/datasources/ga/accounts/30481/webproperties/UA-30481-1/profiles/1174' gd:targetKind='analytics#profile'/>
<ga:goal active='true' name='Browsed my site over 5 minutes' number='2' value='0.0'>
<ga:engagement comparison='>' thresholdValue='300' type='timeOnSite'/>
</ga:goal>
<dxp:property name='ga:profileId' value='1174'/>
</entry>
</feed>
'''
ANALYTICS_MGMT_ADV_SEGMENT_FEED = '''
<feed xmlns='http://www.w3.org/2005/Atom' xmlns:dxp='http://schemas.google.com/analytics/2009' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:gd='http://schemas.google.com/g/2005' gd:kind='analytics#segments'>
<id>https://www.google.com/analytics/feeds/datasources/ga/segments</id>
<updated>2010-06-14T22:22:02.728Z</updated>
<title type='text'>Google Analytics Advanced Segments for [email protected]</title>
<link rel='self' type='application/atom+xml' href='https://www.google.com/analytics/feeds/datasources/ga/segments'/>
<author>
<name>Google Analytics</name>
</author>
<generator version='1.0'>Google Analytics</generator>
<openSearch:totalResults>2</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>1000</openSearch:itemsPerPage>
<entry gd:etag='W/"YDwqeyM."' gd:kind='analytics#segment'>
<id>https://www.google.com/analytics/feeds/datasources/ga/segments/gaid::0</id>
<updated>2009-10-26T13:00:44.915-07:00</updated>
    <title type='text'>Google Analytics Advanced Segment Sources From Google</title>
    <link rel='self' type='application/atom+xml' href='https://www.google.com/analytics/feeds/datasources/ga/segments/gaid::0'/>
    <dxp:segment id='gaid::0' name='Sources From Google'>
<dxp:definition>ga:source=~^\Qgoogle\E</dxp:definition>
</dxp:segment>
</entry>
</feed>
'''
MULTIDOMAIN_USER_ENTRY = """<?xml version="1.0"?>
<atom:entry xmlns:atom='http://www.w3.org/2005/Atom' xmlns:apps='http://schemas.google.com/apps/2006'>
<apps:property name="password" value="51eea05d46317fadd5cad6787a8f562be90b4446"/>
<apps:property name="hashFunction" value="SHA-1"/>
<apps:property name="userEmail" value="[email protected]"/>
<apps:property name="firstName" value="Liz"/>
<apps:property name="lastName" value="Smith"/>
<apps:property name="isAdmin" value="true"/>
</atom:entry>"""
MULTIDOMAIN_USER_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensesearchrss/1.0/"
xmlns:apps="http://schemas.google.com/apps/2006">
<id>https://apps-apis.google.com/a/feeds/user/2.0/example.com</id>
<updated>2010-01-26T23:38:13.215Z</updated>
<link rel="http://schemas.google.com/g/2005#feed"
type="application/atom+xml" href="https://apps-apis.google.com/a/feeds/user/2.0/example.com" />
<link rel="http://schemas.google.com/g/2005#post"
type="application/atom+xml" href="https://apps-apis.google.com/a/feeds/user/2.0/example.com" />
<link rel="self" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/user/2.0/example.com?start=admin%40example.com" />
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>https://apps-apis.google.com/a/feeds/user/2.0/example.com/admin%40example.com</id>
<updated>2010-01-26T23:38:13.210Z</updated>
<link rel="self" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/user/2.0/example.com/admin%40example.com" />
<link rel="edit" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/user/2.0/example.com/admin%40example.com" />
<apps:property name="lastName" value="Brown" />
<apps:property name="isChangePasswordAtNextLogin" value="false" />
<apps:property name="isSuspended" value="false" />
<apps:property name="userEmail" value="[email protected]" />
<apps:property name="isAdmin" value="true" />
<apps:property name="firstName" value="Joe" />
<apps:property name="ipWhitelisted" value="false" />
</entry>
<entry>
<id>https://apps-apis.google.com/a/feeds/user/2.0/example.com/liz%40example.com</id>
<updated>2010-01-26T23:38:13.210Z</updated>
<link rel="self" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/user/2.0/example.com/liz%40example.com" />
<link rel="edit" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/user/2.0/example.com/liz%40example.com" />
<apps:property name="lastName" value="Smith" />
<apps:property name="isChangePasswordAtNextLogin" value="false" />
<apps:property name="isSuspended" value="false" />
<apps:property name="userEmail" value="[email protected]" />
<apps:property name="isAdmin" value="true" />
<apps:property name="firstName" value="Elizabeth" />
<apps:property name="ipWhitelisted" value="false" />
</entry>
</feed>"""
MULTIDOMAIN_USER_RENAME_REQUEST = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns='http://www.w3.org/2005/Atom' xmlns:apps='http://schemas.google.com/apps/2006'>
<apps:property name='newEmail' value='[email protected]'/>
</entry>"""
MULTIDOMAIN_ALIAS_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns='http://www.w3.org/2005/Atom' xmlns:apps='http://schemas.google.com/apps/2006'>
<id>https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com/helpdesk%40gethelp_example.com</id>
<updated>2008-10-17T15:02:45.646Z</updated>
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com/helpdesk%40gethelp_example.com'/>
<link rel='edit' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com/helpdesk%40gethelp_example.com'/>
<apps:property name="userEmail" value="[email protected]" />
<apps:property name="aliasEmail" value="helpdesk@gethelp_example.com" />
</entry>"""
MULTIDOMAIN_ALIAS_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/" xmlns:apps="http://schemas.google.com/apps/2006">
<id>https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com</id>
<updated>2010-01-26T23:38:13.215Z</updated>
<link rel="http://schemas.google.com/g/2005#feed"
type="application/atom+xml" href="https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com" />
<link rel="http://schemas.google.com/g/2005#post"
type="application/atom+xml" href="https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com" />
<link rel="self" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com?start=helpdesk%40gethelp_example.com" />
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com/helpdesk%40gethelp_example.com</id>
<updated>2010-01-26T23:38:13.210Z</updated>
<link rel="self" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com/helpdesk%40gethelp_example.com" />
<link rel="edit" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com/helpdesk%40gethelp_example.com" />
<apps:property name="userEmail" value="[email protected]" />
<apps:property name="aliasEmail" value="helpdesk@gethelp_example.com" />
</entry>
<entry>
<id>https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com/support%40gethelp_example.com</id>
<updated>2010-01-26T23:38:13.210Z</updated>
<link rel="self" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com/support%40gethelp_example.com" />
<link rel="edit" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com/support%40gethelp_example.com" />
<apps:property name="userEmail" value="[email protected]" />
<apps:property name="aliasEmail" value="support@gethelp_example.com" />
</entry>
</feed>"""
USER_ENTRY1 = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns='http://www.w3.org/2005/Atom' xmlns:apps='http://schemas.google.com/apps/2006' xmlns:gd='http://schemas.google.com/g/2005'>
<id>http://apps-apis.google.com/a/feeds/srkapps.com/user/2.0/abcd12310</id>
<updated>1970-01-01T00:00:00.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/apps/2006#user'/>
<title type='text'>abcd12310</title>
<link rel='self' type='application/atom+xml' href='http://apps-apis.google.com/a/feeds/srkapps.com/user/2.0/abcd12310'/>
<link rel='edit' type='application/atom+xml' href='http://apps-apis.google.com/a/feeds/srkapps.com/user/2.0/abcd12310'/>
<apps:login userName='abcd12310' suspended='false' ipWhitelisted='false' admin='false' changePasswordAtNextLogin='false' agreedToTerms='false'/><apps:quota limit='25600'/>
<apps:name familyName='efgh3' givenName='abcd33'/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#user.nicknames' href='http://apps-apis.google.com/a/feeds/srkapps.com/nickname/2.0?username=abcd12310'/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#user.emailLists' href='http://apps-apis.google.com/a/feeds/srkapps.com/emailList/2.0?recipient=abcd12310%40srkapps.com'/>
</entry>"""
USER_FEED1 = """<?xml version='1.0' encoding='utf-8'?>
<ns0:feed xmlns:ns0="http://www.w3.org/2005/Atom">
<ns0:category scheme="http://schemas.google.com/g/2005#kind"
term="http://schemas.google.com/apps/2006#user" />
<ns0:id>
https://apps-apis.google.com/a/feeds/srkapps.com/user/2.0</ns0:id>
<ns1:startIndex xmlns:ns1="http://a9.com/-/spec/opensearchrss/1.0/">
1</ns1:startIndex>
<ns0:title type="text">Users</ns0:title>
<ns0:link href="https://apps-apis.google.com/a/feeds/srkapps.com/user/2.0"
rel="http://schemas.google.com/g/2005#feed"
type="application/atom+xml" />
<ns0:link href="https://apps-apis.google.com/a/feeds/srkapps.com/user/2.0"
rel="http://schemas.google.com/g/2005#post"
type="application/atom+xml" />
<ns0:link href="https://apps-apis.google.com/a/feeds/srkapps.com/user/2.0?startUsername=user8306"
rel="self" type="application/atom+xml" />
<ns0:updated>1970-01-01T00:00:00.000Z</ns0:updated>
<ns0:entry>
<ns1:name familyName="LastName8306"
givenName="FirstName8306"
xmlns:ns1="http://schemas.google.com/apps/2006" />
<ns0:category scheme="http://schemas.google.com/g/2005#kind"
term="http://schemas.google.com/apps/2006#user" />
<ns0:id>
https://apps-apis.google.com/a/feeds/srkapps.com/user/2.0/user8306</ns0:id>
<ns0:updated>1970-01-01T00:00:00.000Z</ns0:updated>
<ns1:quota limit="25600"
xmlns:ns1="http://schemas.google.com/apps/2006" />
<ns1:feedLink href="https://apps-apis.google.com/a/feeds/srkapps.com/nickname/2.0?username=user8306"
rel="http://schemas.google.com/apps/2006#user.nicknames"
xmlns:ns1="http://schemas.google.com/g/2005" />
<ns1:feedLink href="https://apps-apis.google.com/a/feeds/srkapps.com/emailList/2.0?recipient=user8306%40srkapps.com"
rel="http://schemas.google.com/apps/2006#user.emailLists"
xmlns:ns1="http://schemas.google.com/g/2005" />
<ns0:title type="text">user8306</ns0:title>
<ns0:link href="https://apps-apis.google.com/a/feeds/srkapps.com/user/2.0/user8306"
rel="self" type="application/atom+xml" />
<ns0:link href="https://apps-apis.google.com/a/feeds/srkapps.com/user/2.0/user8306"
rel="edit" type="application/atom+xml" />
<ns1:login admin="false" agreedToTerms="false"
changePasswordAtNextLogin="false" ipWhitelisted="false"
suspended="false" userName="user8306"
xmlns:ns1="http://schemas.google.com/apps/2006" />
</ns0:entry>
<ns0:entry>
<ns1:name familyName="LastName8307"
givenName="FirstName8307"
xmlns:ns1="http://schemas.google.com/apps/2006" />
<ns0:category scheme="http://schemas.google.com/g/2005#kind"
term="http://schemas.google.com/apps/2006#user" />
<ns0:id>
https://apps-apis.google.com/a/feeds/srkapps.com/user/2.0/user8307</ns0:id>
<ns0:updated>1970-01-01T00:00:00.000Z</ns0:updated>
<ns1:quota limit="25600"
xmlns:ns1="http://schemas.google.com/apps/2006" />
<ns1:feedLink href="https://apps-apis.google.com/a/feeds/srkapps.com/nickname/2.0?username=user8307"
rel="http://schemas.google.com/apps/2006#user.nicknames"
xmlns:ns1="http://schemas.google.com/g/2005" />
<ns1:feedLink href="https://apps-apis.google.com/a/feeds/srkapps.com/emailList/2.0?recipient=user8307%40srkapps.com"
rel="http://schemas.google.com/apps/2006#user.emailLists"
xmlns:ns1="http://schemas.google.com/g/2005" />
<ns0:title type="text">user8307</ns0:title>
<ns0:link href="https://apps-apis.google.com/a/feeds/srkapps.com/user/2.0/user8307"
rel="self" type="application/atom+xml" />
<ns0:link href="https://apps-apis.google.com/a/feeds/srkapps.com/user/2.0/user8307"
rel="edit" type="application/atom+xml" />
<ns1:login admin="false" agreedToTerms="false"
changePasswordAtNextLogin="false" ipWhitelisted="false"
suspended="false" userName="user8307"
xmlns:ns1="http://schemas.google.com/apps/2006" />
</ns0:entry>
</ns0:feed>"""
NICKNAME_ENTRY = """<?xml version='1.0' encoding='utf-8'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:apps='http://schemas.google.com/apps/2006'>
<id>
https://apps-apis.google.com/a/feeds/srkapps.net/nickname/2.0/nehag</id>
<updated>1970-01-01T00:00:00.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#nickname' />
<title type='text'>nehag</title>
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/srkapps.net/nickname/2.0/nehag' />
<link rel='edit' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/srkapps.net/nickname/2.0/nehag' />
<apps:nickname name='nehag' />
<apps:login userName='neha' />
</entry>"""
NICKNAME_FEED = """<?xml version='1.0' encoding='utf-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:apps='http://schemas.google.com/apps/2006'>
<id>
https://apps-apis.google.com/a/feeds/srkapps.net/nickname/2.0</id>
<updated>1970-01-01T00:00:00.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#nickname' />
<title type='text'>Nicknames</title>
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/srkapps.net/nickname/2.0' />
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/srkapps.net/nickname/2.0' />
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/srkapps.net/nickname/2.0' />
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>
https://apps-apis.google.com/a/feeds/srkapps.net/nickname/2.0/nehag</id>
<updated>1970-01-01T00:00:00.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#nickname' />
<title type='text'>nehag</title>
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/srkapps.net/nickname/2.0/nehag' />
<link rel='edit' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/srkapps.net/nickname/2.0/nehag' />
<apps:nickname name='nehag' />
<apps:login userName='neha' />
</entry>
<entry>
<id>
https://apps-apis.google.com/a/feeds/srkapps.net/nickname/2.0/richag</id>
<updated>1970-01-01T00:00:00.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#nickname' />
<title type='text'>richag</title>
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/srkapps.net/nickname/2.0/richag' />
<link rel='edit' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/srkapps.net/nickname/2.0/richag' />
<apps:nickname name='richag' />
<apps:login userName='richa' />
</entry>
</feed>"""
GROUP_ENTRY = """<?xml version='1.0' encoding='utf-8'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:apps='http://schemas.google.com/apps/2006'>
<id>
http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial%40srkapps.com</id>
<updated>2011-11-10T16:54:56.784Z</updated>
<link rel='self' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial%40srkapps.com' />
<link rel='edit' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial%40srkapps.com' />
<apps:property name='groupId' value='[email protected]' />
<apps:property name='groupName' value='Trial' />
<apps:property name='emailPermission' value='Domain' />
<apps:property name='permissionPreset' value='Custom' />
<apps:property name='description' value='For try' />
</entry>"""
GROUP_FEED= """<?xml version='1.0' encoding='utf-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:apps='http://schemas.google.com/apps/2006'>
<id>
http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com</id>
<updated>2011-11-10T16:56:03.830Z</updated>
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com' />
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com' />
<link rel='self' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com' />
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>
http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/firstgroup%40srkapps.com</id>
<updated>2011-11-10T16:56:03.830Z</updated>
<link rel='self' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/firstgroup%40srkapps.com' />
<link rel='edit' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/firstgroup%40srkapps.com' />
<apps:property name='groupId' value='[email protected]' />
<apps:property name='groupName' value='FirstGroup' />
<apps:property name='emailPermission' value='Domain' />
<apps:property name='permissionPreset' value='Custom' />
<apps:property name='description' value='First group' />
</entry>
<entry>
<id>
http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial%40srkapps.com</id>
<updated>2011-11-10T16:56:03.830Z</updated>
<link rel='self' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial%40srkapps.com' />
<link rel='edit' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial%40srkapps.com' />
<apps:property name='groupId' value='[email protected]' />
<apps:property name='groupName' value='Trial' />
<apps:property name='emailPermission' value='Domain' />
<apps:property name='permissionPreset' value='Custom' />
<apps:property name='description' value='For try' />
</entry>
</feed>"""
GROUP_MEMBER_ENTRY = """<?xml version='1.0' encoding='utf-8'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:apps='http://schemas.google.com/apps/2006'>
<id>
http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial/member/abcd12310%40srkapps.com</id>
<updated>2011-11-10T16:58:40.804Z</updated>
<link rel='self' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial/member/abcd12310%40srkapps.com' />
<link rel='edit' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial/member/abcd12310%40srkapps.com' />
<apps:property name='memberType' value='User' />
<apps:property name='memberId' value='[email protected]' />
<apps:property name='directMember' value='true' />
</entry>"""
GROUP_MEMBER_FEED = """<?xml version='1.0' encoding='utf-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:apps='http://schemas.google.com/apps/2006'>
<id>
http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial/member</id>
<updated>2011-11-10T16:57:15.574Z</updated>
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial/member' />
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial/member' />
<link rel='self' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial/member' />
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>
http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial/member/abcd12310%40srkapps.com</id>
<updated>2011-11-10T16:57:15.574Z</updated>
<link rel='self' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial/member/abcd12310%40srkapps.com' />
<link rel='edit' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial/member/abcd12310%40srkapps.com' />
<apps:property name='memberType' value='User' />
<apps:property name='memberId' value='[email protected]' />
<apps:property name='directMember' value='true' />
</entry>
<entry>
<id>
http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial/member/neha.technocrat%40srkapps.com</id>
<updated>2011-11-10T16:57:15.574Z</updated>
<link rel='self' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial/member/neha.technocrat%40srkapps.com' />
<link rel='edit' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial/member/neha.technocrat%40srkapps.com' />
<apps:property name='memberType' value='User' />
<apps:property name='memberId' value='[email protected]' />
<apps:property name='directMember' value='true' />
</entry>
</feed>"""
ORGANIZATION_UNIT_CUSTOMER_ID_ENTRY = """<?xml version='1.0' encoding='utf-8'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:apps='http://schemas.google.com/apps/2006'>
<id>
https://apps-apis.google.com/a/feeds/customer/2.0/C123A456B</id>
<updated>2011-11-21T13:17:02.274Z</updated>
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/customer/2.0/C123A456B' />
<link rel='edit' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/customer/2.0/C123A456B' />
<apps:property name='customerOrgUnitDescription'
value='example.com' />
<apps:property name='customerId' value='C123A456B' />
<apps:property name='customerOrgUnitName' value='example.com' />
<apps:property name='description' value='tempdescription' />
<apps:property name='name' value='example.com' />
</entry>"""
ORGANIZATION_UNIT_ORGUNIT_ENTRY = """<?xml version='1.0' encoding='utf-8'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:apps='http://schemas.google.com/apps/2006'>
<id>
https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B/Test+Organization</id>
<updated>2011-11-21T13:32:12.334Z</updated>
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B/Test+Organization' />
<link rel='edit' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B/Test+Organization' />
<apps:property name='description' value='New Test Org' />
<apps:property name='parentOrgUnitPath' value='Test' />
<apps:property name='name' value='Test Organization' />
<apps:property name='orgUnitPath' value='Test/Test+Organization' />
<apps:property name='blockInheritance' value='false' />
</entry>"""
ORGANIZATION_UNIT_ORGUNIT_FEED = """<?xml version='1.0' encoding='utf-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:apps='http://schemas.google.com/apps/2006'>
<id>https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B</id>
<updated>2011-11-21T13:47:12.551Z</updated>
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B' />
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B' />
<link rel='http://schemas.google.com/g/2005#batch'
type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B/batch' />
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B?get=all' />
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>
https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B/testOrgUnit92</id>
<updated>2011-11-21T13:42:45.349Z</updated>
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B/testOrgUnit92' />
<link rel='edit' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B/testOrgUnit92' />
<apps:property name='description' value='test92' />
<apps:property name='parentOrgUnitPath' value='Test' />
<apps:property name='name' value='testOrgUnit92' />
<apps:property name='orgUnitPath' value='Test/testOrgUnit92' />
<apps:property name='blockInheritance' value='false' />
</entry>
<entry>
<id>
https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B/testOrgUnit93</id>
<updated>2011-11-21T13:42:45.349Z</updated>
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B/testOrgUnit93' />
<link rel='edit' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B/testOrgUnit93' />
<apps:property name='description' value='test93' />
<apps:property name='parentOrgUnitPath' value='Test' />
<apps:property name='name' value='testOrgUnit93' />
<apps:property name='orgUnitPath' value='Test/testOrgUnit93' />
<apps:property name='blockInheritance' value='false' />
</entry>
</feed>"""
ORGANIZATION_UNIT_ORGUSER_ENTRY = """<?xml version='1.0' encoding='utf-8'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:apps='http://schemas.google.com/apps/2006'>
<id>
https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B/admin%40example.com</id>
<updated>2011-11-21T14:05:17.734Z</updated>
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B/admin%40example.com' />
<link rel='edit' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B/admin%40example.com' />
<apps:property name='orgUserEmail' value='[email protected]' />
<apps:property name='orgUnitPath' value='Test' />
</entry>"""
ORGANIZATION_UNIT_ORGUSER_FEED = """<?xml version='1.0' encoding='utf-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:apps='http://schemas.google.com/apps/2006'>
<id>https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B</id>
<updated>2011-11-21T14:10:48.206Z</updated>
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B' />
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B' />
<link rel='http://schemas.google.com/g/2005#batch'
type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B/batch' />
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B?get=all' />
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>
https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B/user720430%40example.com</id>
<updated>2011-11-21T14:09:16.600Z</updated>
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B/user720430%40example.com' />
<link rel='edit' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B/user720430%40example.com' />
<apps:property name='orgUserEmail'
value='[email protected]' />
<apps:property name='orgUnitPath' value='Test' />
</entry>
<entry>
<id>
https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B/user832648%40example.com</id>
<updated>2011-11-21T14:09:16.600Z</updated>
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B/user832648%40example.com' />
<link rel='edit' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B/user832648%40example.com' />
<apps:property name='orgUserEmail'
value='[email protected]' />
<apps:property name='orgUnitPath' value='Test' />
</entry>
</feed>"""
|
apache-2.0
|
KaiSzuttor/espresso
|
testsuite/python/actor.py
|
1
|
3068
|
#
# Copyright (C) 2018-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Test module for the actor base class.
"""
import unittest as ut
from espressomd import actors
class TestActor(actors.Actor):
def __init__(self, *args, **kwargs):
self._core_args = None
self._activated = False
self._deactivated = False
self._validated = False
super().__init__(*args, **kwargs)
def _get_params_from_es_core(self):
return self._core_args
def _set_params_in_es_core(self):
self._core_args = self._params
def valid_keys(self):
return "a", "b", "c"
def required_keys(self):
return "a", "c"
def default_params(self):
return {"a": False, "b": False, "c": False}
def _activate_method(self):
self._activated = True
def _deactivate_method(self):
self._deactivated = True
def validate_params(self):
self._validated = True
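# A minimal usage sketch (illustrative comments only; the actors.Actor contract
# is inferred from the overrides above): the base class merges default_params()
# with the constructor kwargs, checks required_keys(), and routes activation
# through the hook methods, e.g.
#   a = TestActor(a=True, c=True)   # "b" falls back to default_params()
#   a._activate()                   # triggers validate_params() and _activate_method()
#   a.get_params()                  # reads back via _get_params_from_es_core()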
class ActorTest(ut.TestCase):
def test_ctor(self):
a = TestActor(a=False, c=False)
self.assertFalse(a.is_active())
self.assertEqual(a.get_params(), a.default_params())
self.assertEqual(a.system, None)
def test_params_non_active(self):
a = TestActor(a=True, c=True)
a.set_params(a=False, b=True, c=False)
params = a.get_params()
self.assertEqual(params["a"], False)
self.assertEqual(params["b"], True)
self.assertEqual(params["c"], False)
self.assertEqual(a._core_args, None)
def test_params_active(self):
a = TestActor(a=True, c=True)
a._activate()
a.set_params(a=False, b=True, c=False)
params = a.get_params()
self.assertEqual(params["a"], False)
self.assertEqual(params["b"], True)
self.assertEqual(params["c"], False)
self.assertEqual(a._core_args, params)
def test_activation(self):
a = TestActor(a=True, c=True)
a._activate()
self.assertTrue(a.is_active())
def test_deactivation(self):
a = TestActor(a=True, c=True)
a._activate()
self.assertTrue(a.is_active())
a._deactivate()
self.assertFalse(a.is_active())
params = a.get_params()
self.assertEqual(params["a"], True)
self.assertEqual(params["b"], False)
self.assertEqual(params["c"], True)
if __name__ == "__main__":
ut.main()
|
gpl-3.0
|
neno1978/pelisalacarta
|
python/main-classic/servers/rapidvideo.py
|
2
|
2476
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for rapidvideo
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# ------------------------------------------------------------
import re
import urllib
from core import httptools
from core import logger
from core import scrapertools
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
    response = None
    try:
        response = httptools.downloadpage(page_url)
    except:
        pass
    if not response or not response.data or "urlopen error [Errno 1]" in str(response.code):
from core import config
if config.is_xbmc():
return False, "[Rapidvideo] Este conector solo funciona a partir de Kodi 17"
elif config.get_platform() == "plex":
return False, "[Rapidvideo] Este conector no funciona con tu versión de Plex, intenta actualizarla"
elif config.get_platform() == "mediaserver":
return False, "[Rapidvideo] Este conector requiere actualizar python a la versión 2.7.9 o superior"
if "Object not found" in response.data:
return False, "[Rapidvideo] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
urls = scrapertools.find_multiple_matches(data, '"file":"([^"]+)","label":"[^"]*","res":"([^"]+)"')
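    # Illustrative note (assumption about the page markup, not taken from the
    # code itself): the pattern above targets player-setup JSON entries such as
    #   {"file":"https:\/\/host\/video.mp4","label":"720p","res":"720"}
    # so each match yields the escaped media URL plus its resolution.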
for mediaurl, res in urls:
ext = scrapertools.get_filename_from_url(mediaurl)[-4:]
video_urls.append(['%s %sp [rapidvideo]' % (ext, res), mediaurl.replace("\\", "")])
return video_urls
# Finds videos from this server in the given text
def find_videos(text):
encontrados = set()
devuelve = []
#http://www.rapidvideo.com/e/YK7A0L7FU3A
    patronvideos = 'rapidvideo\.(?:org|com)/(?:\?v=|e/|embed/)([A-Za-z0-9]+)'
logger.info("#" + patronvideos + "#")
matches = re.compile(patronvideos, re.DOTALL).findall(text)
for match in matches:
titulo = "[rapidvideo]"
url = "https://www.rapidvideo.com/e/" + match
if url not in encontrados:
logger.info(" url=" + url)
devuelve.append([titulo, url, 'rapidvideo'])
encontrados.add(url)
else:
logger.info(" url duplicada=" + url)
return devuelve
|
gpl-3.0
|
fastinetserver/portage-idfetch
|
pym/_emerge/BinpkgPrefetcher.py
|
1
|
1186
|
# Copyright 1999-2009 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.BinpkgFetcher import BinpkgFetcher
from _emerge.CompositeTask import CompositeTask
from _emerge.BinpkgVerifier import BinpkgVerifier
from portage import os
class BinpkgPrefetcher(CompositeTask):
__slots__ = ("pkg",) + \
("pkg_path", "_bintree",)
def _start(self):
self._bintree = self.pkg.root_config.trees["bintree"]
fetcher = BinpkgFetcher(background=self.background,
logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
scheduler=self.scheduler)
self.pkg_path = fetcher.pkg_path
self._start_task(fetcher, self._fetcher_exit)
def _fetcher_exit(self, fetcher):
if self._default_exit(fetcher) != os.EX_OK:
self.wait()
return
verifier = BinpkgVerifier(background=self.background,
logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
self._start_task(verifier, self._verifier_exit)
def _verifier_exit(self, verifier):
if self._default_exit(verifier) != os.EX_OK:
self.wait()
return
self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
self._current_task = None
self.returncode = os.EX_OK
self.wait()
|
gpl-2.0
|
fernandog/Sick-Beard
|
cherrypy/lib/auth_basic.py
|
35
|
3639
|
# This file is part of CherryPy <http://www.cherrypy.org/>
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab:fileencoding=utf-8
__doc__ = """Module auth_basic.py provides a CherryPy 3.x tool which implements
the server-side of HTTP Basic Access Authentication, as described in RFC 2617.
Example usage, using the built-in checkpassword_dict function which uses a dict
as the credentials store:
userpassdict = {'bird' : 'bebop', 'ornette' : 'wayout'}
checkpassword = cherrypy.lib.auth_basic.checkpassword_dict(userpassdict)
basic_auth = {'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'earth',
'tools.auth_basic.checkpassword': checkpassword,
}
app_config = { '/' : basic_auth }
"""
__author__ = 'visteya'
__date__ = 'April 2009'
import binascii
import base64
import cherrypy
def checkpassword_dict(user_password_dict):
"""Returns a checkpassword function which checks credentials
against a dictionary of the form: {username : password}.
If you want a simple dictionary-based authentication scheme, use
checkpassword_dict(my_credentials_dict) as the value for the
checkpassword argument to basic_auth().
"""
def checkpassword(realm, user, password):
p = user_password_dict.get(user)
        return bool(p) and p == password
return checkpassword
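# Usage sketch (values are illustrative): the returned closure matches the
# checkpassword(realm, user, password) signature expected by basic_auth():
#   check = checkpassword_dict({'bird': 'bebop'})
#   check('earth', 'bird', 'bebop')   # -> True
#   check('earth', 'bird', 'wrong')   # -> False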
def basic_auth(realm, checkpassword, debug=False):
"""basic_auth is a CherryPy tool which hooks at before_handler to perform
HTTP Basic Access Authentication, as specified in RFC 2617.
If the request has an 'authorization' header with a 'Basic' scheme, this
tool attempts to authenticate the credentials supplied in that header. If
the request has no 'authorization' header, or if it does but the scheme is
not 'Basic', or if authentication fails, the tool sends a 401 response with
a 'WWW-Authenticate' Basic header.
Arguments:
realm: a string containing the authentication realm.
checkpassword: a callable which checks the authentication credentials.
            Its signature is checkpassword(realm, username, password), where
username and password are the values obtained from the request's
'authorization' header. If authentication succeeds, checkpassword
returns True, else it returns False.
"""
if '"' in realm:
raise ValueError('Realm cannot contain the " (quote) character.')
request = cherrypy.serving.request
auth_header = request.headers.get('authorization')
if auth_header is not None:
try:
scheme, params = auth_header.split(' ', 1)
if scheme.lower() == 'basic':
                # since CherryPy claims compatibility with Python 2.3, we must use
# the legacy API of base64
username_password = base64.decodestring(params)
username, password = username_password.split(':', 1)
if checkpassword(realm, username, password):
if debug:
cherrypy.log('Auth succeeded', 'TOOLS.AUTH_BASIC')
request.login = username
return # successful authentication
except (ValueError, binascii.Error): # split() error, base64.decodestring() error
raise cherrypy.HTTPError(400, 'Bad Request')
# Respond with 401 status and a WWW-Authenticate header
cherrypy.serving.response.headers['www-authenticate'] = 'Basic realm="%s"' % realm
raise cherrypy.HTTPError(401, "You are not authorized to access that resource")
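# Hookup sketch (an assumption about the surrounding framework, shown only for
# illustration): CherryPy normally exposes this function as the built-in
# 'tools.auth_basic' tool, configured as in the module docstring; wiring it up
# manually would look roughly like:
#   cherrypy.tools.auth_basic = cherrypy.Tool('before_handler', basic_auth)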
|
gpl-3.0
|
Puneeth-n/anna-molly
|
test/test_collector.py
|
2
|
1172
|
import sys
import os.path
from mock import Mock
ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(os.path.join(ROOT_DIR, 'bin')) # add bin dir to stub collector internals
import collector
from lib.modules.models import TimeSeriesTuple
class TestCollector(object):
@classmethod
def setUpAll(cls):
options = Mock(**{'config': './collector.json'})
collector.setup(options)
def setUp(self):
self.writer = Mock()
# process
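    # Note: the assertions below rely on the ./collector.json fixture (not
    # shown here) whitelisting 'serv1' metrics and blacklisting names matching
    # 'cpu_crit' -- inferred from the test names, not verified against the file.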
def test_collector_process_accepts_whitelisted_and_not_blacklisted_metrics(self):
collector.process(self.writer, TimeSeriesTuple('host.ip.127-0-0-1.serv1.cpu.avg', 1, 1))
self.writer.write.called.should.be.true
def test_collector_process_ignores_not_whitelisted_metrics(self):
collector.process(self.writer, TimeSeriesTuple('host.ip.127-0-0-1.serv2.cpu.avg', 1, 1))
self.writer.write.called.should.be.false
def test_collector_process_ignores_whitelisted_but_blacklisted_metrics(self):
collector.process(self.writer, TimeSeriesTuple('host.ip.127-0-0-1.serv1.cpu_crit.avg', 1, 1))
self.writer.write.called.should.be.false
|
mit
|
thanhacun/odoo
|
addons/mail/tests/test_mail_features.py
|
172
|
59265
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from ..mail_mail import mail_mail
from ..mail_thread import mail_thread
from .common import TestMail
from openerp.tools import mute_logger, email_split, html2plaintext
from openerp.tools.mail import html_sanitize
class test_mail(TestMail):
def test_000_alias_setup(self):
""" Test basic mail.alias setup works, before trying to use them for routing """
cr, uid = self.cr, self.uid
self.user_valentin_id = self.res_users.create(cr, uid,
{'name': 'Valentin Cognito', 'email': '[email protected]', 'login': 'valentin.cognito', 'alias_name': 'valentin.cognito'})
self.user_valentin = self.res_users.browse(cr, uid, self.user_valentin_id)
self.assertEquals(self.user_valentin.alias_name, self.user_valentin.login, "Login should be used as alias")
self.user_pagan_id = self.res_users.create(cr, uid,
{'name': 'Pagan Le Marchant', 'email': '[email protected]', 'login': '[email protected]', 'alias_name': '[email protected]'})
self.user_pagan = self.res_users.browse(cr, uid, self.user_pagan_id)
self.assertEquals(self.user_pagan.alias_name, 'plmarchant', "If login is an email, the alias should keep only the local part")
self.user_barty_id = self.res_users.create(cr, uid,
{'name': 'Bartholomew Ironside', 'email': '[email protected]', 'login': 'b4r+_#_R3wl$$', 'alias_name': 'b4r+_#_R3wl$$'})
self.user_barty = self.res_users.browse(cr, uid, self.user_barty_id)
self.assertEquals(self.user_barty.alias_name, 'b4r+_-_r3wl-', 'Disallowed chars should be replaced by hyphens')
def test_00_followers_function_field(self):
""" Tests designed for the many2many function field 'follower_ids'.
        We will test performing writes using the many2many commands 0, 3, 4,
5 and 6. """
cr, uid, user_admin, partner_bert_id, group_pigs = self.cr, self.uid, self.user_admin, self.partner_bert_id, self.group_pigs
# Data: create 'disturbing' values in mail.followers: same res_id, other res_model; same res_model, other res_id
group_dummy_id = self.mail_group.create(cr, uid,
{'name': 'Dummy group'}, {'mail_create_nolog': True})
self.mail_followers.create(cr, uid,
{'res_model': 'mail.thread', 'res_id': self.group_pigs_id, 'partner_id': partner_bert_id})
self.mail_followers.create(cr, uid,
{'res_model': 'mail.group', 'res_id': group_dummy_id, 'partner_id': partner_bert_id})
# Pigs just created: should be only Admin as follower
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertEqual(follower_ids, set([user_admin.partner_id.id]), 'Admin should be the only Pigs fan')
# Subscribe Bert through a '4' command
group_pigs.write({'message_follower_ids': [(4, partner_bert_id)]})
group_pigs.refresh()
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertEqual(follower_ids, set([partner_bert_id, user_admin.partner_id.id]), 'Bert and Admin should be the only Pigs fans')
# Unsubscribe Bert through a '3' command
group_pigs.write({'message_follower_ids': [(3, partner_bert_id)]})
group_pigs.refresh()
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertEqual(follower_ids, set([user_admin.partner_id.id]), 'Admin should be the only Pigs fan')
# Set followers through a '6' command
group_pigs.write({'message_follower_ids': [(6, 0, [partner_bert_id])]})
group_pigs.refresh()
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertEqual(follower_ids, set([partner_bert_id]), 'Bert should be the only Pigs fan')
# Add a follower created on the fly through a '0' command
group_pigs.write({'message_follower_ids': [(0, 0, {'name': 'Patrick Fiori'})]})
partner_patrick_id = self.res_partner.search(cr, uid, [('name', '=', 'Patrick Fiori')])[0]
group_pigs.refresh()
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertEqual(follower_ids, set([partner_bert_id, partner_patrick_id]), 'Bert and Patrick should be the only Pigs fans')
# Finally, unlink through a '5' command
group_pigs.write({'message_follower_ids': [(5, 0)]})
group_pigs.refresh()
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertFalse(follower_ids, 'Pigs group should not have fans anymore')
# Test dummy data has not been altered
fol_obj_ids = self.mail_followers.search(cr, uid, [('res_model', '=', 'mail.thread'), ('res_id', '=', self.group_pigs_id)])
follower_ids = set([follower.partner_id.id for follower in self.mail_followers.browse(cr, uid, fol_obj_ids)])
self.assertEqual(follower_ids, set([partner_bert_id]), 'Bert should be the follower of dummy mail.thread data')
fol_obj_ids = self.mail_followers.search(cr, uid, [('res_model', '=', 'mail.group'), ('res_id', '=', group_dummy_id)])
follower_ids = set([follower.partner_id.id for follower in self.mail_followers.browse(cr, uid, fol_obj_ids)])
self.assertEqual(follower_ids, set([partner_bert_id, user_admin.partner_id.id]), 'Bert and Admin should be the followers of dummy mail.group data')
def test_05_message_followers_and_subtypes(self):
""" Tests designed for the subscriber API as well as message subtypes """
cr, uid, user_admin, user_raoul, group_pigs = self.cr, self.uid, self.user_admin, self.user_raoul, self.group_pigs
# Data: message subtypes
self.mail_message_subtype.create(cr, uid, {'name': 'mt_mg_def', 'default': True, 'res_model': 'mail.group'})
self.mail_message_subtype.create(cr, uid, {'name': 'mt_other_def', 'default': True, 'res_model': 'crm.lead'})
self.mail_message_subtype.create(cr, uid, {'name': 'mt_all_def', 'default': True, 'res_model': False})
mt_mg_nodef = self.mail_message_subtype.create(cr, uid, {'name': 'mt_mg_nodef', 'default': False, 'res_model': 'mail.group'})
mt_all_nodef = self.mail_message_subtype.create(cr, uid, {'name': 'mt_all_nodef', 'default': False, 'res_model': False})
default_group_subtypes = self.mail_message_subtype.search(cr, uid, [('default', '=', True), '|', ('res_model', '=', 'mail.group'), ('res_model', '=', False)])
# ----------------------------------------
# CASE1: test subscriptions with subtypes
# ----------------------------------------
# Do: subscribe Raoul, should have default subtypes
group_pigs.message_subscribe_users([user_raoul.id])
group_pigs.refresh()
# Test: 2 followers (Admin and Raoul)
follower_ids = [follower.id for follower in group_pigs.message_follower_ids]
self.assertEqual(set(follower_ids), set([user_raoul.partner_id.id, user_admin.partner_id.id]),
'message_subscribe: Admin and Raoul should be the only 2 Pigs fans')
# Raoul follows default subtypes
fol_ids = self.mail_followers.search(cr, uid, [
('res_model', '=', 'mail.group'),
('res_id', '=', self.group_pigs_id),
('partner_id', '=', user_raoul.partner_id.id)
])
fol_obj = self.mail_followers.browse(cr, uid, fol_ids)[0]
fol_subtype_ids = set([subtype.id for subtype in fol_obj.subtype_ids])
self.assertEqual(set(fol_subtype_ids), set(default_group_subtypes),
'message_subscribe: Raoul subscription subtypes are incorrect, should be all default ones')
# Do: subscribe Raoul with specified new subtypes
group_pigs.message_subscribe_users([user_raoul.id], subtype_ids=[mt_mg_nodef])
# Test: 2 followers (Admin and Raoul)
follower_ids = [follower.id for follower in group_pigs.message_follower_ids]
self.assertEqual(set(follower_ids), set([user_raoul.partner_id.id, user_admin.partner_id.id]),
'message_subscribe: Admin and Raoul should be the only 2 Pigs fans')
# Test: 2 lines in mail.followers (no duplicate for Raoul)
fol_ids = self.mail_followers.search(cr, uid, [
('res_model', '=', 'mail.group'),
('res_id', '=', self.group_pigs_id),
])
self.assertEqual(len(fol_ids), 2,
'message_subscribe: subscribing an already-existing follower should not create new entries in mail.followers')
# Test: Raoul follows only specified subtypes
fol_ids = self.mail_followers.search(cr, uid, [
('res_model', '=', 'mail.group'),
('res_id', '=', self.group_pigs_id),
('partner_id', '=', user_raoul.partner_id.id)
])
fol_obj = self.mail_followers.browse(cr, uid, fol_ids)[0]
fol_subtype_ids = set([subtype.id for subtype in fol_obj.subtype_ids])
self.assertEqual(set(fol_subtype_ids), set([mt_mg_nodef]),
'message_subscribe: Raoul subscription subtypes are incorrect, should be only specified')
# Do: Subscribe Raoul without specified subtypes: should not erase existing subscription subtypes
group_pigs.message_subscribe_users([user_raoul.id, user_raoul.id])
group_pigs.message_subscribe_users([user_raoul.id])
group_pigs.refresh()
# Test: 2 followers (Admin and Raoul)
follower_ids = [follower.id for follower in group_pigs.message_follower_ids]
self.assertEqual(set(follower_ids), set([user_raoul.partner_id.id, user_admin.partner_id.id]),
'message_subscribe: Admin and Raoul should be the only 2 Pigs fans')
# Test: Raoul follows default subtypes
fol_ids = self.mail_followers.search(cr, uid, [
('res_model', '=', 'mail.group'),
('res_id', '=', self.group_pigs_id),
('partner_id', '=', user_raoul.partner_id.id)
])
fol_obj = self.mail_followers.browse(cr, uid, fol_ids)[0]
fol_subtype_ids = set([subtype.id for subtype in fol_obj.subtype_ids])
self.assertEqual(set(fol_subtype_ids), set([mt_mg_nodef]),
'message_subscribe: Raoul subscription subtypes are incorrect, should be only specified')
# Do: Unsubscribe Raoul twice through message_unsubscribe_users
group_pigs.message_unsubscribe_users([user_raoul.id, user_raoul.id])
group_pigs.refresh()
# Test: 1 follower (Admin)
follower_ids = [follower.id for follower in group_pigs.message_follower_ids]
self.assertEqual(follower_ids, [user_admin.partner_id.id], 'Admin must be the only Pigs fan')
        # Test: 1 line in mail.followers (no duplicate for Raoul)
fol_ids = self.mail_followers.search(cr, uid, [
('res_model', '=', 'mail.group'),
('res_id', '=', self.group_pigs_id)
])
self.assertEqual(len(fol_ids), 1,
'message_subscribe: group should have only 1 entry in mail.follower for 1 follower')
# Do: subscribe Admin with subtype_ids
group_pigs.message_subscribe_users([uid], [mt_mg_nodef, mt_all_nodef])
fol_ids = self.mail_followers.search(cr, uid, [('res_model', '=', 'mail.group'), ('res_id', '=', self.group_pigs_id), ('partner_id', '=', user_admin.partner_id.id)])
fol_obj = self.mail_followers.browse(cr, uid, fol_ids)[0]
fol_subtype_ids = set([subtype.id for subtype in fol_obj.subtype_ids])
self.assertEqual(set(fol_subtype_ids), set([mt_mg_nodef, mt_all_nodef]), 'subscription subtypes are incorrect')
# ----------------------------------------
# CASE2: test mail_thread fields
# ----------------------------------------
subtype_data = group_pigs._get_subscription_data(None, None)[group_pigs.id]['message_subtype_data']
self.assertEqual(set(subtype_data.keys()), set(['Discussions', 'mt_mg_def', 'mt_all_def', 'mt_mg_nodef', 'mt_all_nodef']), 'mail.group available subtypes incorrect')
self.assertFalse(subtype_data['Discussions']['followed'], 'Admin should not follow Discussions in pigs')
self.assertTrue(subtype_data['mt_mg_nodef']['followed'], 'Admin should follow mt_mg_nodef in pigs')
self.assertTrue(subtype_data['mt_all_nodef']['followed'], 'Admin should follow mt_all_nodef in pigs')
def test_11_notification_url(self):
""" Tests designed to test the URL added in notification emails. """
cr, uid, group_pigs = self.cr, self.uid, self.group_pigs
# Test URL formatting
base_url = self.registry('ir.config_parameter').get_param(cr, uid, 'web.base.url')
# Partner data
partner_raoul = self.res_partner.browse(cr, uid, self.partner_raoul_id)
partner_bert_id = self.res_partner.create(cr, uid, {'name': 'bert'})
partner_bert = self.res_partner.browse(cr, uid, partner_bert_id)
# Mail data
mail_mail_id = self.mail_mail.create(cr, uid, {'state': 'exception'})
mail = self.mail_mail.browse(cr, uid, mail_mail_id)
# Test: link for nobody -> None
url = mail_mail._get_partner_access_link(self.mail_mail, cr, uid, mail)
self.assertEqual(url, None,
'notification email: mails not sent to a specific partner should not have any URL')
# Test: link for partner -> None
url = mail_mail._get_partner_access_link(self.mail_mail, cr, uid, mail, partner=partner_bert)
self.assertEqual(url, None,
'notification email: mails sent to a partner that is not a user should not have any URL')
# Test: link for user -> signin
url = mail_mail._get_partner_access_link(self.mail_mail, cr, uid, mail, partner=partner_raoul)
self.assertIn(base_url, url,
'notification email: link should contain web.base.url')
self.assertIn('db=%s' % cr.dbname, url,
'notification email: link should contain database name')
self.assertIn('action=mail.action_mail_redirect', url,
'notification email: link should contain the redirect action')
self.assertIn('login=%s' % partner_raoul.user_ids[0].login, url,
'notification email: link should contain the user login')
# Test: link for user -> with model and res_id
mail_mail_id = self.mail_mail.create(cr, uid, {'model': 'mail.group', 'res_id': group_pigs.id})
mail = self.mail_mail.browse(cr, uid, mail_mail_id)
url = mail_mail._get_partner_access_link(self.mail_mail, cr, uid, mail, partner=partner_raoul)
self.assertIn(base_url, url,
'notification email: link should contain web.base.url')
self.assertIn('db=%s' % cr.dbname, url,
'notification email: link should contain database name')
self.assertIn('action=mail.action_mail_redirect', url,
'notification email: link should contain the redirect action')
self.assertIn('login=%s' % partner_raoul.user_ids[0].login, url,
'notification email: link should contain the user login')
self.assertIn('model=mail.group', url,
'notification email: link should contain the model for a non-notification email on a record')
self.assertIn('res_id=%s' % group_pigs.id, url,
'notification email: link should contain the res_id for a non-notification email on a record')
# Test: link for user -> notification email on a record: message_id replaces model and res_id
mail_mail_id = self.mail_mail.create(cr, uid, {'notification': True, 'model': 'mail.group', 'res_id': group_pigs.id})
mail = self.mail_mail.browse(cr, uid, mail_mail_id)
url = mail_mail._get_partner_access_link(self.mail_mail, cr, uid, mail, partner=partner_raoul)
self.assertIn(base_url, url,
'notification email: link should contain web.base.url')
self.assertIn('db=%s' % cr.dbname, url,
'notification email: link should contain database name')
self.assertIn('action=mail.action_mail_redirect', url,
'notification email: link should contain the redirect action')
self.assertIn('login=%s' % partner_raoul.user_ids[0].login, url,
'notification email: link should contain the user login')
self.assertIn('message_id=%s' % mail.mail_message_id.id, url,
'notification email: link based on message should contain the mail_message id')
self.assertNotIn('model=mail.group', url,
'notification email: link based on message should not contain model')
self.assertNotIn('res_id=%s' % group_pigs.id, url,
'notification email: link based on message should not contain res_id')
@mute_logger('openerp.addons.mail.mail_thread', 'openerp.models')
def test_12_inbox_redirection(self):
""" Tests designed to test the inbox redirection of emails notification URLs. """
cr, uid, user_admin, group_pigs = self.cr, self.uid, self.user_admin, self.group_pigs
model, act_id = self.ir_model_data.get_object_reference(cr, uid, 'mail', 'action_mail_inbox_feeds')
# Data: post a message on pigs
msg_id = self.group_pigs.message_post(body='My body', partner_ids=[self.partner_bert_id], type='comment', subtype='mail.mt_comment')
# No specific parameters -> should redirect to Inbox
action = mail_thread.message_redirect_action(self.mail_thread, cr, self.user_raoul_id, {'params': {}})
self.assertEqual(
action.get('type'), 'ir.actions.client',
'URL redirection: action without parameters should redirect to client action Inbox'
)
self.assertEqual(
action.get('id'), act_id,
'URL redirection: action without parameters should redirect to client action Inbox'
)
# Raoul has read access to Pigs -> should redirect to form view of Pigs
action = mail_thread.message_redirect_action(self.mail_thread, cr, self.user_raoul_id, {'params': {'message_id': msg_id}})
self.assertEqual(
action.get('type'), 'ir.actions.act_window',
'URL redirection: action with message_id for read-accredited user should redirect to Pigs'
)
self.assertEqual(
action.get('res_id'), group_pigs.id,
'URL redirection: action with message_id for read-accredited user should redirect to Pigs'
)
action = mail_thread.message_redirect_action(self.mail_thread, cr, self.user_raoul_id, {'params': {'model': 'mail.group', 'res_id': group_pigs.id}})
self.assertEqual(
action.get('type'), 'ir.actions.act_window',
'URL redirection: action with model/res_id for read-accredited user should redirect to Pigs'
)
self.assertEqual(
action.get('res_id'), group_pigs.id,
'URL redirection: action with model/res_id for read-accredited user should redirect to Pigs'
)
# Bert has no read access to Pigs -> should redirect to Inbox
action = mail_thread.message_redirect_action(self.mail_thread, cr, self.user_bert_id, {'params': {'message_id': msg_id}})
self.assertEqual(
action.get('type'), 'ir.actions.client',
'URL redirection: action with message_id for a user without read access should redirect to Inbox'
)
self.assertEqual(
action.get('id'), act_id,
'URL redirection: action with message_id for a user without read access should redirect to Inbox'
)
action = mail_thread.message_redirect_action(self.mail_thread, cr, self.user_bert_id, {'params': {'model': 'mail.group', 'res_id': group_pigs.id}})
self.assertEqual(
action.get('type'), 'ir.actions.client',
'URL redirection: action with model/res_id for a user without read access should redirect to Inbox'
)
self.assertEqual(
action.get('id'), act_id,
'URL redirection: action with model/res_id for a user without read access should redirect to Inbox'
)
def test_20_message_post(self):
""" Tests designed for message_post. """
cr, uid, user_raoul, group_pigs = self.cr, self.uid, self.user_raoul, self.group_pigs
# --------------------------------------------------
# Data creation
# --------------------------------------------------
# 0 - Update existing users-partners
self.res_users.write(cr, uid, [uid], {'email': 'a@a', 'notify_email': 'always'})
self.res_users.write(cr, uid, [self.user_raoul_id], {'email': 'r@r'})
# 1 - Bert Tartopoils, with email, should receive emails for comments and emails
p_b_id = self.res_partner.create(cr, uid, {'name': 'Bert Tartopoils', 'email': 'b@b'})
# 2 - Carine Poilvache, with email, should receive emails for emails
p_c_id = self.res_partner.create(cr, uid, {'name': 'Carine Poilvache', 'email': 'c@c', 'notify_email': 'none'})
# 3 - Dédé Grosbedon, with email, should receive emails for every message
p_d_id = self.res_partner.create(cr, uid, {'name': 'Dédé Grosbedon', 'email': 'd@d', 'notify_email': 'always'})
# 4 - Attachments
attach1_id = self.ir_attachment.create(cr, user_raoul.id, {
'name': 'Attach1', 'datas_fname': 'Attach1',
'datas': 'bWlncmF0aW9uIHRlc3Q=',
'res_model': 'mail.compose.message', 'res_id': 0})
attach2_id = self.ir_attachment.create(cr, user_raoul.id, {
'name': 'Attach2', 'datas_fname': 'Attach2',
'datas': 'bWlncmF0aW9uIHRlc3Q=',
'res_model': 'mail.compose.message', 'res_id': 0})
attach3_id = self.ir_attachment.create(cr, user_raoul.id, {
'name': 'Attach3', 'datas_fname': 'Attach3',
'datas': 'bWlncmF0aW9uIHRlc3Q=',
'res_model': 'mail.compose.message', 'res_id': 0})
# 5 - Mail data
_subject = 'Pigs'
_mail_subject = 'Re: %s' % (group_pigs.name)
_body1 = '<p>Pigs rules</p>'
_body2 = '<html>Pigs rocks</html>'
_attachments = [
('List1', 'My first attachment'),
('List2', 'My second attachment')
]
# --------------------------------------------------
# CASE1: post comment + partners + attachments
# --------------------------------------------------
# Data: set alias_domain to see emails with alias
self.registry('ir.config_parameter').set_param(self.cr, self.uid, 'mail.catchall.domain', 'schlouby.fr')
# Data: change Pigs name to test reply_to
self.mail_group.write(cr, uid, [self.group_pigs_id], {'name': '"Pigs" !ù $%-'})
# Do: subscribe Raoul
new_follower_ids = [self.partner_raoul_id]
group_pigs.message_subscribe(new_follower_ids)
# Test: group followers = Raoul + uid
group_fids = [follower.id for follower in group_pigs.message_follower_ids]
test_fids = new_follower_ids + [self.partner_admin_id]
self.assertEqual(set(test_fids), set(group_fids),
'message_subscribe: incorrect followers after subscribe')
# Do: Raoul message_post on Pigs
self._init_mock_build_email()
msg1_id = self.mail_group.message_post(cr, user_raoul.id, self.group_pigs_id,
body=_body1, subject=_subject, partner_ids=[p_b_id, p_c_id],
attachment_ids=[attach1_id, attach2_id], attachments=_attachments,
type='comment', subtype='mt_comment')
msg = self.mail_message.browse(cr, uid, msg1_id)
msg_message_id = msg.message_id
msg_pids = [partner.id for partner in msg.notified_partner_ids]
msg_aids = [attach.id for attach in msg.attachment_ids]
sent_emails = self._build_email_kwargs_list
# Test: mail_message: subject and body not modified
self.assertEqual(_subject, msg.subject, 'message_post: mail.message subject incorrect')
self.assertEqual(_body1, msg.body, 'message_post: mail.message body incorrect')
# Test: mail_message: notified_partner_ids = group followers + partner_ids - author
test_pids = set([self.partner_admin_id, p_b_id, p_c_id])
self.assertEqual(test_pids, set(msg_pids), 'message_post: mail.message notified partners incorrect')
# Test: mail_message: attachments (4, attachment_ids + attachments)
test_aids = set([attach1_id, attach2_id])
msg_attach_names = set([attach.name for attach in msg.attachment_ids])
test_attach_names = set(['Attach1', 'Attach2', 'List1', 'List2'])
self.assertEqual(len(msg_aids), 4,
'message_post: mail.message wrong number of attachments')
self.assertEqual(msg_attach_names, test_attach_names,
'message_post: mail.message attachments incorrectly added')
self.assertTrue(test_aids.issubset(set(msg_aids)),
'message_post: mail.message attachments duplicated')
for attach in msg.attachment_ids:
self.assertEqual(attach.res_model, 'mail.group',
'message_post: mail.message attachments were not linked to the document')
self.assertEqual(attach.res_id, group_pigs.id,
'message_post: mail.message attachments were not linked to the document')
if 'List' in attach.name:
self.assertIn((attach.name, attach.datas.decode('base64')), _attachments,
'message_post: mail.message attachment name / data incorrect')
dl_attach = self.mail_message.download_attachment(cr, user_raoul.id, id_message=msg.id, attachment_id=attach.id)
self.assertIn((dl_attach['filename'], dl_attach['base64'].decode('base64')), _attachments,
'message_post: mail.message download_attachment is incorrect')
# Test: followers: same as before (author was already subscribed)
group_pigs.refresh()
group_fids = [follower.id for follower in group_pigs.message_follower_ids]
test_fids = new_follower_ids + [self.partner_admin_id]
self.assertEqual(set(test_fids), set(group_fids),
'message_post: wrong followers after posting')
# Test: mail_mail: notifications have been deleted
self.assertFalse(self.mail_mail.search(cr, uid, [('mail_message_id', '=', msg1_id)]),
'message_post: mail.mail notifications should have been auto-deleted!')
# Test: notification emails sent to a and b; c receives only email-type messages, r is the author
test_emailto = ['Administrator <a@a>', 'Bert Tartopoils <b@b>']
# test_emailto = ['"Followers of -Pigs-" <a@a>', '"Followers of -Pigs-" <b@b>']
self.assertEqual(len(sent_emails), 2,
'message_post: wrong number of sent notification emails')
self.assertEqual(set([m['email_to'][0] for m in sent_emails]), set(test_emailto),
'message_post: notification emails wrong recipients (email_to)')
for sent_email in sent_emails:
self.assertEqual(sent_email['email_from'], 'Raoul Grosbedon <[email protected]>',
'message_post: notification email wrong email_from: should use alias of sender')
self.assertEqual(len(sent_email['email_to']), 1,
'message_post: notification email sent to more than one email address instead of a precise partner')
self.assertIn(sent_email['email_to'][0], test_emailto,
'message_post: notification email email_to incorrect')
self.assertEqual(sent_email['reply_to'], u'"YourCompany \\"Pigs\\" !ù $%-" <[email protected]>',
'message_post: notification email reply_to incorrect')
self.assertEqual(_subject, sent_email['subject'],
'message_post: notification email subject incorrect')
self.assertIn(_body1, sent_email['body'],
'message_post: notification email body incorrect')
self.assertIn('Pigs rules', sent_email['body_alternative'],
'message_post: notification email body alternative should contain the body')
self.assertNotIn('<p>', sent_email['body_alternative'],
'message_post: notification email body alternative still contains html')
self.assertFalse(sent_email['references'],
'message_post: references should be False when sending a message that is not a reply')
# Test: notification linked to this message = group followers = notified_partner_ids
notif_ids = self.mail_notification.search(cr, uid, [('message_id', '=', msg1_id)])
notif_pids = set([notif.partner_id.id for notif in self.mail_notification.browse(cr, uid, notif_ids)])
self.assertEqual(notif_pids, test_pids,
'message_post: mail.message created mail.notification incorrect')
# Data: Pigs name back to normal
self.mail_group.write(cr, uid, [self.group_pigs_id], {'name': 'Pigs'})
# --------------------------------------------------
# CASE2: reply + parent_id + parent notification
# --------------------------------------------------
# Data: remove alias_domain to see emails without alias
param_ids = self.registry('ir.config_parameter').search(cr, uid, [('key', '=', 'mail.catchall.domain')])
self.registry('ir.config_parameter').unlink(cr, uid, param_ids)
# Do: Raoul message_post on Pigs
self._init_mock_build_email()
msg2_id = self.mail_group.message_post(cr, user_raoul.id, self.group_pigs_id,
body=_body2, type='email', subtype='mt_comment',
partner_ids=[p_d_id], parent_id=msg1_id, attachment_ids=[attach3_id],
context={'mail_post_autofollow': True})
msg = self.mail_message.browse(cr, uid, msg2_id)
msg_pids = [partner.id for partner in msg.notified_partner_ids]
msg_aids = [attach.id for attach in msg.attachment_ids]
sent_emails = self._build_email_kwargs_list
# Test: mail_message: subject is False, body, parent_id is msg_id
self.assertEqual(msg.subject, False, 'message_post: mail.message subject incorrect')
self.assertEqual(msg.body, html_sanitize(_body2), 'message_post: mail.message body incorrect')
self.assertEqual(msg.parent_id.id, msg1_id, 'message_post: mail.message parent_id incorrect')
# Test: mail_message: notified_partner_ids = group followers + added partner
test_pids = [self.partner_admin_id, p_d_id]
self.assertEqual(set(test_pids), set(msg_pids), 'message_post: mail.message partners incorrect')
# Test: mail_message: notifications linked to this message = group followers = notified_partner_ids
notif_ids = self.mail_notification.search(cr, uid, [('message_id', '=', msg2_id)])
notif_pids = [notif.partner_id.id for notif in self.mail_notification.browse(cr, uid, notif_ids)]
self.assertEqual(set(test_pids), set(notif_pids), 'message_post: mail.message notification partners incorrect')
# Test: mail_mail: notifications deleted
self.assertFalse(self.mail_mail.search(cr, uid, [('mail_message_id', '=', msg2_id)]), 'mail.mail notifications should have been auto-deleted!')
# Test: emails sent by the server (to a, b, c, d)
test_emailto = [u'Administrator <a@a>', u'Bert Tartopoils <b@b>', u'Carine Poilvache <c@c>', u'D\xe9d\xe9 Grosbedon <d@d>']
# test_emailto = [u'"Followers of Pigs" <a@a>', u'"Followers of Pigs" <b@b>', u'"Followers of Pigs" <c@c>', u'"Followers of Pigs" <d@d>']
# self.assertEqual(len(sent_emails), 3, 'sent_email number of sent emails incorrect')
for sent_email in sent_emails:
self.assertEqual(sent_email['email_from'], 'Raoul Grosbedon <r@r>',
'message_post: notification email wrong email_from: should use email of sender when no alias domain set')
self.assertEqual(len(sent_email['email_to']), 1,
'message_post: notification email sent to more than one email address instead of a precise partner')
self.assertIn(sent_email['email_to'][0], test_emailto,
'message_post: notification email email_to incorrect')
self.assertEqual(email_split(sent_email['reply_to']), ['r@r'], # was '"Followers of Pigs" <r@r>', but makes no sense
'message_post: notification email reply_to incorrect: should have raoul email')
self.assertEqual(_mail_subject, sent_email['subject'],
'message_post: notification email subject incorrect')
self.assertIn(html_sanitize(_body2), sent_email['body'],
'message_post: notification email does not contain the body')
self.assertIn('Pigs rocks', sent_email['body_alternative'],
'message_post: notification email body alternative should contain the body')
self.assertNotIn('<p>', sent_email['body_alternative'],
'message_post: notification email body alternative still contains html')
self.assertIn(msg_message_id, sent_email['references'],
'message_post: notification email references lacks parent message message_id')
# Test: attachments + download
for attach in msg.attachment_ids:
self.assertEqual(attach.res_model, 'mail.group',
'message_post: mail.message attachment res_model incorrect')
self.assertEqual(attach.res_id, self.group_pigs_id,
'message_post: mail.message attachment res_id incorrect')
# Test: Dédé has been notified -> should also have been notified of the parent message
msg = self.mail_message.browse(cr, uid, msg1_id)
msg_pids = set([partner.id for partner in msg.notified_partner_ids])
test_pids = set([self.partner_admin_id, p_b_id, p_c_id, p_d_id])
self.assertEqual(test_pids, msg_pids, 'message_post: mail.message parent notification not created')
# Do: reply to last message
msg3_id = self.mail_group.message_post(cr, user_raoul.id, self.group_pigs_id, body='Test', parent_id=msg2_id)
msg = self.mail_message.browse(cr, uid, msg3_id)
# Test: check that its parent will be the first message
self.assertEqual(msg.parent_id.id, msg1_id, 'message_post did not flatten the thread structure')
def test_25_message_compose_wizard(self):
""" Tests designed for the mail.compose.message wizard. """
cr, uid, user_raoul, group_pigs = self.cr, self.uid, self.user_raoul, self.group_pigs
mail_compose = self.registry('mail.compose.message')
# --------------------------------------------------
# Data creation
# --------------------------------------------------
# 0 - Update existing users-partners
self.res_users.write(cr, uid, [uid], {'email': 'a@a'})
self.res_users.write(cr, uid, [self.user_raoul_id], {'email': 'r@r'})
# 1 - Bert Tartopoils, with email, should receive emails for comments and emails
p_b_id = self.res_partner.create(cr, uid, {'name': 'Bert Tartopoils', 'email': 'b@b'})
# 2 - Carine Poilvache, with email, should receive emails for emails
p_c_id = self.res_partner.create(cr, uid, {'name': 'Carine Poilvache', 'email': 'c@c', 'notify_email': 'always'})
# 3 - Dédé Grosbedon, with email, should receive emails for every message
p_d_id = self.res_partner.create(cr, uid, {'name': 'Dédé Grosbedon', 'email': 'd@d', 'notify_email': 'always'})
# 4 - Create a Bird mail.group, that will be used to test mass mailing
group_bird_id = self.mail_group.create(cr, uid,
{
'name': 'Bird',
'description': 'Bird resistance',
}, context={'mail_create_nolog': True})
group_bird = self.mail_group.browse(cr, uid, group_bird_id)
# 5 - Mail data
_subject = 'Pigs'
_body = 'Pigs <b>rule</b>'
_reply_subject = 'Re: %s' % _subject
_attachments = [
{'name': 'First', 'datas_fname': 'first.txt', 'datas': 'My first attachment'.encode('base64')},
{'name': 'Second', 'datas_fname': 'second.txt', 'datas': 'My second attachment'.encode('base64')}
]
_attachments_test = [('first.txt', 'My first attachment'), ('second.txt', 'My second attachment')]
# 6 - Subscribe Bert to Pigs
group_pigs.message_subscribe([p_b_id])
# --------------------------------------------------
# CASE1: wizard + partners + context keys
# --------------------------------------------------
# Do: Raoul wizard-composes on Pigs with auto-follow for partners, not for author
compose_id = mail_compose.create(cr, user_raoul.id,
{
'subject': _subject,
'body': _body,
'partner_ids': [(4, p_c_id), (4, p_d_id)],
}, context={
'default_composition_mode': 'comment',
'default_model': 'mail.group',
'default_res_id': self.group_pigs_id,
})
compose = mail_compose.browse(cr, uid, compose_id)
# Test: mail.compose.message: composition_mode, model, res_id
self.assertEqual(compose.composition_mode, 'comment', 'compose wizard: mail.compose.message incorrect composition_mode')
self.assertEqual(compose.model, 'mail.group', 'compose wizard: mail.compose.message incorrect model')
self.assertEqual(compose.res_id, self.group_pigs_id, 'compose wizard: mail.compose.message incorrect res_id')
# Do: Post the comment
mail_compose.send_mail(cr, user_raoul.id, [compose_id], {'mail_post_autofollow': True, 'mail_create_nosubscribe': True})
group_pigs.refresh()
message = group_pigs.message_ids[0]
# Test: mail.group: followers (c and d added by auto follow key; raoul not added by nosubscribe key)
pigs_pids = [p.id for p in group_pigs.message_follower_ids]
test_pids = [self.partner_admin_id, p_b_id, p_c_id, p_d_id]
self.assertEqual(set(pigs_pids), set(test_pids),
'compose wizard: mail_post_autofollow and mail_create_nosubscribe context keys not correctly taken into account')
# Test: mail.message: subject, body inside p
self.assertEqual(message.subject, _subject, 'compose wizard: mail.message incorrect subject')
self.assertEqual(message.body, '<p>%s</p>' % _body, 'compose wizard: mail.message incorrect body')
# Test: mail.message: notified_partner_ids = admin + bert (followers) + c + d (recipients)
msg_pids = [partner.id for partner in message.notified_partner_ids]
test_pids = [self.partner_admin_id, p_b_id, p_c_id, p_d_id]
self.assertEqual(set(msg_pids), set(test_pids),
'compose wizard: mail.message notified_partner_ids incorrect')
# --------------------------------------------------
# CASE2: reply + attachments
# --------------------------------------------------
# Do: Reply with attachments
compose_id = mail_compose.create(cr, user_raoul.id,
{
'attachment_ids': [(0, 0, _attachments[0]), (0, 0, _attachments[1])]
}, context={
'default_composition_mode': 'comment',
'default_res_id': self.group_pigs_id,
'default_parent_id': message.id
})
compose = mail_compose.browse(cr, uid, compose_id)
# Test: mail.compose.message: model, res_id, parent_id
self.assertEqual(compose.model, 'mail.group', 'compose wizard: mail.compose.message incorrect model')
self.assertEqual(compose.res_id, self.group_pigs_id, 'compose wizard: mail.compose.message incorrect res_id')
self.assertEqual(compose.parent_id.id, message.id, 'compose wizard: mail.compose.message incorrect parent_id')
# Test: mail.compose.message: subject as Re:.., body, parent_id
self.assertEqual(compose.subject, _reply_subject, 'compose wizard: mail.compose.message incorrect subject')
self.assertFalse(compose.body, 'compose wizard: mail.compose.message body should not contain parent message body')
self.assertEqual(compose.parent_id and compose.parent_id.id, message.id, 'compose wizard: mail.compose.message parent_id incorrect')
# Test: mail.compose.message: attachments
for attach in compose.attachment_ids:
self.assertIn((attach.datas_fname, attach.datas.decode('base64')), _attachments_test,
'compose wizard: mail.message attachment name / data incorrect')
# --------------------------------------------------
# CASE3: mass_mail on Pigs and Bird
# --------------------------------------------------
# Do: Compose in mass_mail_mode on pigs and bird
compose_id = mail_compose.create(
cr, user_raoul.id, {
'subject': _subject,
'body': '${object.description}',
'partner_ids': [(4, p_c_id), (4, p_d_id)],
}, context={
'default_composition_mode': 'mass_mail',
'default_model': 'mail.group',
'default_res_id': False,
'active_ids': [self.group_pigs_id, group_bird_id],
})
compose = mail_compose.browse(cr, uid, compose_id)
# Do: Post the comment, get created message for each group
mail_compose.send_mail(cr, user_raoul.id, [compose_id], context={
'default_res_id': -1,
'active_ids': [self.group_pigs_id, group_bird_id]
})
# check mail_mail
mail_mail_ids = self.mail_mail.search(cr, uid, [('subject', '=', _subject)])
for mail_mail in self.mail_mail.browse(cr, uid, mail_mail_ids):
self.assertEqual(set([p.id for p in mail_mail.recipient_ids]), set([p_c_id, p_d_id]),
'compose wizard: mail_mail mass mailing: mail.mail in mass mail incorrect recipients')
# check logged messages
group_pigs.refresh()
group_bird.refresh()
message1 = group_pigs.message_ids[0]
message2 = group_bird.message_ids[0]
# Test: Pigs and Bird did receive their message
test_msg_ids = self.mail_message.search(cr, uid, [], limit=2)
self.assertIn(message1.id, test_msg_ids, 'compose wizard: Pigs did not receive its mass mailing message')
self.assertIn(message2.id, test_msg_ids, 'compose wizard: Bird did not receive its mass mailing message')
# Test: mail.message: subject, body, subtype, notified partners (nobody + specific recipients)
self.assertEqual(message1.subject, _subject,
'compose wizard: message_post: mail.message in mass mail subject incorrect')
self.assertEqual(message1.body, '<p>%s</p>' % group_pigs.description,
'compose wizard: message_post: mail.message in mass mail body incorrect')
# self.assertEqual(set([p.id for p in message1.notified_partner_ids]), set([p_c_id, p_d_id]),
# 'compose wizard: message_post: mail.message in mass mail incorrect notified partners')
self.assertEqual(message2.subject, _subject,
'compose wizard: message_post: mail.message in mass mail subject incorrect')
self.assertEqual(message2.body, '<p>%s</p>' % group_bird.description,
'compose wizard: message_post: mail.message in mass mail body incorrect')
# self.assertEqual(set([p.id for p in message2.notified_partner_ids]), set([p_c_id, p_d_id]),
# 'compose wizard: message_post: mail.message in mass mail incorrect notified partners')
# Test: mail.group followers: author not added as follower in mass mail mode
pigs_pids = [p.id for p in group_pigs.message_follower_ids]
test_pids = [self.partner_admin_id, p_b_id, p_c_id, p_d_id]
self.assertEqual(set(pigs_pids), set(test_pids),
'compose wizard: mail_post_autofollow and mail_create_nosubscribe context keys not correctly taken into account')
bird_pids = [p.id for p in group_bird.message_follower_ids]
test_pids = [self.partner_admin_id]
self.assertEqual(set(bird_pids), set(test_pids),
'compose wizard: mail_post_autofollow and mail_create_nosubscribe context keys not correctly taken into account')
# Do: compose in mass_mail mode coming from a list view: the active_domain in context should be supported
compose_id = mail_compose.create(cr, user_raoul.id,
{
'subject': _subject,
'body': '${object.description}',
'partner_ids': [(4, p_c_id), (4, p_d_id)],
}, context={
'default_composition_mode': 'mass_mail',
'default_model': 'mail.group',
'default_res_id': False,
'active_ids': [self.group_pigs_id],
'active_domain': [('name', 'in', ['Pigs', 'Bird'])],
})
compose = mail_compose.browse(cr, uid, compose_id)
# Do: Post the comment, get created message for each group
mail_compose.send_mail(
cr, user_raoul.id, [compose_id], context={
'default_res_id': -1,
'active_ids': [self.group_pigs_id, group_bird_id]
})
group_pigs.refresh()
group_bird.refresh()
message1 = group_pigs.message_ids[0]
message2 = group_bird.message_ids[0]
# Test: Pigs and Bird did receive their message
test_msg_ids = self.mail_message.search(cr, uid, [], limit=2)
self.assertIn(message1.id, test_msg_ids, 'compose wizard: Pigs did not receive its mass mailing message')
self.assertIn(message2.id, test_msg_ids, 'compose wizard: Bird did not receive its mass mailing message')
def test_30_needaction(self):
""" Tests for mail.message needaction. """
cr, uid, user_admin, user_raoul, group_pigs = self.cr, self.uid, self.user_admin, self.user_raoul, self.group_pigs
na_admin_base = self.mail_message._needaction_count(cr, uid, domain=[])
na_demo_base = self.mail_message._needaction_count(cr, user_raoul.id, domain=[])
# Test: number of unread notification = needaction on mail.message
notif_ids = self.mail_notification.search(cr, uid, [
('partner_id', '=', user_admin.partner_id.id),
('is_read', '=', False)
])
na_count = self.mail_message._needaction_count(cr, uid, domain=[])
self.assertEqual(len(notif_ids), na_count, 'unread notifications count does not match needaction count')
# Do: post 2 message on group_pigs as admin, 3 messages as demo user
for dummy in range(2):
group_pigs.message_post(body='My Body', subtype='mt_comment')
raoul_pigs = group_pigs.sudo(user_raoul)
for dummy in range(3):
raoul_pigs.message_post(body='My Demo Body', subtype='mt_comment')
# Test: admin has 3 new notifications (from demo), and 3 new needaction
notif_ids = self.mail_notification.search(cr, uid, [
('partner_id', '=', user_admin.partner_id.id),
('is_read', '=', False)
])
self.assertEqual(len(notif_ids), na_admin_base + 3, 'Admin should have 3 new unread notifications')
na_admin = self.mail_message._needaction_count(cr, uid, domain=[])
na_admin_group = self.mail_message._needaction_count(cr, uid, domain=[('model', '=', 'mail.group'), ('res_id', '=', self.group_pigs_id)])
self.assertEqual(na_admin, na_admin_base + 3, 'Admin should have 3 new needaction')
self.assertEqual(na_admin_group, 3, 'Admin should have 3 needaction related to Pigs')
# Test: demo has 0 new notifications (not a follower, not receiving its own messages), and 0 new needaction
notif_ids = self.mail_notification.search(cr, uid, [
('partner_id', '=', user_raoul.partner_id.id),
('is_read', '=', False)
])
self.assertEqual(len(notif_ids), na_demo_base + 0, 'Demo should have 0 new unread notifications')
na_demo = self.mail_message._needaction_count(cr, user_raoul.id, domain=[])
na_demo_group = self.mail_message._needaction_count(cr, user_raoul.id, domain=[('model', '=', 'mail.group'), ('res_id', '=', self.group_pigs_id)])
self.assertEqual(na_demo, na_demo_base + 0, 'Demo should have 0 new needaction')
self.assertEqual(na_demo_group, 0, 'Demo should have 0 needaction related to Pigs')
def test_40_track_field(self):
""" Testing auto tracking of fields. """
def _strip_string_spaces(body):
return body.replace(' ', '').replace('\n', '')
# Data: subscribe Raoul to Pigs, because he will change the public attribute and may lose access to the record
cr, uid = self.cr, self.uid
self.mail_group.message_subscribe_users(cr, uid, [self.group_pigs_id], [self.user_raoul_id])
# Data: res.users.group, to test group_public_id automatic logging
group_system_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'base', 'group_system')
group_system_id = group_system_ref and group_system_ref[1] or False
# Data: custom subtypes
mt_private_id = self.mail_message_subtype.create(cr, uid, {'name': 'private', 'description': 'Private public'})
self.ir_model_data.create(cr, uid, {'name': 'mt_private', 'model': 'mail.message.subtype', 'module': 'mail', 'res_id': mt_private_id})
mt_name_supername_id = self.mail_message_subtype.create(cr, uid, {'name': 'name_supername', 'description': 'Supername name'})
self.ir_model_data.create(cr, uid, {'name': 'mt_name_supername', 'model': 'mail.message.subtype', 'module': 'mail', 'res_id': mt_name_supername_id})
mt_group_public_set_id = self.mail_message_subtype.create(cr, uid, {'name': 'group_public_set', 'description': 'Group set'})
self.ir_model_data.create(cr, uid, {'name': 'mt_group_public_set', 'model': 'mail.message.subtype', 'module': 'mail', 'res_id': mt_group_public_set_id})
mt_group_public_id = self.mail_message_subtype.create(cr, uid, {'name': 'group_public', 'description': 'Group changed'})
self.ir_model_data.create(cr, uid, {'name': 'mt_group_public', 'model': 'mail.message.subtype', 'module': 'mail', 'res_id': mt_group_public_id})
# Data: alter mail_group model for testing purposes (test on classic, selection and many2one fields)
cls = type(self.mail_group)
self.assertNotIn('_track', cls.__dict__)
cls._track = {
'public': {
'mail.mt_private': lambda self, cr, uid, obj, ctx=None: obj.public == 'private',
},
'name': {
'mail.mt_name_supername': lambda self, cr, uid, obj, ctx=None: obj.name == 'supername',
},
'group_public_id': {
'mail.mt_group_public_set': lambda self, cr, uid, obj, ctx=None: obj.group_public_id,
'mail.mt_group_public': lambda self, cr, uid, obj, ctx=None: True,
},
}
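# track_visibility drives what the tracking engine logs: 'always' reports the
# field value on every tracked write, 'onchange' only when that field changed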
visibility = {'public': 'onchange', 'name': 'always', 'group_public_id': 'onchange'}
for key in visibility:
self.assertFalse(hasattr(getattr(cls, key), 'track_visibility'))
getattr(cls, key).track_visibility = visibility[key]
@self.addCleanup
def cleanup():
delattr(cls, '_track')
for key in visibility:
del getattr(cls, key).track_visibility
# Test: change public -> tracked on change, no subtype matched; name logged as always tracked
self.mail_group.write(cr, self.user_raoul_id, [self.group_pigs_id], {'public': 'public'})
self.group_pigs.refresh()
self.assertEqual(len(self.group_pigs.message_ids), 1, 'tracked: a message should have been produced')
# Test: first produced message: no subtype, public change tracked
last_msg = self.group_pigs.message_ids[-1]
self.assertFalse(last_msg.subtype_id, 'tracked: message should not have been linked to a subtype')
self.assertIn(u'SelectedGroupOnly\u2192Public', _strip_string_spaces(last_msg.body), 'tracked: message body incorrect')
self.assertIn('Pigs', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold always tracked field')
# Test: change name to supername and public to private -> 2 subtypes
self.mail_group.write(cr, self.user_raoul_id, [self.group_pigs_id], {'name': 'supername', 'public': 'private'})
self.group_pigs.refresh()
self.assertEqual(len(self.group_pigs.message_ids), 3, 'tracked: two messages should have been produced')
# Test: first produced message: mt_private
last_msg = self.group_pigs.message_ids[-2]
self.assertEqual(last_msg.subtype_id.id, mt_private_id, 'tracked: message should be linked to mt_private subtype')
self.assertIn('Private public', last_msg.body, 'tracked: message body does not hold the subtype description')
self.assertIn(u'Pigs\u2192supername', _strip_string_spaces(last_msg.body), 'tracked: message body incorrect')
# Test: second produced message: mt_name_supername
last_msg = self.group_pigs.message_ids[-3]
self.assertEqual(last_msg.subtype_id.id, mt_name_supername_id, 'tracked: message should be linked to mt_name_supername subtype')
self.assertIn('Supername name', last_msg.body, 'tracked: message body does not hold the subtype description')
self.assertIn(u'Public\u2192Private', _strip_string_spaces(last_msg.body), 'tracked: message body incorrect')
self.assertIn(u'Pigs\u2192supername', _strip_string_spaces(last_msg.body), 'tracked feature: message body does not hold always tracked field')
# Test: change public back to public and set group_public_id -> 2 subtypes, name always tracked
self.mail_group.write(cr, self.user_raoul_id, [self.group_pigs_id], {'public': 'public', 'group_public_id': group_system_id})
self.group_pigs.refresh()
self.assertEqual(len(self.group_pigs.message_ids), 5, 'tracked: two messages should have been produced')
# Test: first produced message: mt_group_public_set_id, with name always tracked, public tracked on change
last_msg = self.group_pigs.message_ids[-4]
self.assertEqual(last_msg.subtype_id.id, mt_group_public_set_id, 'tracked: message should be linked to mt_group_public_set_id')
self.assertIn('Group set', last_msg.body, 'tracked: message body does not hold the subtype description')
self.assertIn(u'Private\u2192Public', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold changed tracked field')
self.assertIn(u'HumanResources/Employee\u2192Administration/Settings', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold always tracked field')
# Test: second produced message: mt_group_public_id, with name always tracked, public tracked on change
last_msg = self.group_pigs.message_ids[-5]
self.assertEqual(last_msg.subtype_id.id, mt_group_public_id, 'tracked: message should be linked to mt_group_public_id')
self.assertIn('Group changed', last_msg.body, 'tracked: message body does not hold the subtype description')
self.assertIn(u'Private\u2192Public', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold changed tracked field')
self.assertIn(u'HumanResources/Employee\u2192Administration/Settings', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold always tracked field')
# Test: change group_public_id to False -> 1 subtype, name always tracked
self.mail_group.write(cr, self.user_raoul_id, [self.group_pigs_id], {'group_public_id': False})
self.group_pigs.refresh()
self.assertEqual(len(self.group_pigs.message_ids), 6, 'tracked: one message should have been produced')
# Test: produced message: mt_group_public, tracking the group_public_id change, with name always tracked
last_msg = self.group_pigs.message_ids[-6]
self.assertEqual(last_msg.subtype_id.id, mt_group_public_id, 'tracked: message should be linked to mt_group_public_id')
self.assertIn('Group changed', last_msg.body, 'tracked: message body does not hold the subtype description')
self.assertIn(u'Administration/Settings\u2192', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold the changed tracked field')
# Test: change not tracked field, no tracking message
self.mail_group.write(cr, self.user_raoul_id, [self.group_pigs_id], {'description': 'Dummy'})
self.group_pigs.refresh()
self.assertEqual(len(self.group_pigs.message_ids), 6, 'tracked: No message should have been produced')
|
agpl-3.0
|
miloszz/DIRAC
|
ResourceStatusSystem/Client/ResourceManagementClient.py
|
6
|
41110
|
# $HeadURL: $
""" ResourceManagementClient
Client to interact with the ResourceManagementDB.
"""
from DIRAC import gLogger, S_ERROR
from DIRAC.Core.DISET.RPCClient import RPCClient
__RCSID__ = '$Id: $'
class ResourceManagementClient( object ):
"""
The :class:`ResourceManagementClient` class exposes the :mod:`DIRAC.ResourceManagement`
API. All functions you need are on this client.
It has the 'direct-db-access' functions, the ones of the type:
- insert
- update
- select
- delete
that return parts of the RSSConfiguration stored on the CS and used everywhere
in the RSS module. Finally, and probably more interestingly, it exposes a set
of functions, loosely called 'boosters': home-made helpers built on top of the
basic database functions that are useful enough to be exposed.
The client will ALWAYS try to connect to the DB first and, in case of failure,
to the XML-RPC server ( namely :class:`ResourceManagementDB` and
:class:`ResourceManagementHandler` ).
You can use this client in the following way:
>>> from DIRAC.ResourceManagementSystem.Client.ResourceManagementClient import ResourceManagementClient
>>> rsClient = ResourceManagementClient()
All functions calling methods exposed on the database or on the booster are
making use of some syntactic sugar, in this case a decorator that simplifies
the client considerably.
"""
def __init__( self , serviceIn = None ):
'''
The client tries to connect to :class:`ResourceManagementDB` by default. If that
fails, it then tries to connect to the service :class:`ResourceManagementHandler`.
'''
if not serviceIn:
self.gate = RPCClient( "ResourceStatus/ResourceManagement" )
else:
self.gate = serviceIn
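# All public methods below delegate to self._query( queryType, tableName, locals() ),
# defined elsewhere in this class. A minimal sketch of what that helper roughly does,
# assuming the RPC gate exposes one endpoint per query type (hypothetical body, for
# illustration only; the real implementation may differ):
#
#   def _query( self, queryType, tableName, parameters ):
#     parameters.pop( 'self' )            # keep only the column arguments
#     meta = parameters.pop( 'meta' ) or {}
#     meta[ 'table' ] = tableName         # the docstrings promise this key is filled in
#     return getattr( self.gate, queryType )( parameters, meta )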
# AccountingCache Methods ....................................................
def selectAccountingCache( self, name = None, plotType = None, plotName = None,
result = None, dateEffective = None,
lastCheckTime = None, meta = None ):
'''
Gets from AccountingCache all rows that match the parameters given.
:Parameters:
**name** - `[, string, list]`
name of an individual of the grid topology
**plotType** - `[, string, list]`
the plotType name (e.g. 'Pilot')
**plotName** - `[, string, list]`
the plot name
**result** - `[, string, list]`
command result
**dateEffective** - `[, datetime, list]`
time-stamp from which the result is effective
**lastCheckTime** - `[, datetime, list]`
time-stamp setting last time the result was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable-msg=W0613
return self._query( 'select', 'AccountingCache', locals() )
def addOrModifyAccountingCache( self, name = None, plotType = None,
plotName = None, result = None,
dateEffective = None, lastCheckTime = None,
meta = None ):
'''
Adds or updates-if-duplicated to AccountingCache. Using `name`, `plotType`
and `plotName` to query the database, decides whether to insert or update the
table.
:Parameters:
**name** - `string`
name of an individual of the grid topology
**plotType** - `string`
the plotType name (e.g. 'Pilot')
**plotName** - `string`
the plot name
**result** - `string`
command result
**dateEffective** - `datetime`
time-stamp from which the result is effective
**lastCheckTime** - `datetime`
time-stamp setting last time the result was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable-msg=W0613
meta = { 'onlyUniqueKeys' : True }
return self._query( 'addOrModify', 'AccountingCache', locals() )
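# Example call (a sketch; names and values are illustrative only):
#   rmClient = ResourceManagementClient()
#   res = rmClient.addOrModifyAccountingCache( name = 'LCG.CERN.ch', plotType = 'Pilot',
#                                              plotName = 'AccumulatedPilots', result = '10' )
#   if not res[ 'OK' ]:
#     gLogger.error( res[ 'Message' ] )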
# GGUSTicketsCache Methods ...................................................
#FIXME: only one method
def selectGGUSTicketsCache( self, gocSite = None, link = None, openTickets = None,
tickets = None, lastCheckTime = None, meta = None ):
# Unused argument
# pylint: disable-msg=W0613
return self._query( 'select', 'GGUSTicketsCache', locals() )
def deleteGGUSTicketsCache( self, gocSite = None, link = None, openTickets = None,
tickets = None, lastCheckTime = None, meta = None ):
# Unused argument
# pylint: disable-msg=W0613
return self._query( 'delete', 'GGUSTicketsCache', locals() )
def addOrModifyGGUSTicketsCache( self, gocSite = None, link = None,
openTickets = None, tickets = None,
lastCheckTime = None, meta = None ):
# Unused argument
# pylint: disable-msg=W0613
meta = { 'onlyUniqueKeys' : True }
return self._query( 'addOrModify', 'GGUSTicketsCache', locals() )
# DowntimeCache Methods ......................................................
def selectDowntimeCache( self, downtimeID = None, element = None, name = None,
startDate = None, endDate = None, severity = None,
description = None, link = None, dateEffective = None,
lastCheckTime = None, gocdbServiceType = None, meta = None ):
'''
Gets from DowntimeCache all rows that match the parameters given.
:Parameters:
**downtimeID** - [, `string`, `list`]
unique id for the downtime
**element** - [, `string`, `list`]
valid element in the topology ( Site, Resource, Node )
**name** - [, `string`, `list`]
name of the element where the downtime applies
**startDate** - [, `datetime`, `list`]
starting time for the downtime
**endDate** - [, `datetime`, `list`]
ending time for the downtime
**severity** - [, `string`, `list`]
severity assigned by the gocdb
**description** - [, `string`, `list`]
brief description of the downtime
**link** - [, `string`, `list`]
url to the details
**dateEffective** - [, `datetime`, `list`]
time when the entry was created in this database
**lastCheckTime** - [, `datetime`, `list`]
time-stamp setting last time the result was checked
**gocdbServiceType** - `string`
service type assigned by gocdb
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable-msg=W0613
return self._query( 'select', 'DowntimeCache', locals() )
def deleteDowntimeCache( self, downtimeID = None, element = None, name = None,
startDate = None, endDate = None, severity = None,
description = None, link = None, dateEffective = None,
lastCheckTime = None, gocdbServiceType = None, meta = None ):
'''
Deletes from DowntimeCache all rows that match the parameters given.
:Parameters:
**downtimeID** - [, `string`, `list`]
unique id for the downtime
**element** - [, `string`, `list`]
valid element in the topology ( Site, Resource, Node )
**name** - [, `string`, `list`]
name of the element where the downtime applies
**startDate** - [, `datetime`, `list`]
starting time for the downtime
**endDate** - [, `datetime`, `list`]
ending time for the downtime
**severity** - [, `string`, `list`]
severity assigned by the gocdb
**description** - [, `string`, `list`]
brief description of the downtime
**link** - [, `string`, `list`]
url to the details
**dateEffective** - [, `datetime`, `list`]
time when the entry was created in this database
**lastCheckTime** - [, `datetime`, `list`]
time-stamp setting last time the result was checked
**gocdbServiceType** - `string`
service type assigned by gocdb
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable-msg=W0613
return self._query( 'delete', 'DowntimeCache', locals() )
def addOrModifyDowntimeCache( self, downtimeID = None, element = None, name = None,
startDate = None, endDate = None, severity = None,
description = None, link = None, dateEffective = None,
lastCheckTime = None, gocdbServiceType = None, meta = None ):
'''
Adds or updates-if-duplicated to DowntimeCache. Using `downtimeID` to query
the database, decides whether to insert or update the table.
:Parameters:
**downtimeID** - `string`
unique id for the downtime
**element** - `string`
valid element in the topology ( Site, Resource, Node )
**name** - `string`
name of the element where the downtime applies
**startDate** - `datetime`
starting time for the downtime
**endDate** - `datetime`
ending time for the downtime
**severity** - `string`
severity assigned by the gocdb
**description** - `string`
brief description of the downtime
**link** - `string`
url to the details
**dateEffective** - `datetime`
time when the entry was created in this database
**lastCheckTime** - `datetime`
time-stamp setting last time the result was checked
**gocdbServiceType** - `string`
service type assigned by gocdb
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable-msg=W0613
meta = { 'onlyUniqueKeys' : True }
return self._query( 'addOrModify', 'DowntimeCache', locals() )
# JobCache Methods ...........................................................
def selectJobCache( self, site = None, maskStatus = None, efficiency = None,
status = None, lastCheckTime = None, meta = None ):
'''
Gets from JobCache all rows that match the parameters given.
:Parameters:
**site** - `[, string, list ]`
name of the site element
**maskStatus** - `[, string, list ]`
maskStatus for the site
**efficiency** - `[, float, list ]`
job efficiency ( successful / total )
**status** - `[, string, list ]`
status computed for the site
**lastCheckTime** - `[, datetime, list ]`
time-stamp setting last time the result was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable-msg=W0613
return self._query( 'select', 'JobCache', locals() )
def deleteJobCache( self, site = None, maskStatus = None, efficiency = None,
status = None, lastCheckTime = None, meta = None ):
'''
Deletes from JobCache all rows that match the parameters given.
:Parameters:
**site** - `[, string, list ]`
name of the site element
**maskStatus** - `[, string, list ]`
maskStatus for the site
**efficiency** - `[, float, list ]`
job efficiency ( successful / total )
**status** - `[, string, list ]`
status computed for the site
**lastCheckTime** - `[, datetime, list ]`
time-stamp setting last time the result was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable-msg=W0613
return self._query( 'delete', 'JobCache', locals() )
def addOrModifyJobCache( self, site = None, maskStatus = None, efficiency = None,
status = None, lastCheckTime = None, meta = None ):
'''
Adds or updates-if-duplicated to JobCache. Using `site` to query
the database, decides whether to insert or update the table.
:Parameters:
**site** - `[, string, list ]`
name of the site element
**maskStatus** - `[, string, list ]`
maskStatus for the site
**efficiency** - `[, float, list ]`
job efficiency ( successful / total )
**status** - `[, string, list ]`
status computed for the site
**lastCheckTime** - `[, datetime, list ]`
time-stamp setting last time the result was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable-msg=W0613
meta = { 'onlyUniqueKeys' : True }
return self._query( 'addOrModify', 'JobCache', locals() )
# TransferCache Methods ......................................................
def selectTransferCache( self, sourceName = None, destinationName = None, metric = None,
value = None, lastCheckTime = None, meta = None ):
'''
Gets from TransferCache all rows that match the parameters given.
:Parameters:
**sourceName** - `[, string, list ]`
name of the element taken as source of the transfer
**destinationName** - `[, string, list ]`
name of the element taken as destination of the transfer
**metric** - `[, string, list ]`
measured quality of failed transfers
**value** - `[, float, list ]`
percentage
**lastCheckTime** - `[, datetime, list ]`
time-stamp setting last time the result was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable-msg=W0613
return self._query( 'select', 'TransferCache', locals() )
def deleteTransferCache( self, sourceName = None, destinationName = None, metric = None,
value = None, lastCheckTime = None, meta = None ):
'''
Deletes from TransferCache all rows that match the parameters given.
:Parameters:
**sourceName** - `[, string, list ]`
name of the element taken as source of the transfer
**destinationName** - `[, string, list ]`
name of the element taken as destination of the transfer
**metric** - `[, string, list ]`
measured quality of failed transfers
**value** - `[, float, list ]`
percentage
**lastCheckTime** - `[, datetime, list ]`
time-stamp setting last time the result was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable-msg=W0613
return self._query( 'delete', 'TransferCache', locals() )
def addOrModifyTransferCache( self, sourceName = None, destinationName = None,
metric = None, value = None, lastCheckTime = None,
meta = None ):
'''
Adds or updates-if-duplicated to TransferCache. Using `sourceName`, `destinationName`
and `metric` to query the database, decides whether to insert or update the table.
:Parameters:
**sourceName** - `string`
name of the element taken as source of the transfer
**destinationName** - `string`
name of the element taken as destination of the transfer
**metric** - `string`
measured quality of failed transfers
**value** - `float`
percentage
**lastCheckTime** - `datetime`
time-stamp setting last time the result was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable-msg=W0613
meta = { 'onlyUniqueKeys' : True }
return self._query( 'addOrModify', 'TransferCache', locals() )
# PilotCache Methods .........................................................
def selectPilotCache( self, site = None, cE = None, pilotsPerJob = None,
pilotJobEff = None, status = None, lastCheckTime = None,
meta = None ):
'''
Gets from PilotCache all rows that match the parameters given.
:Parameters:
**site** - `[, string, list ]`
name of the site
**cE** - `[, string, list ]`
name of the CE, or 'Multiple' if all site CEs are considered
**pilotsPerJob** - `[, float, list ]`
measure calculated
**pilotJobEff** - `[, float, list ]`
percentage
**status** - `[, float, list ]`
status of the CE / Site
**lastCheckTime** - `[, datetime, list ]`
time-stamp setting last time the result was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
return self._query( 'select', 'PilotCache', locals() )
def deletePilotCache( self, site = None, cE = None, pilotsPerJob = None,
pilotJobEff = None, status = None, lastCheckTime = None,
meta = None ):
'''
Deletes from PilotCache all rows that match the parameters given.
:Parameters:
**site** - `[, string, list ]`
name of the site
**cE** - `[, string, list ]`
name of the CE, or 'Multiple' if all site CEs are considered
**pilotsPerJob** - `[, float, list ]`
measure calculated
**pilotJobEff** - `[, float, list ]`
percentage
**status** - `[, float, list ]`
status of the CE / Site
**lastCheckTime** - `[, datetime, list ]`
time-stamp setting last time the result was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
return self._query( 'delete', 'PilotCache', locals() )
def addOrModifyPilotCache( self, site = None, cE = None, pilotsPerJob = None,
pilotJobEff = None, status = None, lastCheckTime = None,
meta = None ):
'''
Adds or updates-if-duplicated to PilotCache. Using `site` and `cE`
to query the database, decides whether to insert or update the table.
:Parameters:
**site** - `string`
name of the site
**cE** - `string`
name of the CE, or 'Multiple' if all site CEs are considered
**pilotsPerJob** - `float`
measure calculated
**pilotJobEff** - `float`
percentage
**status** - `string`
status of the CE / Site
**lastCheckTime** - `datetime`
time-stamp setting last time the result was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
meta = { 'onlyUniqueKeys' : True }
return self._query( 'addOrModify', 'PilotCache', locals() )
# PolicyResult Methods .......................................................
def selectPolicyResult( self, element = None, name = None, policyName = None,
statusType = None, status = None, reason = None,
lastCheckTime = None, meta = None ):
'''
Gets from PolicyResult all rows that match the parameters given.
:Parameters:
**element** - `[, string, list]`
it has to be a valid element ( ValidElement ), any of the defaults: `Site` \
| `Service` | `Resource` | `StorageElement`
**name** - `[, string, list]`
name of the element
**policyName** - `[, string, list]`
name of the policy
**statusType** - `[, string, list]`
it has to be a valid status type for the given element
**status** - `[, string, list]`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**reason** - `[, string, list]`
decision that triggered the assigned status
**lastCheckTime** - `[, datetime, list]`
time-stamp setting last time the policy result was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
return self._query( 'select', 'PolicyResult', locals() )
def deletePolicyResult( self, element = None, name = None,
policyName = None, statusType = None, status = None,
reason = None, lastCheckTime = None, meta = None ):
'''
Deletes from PolicyResult all rows that match the parameters given.
:Parameters:
    **element** - `[, string, list]`
it has to be a valid element ( ValidElement ), any of the defaults: `Site` \
| `Service` | `Resource` | `StorageElement`
**name** - `[, string, list]`
name of the element
**policyName** - `[, string, list]`
name of the policy
**statusType** - `[, string, list]`
      it has to be a valid status type for the given element
**status** - `[, string, list]`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**reason** - `[, string, list]`
decision that triggered the assigned status
**lastCheckTime** - `[, datetime, list]`
time-stamp setting last time the policy result was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
return self._query( 'delete', 'PolicyResult', locals() )
def addOrModifyPolicyResult( self, element = None, name = None,
policyName = None, statusType = None,
status = None, reason = None, dateEffective = None,
lastCheckTime = None, meta = None ):
'''
Adds or updates-if-duplicated to PolicyResult. Using `name`, `policyName` and
`statusType` to query the database, decides whether to insert or update the table.
:Parameters:
**element** - `string`
it has to be a valid element ( ValidElement ), any of the defaults: `Site` \
| `Service` | `Resource` | `StorageElement`
**name** - `string`
name of the element
**policyName** - `string`
name of the policy
**statusType** - `string`
it has to be a valid status type for the given element
**status** - `string`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**reason** - `string`
decision that triggered the assigned status
**dateEffective** - `datetime`
time-stamp from which the policy result is effective
**lastCheckTime** - `datetime`
time-stamp setting last time the policy result was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
meta = { 'onlyUniqueKeys' : True }
return self._query( 'addOrModify', 'PolicyResult', locals() )
# PolicyResultLog Methods ....................................................
def selectPolicyResultLog( self, element = None, name = None,
policyName = None, statusType = None, status = None,
reason = None, lastCheckTime = None, meta = None ):
'''
Gets from PolicyResultLog all rows that match the parameters given.
:Parameters:
**element** - `[, string, list]`
      it has to be a valid element ( ValidElement ), any of the defaults: `Site` \
| `Service` | `Resource` | `StorageElement`
**name** - `[, string, list]`
name of the element
**policyName** - `[, string, list]`
name of the policy
**statusType** - `[, string, list]`
it has to be a valid status type for the given element
**status** - `[, string, list]`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**reason** - `[, string, list]`
decision that triggered the assigned status
**lastCheckTime** - `[, datetime, list]`
time-stamp setting last time the policy result was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
return self._query( 'select', 'PolicyResultLog', locals() )
def deletePolicyResultLog( self, element = None, name = None,
policyName = None, statusType = None, status = None,
reason = None, lastCheckTime = None, meta = None ):
'''
    Deletes from PolicyResultLog all rows that match the parameters given.
:Parameters:
**element** - `[, string, list]`
      it has to be a valid element ( ValidElement ), any of the defaults: `Site` \
| `Service` | `Resource` | `StorageElement`
**name** - `[, string, list]`
name of the element
**policyName** - `[, string, list]`
name of the policy
**statusType** - `[, string, list]`
it has to be a valid status type for the given element
**status** - `[, string, list]`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**reason** - `[, string, list]`
decision that triggered the assigned status
**lastCheckTime** - `[, datetime, list]`
time-stamp setting last time the policy result was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
return self._query( 'delete', 'PolicyResultLog', locals() )
def addOrModifyPolicyResultLog( self, element = None, name = None,
policyName = None, statusType = None,
status = None, reason = None, lastCheckTime = None,
meta = None ):
'''
    Adds or updates-if-duplicated to PolicyResultLog. Using `name`, `policyName`
    and `statusType` to query the database, decides whether to insert or update the table.
:Parameters:
**element** - `string`
      it has to be a valid element ( ValidElement ), any of the defaults: `Site` \
| `Service` | `Resource` | `StorageElement`
**name** - `string`
name of the element
**policyName** - `string`
name of the policy
**statusType** - `string`
it has to be a valid status type for the given element
**status** - `string`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**reason** - `string`
decision that triggered the assigned status
**lastCheckTime** - `datetime`
time-stamp setting last time the policy result was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
meta = { 'onlyUniqueKeys' : True }
return self._query( 'addOrModify', 'PolicyResultLog', locals() )
# SpaceTokenOccupancyCache Methods ...........................................
def selectSpaceTokenOccupancyCache( self, endpoint = None, token = None,
total = None, guaranteed = None, free = None,
lastCheckTime = None, meta = None ):
'''
Gets from SpaceTokenOccupancyCache all rows that match the parameters given.
:Parameters:
**endpoint** - `[, string, list]`
srm endpoint
**token** - `[, string, list]`
name of the token
**total** - `[, integer, list]`
total terabytes
**guaranteed** - `[, integer, list]`
guaranteed terabytes
**free** - `[, integer, list]`
free terabytes
**lastCheckTime** - `[, datetime, list]`
time-stamp from which the result is effective
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
return self._query( 'select', 'SpaceTokenOccupancyCache', locals() )
def deleteSpaceTokenOccupancyCache( self, endpoint = None, token = None,
total = None, guaranteed = None, free = None,
lastCheckTime = None, meta = None ):
'''
Deletes from SpaceTokenOccupancyCache all rows that match the parameters given.
:Parameters:
**endpoint** - `[, string, list]`
srm endpoint
**token** - `[, string, list]`
name of the token
**total** - `[, integer, list]`
total terabytes
**guaranteed** - `[, integer, list]`
guaranteed terabytes
**free** - `[, integer, list]`
free terabytes
**lastCheckTime** - `[, datetime, list]`
time-stamp from which the result is effective
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
    # pylint: disable=W0613
return self._query( 'delete', 'SpaceTokenOccupancyCache', locals() )
def addOrModifySpaceTokenOccupancyCache( self, endpoint = None, token = None,
total = None, guaranteed = None,
free = None, lastCheckTime = None,
meta = None ):
'''
    Adds or updates-if-duplicated to SpaceTokenOccupancyCache. Using `endpoint` and `token`
to query the database, decides whether to insert or update the table.
:Parameters:
    **endpoint** - `string`
srm endpoint
**token** - `string`
name of the token
**total** - `integer`
total terabytes
**guaranteed** - `integer`
guaranteed terabytes
**free** - `integer`
free terabytes
**lastCheckTime** - `datetime`
time-stamp from which the result is effective
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
meta = { 'onlyUniqueKeys' : True }
return self._query( 'addOrModify', 'SpaceTokenOccupancyCache', locals() )
# UserRegistryCache Methods ..................................................
def selectUserRegistryCache( self, login = None, name = None, email = None,
lastCheckTime = None, meta = None ):
'''
Gets from UserRegistryCache all rows that match the parameters given.
:Parameters:
**login** - `[, string, list]`
user's login ID
**name** - `[, string, list]`
user's name
**email** - `[, string, list]`
user's email
**lastCheckTime** - `[, datetime, list]`
time-stamp from which the result is effective
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
return self._query( 'select', 'UserRegistryCache', locals() )
def deleteUserRegistryCache( self, login = None, name = None, email = None,
lastCheckTime = None, meta = None ):
'''
Deletes from UserRegistryCache all rows that match the parameters given.
:Parameters:
**login** - `[, string, list]`
user's login ID
**name** - `[, string, list]`
user's name
**email** - `[, string, list]`
user's email
**lastCheckTime** - `[, datetime, list]`
time-stamp from which the result is effective
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
return self._query( 'delete', 'UserRegistryCache', locals() )
def addOrModifyUserRegistryCache( self, login = None, name = None,
email = None, lastCheckTime = None, meta = None ):
'''
Adds or updates-if-duplicated to UserRegistryCache. Using `login` to query
the database, decides whether to insert or update the table.
:Parameters:
**login** - `string`
user's login ID
**name** - `string`
user's name
**email** - `string`
user's email
**lastCheckTime** - `datetime`
time-stamp from which the result is effective
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
meta = { 'onlyUniqueKeys' : True }
return self._query( 'addOrModify', 'UserRegistryCache', locals() )
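  # Hedged upsert sketch: with `onlyUniqueKeys`, a call such as
  #   client.addOrModifyUserRegistryCache( login = 'jdoe', name = 'John Doe',
  #                                        email = 'jdoe@example.org' )
  # inserts a new row keyed on `login`, or updates the existing one in place
  # ( the client variable and all values are illustrative ).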
# VOBOXCache Methods ........................................................
def selectVOBOXCache( self, site = None, system = None, serviceUp = None,
machineUp = None, lastCheckTime = None, meta = None ):
'''
Gets from VOBOXCache all rows that match the parameters given.
:Parameters:
**site** - `[, string, list ]`
name of the site hosting the VOBOX
**system** - `[, string, list ]`
DIRAC system ( e.g. ConfigurationService )
**serviceUp** - `[, integer, list]`
seconds the system has been up
**machineUp** - `[, integer, list]`
seconds the machine has been up
**lastCheckTime** - `[, datetime, list]`
time-stamp from which the result is effective
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
return self._query( 'select', 'VOBOXCache', locals() )
def deleteVOBOXCache( self, site = None, system = None, serviceUp = None,
machineUp = None, lastCheckTime = None, meta = None ):
'''
Deletes from VOBOXCache all rows that match the parameters given.
:Parameters:
**site** - `[, string, list ]`
name of the site hosting the VOBOX
**system** - `[, string, list ]`
DIRAC system ( e.g. ConfigurationService )
**serviceUp** - `[, integer, list]`
seconds the system has been up
**machineUp** - `[, integer, list]`
seconds the machine has been up
**lastCheckTime** - `[, datetime, list]`
time-stamp from which the result is effective
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
    # pylint: disable=W0613
return self._query( 'delete', 'VOBOXCache', locals() )
def addOrModifyVOBOXCache( self, site = None, system = None, serviceUp = None,
machineUp = None, lastCheckTime = None, meta = None ):
'''
Adds or updates-if-duplicated to VOBOXCache. Using `site` and `system` to query
the database, decides whether to insert or update the table.
:Parameters:
**site** - `string`
name of the site hosting the VOBOX
**system** - `string`
DIRAC system ( e.g. ConfigurationService )
**serviceUp** - `integer`
seconds the system has been up
**machineUp** - `integer`
seconds the machine has been up
**lastCheckTime** - `datetime`
time-stamp from which the result is effective
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
meta = { 'onlyUniqueKeys' : True }
return self._query( 'addOrModify', 'VOBOXCache', locals() )
# ErrorReportBuffer Methods ..................................................
def insertErrorReportBuffer( self, name = None, elementType = None, reporter = None,
errorMessage = None, operation = None, arguments = None,
dateEffective = None, meta = None ):
    '''
    Inserts into ErrorReportBuffer a new row built from the given values.
    :return: S_OK() || S_ERROR()
    '''
    # Unused argument
    # pylint: disable=W0613
    return self._query( 'insert', 'ErrorReportBuffer', locals() )
def selectErrorReportBuffer( self, name = None, elementType = None, reporter = None,
errorMessage = None, operation = None, arguments = None,
dateEffective = None, meta = None ):
    '''
    Gets from ErrorReportBuffer all rows that match the parameters given.
    :return: S_OK() || S_ERROR()
    '''
    # Unused argument
    # pylint: disable=W0613
    return self._query( 'select', 'ErrorReportBuffer', locals() )
def deleteErrorReportBuffer( self, name = None, elementType = None, reporter = None,
errorMessage = None, operation = None, arguments = None,
dateEffective = None, meta = None ):
    '''
    Deletes from ErrorReportBuffer all rows that match the parameters given.
    :return: S_OK() || S_ERROR()
    '''
    # Unused argument
    # pylint: disable=W0613
    return self._query( 'delete', 'ErrorReportBuffer', locals() )
# Protected methods ..........................................................
def _query( self, queryType, tableName, parameters ):
'''
It is a simple helper, this way inheriting classes can use it.
'''
return self.__query( queryType, tableName, parameters )
def __query( self, queryType, tableName, parameters ):
'''
This method is a rather important one. It will format the input for the DB
queries, instead of doing it on a decorator. Two dictionaries must be passed
to the DB. First one contains 'columnName' : value pairs, being the key
    lower camel case. The second one must have, at least, a key named 'table'
with the right table name.
'''
# Functions we can call, just a light safety measure.
_gateFunctions = [ 'insert', 'update', 'select', 'delete', 'addOrModify', 'addIfNotThere' ]
    if queryType not in _gateFunctions:
return S_ERROR( '"%s" is not a proper gate call' % queryType )
gateFunction = getattr( self.gate, queryType )
# If meta is None, we set it to {}
    meta = parameters.pop( 'meta' ) or {}
# Remove self, added by locals()
del parameters[ 'self' ]
meta[ 'table' ] = tableName
gLogger.debug( 'Calling %s, with \n params %s \n meta %s' % ( queryType, parameters, meta ) )
return gateFunction( parameters, meta )
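  # Illustrative call flow ( hypothetical values ): selectVOBOXCache( site = 'LCG.CERN.ch' )
  # reaches the gate as
  #   gateFunction = self.gate.select
  #   parameters   = { 'site' : 'LCG.CERN.ch', 'system' : None, 'serviceUp' : None,
  #                    'machineUp' : None, 'lastCheckTime' : None }
  #   meta         = { 'table' : 'VOBOXCache' }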
#...............................................................................
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
gpl-3.0
|
talon-one/talon_one.py
|
test/test_ledger_entry.py
|
1
|
2766
|
# coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import talon_one
from talon_one.models.ledger_entry import LedgerEntry # noqa: E501
from talon_one.rest import ApiException
class TestLedgerEntry(unittest.TestCase):
"""LedgerEntry unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test LedgerEntry
            include_optional is a boolean; when False only required
            params are included, when True both required and
            optional params are included """
# model = talon_one.models.ledger_entry.LedgerEntry() # noqa: E501
if include_optional :
return LedgerEntry(
id = 56,
created = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
profile_id = '0',
account_id = 56,
loyalty_program_id = 56,
event_id = 56,
amount = 56,
reason = '0',
expiry_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
reference_id = 56
)
else :
return LedgerEntry(
id = 56,
created = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
profile_id = '0',
account_id = 56,
loyalty_program_id = 56,
event_id = 56,
amount = 56,
reason = '0',
expiry_date = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
)
def testLedgerEntry(self):
"""Test LedgerEntry"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
mit
|
abhishek-ch/hue
|
desktop/core/ext-py/python-openid-2.2.5/openid/store/memstore.py
|
165
|
3597
|
"""A simple store using only in-process memory."""
from openid.store import nonce
import copy
import time
class ServerAssocs(object):
def __init__(self):
self.assocs = {}
def set(self, assoc):
self.assocs[assoc.handle] = assoc
def get(self, handle):
return self.assocs.get(handle)
def remove(self, handle):
try:
del self.assocs[handle]
except KeyError:
return False
else:
return True
def best(self):
"""Returns association with the oldest issued date.
or None if there are no associations.
"""
best = None
for assoc in self.assocs.values():
if best is None or best.issued < assoc.issued:
best = assoc
return best
def cleanup(self):
"""Remove expired associations.
@return: tuple of (removed associations, remaining associations)
"""
remove = []
for handle, assoc in self.assocs.iteritems():
if assoc.getExpiresIn() == 0:
remove.append(handle)
for handle in remove:
del self.assocs[handle]
return len(remove), len(self.assocs)
class MemoryStore(object):
"""In-process memory store.
Use for single long-running processes. No persistence supplied.
"""
def __init__(self):
self.server_assocs = {}
self.nonces = {}
def _getServerAssocs(self, server_url):
try:
return self.server_assocs[server_url]
except KeyError:
assocs = self.server_assocs[server_url] = ServerAssocs()
return assocs
def storeAssociation(self, server_url, assoc):
assocs = self._getServerAssocs(server_url)
assocs.set(copy.deepcopy(assoc))
def getAssociation(self, server_url, handle=None):
assocs = self._getServerAssocs(server_url)
if handle is None:
return assocs.best()
else:
return assocs.get(handle)
def removeAssociation(self, server_url, handle):
assocs = self._getServerAssocs(server_url)
return assocs.remove(handle)
def useNonce(self, server_url, timestamp, salt):
if abs(timestamp - time.time()) > nonce.SKEW:
return False
anonce = (str(server_url), int(timestamp), str(salt))
if anonce in self.nonces:
return False
else:
self.nonces[anonce] = None
return True
def cleanupNonces(self):
now = time.time()
expired = []
for anonce in self.nonces.iterkeys():
if abs(anonce[1] - now) > nonce.SKEW:
# removing items while iterating over the set could be bad.
expired.append(anonce)
for anonce in expired:
del self.nonces[anonce]
return len(expired)
def cleanupAssociations(self):
remove_urls = []
removed_assocs = 0
for server_url, assocs in self.server_assocs.iteritems():
removed, remaining = assocs.cleanup()
removed_assocs += removed
if not remaining:
remove_urls.append(server_url)
# Remove entries from server_assocs that had none remaining.
for server_url in remove_urls:
del self.server_assocs[server_url]
return removed_assocs
def __eq__(self, other):
return ((self.server_assocs == other.server_assocs) and
(self.nonces == other.nonces))
def __ne__(self, other):
return not (self == other)
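# Minimal usage sketch ( run as a script; the server URL and salt are illustrative ):
if __name__ == '__main__':
    store = MemoryStore()
    now = time.time()
    assert store.useNonce('http://server.example/', now, 'salt')
    # Replaying the same (server_url, timestamp, salt) triple must be rejected:
    assert not store.useNonce('http://server.example/', now, 'salt')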
|
apache-2.0
|
lz1988/company-site
|
tests/regressiontests/admin_changelist/models.py
|
51
|
2489
|
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class Event(models.Model):
# Oracle can have problems with a column named "date"
date = models.DateField(db_column="event_date")
class Parent(models.Model):
name = models.CharField(max_length=128)
class Child(models.Model):
parent = models.ForeignKey(Parent, editable=False, null=True)
name = models.CharField(max_length=30, blank=True)
age = models.IntegerField(null=True, blank=True)
class Genre(models.Model):
name = models.CharField(max_length=20)
class Band(models.Model):
name = models.CharField(max_length=20)
nr_of_members = models.PositiveIntegerField()
genres = models.ManyToManyField(Genre)
@python_2_unicode_compatible
class Musician(models.Model):
name = models.CharField(max_length=30)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Group(models.Model):
name = models.CharField(max_length=30)
members = models.ManyToManyField(Musician, through='Membership')
def __str__(self):
return self.name
class Membership(models.Model):
music = models.ForeignKey(Musician)
group = models.ForeignKey(Group)
role = models.CharField(max_length=15)
class Quartet(Group):
pass
class ChordsMusician(Musician):
pass
class ChordsBand(models.Model):
name = models.CharField(max_length=30)
members = models.ManyToManyField(ChordsMusician, through='Invitation')
class Invitation(models.Model):
player = models.ForeignKey(ChordsMusician)
band = models.ForeignKey(ChordsBand)
instrument = models.CharField(max_length=15)
class Swallow(models.Model):
origin = models.CharField(max_length=255)
load = models.FloatField()
speed = models.FloatField()
class Meta:
ordering = ('speed', 'load')
class UnorderedObject(models.Model):
"""
Model without any defined `Meta.ordering`.
Refs #17198.
"""
bool = models.BooleanField(default=True)
class OrderedObjectManager(models.Manager):
def get_query_set(self):
return super(OrderedObjectManager, self).get_query_set().order_by('number')
class OrderedObject(models.Model):
"""
Model with Manager that defines a default order.
Refs #17198.
"""
name = models.CharField(max_length=255)
bool = models.BooleanField(default=True)
number = models.IntegerField(default=0, db_column='number_val')
objects = OrderedObjectManager()
|
bsd-3-clause
|
Trust-Code/PySPED
|
pysped/cte/leiaute/conssitcte_104.py
|
8
|
5910
|
# -*- coding: utf-8 -*-
#
# PySPED - Python libraries to deal with Brazil's SPED Project
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira at tauga.com.br>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License as
# published by the Free Software Foundation, either version 2.1 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# PySPED - Bibliotecas Python para o
# SPED - Sistema Público de Escrituração Digital
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira arroba tauga.com.br>
#
# Este programa é um software livre: você pode redistribuir e/ou modificar
# este programa sob os termos da licença GNU Library General Public License,
# publicada pela Free Software Foundation, em sua versão 2.1 ou, de acordo
# com sua opção, qualquer versão posterior.
#
# Este programa é distribuido na esperança de que venha a ser útil,
# porém SEM QUAISQUER GARANTIAS, nem mesmo a garantia implícita de
# COMERCIABILIDADE ou ADEQUAÇÃO A UMA FINALIDADE ESPECÍFICA. Veja a
# GNU Library General Public License para mais detalhes.
#
# Você deve ter recebido uma cópia da GNU Library General Public License
# juntamente com este programa. Caso esse não seja o caso, acesse:
# <http://www.gnu.org/licenses/>
#
from __future__ import division, print_function, unicode_literals
from pysped.xml_sped import (ABERTURA, NAMESPACE_CTE, TagCaracter,
TagDecimal, TagInteiro, XMLNFe, tira_abertura)
from pysped.cte.leiaute import ESQUEMA_ATUAL_VERSAO_104 as ESQUEMA_ATUAL
from pysped.cte.leiaute.consrecicte_104 import ProtCTe as ProtCTe_104
from pysped.cte.leiaute.canccte_104 import RetCancCTe as RetCancCTe_104
import os
DIRNAME = os.path.dirname(__file__)
class ConsSitCTe(XMLNFe):
def __init__(self):
super(ConsSitCTe, self).__init__()
self.versao = TagDecimal(nome='consSitCTe', codigo='EP01', propriedade='versao', namespace=NAMESPACE_CTE, valor='1.04', raiz='/')
self.tpAmb = TagInteiro(nome='tpAmb' , codigo='EP03', tamanho=[ 1, 1, 1], raiz='//consSitCTe', valor=2, namespace=NAMESPACE_CTE)
self.xServ = TagCaracter(nome='xServ' , codigo='EP04', tamanho=[ 9, 9] , raiz='//consSitCTe', valor='CONSULTAR', namespace=NAMESPACE_CTE)
self.chNFe = TagCaracter(nome='chCTe' , codigo='EP05', tamanho=[44, 44] , raiz='//consSitCTe', namespace=NAMESPACE_CTE)
self.caminho_esquema = os.path.join(DIRNAME, 'schema', ESQUEMA_ATUAL + '/')
self.arquivo_esquema = 'consSitCte_v1.04.xsd'
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += ABERTURA
xml += self.versao.xml
xml += self.tpAmb.xml
xml += self.xServ.xml
xml += self.chNFe.xml
xml += '</consSitCTe>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.versao.xml = arquivo
self.tpAmb.xml = arquivo
self.xServ.xml = arquivo
self.chNFe.xml = arquivo
xml = property(get_xml, set_xml)
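# Hedged usage sketch ( the access key is illustrative; a real chCTe has 44 digits ):
#   cons = ConsSitCTe()
#   cons.chNFe.valor = '0' * 44
#   envio = cons.xml  # serialized <consSitCTe> request ( schema: consSitCte_v1.04.xsd )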
class RetConsSitCTe(XMLNFe):
def __init__(self):
super(RetConsSitCTe, self).__init__()
self.versao = TagDecimal(nome='retConsSitCTe', codigo='ER01', propriedade='versao', namespace=NAMESPACE_CTE, valor='1.04', raiz='/')
self.tpAmb = TagInteiro(nome='tpAmb' , codigo='ER03' , tamanho=[1, 1, 1], raiz='//retConsSitCTe', namespace=NAMESPACE_CTE)
self.verAplic = TagCaracter(nome='verAplic' , codigo='ER04' , tamanho=[1, 20] , raiz='//retConsSitCTe', namespace=NAMESPACE_CTE)
self.cStat = TagCaracter(nome='cStat' , codigo='ER05' , tamanho=[1, 3] , raiz='//retConsSitCTe', namespace=NAMESPACE_CTE)
self.xMotivo = TagCaracter(nome='xMotivo' , codigo='ER06' , tamanho=[1, 2000] , raiz='//retConsSitCTe', namespace=NAMESPACE_CTE)
self.cUF = TagInteiro(nome='cUF' , codigo='ER07' , tamanho=[2, 2, 2], raiz='//retConsSitCTe', namespace=NAMESPACE_CTE)
self.protCTe = None
self.retCancCTe = None
self.caminho_esquema = os.path.join(DIRNAME, 'schema', ESQUEMA_ATUAL + '/')
self.arquivo_esquema = 'retConsSitCte_v1.04.xsd'
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += ABERTURA
xml += self.versao.xml
xml += self.tpAmb.xml
xml += self.verAplic.xml
xml += self.cStat.xml
xml += self.xMotivo.xml
xml += self.cUF.xml
if self.protCTe is not None:
xml += self.protCTe.xml
if self.retCancCTe is not None:
xml += tira_abertura(self.retCancCTe.xml)
xml += '</retConsSitCTe>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.versao.xml = arquivo
self.tpAmb.xml = arquivo
self.verAplic.xml = arquivo
self.cStat.xml = arquivo
self.xMotivo.xml = arquivo
self.cUF.xml = arquivo
if self._le_noh('//retConsSitCTe/protCTe') is not None:
self.protCTe = ProtCTe_104()
self.protCTe.xml = arquivo
if self._le_noh('//retConsSitCTe/retCancCTe') is not None:
self.retCancCTe = RetCancCTe_104()
self.retCancCTe.xml = arquivo
xml = property(get_xml, set_xml)
|
lgpl-2.1
|
google/contentbox
|
third_party/social/strategies/cherrypy_strategy.py
|
8
|
2039
|
import six
import cherrypy
from social.strategies.base import BaseStrategy, BaseTemplateStrategy
class CherryPyJinja2TemplateStrategy(BaseTemplateStrategy):
def __init__(self, strategy):
self.strategy = strategy
self.env = cherrypy.tools.jinja2env
def render_template(self, tpl, context):
return self.env.get_template(tpl).render(context)
def render_string(self, html, context):
return self.env.from_string(html).render(context)
class CherryPyStrategy(BaseStrategy):
def __init__(self, *args, **kwargs):
kwargs.setdefault('tpl', CherryPyJinja2TemplateStrategy)
        super(CherryPyStrategy, self).__init__(*args, **kwargs)
def get_setting(self, name):
return cherrypy.config[name]
def request_data(self, merge=True):
if merge:
data = cherrypy.request.params
elif cherrypy.request.method == 'POST':
data = cherrypy.body.params
else:
data = cherrypy.request.params
return data
def request_host(self):
return cherrypy.request.base
def redirect(self, url):
raise cherrypy.HTTPRedirect(url)
def html(self, content):
return content
def authenticate(self, *args, **kwargs):
kwargs['strategy'] = self
kwargs['storage'] = self.storage
kwargs['backend'] = self.backend
return self.backend.authenticate(*args, **kwargs)
def session_get(self, name, default=None):
return cherrypy.session.get(name, default)
def session_set(self, name, value):
cherrypy.session[name] = value
def session_pop(self, name):
cherrypy.session.pop(name, None)
def session_setdefault(self, name, value):
return cherrypy.session.setdefault(name, value)
def build_absolute_uri(self, path=None):
return cherrypy.url(path or '')
def is_response(self, value):
return isinstance(value, six.string_types) or \
isinstance(value, cherrypy.CherryPyException)
|
apache-2.0
|
NeCTAR-RC/neutron
|
neutron/db/migration/alembic_migrations/portsec_init_ops.py
|
61
|
1452
|
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Initial operations for the port security extension
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'networksecuritybindings',
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('port_security_enabled', sa.Boolean(), nullable=False),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('network_id'))
op.create_table(
'portsecuritybindings',
sa.Column('port_id', sa.String(length=36), nullable=False),
sa.Column('port_security_enabled', sa.Boolean(), nullable=False),
sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('port_id'))
|
apache-2.0
|
luotao1/Paddle
|
python/paddle/fluid/tests/unittests/xpu/test_lamb_op_xpu.py
|
2
|
3892
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
sys.path.append("..")
import unittest
import numpy as np
from op_test_xpu import XPUOpTest
from paddle.fluid import core
from paddle.fluid.op import Operator
import paddle.fluid as fluid
import paddle
class TestLambOp1(XPUOpTest):
def set_attrs(self):
self.attrs = {
'epsilon': 1e-6,
'beta1': 0.9,
'beta2': 0.999,
'weight_decay': 0.01
}
def setUp(self):
'''Test Lamb Op with supplied attributes
'''
self.op_type = "lamb"
param = np.random.uniform(-1, 1, 5000).astype("float32")
grad = np.random.uniform(-1, 1, 5000).astype("float32")
moment1 = np.random.uniform(-1, 1, 5000).astype("float32")
moment2 = np.random.random(5000).astype("float32")
self.set_attrs()
learning_rate = 0.001
beta1_pow = self.attrs['beta1']
beta2_pow = self.attrs['beta2']
self.inputs = {
'Param': param,
'Grad': grad,
'Moment1': moment1,
'Moment2': moment2,
'LearningRate': np.array([learning_rate]).astype("float32"),
'Beta1Pow': np.array([beta1_pow]).astype("float32"),
'Beta2Pow': np.array([beta2_pow]).astype("float32")
}
param_out, moment1_out, moment2_out, \
beta1_pow_out, beta2_pow_out = lamb_step(self.inputs, self.attrs)
self.outputs = {
'Moment1Out': moment1_out,
'Moment2Out': moment2_out,
'ParamOut': param_out,
'Beta1PowOut': beta1_pow_out,
'Beta2PowOut': beta2_pow_out
}
def test_check_output(self):
self.check_output_with_place(paddle.XPUPlace(0))
def lamb_step(inputs, attributes):
'''
Simulate one step of the lamb optimizer
:param inputs: dict of inputs
:param attributes: dict of attributes
:return tuple: tuple of output param, moment1, moment2,
beta1 power accumulator and beta2 power accumulator
'''
param = inputs['Param']
grad = inputs['Grad']
moment1 = inputs['Moment1']
moment2 = inputs['Moment2']
lr = inputs['LearningRate']
beta1_pow = inputs['Beta1Pow']
beta2_pow = inputs['Beta2Pow']
beta1 = attributes['beta1']
beta2 = attributes['beta2']
epsilon = attributes['epsilon']
weight_decay = attributes['weight_decay']
moment1_out = beta1 * moment1 + (1 - beta1) * grad
moment2_out = beta2 * moment2 + (1 - beta2) * np.square(grad)
moment1_unbiased = moment1_out / (1 - beta1_pow)
moment2_unbiased = moment2_out / (1 - beta2_pow)
r_1 = np.linalg.norm(param)
r_2 = np.linalg.norm(moment1_unbiased / (np.sqrt(moment2_unbiased) + epsilon
) + weight_decay * param)
if r_1 > 0.0 and r_2 > 0.0:
lr_t = lr * r_1 / r_2
else:
lr_t = 1.0
param_out = param - lr_t * (moment1_unbiased / (
np.sqrt(moment2_unbiased) + epsilon) + weight_decay * param)
beta1_pow_out = beta1_pow * beta1
beta2_pow_out = beta2_pow * beta2
return param_out, moment1_out, moment2_out, beta1_pow_out, beta2_pow_out
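# Hedged sanity sketch for lamb_step ( scalar values, illustrative only ): with
# param = 1.0, grad = 0.1, moment1 = moment2 = 0 and the default attributes above,
# moment1_out = 0.9 * 0 + 0.1 * 0.1 = 0.01 and moment2_out = 0.001 * 0.01 = 1e-5;
# the trust ratio r_1 / r_2 then rescales the learning rate before the update.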
if __name__ == "__main__":
paddle.enable_static()
unittest.main()
|
apache-2.0
|
jdreaver/vispy
|
vispy/visuals/border.py
|
17
|
6845
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
# Author: Siddharth Bhat
# -----------------------------------------------------------------------------
import numpy as np
from . import Visual
from ..color import Color
VERT_SHADER_BORDER = """
attribute vec2 a_position;
attribute vec2 a_adjust_dir;
void main() {
// First map the vertex to document coordinates
vec4 doc_pos = $visual_to_doc(vec4(a_position, 0, 1));
// Also need to map the adjustment direction vector, but this is tricky!
// We need to adjust separately for each component of the vector:
vec4 adjusted;
if ( a_adjust_dir.x == 0 ) {
// If this is an outer vertex, no adjustment for line weight is needed.
// (In fact, trying to make the adjustment would result in no
// triangles being drawn, hence the if/else block)
adjusted = doc_pos;
}
else {
// Inner vertexes must be adjusted for line width, but this is
// surprisingly tricky given that the rectangle may have been scaled
// and rotated!
vec4 doc_x = $visual_to_doc(vec4(a_adjust_dir.x, 0, 0, 0)) -
$visual_to_doc(vec4(0, 0, 0, 0));
vec4 doc_y = $visual_to_doc(vec4(0, a_adjust_dir.y, 0, 0)) -
$visual_to_doc(vec4(0, 0, 0, 0));
doc_x = normalize(doc_x);
doc_y = normalize(doc_y);
// Now doc_x + doc_y points in the direction we need in order to
// correct the line weight of _both_ segments, but the magnitude of
// that correction is wrong. To correct it we first need to
// measure the width that would result from using doc_x + doc_y:
vec4 proj_y_x = dot(doc_x, doc_y) * doc_x; // project y onto x
        float cur_width = length(doc_y - proj_y_x); // measure current width
// And now we can adjust vertex position for line width:
adjusted = doc_pos + ($border_width / cur_width) * (doc_x + doc_y);
}
// Finally map the remainder of the way to render coordinates
gl_Position = $doc_to_render(adjusted);
}
"""
FRAG_SHADER_BORDER = """
void main() {
gl_FragColor = $border_color;
}
""" # noqa
class _BorderVisual(Visual):
"""
Visual subclass to display 2D pixel-width borders.
Parameters
----------
pos : tuple (x, y)
Position where the colorbar is to be placed with
respect to the center of the colorbar
halfdim : tuple (half_width, half_height)
Half the dimensions of the colorbar measured
from the center. That way, the total dimensions
of the colorbar is (x - half_width) to (x + half_width)
and (y - half_height) to (y + half_height)
border_width : float (in px)
The width of the border the colormap should have. This measurement
is given in pixels
border_color : str | vispy.color.Color
The color of the border of the colormap. This can either be a
        str as the color's name or an actual instance of a vispy.color.Color
"""
def __init__(self, pos, halfdim,
border_width=1.0,
border_color=None,
**kwargs):
self._pos = pos
self._halfdim = halfdim
self._border_width = border_width
self._border_color = Color(border_color)
Visual.__init__(self, vcode=VERT_SHADER_BORDER,
fcode=FRAG_SHADER_BORDER, **kwargs)
@staticmethod
def _prepare_transforms(view):
program = view.shared_program
program.vert['visual_to_doc'] = \
view.transforms.get_transform('visual', 'document')
program.vert['doc_to_render'] = \
view.transforms.get_transform('document', 'render')
@property
def visual_border_width(self):
""" The border width in visual coordinates
"""
render_to_doc = \
self.transforms.get_transform('document', 'visual')
vec = render_to_doc.map([self.border_width, self.border_width, 0])
origin = render_to_doc.map([0, 0, 0])
visual_border_width = [vec[0] - origin[0], vec[1] - origin[1]]
# we need to flip the y axis because coordinate systems are inverted
visual_border_width[1] *= -1
return visual_border_width
def _update(self):
x, y = self._pos
halfw, halfh = self._halfdim
border_vertices = np.array([
[x - halfw, y - halfh],
[x - halfw, y - halfh],
[x + halfw, y - halfh],
[x + halfw, y - halfh],
[x + halfw, y + halfh],
[x + halfw, y + halfh],
[x - halfw, y + halfh],
[x - halfw, y + halfh],
[x - halfw, y - halfh],
[x - halfw, y - halfh],
], dtype=np.float32)
# Direction each vertex should move to correct for line width
adjust_dir = np.array([
[0, 0], [-1, -1],
[0, 0], [1, -1],
[0, 0], [1, 1],
[0, 0], [-1, 1],
[0, 0], [-1, -1],
], dtype=np.float32)
self.shared_program['a_position'] = border_vertices
self.shared_program['a_adjust_dir'] = adjust_dir
self.shared_program.vert['border_width'] = self._border_width
self.shared_program.frag['border_color'] = self._border_color.rgba
def _prepare_draw(self, view=None):
self._update()
self._draw_mode = "triangle_strip"
return True
@property
def border_width(self):
""" The width of the border
"""
return self._border_width
@border_width.setter
def border_width(self, border_width):
self._border_width = border_width
# positions of text need to be changed accordingly
self._update()
@property
def border_color(self):
""" The color of the border in pixels
"""
return self._border_color
@border_color.setter
def border_color(self, border_color):
self._border_color = Color(border_color)
self.shared_program.frag['border_color'] = self._border_color.rgba
@property
def pos(self):
""" The center of the BorderVisual
"""
return self._pos
@pos.setter
def pos(self, pos):
self._pos = pos
self._update()
@property
def halfdim(self):
""" The half-dimensions measured from the center of the BorderVisual
"""
return self._halfdim
@halfdim.setter
def halfdim(self, halfdim):
self._halfdim = halfdim
self._update()
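# Hedged construction sketch ( assumes a vispy scene providing the transform
# system; positions and colors are illustrative ):
#   border = _BorderVisual(pos=(50, 50), halfdim=(40, 30),
#                          border_width=2.0, border_color='white')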
|
bsd-3-clause
|
zadgroup/edx-platform
|
lms/djangoapps/licenses/management/commands/generate_serial_numbers.py
|
106
|
1929
|
from uuid import uuid4
from django.utils.html import escape
from django.core.management.base import BaseCommand, CommandError
from xmodule.modulestore.django import modulestore
from licenses.models import CourseSoftware, UserLicense
from opaque_keys.edx.locations import SlashSeparatedCourseKey
class Command(BaseCommand):
help = """Generate random serial numbers for software used in a course.
Usage: generate_serial_numbers <course_id> <software_name> <count>
    <count> is the number of serial numbers to generate.
    Example:
      generate_serial_numbers MITx/6.002x/2012_Fall matlab 100
"""
args = "course_id software_id count"
def handle(self, *args, **options):
course_id, software_name, count = self._parse_arguments(args)
software, _ = CourseSoftware.objects.get_or_create(course_id=course_id,
name=software_name)
self._generate_serials(software, count)
def _parse_arguments(self, args):
if len(args) != 3:
raise CommandError("Incorrect number of arguments")
course_id = args[0]
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
if not modulestore().has_course(course_key):
raise CommandError("Unknown course_id")
software_name = escape(args[1].lower())
try:
count = int(args[2])
except ValueError:
raise CommandError("Invalid <count> argument.")
return course_key, software_name, count
def _generate_serials(self, software, count):
print "Generating {0} serials".format(count)
        # add the serial numbers to the database
for _ in xrange(count):
serial = str(uuid4())
license = UserLicense(software=software, serial=serial)
license.save()
print "{0} new serial numbers generated.".format(count)
|
agpl-3.0
|
jkramarz/zuombot
|
cloudbot/util/web.py
|
19
|
4934
|
"""
web.py
Contains functions for interacting with web services.
Created by:
- Bjorn Neergaard <https://github.com/neersighted>
Maintainer:
- Luke Rogers <https://github.com/lukeroge>
License:
GPL v3
"""
import json
import requests
# Constants
DEFAULT_SHORTENER = 'is.gd'
DEFAULT_PASTEBIN = 'hastebin'
HASTEBIN_SERVER = 'http://hasteb.in'
# Python eval
def pyeval(code, pastebin=True):
p = {'input': code}
r = requests.post('http://pyeval.appspot.com/exec', data=p)
p = {'id': r.text}
r = requests.get('http://pyeval.appspot.com/exec', params=p)
j = r.json()
output = j['output'].rstrip('\n')
if '\n' in output and pastebin:
return paste(output)
else:
return output
# Shortening / pasting
# Public API
def shorten(url, custom=None, key=None, service=DEFAULT_SHORTENER):
impl = shorteners[service]
return impl.shorten(url, custom, key)
def try_shorten(url, custom=None, key=None, service=DEFAULT_SHORTENER):
impl = shorteners[service]
return impl.try_shorten(url, custom, key)
def expand(url, service=None):
if service:
impl = shorteners[service]
else:
impl = None
for name in shorteners:
if name in url:
impl = shorteners[name]
break
if impl is None:
impl = Shortener()
return impl.expand(url)
def paste(data, ext='txt', service=DEFAULT_PASTEBIN):
impl = pastebins[service]
return impl.paste(data, ext)
class ServiceError(Exception):
def __init__(self, message, request):
self.message = message
self.request = request
def __str__(self):
return '[HTTP {}] {}'.format(self.request.status_code, self.message)
class Shortener:
def __init__(self):
pass
def shorten(self, url, custom=None, key=None):
return url
def try_shorten(self, url, custom=None, key=None):
try:
return self.shorten(url, custom, key)
except ServiceError:
return url
def expand(self, url):
r = requests.get(url, allow_redirects=False)
if 'location' in r.headers:
return r.headers['location']
else:
raise ServiceError('That URL does not exist', r)
class Pastebin:
def __init__(self):
pass
def paste(self, data, ext):
raise NotImplementedError
# Internal Implementations
shorteners = {}
pastebins = {}
def _shortener(name):
def _decorate(impl):
shorteners[name] = impl()
return _decorate
def _pastebin(name):
def _decorate(impl):
pastebins[name] = impl()
return _decorate
@_shortener('is.gd')
class Isgd(Shortener):
def shorten(self, url, custom=None, key=None):
p = {'url': url, 'shorturl': custom, 'format': 'json'}
r = requests.get('http://is.gd/create.php', params=p)
j = r.json()
if 'shorturl' in j:
return j['shorturl']
else:
raise ServiceError(j['errormessage'], r)
def expand(self, url):
p = {'shorturl': url, 'format': 'json'}
r = requests.get('http://is.gd/forward.php', params=p)
j = r.json()
if 'url' in j:
return j['url']
else:
raise ServiceError(j['errormessage'], r)
@_shortener('goo.gl')
class Googl(Shortener):
def shorten(self, url, custom=None, key=None):
h = {'content-type': 'application/json'}
k = {'key': key}
p = {'longUrl': url}
r = requests.post('https://www.googleapis.com/urlshortener/v1/url', params=k, data=json.dumps(p), headers=h)
j = r.json()
if 'error' not in j:
return j['id']
else:
raise ServiceError(j['error']['message'], r)
def expand(self, url):
p = {'shortUrl': url}
r = requests.get('https://www.googleapis.com/urlshortener/v1/url', params=p)
j = r.json()
if 'error' not in j:
return j['longUrl']
else:
raise ServiceError(j['error']['message'], r)
@_shortener('git.io')
class Gitio(Shortener):
def shorten(self, url, custom=None, key=None):
p = {'url': url, 'code': custom}
r = requests.post('http://git.io', data=p)
if r.status_code == requests.codes.created:
s = r.headers['location']
if custom and custom not in s:
raise ServiceError('That URL is already in use', r)
else:
return s
else:
raise ServiceError(r.text, r)
@_pastebin('hastebin')
class Hastebin(Pastebin):
def paste(self, data, ext):
r = requests.post(HASTEBIN_SERVER + '/documents', data=data)
j = r.json()
        if r.status_code == requests.codes.ok:
return '{}/{}.{}'.format(HASTEBIN_SERVER, j['key'], ext)
else:
raise ServiceError(j['message'], r)
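# Hedged usage sketch ( performs live network calls; service availability and
# endpoints are assumptions, not guarantees ):
if __name__ == '__main__':
    print(try_shorten('https://example.com/some/very/long/path'))  # is.gd; falls back to the input URL
    print(paste('hello world', ext='txt'))                         # hastebin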
|
gpl-3.0
|
fish2000/h5dj
|
docs/conf.py
|
1
|
7955
|
# -*- coding: utf-8 -*-
#
# h5dj documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 25 06:03:15 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'h5dj'
copyright = u'2012, Alexander Bohn'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2.0'
# The full version, including alpha/beta/rc tags.
release = '0.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'h5djdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'h5dj.tex', u'h5dj Documentation',
u'Alexander Bohn', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'h5dj', u'h5dj Documentation',
[u'Alexander Bohn'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'h5dj', u'h5dj Documentation',
u'Alexander Bohn', 'h5dj', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
bsd-3-clause
|
shlomif/patool
|
patoolib/programs/cpio.py
|
1
|
2007
|
# -*- coding: utf-8 -*-
# Copyright (C) 2010-2012 Bastian Kleineidam
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Archive commands for the cpio program."""
import os
from patoolib import util
def extract_cpio (archive, compression, cmd, **kwargs):
"""Extract a CPIO archive."""
cmdlist = [util.shell_quote(cmd), '--extract', '--make-directories',
'--preserve-modification-time', '--no-absolute-filenames',
'--force-local', '--nonmatching', r'"*\.\.*"']
if kwargs['verbose']:
cmdlist.append('-v')
cmdlist.extend(['<', util.shell_quote(os.path.abspath(archive))])
return (cmdlist, {'cwd': kwargs['outdir'], 'shell': True})
def list_cpio (archive, compression, cmd, **kwargs):
"""List a CPIO archive."""
cmdlist = [cmd, '-t']
if kwargs['verbose']:
cmdlist.append('-v')
cmdlist.extend(['-F', archive])
return cmdlist
test_cpio = list_cpio
def create_cpio(archive, compression, cmd, *args, **kwargs):
"""Create a CPIO archive."""
cmdlist = [util.shell_quote(cmd), '--create']
if kwargs['verbose']:
cmdlist.append('-v')
if len(args) != 0:
findcmd = ['find']
findcmd.extend([util.shell_quote(x) for x in args])
findcmd.extend(['-print0', '|'])
cmdlist[0:0] = findcmd
cmdlist.append('-0')
cmdlist.extend([">", util.shell_quote(archive)])
return (cmdlist, {'shell': True})
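# Illustrative sketch of the command these helpers assemble (the archive and
# directory names here are hypothetical). A call such as
#
#   create_cpio('backup.cpio', None, 'cpio', 'mydir', verbose=True)
#
# returns a command list that, run through a shell, amounts to:
#
#   find mydir -print0 | cpio --create -v -0 > backup.cpio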
|
gpl-3.0
|
gabrielfalcao/lettuce
|
tests/integration/lib/Django-1.3/django/utils/cache.py
|
96
|
9483
|
"""
This module contains helper functions for controlling caching. It does so by
managing the "Vary" header of responses. It includes functions to patch the
header of response objects directly and decorators that change functions to do
that header-patching themselves.
For information on the Vary header, see:
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.44
Essentially, the "Vary" HTTP header defines which headers a cache should take
into account when building its cache key. Requests with the same path but
different header content for headers named in "Vary" need to get different
cache keys to prevent delivery of wrong content.
An example: i18n middleware would need to distinguish caches by the
"Accept-language" header.
"""
import re
import time
from django.conf import settings
from django.core.cache import get_cache
from django.utils.encoding import smart_str, iri_to_uri
from django.utils.http import http_date
from django.utils.hashcompat import md5_constructor
from django.utils.translation import get_language
from django.http import HttpRequest
cc_delim_re = re.compile(r'\s*,\s*')
def patch_cache_control(response, **kwargs):
"""
This function patches the Cache-Control header by adding all
keyword arguments to it. The transformation is as follows:
* All keyword parameter names are turned to lowercase, and underscores
are converted to hyphens.
* If the value of a parameter is True (exactly True, not just a
true value), only the parameter name is added to the header.
* All other parameters are added with their value, after applying
str() to it.
"""
def dictitem(s):
t = s.split('=', 1)
if len(t) > 1:
return (t[0].lower(), t[1])
else:
return (t[0].lower(), True)
def dictvalue(t):
if t[1] is True:
return t[0]
else:
return t[0] + '=' + smart_str(t[1])
if response.has_header('Cache-Control'):
cc = cc_delim_re.split(response['Cache-Control'])
cc = dict([dictitem(el) for el in cc])
else:
cc = {}
# If there's already a max-age header but we're being asked to set a new
# max-age, use the minimum of the two ages. In practice this happens when
# a decorator and a piece of middleware both operate on a given view.
if 'max-age' in cc and 'max_age' in kwargs:
kwargs['max_age'] = min(cc['max-age'], kwargs['max_age'])
for (k, v) in kwargs.items():
cc[k.replace('_', '-')] = v
cc = ', '.join([dictvalue(el) for el in cc.items()])
response['Cache-Control'] = cc
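# Illustrative sketch of the transformation documented above (``response`` is
# a placeholder for a Django HttpResponse):
#
#   patch_cache_control(response, max_age=3600, public=True, no_transform=True)
#
# produces a header such as ``Cache-Control: max-age=3600, no-transform,
# public``: underscores become hyphens, True-valued keywords become bare
# directives, and every other value is appended as ``name=value``.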
def get_max_age(response):
"""
Returns the max-age from the response Cache-Control header as an integer
    (or ``None`` if it wasn't found or wasn't an integer).
"""
if not response.has_header('Cache-Control'):
return
cc = dict([_to_tuple(el) for el in
cc_delim_re.split(response['Cache-Control'])])
if 'max-age' in cc:
try:
return int(cc['max-age'])
except (ValueError, TypeError):
pass
def patch_response_headers(response, cache_timeout=None):
"""
Adds some useful headers to the given HttpResponse object:
ETag, Last-Modified, Expires and Cache-Control
Each header is only added if it isn't already set.
cache_timeout is in seconds. The CACHE_MIDDLEWARE_SECONDS setting is used
by default.
"""
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
if cache_timeout < 0:
cache_timeout = 0 # Can't have max-age negative
if settings.USE_ETAGS and not response.has_header('ETag'):
response['ETag'] = '"%s"' % md5_constructor(response.content).hexdigest()
if not response.has_header('Last-Modified'):
response['Last-Modified'] = http_date()
if not response.has_header('Expires'):
response['Expires'] = http_date(time.time() + cache_timeout)
patch_cache_control(response, max_age=cache_timeout)
def add_never_cache_headers(response):
"""
Adds headers to a response to indicate that a page should never be cached.
"""
patch_response_headers(response, cache_timeout=-1)
def patch_vary_headers(response, newheaders):
"""
Adds (or updates) the "Vary" header in the given HttpResponse object.
newheaders is a list of header names that should be in "Vary". Existing
headers in "Vary" aren't removed.
"""
# Note that we need to keep the original order intact, because cache
# implementations may rely on the order of the Vary contents in, say,
# computing an MD5 hash.
if response.has_header('Vary'):
vary_headers = cc_delim_re.split(response['Vary'])
else:
vary_headers = []
# Use .lower() here so we treat headers as case-insensitive.
existing_headers = set([header.lower() for header in vary_headers])
additional_headers = [newheader for newheader in newheaders
if newheader.lower() not in existing_headers]
response['Vary'] = ', '.join(vary_headers + additional_headers)
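# Illustrative sketch: with an existing ``Vary: Cookie`` header,
#
#   patch_vary_headers(response, ['Accept-Language', 'cookie'])
#
# leaves ``Vary: Cookie, Accept-Language``. The duplicate is skipped because
# the comparison is case-insensitive, and the original order is preserved.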
def has_vary_header(response, header_query):
"""
Checks to see if the response has a given header name in its Vary header.
"""
if not response.has_header('Vary'):
return False
vary_headers = cc_delim_re.split(response['Vary'])
existing_headers = set([header.lower() for header in vary_headers])
return header_query.lower() in existing_headers
def _i18n_cache_key_suffix(request, cache_key):
"""If enabled, returns the cache key ending with a locale."""
if settings.USE_I18N:
# first check if LocaleMiddleware or another middleware added
# LANGUAGE_CODE to request, then fall back to the active language
# which in turn can also fall back to settings.LANGUAGE_CODE
cache_key += '.%s' % getattr(request, 'LANGUAGE_CODE', get_language())
return cache_key
def _generate_cache_key(request, method, headerlist, key_prefix):
"""Returns a cache key from the headers given in the header list."""
ctx = md5_constructor()
for header in headerlist:
value = request.META.get(header, None)
if value is not None:
ctx.update(value)
path = md5_constructor(iri_to_uri(request.get_full_path()))
cache_key = 'views.decorators.cache.cache_page.%s.%s.%s.%s' % (
key_prefix, request.method, path.hexdigest(), ctx.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def _generate_cache_header_key(key_prefix, request):
"""Returns a cache key for the header cache."""
path = md5_constructor(iri_to_uri(request.get_full_path()))
cache_key = 'views.decorators.cache.cache_header.%s.%s' % (
key_prefix, path.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def get_cache_key(request, key_prefix=None, method='GET', cache=None):
"""
Returns a cache key based on the request path and query. It can be used
in the request phase because it pulls the list of headers to take into
account from the global path registry and uses those to build a cache key
to check against.
If there is no headerlist stored, the page needs to be rebuilt, so this
function returns None.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = get_cache(settings.CACHE_MIDDLEWARE_ALIAS)
headerlist = cache.get(cache_key, None)
if headerlist is not None:
return _generate_cache_key(request, method, headerlist, key_prefix)
else:
return None
def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None):
"""
Learns what headers to take into account for some request path from the
response object. It stores those headers in a global path registry so that
later access to that path will know what headers to take into account
    without building the response object itself. The headers are named in the
    Vary header of the response; storing them avoids having to regenerate the
    response just to discover them.
The list of headers to use for cache key generation is stored in the same
cache as the pages themselves. If the cache ages some data out of the
cache, this just means that we have to build the response once to get at
the Vary header and so at the list of headers to use for the cache key.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = get_cache(settings.CACHE_MIDDLEWARE_ALIAS)
if response.has_header('Vary'):
headerlist = ['HTTP_'+header.upper().replace('-', '_')
for header in cc_delim_re.split(response['Vary'])]
cache.set(cache_key, headerlist, cache_timeout)
return _generate_cache_key(request, request.method, headerlist, key_prefix)
else:
# if there is no Vary header, we still need a cache key
# for the request.get_full_path()
cache.set(cache_key, [], cache_timeout)
return _generate_cache_key(request, request.method, [], key_prefix)
def _to_tuple(s):
t = s.split('=',1)
if len(t) == 2:
return t[0].lower(), t[1]
return t[0].lower(), True
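# How the two phases documented above fit together (a hedged sketch; `request`
# and `response` stand in for real Django objects):
#
#   # Response phase: record which headers matter for this path.
#   key = learn_cache_key(request, response, cache_timeout=600)
#   cache.set(key, response, 600)
#
#   # Request phase: rebuild the same key; None means nothing is known yet.
#   key = get_cache_key(request)
#   cached = cache.get(key) if key is not None else None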
|
gpl-3.0
|
tell-k/csquery
|
setup.py
|
1
|
2233
|
# -*- coding: utf-8 -*-
import sys
import os
import re
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', 'Arguments to pass to py.test')]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
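# With the cmdclass hook registered below, the suite can be run through
# setuptools; the '-a' option forwards extra arguments to py.test, e.g.:
#
#   python setup.py test -a "--cov=csquery"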
here = os.path.dirname(__file__)
with open(os.path.join(here, 'csquery', '__init__.py'), 'r') as f:
version = re.compile(
r".*__version__ = '(.*?)'", re.S).match(f.read()).group(1)
readme = open(os.path.join(here, 'README.rst')).read()
requires = [
'six'
]
tests_require = [
'pytest-cov',
'pytest',
'mock',
]
classifiers = [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Software Development :: Libraries',
]
setup(
name='csquery',
version=version,
description='A simple query builder for Amazon Cloudsearch structured query parser.', # NOQA
long_description=readme,
url='https://github.com/tell-k/csquery',
keywords='aws amazon cloudsearch querybuilder structured',
author='tell-k',
author_email='ffk2005 at gmail.com',
classifiers=classifiers,
install_requires=requires,
tests_require=tests_require,
cmdclass={'test': PyTest},
packages=find_packages(exclude=['tests']),
license='MIT',
)
|
mit
|
tornadozou/tensorflow
|
tensorflow/contrib/learn/python/learn/graph_actions.py
|
76
|
28682
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""High level operations on graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import sys
import threading
import time
import numpy as np
from six import reraise
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.framework.python.ops import ops as contrib_ops
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.core.framework import summary_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import session_manager as session_manager_lib
from tensorflow.python.training import summary_io
from tensorflow.python.training import supervisor as tf_supervisor
from tensorflow.python.util.deprecation import deprecated
# Singleton for SummaryWriter per logdir folder.
_SUMMARY_WRITERS = {}
# Lock protecting _SUMMARY_WRITERS
_summary_writer_lock = threading.Lock()
_graph_action_deprecation = deprecated(
'2017-02-15',
'graph_actions.py will be deleted. Use tf.train.* utilities instead. '
'You can use learn/estimators/estimator.py as an example.')
@_graph_action_deprecation
def clear_summary_writers():
"""Clear cached summary writers. Currently only used for unit tests."""
return summary_io.SummaryWriterCache.clear()
def get_summary_writer(logdir):
"""Returns single SummaryWriter per logdir in current run.
Args:
logdir: str, folder to write summaries.
Returns:
Existing `SummaryWriter` object or new one if never wrote to given
directory.
"""
return summary_io.SummaryWriterCache.get(logdir)
def _make_saver(graph, keep_checkpoint_max=5):
vars_to_save = (graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) +
graph.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS))
if vars_to_save:
return tf_saver.Saver(vars_to_save,
sharded=True,
max_to_keep=keep_checkpoint_max)
else:
return None
def _restore_from_checkpoint(session, graph, checkpoint_path, saver=None):
logging.info('Loading model from checkpoint: %s.', checkpoint_path)
saver = saver or _make_saver(graph)
if saver:
saver.restore(session, checkpoint_path)
else:
logging.info('No variables found in graph, not creating Saver() object.')
def _run_with_monitors(session, step, tensors, feed_dict, monitors):
"""Runs session for given tensors with monitor callbacks."""
for monitor in monitors:
tensors += monitor.step_begin(step)
tensors = list(set(tensors))
outputs = session.run(tensors, feed_dict=feed_dict)
outputs = dict(zip(
[t.name if isinstance(t, ops.Tensor) else t for t in tensors],
outputs))
should_stop = False
for monitor in monitors:
induce_stop = monitor.step_end(step, outputs)
should_stop = should_stop or induce_stop
return outputs, should_stop
@_graph_action_deprecation
def train(graph,
output_dir,
train_op,
loss_op,
global_step_tensor=None,
init_op=None,
init_feed_dict=None,
init_fn=None,
log_every_steps=10,
supervisor_is_chief=True,
supervisor_master='',
supervisor_save_model_secs=600,
keep_checkpoint_max=5,
supervisor_save_summaries_steps=100,
feed_fn=None,
steps=None,
fail_on_nan_loss=True,
monitors=None,
max_steps=None):
"""Train a model.
Given `graph`, a directory to write outputs to (`output_dir`), and some ops,
run a training loop. The given `train_op` performs one step of training on the
  model. The `loss_op` represents the objective function of the training.
  The `train_op` is expected to increment the `global_step_tensor`, a scalar
  integer tensor
counting training steps. This function uses `Supervisor` to initialize the
graph (from a checkpoint if one is available in `output_dir`), write summaries
defined in the graph, and write regular checkpoints as defined by
`supervisor_save_model_secs`.
Training continues until `global_step_tensor` evaluates to `max_steps`, or, if
`fail_on_nan_loss`, until `loss_op` evaluates to `NaN`. In that case the
program is terminated with exit code 1.
Args:
graph: A graph to train. It is expected that this graph is not in use
elsewhere.
output_dir: A directory to write outputs to.
train_op: An op that performs one training step when run.
loss_op: A scalar loss tensor.
global_step_tensor: A tensor representing the global step. If none is given,
one is extracted from the graph using the same logic as in `Supervisor`.
init_op: An op that initializes the graph. If `None`, use `Supervisor`'s
default.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
init_fn: Optional callable passed to Supervisor to initialize the model.
log_every_steps: Output logs regularly. The logs contain timing data and the
current loss.
supervisor_is_chief: Whether the current process is the chief supervisor in
charge of restoring the model and running standard services.
supervisor_master: The master string to use when preparing the session.
supervisor_save_model_secs: Save a checkpoint every
`supervisor_save_model_secs` seconds when training.
keep_checkpoint_max: The maximum number of recent checkpoint files to
keep. As new files are created, older files are deleted. If None or 0,
all checkpoint files are kept. This is simply passed as the max_to_keep
      arg to the tf.train.Saver constructor.
supervisor_save_summaries_steps: Save summaries every
      `supervisor_save_summaries_steps` steps when training.
feed_fn: A function that is called every iteration to produce a `feed_dict`
passed to `session.run` calls. Optional.
steps: Trains for this many steps (e.g. current global step + `steps`).
fail_on_nan_loss: If true, raise `NanLossDuringTrainingError` if `loss_op`
evaluates to `NaN`. If false, continue training as if nothing happened.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
max_steps: Number of total steps for which to train model. If `None`,
      train forever. Two calls of fit(steps=100) mean 200 training iterations
      in total. By contrast, a second call of fit(max_steps=100) performs no
      iterations, because the first call already reached step 100.
Returns:
The final loss value.
Raises:
ValueError: If `output_dir`, `train_op`, `loss_op`, or `global_step_tensor`
is not provided. See `tf.contrib.framework.get_global_step` for how we
look up the latter if not provided explicitly.
NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever
evaluates to `NaN`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
while True:
try:
return _train_internal(graph,
output_dir,
train_op,
loss_op,
global_step_tensor,
init_op,
init_feed_dict,
init_fn,
log_every_steps,
supervisor_is_chief,
supervisor_master,
supervisor_save_model_secs,
keep_checkpoint_max,
supervisor_save_summaries_steps,
feed_fn,
steps,
fail_on_nan_loss,
monitors,
max_steps)
except errors.AbortedError:
# Happens when PS restarts, keep training.
logging.warning('Training got Aborted error. Keep training.')
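# A hedged usage sketch of the (deprecated) API above. `my_graph`,
# `my_train_op` and `my_loss_op` are placeholders for ops built by the caller,
# including a global-step variable that `my_train_op` increments:
#
#   final_loss = train(my_graph, output_dir='/tmp/my_model',
#                      train_op=my_train_op, loss_op=my_loss_op,
#                      max_steps=1000)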
def _train_internal(graph,
output_dir,
train_op,
loss_op,
global_step_tensor,
init_op,
init_feed_dict,
init_fn,
log_every_steps,
supervisor_is_chief,
supervisor_master,
supervisor_save_model_secs,
keep_checkpoint_max,
supervisor_save_summaries_steps,
feed_fn,
steps,
fail_on_nan_loss,
monitors,
max_steps):
"""See train."""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
if not output_dir:
raise ValueError('Output directory should be non-empty %s.' % output_dir)
if train_op is None:
raise ValueError('Missing train_op.')
if loss_op is None:
raise ValueError('Missing loss_op.')
with graph.as_default():
global_step_tensor = contrib_variables.assert_or_get_global_step(
graph, global_step_tensor)
if global_step_tensor is None:
raise ValueError('No "global_step" was provided or found in the graph.')
# Get current step.
try:
start_step = load_variable(output_dir, global_step_tensor.name)
except (errors.NotFoundError, ValueError):
start_step = 0
summary_writer = (get_summary_writer(output_dir)
if supervisor_is_chief else None)
# Add default chief monitors if none were provided.
if not monitors:
monitors = monitors_lib.get_default_monitors(
loss_op=loss_op,
summary_op=logging_ops.get_summary_op(),
save_summary_steps=supervisor_save_summaries_steps,
summary_writer=summary_writer) if supervisor_is_chief else []
# TODO(ipolosukhin): Replace all functionality of Supervisor
# with Chief-Exclusive Monitors.
if not supervisor_is_chief:
    # Prune the list of monitors to the ones runnable on all workers.
monitors = [monitor for monitor in monitors if monitor.run_on_all_workers]
if max_steps is None:
max_steps = (start_step + steps) if steps else None
  # Start monitors; they can create graph parts.
for monitor in monitors:
monitor.begin(max_steps=max_steps)
supervisor = tf_supervisor.Supervisor(
graph,
init_op=init_op or tf_supervisor.Supervisor.USE_DEFAULT,
init_feed_dict=init_feed_dict,
is_chief=supervisor_is_chief,
logdir=output_dir,
saver=_make_saver(graph, keep_checkpoint_max),
global_step=global_step_tensor,
summary_op=None,
summary_writer=summary_writer,
save_model_secs=supervisor_save_model_secs,
init_fn=init_fn)
session = supervisor.PrepareSession(master=supervisor_master,
start_standard_services=True)
supervisor.StartQueueRunners(session)
with session:
get_current_step = lambda: session.run(global_step_tensor)
start_step = get_current_step()
last_step = start_step
last_log_step = start_step
loss_value = None
logging.info('Training steps [%d,%s)', last_step, 'inf'
if max_steps is None else str(max_steps))
excinfo = None
try:
while not supervisor.ShouldStop() and (
(max_steps is None) or (last_step < max_steps)):
start_time = time.time()
feed_dict = feed_fn() if feed_fn is not None else None
outputs, should_stop = _run_with_monitors(
session, last_step + 1, [train_op, loss_op], feed_dict, monitors)
loss_value = outputs[loss_op.name]
if np.isnan(loss_value):
failure_message = 'Model diverged with loss = NaN.'
if fail_on_nan_loss:
logging.error(failure_message)
raise monitors_lib.NanLossDuringTrainingError()
else:
logging.warning(failure_message)
if should_stop:
break
this_step = get_current_step()
if this_step <= last_step:
logging.error(
'Global step was not incremented by train op at step %s'
': new step %d', last_step, this_step)
last_step = this_step
is_last_step = (max_steps is not None) and (last_step >= max_steps)
if is_last_step or (last_step - last_log_step >= log_every_steps):
logging.info(
'training step %d, loss = %.5f (%.3f sec/batch).',
last_step, loss_value, float(time.time() - start_time))
last_log_step = last_step
except errors.OutOfRangeError as e:
logging.warn('Got exception during tf.learn training loop possibly '
'due to exhausted input queue %s.', e)
except StopIteration:
      logging.info('Exhausted input iterator.')
except BaseException as e: # pylint: disable=broad-except
# Hold on to any other exceptions while we try recording a final
# checkpoint and summary.
excinfo = sys.exc_info()
finally:
try:
# Call supervisor.Stop() from within a try block because it re-raises
# exceptions thrown by the supervised threads.
supervisor.Stop(close_summary_writer=False)
# Save one last checkpoint and summaries
# TODO(wicke): This should be handled by Supervisor
# In case we encountered an exception in the try block before we updated
# last_step, update it here (again).
last_step = get_current_step()
if supervisor_is_chief:
ckpt_path = supervisor.save_path
logging.info('Saving checkpoint for step %d to checkpoint: %s.',
last_step, ckpt_path)
supervisor.saver.save(session, ckpt_path, global_step=last_step)
# Finish monitors.
for monitor in monitors:
monitor.end()
    # Catch OutOfRangeError, which is thrown when the queue is out of data
    # (and for other reasons as well).
except errors.OutOfRangeError as e:
logging.warn('OutOfRangeError in tf.learn final checkpoint possibly '
'due to exhausted input queue. Note: summary_op is not '
'expected to trigger dequeues. %s.', e)
except BaseException as e: # pylint: disable=broad-except
# If we don't already have an exception to re-raise, raise this one.
if not excinfo:
raise
# Otherwise, log this one and raise the other in the finally block.
logging.error('Got exception during tf.learn final checkpoint %s.', e)
finally:
if excinfo:
reraise(*excinfo)
return loss_value
def _get_first_op_from_collection(collection_name):
elements = ops.get_collection(collection_name)
if elements:
return elements[0]
return None
def _get_saver():
"""Lazy init and return saver."""
saver = _get_first_op_from_collection(ops.GraphKeys.SAVERS)
if saver is None and variables.global_variables():
saver = tf_saver.Saver()
ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
return saver
def _get_ready_op():
ready_op = _get_first_op_from_collection(ops.GraphKeys.READY_OP)
if ready_op is None:
ready_op = variables.report_uninitialized_variables()
ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)
return ready_op
def _get_local_init_op():
"""Returns the local init ops to initialize tables and local variables."""
local_init_op = _get_first_op_from_collection(
ops.GraphKeys.LOCAL_INIT_OP)
if local_init_op is None:
op_list = [
variables.local_variables_initializer(),
lookup_ops.tables_initializer()
]
if op_list:
local_init_op = control_flow_ops.group(*op_list)
ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)
return local_init_op
def _eval_results_to_str(eval_results):
return ', '.join('%s = %s' % (k, v) for k, v in sorted(eval_results.items()))
def _write_summary_results(output_dir, eval_results, current_global_step):
"""Writes eval results into summary file in given dir."""
logging.info('Saving evaluation summary for step %d: %s', current_global_step,
_eval_results_to_str(eval_results))
summary_writer = get_summary_writer(output_dir)
summary = summary_pb2.Summary()
for key in eval_results:
if eval_results[key] is None:
continue
value = summary.value.add()
value.tag = key
if (isinstance(eval_results[key], np.float32) or
isinstance(eval_results[key], float)):
value.simple_value = float(eval_results[key])
else:
logging.warn('Skipping summary for %s, must be a float or np.float32.',
key)
summary_writer.add_summary(summary, current_global_step)
summary_writer.flush()
@_graph_action_deprecation
def evaluate(graph,
output_dir,
checkpoint_path,
eval_dict,
update_op=None,
global_step_tensor=None,
supervisor_master='',
log_every_steps=10,
feed_fn=None,
max_steps=None):
"""Evaluate a model loaded from a checkpoint.
Given `graph`, a directory to write summaries to (`output_dir`), a checkpoint
to restore variables from, and a `dict` of `Tensor`s to evaluate, run an eval
loop for `max_steps` steps, or until an exception (generally, an
end-of-input signal from a reader operation) is raised from running
`eval_dict`.
In each step of evaluation, all tensors in the `eval_dict` are evaluated, and
every `log_every_steps` steps, they are logged. At the very end of evaluation,
a summary is evaluated (finding the summary ops using `Supervisor`'s logic)
and written to `output_dir`.
Args:
graph: A `Graph` to train. It is expected that this graph is not in use
elsewhere.
output_dir: A string containing the directory to write a summary to.
checkpoint_path: A string containing the path to a checkpoint to restore.
Can be `None` if the graph doesn't require loading any variables.
eval_dict: A `dict` mapping string names to tensors to evaluate. It is
evaluated in every logging step. The result of the final evaluation is
returned. If `update_op` is None, then it's evaluated in every step. If
`max_steps` is `None`, this should depend on a reader that will raise an
end-of-input exception when the inputs are exhausted.
update_op: A `Tensor` which is run in every step.
global_step_tensor: A `Variable` containing the global step. If `None`,
one is extracted from the graph using the same logic as in `Supervisor`.
Used to place eval summaries on training curves.
supervisor_master: The master string to use when preparing the session.
log_every_steps: Integer. Output logs every `log_every_steps` evaluation
steps. The logs contain the `eval_dict` and timing information.
feed_fn: A function that is called every iteration to produce a `feed_dict`
passed to `session.run` calls. Optional.
max_steps: Integer. Evaluate `eval_dict` this many times.
Returns:
A tuple `(eval_results, global_step)`:
eval_results: A `dict` mapping `string` to numeric values (`int`, `float`)
that are the result of running eval_dict in the last step. `None` if no
eval steps were run.
global_step: The global step this evaluation corresponds to.
Raises:
ValueError: if `output_dir` is empty.
"""
if not output_dir:
raise ValueError('Output directory should be non-empty %s.' % output_dir)
with graph.as_default():
global_step_tensor = contrib_variables.assert_or_get_global_step(
graph, global_step_tensor)
# Create or get summary op, global_step and saver.
saver = _get_saver()
local_init_op = _get_local_init_op()
ready_for_local_init_op = _get_first_op_from_collection(
ops.GraphKeys.READY_FOR_LOCAL_INIT_OP)
ready_op = _get_ready_op()
session_manager = session_manager_lib.SessionManager(
local_init_op=local_init_op,
ready_op=ready_op,
ready_for_local_init_op=ready_for_local_init_op)
session, initialized = session_manager.recover_session(
master=supervisor_master,
saver=saver,
checkpoint_dir=checkpoint_path)
# Start queue runners.
coord = coordinator.Coordinator()
threads = queue_runner.start_queue_runners(session, coord)
with session:
if not initialized:
logging.warning('Failed to initialize from %s.', checkpoint_path)
# TODO(ipolosukhin): This should be failing, but old code relies on that.
session.run(variables.global_variables_initializer())
if checkpoint_path:
_restore_from_checkpoint(session, graph, checkpoint_path, saver)
current_global_step = session.run(global_step_tensor)
eval_results = None
# TODO(amodei): Fix this to run through the eval set exactly once.
step = 0
eval_step = None
feed_dict = None
logging.info('Eval steps [%d,%s) for training step %d.', step,
'inf' if max_steps is None
else str(max_steps), current_global_step)
try:
try:
while (max_steps is None) or (step < max_steps):
step += 1
start_time = time.time()
feed_dict = feed_fn() if feed_fn is not None else None
if update_op is not None:
session.run(update_op, feed_dict=feed_dict)
else:
eval_results = session.run(eval_dict, feed_dict=feed_dict)
eval_step = step
# TODO(wicke): We should assert that the global step hasn't changed.
if step % log_every_steps == 0:
if eval_step is None or step != eval_step:
eval_results = session.run(eval_dict, feed_dict=feed_dict)
eval_step = step
duration = time.time() - start_time
logging.info('Results after %d steps (%.3f sec/batch): %s.',
step, float(duration),
_eval_results_to_str(eval_results))
finally:
if eval_results is None or step != eval_step:
eval_results = session.run(eval_dict, feed_dict=feed_dict)
eval_step = step
# Stop session first, before queue runners.
session.close()
# Stop queue runners.
try:
coord.request_stop()
coord.join(threads, stop_grace_period_secs=120)
except (RuntimeError, errors.CancelledError) as e:
logging.warning('Coordinator didn\'t stop cleanly: %s', e)
    # Catch OutOfRangeError, which is thrown when the queue is out of data
    # (and for other reasons as well).
except errors.OutOfRangeError as e:
if max_steps is None:
logging.info('Input queue is exhausted.')
else:
logging.warn('Input queue is exhausted: %s.', e)
    # Catch StopIteration, which is thrown when the DataReader is out of data.
except StopIteration as e:
if max_steps is None:
logging.info('Input iterator is exhausted.')
else:
logging.warn('Input iterator is exhausted: %s.', e)
# Save summaries for this evaluation.
_write_summary_results(output_dir, eval_results, current_global_step)
return eval_results, current_global_step
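# Hedged sketch of evaluating a checkpoint written by `train` above; the
# graph, metric dict and paths are illustrative placeholders:
#
#   eval_results, global_step = evaluate(
#       my_eval_graph, output_dir='/tmp/my_model/eval',
#       checkpoint_path='/tmp/my_model/model.ckpt-1000',
#       eval_dict={'accuracy': my_accuracy_tensor}, max_steps=100)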
@_graph_action_deprecation
def run_n(output_dict, feed_dict=None, restore_checkpoint_path=None, n=1):
"""Run `output_dict` tensors `n` times, with the same `feed_dict` each run.
Args:
output_dict: A `dict` mapping string names to tensors to run. Must all be
from the same graph.
feed_dict: `dict` of input values to feed each run.
restore_checkpoint_path: A string containing the path to a checkpoint to
restore.
n: Number of times to repeat.
Returns:
A list of `n` `dict` objects, each containing values read from `output_dict`
tensors.
"""
return run_feeds(
output_dict=output_dict,
feed_dicts=itertools.repeat(feed_dict, n),
restore_checkpoint_path=restore_checkpoint_path)
@_graph_action_deprecation
def run_feeds_iter(output_dict, feed_dicts, restore_checkpoint_path=None):
"""Run `output_dict` tensors with each input in `feed_dicts`.
If `restore_checkpoint_path` is supplied, restore from checkpoint. Otherwise,
init all variables.
Args:
output_dict: A `dict` mapping string names to `Tensor` objects to run.
Tensors must all be from the same graph.
feed_dicts: Iterable of `dict` objects of input values to feed.
restore_checkpoint_path: A string containing the path to a checkpoint to
restore.
Yields:
A sequence of dicts of values read from `output_dict` tensors, one item
yielded for each item in `feed_dicts`. Keys are the same as `output_dict`,
values are the results read from the corresponding `Tensor` in
`output_dict`.
Raises:
ValueError: if `output_dict` or `feed_dicts` is None or empty.
"""
if not output_dict:
raise ValueError('output_dict is invalid: %s.' % output_dict)
if not feed_dicts:
raise ValueError('feed_dicts is invalid: %s.' % feed_dicts)
graph = contrib_ops.get_graph_from_inputs(output_dict.values())
with graph.as_default() as g:
with tf_session.Session('') as session:
session.run(
resources.initialize_resources(resources.shared_resources() +
resources.local_resources()))
if restore_checkpoint_path:
_restore_from_checkpoint(session, g, restore_checkpoint_path)
else:
session.run(variables.global_variables_initializer())
session.run(variables.local_variables_initializer())
session.run(lookup_ops.tables_initializer())
coord = coordinator.Coordinator()
threads = None
try:
threads = queue_runner.start_queue_runners(session, coord=coord)
for f in feed_dicts:
yield session.run(output_dict, f)
finally:
coord.request_stop()
if threads:
coord.join(threads, stop_grace_period_secs=120)
@_graph_action_deprecation
def run_feeds(*args, **kwargs):
"""See run_feeds_iter(). Returns a `list` instead of an iterator."""
return list(run_feeds_iter(*args, **kwargs))
@_graph_action_deprecation
def infer(restore_checkpoint_path, output_dict, feed_dict=None):
"""Restore graph from `restore_checkpoint_path` and run `output_dict` tensors.
If `restore_checkpoint_path` is supplied, restore from checkpoint. Otherwise,
init all variables.
Args:
restore_checkpoint_path: A string containing the path to a checkpoint to
restore.
output_dict: A `dict` mapping string names to `Tensor` objects to run.
Tensors must all be from the same graph.
feed_dict: `dict` object mapping `Tensor` objects to input values to feed.
Returns:
Dict of values read from `output_dict` tensors. Keys are the same as
`output_dict`, values are the results read from the corresponding `Tensor`
in `output_dict`.
Raises:
ValueError: if `output_dict` or `feed_dicts` is None or empty.
"""
return run_feeds(output_dict=output_dict,
feed_dicts=[feed_dict] if feed_dict is not None else [None],
restore_checkpoint_path=restore_checkpoint_path)[0]
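# Hedged sketch of `infer`; `my_probs_tensor` and the checkpoint path are
# illustrative placeholders:
#
#   outputs = infer('/tmp/my_model/model.ckpt-1000',
#                   output_dict={'probs': my_probs_tensor})
#   print(outputs['probs'])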
|
apache-2.0
|
ErykB2000/home-assistant
|
homeassistant/helpers/state.py
|
4
|
1685
|
"""
homeassistant.helpers.state
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Helpers that help with state related things.
"""
import logging
from homeassistant import State
import homeassistant.util.dt as dt_util
from homeassistant.const import (
STATE_ON, STATE_OFF, SERVICE_TURN_ON, SERVICE_TURN_OFF, ATTR_ENTITY_ID)
_LOGGER = logging.getLogger(__name__)
# pylint: disable=too-few-public-methods, attribute-defined-outside-init
class TrackStates(object):
"""
    Records the time when the with-block is entered. When the block is
    exited, all states that have changed since that time are added to the
    list returned by __enter__.
"""
def __init__(self, hass):
self.hass = hass
self.states = []
def __enter__(self):
self.now = dt_util.utcnow()
return self.states
def __exit__(self, exc_type, exc_value, traceback):
self.states.extend(self.hass.states.get_since(self.now))
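# Illustrative sketch of the context manager above (`hass` is a running
# HomeAssistant instance):
#
#   with TrackStates(hass) as changed:
#       hass.services.call('light', 'turn_on')
#   # `changed` now holds every State updated inside the block.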
def reproduce_state(hass, states, blocking=False):
""" Takes in a state and will try to have the entity reproduce it. """
if isinstance(states, State):
states = [states]
for state in states:
current_state = hass.states.get(state.entity_id)
if current_state is None:
continue
if state.state == STATE_ON:
service = SERVICE_TURN_ON
elif state.state == STATE_OFF:
service = SERVICE_TURN_OFF
else:
_LOGGER.warning("Unable to reproduce state for %s", state)
continue
service_data = dict(state.attributes)
service_data[ATTR_ENTITY_ID] = state.entity_id
hass.services.call(state.domain, service, service_data, blocking)
|
mit
|
seanfisk/buzzword-bingo-server
|
django/contrib/gis/tests/geogapp/tests.py
|
222
|
4080
|
"""
Tests for geography support in PostGIS 1.5+
"""
import os
from django.contrib.gis import gdal
from django.contrib.gis.measure import D
from django.test import TestCase
from models import City, County, Zipcode
class GeographyTest(TestCase):
def test01_fixture_load(self):
"Ensure geography features loaded properly."
self.assertEqual(8, City.objects.count())
def test02_distance_lookup(self):
"Testing GeoQuerySet distance lookup support on non-point geography fields."
z = Zipcode.objects.get(code='77002')
cities1 = list(City.objects
.filter(point__distance_lte=(z.poly, D(mi=500)))
.order_by('name')
.values_list('name', flat=True))
cities2 = list(City.objects
.filter(point__dwithin=(z.poly, D(mi=500)))
.order_by('name')
.values_list('name', flat=True))
for cities in [cities1, cities2]:
self.assertEqual(['Dallas', 'Houston', 'Oklahoma City'], cities)
def test03_distance_method(self):
"Testing GeoQuerySet.distance() support on non-point geography fields."
        # `GeoQuerySet.distance` is not allowed on geometry fields.
htown = City.objects.get(name='Houston')
qs = Zipcode.objects.distance(htown.point)
def test04_invalid_operators_functions(self):
"Ensuring exceptions are raised for operators & functions invalid on geography fields."
# Only a subset of the geometry functions & operator are available
# to PostGIS geography types. For more information, visit:
# http://postgis.refractions.net/documentation/manual-1.5/ch08.html#PostGIS_GeographyFunctions
z = Zipcode.objects.get(code='77002')
# ST_Within not available.
self.assertRaises(ValueError, City.objects.filter(point__within=z.poly).count)
# `@` operator not available.
self.assertRaises(ValueError, City.objects.filter(point__contained=z.poly).count)
# Regression test for #14060, `~=` was never really implemented for PostGIS.
htown = City.objects.get(name='Houston')
self.assertRaises(ValueError, City.objects.get, point__exact=htown.point)
def test05_geography_layermapping(self):
"Testing LayerMapping support on models with geography fields."
# There is a similar test in `layermap` that uses the same data set,
# but the County model here is a bit different.
if not gdal.HAS_GDAL: return
from django.contrib.gis.utils import LayerMapping
# Getting the shapefile and mapping dictionary.
shp_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'data'))
co_shp = os.path.join(shp_path, 'counties', 'counties.shp')
co_mapping = {'name' : 'Name',
'state' : 'State',
'mpoly' : 'MULTIPOLYGON',
}
# Reference county names, number of polygons, and state names.
names = ['Bexar', 'Galveston', 'Harris', 'Honolulu', 'Pueblo']
num_polys = [1, 2, 1, 19, 1] # Number of polygons for each.
st_names = ['Texas', 'Texas', 'Texas', 'Hawaii', 'Colorado']
lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269, unique='name')
lm.save(silent=True, strict=True)
for c, name, num_poly, state in zip(County.objects.order_by('name'), names, num_polys, st_names):
self.assertEqual(4326, c.mpoly.srid)
self.assertEqual(num_poly, len(c.mpoly))
self.assertEqual(name, c.name)
self.assertEqual(state, c.state)
def test06_geography_area(self):
"Testing that Area calculations work on geography columns."
from django.contrib.gis.measure import A
# SELECT ST_Area(poly) FROM geogapp_zipcode WHERE code='77002';
ref_area = 5439084.70637573
tol = 5
z = Zipcode.objects.area().get(code='77002')
self.assertAlmostEqual(z.area.sq_m, ref_area, tol)
|
bsd-3-clause
|
bennylope/garland
|
setup.py
|
1
|
1539
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open("garland.py", "r") as module_file:
for line in module_file:
if line.startswith("__version__"):
version_string = line.split("=")[1]
version = version_string.strip().replace("'", "")
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
sys.exit()
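# The 'publish' shortcut above uploads both an sdist and a wheel in one step
# (`python setup.py publish`) and exits before the normal setup() call runs.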
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
test_requirements = [
'mock', # For older Python versions
]
setup(
name='garland',
version=version,
description='Python decorator mocking.',
long_description=readme + '\n\n' + history,
author='Ben Lopatin',
author_email='[email protected]',
url='https://github.com/bennylope/garland',
py_modules=['garland'],
include_package_data=True,
license="BSD",
zip_safe=False,
keywords='garland',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
tests_require=test_requirements
)
|
bsd-3-clause
|
yujinrobot/transitions
|
transitions/core.py
|
1
|
20113
|
try:
from builtins import object
except ImportError:
# python2
pass
from functools import partial
from collections import defaultdict, OrderedDict
from six import string_types
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def listify(obj):
if obj is None:
return []
else:
return obj if isinstance(obj, (list, type(None))) else [obj]
class State(object):
def __init__(self, name, on_enter=None, on_exit=None,
ignore_invalid_triggers=False):
"""
Args:
name (string): The name of the state
on_enter (string, list): Optional callable(s) to trigger when a
state is entered. Can be either a string providing the name of
a callable, or a list of strings.
on_exit (string, list): Optional callable(s) to trigger when a
state is exited. Can be either a string providing the name of a
callable, or a list of strings.
ignore_invalid_triggers (Boolean): Optional flag to indicate if
unhandled/invalid triggers should raise an exception
"""
self.name = name
self.ignore_invalid_triggers = ignore_invalid_triggers
self.on_enter = listify(on_enter) if on_enter else []
self.on_exit = listify(on_exit) if on_exit else []
def enter(self, event_data):
""" Triggered when a state is entered. """
for oe in self.on_enter:
event_data.machine.callback(
getattr(event_data.model, oe), event_data)
logger.info("Entered state %s", self.name)
def exit(self, event_data):
""" Triggered when a state is exited. """
for oe in self.on_exit:
event_data.machine.callback(
getattr(event_data.model, oe), event_data)
logger.info("Exited state %s", self.name)
def add_callback(self, trigger, func):
""" Add a new enter or exit callback.
Args:
trigger (string): The type of triggering event. Must be one of
'enter' or 'exit'.
func (string): The name of the callback function.
"""
callback_list = getattr(self, 'on_' + trigger)
callback_list.append(func)
class Transition(object):
class Condition(object):
def __init__(self, func, target=True):
self.func = func
self.target = target
def check(self, model):
""" Check whether the condition passes.
Args:
model (object): the data model attached to the current Machine.
"""
return getattr(model, self.func)() == self.target
def __init__(self, source, dest, conditions=None, unless=None, before=None,
after=None):
"""
Args:
source (string): The name of the source State.
dest (string): The name of the destination State.
conditions (string, list): Condition(s) that must pass in order for
the transition to take place. Either a string providing the
name of a callable, or a list of callables. For the transition
to occur, ALL callables must return True.
unless (string, list): Condition(s) that must return False in order
for the transition to occur. Behaves just like conditions arg
otherwise.
before (string or list): callbacks to trigger before the
transition.
after (string or list): callbacks to trigger after the transition.
"""
self.source = source
self.dest = dest
self.before = [] if before is None else listify(before)
self.after = [] if after is None else listify(after)
self.conditions = []
if conditions is not None:
for c in listify(conditions):
self.conditions.append(self.Condition(c))
if unless is not None:
for u in listify(unless):
self.conditions.append(self.Condition(u, target=False))
def execute(self, event_data):
""" Execute the transition.
Args:
event: An instance of class EventData.
"""
logger.info("Initiating transition from state %s to state %s...",
self.source, self.dest)
machine = event_data.machine
for c in self.conditions:
if not c.check(event_data.model):
logger.info("Transition condition failed: %s() does not " +
"return %s. Transition halted.", c.func, c.target)
return False
for func in self.before:
machine.callback(getattr(event_data.model, func), event_data)
logger.info("Executing callback '%s' before transition.", func)
machine.get_state(self.source).exit(event_data)
machine.set_state(self.dest)
event_data.update()
machine.get_state(self.dest).enter(event_data)
for func in self.after:
machine.callback(getattr(event_data.model, func), event_data)
logger.info("Executed callback '%s' after transition.", func)
return True
def add_callback(self, trigger, func):
""" Add a new before or after callback.
Args:
trigger (string): The type of triggering event. Must be one of
'before' or 'after'.
func (string): The name of the callback function.
"""
callback_list = getattr(self, trigger)
callback_list.append(func)
class EventData(object):
def __init__(self, state, event, machine, model, *args, **kwargs):
"""
Args:
state (State): The State from which the Event was triggered.
event (Event): The triggering Event.
machine (Machine): The current Machine instance.
model (object): The model/object the machine is bound to.
args and kwargs: Optional positional or named arguments that will
be stored internally for possible later use.
"""
self.state = state
self.event = event
self.machine = machine
self.model = model
self.args = args
self.kwargs = kwargs
def update(self):
""" Updates the current State to accurately reflect the Machine. """
self.state = self.machine.current_state
class Event(object):
def __init__(self, name, machine):
"""
Args:
name (string): The name of the event, which is also the name of the
triggering callable (e.g., 'advance' implies an advance()
method).
machine (Machine): The current Machine instance.
"""
self.name = name
self.machine = machine
self.transitions = defaultdict(list)
def add_transition(self, transition):
""" Add a transition to the list of potential transitions.
Args:
transition (Transition): The Transition instance to add to the
list.
"""
self.transitions[transition.source].append(transition)
def trigger(self, *args, **kwargs):
""" Serially execute all transitions that match the current state,
halting as soon as one successfully completes.
Args:
args and kwargs: Optional positional or named arguments that will
be passed onto the EventData object, enabling arbitrary state
information to be passed on to downstream triggered functions.
"""
state_name = self.machine.current_state.name
if state_name not in self.transitions:
if not self.machine.current_state.ignore_invalid_triggers:
raise MachineError(
"Can't trigger event %s from state %s!" % (self.name,
state_name))
event = EventData(self.machine.current_state, self,
self.machine, self.machine.model, *args, **kwargs)
for t in self.transitions[state_name]:
if t.execute(event):
return True
return False
class Machine(object):
def __init__(self, model=None, states=None, initial=None, transitions=None,
send_event=False, auto_transitions=True,
ordered_transitions=False, ignore_invalid_triggers=None,
before_state_change=None, after_state_change=None):
"""
Args:
model (object): The object whose states we want to manage. If None,
the current Machine instance will be used the model (i.e., all
triggering events will be attached to the Machine itself).
states (list): A list of valid states. Each element can be either a
string or a State instance. If string, a new generic State
instance will be created that has the same name as the string.
initial (string): The initial state of the Machine.
transitions (list): An optional list of transitions. Each element
is a dictionary of named arguments to be passed onto the
Transition initializer.
send_event (boolean): When True, any arguments passed to trigger
methods will be wrapped in an EventData object, allowing
indirect and encapsulated access to data. When False, all
positional and keyword arguments will be passed directly to all
callback methods.
auto_transitions (boolean): When True (default), every state will
automatically have an associated to_{state}() convenience
trigger in the base model.
ordered_transitions (boolean): Convenience argument that calls
add_ordered_transitions() at the end of initialization if set
to True.
ignore_invalid_triggers: when True, any calls to trigger methods
that are not valid for the present state (e.g., calling an
a_to_b() trigger when the current state is c) will be silently
ignored rather than raising an invalid transition exception.
before_state_change: A callable called on every change state before
the transition happened. It receives the very same args as normal
callbacks
after_state_change: A callable called on every change state after
the transition happened. It receives the very same args as normal
callbacks
"""
self.model = self if model is None else model
self.states = OrderedDict()
self.events = {}
self.current_state = None
self.send_event = send_event
self.auto_transitions = auto_transitions
self.ignore_invalid_triggers = ignore_invalid_triggers
self.before_state_change = before_state_change
self.after_state_change = after_state_change
if initial is None:
self.add_states('initial')
initial = 'initial'
self._initial = initial
if states is not None:
self.add_states(states)
self.set_state(self._initial)
if transitions is not None:
transitions = listify(transitions)
for t in transitions:
if isinstance(t, list):
self.add_transition(*t)
else:
self.add_transition(**t)
if ordered_transitions:
self.add_ordered_transitions()
@property
def initial(self):
""" Return the initial state. """
return self._initial
def is_state(self, state):
""" Check whether the current state matches the named state. """
return self.current_state.name == state
def get_state(self, state):
""" Return the State instance with the passed name. """
if state not in self.states:
raise ValueError("State '%s' is not a registered state." % state)
return self.states[state]
def set_state(self, state):
""" Set the current state. """
if isinstance(state, string_types):
state = self.get_state(state)
self.current_state = state
self.model.state = self.current_state.name
def add_state(self, *args, **kwargs):
""" Alias for add_states. """
self.add_states(*args, **kwargs)
def add_states(self, states, on_enter=None, on_exit=None,
ignore_invalid_triggers=None):
""" Add new state(s).
Args:
state (list, string, dict, or State): a list, a State instance, the
name of a new state, or a dict with keywords to pass on to the
State initializer. If a list, each element can be of any of the
latter three types.
on_enter (string or list): callbacks to trigger when the state is
entered. Only valid if first argument is string.
on_exit (string or list): callbacks to trigger when the state is
exited. Only valid if first argument is string.
ignore_invalid_triggers: when True, any calls to trigger methods
that are not valid for the present state (e.g., calling an
a_to_b() trigger when the current state is c) will be silently
ignored rather than raising an invalid transition exception.
Note that this argument takes precedence over the same
argument defined at the Machine level, and is in turn
overridden by any ignore_invalid_triggers explicitly
passed in an individual state's initialization arguments.
"""
ignore = ignore_invalid_triggers
if ignore is None:
ignore = self.ignore_invalid_triggers
states = listify(states)
for state in states:
if isinstance(state, string_types):
state = State(
state, on_enter=on_enter, on_exit=on_exit,
ignore_invalid_triggers=ignore)
elif isinstance(state, dict):
if 'ignore_invalid_triggers' not in state:
state['ignore_invalid_triggers'] = ignore
state = State(**state)
self.states[state.name] = state
setattr(self.model, 'is_%s' %
state.name, partial(self.is_state, state.name))
state_name = state.name
if self != self.model and hasattr(
self.model, 'on_enter_' + state_name):
state.add_callback('enter', 'on_enter_' + state_name)
if self != self.model and hasattr(
self.model, 'on_exit_' + state_name):
state.add_callback('exit', 'on_exit_' + state_name)
# Add automatic transitions after all states have been created
if self.auto_transitions:
for s in self.states.keys():
self.add_transition('to_%s' % s, '*', s)
def add_transition(self, trigger, source, dest, conditions=None,
unless=None, before=None, after=None):
""" Create a new Transition instance and add it to the internal list.
Args:
trigger (string): The name of the method that will trigger the
transition. This will be attached to the currently specified
model (e.g., passing trigger='advance' will create a new
advance() method in the model that triggers the transition.)
source(string): The name of the source state--i.e., the state we
are transitioning away from.
dest (string): The name of the destination State--i.e., the state
we are transitioning into.
conditions (string or list): Condition(s) that must pass in order
for the transition to take place. Either a list providing the
name of a callable, or a list of callables. For the transition
to occur, ALL callables must return True.
unless (string, list): Condition(s) that must return False in order
for the transition to occur. Behaves just like conditions arg
otherwise.
before (string or list): Callables to call before the transition.
after (string or list): Callables to call after the transition.
"""
if trigger not in self.events:
self.events[trigger] = Event(trigger, self)
setattr(self.model, trigger, self.events[trigger].trigger)
if isinstance(source, string_types):
source = list(self.states.keys()) if source == '*' else [source]
if self.before_state_change:
before = listify(before) + listify(self.before_state_change)
if self.after_state_change:
after = listify(after) + listify(self.after_state_change)
for s in source:
t = Transition(s, dest, conditions, unless, before, after)
self.events[trigger].add_transition(t)
def add_ordered_transitions(self, states=None, trigger='next_state',
loop=True, loop_includes_initial=True):
""" Add a set of transitions that move linearly from state to state.
Args:
states (list): A list of state names defining the order of the
transitions. E.g., ['A', 'B', 'C'] will generate transitions
for A --> B, B --> C, and C --> A (if loop is True). If states
is None, all states in the current instance will be used.
trigger (string): The name of the trigger method that advances to
the next state in the sequence.
loop (boolean): Whether or not to add a transition from the last
state to the first state.
loop_includes_initial (boolean): If no initial state was defined in
the machine, setting this to True will cause the _initial state
placeholder to be included in the added transitions.
"""
if states is None:
states = list(self.states.keys()) # need to listify for Python3
if len(states) < 2:
raise MachineError("Can't create ordered transitions on a Machine "
"with fewer than 2 states.")
for i in range(1, len(states)):
self.add_transition(trigger, states[i - 1], states[i])
if loop:
if not loop_includes_initial:
states.remove(self._initial)
self.add_transition(trigger, states[-1], states[0])
def callback(self, func, event_data):
""" Trigger a callback function, possibly wrapping it in an EventData
instance.
Args:
func (callable): The callback function.
event_data (EventData): An EventData instance to pass to the
callback (if event sending is enabled) or to extract arguments
from (if event sending is disabled).
"""
if self.send_event:
func(event_data)
else:
func(*event_data.args, **event_data.kwargs)
def __getattr__(self, name):
terms = name.split('_')
if terms[0] in ['before', 'after']:
name = '_'.join(terms[1:])
if name not in self.events:
raise MachineError('Event "%s" is not registered.' % name)
return partial(self.events[name].add_callback, terms[0])
        elif name.startswith('on_enter') or name.startswith('on_exit'):
            state = self.get_state('_'.join(terms[2:]))
            return partial(state.add_callback, terms[1])
        raise AttributeError("'%s' object has no attribute '%s'"
                             % (type(self).__name__, name))
class MachineError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
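
# Illustrative usage (not part of the original module): a minimal sketch that
# assumes the Machine constructor accepts `states` and `initial` keyword
# arguments and acts as its own model when none is given, as the code above
# suggests (trigger methods and a `state` attribute live on the model).
if __name__ == '__main__':
    machine = Machine(states=['solid', 'liquid', 'gas'], initial='solid')
    machine.add_transition('melt', 'solid', 'liquid')
    machine.add_transition('evaporate', 'liquid', 'gas')
    machine.melt()          # trigger methods are attached to the model
    machine.evaporate()
    print(machine.state)    # -> 'gas'
    machine.to_solid()      # auto transition added when auto_transitions is on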
|
mit
|
Programmica/pygtk-tutorial
|
codebank/iconviewdual.py
|
1
|
2745
|
#!/usr/bin/env python
import gtk
class IconViewDual:
def item_dragged(self, iconview, context, selection, info, time):
model = iconview.get_model()
selected = iconview.get_selected_items()
files = []
        # Iterate over every selected path (not just the first one) so that
        # multi-item drags export all of the selected files.
        for path in selected:
            location = "file://" + model[path][1]
            files.append(location)
selection.set_uris(files)
def item_dropped(self, iconview, context, x, y, selection, info, time):
model = iconview.get_model()
dropinfo = iconview.get_dest_item_at_pos(x, y)
for location in selection.get_uris():
location = location.replace("file://", "", 1)
pixbuf = gtk.gdk.pixbuf_new_from_file_at_size(location, 128, -1)
if dropinfo:
path, position = dropinfo
                treeiter = model.get_iter(path)
                if position in (gtk.ICON_VIEW_DROP_LEFT, gtk.ICON_VIEW_DROP_ABOVE):
                    model.insert_before(treeiter, [pixbuf, location])
                else:
                    model.insert_after(treeiter, [pixbuf, location])
else:
model.append([pixbuf, location])
if context.action == gtk.gdk.ACTION_MOVE:
context.finish(True, True, time)
def __init__(self):
window = gtk.Window()
window.set_default_size(625, 220)
hbox = gtk.HBox(True, 2)
liststore1 = gtk.ListStore(gtk.gdk.Pixbuf, str)
iconview1 = gtk.IconView(liststore1)
iconview1.set_pixbuf_column(0)
iconview1.set_columns(-1)
liststore2 = gtk.ListStore(gtk.gdk.Pixbuf, str)
iconview2 = gtk.IconView(liststore2)
iconview2.set_pixbuf_column(0)
iconview2.set_columns(-1)
iconview1.enable_model_drag_source(gtk.gdk.BUTTON1_MASK, [("text/uri-list", 0, 0)], gtk.gdk.ACTION_MOVE)
iconview1.enable_model_drag_dest([("text/uri-list", 0, 1)], gtk.gdk.ACTION_MOVE)
iconview2.enable_model_drag_source(gtk.gdk.BUTTON1_MASK, [("text/uri-list", 0, 1)], gtk.gdk.ACTION_MOVE)
iconview2.enable_model_drag_dest([("text/uri-list", 0, 0)], gtk.gdk.ACTION_MOVE)
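        # Note (added for clarity): in each target tuple ("text/uri-list", 0, n)
        # the third element is an application-defined id that GTK hands back to
        # the drag callbacks as the `info` argument; matching between source and
        # destination is done on the target name itself.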
window.connect("destroy", lambda w: gtk.main_quit())
iconview1.connect("drag-data-get", self.item_dragged)
iconview1.connect("drag-data-received", self.item_dropped)
iconview2.connect("drag-data-get", self.item_dragged)
iconview2.connect("drag-data-received", self.item_dropped)
window.add(hbox)
hbox.pack_start(iconview1)
hbox.pack_end(iconview2)
window.show_all()
IconViewDual()
gtk.main()
|
cc0-1.0
|
ccastell/Transfer-System
|
Website/env/lib/python3.5/site-packages/django/db/migrations/recorder.py
|
478
|
2868
|
from __future__ import unicode_literals
from django.apps.registry import Apps
from django.db import models
from django.db.utils import DatabaseError
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now
from .exceptions import MigrationSchemaMissing
class MigrationRecorder(object):
"""
Deals with storing migration records in the database.
Because this table is actually itself used for dealing with model
creation, it's the one thing we can't do normally via migrations.
We manually handle table creation/schema updating (using schema backend)
and then have a floating model to do queries with.
If a migration is unapplied its row is removed from the table. Having
a row in the table always means a migration is applied.
"""
@python_2_unicode_compatible
class Migration(models.Model):
app = models.CharField(max_length=255)
name = models.CharField(max_length=255)
applied = models.DateTimeField(default=now)
class Meta:
apps = Apps()
app_label = "migrations"
db_table = "django_migrations"
def __str__(self):
return "Migration %s for %s" % (self.name, self.app)
def __init__(self, connection):
self.connection = connection
@property
def migration_qs(self):
return self.Migration.objects.using(self.connection.alias)
def ensure_schema(self):
"""
Ensures the table exists and has the correct schema.
"""
# If the table's there, that's fine - we've never changed its schema
# in the codebase.
if self.Migration._meta.db_table in self.connection.introspection.table_names(self.connection.cursor()):
return
# Make the table
try:
with self.connection.schema_editor() as editor:
editor.create_model(self.Migration)
except DatabaseError as exc:
raise MigrationSchemaMissing("Unable to create the django_migrations table (%s)" % exc)
def applied_migrations(self):
"""
Returns a set of (app, name) of applied migrations.
"""
self.ensure_schema()
return set(tuple(x) for x in self.migration_qs.values_list("app", "name"))
def record_applied(self, app, name):
"""
Records that a migration was applied.
"""
self.ensure_schema()
self.migration_qs.create(app=app, name=name)
def record_unapplied(self, app, name):
"""
Records that a migration was unapplied.
"""
self.ensure_schema()
self.migration_qs.filter(app=app, name=name).delete()
def flush(self):
"""
Deletes all migration records. Useful if you're testing migrations.
"""
self.migration_qs.all().delete()
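
# Illustrative usage (not part of the original module): a minimal sketch that
# assumes a configured Django project; `connection` would typically be
# django.db.connection or an entry from django.db.connections.
def _demo_recorder(connection):  # hypothetical helper, for illustration only
    recorder = MigrationRecorder(connection)
    recorder.record_applied('myapp', '0001_initial')
    assert ('myapp', '0001_initial') in recorder.applied_migrations()
    recorder.record_unapplied('myapp', '0001_initial')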
|
apache-2.0
|
loranbriggs/barberlist
|
build/psycopg2/build/lib.linux-x86_64-2.7/psycopg2/tests/test_dates.py
|
8
|
22260
|
#!/usr/bin/env python
# test_dates.py - unit test for dates handling
#
# Copyright (C) 2008-2011 James Henstridge <[email protected]>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import math
import unittest
import psycopg2
from psycopg2.tz import FixedOffsetTimezone, ZERO
from testconfig import dsn
class CommonDatetimeTestsMixin:
def execute(self, *args):
self.curs.execute(*args)
return self.curs.fetchone()[0]
def test_parse_date(self):
value = self.DATE('2007-01-01', self.curs)
self.assert_(value is not None)
self.assertEqual(value.year, 2007)
self.assertEqual(value.month, 1)
self.assertEqual(value.day, 1)
def test_parse_null_date(self):
value = self.DATE(None, self.curs)
self.assertEqual(value, None)
def test_parse_incomplete_date(self):
self.assertRaises(psycopg2.DataError, self.DATE, '2007', self.curs)
self.assertRaises(psycopg2.DataError, self.DATE, '2007-01', self.curs)
def test_parse_time(self):
value = self.TIME('13:30:29', self.curs)
self.assert_(value is not None)
self.assertEqual(value.hour, 13)
self.assertEqual(value.minute, 30)
self.assertEqual(value.second, 29)
def test_parse_null_time(self):
value = self.TIME(None, self.curs)
self.assertEqual(value, None)
def test_parse_incomplete_time(self):
self.assertRaises(psycopg2.DataError, self.TIME, '13', self.curs)
self.assertRaises(psycopg2.DataError, self.TIME, '13:30', self.curs)
def test_parse_datetime(self):
value = self.DATETIME('2007-01-01 13:30:29', self.curs)
self.assert_(value is not None)
self.assertEqual(value.year, 2007)
self.assertEqual(value.month, 1)
self.assertEqual(value.day, 1)
self.assertEqual(value.hour, 13)
self.assertEqual(value.minute, 30)
self.assertEqual(value.second, 29)
def test_parse_null_datetime(self):
value = self.DATETIME(None, self.curs)
self.assertEqual(value, None)
def test_parse_incomplete_datetime(self):
self.assertRaises(psycopg2.DataError,
self.DATETIME, '2007', self.curs)
self.assertRaises(psycopg2.DataError,
self.DATETIME, '2007-01', self.curs)
self.assertRaises(psycopg2.DataError,
self.DATETIME, '2007-01-01 13', self.curs)
self.assertRaises(psycopg2.DataError,
self.DATETIME, '2007-01-01 13:30', self.curs)
def test_parse_null_interval(self):
value = self.INTERVAL(None, self.curs)
self.assertEqual(value, None)
class DatetimeTests(unittest.TestCase, CommonDatetimeTestsMixin):
"""Tests for the datetime based date handling in psycopg2."""
def setUp(self):
self.conn = psycopg2.connect(dsn)
self.curs = self.conn.cursor()
self.DATE = psycopg2.extensions.PYDATE
self.TIME = psycopg2.extensions.PYTIME
self.DATETIME = psycopg2.extensions.PYDATETIME
self.INTERVAL = psycopg2.extensions.PYINTERVAL
def tearDown(self):
self.conn.close()
def test_parse_bc_date(self):
# datetime does not support BC dates
self.assertRaises(ValueError, self.DATE, '00042-01-01 BC', self.curs)
def test_parse_bc_datetime(self):
# datetime does not support BC dates
self.assertRaises(ValueError, self.DATETIME,
'00042-01-01 13:30:29 BC', self.curs)
def test_parse_time_microseconds(self):
value = self.TIME('13:30:29.123456', self.curs)
self.assertEqual(value.second, 29)
self.assertEqual(value.microsecond, 123456)
def test_parse_datetime_microseconds(self):
value = self.DATETIME('2007-01-01 13:30:29.123456', self.curs)
self.assertEqual(value.second, 29)
self.assertEqual(value.microsecond, 123456)
def check_time_tz(self, str_offset, offset):
from datetime import time, timedelta
base = time(13, 30, 29)
base_str = '13:30:29'
value = self.TIME(base_str + str_offset, self.curs)
# Value has time zone info and correct UTC offset.
        self.assertNotEqual(value.tzinfo, None)
self.assertEqual(value.utcoffset(), timedelta(seconds=offset))
# Time portion is correct.
self.assertEqual(value.replace(tzinfo=None), base)
def test_parse_time_timezone(self):
self.check_time_tz("+01", 3600)
self.check_time_tz("-01", -3600)
self.check_time_tz("+01:15", 4500)
self.check_time_tz("-01:15", -4500)
# The Python datetime module does not support time zone
# offsets that are not a whole number of minutes.
# We round the offset to the nearest minute.
self.check_time_tz("+01:15:00", 60 * (60 + 15))
self.check_time_tz("+01:15:29", 60 * (60 + 15))
self.check_time_tz("+01:15:30", 60 * (60 + 16))
self.check_time_tz("+01:15:59", 60 * (60 + 16))
self.check_time_tz("-01:15:00", -60 * (60 + 15))
self.check_time_tz("-01:15:29", -60 * (60 + 15))
self.check_time_tz("-01:15:30", -60 * (60 + 16))
self.check_time_tz("-01:15:59", -60 * (60 + 16))
def check_datetime_tz(self, str_offset, offset):
from datetime import datetime, timedelta
base = datetime(2007, 1, 1, 13, 30, 29)
base_str = '2007-01-01 13:30:29'
value = self.DATETIME(base_str + str_offset, self.curs)
# Value has time zone info and correct UTC offset.
        self.assertNotEqual(value.tzinfo, None)
self.assertEqual(value.utcoffset(), timedelta(seconds=offset))
# Datetime is correct.
self.assertEqual(value.replace(tzinfo=None), base)
# Conversion to UTC produces the expected offset.
UTC = FixedOffsetTimezone(0, "UTC")
value_utc = value.astimezone(UTC).replace(tzinfo=None)
self.assertEqual(base - value_utc, timedelta(seconds=offset))
def test_parse_datetime_timezone(self):
self.check_datetime_tz("+01", 3600)
self.check_datetime_tz("-01", -3600)
self.check_datetime_tz("+01:15", 4500)
self.check_datetime_tz("-01:15", -4500)
# The Python datetime module does not support time zone
# offsets that are not a whole number of minutes.
# We round the offset to the nearest minute.
self.check_datetime_tz("+01:15:00", 60 * (60 + 15))
self.check_datetime_tz("+01:15:29", 60 * (60 + 15))
self.check_datetime_tz("+01:15:30", 60 * (60 + 16))
self.check_datetime_tz("+01:15:59", 60 * (60 + 16))
self.check_datetime_tz("-01:15:00", -60 * (60 + 15))
self.check_datetime_tz("-01:15:29", -60 * (60 + 15))
self.check_datetime_tz("-01:15:30", -60 * (60 + 16))
self.check_datetime_tz("-01:15:59", -60 * (60 + 16))
def test_parse_time_no_timezone(self):
self.assertEqual(self.TIME("13:30:29", self.curs).tzinfo, None)
self.assertEqual(self.TIME("13:30:29.123456", self.curs).tzinfo, None)
def test_parse_datetime_no_timezone(self):
self.assertEqual(
self.DATETIME("2007-01-01 13:30:29", self.curs).tzinfo, None)
self.assertEqual(
self.DATETIME("2007-01-01 13:30:29.123456", self.curs).tzinfo, None)
def test_parse_interval(self):
value = self.INTERVAL('42 days 12:34:56.123456', self.curs)
self.assertNotEqual(value, None)
self.assertEqual(value.days, 42)
self.assertEqual(value.seconds, 45296)
self.assertEqual(value.microseconds, 123456)
def test_parse_negative_interval(self):
value = self.INTERVAL('-42 days -12:34:56.123456', self.curs)
self.assertNotEqual(value, None)
self.assertEqual(value.days, -43)
self.assertEqual(value.seconds, 41103)
self.assertEqual(value.microseconds, 876544)
def test_adapt_date(self):
from datetime import date
value = self.execute('select (%s)::date::text',
[date(2007, 1, 1)])
self.assertEqual(value, '2007-01-01')
def test_adapt_time(self):
from datetime import time
value = self.execute('select (%s)::time::text',
[time(13, 30, 29)])
self.assertEqual(value, '13:30:29')
def test_adapt_datetime(self):
from datetime import datetime
value = self.execute('select (%s)::timestamp::text',
[datetime(2007, 1, 1, 13, 30, 29)])
self.assertEqual(value, '2007-01-01 13:30:29')
def test_adapt_timedelta(self):
from datetime import timedelta
value = self.execute('select extract(epoch from (%s)::interval)',
[timedelta(days=42, seconds=45296,
microseconds=123456)])
seconds = math.floor(value)
self.assertEqual(seconds, 3674096)
self.assertEqual(int(round((value - seconds) * 1000000)), 123456)
    def test_adapt_negative_timedelta(self):
from datetime import timedelta
value = self.execute('select extract(epoch from (%s)::interval)',
[timedelta(days=-42, seconds=45296,
microseconds=123456)])
seconds = math.floor(value)
self.assertEqual(seconds, -3583504)
self.assertEqual(int(round((value - seconds) * 1000000)), 123456)
def _test_type_roundtrip(self, o1):
o2 = self.execute("select %s;", (o1,))
self.assertEqual(type(o1), type(o2))
return o2
def _test_type_roundtrip_array(self, o1):
o1 = [o1]
o2 = self.execute("select %s;", (o1,))
self.assertEqual(type(o1[0]), type(o2[0]))
def test_type_roundtrip_date(self):
from datetime import date
self._test_type_roundtrip(date(2010,5,3))
def test_type_roundtrip_datetime(self):
from datetime import datetime
dt = self._test_type_roundtrip(datetime(2010,5,3,10,20,30))
self.assertEqual(None, dt.tzinfo)
def test_type_roundtrip_datetimetz(self):
from datetime import datetime
import psycopg2.tz
tz = psycopg2.tz.FixedOffsetTimezone(8*60)
dt1 = datetime(2010,5,3,10,20,30, tzinfo=tz)
dt2 = self._test_type_roundtrip(dt1)
self.assertNotEqual(None, dt2.tzinfo)
self.assertEqual(dt1, dt2)
def test_type_roundtrip_time(self):
from datetime import time
self._test_type_roundtrip(time(10,20,30))
def test_type_roundtrip_interval(self):
from datetime import timedelta
self._test_type_roundtrip(timedelta(seconds=30))
def test_type_roundtrip_date_array(self):
from datetime import date
self._test_type_roundtrip_array(date(2010,5,3))
def test_type_roundtrip_datetime_array(self):
from datetime import datetime
self._test_type_roundtrip_array(datetime(2010,5,3,10,20,30))
def test_type_roundtrip_time_array(self):
from datetime import time
self._test_type_roundtrip_array(time(10,20,30))
def test_type_roundtrip_interval_array(self):
from datetime import timedelta
self._test_type_roundtrip_array(timedelta(seconds=30))
# Only run the datetime tests if psycopg was compiled with support.
if not hasattr(psycopg2.extensions, 'PYDATETIME'):
del DatetimeTests
class mxDateTimeTests(unittest.TestCase, CommonDatetimeTestsMixin):
"""Tests for the mx.DateTime based date handling in psycopg2."""
def setUp(self):
self.conn = psycopg2.connect(dsn)
self.curs = self.conn.cursor()
self.DATE = psycopg2._psycopg.MXDATE
self.TIME = psycopg2._psycopg.MXTIME
self.DATETIME = psycopg2._psycopg.MXDATETIME
self.INTERVAL = psycopg2._psycopg.MXINTERVAL
psycopg2.extensions.register_type(self.DATE, self.conn)
psycopg2.extensions.register_type(self.TIME, self.conn)
psycopg2.extensions.register_type(self.DATETIME, self.conn)
psycopg2.extensions.register_type(self.INTERVAL, self.conn)
psycopg2.extensions.register_type(psycopg2.extensions.MXDATEARRAY, self.conn)
psycopg2.extensions.register_type(psycopg2.extensions.MXTIMEARRAY, self.conn)
psycopg2.extensions.register_type(psycopg2.extensions.MXDATETIMEARRAY, self.conn)
psycopg2.extensions.register_type(psycopg2.extensions.MXINTERVALARRAY, self.conn)
def tearDown(self):
self.conn.close()
def test_parse_bc_date(self):
value = self.DATE('00042-01-01 BC', self.curs)
self.assert_(value is not None)
# mx.DateTime numbers BC dates from 0 rather than 1.
self.assertEqual(value.year, -41)
self.assertEqual(value.month, 1)
self.assertEqual(value.day, 1)
def test_parse_bc_datetime(self):
value = self.DATETIME('00042-01-01 13:30:29 BC', self.curs)
self.assert_(value is not None)
# mx.DateTime numbers BC dates from 0 rather than 1.
self.assertEqual(value.year, -41)
self.assertEqual(value.month, 1)
self.assertEqual(value.day, 1)
self.assertEqual(value.hour, 13)
self.assertEqual(value.minute, 30)
self.assertEqual(value.second, 29)
def test_parse_time_microseconds(self):
value = self.TIME('13:30:29.123456', self.curs)
self.assertEqual(math.floor(value.second), 29)
self.assertEqual(
int((value.second - math.floor(value.second)) * 1000000), 123456)
def test_parse_datetime_microseconds(self):
value = self.DATETIME('2007-01-01 13:30:29.123456', self.curs)
self.assertEqual(math.floor(value.second), 29)
self.assertEqual(
int((value.second - math.floor(value.second)) * 1000000), 123456)
def test_parse_time_timezone(self):
# Time zone information is ignored.
from mx.DateTime import Time
expected = Time(13, 30, 29)
self.assertEqual(expected, self.TIME("13:30:29+01", self.curs))
self.assertEqual(expected, self.TIME("13:30:29-01", self.curs))
self.assertEqual(expected, self.TIME("13:30:29+01:15", self.curs))
self.assertEqual(expected, self.TIME("13:30:29-01:15", self.curs))
self.assertEqual(expected, self.TIME("13:30:29+01:15:42", self.curs))
self.assertEqual(expected, self.TIME("13:30:29-01:15:42", self.curs))
def test_parse_datetime_timezone(self):
# Time zone information is ignored.
from mx.DateTime import DateTime
expected = DateTime(2007, 1, 1, 13, 30, 29)
self.assertEqual(
expected, self.DATETIME("2007-01-01 13:30:29+01", self.curs))
self.assertEqual(
expected, self.DATETIME("2007-01-01 13:30:29-01", self.curs))
self.assertEqual(
expected, self.DATETIME("2007-01-01 13:30:29+01:15", self.curs))
self.assertEqual(
expected, self.DATETIME("2007-01-01 13:30:29-01:15", self.curs))
self.assertEqual(
expected, self.DATETIME("2007-01-01 13:30:29+01:15:42", self.curs))
self.assertEqual(
expected, self.DATETIME("2007-01-01 13:30:29-01:15:42", self.curs))
def test_parse_interval(self):
value = self.INTERVAL('42 days 05:50:05', self.curs)
self.assert_(value is not None)
self.assertEqual(value.day, 42)
self.assertEqual(value.hour, 5)
self.assertEqual(value.minute, 50)
self.assertEqual(value.second, 5)
def test_adapt_time(self):
from mx.DateTime import Time
value = self.execute('select (%s)::time::text',
[Time(13, 30, 29)])
self.assertEqual(value, '13:30:29')
def test_adapt_datetime(self):
from mx.DateTime import DateTime
value = self.execute('select (%s)::timestamp::text',
[DateTime(2007, 1, 1, 13, 30, 29.123456)])
self.assertEqual(value, '2007-01-01 13:30:29.123456')
def test_adapt_bc_datetime(self):
from mx.DateTime import DateTime
value = self.execute('select (%s)::timestamp::text',
[DateTime(-41, 1, 1, 13, 30, 29.123456)])
        # microseconds for BC timestamps appear not to be available in
        # PG < 8.4, but more likely their presence is determined at compile time.
self.assert_(value in (
'0042-01-01 13:30:29.123456 BC',
'0042-01-01 13:30:29 BC'), value)
def test_adapt_timedelta(self):
from mx.DateTime import DateTimeDeltaFrom
value = self.execute('select extract(epoch from (%s)::interval)',
[DateTimeDeltaFrom(days=42,
seconds=45296.123456)])
seconds = math.floor(value)
self.assertEqual(seconds, 3674096)
self.assertEqual(int(round((value - seconds) * 1000000)), 123456)
    def test_adapt_negative_timedelta(self):
from mx.DateTime import DateTimeDeltaFrom
value = self.execute('select extract(epoch from (%s)::interval)',
[DateTimeDeltaFrom(days=-42,
seconds=45296.123456)])
seconds = math.floor(value)
self.assertEqual(seconds, -3583504)
self.assertEqual(int(round((value - seconds) * 1000000)), 123456)
def _test_type_roundtrip(self, o1):
o2 = self.execute("select %s;", (o1,))
self.assertEqual(type(o1), type(o2))
def _test_type_roundtrip_array(self, o1):
o1 = [o1]
o2 = self.execute("select %s;", (o1,))
self.assertEqual(type(o1[0]), type(o2[0]))
def test_type_roundtrip_date(self):
from mx.DateTime import Date
self._test_type_roundtrip(Date(2010,5,3))
def test_type_roundtrip_datetime(self):
from mx.DateTime import DateTime
self._test_type_roundtrip(DateTime(2010,5,3,10,20,30))
def test_type_roundtrip_time(self):
from mx.DateTime import Time
self._test_type_roundtrip(Time(10,20,30))
def test_type_roundtrip_interval(self):
from mx.DateTime import DateTimeDeltaFrom
self._test_type_roundtrip(DateTimeDeltaFrom(seconds=30))
def test_type_roundtrip_date_array(self):
from mx.DateTime import Date
self._test_type_roundtrip_array(Date(2010,5,3))
def test_type_roundtrip_datetime_array(self):
from mx.DateTime import DateTime
self._test_type_roundtrip_array(DateTime(2010,5,3,10,20,30))
def test_type_roundtrip_time_array(self):
from mx.DateTime import Time
self._test_type_roundtrip_array(Time(10,20,30))
def test_type_roundtrip_interval_array(self):
from mx.DateTime import DateTimeDeltaFrom
self._test_type_roundtrip_array(DateTimeDeltaFrom(seconds=30))
# Only run the mx.DateTime tests if psycopg was compiled with support.
try:
if not hasattr(psycopg2._psycopg, 'MXDATETIME'):
del mxDateTimeTests
except AttributeError:
del mxDateTimeTests
class FromTicksTestCase(unittest.TestCase):
# bug "TimestampFromTicks() throws ValueError (2-2.0.14)"
# reported by Jozsef Szalay on 2010-05-06
def test_timestamp_value_error_sec_59_99(self):
from datetime import datetime
s = psycopg2.TimestampFromTicks(1273173119.99992)
self.assertEqual(s.adapted,
datetime(2010, 5, 6, 14, 11, 59, 999920,
tzinfo=FixedOffsetTimezone(-5 * 60)))
def test_date_value_error_sec_59_99(self):
from datetime import date
s = psycopg2.DateFromTicks(1273173119.99992)
self.assertEqual(s.adapted, date(2010, 5, 6))
def test_time_value_error_sec_59_99(self):
from datetime import time
s = psycopg2.TimeFromTicks(1273173119.99992)
self.assertEqual(s.adapted.replace(hour=0),
time(0, 11, 59, 999920))
class FixedOffsetTimezoneTests(unittest.TestCase):
def test_init_with_no_args(self):
tzinfo = FixedOffsetTimezone()
self.assert_(tzinfo._offset is ZERO)
self.assert_(tzinfo._name is None)
def test_repr_with_positive_offset(self):
tzinfo = FixedOffsetTimezone(5 * 60)
self.assertEqual(repr(tzinfo), "psycopg2.tz.FixedOffsetTimezone(offset=300, name=None)")
def test_repr_with_negative_offset(self):
tzinfo = FixedOffsetTimezone(-5 * 60)
self.assertEqual(repr(tzinfo), "psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None)")
def test_repr_with_name(self):
tzinfo = FixedOffsetTimezone(name="FOO")
self.assertEqual(repr(tzinfo), "psycopg2.tz.FixedOffsetTimezone(offset=0, name='FOO')")
def test_instance_caching(self):
self.assert_(FixedOffsetTimezone(name="FOO") is FixedOffsetTimezone(name="FOO"))
self.assert_(FixedOffsetTimezone(7 * 60) is FixedOffsetTimezone(7 * 60))
self.assert_(FixedOffsetTimezone(-9 * 60, 'FOO') is FixedOffsetTimezone(-9 * 60, 'FOO'))
self.assert_(FixedOffsetTimezone(9 * 60) is not FixedOffsetTimezone(9 * 60, 'FOO'))
self.assert_(FixedOffsetTimezone(name='FOO') is not FixedOffsetTimezone(9 * 60, 'FOO'))
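    # Illustrative addition (not part of the original suite): a small check
    # documenting that FixedOffsetTimezone takes its offset in minutes.
    def test_offset_is_in_minutes_sketch(self):
        from datetime import datetime, timedelta
        tzinfo = FixedOffsetTimezone(offset=60, name="CET")
        self.assertEqual(datetime(2007, 1, 1, tzinfo=tzinfo).utcoffset(),
                         timedelta(hours=1))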
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
|
mit
|
Xeralux/tensorflow
|
tensorflow/python/debug/lib/source_utils_test.py
|
89
|
13687
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for source_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.lib import source_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.util import tf_inspect
def line_number_above():
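  # (Comment added for clarity) tf_inspect.stack()[1] is the caller's frame
  # record and index [2] is its line number, so subtracting one yields the
  # line directly above the call site.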
return tf_inspect.stack()[1][2] - 1
class GuessIsTensorFlowLibraryTest(test_util.TensorFlowTestCase):
def setUp(self):
self.curr_file_path = os.path.normpath(os.path.abspath(__file__))
def tearDown(self):
ops.reset_default_graph()
def testGuessedBaseDirIsProbablyCorrect(self):
self.assertEqual("tensorflow",
os.path.basename(source_utils._TENSORFLOW_BASEDIR))
def testUnitTestFileReturnsFalse(self):
self.assertFalse(
source_utils.guess_is_tensorflow_py_library(self.curr_file_path))
def testSourceUtilModuleReturnsTrue(self):
self.assertTrue(
source_utils.guess_is_tensorflow_py_library(source_utils.__file__))
def testFileInPythonKernelsPathReturnsTrue(self):
x = constant_op.constant(42.0, name="x")
self.assertTrue(
source_utils.guess_is_tensorflow_py_library(x.op.traceback[-1][0]))
def testNonPythonFileRaisesException(self):
with self.assertRaisesRegexp(ValueError, r"is not a Python source file"):
source_utils.guess_is_tensorflow_py_library(
os.path.join(os.path.dirname(self.curr_file_path), "foo.cc"))
class SourceHelperTest(test_util.TensorFlowTestCase):
def createAndRunGraphHelper(self):
"""Create and run a TensorFlow Graph to generate debug dumps.
    This is intentionally done in a separate method, to make it easier to test
    the stack-top mode of source annotation.
"""
self.dump_root = self.get_temp_dir()
self.curr_file_path = os.path.abspath(
tf_inspect.getfile(tf_inspect.currentframe()))
# Run a simple TF graph to generate some debug dumps that can be used in
# source annotation.
with session.Session() as sess:
self.u_init = constant_op.constant(
np.array([[5.0, 3.0], [-1.0, 0.0]]), shape=[2, 2], name="u_init")
self.u_init_line_number = line_number_above()
self.u = variables.Variable(self.u_init, name="u")
self.u_line_number = line_number_above()
self.v_init = constant_op.constant(
np.array([[2.0], [-1.0]]), shape=[2, 1], name="v_init")
self.v_init_line_number = line_number_above()
self.v = variables.Variable(self.v_init, name="v")
self.v_line_number = line_number_above()
self.w = math_ops.matmul(self.u, self.v, name="w")
self.w_line_number = line_number_above()
sess.run(self.u.initializer)
sess.run(self.v.initializer)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options, sess.graph, debug_urls=["file://%s" % self.dump_root])
run_metadata = config_pb2.RunMetadata()
sess.run(self.w, options=run_options, run_metadata=run_metadata)
self.dump = debug_data.DebugDumpDir(
self.dump_root, partition_graphs=run_metadata.partition_graphs)
self.dump.set_python_graph(sess.graph)
def setUp(self):
self.createAndRunGraphHelper()
self.helper_line_number = line_number_above()
def tearDown(self):
if os.path.isdir(self.dump_root):
shutil.rmtree(self.dump_root)
ops.reset_default_graph()
def testAnnotateWholeValidSourceFileGivesCorrectResult(self):
source_annotation = source_utils.annotate_source(self.dump,
self.curr_file_path)
self.assertIn(self.u_init.op.name,
source_annotation[self.u_init_line_number])
self.assertIn(self.u.op.name, source_annotation[self.u_line_number])
self.assertIn(self.v_init.op.name,
source_annotation[self.v_init_line_number])
self.assertIn(self.v.op.name, source_annotation[self.v_line_number])
self.assertIn(self.w.op.name, source_annotation[self.w_line_number])
# In the non-stack-top (default) mode, the helper line should be annotated
# with all the ops as well.
self.assertIn(self.u_init.op.name,
source_annotation[self.helper_line_number])
self.assertIn(self.u.op.name, source_annotation[self.helper_line_number])
self.assertIn(self.v_init.op.name,
source_annotation[self.helper_line_number])
self.assertIn(self.v.op.name, source_annotation[self.helper_line_number])
self.assertIn(self.w.op.name, source_annotation[self.helper_line_number])
def testAnnotateWithStackTopGivesCorrectResult(self):
source_annotation = source_utils.annotate_source(
self.dump, self.curr_file_path, file_stack_top=True)
self.assertIn(self.u_init.op.name,
source_annotation[self.u_init_line_number])
self.assertIn(self.u.op.name, source_annotation[self.u_line_number])
self.assertIn(self.v_init.op.name,
source_annotation[self.v_init_line_number])
self.assertIn(self.v.op.name, source_annotation[self.v_line_number])
self.assertIn(self.w.op.name, source_annotation[self.w_line_number])
# In the stack-top mode, the helper line should not have been annotated.
self.assertNotIn(self.helper_line_number, source_annotation)
def testAnnotateSubsetOfLinesGivesCorrectResult(self):
source_annotation = source_utils.annotate_source(
self.dump,
self.curr_file_path,
min_line=self.u_line_number,
max_line=self.u_line_number + 1)
self.assertIn(self.u.op.name, source_annotation[self.u_line_number])
self.assertNotIn(self.v_line_number, source_annotation)
def testAnnotateDumpedTensorsGivesCorrectResult(self):
source_annotation = source_utils.annotate_source(
self.dump, self.curr_file_path, do_dumped_tensors=True)
# Note: Constant Tensors u_init and v_init may not get dumped due to
# constant-folding.
self.assertIn(self.u.name, source_annotation[self.u_line_number])
self.assertIn(self.v.name, source_annotation[self.v_line_number])
self.assertIn(self.w.name, source_annotation[self.w_line_number])
self.assertNotIn(self.u.op.name, source_annotation[self.u_line_number])
self.assertNotIn(self.v.op.name, source_annotation[self.v_line_number])
self.assertNotIn(self.w.op.name, source_annotation[self.w_line_number])
self.assertIn(self.u.name, source_annotation[self.helper_line_number])
self.assertIn(self.v.name, source_annotation[self.helper_line_number])
self.assertIn(self.w.name, source_annotation[self.helper_line_number])
def testCallingAnnotateSourceWithoutPythonGraphRaisesException(self):
self.dump.set_python_graph(None)
with self.assertRaises(ValueError):
source_utils.annotate_source(self.dump, self.curr_file_path)
def testCallingAnnotateSourceOnUnrelatedSourceFileDoesNotError(self):
# Create an unrelated source file.
unrelated_source_path = tempfile.mktemp()
with open(unrelated_source_path, "wt") as source_file:
source_file.write("print('hello, world')\n")
self.assertEqual({},
source_utils.annotate_source(self.dump,
unrelated_source_path))
# Clean up unrelated source file.
os.remove(unrelated_source_path)
class ListSourceAgainstDumpTest(test_util.TensorFlowTestCase):
def createAndRunGraphWithWhileLoop(self):
"""Create and run a TensorFlow Graph with a while loop to generate dumps."""
self.dump_root = self.get_temp_dir()
self.curr_file_path = os.path.abspath(
tf_inspect.getfile(tf_inspect.currentframe()))
# Run a simple TF graph to generate some debug dumps that can be used in
# source annotation.
with session.Session() as sess:
loop_body = lambda i: math_ops.add(i, 2)
self.traceback_first_line = line_number_above()
loop_cond = lambda i: math_ops.less(i, 16)
i = constant_op.constant(10, name="i")
loop = control_flow_ops.while_loop(loop_cond, loop_body, [i])
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options, sess.graph, debug_urls=["file://%s" % self.dump_root])
run_metadata = config_pb2.RunMetadata()
sess.run(loop, options=run_options, run_metadata=run_metadata)
self.dump = debug_data.DebugDumpDir(
self.dump_root, partition_graphs=run_metadata.partition_graphs)
self.dump.set_python_graph(sess.graph)
def setUp(self):
self.createAndRunGraphWithWhileLoop()
def tearDown(self):
if os.path.isdir(self.dump_root):
shutil.rmtree(self.dump_root)
ops.reset_default_graph()
def testGenerateSourceList(self):
source_list = source_utils.list_source_files_against_dump(self.dump)
# Assert that the file paths are sorted and unique.
file_paths = [item[0] for item in source_list]
self.assertEqual(sorted(file_paths), file_paths)
self.assertEqual(len(set(file_paths)), len(file_paths))
# Assert that each item of source_list has length 6.
for item in source_list:
self.assertTrue(isinstance(item, tuple))
self.assertEqual(6, len(item))
# The while loop body should have executed 3 times. The following table
# lists the tensors and how many times each of them is dumped.
# Tensor name # of times dumped:
# i:0 1
# while/Enter:0 1
# while/Merge:0 4
# while/Merge:1 4
# while/Less/y:0 4
# while/Less:0 4
# while/LoopCond:0 4
# while/Switch:0 1
    # while/Switch:1 3
# while/Identity:0 3
# while/Add/y:0 3
# while/Add:0 3
# while/NextIteration:0 3
# while/Exit:0 1
# ----------------------------
# (Total) 39
#
# The total number of nodes is 12.
# The total number of tensors is 14 (2 of the nodes have 2 outputs:
# while/Merge, while/Switch).
_, is_tf_py_library, num_nodes, num_tensors, num_dumps, first_line = (
source_list[file_paths.index(self.curr_file_path)])
self.assertFalse(is_tf_py_library)
self.assertEqual(12, num_nodes)
self.assertEqual(14, num_tensors)
self.assertEqual(39, num_dumps)
self.assertEqual(self.traceback_first_line, first_line)
def testGenerateSourceListWithNodeNameFilter(self):
source_list = source_utils.list_source_files_against_dump(
self.dump, node_name_regex_whitelist=r"while/Add.*")
# Assert that the file paths are sorted.
file_paths = [item[0] for item in source_list]
self.assertEqual(sorted(file_paths), file_paths)
self.assertEqual(len(set(file_paths)), len(file_paths))
    # Assert that each item of source_list has length 6.
for item in source_list:
self.assertTrue(isinstance(item, tuple))
self.assertEqual(6, len(item))
# Due to the node-name filtering the result should only contain 2 nodes
# and 2 tensors. The total number of dumped tensors should be 6:
# while/Add/y:0 3
# while/Add:0 3
_, is_tf_py_library, num_nodes, num_tensors, num_dumps, _ = (
source_list[file_paths.index(self.curr_file_path)])
self.assertFalse(is_tf_py_library)
self.assertEqual(2, num_nodes)
self.assertEqual(2, num_tensors)
self.assertEqual(6, num_dumps)
def testGenerateSourceListWithPathRegexFilter(self):
curr_file_basename = os.path.basename(self.curr_file_path)
source_list = source_utils.list_source_files_against_dump(
self.dump,
path_regex_whitelist=(
".*" + curr_file_basename.replace(".", "\\.") + "$"))
self.assertEqual(1, len(source_list))
(file_path, is_tf_py_library, num_nodes, num_tensors, num_dumps,
first_line) = source_list[0]
self.assertEqual(self.curr_file_path, file_path)
self.assertFalse(is_tf_py_library)
self.assertEqual(12, num_nodes)
self.assertEqual(14, num_tensors)
self.assertEqual(39, num_dumps)
self.assertEqual(self.traceback_first_line, first_line)
if __name__ == "__main__":
googletest.main()
|
apache-2.0
|
nhynes/neon
|
examples/mnist_branch.py
|
2
|
3877
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Example that trains a small multi-layer perceptron with multiple branches.
Branch nodes are used to indicate points at which different layer sequences
diverge. The topology of the network is:
cost1 cost3
| /
m_l4 b2_l2
| /
| ___b2_l1
|/
m_l3 cost2
| /
m_l2 b1_l2
| /
| ___b1_l1
|/
|
m_l1
|
|
data
"""
import logging
from neon.callbacks.callbacks import Callbacks
from neon.data import DataIterator, load_mnist
from neon.initializers import Gaussian
from neon.layers import GeneralizedCost, Affine, BranchNode, Multicost, Tree
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Rectlin, Logistic, Misclassification, Softmax
from neon.transforms import CrossEntropyBinary, CrossEntropyMulti
from neon.util.argparser import NeonArgparser
# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args()
# load up the mnist data set
# split into train and test sets
(X_train, y_train), (X_test, y_test), nclass = load_mnist(path=args.data_dir)
# setup a training set iterator
train_set = DataIterator(X_train, y_train, nclass=nclass)
# setup a validation data set iterator
valid_set = DataIterator(X_test, y_test, nclass=nclass)
# setup weight initialization function
init_norm = Gaussian(loc=0.0, scale=0.01)
normrelu = dict(init=init_norm, activation=Rectlin())
normsigm = dict(init=init_norm, activation=Logistic(shortcut=True))
normsoft = dict(init=init_norm, activation=Softmax())
# setup model layers
b1 = BranchNode(name="b1")
b2 = BranchNode(name="b2")
p1 = [Affine(nout=100, linear_name="m_l1", **normrelu),
b1,
Affine(nout=32, linear_name="m_l2", **normrelu),
Affine(nout=16, linear_name="m_l3", **normrelu),
b2,
Affine(nout=10, linear_name="m_l4", **normsoft)]
p2 = [b1,
Affine(nout=16, linear_name="b1_l1", **normrelu),
Affine(nout=10, linear_name="b1_l2", **normsigm)]
p3 = [b2,
Affine(nout=16, linear_name="b2_l1", **normrelu),
Affine(nout=10, linear_name="b2_l2", **normsigm)]
# setup cost function as CrossEntropy
cost = Multicost(costs=[GeneralizedCost(costfunc=CrossEntropyMulti()),
GeneralizedCost(costfunc=CrossEntropyBinary()),
GeneralizedCost(costfunc=CrossEntropyBinary())],
weights=[1, 0., 0.])
# setup optimizer
optimizer = GradientDescentMomentum(0.1, momentum_coef=0.9, stochastic_round=args.rounding)
# initialize model object
alphas = [1, 0.25, 0.25]
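# (Comment added for clarity; based on neon's Tree API) The alphas list
# weights the error signal each branch contributes during backpropagation;
# here the two side branches contribute at one quarter of the main path's
# weight.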
mlp = Model(layers=Tree([p1, p2, p3], alphas=alphas))
# setup standard fit callbacks
callbacks = Callbacks(mlp, train_set, eval_set=valid_set, **args.callback_args)
# run fit
mlp.fit(train_set, optimizer=optimizer, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
logging.getLogger('neon').info("Misclassification error = %.1f%%",
(mlp.eval(valid_set, metric=Misclassification())*100))
print('Misclassification error = %.1f%%' % (mlp.eval(valid_set, metric=Misclassification())*100))
|
apache-2.0
|
jhendrixMSFT/coreclr
|
src/scripts/genXplatEventing.py
|
28
|
30394
|
#
## Licensed to the .NET Foundation under one or more agreements.
## The .NET Foundation licenses this file to you under the MIT license.
## See the LICENSE file in the project root for more information.
#
#
#USAGE:
#Add Events: modify <root>/src/vm/ClrEtwAll.man
#Look at the code in <root>/src/scripts/genXplatLttng.py for using subroutines in this file
#
# Python 2 compatibility
from __future__ import print_function
import os
import xml.dom.minidom as DOM
stdprolog="""
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
/******************************************************************
DO NOT MODIFY. AUTOGENERATED FILE.
This file is generated using the logic from <root>/src/scripts/genXplatEventing.py
******************************************************************/
"""
stdprolog_cmake="""
#
#
#******************************************************************
#DO NOT MODIFY. AUTOGENERATED FILE.
#This file is generated using the logic from <root>/src/scripts/genXplatEventing.py
#******************************************************************
"""
lindent = " ";
palDataTypeMapping ={
#constructed types
"win:null" :" ",
"win:Int64" :"const __int64",
"win:ULong" :"const ULONG",
"win:count" :"*",
"win:Struct" :"const void",
#actual spec
"win:GUID" :"const GUID",
"win:AnsiString" :"LPCSTR",
"win:UnicodeString" :"PCWSTR",
"win:Double" :"const double",
"win:Int32" :"const signed int",
"win:Boolean" :"const BOOL",
"win:UInt64" :"const unsigned __int64",
"win:UInt32" :"const unsigned int",
"win:UInt16" :"const unsigned short",
"win:UInt8" :"const unsigned char",
"win:Pointer" :"const void*",
"win:Binary" :"const BYTE"
}
# A Template represents an ETW template and can contain one or more AbstractTemplates.
# An AbstractTemplate contains a FunctionSignature.
# A FunctionSignature consists of FunctionParameters representing each parameter in its signature.
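# For reference (illustrative, abbreviated; see ClrEtwAll.man for the real
# thing), a template node in the manifest looks roughly like:
#   <template tid="GCStart">
#       <data name="Count" inType="win:UInt32" />
#       <data name="Reason" inType="win:UInt32" />
#   </template>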
def getParamSequenceSize(paramSequence, estimate):
total = 0
pointers = 0
for param in paramSequence:
if param == "win:Int64":
total += 8
elif param == "win:ULong":
total += 4
elif param == "GUID":
total += 16
elif param == "win:Double":
total += 8
elif param == "win:Int32":
total += 4
elif param == "win:Boolean":
total += 4
elif param == "win:UInt64":
total += 8
elif param == "win:UInt32":
total += 4
elif param == "win:UInt16":
total += 2
elif param == "win:UInt8":
total += 1
elif param == "win:Pointer":
if estimate:
total += 8
else:
pointers += 1
elif param == "win:Binary":
total += 1
elif estimate:
if param == "win:AnsiString":
total += 32
elif param == "win:UnicodeString":
total += 64
elif param == "win:Struct":
total += 32
else:
raise Exception("Don't know size for " + param)
if estimate:
return total
return total, pointers
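# Note (added for clarity): with estimate=True the function returns a single
# int; otherwise it returns a (fixed_size, pointer_count) tuple.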
class Template:
def __repr__(self):
return "<Template " + self.name + ">"
def __init__(self, templateName, fnPrototypes, dependencies, structSizes, arrays):
self.name = templateName
self.signature = FunctionSignature()
self.structs = structSizes
self.arrays = arrays
for variable in fnPrototypes.paramlist:
for dependency in dependencies[variable]:
if not self.signature.getParam(dependency):
self.signature.append(dependency, fnPrototypes.getParam(dependency))
def getFnParam(self, name):
return self.signature.getParam(name)
@property
def num_params(self):
return len(self.signature.paramlist)
@property
def estimated_size(self):
total = getParamSequenceSize((self.getFnParam(paramName).winType for paramName in self.signature.paramlist), True)
if total < 32:
total = 32
elif total > 1024:
total = 1024
return total
class FunctionSignature:
def __repr__(self):
return ", ".join(self.paramlist)
def __init__(self):
self.LUT = {} # dictionary of FunctionParameter
self.paramlist = [] # list of parameters to maintain their order in signature
def append(self,variable,fnparam):
self.LUT[variable] = fnparam
self.paramlist.append(variable)
def getParam(self,variable):
return self.LUT.get(variable)
def getLength(self):
return len(self.paramlist)
class FunctionParameter:
def __repr__(self):
return self.name
def __init__(self,winType,name,count,prop):
self.winType = winType #ETW type as given in the manifest
self.name = name #parameter name as given in the manifest
self.prop = prop #any special property as determined by the manifest and developer
#self.count #indicates if the parameter is a pointer
if count == "win:null":
self.count = "win:null"
elif count or winType == "win:GUID" or count == "win:count":
#special case for GUIDS, consider them as structs
self.count = "win:count"
else:
self.count = "win:null"
def getTopLevelElementsByTagName(node,tag):
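    # getElementsByTagName searches the whole subtree; keep only the direct
    # children of `node` so that, e.g., <data> elements nested inside a
    # <struct> are not picked up twice.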
dataNodes = []
for element in node.getElementsByTagName(tag):
if element.parentNode == node:
dataNodes.append(element)
return dataNodes
ignoredXmlTemplateAttributes = frozenset(["map","outType"])
usedXmlTemplateAttributes = frozenset(["name","inType","count", "length"])
def parseTemplateNodes(templateNodes):
#return values
allTemplates = {}
for templateNode in templateNodes:
structCounts = {}
arrays = {}
templateName = templateNode.getAttribute('tid')
        var_Dependencies = {}
fnPrototypes = FunctionSignature()
dataNodes = getTopLevelElementsByTagName(templateNode,'data')
# Validate that no new attributes has been added to manifest
for dataNode in dataNodes:
nodeMap = dataNode.attributes
for attrib in nodeMap.values():
attrib_name = attrib.name
                if attrib_name not in ignoredXmlTemplateAttributes and attrib_name not in usedXmlTemplateAttributes:
                    raise ValueError('unknown attribute: ' + attrib_name + ' in template: ' + templateName)
for dataNode in dataNodes:
variable = dataNode.getAttribute('name')
wintype = dataNode.getAttribute('inType')
#count and length are the same
wincount = dataNode.getAttribute('count')
            winlength = dataNode.getAttribute('length')
var_Props = None
var_dependency = [variable]
if winlength:
if wincount:
raise Exception("both count and length property found on: " + variable + "in template: " + templateName)
wincount = winlength
            if wincount.isdigit() and int(wincount) == 1:
wincount = ''
if wincount:
if (wincount.isdigit()):
var_Props = wincount
elif fnPrototypes.getParam(wincount):
var_Props = wincount
var_dependency.insert(0, wincount)
arrays[variable] = wincount
#construct the function signature
if wintype == "win:GUID":
var_Props = "sizeof(GUID)/sizeof(int)"
            var_Dependencies[variable] = var_dependency
fnparam = FunctionParameter(wintype,variable,wincount,var_Props)
fnPrototypes.append(variable,fnparam)
structNodes = getTopLevelElementsByTagName(templateNode,'struct')
for structToBeMarshalled in structNodes:
structName = structToBeMarshalled.getAttribute('name')
countVarName = structToBeMarshalled.getAttribute('count')
assert(countVarName == "Count")
assert(countVarName in fnPrototypes.paramlist)
if not countVarName:
raise ValueError("Struct '%s' in template '%s' does not have an attribute count." % (structName, templateName))
names = [x.attributes['name'].value for x in structToBeMarshalled.getElementsByTagName("data")]
types = [x.attributes['inType'].value for x in structToBeMarshalled.getElementsByTagName("data")]
structCounts[structName] = countVarName
            var_Dependencies[structName] = [countVarName, structName]
fnparam_pointer = FunctionParameter("win:Struct", structName, "win:count", countVarName)
fnPrototypes.append(structName, fnparam_pointer)
        allTemplates[templateName] = Template(templateName, fnPrototypes, var_Dependencies, structCounts, arrays)
return allTemplates
def generateClrallEvents(eventNodes,allTemplates):
clrallEvents = []
for eventNode in eventNodes:
eventName = eventNode.getAttribute('symbol')
templateName = eventNode.getAttribute('template')
#generate EventEnabled
clrallEvents.append("inline BOOL EventEnabled")
clrallEvents.append(eventName)
clrallEvents.append("() {return XplatEventLogger::IsEventLoggingEnabled() && EventXplatEnabled")
clrallEvents.append(eventName+"();}\n\n")
#generate FireEtw functions
fnptype = []
fnbody = []
fnptype.append("inline ULONG FireEtw")
fnptype.append(eventName)
fnptype.append("(\n")
fnbody.append(lindent)
fnbody.append("if (!EventEnabled")
fnbody.append(eventName)
fnbody.append("()) {return ERROR_SUCCESS;}\n")
line = []
fnptypeline = []
if templateName:
template = allTemplates[templateName]
fnSig = template.signature
for params in fnSig.paramlist:
fnparam = fnSig.getParam(params)
wintypeName = fnparam.winType
typewName = palDataTypeMapping[wintypeName]
winCount = fnparam.count
countw = palDataTypeMapping[winCount]
if params in template.structs:
fnptypeline.append("%sint %s_ElementSize,\n" % (lindent, params))
fnptypeline.append(lindent)
fnptypeline.append(typewName)
fnptypeline.append(countw)
fnptypeline.append(" ")
fnptypeline.append(fnparam.name)
fnptypeline.append(",\n")
#fnsignature
for params in fnSig.paramlist:
fnparam = fnSig.getParam(params)
if params in template.structs:
line.append(fnparam.name + "_ElementSize")
line.append(", ")
line.append(fnparam.name)
line.append(",")
#remove trailing commas
if len(line) > 0:
del line[-1]
if len(fnptypeline) > 0:
del fnptypeline[-1]
fnptype.extend(fnptypeline)
fnptype.append("\n)\n{\n")
fnbody.append(lindent)
fnbody.append("return FireEtXplat")
fnbody.append(eventName)
fnbody.append("(")
fnbody.extend(line)
fnbody.append(");\n")
fnbody.append("}\n\n")
clrallEvents.extend(fnptype)
clrallEvents.extend(fnbody)
return ''.join(clrallEvents)
def generateClrXplatEvents(eventNodes, allTemplates):
clrallEvents = []
for eventNode in eventNodes:
eventName = eventNode.getAttribute('symbol')
templateName = eventNode.getAttribute('template')
#generate EventEnabled
clrallEvents.append("extern \"C\" BOOL EventXplatEnabled")
clrallEvents.append(eventName)
clrallEvents.append("();\n")
#generate FireEtw functions
fnptype = []
fnptypeline = []
fnptype.append("extern \"C\" ULONG FireEtXplat")
fnptype.append(eventName)
fnptype.append("(\n")
if templateName:
template = allTemplates[templateName]
fnSig = template.signature
for params in fnSig.paramlist:
fnparam = fnSig.getParam(params)
wintypeName = fnparam.winType
typewName = palDataTypeMapping[wintypeName]
winCount = fnparam.count
countw = palDataTypeMapping[winCount]
if params in template.structs:
fnptypeline.append("%sint %s_ElementSize,\n" % (lindent, params))
fnptypeline.append(lindent)
fnptypeline.append(typewName)
fnptypeline.append(countw)
fnptypeline.append(" ")
fnptypeline.append(fnparam.name)
fnptypeline.append(",\n")
#remove trailing commas
if len(fnptypeline) > 0:
del fnptypeline[-1]
fnptype.extend(fnptypeline)
fnptype.append("\n);\n")
clrallEvents.extend(fnptype)
return ''.join(clrallEvents)
#generates the dummy header file which is used by the VM as the entry point to the logging functions
def generateclrEtwDummy(eventNodes,allTemplates):
clretmEvents = []
for eventNode in eventNodes:
eventName = eventNode.getAttribute('symbol')
templateName = eventNode.getAttribute('template')
fnptype = []
#generate FireEtw functions
fnptype.append("#define FireEtw")
fnptype.append(eventName)
fnptype.append("(");
line = []
if templateName:
template = allTemplates[templateName]
fnSig = template.signature
for params in fnSig.paramlist:
fnparam = fnSig.getParam(params)
if params in template.structs:
line.append(fnparam.name + "_ElementSize")
line.append(", ")
line.append(fnparam.name)
line.append(", ")
#remove trailing commas
if len(line) > 0:
del line[-1]
fnptype.extend(line)
fnptype.append(") 0\n")
clretmEvents.extend(fnptype)
return ''.join(clretmEvents)
def generateClralltestEvents(sClrEtwAllMan):
tree = DOM.parse(sClrEtwAllMan)
clrtestEvents = []
for providerNode in tree.getElementsByTagName('provider'):
templateNodes = providerNode.getElementsByTagName('template')
allTemplates = parseTemplateNodes(templateNodes)
eventNodes = providerNode.getElementsByTagName('event')
for eventNode in eventNodes:
eventName = eventNode.getAttribute('symbol')
templateName = eventNode.getAttribute('template')
clrtestEvents.append(" EventXplatEnabled" + eventName + "();\n")
clrtestEvents.append("Error |= FireEtXplat" + eventName + "(\n")
line =[]
if templateName:
template = allTemplates[templateName]
fnSig = template.signature
for params in fnSig.paramlist:
if params in template.structs:
line.append("sizeof(Struct1),\n")
argline =''
fnparam = fnSig.getParam(params)
if fnparam.name.lower() == 'count':
argline = '2'
else:
if fnparam.winType == "win:Binary":
argline = 'win_Binary'
elif fnparam.winType == "win:Pointer" and fnparam.count == "win:count":
argline = "(const void**)&var11"
elif fnparam.winType == "win:Pointer" :
argline = "(const void*)var11"
elif fnparam.winType =="win:AnsiString":
argline = '" Testing AniString "'
elif fnparam.winType =="win:UnicodeString":
argline = 'W(" Testing UnicodeString ")'
else:
if fnparam.count == "win:count":
line.append("&")
argline = fnparam.winType.replace(":","_")
line.append(argline)
line.append(",\n")
#remove trailing commas
if len(line) > 0:
del line[-1]
line.append("\n")
line.append(");\n")
clrtestEvents.extend(line)
return ''.join(clrtestEvents)
def generateSanityTest(sClrEtwAllMan,testDir):
if not testDir:
return
print('Generating Event Logging Tests')
if not os.path.exists(testDir):
os.makedirs(testDir)
cmake_file = testDir + "/CMakeLists.txt"
test_cpp = "clralltestevents.cpp"
testinfo = testDir + "/testinfo.dat"
Cmake_file = open(cmake_file,'w')
Test_cpp = open(testDir + "/" + test_cpp,'w')
Testinfo = open(testinfo,'w')
#CMake File:
Cmake_file.write(stdprolog_cmake)
Cmake_file.write("""
cmake_minimum_required(VERSION 2.8.12.2)
set(CMAKE_INCLUDE_CURRENT_DIR ON)
set(SOURCES
""")
Cmake_file.write(test_cpp)
Cmake_file.write("""
)
include_directories(${GENERATED_INCLUDE_DIR})
include_directories(${COREPAL_SOURCE_DIR}/inc/rt)
add_executable(eventprovidertest
${SOURCES}
)
set(EVENT_PROVIDER_DEPENDENCIES "")
set(EVENT_PROVIDER_LINKER_OPTIONS "")
if(FEATURE_EVENT_TRACE)
add_definitions(-DFEATURE_EVENT_TRACE=1)
list(APPEND EVENT_PROVIDER_DEPENDENCIES
coreclrtraceptprovider
eventprovider
)
list(APPEND EVENT_PROVIDER_LINKER_OPTIONS
${EVENT_PROVIDER_DEPENDENCIES}
)
endif(FEATURE_EVENT_TRACE)
add_dependencies(eventprovidertest ${EVENT_PROVIDER_DEPENDENCIES} coreclrpal)
target_link_libraries(eventprovidertest
coreclrpal
${EVENT_PROVIDER_LINKER_OPTIONS}
)
""")
Testinfo.write("""
Copyright (c) Microsoft Corporation. All rights reserved.
#
Version = 1.0
Section = EventProvider
Function = EventProvider
Name = PAL test for FireEtW* and EventEnabled* functions
TYPE = DEFAULT
EXE1 = eventprovidertest
Description
=This is a sanity test to check that there are no crashes in Xplat eventing
""")
#Test.cpp
Test_cpp.write(stdprolog)
Test_cpp.write("""
/*=====================================================================
**
** Source: clralltestevents.cpp
**
** Purpose: Ensure Correctness of Eventing code
**
**
**===================================================================*/
#include <palsuite.h>
#include <clrxplatevents.h>
typedef struct _Struct1 {
ULONG Data1;
unsigned short Data2;
unsigned short Data3;
unsigned char Data4[8];
} Struct1;
Struct1 var21[2] = { { 245, 13, 14, "deadbea" }, { 542, 0, 14, "deadflu" } };
Struct1* var11 = var21;
Struct1* win_Struct = var21;
GUID win_GUID ={ 245, 13, 14, "deadbea" };
double win_Double =34.04;
ULONG win_ULong = 34;
BOOL win_Boolean = FALSE;
unsigned __int64 win_UInt64 = 114;
unsigned int win_UInt32 = 4;
unsigned short win_UInt16 = 12;
unsigned char win_UInt8 = 9;
int win_Int32 = 12;
BYTE* win_Binary =(BYTE*)var21 ;
int __cdecl main(int argc, char **argv)
{
/* Initialize the PAL.
*/
if(0 != PAL_Initialize(argc, argv))
{
return FAIL;
}
ULONG Error = ERROR_SUCCESS;
#if defined(FEATURE_EVENT_TRACE)
Trace("\\n Starting functional eventing APIs tests \\n");
""")
Test_cpp.write(generateClralltestEvents(sClrEtwAllMan))
Test_cpp.write("""
/* Shutdown the PAL.
*/
if (Error != ERROR_SUCCESS)
{
Fail("One or more eventing Apis failed\\n ");
return FAIL;
}
Trace("\\n All eventing APIs were fired succesfully \\n");
#endif //defined(FEATURE_EVENT_TRACE)
PAL_Terminate();
return PASS;
}
""")
Cmake_file.close()
Test_cpp.close()
Testinfo.close()
def generateEtmDummyHeader(sClrEtwAllMan,clretwdummy):
if not clretwdummy:
return
print(' Generating Dummy Event Headers')
tree = DOM.parse(sClrEtwAllMan)
incDir = os.path.dirname(os.path.realpath(clretwdummy))
if not os.path.exists(incDir):
os.makedirs(incDir)
Clretwdummy = open(clretwdummy,'w')
Clretwdummy.write(stdprolog + "\n")
for providerNode in tree.getElementsByTagName('provider'):
templateNodes = providerNode.getElementsByTagName('template')
allTemplates = parseTemplateNodes(templateNodes)
eventNodes = providerNode.getElementsByTagName('event')
#pal: create etmdummy.h
Clretwdummy.write(generateclrEtwDummy(eventNodes, allTemplates) + "\n")
Clretwdummy.close()
def generatePlatformIndependentFiles(sClrEtwAllMan,incDir,etmDummyFile):
generateEtmDummyHeader(sClrEtwAllMan,etmDummyFile)
tree = DOM.parse(sClrEtwAllMan)
if not incDir:
return
print(' Generating Event Headers')
if not os.path.exists(incDir):
os.makedirs(incDir)
clrallevents = incDir + "/clretwallmain.h"
clrxplatevents = incDir + "/clrxplatevents.h"
Clrallevents = open(clrallevents,'w')
Clrxplatevents = open(clrxplatevents,'w')
Clrallevents.write(stdprolog + "\n")
Clrxplatevents.write(stdprolog + "\n")
Clrallevents.write("\n#include \"clrxplatevents.h\"\n\n")
for providerNode in tree.getElementsByTagName('provider'):
templateNodes = providerNode.getElementsByTagName('template')
allTemplates = parseTemplateNodes(templateNodes)
eventNodes = providerNode.getElementsByTagName('event')
#vm header:
Clrallevents.write(generateClrallEvents(eventNodes, allTemplates) + "\n")
#pal: create clrallevents.h
Clrxplatevents.write(generateClrXplatEvents(eventNodes, allTemplates) + "\n")
Clrxplatevents.close()
Clrallevents.close()
class EventExclusions:
def __init__(self):
self.nostack = set()
self.explicitstack = set()
self.noclrinstance = set()
def parseExclusionList(exclusionListFile):
ExclusionFile = open(exclusionListFile,'r')
exclusionInfo = EventExclusions()
for line in ExclusionFile:
line = line.strip()
#skip empty lines and comments
if not line or line.startswith('#'):
continue
tokens = line.split(':')
#entries containing the nomac token are ignored
if "nomac" in tokens:
continue
if len(tokens) > 5:
raise Exception("Invalid Entry " + line + "in "+ exclusionListFile)
eventProvider = tokens[2]
eventTask = tokens[1]
eventSymbol = tokens[4]
if eventProvider == '':
eventProvider = "*"
if eventTask == '':
eventTask = "*"
if eventSymbol == '':
eventSymbol = "*"
entry = eventProvider + ":" + eventTask + ":" + eventSymbol
if tokens[0].lower() == "nostack":
exclusionInfo.nostack.add(entry)
if tokens[0].lower() == "stack":
exclusionInfo.explicitstack.add(entry)
if tokens[0].lower() == "noclrinstanceid":
exclusionInfo.noclrinstance.add(entry)
ExclusionFile.close()
return exclusionInfo
def getStackWalkBit(eventProvider, taskName, eventSymbol, stackSet):
for entry in stackSet:
tokens = entry.split(':')
if len(tokens) != 3:
raise Exception("Error, possible error in the script which introduced the enrty "+ entry)
eventCond = tokens[0] == eventProvider or tokens[0] == "*"
taskCond = tokens[1] == taskName or tokens[1] == "*"
symbolCond = tokens[2] == eventSymbol or tokens[2] == "*"
if eventCond and taskCond and symbolCond:
return False
return True
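#Note: getStackWalkBit returns False when the event matches an entry in
#stackSet (i.e. the corresponding bit should be cleared) and True otherwise.
#Illustrative example with a hypothetical entry:
#  getStackWalkBit("MyProvider", "MyTask", "MyEvent", {"MyProvider:*:MyEvent"})
#returns False, while an event not covered by any entry returns True.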
#Add the miscellaneous checks here
def checkConsistency(sClrEtwAllMan,exclusionListFile):
tree = DOM.parse(sClrEtwAllMan)
exclusionInfo = parseExclusionList(exclusionListFile)
for providerNode in tree.getElementsByTagName('provider'):
stackSupportSpecified = {}
eventNodes = providerNode.getElementsByTagName('event')
templateNodes = providerNode.getElementsByTagName('template')
eventProvider = providerNode.getAttribute('name')
allTemplates = parseTemplateNodes(templateNodes)
for eventNode in eventNodes:
taskName = eventNode.getAttribute('task')
eventSymbol = eventNode.getAttribute('symbol')
eventTemplate = eventNode.getAttribute('template')
eventValue = int(eventNode.getAttribute('value'))
clrInstanceBit = getStackWalkBit(eventProvider, taskName, eventSymbol, exclusionInfo.noclrinstance)
sLookupFieldName = "ClrInstanceID"
sLookupFieldType = "win:UInt16"
if clrInstanceBit and allTemplates.get(eventTemplate):
# check for the event template and look for a field named ClrInstanceId of type win:UInt16
fnParam = allTemplates[eventTemplate].getFnParam(sLookupFieldName)
if not(fnParam and fnParam.winType == sLookupFieldType):
raise Exception(exclusionListFile + ":No " + sLookupFieldName + " field of type " + sLookupFieldType + " for event symbol " + eventSymbol)
# If some versions of an event are on the nostack/stack lists,
# and some versions are not on either the nostack or stack list,
# then the developer likely forgot to specify one of the versions
eventStackBitFromNoStackList = getStackWalkBit(eventProvider, taskName, eventSymbol, exclusionInfo.nostack)
eventStackBitFromExplicitStackList = getStackWalkBit(eventProvider, taskName, eventSymbol, exclusionInfo.explicitstack)
sStackSpecificityError = exclusionListFile + ": Error processing event: " + eventSymbol + " (ID " + str(eventValue) + "): This file must contain either ALL versions of this event or NO versions of this event. Currently some, but not all, versions of this event are present\n"
if not stackSupportSpecified.get(eventValue):
# Haven't checked this event before. Remember whether a preference is stated
if ( not eventStackBitFromNoStackList) or ( not eventStackBitFromExplicitStackList):
stackSupportSpecified[eventValue] = True
else:
stackSupportSpecified[eventValue] = False
else:
# We've checked this event before.
if stackSupportSpecified[eventValue]:
# When we last checked, a preference was previously specified, so it better be specified here
if eventStackBitFromNoStackList and eventStackBitFromExplicitStackList:
raise Exception(sStackSpecificityError)
else:
# When we last checked, a preference was not previously specified, so it better not be specified here
if ( not eventStackBitFromNoStackList) or ( not eventStackBitFromExplicitStackList):
raise Exception(sStackSpecificityError)
import argparse
import sys
def main(argv):
#parse the command line
parser = argparse.ArgumentParser(description="Generates the code required to instrument the LTTng logging mechanism")
required = parser.add_argument_group('required arguments')
required.add_argument('--man', type=str, required=True,
help='full path to manifest containing the description of events')
required.add_argument('--exc', type=str, required=True,
help='full path to exclusion list')
required.add_argument('--inc', type=str, default=None,
help='full path to directory where the header files will be generated')
required.add_argument('--dummy', type=str,default=None,
help='full path to file that will have dummy definitions of FireEtw functions')
required.add_argument('--testdir', type=str, default=None,
help='full path to directory where the test assets will be deployed' )
args, unknown = parser.parse_known_args(argv)
if unknown:
print('Unknown argument(s): ', ', '.join(unknown))
return const.UnknownArguments
sClrEtwAllMan = args.man
exclusionListFile = args.exc
incdir = args.inc
etmDummyFile = args.dummy
testDir = args.testdir
checkConsistency(sClrEtwAllMan, exclusionListFile)
generatePlatformIndependentFiles(sClrEtwAllMan,incdir,etmDummyFile)
generateSanityTest(sClrEtwAllMan,testDir)
if __name__ == '__main__':
return_code = main(sys.argv[1:])
sys.exit(return_code)
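#Example invocation (paths and script name are illustrative; they depend on the repo layout):
#  python <this_script>.py --man ClrEtwAll.man --exc ClrEtwAllMeta.lst \
#      --inc generated/inc --dummy generated/etmdummy.h --testdir generated/test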
|
mit
|
ashishnitinpatil/django_appengine_project_template
|
django/utils/ipv6.py
|
113
|
7965
|
# This code was mostly based on ipaddr-py
# Copyright 2007 Google Inc. http://code.google.com/p/ipaddr-py/
# Licensed under the Apache License, Version 2.0 (the "License").
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.utils.six.moves import xrange
def clean_ipv6_address(ip_str, unpack_ipv4=False,
error_message=_("This is not a valid IPv6 address.")):
"""
Cleans an IPv6 address string.
Validity is checked by calling is_valid_ipv6_address() - if an
invalid address is passed, ValidationError is raised.
Replaces the longest continuous zero-sequence with "::",
removes leading zeroes and makes sure all hextets are lowercase.
Args:
ip_str: A valid IPv6 address.
unpack_ipv4: if an IPv4-mapped address is found,
return the plain IPv4 address (default=False).
error_message: An error message used in the ValidationError.
Returns:
A compressed IPv6 address as a string, or the plain IPv4
address if unpack_ipv4 is True and an IPv4-mapped address was found.
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
if not is_valid_ipv6_address(ip_str):
raise ValidationError(error_message, code='invalid')
# This algorithm can only handle fully exploded
# IP strings
ip_str = _explode_shorthand_ip_string(ip_str)
ip_str = _sanitize_ipv4_mapping(ip_str)
# If needed, unpack the IPv4 and return straight away
# - no need in running the rest of the algorithm
if unpack_ipv4:
ipv4_unpacked = _unpack_ipv4(ip_str)
if ipv4_unpacked:
return ipv4_unpacked
hextets = ip_str.split(":")
for index in range(len(hextets)):
# Remove leading zeroes
hextets[index] = hextets[index].lstrip('0')
if not hextets[index]:
hextets[index] = '0'
# Determine best hextet to compress
if hextets[index] == '0':
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
# Compress the most suitable hextet
if best_doublecolon_len > 1:
best_doublecolon_end = (best_doublecolon_start +
best_doublecolon_len)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += ['']
hextets[best_doublecolon_start:best_doublecolon_end] = ['']
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [''] + hextets
result = ":".join(hextets)
return result.lower()
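# Illustrative examples (not part of the original module):
#   clean_ipv6_address('2001:0::0:01')                       -> '2001::1'
#   clean_ipv6_address('::ffff:0a0a:0a0a', unpack_ipv4=True) -> '10.10.10.10'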
def _sanitize_ipv4_mapping(ip_str):
"""
Sanitize IPv4 mapping in an expanded IPv6 address.
This converts ::ffff:0a0a:0a0a to ::ffff:10.10.10.10.
If there is nothing to sanitize, returns an unchanged
string.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The sanitized output string, if applicable.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
# not an ipv4 mapping
return ip_str
hextets = ip_str.split(':')
if '.' in hextets[-1]:
# already sanitized
return ip_str
ipv4_address = "%d.%d.%d.%d" % (
int(hextets[6][0:2], 16),
int(hextets[6][2:4], 16),
int(hextets[7][0:2], 16),
int(hextets[7][2:4], 16),
)
result = ':'.join(hextets[0:6])
result += ':' + ipv4_address
return result
def _unpack_ipv4(ip_str):
"""
Unpack an IPv4 address that was mapped in a compressed IPv6 address.
This converts 0000:0000:0000:0000:0000:ffff:10.10.10.10 to 10.10.10.10.
If there is nothing to unpack, returns None.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The unpacked IPv4 address, or None if there was nothing to unpack.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
return None
return ip_str.rsplit(':', 1)[1]
def is_valid_ipv6_address(ip_str):
"""
Ensure we have a valid IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if this is a valid IPv6 address.
"""
from django.core.validators import validate_ipv4_address
# We need to have at least one ':'.
if ':' not in ip_str:
return False
# We can only have one '::' shortener.
if ip_str.count('::') > 1:
return False
# '::' should be encompassed by start, digits or end.
if ':::' in ip_str:
return False
# A single colon can neither start nor end an address.
if ((ip_str.startswith(':') and not ip_str.startswith('::')) or
(ip_str.endswith(':') and not ip_str.endswith('::'))):
return False
# We can never have more than 7 ':' (1::2:3:4:5:6:7:8 is invalid)
if ip_str.count(':') > 7:
return False
# If we have no concatenation, we need to have 8 fields with 7 ':'.
if '::' not in ip_str and ip_str.count(':') != 7:
# We might have an IPv4 mapped address.
if ip_str.count('.') != 3:
return False
ip_str = _explode_shorthand_ip_string(ip_str)
# Now that we have that all squared away, let's check that each of the
# hextets are between 0x0 and 0xFFFF.
for hextet in ip_str.split(':'):
if hextet.count('.') == 3:
# If we have an IPv4 mapped address, the IPv4 portion has to
# be at the end of the IPv6 portion.
if not ip_str.split(':')[-1] == hextet:
return False
try:
validate_ipv4_address(hextet)
except ValidationError:
return False
else:
try:
# a value error here means that we got a bad hextet,
# something like 0xzzzz
if int(hextet, 16) < 0x0 or int(hextet, 16) > 0xFFFF:
return False
except ValueError:
return False
return True
def _explode_shorthand_ip_string(ip_str):
"""
Expand a shortened IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A string, the expanded IPv6 address.
"""
if not _is_shorthand_ip(ip_str):
# We've already got a longhand ip_str.
return ip_str
new_ip = []
hextet = ip_str.split('::')
# If there is a ::, we need to expand it with zeroes
# to get to 8 hextets - unless there is a dot in the last hextet,
# meaning we're doing v4-mapping
if '.' in ip_str.split(':')[-1]:
fill_to = 7
else:
fill_to = 8
if len(hextet) > 1:
sep = len(hextet[0].split(':')) + len(hextet[1].split(':'))
new_ip = hextet[0].split(':')
for _ in xrange(fill_to - sep):
new_ip.append('0000')
new_ip += hextet[1].split(':')
else:
new_ip = ip_str.split(':')
# Now need to make sure every hextet is 4 lower case characters.
# If a hextet is < 4 characters, we've got missing leading 0's.
ret_ip = []
for hextet in new_ip:
ret_ip.append(('0' * (4 - len(hextet)) + hextet).lower())
return ':'.join(ret_ip)
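# Illustrative example: _explode_shorthand_ip_string('2001:db8::1') returns
# '2001:0db8:0000:0000:0000:0000:0000:0001'.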
def _is_shorthand_ip(ip_str):
"""Determine if the address is shortened.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if the address is shortened.
"""
if ip_str.count('::') == 1:
return True
if any(len(x) < 4 for x in ip_str.split(':')):
return True
return False
|
bsd-2-clause
|
frenos/wireshark
|
tools/rdps.py
|
42
|
4450
|
#!/usr/bin/env python
#
# rdps.py
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
'''\
takes the file listed as the first argument and creates the file listed
as the second argument. It takes a PostScript file and creates a C source
with 2 functions:
print_ps_preamble()
print_ps_finale()
Ported to Python from rdps.c.
'''
import sys
import os.path
def ps_clean_string(raw_str):
ps_str = ''
for c in raw_str:
if c == '\\':
ps_str += '\\\\'
elif c == '%':
ps_str += '%%'
elif c == '\n':
ps_str += '\\n'
else:
ps_str += c
return ps_str
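# Illustrative example: ps_clean_string('100% done\n') returns '100%% done\\n'
# (percent signs doubled, newline escaped), so the result is safe to embed in
# a C fprintf format string.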
def start_code(fd, func):
script_name = os.path.split(__file__)[-1]
fd.write("void print_ps_%s(FILE *fd) {\n" % func)
def write_code(fd, raw_str):
ps_str = ps_clean_string(raw_str)
fd.write("\tfprintf(fd, \"%s\");\n" % ps_str)
def end_code(fd):
fd.write("}\n\n\n")
def exit_err(msg=None, *param):
if msg is not None:
sys.stderr.write(msg % param)
sys.exit(1)
# Globals
STATE_NULL = 'null'
STATE_PREAMBLE = 'preamble'
STATE_FINALE = 'finale'
def main():
state = STATE_NULL
if len(sys.argv) != 3:
exit_err("%s: input_file output_file\n", __file__)
input = open(sys.argv[1], 'r')
output = open(sys.argv[2], 'w')
script_name = os.path.split(__file__)[-1]
output.write('''\
/* DO NOT EDIT
*
* Created by %s.
*
* ps.c
* Definitions for generating PostScript(R) packet output.
*
* Wireshark - Network traffic analyzer
* By Gerald Combs <[email protected]>
* Copyright 1998 Gerald Combs
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <stdio.h>
#include "ps.h"
''' % script_name)
for line in input:
#line = line.rstrip()
if state is STATE_NULL:
if line.startswith("% ---- wireshark preamble start ---- %"):
state = STATE_PREAMBLE
start_code(output, "preamble")
continue
elif line.startswith("% ---- wireshark finale start ---- %"):
state = STATE_FINALE
start_code(output, "finale")
continue
elif state is STATE_PREAMBLE:
if line.startswith("% ---- wireshark preamble end ---- %"):
state = STATE_NULL
end_code(output)
continue
else:
write_code(output, line)
elif state is STATE_FINALE:
if line.startswith("% ---- wireshark finale end ---- %"):
state = STATE_NULL
end_code(output)
continue
else:
write_code(output, line)
else:
exit_err("NO MATCH:%s", line)
sys.exit(0)
if __name__ == "__main__":
main()
#
# Editor modelines - http://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 4
# indent-tabs-mode: nil
# End:
#
# vi: set shiftwidth=4 expandtab:
# :indentSize=4:noTabs=true:
#
|
gpl-2.0
|
XiaodunServerGroup/xiaodun-platform
|
lms/djangoapps/instructor/management/commands/openended_stats.py
|
20
|
5270
|
"""
Command to get statistics about open ended problems.
"""
import csv
import time
from django.core.management.base import BaseCommand
from optparse import make_option
from xmodule.modulestore import Location
from xmodule.modulestore.django import modulestore
from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild
from courseware.courses import get_course
from courseware.models import StudentModule
from student.models import anonymous_id_for_user, CourseEnrollment
from instructor.utils import get_module_for_student
class Command(BaseCommand):
"""
Command to get statistics about open ended problems.
"""
help = "Usage: openended_stats <course_id> <problem_location> --task-number=<task_number>\n"
option_list = BaseCommand.option_list + (
make_option('--task-number',
type='int', default=0,
help="Task number to get statistics about."),
)
def handle(self, *args, **options):
"""Handler for command."""
task_number = options['task_number']
if len(args) == 2:
course_id = args[0]
location = args[1]
else:
print self.help
return
try:
course = get_course(course_id)
except ValueError as err:
print err
return
descriptor = modulestore().get_instance(course.id, location, depth=0)
if descriptor is None:
print "Location {0} not found in course".format(location)
return
try:
enrolled_students = CourseEnrollment.users_enrolled_in(course_id)
print "Total students enrolled in {0}: {1}".format(course_id, enrolled_students.count())
calculate_task_statistics(enrolled_students, course, location, task_number)
except KeyboardInterrupt:
print "\nOperation Cancelled"
def calculate_task_statistics(students, course, location, task_number, write_to_file=True):
"""Print stats of students."""
stats = {
OpenEndedChild.INITIAL: 0,
OpenEndedChild.ASSESSING: 0,
OpenEndedChild.POST_ASSESSMENT: 0,
OpenEndedChild.DONE: 0
}
students_with_saved_answers = []
students_with_ungraded_submissions = [] # pylint: disable=invalid-name
students_with_graded_submissions = [] # pylint: disable=invalid-name
students_with_no_state = []
student_modules = StudentModule.objects.filter(module_state_key=location, student__in=students).order_by('student')
print "Total student modules: {0}".format(student_modules.count())
for index, student_module in enumerate(student_modules):
if index % 100 == 0:
print "--- {0} students processed ---".format(index)
student = student_module.student
print "{0}:{1}".format(student.id, student.username)
module = get_module_for_student(student, course, location)
if module is None:
print " WARNING: No state found"
students_with_no_state.append(student)
continue
latest_task = module.child_module.get_task_number(task_number)
if latest_task is None:
print " No task state found"
students_with_no_state.append(student)
continue
task_state = latest_task.child_state
stats[task_state] += 1
print " State: {0}".format(task_state)
if task_state == OpenEndedChild.INITIAL:
if latest_task.stored_answer is not None:
students_with_saved_answers.append(student)
elif task_state == OpenEndedChild.ASSESSING:
students_with_ungraded_submissions.append(student)
elif task_state == OpenEndedChild.POST_ASSESSMENT or task_state == OpenEndedChild.DONE:
students_with_graded_submissions.append(student)
location = Location(location)
print "----------------------------------"
print "Time: {0}".format(time.strftime("%Y %b %d %H:%M:%S +0000", time.gmtime()))
print "Course: {0}".format(course.id)
print "Location: {0}".format(location)
print "No state: {0}".format(len(students_with_no_state))
print "Initial State: {0}".format(stats[OpenEndedChild.INITIAL] - len(students_with_saved_answers))
print "Saved answers: {0}".format(len(students_with_saved_answers))
print "Submitted answers: {0}".format(stats[OpenEndedChild.ASSESSING])
print "Received grades: {0}".format(stats[OpenEndedChild.POST_ASSESSMENT] + stats[OpenEndedChild.DONE])
print "----------------------------------"
if write_to_file:
filename = "stats.{0}.{1}".format(location.course, location.name)
time_stamp = time.strftime("%Y%m%d-%H%M%S")
with open('{0}.{1}.csv'.format(filename, time_stamp), 'wb') as csv_file:
writer = csv.writer(csv_file, delimiter=' ', quoting=csv.QUOTE_MINIMAL)
for student in students_with_ungraded_submissions:
writer.writerow(("ungraded", student.id, anonymous_id_for_user(student, ''), student.username))
for student in students_with_graded_submissions:
writer.writerow(("graded", student.id, anonymous_id_for_user(student, ''), student.username))
return stats
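# Example invocation (course id and location are illustrative):
#   python manage.py openended_stats MITx/6.002x/2013_Spring \
#       i4x://MITx/6.002x/combinedopenended/Problem1 --task-number=1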
|
agpl-3.0
|
jaggu303619/asylum-v2.0
|
openerp/addons/base/res/res_bank.py
|
14
|
10539
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class Bank(osv.osv):
_description='Bank'
_name = 'res.bank'
_order = 'name'
_columns = {
'name': fields.char('Name', size=128, required=True),
'street': fields.char('Street', size=128),
'street2': fields.char('Street2', size=128),
'zip': fields.char('Zip', change_default=True, size=24),
'city': fields.char('City', size=128),
'state': fields.many2one("res.country.state", 'Fed. State',
domain="[('country_id', '=', country)]"),
'country': fields.many2one('res.country', 'Country'),
'email': fields.char('Email', size=64),
'phone': fields.char('Phone', size=64),
'fax': fields.char('Fax', size=64),
'active': fields.boolean('Active'),
'bic': fields.char('Bank Identifier Code', size=64,
help="Sometimes called BIC or Swift."),
}
_defaults = {
'active': lambda *a: 1,
}
def name_get(self, cr, uid, ids, context=None):
result = []
for bank in self.browse(cr, uid, ids, context):
result.append((bank.id, (bank.bic and (bank.bic + ' - ') or '') + bank.name))
return result
Bank()
class res_partner_bank_type(osv.osv):
_description='Bank Account Type'
_name = 'res.partner.bank.type'
_order = 'name'
_columns = {
'name': fields.char('Name', size=64, required=True, translate=True),
'code': fields.char('Code', size=64, required=True),
'field_ids': fields.one2many('res.partner.bank.type.field', 'bank_type_id', 'Type Fields'),
'format_layout': fields.text('Format Layout', translate=True)
}
_defaults = {
'format_layout': lambda *args: "%(bank_name)s: %(acc_number)s"
}
res_partner_bank_type()
class res_partner_bank_type_fields(osv.osv):
_description='Bank type fields'
_name = 'res.partner.bank.type.field'
_order = 'name'
_columns = {
'name': fields.char('Field Name', size=64, required=True, translate=True),
'bank_type_id': fields.many2one('res.partner.bank.type', 'Bank Type', required=True, ondelete='cascade'),
'required': fields.boolean('Required'),
'readonly': fields.boolean('Readonly'),
'size': fields.integer('Max. Size'),
}
res_partner_bank_type_fields()
class res_partner_bank(osv.osv):
'''Bank Accounts'''
_name = "res.partner.bank"
_rec_name = "acc_number"
_description = __doc__
_order = 'sequence'
def _bank_type_get(self, cr, uid, context=None):
bank_type_obj = self.pool.get('res.partner.bank.type')
result = []
type_ids = bank_type_obj.search(cr, uid, [])
bank_types = bank_type_obj.browse(cr, uid, type_ids, context=context)
for bank_type in bank_types:
result.append((bank_type.code, bank_type.name))
return result
def _default_value(self, cursor, user, field, context=None):
if context is None: context = {}
if field in ('country_id', 'state_id'):
value = False
else:
value = ''
if not context.get('address'):
return value
for address in self.pool.get('res.partner').resolve_2many_commands(
cursor, user, 'address', context['address'], ['type', field], context=context):
if address.get('type') == 'default':
return address.get(field, value)
elif not address.get('type'):
value = address.get(field, value)
return value
_columns = {
'name': fields.char('Bank Account', size=64), # to be removed in v6.2 ?
'acc_number': fields.char('Account Number', size=64, required=True),
'bank': fields.many2one('res.bank', 'Bank'),
'bank_bic': fields.char('Bank Identifier Code', size=16),
'bank_name': fields.char('Bank Name', size=32),
'owner_name': fields.char('Account Owner Name', size=128),
'street': fields.char('Street', size=128),
'zip': fields.char('Zip', change_default=True, size=24),
'city': fields.char('City', size=128),
'country_id': fields.many2one('res.country', 'Country',
change_default=True),
'state_id': fields.many2one("res.country.state", 'Fed. State',
change_default=True, domain="[('country_id','=',country_id)]"),
'company_id': fields.many2one('res.company', 'Company',
ondelete='cascade', help="Only if this bank account belong to your company"),
'partner_id': fields.many2one('res.partner', 'Account Owner', required=True,
ondelete='cascade', select=True),
'state': fields.selection(_bank_type_get, 'Bank Account Type', required=True,
change_default=True),
'sequence': fields.integer('Sequence'),
'footer': fields.boolean("Display on Reports", help="Display this bank account on the footer of printed documents like invoices and sales orders.")
}
_defaults = {
'owner_name': lambda obj, cursor, user, context: obj._default_value(
cursor, user, 'name', context=context),
'street': lambda obj, cursor, user, context: obj._default_value(
cursor, user, 'street', context=context),
'city': lambda obj, cursor, user, context: obj._default_value(
cursor, user, 'city', context=context),
'zip': lambda obj, cursor, user, context: obj._default_value(
cursor, user, 'zip', context=context),
'country_id': lambda obj, cursor, user, context: obj._default_value(
cursor, user, 'country_id', context=context),
'state_id': lambda obj, cursor, user, context: obj._default_value(
cursor, user, 'state_id', context=context),
'name': '/'
}
def fields_get(self, cr, uid, allfields=None, context=None):
res = super(res_partner_bank, self).fields_get(cr, uid, allfields=allfields, context=context)
bank_type_obj = self.pool.get('res.partner.bank.type')
type_ids = bank_type_obj.search(cr, uid, [])
types = bank_type_obj.browse(cr, uid, type_ids)
for type in types:
for field in type.field_ids:
if field.name in res:
res[field.name].setdefault('states', {})
res[field.name]['states'][type.code] = [
('readonly', field.readonly),
('required', field.required)]
return res
def _prepare_name_get(self, cr, uid, bank_dicts, context=None):
""" Format the name of a res.partner.bank.
This function is designed to be inherited to add replacement fields.
:param bank_dicts: a list of res.partner.bank dicts, as returned by the method read()
:return: [(id, name), ...], as returned by the method name_get()
"""
# prepare a mapping {code: format_layout} for all bank types
bank_type_obj = self.pool.get('res.partner.bank.type')
bank_types = bank_type_obj.browse(cr, uid, bank_type_obj.search(cr, uid, []), context=context)
bank_code_format = dict((bt.code, bt.format_layout) for bt in bank_types)
res = []
for data in bank_dicts:
name = data['acc_number']
if data['state'] and bank_code_format.get(data['state']):
try:
if not data.get('bank_name'):
data['bank_name'] = _('BANK')
data = dict((k, v or '') for (k, v) in data.iteritems())
name = bank_code_format[data['state']] % data
except Exception:
raise osv.except_osv(_("Formating Error"), _("Invalid Bank Account Type Name format."))
res.append((data.get('id', False), name))
return res
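# Illustrative example (hypothetical bank type): with a type whose code is
# 'bank' and the default format_layout "%(bank_name)s: %(acc_number)s", a
# record {'acc_number': '0001234', 'bank_name': 'ACME', 'state': 'bank'}
# is rendered by name_get() as "ACME: 0001234"; a missing bank_name falls
# back to the _('BANK') placeholder.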
def name_get(self, cr, uid, ids, context=None):
if not len(ids):
return []
bank_dicts = self.read(cr, uid, ids, context=context)
return self._prepare_name_get(cr, uid, bank_dicts, context=context)
def onchange_company_id(self, cr, uid, ids, company_id, context=None):
result = {}
if company_id:
c = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
if c.partner_id:
r = self.onchange_partner_id(cr, uid, ids, c.partner_id.id, context=context)
r['value']['partner_id'] = c.partner_id.id
r['value']['footer'] = 1
result = r
return result
def onchange_bank_id(self, cr, uid, ids, bank_id, context=None):
result = {}
if bank_id:
bank = self.pool.get('res.bank').browse(cr, uid, bank_id, context=context)
result['bank_name'] = bank.name
result['bank_bic'] = bank.bic
return {'value': result}
def onchange_partner_id(self, cr, uid, id, partner_id, context=None):
result = {}
if partner_id:
part = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
result['owner_name'] = part.name
result['street'] = part.street or False
result['city'] = part.city or False
result['zip'] = part.zip or False
result['country_id'] = part.country_id.id
result['state_id'] = part.state_id.id
return {'value': result}
res_partner_bank()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
hgrif/incubator-airflow
|
airflow/api/common/experimental/trigger_dag.py
|
9
|
1706
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
from airflow.exceptions import AirflowException
from airflow.models import DagRun, DagBag
from airflow.utils.state import State
def trigger_dag(dag_id, run_id=None, conf=None, execution_date=None):
dagbag = DagBag()
if dag_id not in dagbag.dags:
raise AirflowException("Dag id {} not found".format(dag_id))
dag = dagbag.get_dag(dag_id)
if not execution_date:
execution_date = datetime.datetime.utcnow()
assert isinstance(execution_date, datetime.datetime)
execution_date = execution_date.replace(microsecond=0)
if not run_id:
run_id = "manual__{0}".format(execution_date.isoformat())
dr = DagRun.find(dag_id=dag_id, run_id=run_id)
if dr:
raise AirflowException("Run id {} already exists for dag id {}".format(
run_id,
dag_id
))
run_conf = None
if conf:
run_conf = json.loads(conf)
trigger = dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.RUNNING,
conf=run_conf,
external_trigger=True
)
return trigger
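# Minimal usage sketch (assumes a DAG with id 'example_dag' exists in the DagBag):
#   from airflow.api.common.experimental.trigger_dag import trigger_dag
#   dag_run = trigger_dag('example_dag', conf='{"param": "value"}')
# Note that conf is passed as a JSON string and decoded with json.loads above.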
|
apache-2.0
|
popazerty/try
|
lib/python/Plugins/Extensions/IniEcasa/__init__.py
|
4
|
1094
|
#pragma mark - Config
from Components.config import config, ConfigSubsection, ConfigText, \
ConfigPassword, ConfigLocations, ConfigSet, ConfigNumber, \
ConfigSelection
from Tools.Directories import resolveFilename, SCOPE_HDD
config.plugins.ecasa = ConfigSubsection()
config.plugins.ecasa.google_username = ConfigText(default="", fixed_size=False)
config.plugins.ecasa.google_password = ConfigPassword(default="")
config.plugins.ecasa.cachedirs = ConfigLocations(default="/tmp/ecasa")
config.plugins.ecasa.cache = ConfigText(default="/tmp/ecasa")
config.plugins.ecasa.user = ConfigText(default='default')
config.plugins.ecasa.searchhistory = ConfigSet(choices = [])
config.plugins.ecasa.userhistory = ConfigSet(choices = [])
config.plugins.ecasa.searchlimit = ConfigNumber(default=30)
config.plugins.ecasa.cachesize = ConfigNumber(default=30)
config.plugins.ecasa.slideshow_interval = ConfigNumber(default=30)
config.plugins.ecasa.flickr_api_key = ConfigText(default="", fixed_size=False)
config.plugins.ecasa.last_backend = ConfigSelection(default='picasa', choices=['picasa', 'flickr'])
|
gpl-2.0
|
dcjohnson1989/selenium
|
py/selenium/webdriver/safari/webdriver.py
|
71
|
2802
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import base64
try:
import http.client as http_client
except ImportError:
import httplib as http_client
import os
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from .service import Service
class WebDriver(RemoteWebDriver):
"""
Controls the SafariDriver and allows you to drive the browser.
"""
def __init__(self, executable_path=None, port=0,
desired_capabilities=DesiredCapabilities.SAFARI, quiet=False):
"""
Creates a new instance of the Safari driver.
Starts the service and then creates new instance of Safari Driver.
:Args:
- executable_path - path to the executable. If the default is used, the path is read
from the SELENIUM_SERVER_JAR environment variable.
- port - port you would like the service to run, if left as 0, a free port will be found.
- desired_capabilities: Dictionary object with desired capabilities (Can be used to provide various Safari switches).
"""
if executable_path is None:
try:
executable_path = os.environ["SELENIUM_SERVER_JAR"]
except KeyError:
raise Exception("No executable path given, please add one to Environment Variable \
'SELENIUM_SERVER_JAR'")
self.service = Service(executable_path, port=port, quiet=quiet)
self.service.start()
RemoteWebDriver.__init__(self,
command_executor=self.service.service_url,
desired_capabilities=desired_capabilities)
self._is_remote = False
def quit(self):
"""
Closes the browser and shuts down the SafariDriver executable
that is started when starting the SafariDriver
"""
try:
RemoteWebDriver.quit(self)
except http_client.BadStatusLine:
pass
finally:
self.service.stop()
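# Minimal usage sketch (assumes SELENIUM_SERVER_JAR points at a selenium
# standalone server jar):
#   driver = WebDriver()
#   driver.get("http://example.com")
#   driver.quit()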
|
apache-2.0
|
elba7r/frameworking
|
frappe/www/desk.py
|
2
|
2141
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
no_sitemap = 1
no_cache = 1
base_template_path = "templates/www/desk.html"
import os, re
import frappe
from frappe import _
import frappe.sessions
def get_context(context):
if (frappe.session.user == "Guest" or
frappe.db.get_value("User", frappe.session.user, "user_type")=="Website User"):
frappe.throw(_("You are not permitted to access this page."), frappe.PermissionError)
hooks = frappe.get_hooks()
boot = frappe.sessions.get()
# this needs commit
csrf_token = frappe.sessions.get_csrf_token()
frappe.db.commit()
boot_json = frappe.as_json(boot)
# remove script tags from boot
boot_json = re.sub(r"<script>[^<]*</script>", "", boot_json)
return {
"build_version": get_build_version(),
"include_js": hooks["app_include_js"],
"include_css": hooks["app_include_css"],
"sounds": hooks["sounds"],
"boot": boot if context.get("for_mobile") else boot_json,
"csrf_token": csrf_token,
"background_image": boot.user.background_image or boot.default_background_image,
"google_analytics_id": frappe.conf.get("google_analytics_id"),
"mixpanel_id": frappe.conf.get("mixpanel_id")
}
@frappe.whitelist()
def get_desk_assets(build_version):
"""Get desk assets to be loaded for mobile app"""
data = get_context({"for_mobile": True})
assets = [{"type": "js", "data": ""}, {"type": "css", "data": ""}]
if build_version != data["build_version"]:
# new build, send assets
for path in data["include_js"]:
with open(os.path.join(frappe.local.sites_path, path) ,"r") as f:
assets[0]["data"] = assets[0]["data"] + "\n" + unicode(f.read(), "utf-8")
for path in data["include_css"]:
with open(os.path.join(frappe.local.sites_path, path) ,"r") as f:
assets[1]["data"] = assets[1]["data"] + "\n" + unicode(f.read(), "utf-8")
return {
"build_version": data["build_version"],
"boot": data["boot"],
"assets": assets
}
def get_build_version():
return str(os.path.getmtime(os.path.join(frappe.local.sites_path, "assets", "js",
"desk.min.js")))
|
mit
|
OpenSciViz/cloudstack
|
openstack/src/python/nova-libvirt/host.py
|
2
|
33050
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manages information about the host OS and hypervisor.
This class encapsulates a connection to the libvirt
daemon and provides certain higher level APIs around
the raw libvirt API. These APIs are then used by all
the other libvirt related classes
"""
import operator
import os
import socket
import sys
import threading
from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import units
from oslo_utils import versionutils
import six
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import rpc
from nova import utils
from nova.virt import event as virtevent
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import guest as libvirt_guest
libvirt = None
LOG = logging.getLogger(__name__)
native_socket = patcher.original('socket')
native_threading = patcher.original("threading")
native_Queue = patcher.original("Queue" if six.PY2 else "queue")
CONF = nova.conf.CONF
# This list is for libvirt hypervisor drivers that need special handling.
# This is *not* the complete list of supported hypervisor drivers.
HV_DRIVER_QEMU = "QEMU"
HV_DRIVER_XEN = "Xen"
class Host(object):
def __init__(self, uri, read_only=False,
conn_event_handler=None,
lifecycle_event_handler=None):
global libvirt
if libvirt is None:
libvirt = importutils.import_module('libvirt')
self._uri = uri
self._read_only = read_only
self._initial_connection = True
self._conn_event_handler = conn_event_handler
self._conn_event_handler_queue = six.moves.queue.Queue()
self._lifecycle_event_handler = lifecycle_event_handler
self._caps = None
self._hostname = None
self._wrapped_conn = None
self._wrapped_conn_lock = threading.Lock()
self._event_queue = None
self._events_delayed = {}
# Note(toabctl): During a reboot of a domain, STOPPED and
# STARTED events are sent. To prevent shutting
# down the domain during a reboot, delay the
# STOPPED lifecycle event some seconds.
self._lifecycle_delay = 15
def _native_thread(self):
"""Receives async events coming in from libvirtd.
This is a native thread which runs the default
libvirt event loop implementation. This processes
any incoming async events from libvirtd and queues
them for later dispatch. This thread is only
permitted to use libvirt python APIs, and the
driver.queue_event method. In particular any use
of logging is forbidden, since it will confuse
eventlet's greenthread integration
"""
while True:
libvirt.virEventRunDefaultImpl()
def _dispatch_thread(self):
"""Dispatches async events coming in from libvirtd.
This is a green thread which waits for events to
arrive from the libvirt event loop thread. This
then dispatches the events to the compute manager.
"""
while True:
self._dispatch_events()
def _conn_event_thread(self):
"""Dispatches async connection events"""
# NOTE(mdbooth): This thread doesn't need to jump through the same
# hoops as _dispatch_thread because it doesn't interact directly
# with the libvirt native thread.
while True:
self._dispatch_conn_event()
def _dispatch_conn_event(self):
# NOTE(mdbooth): Splitting out this loop looks redundant, but it
# means we can easily dispatch events synchronously from tests and
# it isn't completely awful.
handler = self._conn_event_handler_queue.get()
try:
handler()
except Exception:
LOG.exception(_LE('Exception handling connection event'))
finally:
self._conn_event_handler_queue.task_done()
@staticmethod
def _event_lifecycle_callback(conn, dom, event, detail, opaque):
"""Receives lifecycle events from libvirt.
NB: this method is executing in a native thread, not
an eventlet coroutine. It can only invoke other libvirt
APIs, or use self._queue_event(). Any use of logging APIs
in particular is forbidden.
"""
self = opaque
uuid = dom.UUIDString()
transition = None
if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
transition = virtevent.EVENT_LIFECYCLE_STOPPED
elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
transition = virtevent.EVENT_LIFECYCLE_STARTED
elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
transition = virtevent.EVENT_LIFECYCLE_PAUSED
elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
transition = virtevent.EVENT_LIFECYCLE_RESUMED
if transition is not None:
self._queue_event(virtevent.LifecycleEvent(uuid, transition))
def _close_callback(self, conn, reason, opaque):
close_info = {'conn': conn, 'reason': reason}
self._queue_event(close_info)
@staticmethod
def _test_connection(conn):
try:
conn.getLibVersion()
return True
except libvirt.libvirtError as e:
if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
libvirt.VIR_ERR_INTERNAL_ERROR) and
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
LOG.debug('Connection to libvirt broke')
return False
raise
@staticmethod
def _connect_auth_cb(creds, opaque):
if len(creds) == 0:
return 0
raise exception.InternalError(
_("Can not handle authentication request for %d credentials")
% len(creds))
@staticmethod
def _connect(uri, read_only):
auth = [[libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_ECHOPROMPT,
libvirt.VIR_CRED_REALM,
libvirt.VIR_CRED_PASSPHRASE,
libvirt.VIR_CRED_NOECHOPROMPT,
libvirt.VIR_CRED_EXTERNAL],
Host._connect_auth_cb,
None]
flags = 0
if read_only:
flags = libvirt.VIR_CONNECT_RO
# tpool.proxy_call creates a native thread. Due to limitations
# with eventlet locking we cannot use the logging API inside
# the called function.
return tpool.proxy_call(
(libvirt.virDomain, libvirt.virConnect),
libvirt.openAuth, uri, auth, flags)
def _queue_event(self, event):
"""Puts an event on the queue for dispatch.
This method is called by the native event thread to
put events on the queue for later dispatch by the
green thread. Any use of logging APIs is forbidden.
"""
if self._event_queue is None:
return
# Queue the event...
self._event_queue.put(event)
# ...then wakeup the green thread to dispatch it
c = ' '.encode()
self._event_notify_send.write(c)
self._event_notify_send.flush()
def _dispatch_events(self):
"""Wait for & dispatch events from native thread
Blocks until native thread indicates some events
are ready. Then dispatches all queued events.
"""
# Wait to be notified that there are some
# events pending
try:
_c = self._event_notify_recv.read(1)
assert _c
except ValueError:
return # will be raised when pipe is closed
# Process as many events as possible without
# blocking
last_close_event = None
while not self._event_queue.empty():
try:
event = self._event_queue.get(block=False)
if isinstance(event, virtevent.LifecycleEvent):
# call possibly with delay
self._event_emit_delayed(event)
elif 'conn' in event and 'reason' in event:
last_close_event = event
except native_Queue.Empty:
pass
if last_close_event is None:
return
conn = last_close_event['conn']
# get_new_connection may already have disabled the host,
# in which case _wrapped_conn is None.
with self._wrapped_conn_lock:
if conn == self._wrapped_conn:
reason = str(last_close_event['reason'])
msg = _("Connection to libvirt lost: %s") % reason
self._wrapped_conn = None
self._queue_conn_event_handler(False, msg)
def _event_emit_delayed(self, event):
"""Emit events - possibly delayed."""
def event_cleanup(gt, *args, **kwargs):
"""Callback function for greenthread. Called
to cleanup the _events_delayed dictionary when an event
was called.
"""
event = args[0]
self._events_delayed.pop(event.uuid, None)
# Cleanup possible delayed stop events.
if event.uuid in self._events_delayed.keys():
self._events_delayed[event.uuid].cancel()
self._events_delayed.pop(event.uuid, None)
LOG.debug("Removed pending event for %s due to "
"lifecycle event", event.uuid)
if event.transition == virtevent.EVENT_LIFECYCLE_STOPPED:
# Delay STOPPED event, as they may be followed by a STARTED
# event in case the instance is rebooting
id_ = greenthread.spawn_after(self._lifecycle_delay,
self._event_emit, event)
self._events_delayed[event.uuid] = id_
# add callback to cleanup self._events_delayed dict after
# event was called
id_.link(event_cleanup, event)
else:
self._event_emit(event)
def _event_emit(self, event):
if self._lifecycle_event_handler is not None:
self._lifecycle_event_handler(event)
def _init_events_pipe(self):
"""Create a self-pipe for the native thread to synchronize on.
This code is taken from the eventlet tpool module, under terms
of the Apache License v2.0.
"""
self._event_queue = native_Queue.Queue()
try:
rpipe, wpipe = os.pipe()
self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)
except (ImportError, NotImplementedError):
# This is Windows compatibility -- use a socket instead
# of a pipe because pipes don't really exist on Windows.
sock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 0))
sock.listen(50)
csock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
csock.connect(('localhost', sock.getsockname()[1]))
nsock, addr = sock.accept()
self._event_notify_send = nsock.makefile('wb', 0)
gsock = greenio.GreenSocket(csock)
self._event_notify_recv = gsock.makefile('rb', 0)
def _init_events(self):
"""Initializes the libvirt events subsystem.
This requires running a native thread to provide the
libvirt event loop integration. This forwards events
to a green thread which does the actual dispatching.
"""
self._init_events_pipe()
LOG.debug("Starting native event thread")
self._event_thread = native_threading.Thread(
target=self._native_thread)
self._event_thread.setDaemon(True)
self._event_thread.start()
LOG.debug("Starting green dispatch thread")
utils.spawn(self._dispatch_thread)
def _get_new_connection(self):
# call with _wrapped_conn_lock held
LOG.debug('Connecting to libvirt: %s', self._uri)
# This will raise an exception on failure
wrapped_conn = self._connect(self._uri, self._read_only)
try:
LOG.debug("Registering for lifecycle events %s", self)
wrapped_conn.domainEventRegisterAny(
None,
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
self._event_lifecycle_callback,
self)
except Exception as e:
LOG.warning(_LW("URI %(uri)s does not support events: %(error)s"),
{'uri': self._uri, 'error': e})
try:
LOG.debug("Registering for connection events: %s", str(self))
wrapped_conn.registerCloseCallback(self._close_callback, None)
except (TypeError, AttributeError) as e:
# NOTE: The registerCloseCallback of python-libvirt 1.0.1+
# is defined with 3 arguments, and the above registerClose-
# Callback succeeds. However, the one of python-libvirt 1.0.0
# is defined with 4 arguments and TypeError happens here.
# Then python-libvirt 0.9 does not define a method register-
# CloseCallback.
LOG.debug("The version of python-libvirt does not support "
"registerCloseCallback or is too old: %s", e)
except libvirt.libvirtError as e:
LOG.warning(_LW("URI %(uri)s does not support connection"
" events: %(error)s"),
{'uri': self._uri, 'error': e})
return wrapped_conn
def _queue_conn_event_handler(self, *args, **kwargs):
if self._conn_event_handler is None:
return
def handler():
return self._conn_event_handler(*args, **kwargs)
self._conn_event_handler_queue.put(handler)
def _get_connection(self):
# multiple concurrent connections are protected by _wrapped_conn_lock
with self._wrapped_conn_lock:
# Drop the existing connection if it is not usable
if (self._wrapped_conn is not None and
not self._test_connection(self._wrapped_conn)):
self._wrapped_conn = None
# Connection was previously up, and went down
self._queue_conn_event_handler(
False, _('Connection to libvirt lost'))
if self._wrapped_conn is None:
try:
# This will raise if it fails to get a connection
self._wrapped_conn = self._get_new_connection()
except Exception as ex:
with excutils.save_and_reraise_exception():
# If we previously had a connection and it went down,
# we generated a down event for that above.
# We also want to generate a down event for an initial
# failure, which won't be handled above.
if self._initial_connection:
self._queue_conn_event_handler(
False,
_('Failed to connect to libvirt: %(msg)s') %
{'msg': ex})
finally:
self._initial_connection = False
self._queue_conn_event_handler(True, None)
return self._wrapped_conn
def get_connection(self):
"""Returns a connection to the hypervisor
This method should be used to create and return a well
configured connection to the hypervisor.
:returns: a libvirt.virConnect object
"""
try:
conn = self._get_connection()
except libvirt.libvirtError as ex:
LOG.exception(_LE("Connection to libvirt failed: %s"), ex)
payload = dict(ip=CONF.my_ip,
method='_connect',
reason=ex)
rpc.get_notifier('compute').error(nova_context.get_admin_context(),
'compute.libvirt.error',
payload)
raise exception.HypervisorUnavailable(host=CONF.host)
return conn
@staticmethod
def _libvirt_error_handler(context, err):
# Just ignore instead of default outputting to stderr.
pass
def initialize(self):
# NOTE(dkliban): Error handler needs to be registered before libvirt
# connection is used for the first time. Otherwise, the
# handler does not get registered.
libvirt.registerErrorHandler(self._libvirt_error_handler, None)
libvirt.virEventRegisterDefaultImpl()
self._init_events()
LOG.debug("Starting connection event dispatch thread")
utils.spawn(self._conn_event_thread)
self._initialized = True
def _version_check(self, lv_ver=None, hv_ver=None, hv_type=None,
op=operator.lt):
"""Check libvirt version, hypervisor version, and hypervisor type
:param hv_type: hypervisor driver from the top of this file.
"""
conn = self.get_connection()
try:
if lv_ver is not None:
libvirt_version = conn.getLibVersion()
if op(libvirt_version,
versionutils.convert_version_to_int(lv_ver)):
return False
if hv_ver is not None:
hypervisor_version = conn.getVersion()
if op(hypervisor_version,
versionutils.convert_version_to_int(hv_ver)):
return False
if hv_type is not None:
hypervisor_type = conn.getType()
if hypervisor_type != hv_type:
return False
return True
except Exception:
return False
def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._version_check(
lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.lt)
def has_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._version_check(
lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.ne)
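# NOTE: illustrative check (version numbers are hypothetical) - require at
# least libvirt 1.2.1 on a QEMU host before using a feature:
#   if host.has_min_version(lv_ver=(1, 2, 1), hv_type=HV_DRIVER_QEMU):
#       ...use the feature...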
def get_guest(self, instance):
"""Retrieve libvirt guest object for an instance.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
:param instance: a nova.objects.Instance object
:returns: a nova.virt.libvirt.Guest object
:raises exception.InstanceNotFound: The domain was not found
:raises exception.InternalError: A libvirt error occurred
"""
return libvirt_guest.Guest(self.get_domain(instance))
# TODO(sahid): needs to be private
def get_domain(self, instance):
"""Retrieve libvirt domain object for an instance.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
:param instance: a nova.objects.Instance object
:returns: a libvirt.Domain object
:raises exception.InstanceNotFound: The domain was not found
:raises exception.InternalError: A libvirt error occurred
"""
LOG.debug('hon: get_domain ... %r', instance)
try:
conn = self.get_connection()
return conn.lookupByName(instance.name)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance.uuid)
msg = (_('Error from libvirt while looking up %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s') %
{'instance_name': instance.name,
'error_code': error_code,
'ex': ex})
raise exception.InternalError(msg)
def list_guests(self, only_running=True, only_guests=True):
"""Get a list of Guest objects for nova instances
:param only_running: True to only return running instances
:param only_guests: True to filter out any host domain (eg Dom-0)
See method "list_instance_domains" for more information.
:returns: list of Guest objects
"""
return [libvirt_guest.Guest(dom) for dom in self.list_instance_domains(
only_running=only_running, only_guests=only_guests)]
def list_instance_domains(self, only_running=True, only_guests=True):
"""Get a list of libvirt.Domain objects for nova instances
:param only_running: True to only return running instances
:param only_guests: True to filter out any host domain (eg Dom-0)
Query libvirt to a get a list of all libvirt.Domain objects
that correspond to nova instances. If the only_running parameter
is true this list will only include active domains, otherwise
inactive domains will be included too. If the only_guests parameter
is true the list will have any "host" domain (aka Xen Domain-0)
filtered out.
:returns: list of libvirt.Domain objects
"""
flags = libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE
if not only_running:
flags = flags | libvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE
alldoms = self.get_connection().listAllDomains(flags)
doms = []
for dom in alldoms:
if only_guests and dom.ID() == 0:
continue
doms.append(dom)
return doms
def get_online_cpus(self):
"""Get the set of CPUs that are online on the host
Method is only used by NUMA code paths which check on
libvirt version >= 1.0.4. getCPUMap() was introduced in
libvirt 1.0.0.
:returns: set of online CPUs, raises libvirtError on error
"""
(cpus, cpu_map, online) = self.get_connection().getCPUMap()
online_cpus = set()
for cpu in range(cpus):
if cpu_map[cpu]:
online_cpus.add(cpu)
return online_cpus
def get_capabilities(self):
"""Returns the host capabilities information
Returns an instance of config.LibvirtConfigCaps representing
the capabilities of the host.
Note: The result is cached in the member attribute _caps.
:returns: a config.LibvirtConfigCaps object
"""
if not self._caps:
xmlstr = self.get_connection().getCapabilities()
LOG.info(_LI("Libvirt host capabilities %s"), xmlstr)
self._caps = vconfig.LibvirtConfigCaps()
self._caps.parse_str(xmlstr)
# NOTE(mriedem): Don't attempt to get baseline CPU features
# if libvirt can't determine the host cpu model.
if (hasattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES')
and self._caps.host.cpu.model is not None):
try:
xml_str = self._caps.host.cpu.to_xml()
if six.PY3 and isinstance(xml_str, six.binary_type):
xml_str = xml_str.decode('utf-8')
features = self.get_connection().baselineCPU(
[xml_str],
libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
if features:
cpu = vconfig.LibvirtConfigCPU()
cpu.parse_str(features)
self._caps.host.cpu.features = cpu.features
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
LOG.warning(_LW("URI %(uri)s does not support full set"
" of host capabilities: %(error)s"),
{'uri': self._uri, 'error': ex})
else:
raise
return self._caps
def get_driver_type(self):
"""Get hypervisor type.
:returns: hypervisor type (ex. qemu)
"""
return self.get_connection().getType()
def get_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 12003)
"""
return self.get_connection().getVersion()
def get_hostname(self):
"""Returns the hostname of the hypervisor."""
hostname = self.get_connection().getHostname()
        LOG.debug('get_hostname: %s', hostname)
if self._hostname is None:
self._hostname = hostname
elif hostname != self._hostname:
            LOG.error(_LE('Hostname has changed from %(old)s '
                          'to %(new)s. A restart is required for this '
                          'change to take effect.'),
                      {'old': self._hostname,
                       'new': hostname})
return self._hostname
def find_secret(self, usage_type, usage_id):
"""Find a secret.
usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
usage_id: name of resource in secret
"""
if usage_type == 'iscsi':
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_ISCSI
elif usage_type in ('rbd', 'ceph'):
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_CEPH
elif usage_type == 'volume':
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_VOLUME
else:
msg = _("Invalid usage_type: %s")
raise exception.InternalError(msg % usage_type)
try:
conn = self.get_connection()
return conn.secretLookupByUsage(usage_type_const, usage_id)
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_NO_SECRET:
return None
def create_secret(self, usage_type, usage_id, password=None):
"""Create a secret.
:param usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
'rbd' will be converted to 'ceph'.
:param usage_id: name of resource in secret
:param password: optional secret value to set
"""
secret_conf = vconfig.LibvirtConfigSecret()
secret_conf.ephemeral = False
secret_conf.private = False
secret_conf.usage_id = usage_id
if usage_type in ('rbd', 'ceph'):
secret_conf.usage_type = 'ceph'
elif usage_type == 'iscsi':
secret_conf.usage_type = 'iscsi'
elif usage_type == 'volume':
secret_conf.usage_type = 'volume'
else:
msg = _("Invalid usage_type: %s")
raise exception.InternalError(msg % usage_type)
xml = secret_conf.to_xml()
try:
LOG.debug('Secret XML: %s', xml)
conn = self.get_connection()
secret = conn.secretDefineXML(xml)
if password is not None:
secret.setValue(password)
return secret
except libvirt.libvirtError:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error defining a secret with XML: %s'), xml)
def delete_secret(self, usage_type, usage_id):
"""Delete a secret.
usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
usage_id: name of resource in secret
"""
secret = self.find_secret(usage_type, usage_id)
if secret is not None:
secret.undefine()
def _get_hardware_info(self):
"""Returns hardware information about the Node.
Note that the memory size is reported in MiB instead of KiB.
"""
return self.get_connection().getInfo()
def get_cpu_count(self):
"""Returns the total numbers of cpu in the host."""
return self._get_hardware_info()[2]
def get_memory_mb_total(self):
"""Get the total memory size(MB) of physical computer.
:returns: the total amount of memory(MB).
"""
return self._get_hardware_info()[1]
def get_memory_mb_used(self):
"""Get the used memory size(MB) of physical computer.
:returns: the total usage of memory(MB).
"""
if sys.platform.upper() not in ['LINUX2', 'LINUX3']:
return 0
with open('/proc/meminfo') as fp:
m = fp.read().split()
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
if CONF.libvirt.virt_type == 'xen':
used = 0
for guest in self.list_guests(only_guests=False):
try:
# TODO(sahid): Use get_info...
dom_mem = int(guest._get_domain_info(self)[2])
except libvirt.libvirtError as e:
LOG.warning(_LW("couldn't obtain the memory from domain:"
" %(uuid)s, exception: %(ex)s"),
{"uuid": guest.uuid, "ex": e})
continue
# skip dom0
if guest.id != 0:
used += dom_mem
else:
                # the memory reported by dom0 is greater than what
                # it is actually using
used += (dom_mem -
(int(m[idx1 + 1]) +
int(m[idx2 + 1]) +
int(m[idx3 + 1])))
# Convert it to MB
return used // units.Ki
else:
avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1]))
# Convert it to MB
return self.get_memory_mb_total() - avail // units.Ki
def get_cpu_stats(self):
"""Returns the current CPU state of the host with frequency."""
stats = self.get_connection().getCPUStats(
libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0)
        # getInfo() returns various information about the host node;
        # element 3 of the returned tuple is the CPU frequency (in MHz).
stats["frequency"] = self._get_hardware_info()[3]
return stats
def write_instance_config(self, xml):
"""Defines a domain, but does not start it.
:param xml: XML domain definition of the guest.
:returns: an instance of Guest
"""
domain = self.get_connection().defineXML(xml)
return libvirt_guest.Guest(domain)
def device_lookup_by_name(self, name):
"""Lookup a node device by its name.
:returns: a virNodeDevice instance
"""
return self.get_connection().nodeDeviceLookupByName(name)
def list_pci_devices(self, flags=0):
"""Lookup pci devices.
:returns: a list of virNodeDevice instance
"""
return self.get_connection().listDevices("pci", flags)
def compare_cpu(self, xmlDesc, flags=0):
"""Compares the given CPU description with the host CPU."""
return self.get_connection().compareCPU(xmlDesc, flags)
def is_cpu_control_policy_capable(self):
"""Returns whether kernel configuration CGROUP_SCHED is enabled
CONFIG_CGROUP_SCHED may be disabled in some kernel configs to
improve scheduler latency.
"""
try:
with open("/proc/self/mounts", "r") as fd:
for line in fd.readlines():
# mount options and split options
bits = line.split()[3].split(",")
if "cpu" in bits:
return True
return False
except IOError:
return False
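# A minimal, self-contained sketch (not nova code) of the /proc/meminfo
# arithmetic used by get_memory_mb_used() above. "Available" memory is
# approximated as MemFree + Buffers + Cached (all reported in KiB), and
# integer division by 1024 (units.Ki) converts KiB to MiB. The meminfo
# path and field names are the standard Linux ones; this assumes a Linux
# host.
def _example_available_memory_mb(meminfo_path='/proc/meminfo'):
    with open(meminfo_path) as fp:
        fields = fp.read().split()
    # each "Key:" token is immediately followed by its value in KiB
    avail_kib = sum(int(fields[fields.index(key) + 1])
                    for key in ('MemFree:', 'Buffers:', 'Cached:'))
    return avail_kib // 1024  # KiB -> MiB, mirroring "avail // units.Ki"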
|
mit
|
uzgit/ardupilot
|
Tools/ardupilotwaf/cmake.py
|
42
|
13322
|
#!/usr/bin/env python
# encoding: utf-8
# Copyright (C) 2015-2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Waf tool for external builds with cmake. This tool defines the feature
'cmake_build', for building through the cmake interface.
You can set CMAKE_MIN_VERSION on the configuration environment (cfg.env)
before loading this tool to set a minimum version required for cmake. Example::
def configure(cfg):
        cfg.env.CMAKE_MIN_VERSION = '3.5.2'
cfg.load('cmake')
Usage example::
def build(bld):
# cmake configuration
foo = bld.cmake(
name='foo',
cmake_src='path/to/foosrc', # where is the source tree
cmake_bld='path/to/foobld', # where to generate the build system
cmake_vars=dict(
CMAKE_BUILD_TYPE='Release',
...
),
)
# cmake build for external target 'bar'
bld(
features='cmake_build',
cmake_config='foo', # this build depends on the cmake generation above defined
cmake_target='bar', # what to pass to option --target of cmake
)
# cmake build for target 'baz' (syntactic sugar)
foo.build('baz')
The keys of cmake_vars are sorted so that unnecessary execution is avoided. If
you want to ensure an order in which the variables are passed to cmake, use an
OrderedDict. Example::
def build(bld):
foo_vars = OrderedDict()
foo_vars['CMAKE_BUILD_TYPE'] = 'Release'
foo_vars['FOO'] = 'value_of_foo'
foo_vars['BAR'] = 'value_of_bar'
# cmake configuration
foo = bld.cmake(
cmake_vars=foo_vars,
...
)
There may be cases when you want to establish dependency between other tasks and
the external build system's products (headers and libraries, for example). In
that case, you can specify the specific files in the option 'target' of your
cmake_build task generator. Example::
def build(bld):
...
# declaring on target only what I'm interested in
foo.build('baz', target='path/to/foobld/include/baz.h')
# myprogram.c includes baz.h, so the dependency is (implicitly)
# established
bld.program(target='myprogram', source='myprogram.c')
# another example
foo.build('another', target='another.txt')
bld(
rule='${CP} ${SRC} ${TGT}',
source=bld.bldnode.find_or_declare('another.txt'),
target='another_copied.txt',
)
You can also establish the dependency directly on a task object::
@feature('myfeature')
def process_myfeature(self):
baz_taskgen = self.bld.get_tgen_by_name('baz')
baz_taskgen.post()
# every cmake_build taskgen stores its task in cmake_build_task
baz_task = baz_taskgen.cmake_build_task
tsk = self.create_task('mytask')
tsk.set_run_after(baz_task)
# tsk is run whenever baz_task changes its outputs, namely,
# path/to/foobld/include/baz.h
tsk.dep_nodes.extend(baz_task.outputs)
If your cmake build creates several files (that may be dependency for several
tasks), you can use the parameter cmake_output_patterns. It receives a pattern
or a list of patterns relative to the cmake build directory. After the build
task is run, the files that match those patterns are set as output of the cmake
build task, so that they get a signature. Example::
def build(bld):
...
foo.build('baz', cmake_output_patterns='include/*.h')
...
"""
from waflib import Context, Node, Task, Utils
from waflib.Configure import conf
from waflib.TaskGen import feature, taskgen_method
from collections import OrderedDict
import os
import re
import sys
class cmake_configure_task(Task.Task):
vars = ['CMAKE_BLD_DIR']
run_str = '${CMAKE} ${CMAKE_FLAGS} ${CMAKE_SRC_DIR} ${CMAKE_VARS} ${CMAKE_GENERATOR_OPTION}'
color = 'BLUE'
def exec_command(self, cmd, **kw):
kw['stdout'] = sys.stdout
return super(cmake_configure_task, self).exec_command(cmd, **kw)
def uid(self):
if not hasattr(self, 'uid_'):
m = Utils.md5()
def u(s):
m.update(s.encode('utf-8'))
u(self.__class__.__name__)
u(self.env.get_flat('CMAKE_SRC_DIR'))
u(self.env.get_flat('CMAKE_BLD_DIR'))
u(self.env.get_flat('CMAKE_VARS'))
u(self.env.get_flat('CMAKE_FLAGS'))
self.uid_ = m.digest()
return self.uid_
def __str__(self):
return self.cmake.name
def keyword(self):
return 'CMake Configure'
# Force a clean cmake configuration by removing any stale CMakeCache.txt
cmake_configure_task._original_run = cmake_configure_task.run
def _cmake_configure_task_run(self):
cmakecache_path = self.outputs[0].abspath()
if os.path.exists(cmakecache_path):
os.remove(cmakecache_path)
self._original_run()
cmake_configure_task.run = _cmake_configure_task_run
class cmake_build_task(Task.Task):
run_str = '${CMAKE} --build ${CMAKE_BLD_DIR} --target ${CMAKE_TARGET}'
color = 'BLUE'
    # the cmake-generated build system is responsible for managing its own
    # dependencies
always_run = True
def exec_command(self, cmd, **kw):
kw['stdout'] = sys.stdout
return super(cmake_build_task, self).exec_command(cmd, **kw)
def uid(self):
if not hasattr(self, 'uid_'):
m = Utils.md5()
def u(s):
m.update(s.encode('utf-8'))
u(self.__class__.__name__)
u(self.env.get_flat('CMAKE_BLD_DIR'))
u(self.env.get_flat('CMAKE_TARGET'))
self.uid_ = m.digest()
return self.uid_
def __str__(self):
return '%s %s' % (self.cmake.name, self.cmake_target)
def keyword(self):
return 'CMake Build'
# allow tasks to depend on possible headers or other resources if the user
# declares outputs for the cmake build
cmake_build_task = Task.update_outputs(cmake_build_task)
cmake_build_task.original_post_run = cmake_build_task.post_run
def _cmake_build_task_post_run(self):
self.output_patterns = Utils.to_list(self.output_patterns)
if not self.output_patterns:
return self.original_post_run()
bldnode = self.cmake.bldnode
for node in bldnode.ant_glob(self.output_patterns, remove=False):
self.set_outputs(node)
return self.original_post_run()
cmake_build_task.post_run = _cmake_build_task_post_run
class CMakeConfig(object):
'''
CMake configuration. This object shouldn't be instantiated directly. Use
bld.cmake().
'''
def __init__(self, bld, name, srcnode, bldnode, cmake_vars, cmake_flags):
self.bld = bld
self.name = name
self.srcnode = srcnode
self.bldnode = bldnode
self.vars = cmake_vars
self.flags = cmake_flags
self._config_task = None
self.last_build_task = None
def vars_keys(self):
keys = list(self.vars.keys())
if not isinstance(self.vars, OrderedDict):
keys.sort()
return keys
def config_sig(self):
m = Utils.md5()
def u(s):
m.update(s.encode('utf-8'))
u(self.srcnode.abspath())
u(self.bldnode.abspath())
for v in self.flags:
u(v)
keys = self.vars_keys()
for k in keys:
u(k)
u(self.vars[k])
return m.digest()
def config_task(self, taskgen):
sig = self.config_sig()
if self._config_task and self._config_task.cmake_config_sig == sig:
return self._config_task
self._config_task = taskgen.create_task('cmake_configure_task')
self._config_task.cwd = self.bldnode
self._config_task.cmake = self
self._config_task.cmake_config_sig = sig
env = self._config_task.env
env.CMAKE_BLD_DIR = self.bldnode.abspath()
env.CMAKE_SRC_DIR = self.srcnode.abspath()
keys = self.vars_keys()
env.CMAKE_VARS = ["-D%s='%s'" % (k, self.vars[k]) for k in keys]
env.CMAKE_FLAGS = self.flags
self._config_task.set_outputs(
self.bldnode.find_or_declare('CMakeCache.txt'),
)
if self.last_build_task:
self._config_task.set_run_after(self.last_build_task)
self.bldnode.mkdir()
return self._config_task
def build(self, cmake_target, **kw):
return self.bld.cmake_build(self.name, cmake_target, **kw)
_cmake_instances = {}
def get_cmake(name):
if name not in _cmake_instances:
raise Exception('cmake: configuration named "%s" not found' % name)
return _cmake_instances[name]
@conf
def cmake(bld, name, cmake_src=None, cmake_bld=None, cmake_vars={}, cmake_flags=''):
'''
This function has two signatures:
- bld.cmake(name, cmake_src, cmake_bld, cmake_vars):
Create a cmake configuration.
- bld.cmake(name):
Get the cmake configuration with name.
'''
if not cmake_src and not cmake_bld and not cmake_vars:
return get_cmake(name)
if name in _cmake_instances:
bld.fatal('cmake: configuration named "%s" already exists' % name)
if not isinstance(cmake_src, Node.Node):
cmake_src = bld.path.find_dir(cmake_src)
if not cmake_bld:
cmake_bld = cmake_src.get_bld()
elif not isinstance(cmake_bld, Node.Node):
cmake_bld = bld.bldnode.make_node(cmake_bld)
c = CMakeConfig(bld, name, cmake_src, cmake_bld, cmake_vars, cmake_flags)
_cmake_instances[name] = c
return c
@feature('cmake_build')
def process_cmake_build(self):
if not hasattr(self, 'cmake_target'):
self.bld.fatal('cmake_build: taskgen is missing cmake_target')
if not hasattr(self, 'cmake_config'):
self.bld.fatal('cmake_build: taskgen is missing cmake_config')
tsk = self.create_cmake_build_task(self.cmake_config, self.cmake_target)
self.cmake_build_task = tsk
outputs = Utils.to_list(getattr(self, 'target', ''))
if not isinstance(outputs, list):
outputs = [outputs]
for o in outputs:
if not isinstance(o, Node.Node):
o = self.path.find_or_declare(o)
tsk.set_outputs(o)
tsk.output_patterns = getattr(self, 'cmake_output_patterns', [])
@conf
def cmake_build(bld, cmake_config, cmake_target, **kw):
kw['cmake_config'] = cmake_config
kw['cmake_target'] = cmake_target
kw['features'] = Utils.to_list(kw.get('features', [])) + ['cmake_build']
if 'name' not in kw:
kw['name'] = '%s_%s' % (cmake_config, cmake_target)
return bld(**kw)
@taskgen_method
def create_cmake_build_task(self, cmake_config, cmake_target):
cmake = get_cmake(cmake_config)
tsk = self.create_task('cmake_build_task')
tsk.cmake = cmake
tsk.cmake_target = cmake_target
tsk.output_patterns = []
tsk.env.CMAKE_BLD_DIR = cmake.bldnode.abspath()
tsk.env.CMAKE_TARGET = cmake_target
self.cmake_config_task = cmake.config_task(self)
tsk.set_run_after(self.cmake_config_task)
if cmake.last_build_task:
tsk.set_run_after(cmake.last_build_task)
cmake.last_build_task = tsk
return tsk
def _check_min_version(cfg):
cfg.start_msg('Checking cmake version')
cmd = cfg.env.get_flat('CMAKE'), '--version'
out = cfg.cmd_and_log(cmd, quiet=Context.BOTH)
m = re.search(r'\d+\.\d+(\.\d+(\.\d+)?)?', out)
if not m:
cfg.end_msg(
'unable to parse version, build is not guaranteed to succeed',
color='YELLOW',
)
else:
version = Utils.num2ver(m.group(0))
minver_str = cfg.env.get_flat('CMAKE_MIN_VERSION')
minver = Utils.num2ver(minver_str)
if version < minver:
cfg.fatal('cmake must be at least at version %s' % minver_str)
cfg.end_msg(m.group(0))
generators = dict(
default=[
(['ninja', 'ninja-build'], 'Ninja'),
(['make'], 'Unix Makefiles'),
],
win32=[
(['ninja', 'ninja-build'], 'Ninja'),
(['nmake'], 'NMake Makefiles'),
],
)
def configure(cfg):
cfg.find_program('cmake')
if cfg.env.CMAKE_MIN_VERSION:
_check_min_version(cfg)
l = generators.get(Utils.unversioned_sys_platform(), generators['default'])
for names, generator in l:
if cfg.find_program(names, mandatory=False):
cfg.env.CMAKE_GENERATOR_OPTION = '-G%s' % generator
break
else:
cfg.fatal("cmake: couldn't find a suitable CMake generator. " +
"The ones supported by this Waf tool for this platform are: %s" % ', '.join(g for _, g in l))
|
gpl-3.0
|
lavalamp-/ws-backend-community
|
lib/filesystem.py
|
1
|
16528
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import errno
import json
from uuid import uuid4
import shutil
import math
import logging
from subprocess import check_output, CalledProcessError
from .config import ConfigManager
config = ConfigManager.instance()
logger = logging.getLogger(__name__)
class FilesystemHelper(object):
"""
A class for containing all methods related to file system operations.
"""
# Class Members
# Instantiation
# Static Methods
@staticmethod
def calculate_hash_of_file(file_path):
"""
Calculate the SHA256 hash hex digest of the file at the specified path.
:param file_path: The path to the file that the SHA256 hex digest should be calculated for.
:return: The SHA256 hash hex digest of the file at the specified path.
"""
from .crypto import HashHelper
with open(file_path, "r") as f:
contents = f.read()
return HashHelper.sha256_digest(contents)
@staticmethod
def count_lines_in_file(file_path):
"""
Count the total number of lines in the file at file_path.
:param file_path: The path for the file to review.
:return: The number of lines found in the file.
"""
line_count = 0
with open(file_path, "r") as f:
for line in f:
line_count += 1
return line_count
@staticmethod
def create_directories(dir_path):
"""
Creates all of the currently non-existing directories found in dir_path.
:param dir_path: A path containing directories (or a single directory) to
create.
:return: None
"""
try:
os.makedirs(dir_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
@staticmethod
def delete_directory(dir_path):
"""
Deletes the directory specified at dir_path.
:param dir_path: The path to the directory to delete.
:return: None
"""
shutil.rmtree(dir_path)
@staticmethod
def delete_file(file_path):
"""
Deletes the file at the path denoted by file_path.
:param file_path: The path to the file that should be deleted.
:return: None
"""
try:
os.remove(file_path)
except OSError:
pass
@staticmethod
def does_directory_exist(dir_path):
"""
Checks to see whether a directory currently exists at dir_path. Note
that if dir_path points to a file that does exist, this method will
return False.
:param dir_path: The path of the directory to check for.
:return: True if a directory exists at dir_path, otherwise False.
"""
return os.path.isdir(dir_path)
@staticmethod
def does_file_exist(file_path):
"""
Checks to see whether a file currently exists at file_path. Note that
if file_path points to a directory that does exist, this method will
return False.
:param file_path: The path of the file to check for.
:return: True if a file exists at file_path, otherwise False.
"""
return os.path.isfile(file_path)
@staticmethod
def get_json_from_file(file_path, raise_error=True):
"""
Attempt to read the contents of the specified file as a JSON object. Raises
an error if the specified file does not contain valid JSON and raise_error
is True, and returns None if the file does not contain valid JSON and raise_error
is False.
        :param file_path: The path to the file to read.
        :param raise_error: Whether to raise an error if the file does not contain valid JSON.
        :return: A JSON object representing the file's contents if the file contains valid
        JSON, or None.
"""
contents = FilesystemHelper.get_file_contents(file_path)
try:
return json.loads(contents)
except ValueError as e:
if raise_error:
raise e
else:
return None
@staticmethod
def get_file_contents(path=None, read_mode="r"):
"""
Get the contents of the file at the specified path.
:param path: The path of the file to retrieve the contents of.
:return: The contents of the file at the specified path.
"""
if FilesystemHelper.is_dir_path(path):
raise ValueError(
"FilesystemHelper.get_file_contents received a path argument fhat pointed to a directory. "
"Path was %s."
% (path,)
)
with open(path, read_mode) as f:
contents = f.read()
return contents
@staticmethod
def get_file_information(file_path):
"""
Get a number of data points about the given file.
:param file_path: The local file path to the file to process.
:return: A tuple containing (1) the file name, (2) a SHA-256 hash digest of the file's contents,
(3) the number of lines in the file, and (4) the size of the file in bytes.
"""
file_name = FilesystemHelper.get_file_name_from_path(file_path)
file_hash = FilesystemHelper.calculate_hash_of_file(file_path)
file_line_count = FilesystemHelper.count_lines_in_file(file_path)
file_size = FilesystemHelper.get_file_size(file_path)
return file_name, file_hash, file_line_count, file_size
@staticmethod
def get_file_name_from_path(path):
"""
Parses the file name from the given path and returns it.
:param path: The path to parse.
:return: The file name from the given path.
"""
if FilesystemHelper.is_dir_path(path):
raise ValueError(
"FilesystemHelper.get_file_name_from_path received a path argument that pointed to a "
"directory. Path was %s."
% (path,)
)
return os.path.basename(path)
@staticmethod
def get_file_size(file_path):
"""
Get the size in bytes of the file at file_path.
:param file_path: The path on disk to the file in question.
:return: The size in bytes of the referenced file.
"""
return os.path.getsize(file_path)
@staticmethod
def get_files_with_extension_from_directory(start_dir=".", extension=None):
"""
Recursively walk the directories found within start_dir to find all files that have the specified
extension and return a list of file paths to the discovered files.
:param start_dir: Where to start walking directories from.
:param extension: The extension to look for (ex: .py).
:return: A list containing file paths pointing to all files sharing the specified extension as found
recursively from start_dir.
"""
to_return = []
for root, dirs, files in os.walk(start_dir):
match_files = filter(lambda x: x.endswith(extension), files)
to_return.extend([os.path.join(root, match_file) for match_file in match_files])
return to_return
@staticmethod
def get_lines_from_file(file_path=None, strip=True):
"""
Get all of the lines in the file specified by file_path in an array.
:param file_path: The path to the file to read.
:param strip: Whether or not to aggressively strip whitespace from the file's contents.
:return: An array of strings representing the lines in the specified file.
"""
with open(file_path, "r") as f:
contents = f.read()
if strip:
return [x.strip() for x in contents.strip().split("\n")]
else:
return contents.split("\n")
@staticmethod
def get_parent_directory_name(file_path):
"""
Get the name of the parent directory found in the given path.
:param file_path: The path to parse.
:return: The name of the parent directory found in the given path.
"""
return os.path.dirname(file_path).split(os.path.sep)[-1]
@staticmethod
def get_temporary_directory_path(path_base=None):
"""
Returns a directory path that can be used to create a temporary directory.
Note that the caller is responsible for deleting this directory when done
using it.
:param path_base: The base of the path to use for the temporary directory path.
If this is None, config.temporary_file_dir will be used in its place.
:return: A directory path that can be used to create a temporary directory.
"""
        if path_base:
            return "".join([path_base, str(uuid4()), os.sep])
        else:
            return "".join([config.fs_temporary_file_dir, str(uuid4()), os.sep])
@staticmethod
def get_temporary_file_path(path_base=None, file_ext=None):
"""
Returns a file path that can be used to create a temporary file. Note
that the caller is responsible for deleting this file when done using it.
:param path_base: The base of the path to use for the temporary file path.
If this is None, config.temporary_file_dir will be used in its place.
:param file_ext: The file extension to place at the end of the created
path
:return: A file path that can be used to create a temporary file.
"""
        if file_ext is None:
            file_ext = ""
        elif not file_ext.startswith("."):
            file_ext = ".%s" % (file_ext,)
if path_base:
return "".join([path_base, str(uuid4()), file_ext])
else:
return "".join([config.fs_temporary_file_dir, str(uuid4()), file_ext])
@staticmethod
def is_dir_path(path):
"""
Check to see if the given path specifies a directory.
:param path: The path to parse.
:return: True if the path specifies a directory, False otherwise.
"""
return os.path.isdir(path)
@staticmethod
def is_file_path(path):
"""
Check to see if the given path specifies a file.
:param path: The path to parse.
:return: True if the path specifies a file, False otherwise.
"""
return os.path.isfile(path)
@staticmethod
def move_file(from_path=None, to_path=None):
"""
Move the file found at the specified from_path to to_path.
:param from_path: The file path where the file currently resides.
:param to_path: The path where the file should be moved to.
:return: None
"""
os.rename(from_path, to_path)
@staticmethod
def split_file(file_path=None, output_file_name=None, chunk_count=None):
"""
Split the file pointed to by file_path into chunk_count number of files, and name
these new files based on output_file_name.
:param file_path: The local file path to the file to split up.
:param output_file_name: The file name base to write resulting files to.
:param chunk_count: The number of chunks to split the file into.
:return: None
"""
contents = FilesystemHelper.get_file_contents(path=file_path, read_mode="rb")
content_length = len(contents)
chunk_size = int(math.ceil(content_length / float(chunk_count)))
logger.debug(
"Now splitting file at %s into %s chunks (%s bytes each) and writing to file with name %s."
% (file_path, chunk_count, chunk_size, output_file_name)
)
start_offset = 0
        end_offset = chunk_size
for i in range(chunk_count):
file_name = "%s.%s" % (output_file_name, i)
logger.debug(
"Writing first chunk of length %s to %s."
% (file_name, chunk_size)
)
FilesystemHelper.write_to_file(
file_path=file_name,
data=contents[start_offset:end_offset],
write_mode="wb+",
)
start_offset += chunk_size
end_offset += chunk_size
logger.debug("File split successfully.")
@staticmethod
def touch(path):
"""
        Emulates the Linux 'touch' utility - creates the file if it does
        not exist, otherwise updates its access and modification times.
:param path: The path of the file to create.
:return: None
"""
# TODO handle exceptions
if os.path.exists(path):
os.utime(path, None)
else:
open(path, "a").close()
@staticmethod
def write_to_file(file_path=None, data=None, write_mode="w+"):
"""
Write the contents of data to the file at file_path using the specified write mode.
:param file_path: The file path where the data should be written to.
:param data: The data to write.
:param write_mode: The mode that the data should be written to the file.
:return: None
"""
with open(file_path, write_mode) as f:
f.write(data)
# Class Methods
# Public Methods
# Protected Methods
# Private Methods
# Properties
# Representation and Comparison
class FileHelper(object):
"""
This class contains helper methods for retrieving the contents of files associated with the
Web Sight platform.
"""
@staticmethod
def get_default_scan_configs():
"""
Get a list of JSON dictionaries representing the default ScanConfig objects to populate in the
database.
:return: A list of JSON dictionaries representing the default ScanConfig objects to populate in the
database.
"""
contents = FilesystemHelper.get_file_contents(path=config.files_default_scan_config_path)
return json.loads(contents)
@staticmethod
def get_dns_record_types():
"""
        Get a list of tuples containing (1) the DNS record type, (2) whether or not to collect data
        about the DNS record type by default and (3) whether or not to scan IP addresses associated with
        the record type, from the default DNS record types file.
        :return: A list of tuples containing (1) the DNS record type, (2) whether or not to collect data
        about the DNS record type by default and (3) whether or not to scan IP addresses associated with
        the record type, from the default DNS record types file.
"""
contents = FilesystemHelper.get_file_contents(path=config.files_dns_record_types_path)
contents = [x.strip() for x in contents.strip().split("\n")]
to_return = []
for line in contents:
line_split = [x.strip() for x in line.split(",")]
to_return.append((
line_split[0],
line_split[1].lower() == "true",
line_split[2].lower() == "true",
))
return to_return
@staticmethod
def get_scan_ports_and_protocols():
"""
Get a list of tuples containing (1) the port number and (2) the protocol for all of the
ports that are scanned by default.
:return: A list of tuples containing (1) the port number and (2) the protocol for all of the
ports that are scanned by default.
"""
contents = FilesystemHelper.get_file_contents(path=config.files_default_scan_ports_path)
contents = [x.strip() for x in contents.strip().split("\n")]
ports = []
for line in contents:
line_split = [x.strip() for x in line.split(",")]
ports.append((int(line_split[0]), line_split[1]))
return ports
class PathHelper(object):
"""
This class contains helper methods for interacting with the current PATH environment
variable.
"""
# Class Members
# Instantiation
# Static Methods
@staticmethod
def is_executable_in_path(to_check):
"""
Check to see if the specified executable is found in the current environment's
PATH environment variable.
:param to_check: The executable to search for.
        :return: True if the referenced executable is found, False otherwise.
"""
try:
result = check_output(["which", to_check]).strip()
return bool(result)
except CalledProcessError:
return False
# Class Methods
# Public Methods
# Protected Methods
# Private Methods
# Properties
# Representation and Comparison
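# A standalone sketch of the chunking arithmetic used by
# FilesystemHelper.split_file above. chunk_size is
# ceil(len(data) / chunk_count); consecutive windows of that size cover
# the data with no gaps or overlap (the last chunk may be shorter), so
# joining the chunks reproduces the original bytes. Reuses the "math"
# import at the top of this module.
def _example_split_bytes(data, chunk_count):
    chunk_size = int(math.ceil(len(data) / float(chunk_count)))
    chunks = [data[i * chunk_size:(i + 1) * chunk_size]
              for i in range(chunk_count)]
    assert b"".join(chunks) == data  # no byte lost or duplicated
    return chunks

# e.g. _example_split_bytes(b"0123456789", 3) -> [b"0123", b"4567", b"89"]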
|
gpl-3.0
|
mefryar/epodx-dashboards
|
code/update_dashboard.py
|
1
|
9234
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Program: Update Dashboard
Programmer: Michael Fryar, Research Fellow, EPoD
Date created: January 5, 2017
Purpose: Establish SSH tunnel to edX Analytics API, download learner
data, and write data to Google Sheets via Sheets API.
First time: Must run get_credentials.py first
"""
# Standard library imports
import csv # For reading data in comma separated value format
import os # For manipulating paths and changing directory
import subprocess # For spawning ssh tunnel
import time # For calculating run time
# Third-party imports
import httplib2 # "A comprehensive HTTP client library"
import requests # "HTTP for Humans"
from apiclient import discovery # For acessing Google Sheets API
# User-written imports
import secrets # Token for edX Analytics API authentication
# For getting OAuth2 credentials to interact with Google Sheets API
from get_credentials import get_credentials
# Start timer
START_TIME = time.time()
# Get token for edX Analytics API authentication
HKS_SECRET_TOKEN = secrets.HKS_SECRET_TOKEN
def ssh():
"""SSH tunnel to EPoDX API"""
# Change to directory containing configuration files.
home_dir = os.path.expanduser('~')
epodx_dir = os.path.join(home_dir, 'epodx')
os.chdir(epodx_dir)
    # Establish an SSH tunnel in the background that auto-closes.
# -f "fork into background"
# -F "use configuration file"
    # -o ExitOnForwardFailure=yes "wait until connection and port
    # forwardings are set up before placing in background"
# sleep 10 "give Python script 10 seconds to start using tunnel and
# close tunnel after python script stops using it"
# Ref 1: https://www.g-loaded.eu/2006/11/24/auto-closing-ssh-tunnels/
# Ref 2: https://gist.github.com/scy/6781836
config = "-F ./ssh-config epodx-analytics-api"
option = "-o ExitOnForwardFailure=yes"
command = "ssh -f {} {} sleep 10".format(config, option)
subprocess.run(command, shell=True)
def write_to_g_sheet(course, partner, data_selection='both'):
"""Downloads learner data from EPoDx and writes to Google Sheets.
edX stores identifiable information about learners separately from
problem response data, which is identifiable by user_id only. This
function downloads learner data and problem response data via the
edX Analytics API and then writes this data to a Google Sheet via
the Sheets API.
Args:
course (str): Three letter course code. Known values are:
AGG - Aggregating Evidence
COM - Commissioning Evidence
CBA - Cost-Benefit Analysis
DES - Descriptive Evidence
IMP - Impact Evaluations
SYS - Systematic Approaches to Policy Decisions
partner (str): Separate dashboards are required for each partner
            because the static nature of the dashboards means that a single
dashboard cannot be used by two different partners simultaneously.
Known values are:
HKS - Write to master sheet reserved for HKS trainings
LBSNAA - Write to master sheet reserved for LBSNAA trainings
NSPP1 - Write to 1st master sheet reserved for NSPP trainings
NSPP2 - Write to 2nd master sheet reserved for NSPP trainings
data_selection (str): Specifies whether to download and write only
learner profiles, only problem responses or both. Known values are:
both - Download and write both learner profiles & problem responses
problems - Only download problem responses
profiles - Only download learner profiles
"""
if course == "DTA":
course_id = "course-v1:epodx+BCURE-{}+2018_v1".format(course)
else:
course_id = "course-v1:epodx+BCURE-{}+2016_v1".format(course)
spreadsheetId = secrets.PARTNER_SHEET_KEYS["{}_{}".format(course, partner)]
if data_selection == "both":
message_to_print = ("Downloading and writing {} learner profiles and "
"problem responses.".format(course)
)
print(message_to_print)
if data_selection in ("both", "profiles"):
# Define parameters for extracting learner profile data.
learner_profile_report_url = "http://localhost:18100/api/v0/learners/"
headers = {
"Authorization": "Token {}".format(HKS_SECRET_TOKEN),
"Accept": "text/csv",
}
# The list of fields you've requested.
# Leave this parameter off to see the full list of fields.
fields = ','.join(["user_id", "username", "name", "email", "language",
"location", "year_of_birth", "gender",
"level_of_education", "mailing_address", "goals",
"enrollment_mode", "segments", "cohort", "city",
"country", "enrollment_date", "last_updated"])
params = {
"course_id": course_id,
"fields": fields,
}
# Download learner data.
with requests.Session() as s:
download = s.get(
learner_profile_report_url, headers=headers, params=params)
# Decode learner data.
decoded_content = download.content.decode('ascii', 'ignore')
# Extract data from CSV into list.
cr = csv.reader(decoded_content.splitlines(), delimiter=',')
learner_profiles = list(cr)
# TODO: Explore deleting all but specified cohort. Be sure to plan.
elif data_selection == "problems":
message_to_print = ("Downloading and writing {} problem responses "
"only.".format(course)
)
print(message_to_print)
if data_selection in ("both", "problems"):
# Define parameters for extracting problem response data.
problem_api_url = ("http://localhost:18100/api/v0/courses/"
"{}/reports/problem_response".format(course_id))
headers = {"Authorization": "Token {}".format(HKS_SECRET_TOKEN)}
problem_data = requests.get(problem_api_url, headers=headers).json()
problem_download_url = problem_data['download_url']
# Download the CSV from download_url.
with requests.Session() as s:
download = s.get(problem_download_url)
# Decode problem response data.
decoded_content = download.content.decode('ascii', 'ignore')
# Extract data from CSV into list.
cr = csv.reader(decoded_content.splitlines(), delimiter=',')
problem_responses = list(cr)
# TODO: Explore deleting all responses older than 31 days
elif data_selection == "profiles":
message_to_print = ("Downloading and writing {} learner profiles "
"only.".format(course)
)
print(message_to_print)
# This section builds on Google quickstart template.
# https://developers.google.com/sheets/api/quickstart/python
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
'version=v4')
service = discovery.build('sheets', 'v4', http=http,
discoveryServiceUrl=discoveryUrl)
if data_selection in ("both", "profiles"):
learners_range = 'student_profile_info'
if data_selection in ("both", "problems"):
problem_range = 'problem_responses'
if data_selection == "both":
data = [
{
'range': learners_range,
'values': learner_profiles
},
{
'range': problem_range,
'values': problem_responses
}
]
elif data_selection == "profiles":
data = [
{
'range': learners_range,
'values': learner_profiles
}
]
elif data_selection == "problems":
data = [
{
'range': problem_range,
'values': problem_responses
}
]
body = {'valueInputOption': 'RAW', 'data': data}
result = service.spreadsheets().values().batchUpdate(
spreadsheetId=spreadsheetId, body=body).execute()
def tunnel_and_write_to_g_sheet(dashboard):
"""Establish SSH tunnel, download data, and write to Google Sheet"""
ssh()
course = dashboard[0]
partner = dashboard[1]
if "profiles" in dashboard:
data_selection = "profiles"
elif "problems" in dashboard:
data_selection = "problems"
else:
data_selection = "both"
write_to_g_sheet(course, partner, data_selection)
print("Upload {} to {} {} master sheet complete".format(
data_selection, course, partner))
if __name__ == '__main__':
dashboards = [
["IMP", "LBSNAA"],
]
for dashboard in dashboards:
tunnel_and_write_to_g_sheet(dashboard)
TOTAL_TIME = round((time.time() - START_TIME), 2)
print("Total run time: {} seconds".format(TOTAL_TIME))
|
mit
|
OpenWinCon/OpenWinNet
|
web-gui/myvenv/lib/python3.4/site-packages/rest_framework/decorators.py
|
20
|
4560
|
"""
The most important decorator in this module is `@api_view`, which is used
for writing function-based views with REST framework.
There are also various decorators for setting the API policies on function
based views, as well as the `@detail_route` and `@list_route` decorators, which are
used to annotate methods on viewsets that should be included by routers.
"""
from __future__ import unicode_literals
import types
from django.utils import six
from rest_framework.views import APIView
def api_view(http_method_names=None, exclude_from_schema=False):
"""
Decorator that converts a function-based view into an APIView subclass.
Takes a list of allowed methods for the view as an argument.
"""
http_method_names = ['GET'] if (http_method_names is None) else http_method_names
def decorator(func):
WrappedAPIView = type(
six.PY3 and 'WrappedAPIView' or b'WrappedAPIView',
(APIView,),
{'__doc__': func.__doc__}
)
# Note, the above allows us to set the docstring.
# It is the equivalent of:
#
# class WrappedAPIView(APIView):
# pass
# WrappedAPIView.__doc__ = func.doc <--- Not possible to do this
# api_view applied without (method_names)
        assert not isinstance(http_method_names, types.FunctionType), \
'@api_view missing list of allowed HTTP methods'
# api_view applied with eg. string instead of list of strings
assert isinstance(http_method_names, (list, tuple)), \
'@api_view expected a list of strings, received %s' % type(http_method_names).__name__
allowed_methods = set(http_method_names) | set(('options',))
WrappedAPIView.http_method_names = [method.lower() for method in allowed_methods]
def handler(self, *args, **kwargs):
return func(*args, **kwargs)
for method in http_method_names:
setattr(WrappedAPIView, method.lower(), handler)
WrappedAPIView.__name__ = func.__name__
WrappedAPIView.__module__ = func.__module__
WrappedAPIView.renderer_classes = getattr(func, 'renderer_classes',
APIView.renderer_classes)
WrappedAPIView.parser_classes = getattr(func, 'parser_classes',
APIView.parser_classes)
WrappedAPIView.authentication_classes = getattr(func, 'authentication_classes',
APIView.authentication_classes)
WrappedAPIView.throttle_classes = getattr(func, 'throttle_classes',
APIView.throttle_classes)
WrappedAPIView.permission_classes = getattr(func, 'permission_classes',
APIView.permission_classes)
WrappedAPIView.exclude_from_schema = exclude_from_schema
return WrappedAPIView.as_view()
return decorator
def renderer_classes(renderer_classes):
def decorator(func):
func.renderer_classes = renderer_classes
return func
return decorator
def parser_classes(parser_classes):
def decorator(func):
func.parser_classes = parser_classes
return func
return decorator
def authentication_classes(authentication_classes):
def decorator(func):
func.authentication_classes = authentication_classes
return func
return decorator
def throttle_classes(throttle_classes):
def decorator(func):
func.throttle_classes = throttle_classes
return func
return decorator
def permission_classes(permission_classes):
def decorator(func):
func.permission_classes = permission_classes
return func
return decorator
def detail_route(methods=None, **kwargs):
"""
Used to mark a method on a ViewSet that should be routed for detail requests.
"""
methods = ['get'] if (methods is None) else methods
def decorator(func):
func.bind_to_methods = methods
func.detail = True
func.kwargs = kwargs
return func
return decorator
def list_route(methods=None, **kwargs):
"""
Used to mark a method on a ViewSet that should be routed for list requests.
"""
methods = ['get'] if (methods is None) else methods
def decorator(func):
func.bind_to_methods = methods
func.detail = False
func.kwargs = kwargs
return func
return decorator
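# Illustrative usage of the decorators above, kept as a comment so this
# module has no import-time side effects. It assumes a configured Django
# project with REST framework installed; the view and action names are
# hypothetical.
#
#     from rest_framework.decorators import api_view, detail_route
#     from rest_framework.response import Response
#
#     @api_view(['GET', 'POST'])
#     def echo(request):
#         return Response({'method': request.method})
#
#     class AccountViewSet(viewsets.ModelViewSet):
#         @detail_route(methods=['post'])
#         def set_password(self, request, pk=None):
#             ...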
|
apache-2.0
|
Jumpscale/jumpscale_core8
|
lib/JumpScale/baselib/atyourservice81/tests/unit__test.py
|
1
|
2343
|
import unittest
from JumpScale import j
class AYSKey(unittest.TestCase):
def setUp(self):
self.test_table = [
{
'key': 'domain|name!instance@role',
'expect': {
'domain': 'domain',
'name': 'name',
'instance': 'instance',
'role': 'role'
}
},
{
'key': 'domain|name',
'expect': {
'domain': 'domain',
'name': 'name',
'instance': '',
'role': 'name'
}
},
{
'key': 'domain|role.name',
'expect': {
'domain': 'domain',
'name': 'role.name',
'instance': '',
'role': 'role'
}
},
{
'key': 'role!instance',
'expect': {
'domain': '',
'name': '',
'instance': 'instance',
'role': 'role'
}
},
{
'key': 'role.name',
'expect': {
'domain': '',
'name': 'role.name',
'instance': '',
'role': 'role'
}
}
]
def test_parse(self):
for test in self.test_table:
domain, name, instance, role = j.atyourservice._parseKey(test[
'key'])
self.assertEqual(domain, test['expect'][
'domain'], "domain should be %s, found %s" % (test['expect']['domain'], domain))
self.assertEqual(name, test['expect']['name'], "name should be %s, found %s" % (
test['expect']['name'], name))
self.assertEqual(instance, test['expect'][
'instance'], "instance should be %s, found %s" % (test['expect']['instance'], instance))
self.assertEqual(role, test['expect']['role'], "role should be %s, found %s" % (
test['expect']['role'], role))
if __name__ == '__main__':
unittest.main()
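# A hypothetical, standalone parser inferred purely from the test table
# above; the real j.atyourservice._parseKey may differ. The key grammar
# appears to be "domain|name!instance@role", where every part is optional
# and the role defaults to the name up to the first '.'.
def _example_parse_key(key):
    domain = instance = ''
    role = None
    if '@' in key:
        key, role = key.split('@', 1)
    if '|' in key:
        domain, key = key.split('|', 1)
    if '!' in key:
        key, instance = key.split('!', 1)
        if not domain:
            # a bare "role!instance" carries a role token, not a name
            role = role or key
            key = ''
    name = key
    if role is None:
        role = name.split('.')[0]
    return domain, name, instance, role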
|
apache-2.0
|
ncos/lisa
|
src/lisa_drive/scripts/venv/lib/python3.5/site-packages/pip-10.0.1-py3.5.egg/pip/_vendor/colorama/ansitowin32.py
|
5
|
9904
|
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import re
import sys
import os
from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style
from .winterm import WinTerm, WinColor, WinStyle
from .win32 import windll, winapi_test
winterm = None
if windll is not None:
winterm = WinTerm()
def is_stream_closed(stream):
return not hasattr(stream, 'closed') or stream.closed
def is_a_tty(stream):
return hasattr(stream, 'isatty') and stream.isatty()
class StreamWrapper(object):
'''
Wraps a stream (such as stdout), acting as a transparent proxy for all
attribute access apart from method 'write()', which is delegated to our
Converter instance.
'''
def __init__(self, wrapped, converter):
# double-underscore everything to prevent clashes with names of
# attributes on the wrapped stream object.
self.__wrapped = wrapped
self.__convertor = converter
def __getattr__(self, name):
return getattr(self.__wrapped, name)
def write(self, text):
self.__convertor.write(text)
class AnsiToWin32(object):
'''
Implements a 'write()' method which, on Windows, will strip ANSI character
sequences from the text, and if outputting to a tty, will convert them into
win32 function calls.
'''
ANSI_CSI_RE = re.compile('\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?') # Control Sequence Introducer
ANSI_OSC_RE = re.compile('\001?\033\\]((?:.|;)*?)(\x07)\002?') # Operating System Command
def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
# The wrapped stream (normally sys.stdout or sys.stderr)
self.wrapped = wrapped
# should we reset colors to defaults after every .write()
self.autoreset = autoreset
# create the proxy wrapping our output stream
self.stream = StreamWrapper(wrapped, self)
on_windows = os.name == 'nt'
# We test if the WinAPI works, because even if we are on Windows
# we may be using a terminal that doesn't support the WinAPI
# (e.g. Cygwin Terminal). In this case it's up to the terminal
# to support the ANSI codes.
conversion_supported = on_windows and winapi_test()
# should we strip ANSI sequences from our output?
if strip is None:
strip = conversion_supported or (not is_stream_closed(wrapped) and not is_a_tty(wrapped))
self.strip = strip
        # should we convert ANSI sequences into win32 calls?
if convert is None:
convert = conversion_supported and not is_stream_closed(wrapped) and is_a_tty(wrapped)
self.convert = convert
# dict of ansi codes to win32 functions and parameters
self.win32_calls = self.get_win32_calls()
# are we wrapping stderr?
self.on_stderr = self.wrapped is sys.stderr
def should_wrap(self):
'''
True if this class is actually needed. If false, then the output
stream will not be affected, nor will win32 calls be issued, so
wrapping stdout is not actually required. This will generally be
False on non-Windows platforms, unless optional functionality like
autoreset has been requested using kwargs to init()
'''
return self.convert or self.strip or self.autoreset
def get_win32_calls(self):
if self.convert and winterm:
return {
AnsiStyle.RESET_ALL: (winterm.reset_all, ),
AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
AnsiFore.RED: (winterm.fore, WinColor.RED),
AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
AnsiFore.RESET: (winterm.fore, ),
AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True),
AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True),
AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True),
AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True),
AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True),
AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True),
AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True),
AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True),
AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
AnsiBack.RED: (winterm.back, WinColor.RED),
AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
AnsiBack.WHITE: (winterm.back, WinColor.GREY),
AnsiBack.RESET: (winterm.back, ),
AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True),
AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True),
AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True),
AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True),
AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True),
AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True),
AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True),
AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True),
}
return dict()
def write(self, text):
if self.strip or self.convert:
self.write_and_convert(text)
else:
self.wrapped.write(text)
self.wrapped.flush()
if self.autoreset:
self.reset_all()
def reset_all(self):
if self.convert:
self.call_win32('m', (0,))
elif not self.strip and not is_stream_closed(self.wrapped):
self.wrapped.write(Style.RESET_ALL)
def write_and_convert(self, text):
'''
Write the given text to our wrapped stream, stripping any ANSI
sequences from the text, and optionally converting them into win32
calls.
'''
cursor = 0
text = self.convert_osc(text)
for match in self.ANSI_CSI_RE.finditer(text):
start, end = match.span()
self.write_plain_text(text, cursor, start)
self.convert_ansi(*match.groups())
cursor = end
self.write_plain_text(text, cursor, len(text))
def write_plain_text(self, text, start, end):
if start < end:
self.wrapped.write(text[start:end])
self.wrapped.flush()
def convert_ansi(self, paramstring, command):
if self.convert:
params = self.extract_params(command, paramstring)
self.call_win32(command, params)
def extract_params(self, command, paramstring):
if command in 'Hf':
params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';'))
while len(params) < 2:
# defaults:
params = params + (1,)
else:
params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0)
if len(params) == 0:
# defaults:
if command in 'JKm':
params = (0,)
elif command in 'ABCD':
params = (1,)
return params
def call_win32(self, command, params):
if command == 'm':
for param in params:
if param in self.win32_calls:
func_args = self.win32_calls[param]
func = func_args[0]
args = func_args[1:]
kwargs = dict(on_stderr=self.on_stderr)
func(*args, **kwargs)
elif command in 'J':
winterm.erase_screen(params[0], on_stderr=self.on_stderr)
elif command in 'K':
winterm.erase_line(params[0], on_stderr=self.on_stderr)
elif command in 'Hf': # cursor position - absolute
winterm.set_cursor_position(params, on_stderr=self.on_stderr)
elif command in 'ABCD': # cursor position - relative
n = params[0]
# A - up, B - down, C - forward, D - back
x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command]
winterm.cursor_adjust(x, y, on_stderr=self.on_stderr)
def convert_osc(self, text):
for match in self.ANSI_OSC_RE.finditer(text):
start, end = match.span()
text = text[:start] + text[end:]
paramstring, command = match.groups()
if command in '\x07': # \x07 = BEL
params = paramstring.split(";")
# 0 - change title and icon (we will only change title)
# 1 - change icon (we don't support this)
# 2 - change title
if params[0] in '02':
winterm.set_title(params[1])
return text
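# A minimal usage sketch. AnsiToWin32 is normally wired up by
# colorama.init(), but it can be used directly: wrap a stream, then write
# ANSI sequences to the proxy. On Windows consoles the sequences become
# win32 calls; elsewhere they pass through, or are stripped when the
# target is not a tty.
if __name__ == '__main__':
    wrapper = AnsiToWin32(sys.stdout)
    stream = wrapper.stream if wrapper.should_wrap() else sys.stdout
    stream.write('\033[31mred text\033[0m\n')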
|
mit
|
duguyue100/telaugesa
|
scripts/mnist_convnet_test.py
|
1
|
3733
|
"""ConvNet MNIST teset"""
import numpy as np;
import theano;
import theano.tensor as T;
import telaugesa.datasets as ds;
from telaugesa.fflayers import ReLULayer;
from telaugesa.fflayers import SoftmaxLayer;
from telaugesa.convnet import ReLUConvLayer;
from telaugesa.convnet import MaxPooling;
from telaugesa.convnet import Flattener;
from telaugesa.model import FeedForward;
from telaugesa.optimize import gd_updates;
from telaugesa.optimize import multi_dropout;
from telaugesa.cost import categorical_cross_entropy_cost;
from telaugesa.cost import L2_regularization;
n_epochs=100;
batch_size=100;
datasets=ds.load_mnist("../data/mnist.pkl.gz");
train_set_x, train_set_y = datasets[0];
valid_set_x, valid_set_y = datasets[1];
test_set_x, test_set_y = datasets[2];
n_train_batches=train_set_x.get_value(borrow=True).shape[0]/batch_size;
n_valid_batches=valid_set_x.get_value(borrow=True).shape[0]/batch_size;
n_test_batches=test_set_x.get_value(borrow=True).shape[0]/batch_size;
print "[MESSAGE] The data is loaded"
X=T.matrix("data");
y=T.ivector("label");
idx=T.lscalar();
images=X.reshape((batch_size, 1, 28, 28))
layer_0=ReLUConvLayer(filter_size=(7,7),
num_filters=50,
num_channels=1,
fm_size=(28,28),
batch_size=batch_size);
pool_0=MaxPooling(pool_size=(2,2));
layer_1=ReLUConvLayer(filter_size=(4,4),
num_filters=20,
num_channels=50,
fm_size=(11,11),
batch_size=batch_size);
pool_1=MaxPooling(pool_size=(2,2));
flattener=Flattener();
layer_2=ReLULayer(in_dim=320,
out_dim=200);
layer_3=SoftmaxLayer(in_dim=200,
out_dim=10);
dropout=multi_dropout([(batch_size, 1, 28, 28), None, (batch_size, 50, 11, 11), None, None, None, None], prob=0.5);
model=FeedForward(layers=[layer_0, pool_0, layer_1, pool_1, flattener, layer_2, layer_3],
dropout=dropout);
out=model.fprop(images);
cost=categorical_cross_entropy_cost(out[-1], y)+L2_regularization(model.params, 0.01);
updates=gd_updates(cost=cost, params=model.params, method="sgd", learning_rate=0.1);
train=theano.function(inputs=[idx],
outputs=cost,
updates=updates,
givens={X: train_set_x[idx * batch_size: (idx + 1) * batch_size],
y: train_set_y[idx * batch_size: (idx + 1) * batch_size]});
test=theano.function(inputs=[idx],
outputs=model.layers[-1].error(out[-1], y),
givens={X: test_set_x[idx * batch_size: (idx + 1) * batch_size],
y: test_set_y[idx * batch_size: (idx + 1) * batch_size]});
print "[MESSAGE] The model is built"
test_record=np.zeros((n_epochs, 1));
epoch = 0;
while (epoch < n_epochs):
epoch+=1;
for minibatch_index in xrange(n_train_batches):
mlp_minibatch_avg_cost = train(minibatch_index);
iteration = (epoch - 1) * n_train_batches + minibatch_index;
if (iteration + 1) % n_train_batches == 0:
print 'MLP MODEL';
test_losses = [test(i) for i in xrange(n_test_batches)];
test_record[epoch-1] = np.mean(test_losses);
print((' epoch %i, minibatch %i/%i, test error %f %%') %
(epoch, minibatch_index + 1, n_train_batches, test_record[epoch-1] * 100.));
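# A standalone sketch of the shape arithmetic behind layer_2's in_dim=320
# above, for a 28x28 input with 'valid' convolutions and 2x2 max pooling:
#   conv 7x7: 28 - 7 + 1 = 22, pool 2x2 -> 11
#   conv 4x4: 11 - 4 + 1 = 8,  pool 2x2 -> 4
#   flatten:  20 filters * 4 * 4 = 320 features
def _example_feature_dim(size=28, convs=(7, 4), pool=2, num_filters=20):
    for k in convs:
        size = (size - k + 1) // pool
    return num_filters * size * size  # 320 for the defaults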
|
mit
|
bregman-arie/ansible
|
test/units/module_utils/basic/test_run_command.py
|
86
|
7863
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import errno
from itertools import product
from io import BytesIO
import pytest
from ansible.module_utils._text import to_native
class OpenBytesIO(BytesIO):
"""BytesIO with dummy close() method
So that you can inspect the content after close() was called.
"""
def close(self):
pass
@pytest.fixture
def mock_os(mocker):
def mock_os_read(fd, nbytes):
return os._cmd_out[fd].read(nbytes)
def mock_os_chdir(path):
if path == '/inaccessible':
raise OSError(errno.EPERM, "Permission denied: '/inaccessible'")
def mock_os_abspath(path):
if path.startswith('/'):
return path
else:
return os.getcwd.return_value + '/' + path
os = mocker.patch('ansible.module_utils.basic.os')
os._cmd_out = {
# os.read() is returning 'bytes', not strings
mocker.sentinel.stdout: BytesIO(),
mocker.sentinel.stderr: BytesIO(),
}
os.path.expandvars.side_effect = lambda x: x
os.path.expanduser.side_effect = lambda x: x
os.environ = {'PATH': '/bin'}
os.getcwd.return_value = '/home/foo'
os.path.isdir.return_value = True
os.chdir.side_effect = mock_os_chdir
os.read.side_effect = mock_os_read
os.path.abspath.side_effect = mock_os_abspath
yield os
@pytest.fixture
def mock_subprocess(mocker):
def mock_select(rlist, wlist, xlist, timeout=1):
return (rlist, [], [])
fake_select = mocker.patch('ansible.module_utils.basic.select')
fake_select.select.side_effect = mock_select
subprocess = mocker.patch('ansible.module_utils.basic.subprocess')
cmd = mocker.MagicMock()
cmd.returncode = 0
cmd.stdin = OpenBytesIO()
cmd.stdout.fileno.return_value = mocker.sentinel.stdout
cmd.stderr.fileno.return_value = mocker.sentinel.stderr
subprocess.Popen.return_value = cmd
yield subprocess
@pytest.fixture()
def rc_am(mocker, am, mock_os, mock_subprocess):
am.fail_json = mocker.MagicMock(side_effect=SystemExit)
am._os = mock_os
am._subprocess = mock_subprocess
yield am
class TestRunCommandArgs:
# Format is command as passed to run_command, command to Popen as list, command to Popen as string
ARGS_DATA = (
(['/bin/ls', 'a', 'b', 'c'], ['/bin/ls', 'a', 'b', 'c'], '/bin/ls a b c'),
('/bin/ls a " b" "c "', ['/bin/ls', 'a', ' b', 'c '], '/bin/ls a " b" "c "'),
)
# pylint bug: https://github.com/PyCQA/pylint/issues/511
# pylint: disable=undefined-variable
@pytest.mark.parametrize('cmd, expected, shell, stdin',
((arg, cmd_str if sh else cmd_lst, sh, {})
for (arg, cmd_lst, cmd_str), sh in product(ARGS_DATA, (True, False))),
indirect=['stdin'])
def test_args(self, cmd, expected, shell, rc_am):
rc_am.run_command(cmd, use_unsafe_shell=shell)
assert rc_am._subprocess.Popen.called
args, kwargs = rc_am._subprocess.Popen.call_args
assert args == (expected, )
assert kwargs['shell'] == shell
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_tuple_as_args(self, rc_am):
with pytest.raises(SystemExit):
rc_am.run_command(('ls', '/'))
assert rc_am.fail_json.called
class TestRunCommandCwd:
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_cwd(self, mocker, rc_am):
rc_am._os.getcwd.return_value = '/old'
rc_am.run_command('/bin/ls', cwd='/new')
assert rc_am._os.chdir.mock_calls == [mocker.call('/new'), mocker.call('/old'), ]
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_cwd_relative_path(self, mocker, rc_am):
rc_am._os.getcwd.return_value = '/old'
rc_am.run_command('/bin/ls', cwd='sub-dir')
assert rc_am._os.chdir.mock_calls == [mocker.call('/old/sub-dir'), mocker.call('/old'), ]
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_cwd_not_a_dir(self, mocker, rc_am):
rc_am._os.getcwd.return_value = '/old'
rc_am._os.path.isdir.side_effect = lambda d: d != '/not-a-dir'
rc_am.run_command('/bin/ls', cwd='/not-a-dir')
assert rc_am._os.chdir.mock_calls == [mocker.call('/old'), ]
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_cwd_inaccessible(self, rc_am):
with pytest.raises(SystemExit):
rc_am.run_command('/bin/ls', cwd='/inaccessible')
assert rc_am.fail_json.called
args, kwargs = rc_am.fail_json.call_args
assert kwargs['rc'] == errno.EPERM
class TestRunCommandPrompt:
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_prompt_bad_regex(self, rc_am):
with pytest.raises(SystemExit):
rc_am.run_command('foo', prompt_regex='[pP)assword:')
assert rc_am.fail_json.called
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_prompt_no_match(self, mocker, rc_am):
rc_am._os._cmd_out[mocker.sentinel.stdout] = BytesIO(b'hello')
(rc, _, _) = rc_am.run_command('foo', prompt_regex='[pP]assword:')
assert rc == 0
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_prompt_match_wo_data(self, mocker, rc_am):
rc_am._os._cmd_out[mocker.sentinel.stdout] = BytesIO(b'Authentication required!\nEnter password: ')
(rc, _, _) = rc_am.run_command('foo', prompt_regex=r'[pP]assword:', data=None)
assert rc == 257
class TestRunCommandRc:
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_check_rc_false(self, rc_am):
rc_am._subprocess.Popen.return_value.returncode = 1
(rc, _, _) = rc_am.run_command('/bin/false', check_rc=False)
assert rc == 1
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_check_rc_true(self, rc_am):
rc_am._subprocess.Popen.return_value.returncode = 1
with pytest.raises(SystemExit):
rc_am.run_command('/bin/false', check_rc=True)
assert rc_am.fail_json.called
args, kwargs = rc_am.fail_json.call_args
assert kwargs['rc'] == 1
class TestRunCommandOutput:
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_text_stdin(self, rc_am):
(rc, stdout, stderr) = rc_am.run_command('/bin/foo', data='hello world')
assert rc_am._subprocess.Popen.return_value.stdin.getvalue() == b'hello world\n'
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_ascii_stdout(self, mocker, rc_am):
rc_am._os._cmd_out[mocker.sentinel.stdout] = BytesIO(b'hello')
(rc, stdout, stderr) = rc_am.run_command('/bin/cat hello.txt')
assert rc == 0
        # run_command returns native strings, so stdout is text on py3 and
        # bytes on py2.
assert stdout == 'hello'
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_utf8_output(self, mocker, rc_am):
rc_am._os._cmd_out[mocker.sentinel.stdout] = BytesIO(u'Žarn§'.encode('utf-8'))
rc_am._os._cmd_out[mocker.sentinel.stderr] = BytesIO(u'لرئيسية'.encode('utf-8'))
(rc, stdout, stderr) = rc_am.run_command('/bin/something_ugly')
assert rc == 0
        # run_command returns native strings, so stdout/stderr are text on py3
        # and bytes on py2.
assert stdout == to_native(u'Žarn§')
assert stderr == to_native(u'لرئيسية')
|
gpl-3.0
|
PsycheDox/CalCoin
|
contrib/spendfrom/spendfrom.py
|
792
|
10053
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
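# Illustrative failure mode (an assumption, not from the original): a json
# library that formats floats with only 12 significant digits, as str() did on
# old Python 2, would round-trip 20000000.00000003 to 20000000.0 and raise the
# RuntimeError above before any coins are moved.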
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
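# Example bitcoin.conf contents this parser accepts (placeholder values):
#   rpcuser=alice
#   rpcpassword=secret
#   rpcport=9332
#   testnet=0
# FakeSecHead injects the [all] section header that SafeConfigParser requires.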
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 19332 if testnet else 9332
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
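# Shape of the returned summary (illustrative, hypothetical address):
#   {"1ExampleAddrXXXXXXXXXXXXXXXXXXXXXX":
#       {"total": Decimal("1.5"), "outputs": [<unspent outputs>], "account": ""}}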
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
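# Worked example of the greedy selection above (hypothetical values): with
# needed=Decimal("1.5") and two inputs of amount Decimal("1.0") each,
# select_coins returns both outputs and change of Decimal("0.5").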
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
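    # For instance (illustrative): json.dumps(Decimal("0.001")) raises
    # TypeError with the stdlib json module, whereas float(Decimal("0.001"))
    # serializes cleanly at the cost of binary floating-point rounding.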
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
        if kb > 1 and (total_in-total_out) < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and (total_in-total_out) < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
        while not unlock_wallet(bitcoind):
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
mit
|
vanant/googleads-dfa-reporting-samples
|
python/v2.0/download_floodlight_tag.py
|
1
|
2053
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example downloads activity tags for a given floodlight activity.
Tags: floodlightActivities.generatetag
"""
__author__ = ('[email protected] (Jonathon Imperiosi)')
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'profile_id', type=int,
help='The ID of the profile to download tags for')
argparser.add_argument(
'activity_id', type=int,
help='The ID of the floodlight activity to download tags for')
def main(argv):
# Authenticate and construct service.
service, flags = sample_tools.init(
argv, 'dfareporting', 'v2.0', __doc__, __file__, parents=[argparser],
scope=['https://www.googleapis.com/auth/dfareporting',
'https://www.googleapis.com/auth/dfatrafficking'])
profile_id = flags.profile_id
activity_id = flags.activity_id
try:
# Construct the request.
request = service.floodlightActivities().generatetag(
profileId=profile_id, floodlightActivityId=activity_id)
# Execute request and print response.
response = request.execute()
print response['floodlightActivityTag']
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
|
apache-2.0
|
yongtang/tensorflow
|
tensorflow/python/kernel_tests/segment_reduction_ops_test.py
|
6
|
43677
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for segment reduction ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class SegmentReductionHelper(test.TestCase):
def _input(self, input_shape, dtype=dtypes_lib.int32):
num_elem = 1
for x in input_shape:
num_elem *= x
values = np.arange(1, num_elem + 1)
np_values = values.reshape(input_shape).astype(dtype.as_numpy_dtype)
# Add a non-zero imaginary component to complex types.
if dtype.is_complex:
np_values -= 1j * np_values
return constant_op.constant(
np_values, shape=input_shape, dtype=dtype), np_values
def _segmentReduce(self, indices, x, op1, op2=None, num_segments=None,
initial_value=0):
if not x.size:
return np.array([])
indices = np.asarray(indices)
if num_segments is None:
num_segments = indices[-1] + 1
output = [None] * num_segments
slice_shape = x.shape[indices.ndim:]
x_flat = x.reshape((indices.size,) + slice_shape)
for i, index in enumerate(indices.ravel()):
if (output[index] is not None) and op1 == np.max:
for j in range(0, output[index].shape[0]):
output[index][j] = op1([output[index][j], x_flat[i][j]])
elif output[index] is not None:
output[index] = op1(output[index], x_flat[i])
else:
output[index] = x_flat[i]
    # initialize values that are still uncalculated with the initial value.
initial_value_slice = np.ones(slice_shape) * initial_value
output = [o if o is not None else initial_value_slice for o in output]
if op2 is not None:
output = [op2(o) for o in output]
output = [o.reshape(slice_shape) for o in output]
return np.array(output)
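  # Worked example of the helper above (illustrative, not used by the tests):
  # indices=[0, 0, 1] with x=[[1, 2], [3, 4], [5, 6]] and op1=np.add gives
  # [[4, 6], [5, 6]] -- rows that share a segment id are reduced together.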
def _mean_cum_op(self, x, y):
return (x[0] + y, x[1] + 1) if isinstance(x, tuple) else (x + y, 2)
def _mean_reduce_op(self, x):
return x[0] / x[1] if isinstance(x, tuple) else x
def _sqrt_n_reduce_op(self, x):
return x[0] / np.sqrt(x[1]) if isinstance(x, tuple) else x
class SegmentReductionOpTest(SegmentReductionHelper):
def testValues(self):
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int64,
dtypes_lib.int32, dtypes_lib.complex64, dtypes_lib.complex128
]
# Each item is np_op1, np_op2, tf_op
ops_list = [(np.add, None, math_ops.segment_sum),
(self._mean_cum_op, self._mean_reduce_op,
math_ops.segment_mean),
(np.ndarray.__mul__, None, math_ops.segment_prod),
(np.minimum, None, math_ops.segment_min),
(np.maximum, None, math_ops.segment_max)]
# A subset of ops has been enabled for complex numbers
complex_ops_list = [(np.add, None, math_ops.segment_sum),
(np.ndarray.__mul__, None, math_ops.segment_prod),
(self._mean_cum_op, self._mean_reduce_op,
math_ops.segment_mean)]
n = 10
shape = [n, 2]
indices = [i // 3 for i in range(n)]
for dtype in dtypes:
if dtype in (dtypes_lib.complex64, dtypes_lib.complex128):
curr_ops_list = complex_ops_list
else:
curr_ops_list = ops_list
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, np_x = self._input(shape, dtype=dtype)
for np_op1, np_op2, tf_op in curr_ops_list:
np_ans = self._segmentReduce(indices, np_x, np_op1, np_op2)
s = tf_op(data=tf_x, segment_ids=indices)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
# NOTE(mrry): The static shape inference that computes
            # `tf_ans.shape` can only infer the sizes from dimension 1
# onwards, because the size of dimension 0 is data-dependent
# and may therefore vary dynamically.
self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:])
@test_util.run_deprecated_v1
def testSegmentIdsShape(self):
shape = [4, 4]
tf_x, _ = self._input(shape)
indices = constant_op.constant([0, 1, 2, 2], shape=[2, 2])
with self.assertRaises(ValueError):
math_ops.segment_sum(data=tf_x, segment_ids=indices)
@test_util.run_deprecated_v1
def testSegmentIdsSize(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, _ = self._input(shape)
indices = [0, 1]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment_ids should be the same size"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentIdsValid(self):
# This is a baseline for the following SegmentIdsInvalid* tests.
shape = [4, 4]
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, _ = self._input(shape, dtype=dtypes_lib.float32)
indices = [0, 0, 0, 1]
result = math_ops.segment_sum(data=tf_x, segment_ids=indices).eval()
self.assertAllEqual([[15, 18, 21, 24], [13, 14, 15, 16]], result)
def testSegmentIdsGreaterThanZero(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, np_x = self._input(shape, dtype=dtypes_lib.float32)
indices = [1, 1, 2, 2]
np_ans = self._segmentReduce(indices, np_x, np.add)
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
def testSegmentIdsHole(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, np_x = self._input(shape, dtype=dtypes_lib.float32)
indices = [0, 0, 3, 3]
np_ans = self._segmentReduce(indices, np_x, np.add)
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
@test_util.run_deprecated_v1
def testSegmentIdsInvalid1(self):
shape = [4, 4]
with self.cached_session():
tf_x, _ = self._input(shape)
indices = [-1, -1, 0, 0]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError(
r"Segment id -1 out of range \[0, 1\), possibly because "
"'segment_ids' input is not sorted."):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentIdsInvalid2(self):
shape = [4, 4]
with self.cached_session():
tf_x, _ = self._input(shape)
indices = [0, 1, 0, 1]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids are not increasing"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentIdsInvalid3(self):
shape = [4, 4]
with self.cached_session():
tf_x, _ = self._input(shape)
indices = [0, 1, 2, 0]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError(
r"Segment id 1 out of range \[0, 1\), possibly "
"because 'segment_ids' input is not sorted."):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentIdsInvalid4(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, _ = self._input(shape, dtype=dtypes_lib.float32)
indices = [0, 0, 0, -1]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentIdsInvalid5(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
tf_x, _ = self._input(shape, dtype=dtypes_lib.float32)
indices = [0, 0, 0, -2]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testGradient(self):
shape = [4, 4]
indices = [0, 1, 2, 2]
for tf_op in [
math_ops.segment_sum, math_ops.segment_mean, math_ops.segment_min,
math_ops.segment_max
]:
with self.cached_session():
tf_x, np_x = self._input(shape, dtype=dtypes_lib.float64)
s = tf_op(data=tf_x, segment_ids=indices)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s, [3, 4],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n)
def testDataInvalid(self):
# Test case for GitHub issue 40653.
for use_gpu in [True, False]:
with self.cached_session(use_gpu=use_gpu):
with self.assertRaisesRegex(
(ValueError, errors_impl.InvalidArgumentError),
"must be at least rank 1"):
s = math_ops.segment_mean(
data=np.uint16(10), segment_ids=np.array([]).astype("int64"))
self.evaluate(s)
class UnsortedSegmentTest(SegmentReductionHelper):
def __init__(self, methodName='runTest'):
# Each item is np_op1, np_op2, tf_op, initial_value functor
self.ops_list = [(np.add, None,
math_ops.unsorted_segment_sum, lambda t: 0),
(self._mean_cum_op, self._mean_reduce_op,
math_ops.unsorted_segment_mean, lambda t: 0),
(self._mean_cum_op, self._sqrt_n_reduce_op,
math_ops.unsorted_segment_sqrt_n, lambda t: 0),
(np.ndarray.__mul__, None,
math_ops.unsorted_segment_prod, lambda t: 1),
(np.minimum, None,
math_ops.unsorted_segment_min, lambda t: t.max),
(np.maximum, None,
math_ops.unsorted_segment_max, lambda t: t.min)]
# A subset of ops has been enabled for complex numbers
self.complex_ops_list = [(np.add, None,
math_ops.unsorted_segment_sum, lambda t: 0),
(np.ndarray.__mul__, None,
math_ops.unsorted_segment_prod, lambda t: 1)]
self.differentiable_dtypes = [dtypes_lib.float16, dtypes_lib.float32,
dtypes_lib.float64]
self.all_dtypes = (self.differentiable_dtypes +
[dtypes_lib.bfloat16,
dtypes_lib.int64, dtypes_lib.int32,
dtypes_lib.complex64, dtypes_lib.complex128])
super(UnsortedSegmentTest, self).__init__(methodName=methodName)
def testValues(self):
indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = 12
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (2,)
for dtype in self.all_dtypes:
ops_list = self.complex_ops_list if dtype.is_complex else self.ops_list
tf_x, np_x = self._input(shape, dtype=dtype)
for use_gpu in [True, False]:
with self.cached_session():
for np_op1, np_op2, tf_op, init_op in ops_list:
# sqrt_n doesn't support integers
if (np_op2 == self._sqrt_n_reduce_op and dtype.is_integer):
continue
# todo(philjd): enable this test once real_div supports bfloat16
if (np_op2 in [self._sqrt_n_reduce_op, self._mean_reduce_op] and
dtype == dtypes_lib.bfloat16):
continue
np_ans = self._segmentReduce(
indices, np_x, np_op1, np_op2, num_segments=num_segments,
initial_value=init_op(dtype))
s = tf_op(tf_x, segment_ids=indices, num_segments=num_segments)
tf_ans = self.evaluate(s)
if dtype is dtypes_lib.bfloat16:
tf_ans = tf_ans.astype(np.float32)
self.assertAllCloseAccordingToType(np_ans, tf_ans)
self.assertShapeEqual(np_ans, s)
def testNumSegmentsTypes(self):
dtypes = [dtypes_lib.int32, dtypes_lib.int64]
indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = 12
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (2,)
for dtype in dtypes:
with self.cached_session():
tf_x, np_x = self._input(shape)
num_segments_constant = constant_op.constant(
num_segments, dtype=dtype)
np_ans = self._segmentReduce(
indices, np_x, np.add, op2=None, num_segments=num_segments)
s = math_ops.unsorted_segment_sum(
data=tf_x,
segment_ids=indices,
num_segments=num_segments_constant)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
self.assertShapeEqual(np_ans, s)
@test_util.run_deprecated_v1
def testGradientsTFGradients(self):
num_cols = 2
indices_flat = np.array([0, 4, 0, -1, 3, -1, 4, 7, 7, 3])
num_segments = max(indices_flat) + 3
for dtype in self.differentiable_dtypes:
ops_list = self.complex_ops_list if dtype.is_complex else self.ops_list
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (num_cols,)
# test CPU and GPU as tf.gather behaves differently on each device
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu):
for _, _, tf_op, _ in ops_list:
tf_x, np_x = self._input(shape, dtype=dtype)
s = tf_op(tf_x, indices, num_segments)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s, [num_segments, num_cols],
x_init_value=np_x,
delta=1.)
self.assertAllCloseAccordingToType(jacob_t, jacob_n,
half_atol=1e-2)
@test_util.run_in_graph_and_eager_modes
def testGradientsGradientTape(self):
num_cols = 2
indices_flat = np.array([0, 4, 0, -1, 3, -1, 4, 7, 7, 3])
num_segments = max(indices_flat) + 3
for dtype in self.differentiable_dtypes:
ops_list = self.complex_ops_list if dtype.is_complex else self.ops_list
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (num_cols,)
# test CPU and GPU as tf.gather behaves differently on each device
for use_gpu in [test_util.use_gpu, test_util.force_cpu]:
with use_gpu():
for _, _, tf_op, _ in ops_list:
_, np_x = self._input(shape, dtype=dtype)
# pylint: disable=cell-var-from-loop
def f(x):
return tf_op(x, indices, num_segments)
gradient_tape_jacob_t, jacob_n = (
gradient_checker_v2.compute_gradient(
f, [np_x], delta=1.))
# pylint: enable=cell-var-from-loop
self.assertAllCloseAccordingToType(jacob_n, gradient_tape_jacob_t,
half_atol=1e-2)
@test_util.run_deprecated_v1
def testProdGrad(self):
# additional test for the prod gradient to ensure correct handling of zeros
values = np.array([0, 0, 1, 0, 2, 2, 3, 3, 3], dtype=np.float32)
indices = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2], dtype=np.int32)
indices_neg = np.array([-1, 0, 0, -1, 1, 1, -1, 2, 2], dtype=np.int32)
values_tf = constant_op.constant(values)
# ground truth partial derivatives
gradients_indices = np.zeros((9, 3), dtype=np.float32)
gradients_indices_neg = np.zeros((9, 3), dtype=np.float32)
# the derivative w.r.t. to the other segments is zero, so here we only
# explicitly set the grad values for the corresponding segment
gradients_indices[range(9), indices] = [0, 0, 0, 4, 0, 0, 9, 9, 9]
gradients_indices_neg[range(9), indices_neg] = [0, 1, 0, 0, 2, 2, 0, 3, 3]
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu):
for ind, grad_gt in [(indices, gradients_indices),
(indices_neg, gradients_indices_neg)]:
s = math_ops.unsorted_segment_prod(values_tf,
constant_op.constant(ind), 3)
jacob_t, jacob_n = gradient_checker.compute_gradient(
values_tf, (9,), s, (3,), x_init_value=values, delta=1)
self.assertAllClose(jacob_t, jacob_n)
self.assertAllClose(jacob_t, grad_gt)
@test_util.run_deprecated_v1
def testGradientMatchesSegmentSum(self):
# Strategy: compute the gradient for UnsortedSegmentSum and SegmentSum
# and compare the outputs, which should be identical.
# NB: for this test to work, indices must be valid for SegmentSum, namely
# it must be sorted, the indices must be contiguous, and num_segments
# must be max(indices) + 1.
indices = [0, 0, 1, 1, 1, 2, 3, 4, 5]
n = len(indices)
num_cols = 2
shape = [n, num_cols]
num_segments = max(indices) + 1
for dtype in self.differentiable_dtypes:
with self.cached_session():
tf_x, np_x = self._input(shape, dtype=dtype)
# Results from UnsortedSegmentSum
unsorted_s = math_ops.unsorted_segment_sum(
data=tf_x, segment_ids=indices, num_segments=num_segments)
unsorted_jacob_t, unsorted_jacob_n = (
gradient_checker.compute_gradient(tf_x, shape, unsorted_s,
[num_segments, num_cols],
x_init_value=np_x, delta=1))
# Results from SegmentSum
sorted_s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
sorted_jacob_t, sorted_jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
sorted_s, [num_segments, num_cols],
x_init_value=np_x,
delta=1)
self.assertAllClose(unsorted_jacob_t, sorted_jacob_t)
self.assertAllClose(unsorted_jacob_n, sorted_jacob_n)
@test_util.run_deprecated_v1
def testBadIndices(self):
# Note: GPU kernel does not return the out-of-range error needed for this
# test, so this test is marked as cpu-only.
# Note: With PR #13055 a negative index will be ignored silently.
with self.session(use_gpu=False):
for bad in [[2]], [[7]]:
unsorted = math_ops.unsorted_segment_sum([[17]], bad, num_segments=2)
with self.assertRaisesOpError(
r"segment_ids\[0,0\] = %d is out of range \[0, 2\)" % bad[0][0]):
self.evaluate(unsorted)
@test_util.run_deprecated_v1
def testEmptySecondDimension(self):
dtypes = [np.float16, np.float32, np.float64, np.int64, np.int32,
np.complex64, np.complex128]
with self.session():
for dtype in dtypes:
for itype in (np.int32, np.int64):
data = np.zeros((2, 0), dtype=dtype)
segment_ids = np.array([0, 1], dtype=itype)
unsorted = math_ops.unsorted_segment_sum(data, segment_ids, 2)
self.assertAllEqual(unsorted, np.zeros((2, 0), dtype=dtype))
def testDropNegatives(self):
    # Note: the test replaces every segment id equal to 8 with -1 in the
    # indices and zeroes out the corresponding values in the numpy reference.
indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = 12
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (2,)
for dtype in self.all_dtypes:
with self.session():
tf_x, np_x = self._input(shape, dtype=dtype)
np_ans = self._segmentReduce(
indices, np_x, np.add, op2=None, num_segments=num_segments)
          # Zero out np_ans from segment 8 on; 8 is dropped, 9-11 are empty.
np_ans[8:] = 0
# Replace 8 with -1 in indices
np.place(indices, indices == 8, [-1])
s = math_ops.unsorted_segment_sum(
data=tf_x, segment_ids=indices, num_segments=num_segments)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
self.assertShapeEqual(np_ans, s)
class SparseSegmentReductionHelper(SegmentReductionHelper):
def _sparse_input(self, input_shape, num_indices, dtype=dtypes_lib.int32):
a, b = super(SparseSegmentReductionHelper, self)._input(input_shape, dtype)
indices = np.random.randint(0, input_shape[0], num_indices).astype(np.int32)
return (constant_op.constant(
indices, dtype=dtypes_lib.int32), indices, a, b)
def _sparseSegmentReduce(self,
x,
indices,
segment_indices,
op1,
op2=None,
num_segments=None):
return self._segmentReduce(
segment_indices, x[indices], op1, op2, num_segments=num_segments)
def _sparseSegmentReduceGrad(self, ygrad, indices, segment_ids, output_dim0,
mode):
assert mode in ("sum", "mean", "sqrtn")
if mode != "sum":
weights = np.zeros(ygrad.shape[0], ygrad.dtype)
for segment in segment_ids:
weights[segment] += 1
weights = 1. / weights if mode == "mean" else 1. / np.sqrt(weights)
xgrad = np.zeros([output_dim0, ygrad.shape[1]], ygrad.dtype)
for segment, index in zip(segment_ids, indices):
if mode == "sum":
xgrad[index] += ygrad[segment]
else:
xgrad[index] += ygrad[segment] * weights[segment]
return xgrad
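  # Weighting above (illustrative): for mode="mean", a segment that gathers k
  # rows scatters ygrad[segment] / k back to each of those rows; "sqrtn" uses
  # ygrad[segment] / sqrt(k), and "sum" scatters the gradient unscaled.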
class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
def testValues(self):
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int64,
dtypes_lib.int32
]
index_dtypes = [dtypes_lib.int32, dtypes_lib.int64]
segment_ids_dtypes = [dtypes_lib.int32, dtypes_lib.int64]
mean_dtypes = [dtypes_lib.float32, dtypes_lib.float64]
# Each item is np_op1, np_op2, tf_op
ops_list = [(np.add, None, math_ops.sparse_segment_sum),
(self._mean_cum_op, self._mean_reduce_op,
math_ops.sparse_segment_mean)]
n = 400
# Note that the GPU implem has different paths for different inner sizes.
for inner_size in [1, 2, 3, 32]:
shape = [n, inner_size]
segment_indices = []
for i in range(20):
for _ in range(i + 1):
segment_indices.append(i)
num_indices = len(segment_indices)
for dtype in dtypes:
for index_dtype in index_dtypes:
for segment_ids_dtype in segment_ids_dtypes:
with self.cached_session():
tf_indices, np_indices, tf_x, np_x = self._sparse_input(
shape, num_indices, dtype=dtype)
for np_op1, np_op2, tf_op in ops_list:
if (tf_op == math_ops.sparse_segment_mean and
dtype not in mean_dtypes):
continue
np_ans = self._sparseSegmentReduce(np_x, np_indices,
segment_indices, np_op1,
np_op2)
s = tf_op(
data=tf_x,
indices=math_ops.cast(tf_indices, index_dtype),
segment_ids=math_ops.cast(segment_indices,
segment_ids_dtype))
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
# NOTE(mrry): The static shape inference that computes
                # `tf_ans.shape` can only infer the sizes from dimension 1
# onwards, because the size of dimension 0 is data-dependent
# and may therefore vary dynamically.
self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:])
def testSegmentIdsHole(self):
tf_x, np_x = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [(np.add, None, math_ops.sparse_segment_sum), (
self._mean_cum_op, self._mean_reduce_op, math_ops.sparse_segment_mean)]
segment_indices = [0, 2, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.session():
for np_op1, np_op2, tf_op in ops_list:
np_ans = self._sparseSegmentReduce(np_x, tf_indices, segment_indices,
np_op1, np_op2)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
def testWithNumSegments(self):
tf_x, np_x = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [(np.add, None, math_ops.sparse_segment_sum_with_num_segments),
(self._mean_cum_op, self._mean_reduce_op,
math_ops.sparse_segment_mean_with_num_segments)]
segment_indices = [0, 2, 2, 2]
tf_indices = [8, 3, 0, 9]
num_segments = 5
with self.session():
for np_op1, np_op2, tf_op in ops_list:
np_ans = self._sparseSegmentReduce(
np_x,
tf_indices,
segment_indices,
np_op1,
np_op2,
num_segments=num_segments)
s = tf_op(
data=tf_x,
indices=tf_indices,
segment_ids=segment_indices,
num_segments=num_segments)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
def testWithEmptySegments(self):
tf_x = constant_op.constant([], shape=[0, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_sum_with_num_segments,
math_ops.sparse_segment_mean_with_num_segments
]
segment_indices = []
tf_indices = []
num_segments = 5
with self.session():
for tf_op in ops_list:
s = tf_op(
data=tf_x,
indices=tf_indices,
segment_ids=segment_indices,
num_segments=num_segments)
tf_ans = self.evaluate(s)
self.assertAllClose(np.zeros([5, 4]), tf_ans)
@test_util.run_in_graph_and_eager_modes
  def testSegmentScalarDataRaisesInvalidArgumentError(self):
"""Test for github #46897."""
ops_list = [
math_ops.sparse_segment_sum,
math_ops.sparse_segment_mean,
math_ops.sparse_segment_sqrt_n,
]
for op in ops_list:
with self.assertRaisesRegex(
(ValueError, errors_impl.InvalidArgumentError),
"Shape must be at least rank 1"):
op(data=1.0, indices=[0], segment_ids=[3])
def testSegmentIdsGreaterThanZero(self):
tf_x, np_x = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [(np.add, None, math_ops.sparse_segment_sum), (
self._mean_cum_op, self._mean_reduce_op, math_ops.sparse_segment_mean)]
segment_indices = [1, 2, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.session():
for np_op1, np_op2, tf_op in ops_list:
np_ans = self._sparseSegmentReduce(np_x, tf_indices, segment_indices,
np_op1, np_op2)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
tf_ans = self.evaluate(s)
self.assertAllClose(np_ans, tf_ans)
def testValid(self):
# Baseline for the test*Invalid* methods below.
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.session():
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
self.evaluate(s)
@test_util.run_deprecated_v1
def testIndicesInvalid1(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, -1, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"indices\[1\] == -1 out of range \[0, 10\)"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testIndicesInvalid2(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 10]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"indices\[3\] == 10 out of range \[0, 10\)"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentsInvalid2(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 0, 1]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError("segment ids are not increasing"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentsInvalid3(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 0]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"Segment id 1 out of range \[0, 1\), possibly because "
"'segment_ids' input is not sorted"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentsInvalid4(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [-1, 0, 1, 1]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"Segment id -1 out of range \[0, 2\), possibly because "
"'segment_ids' input is not sorted"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentsInvalid6(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 0, 0, -1]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentsInvalid7(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 0, 0, -2]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
self.evaluate(s)
def testSegmentWithNumSegmentsValid(self):
# Baseline for the test*WithNumSegmentsInvalid* methods below.
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_sum_with_num_segments,
math_ops.sparse_segment_mean_with_num_segments,
]
num_segments = 5
segment_indices = [0, 1, 3, 3]
tf_indices = [8, 3, 0, 9]
with self.session():
for tf_op in ops_list:
s = tf_op(
data=tf_x,
indices=tf_indices,
segment_ids=segment_indices,
num_segments=num_segments)
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentWithNumSegmentsInvalid1(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_sum_with_num_segments,
math_ops.sparse_segment_mean_with_num_segments,
]
num_segments = 5
segment_indices = [0, 1, 3, 5]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(
data=tf_x,
indices=tf_indices,
segment_ids=segment_indices,
num_segments=num_segments)
with self.assertRaisesOpError("segment ids must be < num_segments"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testSegmentWithNumSegmentsInvalid2(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_sum_with_num_segments,
math_ops.sparse_segment_mean_with_num_segments,
]
num_segments = -2
segment_indices = [0, 1, 3, 3]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
with self.assertRaisesRegex(
ValueError, "Cannot specify a negative value for num_segments"):
tf_op(
data=tf_x,
indices=tf_indices,
segment_ids=segment_indices,
num_segments=num_segments)
@test_util.run_deprecated_v1
def testGradient(self):
shape = [10, 4]
segment_indices = [0, 1, 2, 2]
num_indices = len(segment_indices)
for tf_op in [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]:
with self.cached_session():
tf_indices, _, tf_x, np_x = self._sparse_input(
shape, num_indices, dtype=dtypes_lib.float64)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s, [3, 4],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n)
@test_util.run_deprecated_v1
def testGradientWithEmptySegmentsAtEnd(self):
shape = [10, 4]
num_segments = 5
segment_indices = [0, 1, 2, 2]
num_indices = len(segment_indices)
for tf_op in [
math_ops.sparse_segment_sum_with_num_segments,
math_ops.sparse_segment_mean_with_num_segments,
]:
with self.cached_session():
tf_indices, _, tf_x, np_x = self._sparse_input(
shape, num_indices, dtype=dtypes_lib.float64)
s = tf_op(
data=tf_x,
indices=tf_indices,
segment_ids=segment_indices,
num_segments=num_segments)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s, [5, 4],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n)
def testGradientExplicit(self):
# Note that the GPU implem has different paths for different inner sizes.
for inner_size in (1, 2, 3, 32):
with self.session():
tf_ygrad, np_ygrad = self._input([3, inner_size],
dtype=dtypes_lib.float32)
segment_ids = [0, 1, 2, 2, 2]
indices = [8, 3, 0, 9, 3]
output_dim0 = 10
ops_list = [
(math_ops.sparse_segment_sum_grad, "sum"),
(math_ops.sparse_segment_mean_grad, "mean"),
(math_ops.sparse_segment_sqrt_n_grad, "sqrtn"),
]
for tf_op, mode in ops_list:
np_xgrad = self._sparseSegmentReduceGrad(np_ygrad, indices,
segment_ids, output_dim0,
mode)
tf_xgrad = tf_op(tf_ygrad, indices, segment_ids, output_dim0)
self.assertAllClose(tf_xgrad, np_xgrad)
def testGradientValid(self):
# Baseline for the testGradient*Invalid* methods below.
tf_x, _ = self._input([3, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_sum_grad, math_ops.sparse_segment_mean_grad,
math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
self.evaluate(s)
@test_util.run_deprecated_v1
def testGradientIndicesInvalid1(self):
tf_x, _ = self._input([3, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_sum_grad, math_ops.sparse_segment_mean_grad,
math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 10]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Index 10 out of range \[0, 10\)"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testGradientIndicesInvalid2(self):
tf_x, _ = self._input([3, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_sum_grad, math_ops.sparse_segment_mean_grad,
math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, -1, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Index -1 out of range \[0, 10\)"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testGradientSegmentsInvalid1(self):
tf_x, _ = self._input(
[3, 4], dtype=dtypes_lib.float32) # expecting 3 segments
ops_list = [
math_ops.sparse_segment_sum_grad, math_ops.sparse_segment_mean_grad,
math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 1, 4] # 5 segments
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError("Invalid number of segments"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testGradientSegmentsInvalid2(self):
tf_x, _ = self._input([1, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_sum_grad, math_ops.sparse_segment_mean_grad,
math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, 0]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Segment id 1 out of range \[0, 1\)"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testGradientSegmentsInvalid3(self):
tf_x, _ = self._input([2, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_sum_grad, math_ops.sparse_segment_mean_grad,
math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [-1, 0, 1, 1]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Segment id -1 out of range \[0, 2\)"):
self.evaluate(s)
@test_util.run_deprecated_v1
def testGradientSegmentsInvalid4(self):
tf_x, _ = self._input([0, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_sum_grad, math_ops.sparse_segment_mean_grad,
math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, -1]
tf_indices = [8, 3, 0, 9]
with self.session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Segment id 0 out of range \[0, 0\)"):
self.evaluate(s)
class SegmentReductionOpBenchmark(test.Benchmark):
outer_dim_options = [2**x for x in range(9, 14, 2)]
ratio_options = [2**x for x in range(1, 6, 2)]
inner_dim_options = [2**x for x in range(9, 14, 2)]
  # randomly generated sizes with less favorable memory alignment
inner_dim_options += [
1120, 1215, 1856, 1302, 1329, 1531, 1313, 1672, 1851, 1584
]
dtype_options = [np.float32, np.float64]
options = (outer_dim_options, ratio_options, inner_dim_options, dtype_options)
# pylint: disable=g-long-lambda
op_functors = [lambda vc, vs, seg_ids:
("sorted", math_ops.segment_sum(vc, vs)),
lambda vc, vs, seg_ids:
("unsorted",
math_ops.unsorted_segment_sum(vc, vs, seg_ids[-1]+1))]
# pylint: enable=g-long-lambda
repeat = 10
def _npTypeToStr(self, t):
if t == np.float32:
return "fp32"
if t == np.float64:
return "fp64"
def _runGraph(self, op_functor, outer_dim, ratio, inner_dim, dtype):
output_outer_dim = int(outer_dim / ratio)
const = np.random.randint(5, size=(outer_dim, inner_dim))
seg_ids = np.sort(np.random.randint(output_outer_dim, size=outer_dim))
vs = variables.Variable(seg_ids.astype(np.int32))
with ops.device("/gpu:0"):
vc = variables.Variable(const.astype(dtype))
name, op = op_functor(vc, vs, seg_ids)
with session.Session() as sess:
self.evaluate(variables.global_variables_initializer())
r = self.run_op_benchmark(
sess,
op,
min_iters=self.repeat,
name="_".join(
map(str,
[name, outer_dim, ratio, inner_dim,
self._npTypeToStr(dtype)])))
return name, r["wall_time"]
def benchmarkSegmentSumGPU(self):
if not test.is_gpu_available(cuda_only=True):
return
for outer_dim, ratio, inner_dim, dtype in itertools.product(*self.options):
op_functor = self.op_functors[0]
with ops.Graph().as_default():
self._runGraph(op_functor, outer_dim, ratio, inner_dim, dtype)
def benchmarkUnsortedSegmentSumGPU(self):
if not test.is_gpu_available(cuda_only=True):
return
for outer_dim, ratio, inner_dim, dtype in itertools.product(*self.options):
op_functor = self.op_functors[1]
with ops.Graph().as_default():
self._runGraph(op_functor, outer_dim, ratio, inner_dim, dtype)
if __name__ == "__main__":
test.main()
|
apache-2.0
|
maxtors/suricata
|
qa/sock_to_gzip_file.py
|
31
|
1995
|
#!/usr/bin/python
#I love the python Power Glove. It's so bad!
#Usage: sudo -u suricata ./sock_to_gzip_file.py --output-file="http.log.gz" --listen-sock="http.log.sock"
import socket,os
import gzip
import sys
from optparse import OptionParser
if __name__ == "__main__":
parser = OptionParser()
#Path to the socket
parser.add_option("--listen-sock", dest="lsock", type="string", help="Path to the socket we will listen on.")
#Path to gzip file we will write
parser.add_option("--output-file", dest="output", type="string", help="Path to file name to output gzip file we will write to.")
#parse the opts
(options, args) = parser.parse_args()
options.usage = "example: sudo -u suricata ./sock_to_gzip_file.py --output-file=\"http.log.gz\" --listen-sock=\"http.log.sock\"\n"
#Open the output file
if options.output:
try:
f = gzip.open(options.output, 'wb')
except Exception,e:
print("Error: could not open output file %s:\n%s\n", options.output, e)
sys.exit(-1)
else:
print("Error: --output-file option required and was not specified\n%s" % (options.usage))
sys.exit(-1)
#Open our socket and bind
if options.lsock:
if os.path.exists(options.lsock):
try:
os.remove(options.lsock)
except OSError:
pass
try:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.bind(options.lsock)
s.listen(1)
conn, addr = s.accept()
except Exception,e:
print("Error: Failed to bind socket %s\n%s\n", options.lsock, e)
sys.exit(-1)
else:
print("Error: --listen-sock option required and was not specified\n%s" % (options.usage))
sys.exit(-1)
#Read data from the socket and write to the file
while 1:
data = conn.recv(1024)
if not data: break
f.write(data)
conn.close()
f.close()
|
gpl-2.0
|
jaggu303619/asylum
|
openerp/addons/account_bank_statement_extensions/report/bank_statement_balance_report.py
|
52
|
2883
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
from openerp import pooler
import logging
_logger = logging.getLogger(__name__)
class bank_statement_balance_report(report_sxw.rml_parse):
def set_context(self, objects, data, ids, report_type=None):
#_logger.warning('addons.'+__name__, 'set_context, objects = %s, data = %s, ids = %s' % (objects, data, ids))
cr = self.cr
uid = self.uid
context = self.context
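        # The inner SELECT keeps, for each journal, only the statement with
        # the most recent date, so the report lists one closing balance
        # (balance_end_real) per journal.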
cr.execute('SELECT s.name as s_name, s.date AS s_date, j.code as j_code, s.balance_end_real as s_balance ' \
'FROM account_bank_statement s ' \
'INNER JOIN account_journal j on s.journal_id = j.id ' \
'INNER JOIN ' \
'(SELECT journal_id, max(date) as max_date FROM account_bank_statement ' \
'GROUP BY journal_id) d ' \
'ON (s.journal_id = d.journal_id AND s.date = d.max_date) ' \
'ORDER BY j.code')
lines = cr.dictfetchall()
self.localcontext.update( {
'lines': lines,
})
super(bank_statement_balance_report, self).set_context(objects, data, ids, report_type=report_type)
def __init__(self, cr, uid, name, context):
if context is None:
context = {}
super(bank_statement_balance_report, self).__init__(cr, uid, name, context=context)
self.localcontext.update( {
'time': time,
})
self.context = context
report_sxw.report_sxw(
'report.bank.statement.balance.report',
'account.bank.statement',
'addons/account_bank_statement_extensions/report/bank_statement_balance_report.rml',
parser=bank_statement_balance_report,
header='internal'
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
IV-GII/SocialCookies
|
ENV1/lib/python2.7/site-packages/django/middleware/transaction.py
|
113
|
2641
|
import warnings
from django.core.exceptions import MiddlewareNotUsed
from django.db import connection, transaction
class TransactionMiddleware(object):
"""
Transaction middleware. If this is enabled, each view function will be run
with commit_on_response activated - that way a save() doesn't do a direct
commit, the commit is done when a successful response is created. If an
exception happens, the database is rolled back.
"""
def __init__(self):
warnings.warn(
"TransactionMiddleware is deprecated in favor of ATOMIC_REQUESTS.",
PendingDeprecationWarning, stacklevel=2)
if connection.settings_dict['ATOMIC_REQUESTS']:
raise MiddlewareNotUsed
def process_request(self, request):
"""Enters transaction management"""
transaction.enter_transaction_management()
def process_exception(self, request, exception):
"""Rolls back the database and leaves transaction management"""
if transaction.is_dirty():
# This rollback might fail because of network failure for example.
# If rollback isn't possible it is impossible to clean the
# connection's state. So leave the connection in dirty state and
# let request_finished signal deal with cleaning the connection.
transaction.rollback()
transaction.leave_transaction_management()
def process_response(self, request, response):
"""Commits and leaves transaction management."""
if not transaction.get_autocommit():
if transaction.is_dirty():
# Note: it is possible that the commit fails. If the reason is
# closed connection or some similar reason, then there is
# little hope to proceed nicely. However, in some cases (
            # deferred foreign key checks, for example) it is still possible
# to rollback().
try:
transaction.commit()
except Exception:
# If the rollback fails, the transaction state will be
# messed up. It doesn't matter, the connection will be set
# to clean state after the request finishes. And, we can't
# clean the state here properly even if we wanted to, the
# connection is in transaction but we can't rollback...
transaction.rollback()
transaction.leave_transaction_management()
raise
transaction.leave_transaction_management()
return response
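# A hedged settings-side sketch (not part of this module): how a project would
# enable this middleware pre-1.6, and the ATOMIC_REQUESTS replacement that the
# deprecation warning above points to. Database values are placeholders.
MIDDLEWARE_CLASSES = (
    'django.middleware.transaction.TransactionMiddleware',
    # ... other middleware ...
)
# Recommended replacement (a per-database option; with it set, __init__ above
# raises MiddlewareNotUsed and the middleware is skipped):
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'ATOMIC_REQUESTS': True,
    },
}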
|
gpl-2.0
|
opentracing/opentracing-python
|
tests/mocktracer/test_propagation.py
|
3
|
2742
|
# Copyright (c) The OpenTracing Authors.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import pytest
from opentracing import Format, SpanContextCorruptedException, \
UnsupportedFormatException
from opentracing.mocktracer import MockTracer
def test_propagation():
tracer = MockTracer()
sp = tracer.start_span(operation_name='test')
sp.set_baggage_item('foo', 'bar')
# Test invalid types
with pytest.raises(UnsupportedFormatException):
tracer.inject(sp.context, 'invalid', {})
with pytest.raises(UnsupportedFormatException):
tracer.extract('invalid', {})
tests = [(Format.BINARY, bytearray()),
(Format.TEXT_MAP, {})]
for format, carrier in tests:
tracer.inject(sp.context, format, carrier)
extracted_ctx = tracer.extract(format, carrier)
assert extracted_ctx.trace_id == sp.context.trace_id
assert extracted_ctx.span_id == sp.context.span_id
assert extracted_ctx.baggage == sp.context.baggage
def test_propagation_extract_corrupted_data():
tracer = MockTracer()
tests = [(Format.BINARY, bytearray()),
(Format.TEXT_MAP, {})]
for format, carrier in tests:
with pytest.raises(SpanContextCorruptedException):
tracer.extract(format, carrier)
def test_start_span():
""" Test in process child span creation."""
tracer = MockTracer()
sp = tracer.start_span(operation_name='test')
sp.set_baggage_item('foo', 'bar')
child = tracer.start_span(
operation_name='child', child_of=sp.context)
assert child.context.trace_id == sp.context.trace_id
assert child.context.baggage == sp.context.baggage
assert child.parent_id == sp.context.span_id
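# A hedged sketch (not part of the original suite): cross-process propagation
# with an HTTP-header-style carrier. MockTracer registers a text propagator
# for Format.HTTP_HEADERS as well, so a plain dict stands in for real headers.
def example_http_header_propagation():
    tracer = MockTracer()
    headers = {}
    with tracer.start_span(operation_name='client') as span:
        tracer.inject(span.context, Format.HTTP_HEADERS, headers)
        # ...headers would be sent with an outbound HTTP request here...
    # Receiving side: rebuild the context and continue the same trace.
    server_ctx = tracer.extract(Format.HTTP_HEADERS, headers)
    child = tracer.start_span(operation_name='server', child_of=server_ctx)
    child.finish()
    assert child.context.trace_id == server_ctx.trace_id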
|
apache-2.0
|
nnethercote/servo
|
tests/wpt/web-platform-tests/tools/third_party/hpack/test/test_huffman.py
|
36
|
1981
|
# -*- coding: utf-8 -*-
from hpack.exceptions import HPACKDecodingError
from hpack.huffman_table import decode_huffman
from hpack.huffman import HuffmanEncoder
from hpack.huffman_constants import REQUEST_CODES, REQUEST_CODES_LENGTH
from hypothesis import given, example
from hypothesis.strategies import binary
class TestHuffman(object):
def test_request_huffman_decoder(self):
assert (
decode_huffman(b'\xf1\xe3\xc2\xe5\xf2:k\xa0\xab\x90\xf4\xff') ==
b"www.example.com"
)
assert decode_huffman(b'\xa8\xeb\x10d\x9c\xbf') == b"no-cache"
assert decode_huffman(b'%\xa8I\xe9[\xa9}\x7f') == b"custom-key"
assert (
decode_huffman(b'%\xa8I\xe9[\xb8\xe8\xb4\xbf') == b"custom-value"
)
def test_request_huffman_encode(self):
encoder = HuffmanEncoder(REQUEST_CODES, REQUEST_CODES_LENGTH)
assert (
encoder.encode(b"www.example.com") ==
b'\xf1\xe3\xc2\xe5\xf2:k\xa0\xab\x90\xf4\xff'
)
assert encoder.encode(b"no-cache") == b'\xa8\xeb\x10d\x9c\xbf'
assert encoder.encode(b"custom-key") == b'%\xa8I\xe9[\xa9}\x7f'
assert (
encoder.encode(b"custom-value") == b'%\xa8I\xe9[\xb8\xe8\xb4\xbf'
)
class TestHuffmanDecoder(object):
@given(data=binary())
@example(b'\xff')
@example(b'\x5f\xff\xff\xff\xff')
@example(b'\x00\x3f\xff\xff\xff')
def test_huffman_decoder_properly_handles_all_bytestrings(self, data):
"""
When given random bytestrings, either we get HPACKDecodingError or we
get a bytestring back.
"""
# The examples aren't special, they're just known to hit specific error
# paths through the state machine. Basically, they are strings that are
# definitely invalid.
try:
result = decode_huffman(data)
except HPACKDecodingError:
result = b''
assert isinstance(result, bytes)
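# A hedged companion property (reusing the imports at the top of this module):
# HPACK's Huffman code covers every byte value, so encoding followed by
# decoding should round-trip arbitrary bytestrings.
class TestHuffmanRoundTrip(object):
    @given(data=binary())
    def test_encode_decode_round_trip(self, data):
        encoder = HuffmanEncoder(REQUEST_CODES, REQUEST_CODES_LENGTH)
        assert decode_huffman(encoder.encode(data)) == data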
|
mpl-2.0
|
bigdatauniversity/edx-platform
|
openedx/core/djangoapps/profile_images/tests/helpers.py
|
117
|
1742
|
"""
Helper methods for use in profile image tests.
"""
from contextlib import contextmanager
import os
from tempfile import NamedTemporaryFile
from django.core.files.uploadedfile import UploadedFile
from PIL import Image
@contextmanager
def make_image_file(dimensions=(320, 240), extension=".jpeg", force_size=None):
"""
Yields a named temporary file created with the specified image type and
options.
    Note that the default dimensions are unequal (not a square), ensuring
    that the center-square cropping logic is exercised during tests.
The temporary file will be closed and deleted automatically upon exiting
the `with` block.
"""
image = Image.new('RGB', dimensions, "green")
image_file = NamedTemporaryFile(suffix=extension)
try:
image.save(image_file)
if force_size is not None:
image_file.seek(0, os.SEEK_END)
bytes_to_pad = force_size - image_file.tell()
# write in hunks of 256 bytes
hunk, byte_ = bytearray([0] * 256), bytearray([0])
num_hunks, remainder = divmod(bytes_to_pad, 256)
for _ in xrange(num_hunks):
image_file.write(hunk)
for _ in xrange(remainder):
image_file.write(byte_)
image_file.flush()
image_file.seek(0)
yield image_file
finally:
image_file.close()
@contextmanager
def make_uploaded_file(content_type, *a, **kw):
"""
Wrap the result of make_image_file in a django UploadedFile.
"""
with make_image_file(*a, **kw) as image_file:
yield UploadedFile(
image_file,
content_type=content_type,
size=os.path.getsize(image_file.name),
)
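# A hedged usage sketch: how a test might consume these helpers. The upload
# endpoint and field name in the trailing comment are hypothetical.
def _example_usage():
    with make_uploaded_file('image/jpeg', dimensions=(400, 400)) as uploaded:
        assert uploaded.content_type == 'image/jpeg'
        assert uploaded.size > 0
        # e.g. client.post(upload_url, {'file': uploaded})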
|
agpl-3.0
|
kinooo/Sick-Beard
|
lib/requests/packages/oauthlib/oauth1/rfc5849/parameters.py
|
186
|
4817
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
"""
oauthlib.parameters
~~~~~~~~~~~~~~~~~~~
This module contains methods related to `section 3.5`_ of the OAuth 1.0a spec.
.. _`section 3.5`: http://tools.ietf.org/html/rfc5849#section-3.5
"""
from urlparse import urlparse, urlunparse
from . import utils
from oauthlib.common import extract_params, urlencode
# TODO: do we need filter_params now that oauth_params are handled by Request?
# We can easily pass in just oauth protocol params.
@utils.filter_params
def prepare_headers(oauth_params, headers=None, realm=None):
"""**Prepare the Authorization header.**
Per `section 3.5.1`_ of the spec.
Protocol parameters can be transmitted using the HTTP "Authorization"
header field as defined by `RFC2617`_ with the auth-scheme name set to
"OAuth" (case insensitive).
For example::
Authorization: OAuth realm="Example",
oauth_consumer_key="0685bd9184jfhq22",
oauth_token="ad180jjd733klru7",
oauth_signature_method="HMAC-SHA1",
oauth_signature="wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
oauth_timestamp="137131200",
oauth_nonce="4572616e48616d6d65724c61686176",
oauth_version="1.0"
.. _`section 3.5.1`: http://tools.ietf.org/html/rfc5849#section-3.5.1
.. _`RFC2617`: http://tools.ietf.org/html/rfc2617
"""
headers = headers or {}
# Protocol parameters SHALL be included in the "Authorization" header
# field as follows:
authorization_header_parameters_parts = []
for oauth_parameter_name, value in oauth_params:
# 1. Parameter names and values are encoded per Parameter Encoding
# (`Section 3.6`_)
#
# .. _`Section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6
escaped_name = utils.escape(oauth_parameter_name)
escaped_value = utils.escape(value)
# 2. Each parameter's name is immediately followed by an "=" character
# (ASCII code 61), a """ character (ASCII code 34), the parameter
# value (MAY be empty), and another """ character (ASCII code 34).
part = u'{0}="{1}"'.format(escaped_name, escaped_value)
authorization_header_parameters_parts.append(part)
# 3. Parameters are separated by a "," character (ASCII code 44) and
# OPTIONAL linear whitespace per `RFC2617`_.
#
# .. _`RFC2617`: http://tools.ietf.org/html/rfc2617
authorization_header_parameters = ', '.join(
authorization_header_parameters_parts)
# 4. The OPTIONAL "realm" parameter MAY be added and interpreted per
# `RFC2617 section 1.2`_.
#
# .. _`RFC2617 section 1.2`: http://tools.ietf.org/html/rfc2617#section-1.2
if realm:
# NOTE: realm should *not* be escaped
authorization_header_parameters = (u'realm="%s", ' % realm +
authorization_header_parameters)
# the auth-scheme name set to "OAuth" (case insensitive).
authorization_header = u'OAuth %s' % authorization_header_parameters
# contribute the Authorization header to the given headers
full_headers = {}
full_headers.update(headers)
full_headers[u'Authorization'] = authorization_header
return full_headers
def _append_params(oauth_params, params):
"""Append OAuth params to an existing set of parameters.
    Both params and oauth_params must be lists of 2-tuples.
Per `section 3.5.2`_ and `3.5.3`_ of the spec.
.. _`section 3.5.2`: http://tools.ietf.org/html/rfc5849#section-3.5.2
.. _`3.5.3`: http://tools.ietf.org/html/rfc5849#section-3.5.3
"""
merged = list(params)
merged.extend(oauth_params)
# The request URI / entity-body MAY include other request-specific
# parameters, in which case, the protocol parameters SHOULD be appended
# following the request-specific parameters, properly separated by an "&"
# character (ASCII code 38)
merged.sort(key=lambda i: i[0].startswith('oauth_'))
return merged
def prepare_form_encoded_body(oauth_params, body):
"""Prepare the Form-Encoded Body.
Per `section 3.5.2`_ of the spec.
.. _`section 3.5.2`: http://tools.ietf.org/html/rfc5849#section-3.5.2
"""
# append OAuth params to the existing body
return _append_params(oauth_params, body)
def prepare_request_uri_query(oauth_params, uri):
"""Prepare the Request URI Query.
Per `section 3.5.3`_ of the spec.
.. _`section 3.5.3`: http://tools.ietf.org/html/rfc5849#section-3.5.3
"""
# append OAuth params to the existing set of query components
sch, net, path, par, query, fra = urlparse(uri)
query = urlencode(_append_params(oauth_params, extract_params(query) or []))
return urlunparse((sch, net, path, par, query, fra))
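# A hedged usage sketch of prepare_headers, with illustrative values taken
# from the docstring example above; computing oauth_signature is out of scope.
def _example_prepare_headers():
    oauth_params = [
        (u'oauth_consumer_key', u'0685bd9184jfhq22'),
        (u'oauth_token', u'ad180jjd733klru7'),
        (u'oauth_signature_method', u'HMAC-SHA1'),
    ]
    headers = prepare_headers(oauth_params, realm=u'Example')
    # headers[u'Authorization'] now reads:
    #   OAuth realm="Example", oauth_consumer_key="0685bd9184jfhq22", ...
    return headers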
|
gpl-3.0
|
almet/whiskerboard
|
settings/base.py
|
2
|
3218
|
from unipath import FSPath as Path
PROJECT_DIR = Path(__file__).absolute().ancestor(2)
######################################
# Main
######################################
DEBUG = True
ROOT_URLCONF = 'urls'
SITE_ID = 1
######################################
# Apps
######################################
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'south',
'board',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
######################################
# Database
######################################
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'whiskerboard',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
######################################
# Localisation
######################################
TIME_ZONE = 'Europe/London'
LANGUAGE_CODE = 'en-gb'
USE_I18N = True
USE_L10N = True
######################################
# Logging
######################################
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
######################################
# Media/Static
######################################
MEDIA_ROOT = PROJECT_DIR.parent.child('data')
MEDIA_URL = '/media/'
STATIC_ROOT = PROJECT_DIR.child('static_root')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
str(PROJECT_DIR.child('static')),
)
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
######################################
# Templates
######################################
TEMPLATE_DEBUG = DEBUG
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS = (
PROJECT_DIR.child('templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.contrib.messages.context_processors.messages",
'board.context_processors.current_site',
)
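# A hedged sketch of a per-environment override (contents of a hypothetical
# settings/local.py) following the split-settings layout this base module
# implies; the import path depends on how the settings package is named, so
# it is shown in comment form rather than executed here.
#
#     from settings.base import *  # noqa
#
#     DEBUG = False
#     DATABASES['default']['USER'] = 'whiskerboard'
#     DATABASES['default']['PASSWORD'] = 'change-me'  # placeholder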
|
mit
|