repo_name | path | copies | size | content | license
---|---|---|---|---|---|
SaberMod/binutils-saber | gdb/python/lib/gdb/command/explore.py | 126 | 26824 | # GDB 'explore' command.
# Copyright (C) 2012-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Implementation of the GDB 'explore' command using the GDB Python API."""
import gdb
import sys
if sys.version_info[0] > 2:
# Python 3 renamed raw_input to input
raw_input = input
class Explorer(object):
"""Internal class which invokes other explorers."""
# This map is filled by the Explorer.init_env() function
type_code_to_explorer_map = { }
_SCALAR_TYPE_LIST = (
gdb.TYPE_CODE_CHAR,
gdb.TYPE_CODE_INT,
gdb.TYPE_CODE_BOOL,
gdb.TYPE_CODE_FLT,
gdb.TYPE_CODE_VOID,
gdb.TYPE_CODE_ENUM,
)
@staticmethod
def guard_expr(expr):
length = len(expr)
guard = False
if expr[0] == '(' and expr[length-1] == ')':
pass
else:
i = 0
while i < length:
c = expr[i]
if (c == '_' or ('a' <= c and c <= 'z') or
('A' <= c and c <= 'Z') or ('0' <= c and c <= '9')):
pass
else:
guard = True
break
i += 1
if guard:
return "(" + expr + ")"
else:
return expr
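# Illustrative behavior of guard_expr (expressions are hypothetical):
#   guard_expr("value")    -> "value"     # bare identifier, no guard needed
#   guard_expr("a + b")    -> "(a + b)"   # non-identifier chars, guarded
#   guard_expr("(a + b)")  -> "(a + b)"   # already fully parenthesized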
@staticmethod
def explore_expr(expr, value, is_child):
"""Main function to explore an expression value.
Arguments:
expr: The expression string that is being explored.
value: The gdb.Value value of the expression.
is_child: Boolean value to indicate if the expression is a child.
An expression is a child if it is derived from the main
expression entered by the user. For example, if the user
entered an expression which evaluates to a struct, then
when exploring the fields of the struct, is_child is set
to True internally.
Returns:
No return value.
"""
type_code = value.type.code
if type_code in Explorer.type_code_to_explorer_map:
explorer_class = Explorer.type_code_to_explorer_map[type_code]
while explorer_class.explore_expr(expr, value, is_child):
pass
else:
print ("Explorer for type '%s' not yet available.\n" %
str(value.type))
@staticmethod
def explore_type(name, datatype, is_child):
"""Main function to explore a data type.
Arguments:
name: The string representing the path to the data type being
explored.
datatype: The gdb.Type value of the data type being explored.
is_child: Boolean value to indicate if the name is a child.
A name is a child if it is derived from the main name
entered by the user. For example, if the user entered
the name of a struct type, then when exploring the fields
of the struct, is_child is set to True internally.
Returns:
No return value.
"""
type_code = datatype.code
if type_code in Explorer.type_code_to_explorer_map:
explorer_class = Explorer.type_code_to_explorer_map[type_code]
while explorer_class.explore_type(name, datatype, is_child):
pass
else:
print ("Explorer for type '%s' not yet available.\n" %
str(datatype))
@staticmethod
def init_env():
"""Initializes the Explorer environment.
This function should be invoked before starting any exploration.
Once invoked, it need not be invoked again for subsequent
explorations.
"""
Explorer.type_code_to_explorer_map = {
gdb.TYPE_CODE_CHAR : ScalarExplorer,
gdb.TYPE_CODE_INT : ScalarExplorer,
gdb.TYPE_CODE_BOOL : ScalarExplorer,
gdb.TYPE_CODE_FLT : ScalarExplorer,
gdb.TYPE_CODE_VOID : ScalarExplorer,
gdb.TYPE_CODE_ENUM : ScalarExplorer,
gdb.TYPE_CODE_STRUCT : CompoundExplorer,
gdb.TYPE_CODE_UNION : CompoundExplorer,
gdb.TYPE_CODE_PTR : PointerExplorer,
gdb.TYPE_CODE_REF : ReferenceExplorer,
gdb.TYPE_CODE_TYPEDEF : TypedefExplorer,
gdb.TYPE_CODE_ARRAY : ArrayExplorer
}
@staticmethod
def is_scalar_type(type):
"""Checks whether a type is a scalar type.
A type is a scalar type if its type code is
gdb.TYPE_CODE_CHAR or
gdb.TYPE_CODE_INT or
gdb.TYPE_CODE_BOOL or
gdb.TYPE_CODE_FLT or
gdb.TYPE_CODE_VOID or
gdb.TYPE_CODE_ENUM.
Arguments:
type: The type to be checked.
Returns:
'True' if 'type' is a scalar type. 'False' otherwise.
"""
return type.code in Explorer._SCALAR_TYPE_LIST
@staticmethod
def return_to_parent_value():
"""A utility function which prints that the current exploration session
is returning to the parent value. Useful when exploring values.
"""
print ("\nReturning to parent value...\n")
@staticmethod
def return_to_parent_value_prompt():
"""A utility function which prompts the user to press the 'enter' key
so that the exploration session can shift back to the parent value.
Useful when exploring values.
"""
raw_input("\nPress enter to return to parent value: ")
@staticmethod
def return_to_enclosing_type():
"""A utility function which prints that the current exploration session
is returning to the enclosing type. Useful when exploring types.
"""
print ("\nReturning to enclosing type...\n")
@staticmethod
def return_to_enclosing_type_prompt():
"""A utility function which prompts the user to press the 'enter' key
so that the exploration session can shift back to the enclosing type.
Useful when exploring types.
"""
raw_input("\nPress enter to return to enclosing type: ")
class ScalarExplorer(object):
"""Internal class used to explore scalar values."""
@staticmethod
def explore_expr(expr, value, is_child):
"""Function to explore scalar values.
See Explorer.explore_expr and Explorer.is_scalar_type for more
information.
"""
print ("'%s' is a scalar value of type '%s'." %
(expr, value.type))
print ("%s = %s" % (expr, str(value)))
if is_child:
Explorer.return_to_parent_value_prompt()
Explorer.return_to_parent_value()
return False
@staticmethod
def explore_type(name, datatype, is_child):
"""Function to explore scalar types.
See Explorer.explore_type and Explorer.is_scalar_type for more
information.
"""
if datatype.code == gdb.TYPE_CODE_ENUM:
if is_child:
print ("%s is of an enumerated type '%s'." %
(name, str(datatype)))
else:
print ("'%s' is an enumerated type." % name)
else:
if is_child:
print ("%s is of a scalar type '%s'." %
(name, str(datatype)))
else:
print ("'%s' is a scalar type." % name)
if is_child:
Explorer.return_to_enclosing_type_prompt()
Explorer.return_to_enclosing_type()
return False
class PointerExplorer(object):
"""Internal class used to explore pointer values."""
@staticmethod
def explore_expr(expr, value, is_child):
"""Function to explore pointer values.
See Explorer.explore_expr for more information.
"""
print ("'%s' is a pointer to a value of type '%s'" %
(expr, str(value.type.target())))
option = raw_input("Continue exploring it as a pointer to a single "
"value [y/n]: ")
if option == "y":
deref_value = None
try:
deref_value = value.dereference()
str(deref_value)
except gdb.MemoryError:
print ("'%s' a pointer pointing to an invalid memory "
"location." % expr)
if is_child:
Explorer.return_to_parent_value_prompt()
return False
Explorer.explore_expr("*%s" % Explorer.guard_expr(expr),
deref_value, is_child)
return False
option = raw_input("Continue exploring it as a pointer to an "
"array [y/n]: ")
if option == "y":
while True:
index = 0
try:
index = int(raw_input("Enter the index of the element you "
"want to explore in '%s': " % expr))
except ValueError:
break
element_expr = "%s[%d]" % (Explorer.guard_expr(expr), index)
element = value[index]
try:
str(element)
except gdb.MemoryError:
print ("Cannot read value at index %d." % index)
continue
Explorer.explore_expr(element_expr, element, True)
return False
if is_child:
Explorer.return_to_parent_value()
return False
@staticmethod
def explore_type(name, datatype, is_child):
"""Function to explore pointer types.
See Explorer.explore_type for more information.
"""
target_type = datatype.target()
print ("\n%s is a pointer to a value of type '%s'." %
(name, str(target_type)))
Explorer.explore_type("the pointee type of %s" % name,
target_type,
is_child)
return False
class ReferenceExplorer(object):
"""Internal class used to explore reference (TYPE_CODE_REF) values."""
@staticmethod
def explore_expr(expr, value, is_child):
"""Function to explore array values.
See Explorer.explore_expr for more information.
"""
referenced_value = value.referenced_value()
Explorer.explore_expr(expr, referenced_value, is_child)
return False
@staticmethod
def explore_type(name, datatype, is_child):
"""Function to explore pointer types.
See Explorer.explore_type for more information.
"""
target_type = datatype.target()
Explorer.explore_type(name, target_type, is_child)
return False
class ArrayExplorer(object):
"""Internal class used to explore arrays."""
@staticmethod
def explore_expr(expr, value, is_child):
"""Function to explore array values.
See Explorer.explore_expr for more information.
"""
target_type = value.type.target()
print ("'%s' is an array of '%s'." % (expr, str(target_type)))
index = 0
try:
index = int(raw_input("Enter the index of the element you want to "
"explore in '%s': " % expr))
except ValueError:
if is_child:
Explorer.return_to_parent_value()
return False
element = None
try:
element = value[index]
str(element)
except gdb.MemoryError:
print ("Cannot read value at index %d." % index)
raw_input("Press enter to continue... ")
return True
Explorer.explore_expr("%s[%d]" % (Explorer.guard_expr(expr), index),
element, True)
return True
@staticmethod
def explore_type(name, datatype, is_child):
"""Function to explore array types.
See Explorer.explore_type for more information.
"""
target_type = datatype.target()
print ("%s is an array of '%s'." % (name, str(target_type)))
Explorer.explore_type("the array element of %s" % name, target_type,
is_child)
return False
class CompoundExplorer(object):
"""Internal class used to explore struct, classes and unions."""
@staticmethod
def _print_fields(print_list):
"""Internal function which prints the fields of a struct/class/union.
"""
max_field_name_length = 0
for pair in print_list:
if max_field_name_length < len(pair[0]):
max_field_name_length = len(pair[0])
for pair in print_list:
print (" %*s = %s" % (max_field_name_length, pair[0], pair[1]))
@staticmethod
def _get_real_field_count(fields):
real_field_count = 0
for field in fields:
if not field.artificial:
real_field_count = real_field_count + 1
return real_field_count
@staticmethod
def explore_expr(expr, value, is_child):
"""Function to explore structs/classes and union values.
See Explorer.explore_expr for more information.
"""
datatype = value.type
type_code = datatype.code
fields = datatype.fields()
if type_code == gdb.TYPE_CODE_STRUCT:
type_desc = "struct/class"
else:
type_desc = "union"
if CompoundExplorer._get_real_field_count(fields) == 0:
print ("The value of '%s' is a %s of type '%s' with no fields." %
(expr, type_desc, str(value.type)))
if is_child:
Explorer.return_to_parent_value_prompt()
return False
print ("The value of '%s' is a %s of type '%s' with the following "
"fields:\n" % (expr, type_desc, str(value.type)))
has_explorable_fields = False
choice_to_compound_field_map = { }
current_choice = 0
print_list = [ ]
for field in fields:
if field.artificial:
continue
field_full_name = Explorer.guard_expr(expr) + "." + field.name
if field.is_base_class:
field_value = value.cast(field.type)
else:
field_value = value[field.name]
literal_value = ""
if type_code == gdb.TYPE_CODE_UNION:
literal_value = ("<Enter %d to explore this field of type "
"'%s'>" % (current_choice, str(field.type)))
has_explorable_fields = True
else:
if Explorer.is_scalar_type(field.type):
literal_value = ("%s .. (Value of type '%s')" %
(str(field_value), str(field.type)))
else:
if field.is_base_class:
field_desc = "base class"
else:
field_desc = "field"
literal_value = ("<Enter %d to explore this %s of type "
"'%s'>" %
(current_choice, field_desc,
str(field.type)))
has_explorable_fields = True
choice_to_compound_field_map[str(current_choice)] = (
field_full_name, field_value)
current_choice = current_choice + 1
print_list.append((field.name, literal_value))
CompoundExplorer._print_fields(print_list)
print ("")
if has_explorable_fields:
choice = raw_input("Enter the field number of choice: ")
if choice in choice_to_compound_field_map:
Explorer.explore_expr(choice_to_compound_field_map[choice][0],
choice_to_compound_field_map[choice][1],
True)
return True
else:
if is_child:
Explorer.return_to_parent_value()
else:
if is_child:
Explorer.return_to_parent_value_prompt()
return False
@staticmethod
def explore_type(name, datatype, is_child):
"""Function to explore struct/class and union types.
See Explorer.explore_type for more information.
"""
type_code = datatype.code
type_desc = ""
if type_code == gdb.TYPE_CODE_STRUCT:
type_desc = "struct/class"
else:
type_desc = "union"
fields = datatype.fields()
if CompoundExplorer._get_real_field_count(fields) == 0:
if is_child:
print ("%s is a %s of type '%s' with no fields." %
(name, type_desc, str(datatype)))
Explorer.return_to_enclosing_type_prompt()
else:
print ("'%s' is a %s with no fields." % (name, type_desc))
return False
if is_child:
print ("%s is a %s of type '%s' "
"with the following fields:\n" %
(name, type_desc, str(datatype)))
else:
print ("'%s' is a %s with the following "
"fields:\n" %
(name, type_desc))
has_explorable_fields = False
current_choice = 0
choice_to_compound_field_map = { }
print_list = [ ]
for field in fields:
if field.artificial:
continue
if field.is_base_class:
field_desc = "base class"
else:
field_desc = "field"
rhs = ("<Enter %d to explore this %s of type '%s'>" %
(current_choice, field_desc, str(field.type)))
print_list.append((field.name, rhs))
choice_to_compound_field_map[str(current_choice)] = (
field.name, field.type, field_desc)
current_choice = current_choice + 1
CompoundExplorer._print_fields(print_list)
print ("")
if len(choice_to_compound_field_map) > 0:
choice = raw_input("Enter the field number of choice: ")
if choice in choice_to_compound_field_map:
if is_child:
new_name = ("%s '%s' of %s" %
(choice_to_compound_field_map[choice][2],
choice_to_compound_field_map[choice][0],
name))
else:
new_name = ("%s '%s' of '%s'" %
(choice_to_compound_field_map[choice][2],
choice_to_compound_field_map[choice][0],
name))
Explorer.explore_type(new_name,
choice_to_compound_field_map[choice][1], True)
return True
else:
if is_child:
Explorer.return_to_enclosing_type()
else:
if is_child:
Explorer.return_to_enclosing_type_prompt()
return False
class TypedefExplorer(object):
"""Internal class used to explore values whose type is a typedef."""
@staticmethod
def explore_expr(expr, value, is_child):
"""Function to explore typedef values.
See Explorer.explore_expr for more information.
"""
actual_type = value.type.strip_typedefs()
print ("The value of '%s' is of type '%s' "
"which is a typedef of type '%s'" %
(expr, str(value.type), str(actual_type)))
Explorer.explore_expr(expr, value.cast(actual_type), is_child)
return False
@staticmethod
def explore_type(name, datatype, is_child):
"""Function to explore typedef types.
See Explorer.explore_type for more information.
"""
actual_type = datatype.strip_typedefs()
if is_child:
print ("The type of %s is a typedef of type '%s'." %
(name, str(actual_type)))
else:
print ("The type '%s' is a typedef of type '%s'." %
(name, str(actual_type)))
Explorer.explore_type(name, actual_type, is_child)
return False
class ExploreUtils(object):
"""Internal class which provides utilities for the main command classes."""
@staticmethod
def check_args(name, arg_str):
"""Utility to check if adequate number of arguments are passed to an
explore command.
Arguments:
name: The name of the explore command.
arg_str: The argument string passed to the explore command.
Returns:
True if adequate arguments are passed, false otherwise.
Raises:
gdb.GdbError if adequate arguments are not passed.
"""
if len(arg_str) < 1:
raise gdb.GdbError("ERROR: '%s' requires an argument."
% name)
return True
@staticmethod
def get_type_from_str(type_str):
"""A utility function to deduce the gdb.Type value from a string
representing the type.
Arguments:
type_str: The type string from which the gdb.Type value should be
deduced.
Returns:
The deduced gdb.Type value if possible, None otherwise.
"""
try:
# Assume the current language to be C/C++ and make a try.
return gdb.parse_and_eval("(%s *)0" % type_str).type.target()
except RuntimeError:
# If assumption of current language to be C/C++ was wrong, then
# lookup the type using the API.
try:
return gdb.lookup_type(type_str)
except RuntimeError:
return None
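# Illustrative use (the type name is hypothetical):
#   ExploreUtils.get_type_from_str("struct node")  # -> gdb.Type or None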
@staticmethod
def get_value_from_str(value_str):
"""A utility function to deduce the gdb.Value value from a string
representing the value.
Arguments:
value_str: The value string from which the gdb.Value value should
be deduced.
Returns:
The deduced gdb.Value value if possible, None otherwise.
"""
try:
return gdb.parse_and_eval(value_str)
except RuntimeError:
return None
class ExploreCommand(gdb.Command):
"""Explore a value or a type valid in the current context.
Usage:
explore ARG
- ARG is either a valid expression or a type name.
- At any stage of exploration, hit the return key (instead of a
choice, if any) to return to the enclosing type or value.
"""
def __init__(self):
super(ExploreCommand, self).__init__(name = "explore",
command_class = gdb.COMMAND_DATA,
prefix = True)
def invoke(self, arg_str, from_tty):
if ExploreUtils.check_args("explore", arg_str) == False:
return
# Check if it is a value
value = ExploreUtils.get_value_from_str(arg_str)
if value is not None:
Explorer.explore_expr(arg_str, value, False)
return
# If it is not a value, check if it is a type
datatype = ExploreUtils.get_type_from_str(arg_str)
if datatype is not None:
Explorer.explore_type(arg_str, datatype, False)
return
# If it is neither a value nor a type, raise an error.
raise gdb.GdbError(
("'%s' neither evaluates to a value nor is a type "
"in the current context." %
arg_str))
class ExploreValueCommand(gdb.Command):
"""Explore value of an expression valid in the current context.
Usage:
explore value ARG
- ARG is a valid expression.
- At any stage of exploration, hit the return key (instead of a
choice, if any) to return to the enclosing value.
"""
def __init__(self):
super(ExploreValueCommand, self).__init__(
name = "explore value", command_class = gdb.COMMAND_DATA)
def invoke(self, arg_str, from_tty):
if ExploreUtils.check_args("explore value", arg_str) == False:
return
value = ExploreUtils.get_value_from_str(arg_str)
if value is None:
raise gdb.GdbError(
(" '%s' does not evaluate to a value in the current "
"context." %
arg_str))
Explorer.explore_expr(arg_str, value, False)
class ExploreTypeCommand(gdb.Command):
"""Explore a type or the type of an expression valid in the current
context.
Usage:
explore type ARG
- ARG is a valid expression or a type name.
- At any stage of exploration, hit the return key (instead of a
choice, if any) to return to the enclosing type.
"""
def __init__(self):
super(ExploreTypeCommand, self).__init__(
name = "explore type", command_class = gdb.COMMAND_DATA)
def invoke(self, arg_str, from_tty):
if ExploreUtils.check_args("explore type", arg_str) == False:
return
datatype = ExploreUtils.get_type_from_str(arg_str)
if datatype is not None:
Explorer.explore_type(arg_str, datatype, False)
return
value = ExploreUtils.get_value_from_str(arg_str)
if value is not None:
print ("'%s' is of type '%s'." % (arg_str, str(value.type)))
Explorer.explore_type(str(value.type), value.type, False)
return
raise gdb.GdbError(("'%s' is not a type or value in the current "
"context." % arg_str))
Explorer.init_env()
ExploreCommand()
ExploreValueCommand()
ExploreTypeCommand()
| gpl-2.0 |
sbalde/edxplatform | cms/djangoapps/contentstore/tests/test_transcripts_utils.py | 109 | 23402 | # -*- coding: utf-8 -*-
""" Tests for transcripts_utils. """
import unittest
from uuid import uuid4
import copy
import textwrap
from mock import patch, Mock
from django.test.utils import override_settings
from django.conf import settings
from django.utils import translation
from nose.plugins.skip import SkipTest
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.contentstore.content import StaticContent
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.exceptions import NotFoundError
from xmodule.contentstore.django import contentstore
from xmodule.video_module import transcripts_utils
from contentstore.tests.utils import mock_requests_get
TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE)
TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'] = 'test_xcontent_%s' % uuid4().hex
class TestGenerateSubs(unittest.TestCase):
"""Tests for `generate_subs` function."""
def setUp(self):
super(TestGenerateSubs, self).setUp()
self.source_subs = {
'start': [100, 200, 240, 390, 1000],
'end': [200, 240, 380, 1000, 1500],
'text': [
'subs #1',
'subs #2',
'subs #3',
'subs #4',
'subs #5'
]
}
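# Times above are in milliseconds; generate_subs rescales them by the
# ratio of its two speed arguments (first over second), as the
# assertions in the tests below verify.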
def test_generate_subs_increase_speed(self):
subs = transcripts_utils.generate_subs(2, 1, self.source_subs)
self.assertDictEqual(
subs,
{
'start': [200, 400, 480, 780, 2000],
'end': [400, 480, 760, 2000, 3000],
'text': ['subs #1', 'subs #2', 'subs #3', 'subs #4', 'subs #5']
}
)
def test_generate_subs_decrease_speed_1(self):
subs = transcripts_utils.generate_subs(0.5, 1, self.source_subs)
self.assertDictEqual(
subs,
{
'start': [50, 100, 120, 195, 500],
'end': [100, 120, 190, 500, 750],
'text': ['subs #1', 'subs #2', 'subs #3', 'subs #4', 'subs #5']
}
)
def test_generate_subs_decrease_speed_2(self):
"""Test for correct devision during `generate_subs` process."""
subs = transcripts_utils.generate_subs(1, 2, self.source_subs)
self.assertDictEqual(
subs,
{
'start': [50, 100, 120, 195, 500],
'end': [100, 120, 190, 500, 750],
'text': ['subs #1', 'subs #2', 'subs #3', 'subs #4', 'subs #5']
}
)
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE)
class TestSaveSubsToStore(ModuleStoreTestCase):
"""Tests for `save_subs_to_store` function."""
org = 'MITx'
number = '999'
display_name = 'Test course'
def clear_subs_content(self):
"""Remove, if subtitles content exists."""
try:
content = contentstore().find(self.content_location)
contentstore().delete(content.location)
except NotFoundError:
pass
def setUp(self):
super(TestSaveSubsToStore, self).setUp()
self.course = CourseFactory.create(
org=self.org, number=self.number, display_name=self.display_name)
self.subs = {
'start': [100, 200, 240, 390, 1000],
'end': [200, 240, 380, 1000, 1500],
'text': [
'subs #1',
'subs #2',
'subs #3',
'subs #4',
'subs #5'
]
}
self.subs_id = str(uuid4())
filename = 'subs_{0}.srt.sjson'.format(self.subs_id)
self.content_location = StaticContent.compute_location(self.course.id, filename)
self.addCleanup(self.clear_subs_content)
# incorrect subs
self.unjsonable_subs = set([1]) # set can't be serialized
self.unjsonable_subs_id = str(uuid4())
filename_unjsonable = 'subs_{0}.srt.sjson'.format(self.unjsonable_subs_id)
self.content_location_unjsonable = StaticContent.compute_location(self.course.id, filename_unjsonable)
self.clear_subs_content()
def test_save_subs_to_store(self):
with self.assertRaises(NotFoundError):
contentstore().find(self.content_location)
result_location = transcripts_utils.save_subs_to_store(
self.subs,
self.subs_id,
self.course)
self.assertTrue(contentstore().find(self.content_location))
self.assertEqual(result_location, self.content_location)
def test_save_unjsonable_subs_to_store(self):
"""
Ensures that subs which can't be dumped can't be found later.
"""
with self.assertRaises(NotFoundError):
contentstore().find(self.content_location_unjsonable)
with self.assertRaises(TypeError):
transcripts_utils.save_subs_to_store(
self.unjsonable_subs,
self.unjsonable_subs_id,
self.course)
with self.assertRaises(NotFoundError):
contentstore().find(self.content_location_unjsonable)
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE)
class TestDownloadYoutubeSubs(ModuleStoreTestCase):
"""Tests for `download_youtube_subs` function."""
org = 'MITx'
number = '999'
display_name = 'Test course'
def clear_sub_content(self, subs_id):
"""
Remove, if subtitle content exists.
"""
filename = 'subs_{0}.srt.sjson'.format(subs_id)
content_location = StaticContent.compute_location(self.course.id, filename)
try:
content = contentstore().find(content_location)
contentstore().delete(content.location)
except NotFoundError:
pass
def clear_subs_content(self, youtube_subs):
"""
Remove, if subtitles content exists.
youtube_subs: dict of '{speed: youtube_id}' format for different speeds.
"""
for subs_id in youtube_subs.values():
self.clear_sub_content(subs_id)
def setUp(self):
super(TestDownloadYoutubeSubs, self).setUp()
self.course = CourseFactory.create(
org=self.org, number=self.number, display_name=self.display_name)
def test_success_downloading_subs(self):
response = textwrap.dedent("""<?xml version="1.0" encoding="utf-8" ?>
<transcript>
<text start="0" dur="0.27"></text>
<text start="0.27" dur="2.45">Test text 1.</text>
<text start="2.72">Test text 2.</text>
<text start="5.43" dur="1.73">Test text 3.</text>
</transcript>
""")
good_youtube_sub = 'good_id_2'
self.clear_sub_content(good_youtube_sub)
with patch('xmodule.video_module.transcripts_utils.requests.get') as mock_get:
mock_get.return_value = Mock(status_code=200, text=response, content=response)
# Check transcripts_utils.GetTranscriptsFromYouTubeException not thrown
transcripts_utils.download_youtube_subs(good_youtube_sub, self.course, settings)
mock_get.assert_any_call('http://video.google.com/timedtext', params={'lang': 'en', 'v': 'good_id_2'})
# Check asset status after import of transcript.
filename = 'subs_{0}.srt.sjson'.format(good_youtube_sub)
content_location = StaticContent.compute_location(self.course.id, filename)
self.assertTrue(contentstore().find(content_location))
self.clear_sub_content(good_youtube_sub)
def test_subs_for_html5_vid_with_periods(self):
"""
This is to verify a fix whereby subtitle files uploaded against
an HTML5 video whose name contains periods no longer cause
incorrect subs name parsing.
"""
html5_ids = transcripts_utils.get_html5_ids(['foo.mp4', 'foo.1.bar.mp4', 'foo/bar/baz.1.4.mp4', 'foo'])
self.assertEqual(4, len(html5_ids))
self.assertEqual(html5_ids[0], 'foo')
self.assertEqual(html5_ids[1], 'foo.1.bar')
self.assertEqual(html5_ids[2], 'baz.1.4')
self.assertEqual(html5_ids[3], 'foo')
@patch('xmodule.video_module.transcripts_utils.requests.get')
def test_fail_downloading_subs(self, mock_get):
mock_get.return_value = Mock(status_code=404, text='Error 404')
bad_youtube_sub = 'BAD_YOUTUBE_ID2'
self.clear_sub_content(bad_youtube_sub)
with self.assertRaises(transcripts_utils.GetTranscriptsFromYouTubeException):
transcripts_utils.download_youtube_subs(bad_youtube_sub, self.course, settings)
# Check asset status after import of transcript.
filename = 'subs_{0}.srt.sjson'.format(bad_youtube_sub)
content_location = StaticContent.compute_location(
self.course.id, filename
)
with self.assertRaises(NotFoundError):
contentstore().find(content_location)
self.clear_sub_content(bad_youtube_sub)
def test_success_downloading_chinese_transcripts(self):
# Disabled 11/14/13
# This test is flaky because it performs an HTTP request on an external service
# Re-enable when `requests.get` is patched using `mock.patch`
raise SkipTest
good_youtube_sub = 'j_jEn79vS3g' # Chinese, utf-8
self.clear_sub_content(good_youtube_sub)
# Check transcripts_utils.GetTranscriptsFromYouTubeException not thrown
transcripts_utils.download_youtube_subs(good_youtube_sub, self.course, settings)
# Check asset status after import of transcript.
filename = 'subs_{0}.srt.sjson'.format(good_youtube_sub)
content_location = StaticContent.compute_location(
self.course.id, filename
)
self.assertTrue(contentstore().find(content_location))
self.clear_sub_content(good_youtube_sub)
@patch('xmodule.video_module.transcripts_utils.requests.get')
def test_get_transcript_name_youtube_server_success(self, mock_get):
"""
Get the transcript name from the transcript_list fetched from the
youtube server api. It depends on the language code; the default
language in the YOUTUBE Text Api is "en".
"""
youtube_text_api = copy.deepcopy(settings.YOUTUBE['TEXT_API'])
youtube_text_api['params']['v'] = 'dummy_video_id'
response_success = """
<transcript_list>
<track id="1" name="Custom" lang_code="en" />
<track id="0" name="Custom1" lang_code="en-GB"/>
</transcript_list>
"""
mock_get.return_value = Mock(status_code=200, text=response_success, content=response_success)
transcript_name = transcripts_utils.youtube_video_transcript_name(youtube_text_api)
self.assertEqual(transcript_name, 'Custom')
@patch('xmodule.video_module.transcripts_utils.requests.get')
def test_get_transcript_name_youtube_server_no_transcripts(self, mock_get):
"""
When there are no transcripts for a video, the transcript name will be None
"""
youtube_text_api = copy.deepcopy(settings.YOUTUBE['TEXT_API'])
youtube_text_api['params']['v'] = 'dummy_video_id'
response_success = "<transcript_list></transcript_list>"
mock_get.return_value = Mock(status_code=200, text=response_success, content=response_success)
transcript_name = transcripts_utils.youtube_video_transcript_name(youtube_text_api)
self.assertIsNone(transcript_name)
@patch('xmodule.video_module.transcripts_utils.requests.get')
def test_get_transcript_name_youtube_server_language_not_exist(self, mock_get):
"""
When the language does not exist in transcript_list, the transcript name will be None
"""
youtube_text_api = copy.deepcopy(settings.YOUTUBE['TEXT_API'])
youtube_text_api['params']['v'] = 'dummy_video_id'
youtube_text_api['params']['lang'] = 'abc'
response_success = """
<transcript_list>
<track id="1" name="Custom" lang_code="en" />
<track id="0" name="Custom1" lang_code="en-GB"/>
</transcript_list>
"""
mock_get.return_value = Mock(status_code=200, text=response_success, content=response_success)
transcript_name = transcripts_utils.youtube_video_transcript_name(youtube_text_api)
self.assertIsNone(transcript_name)
@patch('xmodule.video_module.transcripts_utils.requests.get', side_effect=mock_requests_get)
def test_downloading_subs_using_transcript_name(self, mock_get):
"""
Download transcript using transcript name in url
"""
good_youtube_sub = 'good_id_2'
self.clear_sub_content(good_youtube_sub)
transcripts_utils.download_youtube_subs(good_youtube_sub, self.course, settings)
mock_get.assert_any_call(
'http://video.google.com/timedtext',
params={'lang': 'en', 'v': 'good_id_2', 'name': 'Custom'}
)
# Check asset status after import of transcript.
filename = 'subs_{0}.srt.sjson'.format(good_youtube_sub)
content_location = StaticContent.compute_location(self.course.id, filename)
self.assertTrue(contentstore().find(content_location))
self.clear_sub_content(good_youtube_sub)
class TestGenerateSubsFromSource(TestDownloadYoutubeSubs):
"""Tests for `generate_subs_from_source` function."""
def test_success_generating_subs(self):
youtube_subs = {
0.5: 'JMD_ifUUfsU',
1.0: 'hI10vDNYz4M',
2.0: 'AKqURZnYqpk'
}
srt_filedata = textwrap.dedent("""
1
00:00:10,500 --> 00:00:13,000
Elephant's Dream
2
00:00:15,000 --> 00:00:18,000
At the left we can see...
""")
self.clear_subs_content(youtube_subs)
# Check transcripts_utils.TranscriptsGenerationException not thrown.
# Also checks that uppercase file extensions are supported.
transcripts_utils.generate_subs_from_source(youtube_subs, 'SRT', srt_filedata, self.course)
# Check assets status after importing subtitles.
for subs_id in youtube_subs.values():
filename = 'subs_{0}.srt.sjson'.format(subs_id)
content_location = StaticContent.compute_location(
self.course.id, filename
)
self.assertTrue(contentstore().find(content_location))
self.clear_subs_content(youtube_subs)
def test_fail_bad_subs_type(self):
youtube_subs = {
0.5: 'JMD_ifUUfsU',
1.0: 'hI10vDNYz4M',
2.0: 'AKqURZnYqpk'
}
srt_filedata = textwrap.dedent("""
1
00:00:10,500 --> 00:00:13,000
Elephant's Dream
2
00:00:15,000 --> 00:00:18,000
At the left we can see...
""")
with self.assertRaises(transcripts_utils.TranscriptsGenerationException) as cm:
transcripts_utils.generate_subs_from_source(youtube_subs, 'BAD_FORMAT', srt_filedata, self.course)
exception_message = cm.exception.message
self.assertEqual(exception_message, "We support only SubRip (*.srt) transcripts format.")
def test_fail_bad_subs_filedata(self):
youtube_subs = {
0.5: 'JMD_ifUUfsU',
1.0: 'hI10vDNYz4M',
2.0: 'AKqURZnYqpk'
}
srt_filedata = """BAD_DATA"""
with self.assertRaises(transcripts_utils.TranscriptsGenerationException) as cm:
transcripts_utils.generate_subs_from_source(youtube_subs, 'srt', srt_filedata, self.course)
exception_message = cm.exception.message
self.assertEqual(exception_message, "Something wrong with SubRip transcripts file during parsing.")
class TestGenerateSrtFromSjson(TestDownloadYoutubeSubs):
"""Tests for `generate_srt_from_sjson` function."""
def test_success_generating_subs(self):
sjson_subs = {
'start': [100, 200, 240, 390, 54000],
'end': [200, 240, 380, 1000, 78400],
'text': [
'subs #1',
'subs #2',
'subs #3',
'subs #4',
'subs #5'
]
}
srt_subs = transcripts_utils.generate_srt_from_sjson(sjson_subs, 1)
self.assertTrue(srt_subs)
expected_subs = [
'00:00:00,100 --> 00:00:00,200\nsubs #1',
'00:00:00,200 --> 00:00:00,240\nsubs #2',
'00:00:00,240 --> 00:00:00,380\nsubs #3',
'00:00:00,390 --> 00:00:01,000\nsubs #4',
'00:00:54,000 --> 00:01:18,400\nsubs #5',
]
for sub in expected_subs:
self.assertIn(sub, srt_subs)
def test_success_generating_subs_speed_up(self):
sjson_subs = {
'start': [100, 200, 240, 390, 54000],
'end': [200, 240, 380, 1000, 78400],
'text': [
'subs #1',
'subs #2',
'subs #3',
'subs #4',
'subs #5'
]
}
srt_subs = transcripts_utils.generate_srt_from_sjson(sjson_subs, 0.5)
self.assertTrue(srt_subs)
expected_subs = [
'00:00:00,050 --> 00:00:00,100\nsubs #1',
'00:00:00,100 --> 00:00:00,120\nsubs #2',
'00:00:00,120 --> 00:00:00,190\nsubs #3',
'00:00:00,195 --> 00:00:00,500\nsubs #4',
'00:00:27,000 --> 00:00:39,200\nsubs #5',
]
for sub in expected_subs:
self.assertIn(sub, srt_subs)
def test_success_generating_subs_speed_down(self):
sjson_subs = {
'start': [100, 200, 240, 390, 54000],
'end': [200, 240, 380, 1000, 78400],
'text': [
'subs #1',
'subs #2',
'subs #3',
'subs #4',
'subs #5'
]
}
srt_subs = transcripts_utils.generate_srt_from_sjson(sjson_subs, 2)
self.assertTrue(srt_subs)
expected_subs = [
'00:00:00,200 --> 00:00:00,400\nsubs #1',
'00:00:00,400 --> 00:00:00,480\nsubs #2',
'00:00:00,480 --> 00:00:00,760\nsubs #3',
'00:00:00,780 --> 00:00:02,000\nsubs #4',
'00:01:48,000 --> 00:02:36,800\nsubs #5',
]
for sub in expected_subs:
self.assertIn(sub, srt_subs)
def test_fail_generating_subs(self):
sjson_subs = {
'start': [100, 200],
'end': [100],
'text': [
'subs #1',
'subs #2'
]
}
srt_subs = transcripts_utils.generate_srt_from_sjson(sjson_subs, 1)
self.assertFalse(srt_subs)
class TestYoutubeTranscripts(unittest.TestCase):
"""
Tests checking that the right data structure is returned when using the youtube api.
"""
@patch('xmodule.video_module.transcripts_utils.requests.get')
def test_youtube_bad_status_code(self, mock_get):
mock_get.return_value = Mock(status_code=404, text='test')
youtube_id = 'bad_youtube_id'
with self.assertRaises(transcripts_utils.GetTranscriptsFromYouTubeException):
transcripts_utils.get_transcripts_from_youtube(youtube_id, settings, translation)
@patch('xmodule.video_module.transcripts_utils.requests.get')
def test_youtube_empty_text(self, mock_get):
mock_get.return_value = Mock(status_code=200, text='')
youtube_id = 'bad_youtube_id'
with self.assertRaises(transcripts_utils.GetTranscriptsFromYouTubeException):
transcripts_utils.get_transcripts_from_youtube(youtube_id, settings, translation)
def test_youtube_good_result(self):
response = textwrap.dedent("""<?xml version="1.0" encoding="utf-8" ?>
<transcript>
<text start="0" dur="0.27"></text>
<text start="0.27" dur="2.45">Test text 1.</text>
<text start="2.72">Test text 2.</text>
<text start="5.43" dur="1.73">Test text 3.</text>
</transcript>
""")
expected_transcripts = {
'start': [270, 2720, 5430],
'end': [2720, 2720, 7160],
'text': ['Test text 1.', 'Test text 2.', 'Test text 3.']
}
youtube_id = 'good_youtube_id'
with patch('xmodule.video_module.transcripts_utils.requests.get') as mock_get:
mock_get.return_value = Mock(status_code=200, text=response, content=response)
transcripts = transcripts_utils.get_transcripts_from_youtube(youtube_id, settings, translation)
self.assertEqual(transcripts, expected_transcripts)
mock_get.assert_called_with('http://video.google.com/timedtext', params={'lang': 'en', 'v': 'good_youtube_id'})
class TestTranscript(unittest.TestCase):
"""
Tests for Transcript class e.g. different transcript conversions.
"""
def setUp(self):
super(TestTranscript, self).setUp()
self.srt_transcript = textwrap.dedent("""\
0
00:00:10,500 --> 00:00:13,000
Elephant's Dream
1
00:00:15,000 --> 00:00:18,000
At the left we can see...
""")
self.sjson_transcript = textwrap.dedent("""\
{
"start": [
10500,
15000
],
"end": [
13000,
18000
],
"text": [
"Elephant's Dream",
"At the left we can see..."
]
}
""")
self.txt_transcript = u"Elephant's Dream\nAt the left we can see..."
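# The same two cues in three representations: SubRip (srt), the
# internal sjson dict (times in milliseconds), and plain text.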
def test_convert_srt_to_txt(self):
expected = self.txt_transcript
actual = transcripts_utils.Transcript.convert(self.srt_transcript, 'srt', 'txt')
self.assertEqual(actual, expected)
def test_convert_srt_to_srt(self):
expected = self.srt_transcript
actual = transcripts_utils.Transcript.convert(self.srt_transcript, 'srt', 'srt')
self.assertEqual(actual, expected)
def test_convert_sjson_to_txt(self):
expected = self.txt_transcript
actual = transcripts_utils.Transcript.convert(self.sjson_transcript, 'sjson', 'txt')
self.assertEqual(actual, expected)
def test_convert_sjson_to_srt(self):
expected = self.srt_transcript
actual = transcripts_utils.Transcript.convert(self.sjson_transcript, 'sjson', 'srt')
self.assertEqual(actual, expected)
def test_convert_srt_to_sjson(self):
with self.assertRaises(NotImplementedError):
transcripts_utils.Transcript.convert(self.srt_transcript, 'srt', 'sjson')
class TestSubsFilename(unittest.TestCase):
"""
Tests for the subs_filename function.
"""
def test_unicode(self):
name = transcripts_utils.subs_filename(u"˙∆©ƒƒƒ")
self.assertEqual(name, u'subs_˙∆©ƒƒƒ.srt.sjson')
name = transcripts_utils.subs_filename(u"˙∆©ƒƒƒ", 'uk')
self.assertEqual(name, u'uk_subs_˙∆©ƒƒƒ.srt.sjson')
| agpl-3.0 |
mtlchun/edx | lms/djangoapps/class_dashboard/views.py | 41 | 3837 | """
Handles requests for data, returning a json
"""
import logging
import json
from django.http import HttpResponse
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from courseware.courses import get_course_with_access
from courseware.access import has_access
from class_dashboard import dashboard_data
log = logging.getLogger(__name__)
def has_instructor_access_for_class(user, course_id):
"""
Returns true if the `user` is an instructor for the course.
"""
course = get_course_with_access(user, 'staff', course_id, depth=None)
return has_access(user, 'staff', course)
def all_sequential_open_distrib(request, course_id):
"""
Creates a json with the open distribution for all the subsections in the course.
`request` django request
`course_id` the course ID for the course of interest
Returns the format in dashboard_data.get_d3_sequential_open_distrib
"""
data = {}
# Only instructor for this particular course can request this information
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
if has_instructor_access_for_class(request.user, course_key):
try:
data = dashboard_data.get_d3_sequential_open_distrib(course_key)
except Exception as ex: # pylint: disable=broad-except
log.error('Generating metrics failed with exception: %s', ex)
data = {'error': "error"}
else:
data = {'error': "Access Denied: User does not have access to this course's data"}
return HttpResponse(json.dumps(data), mimetype="application/json")
def all_problem_grade_distribution(request, course_id):
"""
Creates a json with the grade distribution for all the problems in the course.
`Request` django request
`course_id` the course ID for the course of interest
Returns the format in dashboard_data.get_d3_problem_grade_distrib
"""
data = {}
# Only instructor for this particular course can request this information
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
if has_instructor_access_for_class(request.user, course_key):
try:
data = dashboard_data.get_d3_problem_grade_distrib(course_key)
except Exception as ex: # pylint: disable=broad-except
log.error('Generating metrics failed with exception: %s', ex)
data = {'error': "error"}
else:
data = {'error': "Access Denied: User does not have access to this course's data"}
return HttpResponse(json.dumps(data), mimetype="application/json")
def section_problem_grade_distrib(request, course_id, section):
"""
Creates a json with the grade distribution for the problems in the specified section.
`request` django request
`course_id` the course ID for the course of interest
`section` The zero-based index of the section for the course
Returns the format in dashboard_data.get_d3_section_grade_distrib
If this is requested multiple times quickly for the same course, it is better to call all_problem_grade_distribution
and pick out the sections of interest.
"""
data = {}
# Only instructor for this particular course can request this information
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
if has_instructor_access_for_class(request.user, course_key):
try:
data = dashboard_data.get_d3_section_grade_distrib(course_key, section)
except Exception as ex: # pylint: disable=broad-except
log.error('Generating metrics failed with exception: %s', ex)
data = {'error': "error"}
else:
data = {'error': "Access Denied: User does not have access to this course's data"}
return HttpResponse(json.dumps(data), mimetype="application/json")
| agpl-3.0 |
schieb/angr | angr/analyses/reaching_definitions/atoms.py | 1 | 3742 |
class Atom:
"""
This class represents a data storage location manipulated by IR instructions.
It could either be a Tmp (temporary variable), a Register, a MemoryLocation, or a Parameter.
"""
def __repr__(self):
raise NotImplementedError()
class GuardUse(Atom):
def __init__(self, target):
self.target = target
def __repr__(self):
return '<Guard %#x>' % self.target
class Tmp(Atom):
"""
Represents a variable used by the IR to store intermediate values.
"""
__slots__ = ['tmp_idx']
def __init__(self, tmp_idx):
super(Tmp, self).__init__()
self.tmp_idx = tmp_idx
def __repr__(self):
return "<Tmp %d>" % self.tmp_idx
def __eq__(self, other):
return type(other) is Tmp and \
self.tmp_idx == other.tmp_idx
def __hash__(self):
return hash(('tmp', self.tmp_idx))
class Register(Atom):
"""
Represents a given CPU register.
As an IR abstracts the CPU design to target different architectures, registers are represented as a separate memory
space.
Thus a register is defined by its offset from the base of this memory and its size.
:ivar int reg_offset: The offset from the base to define its place in the memory block.
:ivar int size: The size, in number of bytes.
"""
__slots__ = ['reg_offset', 'size']
def __init__(self, reg_offset, size):
super(Register, self).__init__()
self.reg_offset = reg_offset
self.size = size
def __repr__(self):
return "<Reg %d<%d>>" % (self.reg_offset, self.size)
def __eq__(self, other):
return type(other) is Register and \
self.reg_offset == other.reg_offset and \
self.size == other.size
def __hash__(self):
return hash(('reg', self.reg_offset, self.size))
@property
def bits(self):
return self.size * 8
class MemoryLocation(Atom):
"""
Represents a memory slice.
It is characterized by its address and its size.
"""
__slots__ = ['addr', 'size']
def __init__(self, addr, size):
super(MemoryLocation, self).__init__()
self.addr = addr
self.size = size
def __repr__(self):
return "<Mem %s<%d>>" % (hex(self.addr) if type(self.addr) is int else self.addr, self.size)
@property
def bits(self):
return self.size * 8
@property
def symbolic(self):
return not type(self.addr) is int
def __eq__(self, other):
return type(other) is MemoryLocation and \
self.addr == other.addr and \
self.size == other.size
def __hash__(self):
return hash(('mem', self.addr, self.size))
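# Illustrative sketch (the offset and address are hypothetical):
#   r = Register(16, 8)               # 8-byte register at offset 16
#   m = MemoryLocation(0x7fff0000, 8) # 8-byte memory slice
#   assert r.bits == m.bits == 64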
class Parameter(Atom):
"""
Represents a function parameter.
Can either be a <angr.engines.light.data.SpOffset> if the parameter was passed on the stack, or a <Register>, depending on the calling
convention.
"""
__slots__ = ['value', 'type_', 'meta']
def __init__(self, value, type_=None, meta=None):
super(Parameter, self).__init__()
self.value = value
self.type_ = type_
self.meta = meta
def __repr__(self):
type_ = ', type=%s' % self.type_ if self.type_ is not None else ''
meta = ', meta=%s' % self.meta if self.meta is not None else ''
return '<Param %s%s%s>' % (self.value, type_, meta)
def __eq__(self, other):
return type(other) is Parameter and \
self.value == other.value and \
self.type_ == other.type_ and \
self.meta == other.meta
def __hash__(self):
return hash(('par', self.value, self.type_, self.meta))
| bsd-2-clause |
hpe-storage/python-3parclient | hpe3parclient/http.py | 2 | 14843 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# (c) Copyright 2012-2015 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" HTTPJSONRESTClient.
.. module: http
:Author: Walter A. Boring IV
:Description: This is the HTTP Client that is used to make the actual calls.
It includes the authentication that knows the cookie name for 3PAR.
"""
import logging
import requests
import time
import ast
try:
import json
except ImportError:
import simplejson as json
from hpe3parclient import exceptions
class HTTPJSONRESTClient(object):
"""
An HTTP REST Client that sends and receives JSON data as the body of the
HTTP request.
:param api_url: The url to the WSAPI service on 3PAR
ie. http://<3par server>:8080
:type api_url: str
:param secure: Validate SSL cert? Default will not validate
:type secure: bool
:param http_log_debug: Turns on http log debugging. Default will not log
:type http_log_debug: bool
:param suppress_ssl_warnings: Suppresses log warning messages if True.
Default will not suppress warnings.
:type suppress_ssl_warnings: bool
"""
USER_AGENT = 'python-3parclient'
SESSION_COOKIE_NAME = 'X-Hp3Par-Wsapi-Sessionkey'
http_log_debug = False
_logger = logging.getLogger(__name__)
# Retry constants
retry_exceptions = (exceptions.HTTPServiceUnavailable,
requests.exceptions.ConnectionError)
tries = 5
delay = 0
backoff = 2
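# With these defaults the sleep before each retry in request() grows as
# delay = delay * backoff + 1, i.e. 1s, 3s, 7s, 15s across the 5 tries.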
def __init__(self, api_url, secure=False, http_log_debug=False,
suppress_ssl_warnings=False, timeout=None):
if suppress_ssl_warnings:
requests.packages.urllib3.disable_warnings()
self.session_key = None
# should be http://<Server:Port>/api/v1
self.set_url(api_url)
self.set_debug_flag(http_log_debug)
self.times = [] # [("item", starttime, endtime), ...]
self.secure = secure
self.timeout = timeout
def set_url(self, api_url):
# should be http://<Server:Port>/api/v1
self.api_url = api_url.rstrip('/')
def set_debug_flag(self, flag):
"""
This turns on/off http request/response debugging output to console
:param flag: Set to True to enable debugging output
:type flag: bool
"""
if not HTTPJSONRESTClient.http_log_debug and flag:
ch = logging.StreamHandler()
HTTPJSONRESTClient._logger.setLevel(logging.DEBUG)
HTTPJSONRESTClient._logger.addHandler(ch)
HTTPJSONRESTClient.http_log_debug = True
def authenticate(self, user, password, optional=None):
"""
This tries to create an authenticated session with the 3PAR server
:param user: The username
:type user: str
:param password: Password
:type password: str
"""
# this prevents a re-auth attempt if auth fails
self.auth_try = 1
self.session_key = None
info = {'user': user, 'password': password}
self._auth_optional = None
if optional:
self._auth_optional = optional
info.update(optional)
resp, body = self.post('/credentials', body=info)
if body and 'key' in body:
self.session_key = body['key']
self.auth_try = 0
self.user = user
self.password = password
def _reauth(self):
self.authenticate(self.user, self.password, self._auth_optional)
def unauthenticate(self):
"""
This clears the authenticated session with the 3PAR server.
"""
# delete the session on the 3Par
self.delete('/credentials/%s' % self.session_key)
self.session_key = None
def get_timings(self):
"""
This gives an array of the request timings since the last reset_timings call
"""
return self.times
def reset_timings(self):
"""
This resets the request/response timings array
"""
self.times = []
def _http_log_req(self, args, kwargs):
if not self.http_log_debug:
return
string_parts = ['curl -i']
for element in args:
if element in ('GET', 'POST'):
string_parts.append(' -X %s' % element)
else:
string_parts.append(' %s' % element)
for element in kwargs['headers']:
header = ' -H "%s: %s"' % (element, kwargs['headers'][element])
string_parts.append(header)
HTTPJSONRESTClient._logger.debug("\nREQ: %s\n" % "".join(string_parts))
if 'body' in kwargs:
if 'password' in kwargs['body']:
body_dict = ast.literal_eval(kwargs['body'])
body_dict['password'] = "********"
HTTPJSONRESTClient._logger.debug("REQ BODY: %s\n" %
(kwargs['body']))
def _http_log_resp(self, resp, body):
if not self.http_log_debug:
return
# Replace commas with newlines to break the debug into new lines,
# making it easier to read
HTTPJSONRESTClient._logger.debug("RESP:%s\n",
str(resp).replace("',", "'\n"))
HTTPJSONRESTClient._logger.debug("RESP BODY:%s\n", body)
def request(self, *args, **kwargs):
"""
This makes an HTTP Request to the 3Par server.
You should use get, post, delete instead.
"""
if self.session_key and self.auth_try != 1:
kwargs.setdefault('headers', {})[self.SESSION_COOKIE_NAME] = \
self.session_key
kwargs.setdefault('headers', kwargs.get('headers', {}))
kwargs['headers']['User-Agent'] = self.USER_AGENT
kwargs['headers']['Accept'] = 'application/json'
if 'body' in kwargs:
kwargs['headers']['Content-Type'] = 'application/json'
kwargs['body'] = json.dumps(kwargs['body'])
payload = kwargs['body']
else:
payload = None
# args[0] contains the URL, args[1] contains the HTTP verb/method
http_url = args[0]
http_method = args[1]
self._http_log_req(args, kwargs)
r = None
resp = None
body = None
while r is None and self.tries > 0:
try:
# Check to see if the request is being retried. If it is, we
# want to delay.
if self.delay:
time.sleep(self.delay)
if self.timeout:
r = requests.request(http_method, http_url, data=payload,
headers=kwargs['headers'],
verify=self.secure,
timeout=self.timeout)
else:
r = requests.request(http_method, http_url, data=payload,
headers=kwargs['headers'],
verify=self.secure)
resp = r.headers
body = r.text
if isinstance(body, bytes):
body = body.decode('utf-8')
# resp['status'], status['content-location'], and resp.status
# need to be manually set as Python Requests doesn't provide
# them automatically.
resp['status'] = str(r.status_code)
resp.status = r.status_code
if 'location' not in resp:
resp['content-location'] = r.url
r.close()
self._http_log_resp(resp, body)
# Try and convert the body response to an object
# This assumes the body of the reply is JSON
if body:
try:
body = json.loads(body)
except ValueError:
pass
else:
body = None
if resp.status >= 400:
if body and 'message' in body:
body['desc'] = body['message']
raise exceptions.from_response(resp, body)
except requests.exceptions.SSLError as err:
HTTPJSONRESTClient._logger.error(
"SSL certificate verification failed: (%s). You must have "
"a valid SSL certificate or disable SSL "
"verification.", err)
raise exceptions.SSLCertFailed("SSL Certificate Verification "
"Failed.")
except self.retry_exceptions as ex:
# If we catch an exception where we want to retry, we need to
# decrement the retry count prepare to try again.
r = None
self.tries -= 1
self.delay = self.delay * self.backoff + 1
# Raise exception, we have exhausted all retries.
if self.tries == 0:
raise ex
except requests.exceptions.HTTPError as err:
raise exceptions.HTTPError("HTTP Error: %s" % err)
except requests.exceptions.URLRequired as err:
raise exceptions.URLRequired("URL Required: %s" % err)
except requests.exceptions.TooManyRedirects as err:
raise exceptions.TooManyRedirects(
"Too Many Redirects: %s" % err)
except requests.exceptions.Timeout as err:
raise exceptions.Timeout("Timeout: %s" % err)
except requests.exceptions.RequestException as err:
raise exceptions.RequestException(
"Request Exception: %s" % err)
return resp, body
def _time_request(self, url, method, **kwargs):
start_time = time.time()
resp, body = self.request(url, method, **kwargs)
self.times.append(("%s %s" % (method, url),
start_time, time.time()))
return resp, body
def _do_reauth(self, url, method, ex, **kwargs):
# print("_do_reauth called")
try:
if self.auth_try != 1:
self._reauth()
resp, body = self._time_request(self.api_url + url, method,
**kwargs)
return resp, body
else:
raise ex
except exceptions.HTTPUnauthorized:
raise ex
def _cs_request(self, url, method, **kwargs):
# Perform the request once. If we get a 401 back then it
# might be because the auth token expired, so try to
# re-authenticate and try again. If it still fails, bail.
try:
resp, body = self._time_request(self.api_url + url, method,
**kwargs)
return resp, body
except exceptions.HTTPUnauthorized as ex:
# print("_CS_REQUEST HTTPUnauthorized")
resp, body = self._do_reauth(url, method, ex, **kwargs)
return resp, body
except exceptions.HTTPForbidden as ex:
# print("_CS_REQUEST HTTPForbidden")
resp, body = self._do_reauth(url, method, ex, **kwargs)
return resp, body
def get(self, url, **kwargs):
"""
Make an HTTP GET request to the server.
.. code-block:: python
#example call
try {
headers, body = http.get('/volumes')
} except exceptions.HTTPUnauthorized as ex:
print "Not logged in"
}
:param url: The relative url from the 3PAR api_url
:type url: str
:returns: headers - dict of HTTP Response headers
:returns: body - the body of the response. If the body was JSON, it
will be an object
"""
return self._cs_request(url, 'GET', **kwargs)
def post(self, url, **kwargs):
"""
Make an HTTP POST request to the server.
.. code-block:: python
#example call
try {
info = {'name': 'new volume name', 'cpg': 'MyCPG',
'sizeMiB': 300}
headers, body = http.post('/volumes', body=info)
} except exceptions.HTTPUnauthorized as ex:
print "Not logged in"
}
:param url: The relative url from the 3PAR api_url
:type url: str
:returns: headers - dict of HTTP Response headers
:returns: body - the body of the response. If the body was JSON, it
will be an object
"""
return self._cs_request(url, 'POST', **kwargs)
def put(self, url, **kwargs):
"""
Make an HTTP PUT request to the server.
.. code-block:: python
#example call
try {
info = {'name': 'something'}
headers, body = http.put('/volumes', body=info)
} except exceptions.HTTPUnauthorized as ex:
print "Not logged in"
}
:param url: The relative url from the 3PAR api_url
:type url: str
:returns: headers - dict of HTTP Response headers
:returns: body - the body of the response. If the body was JSON,
it will be an object
"""
return self._cs_request(url, 'PUT', **kwargs)
def delete(self, url, **kwargs):
"""
Make an HTTP DELETE request to the server.
.. code-block:: python
#example call
try {
headers, body = http.delete('/volumes/%s' % name)
} except exceptions.HTTPUnauthorized as ex:
print "Not logged in"
}
:param url: The relative url from the 3PAR api_url
:type url: str
:returns: headers - dict of HTTP Response headers
:returns: body - the body of the response. If the body was JSON, it
will be an object
"""
return self._cs_request(url, 'DELETE', **kwargs)
| apache-2.0 |
jmartinm/invenio-master | modules/websearch/lib/search_engine_summarizer.py | 18 | 15860 | # -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2008, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Search Engine Summarizer, producing summary formats such as citesummary.
The main API is summarize_records().
"""
__lastupdated__ = """$Date$"""
__revision__ = "$Id$"
from operator import itemgetter
from invenio.config import CFG_INSPIRE_SITE, \
CFG_WEBSEARCH_CITESUMMARY_SCAN_THRESHOLD
from invenio.bibrank_citation_searcher import get_citation_dict
from StringIO import StringIO
from invenio.search_engine import search_pattern, perform_request_search
from invenio.intbitset import intbitset
import invenio.template
websearch_templates = invenio.template.load('websearch')
## CFG_CITESUMMARY_COLLECTIONS -- how do we break down cite summary
## results according to collections?
if CFG_INSPIRE_SITE:
CFG_CITESUMMARY_COLLECTIONS = [['Citeable papers', 'collection:citeable'],
['Published only', 'collection:published']]
else:
CFG_CITESUMMARY_COLLECTIONS = [['All papers', ''],
['Published only', 'collection:article']]
## CFG_CITESUMMARY_FAME_THRESHOLDS -- how do we break down cite
## summary results into famous and less famous paper groups?
CFG_CITESUMMARY_FAME_THRESHOLDS = [
(500, 1000000, 'Renowned papers (500+)'),
(250, 499, 'Famous papers (250-499)'),
(100, 249, 'Very well-known papers (100-249)'),
(50, 99, 'Well-known papers (50-99)'),
(10, 49, 'Known papers (10-49)'),
(1, 9, 'Less known papers (1-9)'),
(0, 0, 'Unknown papers (0)')
]
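# For instance, under these thresholds a paper with 120 citations falls
# into 'Very well-known papers (100-249)', while an uncited paper falls
# into 'Unknown papers (0)'.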
def render_citations_breakdown(req, ln, collections, stats,
search_patterns, searchfield):
"Render citations break down by fame"
header = websearch_templates.tmpl_citesummary_breakdown_header(ln)
req.write(header)
for low, high, fame in CFG_CITESUMMARY_FAME_THRESHOLDS:
counts = {}
for coll, dummy in collections:
counts[coll] = stats[coll]['breakdown'][fame]
fame_info = websearch_templates.tmpl_citesummary_breakdown_by_fame(
counts, low, high, fame, collections,
search_patterns, searchfield, ln)
req.write(fame_info)
def compute_citations_counts(recids, dict_name):
"""Compute # cites for each recid
Input
- d_recids: list of recids for each collection
{'HEP': [1,2,3,5,8]}
Output:
- citers_counts: list of # cites/recid
{'HEP': [(1, 10), (2, 5), (3, 23), (5, 0), (8, 0)]}
"""
cites_count = get_citation_dict(dict_name)
counts = [(recid, cites_count.get(recid, 0)) for recid in recids]
counts.sort(key=itemgetter(1), reverse=True)
return counts
def compute_citation_stats(recids, citers_counts):
# Total citations
total_cites = 0
h_index = 0
h_index_done = False
total_recids_without_cites = len(recids)
# Breakdown
breakdown = {}
for low, high, fame in CFG_CITESUMMARY_FAME_THRESHOLDS:
breakdown[fame] = 0
for recid, citecount in citers_counts:
if recid in recids:
# Total
total_cites += citecount
# Recids without cites
total_recids_without_cites -= 1
# h-index
if not h_index_done:
h_index += 1
if h_index > citecount:
h_index -= 1
h_index_done = True
# Breakdown
for low, high, fame in CFG_CITESUMMARY_FAME_THRESHOLDS:
if low <= citecount <= high:
breakdown[fame] += 1
for low, high, fame in CFG_CITESUMMARY_FAME_THRESHOLDS:
if low == 0:
breakdown[fame] += total_recids_without_cites
# Average citations
try:
avg_cites = float(total_cites) / len(recids)
except ZeroDivisionError:
avg_cites = 0
return {'total_cites': total_cites,
'avg_cites': avg_cites,
'h-index': h_index,
'breakdown': breakdown}
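# A worked example with made-up numbers: for recids = intbitset([1, 2, 3])
# and citers_counts = [(1, 10), (2, 5), (3, 0)], compute_citation_stats
# yields total_cites == 15, avg_cites == 5.0, an h-index of 2, and a
# breakdown placing recid 3 in 'Unknown papers (0)'.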
def get_cites_counts(recids):
if len(recids) < CFG_WEBSEARCH_CITESUMMARY_SCAN_THRESHOLD:
cites_counts = compute_citations_counts(recids, 'citations_weights')
else:
cites_counts = get_citation_dict('citations_counts')
return cites_counts
def generate_citation_summary(recids, collections=CFG_CITESUMMARY_COLLECTIONS):
coll_recids = get_recids(recids, collections)
cites_counts = get_cites_counts(recids)
stats = {}
for coll, dummy in collections:
stats[coll] = compute_citation_stats(coll_recids[coll], cites_counts)
return coll_recids, stats
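# Callers receive a (coll_recids, stats) pair and unpack it; see
# render_citation_summary and summarize_records below.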
def render_citation_summary(req, ln, recids, searchpattern, searchfield,
stats, collections=CFG_CITESUMMARY_COLLECTIONS):
title = websearch_templates.tmpl_citesummary_title(ln)
req.write(title)
search_patterns = dict([(coll, searchpattern) \
for coll, dummy in collections])
if stats is None:
        stats = generate_citation_summary(recids, collections)
coll_recids, stats = stats
render_citesummary_prologue(req,
ln,
recids,
collections,
search_patterns,
searchfield,
coll_recids)
render_citesummary_overview(req,
ln,
collections,
stats)
render_citations_breakdown(req,
ln,
collections,
stats,
search_patterns,
searchfield)
render_h_index(req, ln, collections, stats)
    epilogue = websearch_templates.tmpl_citesummary_epilogue()
    req.write(epilogue)
links = websearch_templates.tmpl_citesummary_more_links(searchpattern, ln)
req.write(links)
def render_extended_citation_summary(req, ln, recids, collections,
searchpattern, searchfield):
title = websearch_templates.tmpl_citesummary2_title(searchpattern, ln)
req.write(title)
initial_collections = collections
collections_recids = get_recids(recids, collections)
def coll_self_cites(name):
return name + '<br />excluding self cites'
def coll_not_rpp(name):
return name + '<br />excluding RPP'
# Add self cites sets and "-title:rpp" sets
if CFG_INSPIRE_SITE:
notrpp_searchpattern = searchpattern + ' -title:rpp'
notrpp_recids = intbitset(perform_request_search(p=notrpp_searchpattern))
for coll, coll_recids in collections_recids.items():
collections_recids[coll_self_cites(coll)] = coll_recids
if CFG_INSPIRE_SITE:
collections_recids[coll_not_rpp(coll)] = notrpp_recids & coll_recids
# Add self cites collections
collections = []
search_patterns = {}
for coll, query in initial_collections:
search_patterns[coll] = searchpattern
search_patterns[coll_self_cites(coll)] = searchpattern
if CFG_INSPIRE_SITE:
search_patterns[coll_not_rpp(coll)] = notrpp_searchpattern
collections += [
(coll, query),
(coll_self_cites(coll), query),
(coll_not_rpp(coll), query),
]
else:
collections += [
(coll, query),
(coll_self_cites(coll), query),
]
cites_counts = get_cites_counts(recids)
if len(recids) < CFG_WEBSEARCH_CITESUMMARY_SCAN_THRESHOLD:
selfcites_counts = compute_citations_counts(recids, 'selfcites_weights')
else:
selfcites_counts = get_citation_dict('selfcites_counts')
citers_counts = {}
for coll, dummy in initial_collections:
citers_counts[coll] = cites_counts
citers_counts[coll_self_cites(coll)] = selfcites_counts
citers_counts[coll_not_rpp(coll)] = cites_counts
stats = {}
for coll, dummy in collections:
stats[coll] = compute_citation_stats(collections_recids[coll], citers_counts[coll])
render_citesummary_prologue(req,
ln,
recids,
collections,
search_patterns,
searchfield,
collections_recids)
render_citesummary_overview(req,
ln,
collections,
stats)
render_citations_breakdown(req,
ln,
collections,
stats,
search_patterns,
searchfield)
render_h_index(req, ln, collections, stats)
# 6) hcs epilogue:
    epilogue = websearch_templates.tmpl_citesummary_epilogue()
    req.write(epilogue)
back_link = websearch_templates.tmpl_citesummary_back_link(searchpattern, ln)
req.write(back_link)
def render_citesummary_overview(req, ln, collections, stats):
"""Citations overview: total citations"""
avg_cites = {}
total_cites = {}
for coll, dummy in collections:
avg_cites[coll] = stats[coll]['avg_cites']
total_cites[coll] = stats[coll]['total_cites']
overview = websearch_templates.tmpl_citesummary_overview(collections,
total_cites,
avg_cites,
ln)
req.write(overview)
def get_recids(recids, collections):
"""Compute recids for each column"""
d_recids = {}
for coll, colldef in collections:
if not colldef:
d_recids[coll] = recids
else:
d_recids[coll] = recids & search_pattern(p=colldef)
return d_recids
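# Sketch: with collections like [('All papers', ''), ('Published only',
# 'collection:article')], the empty query maps straight to recids, while
# the second entry is intersected with the set of matching records.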
def render_citesummary_prologue(req, ln, recids, collections, search_patterns,
searchfield, coll_recids):
total_count = len(recids)
citable_recids = recids & search_pattern(p='collection:citeable')
prologue = websearch_templates.tmpl_citesummary_prologue(coll_recids,
collections,
search_patterns,
searchfield,
citable_recids,
total_count,
ln)
req.write(prologue)
def render_h_index(req, ln, collections, stats):
"Calculate and Render h-hep index"
h_indexes = {}
for coll, dummy in collections:
h_indexes[coll] = stats[coll]['h-index']
h_idx = websearch_templates.tmpl_citesummary_h_index(collections,
h_indexes,
ln)
req.write(h_idx)
def summarize_records(recids, of, ln, searchpattern="", searchfield="",
req=None, collections=CFG_CITESUMMARY_COLLECTIONS):
"""Write summary report for records RECIDS in the format OF in language LN.
SEARCHPATTERN and SEARCHFIELD are search query that led to RECIDS,
for instance p='Smith, Paul' and f='author'. They are used for links.
REQ is the Apache/mod_python request object.
"""
    # Work around an intbitset segfault when recids is not an intbitset
if not isinstance(recids, intbitset):
recids = intbitset(recids)
if of == 'xcs':
# This is XML cite summary
return render_citation_summary_xml(recids)
has_req = req is not None
if not has_req:
req = StringIO()
if of == 'hcs':
stats = generate_citation_summary(recids, collections)
render_citation_summary(req=req,
ln=ln,
recids=recids,
collections=collections,
searchpattern=searchpattern,
searchfield=searchfield,
stats=stats)
else:
render_extended_citation_summary(req=req,
ln=ln,
recids=recids,
collections=collections,
searchpattern=searchpattern,
searchfield=searchfield)
req.write(websearch_templates.tmpl_citesummary_footer())
if has_req:
return ''
else:
return req.getvalue()
# Citation summary output codes: 'xcs' (XML) and 'hcs' (HTML), unless changed
def render_citation_summary_xml(recids):
"""Prints citation summary in xml."""
total_cites, recids_breakdown = calculate_citations(recids)
# output formatting
out = ["<citationsummary records=\"%s\" citations=\"%s\">" \
% (len(recids), total_cites)]
for dummy, dummy, name in CFG_CITESUMMARY_FAME_THRESHOLDS:
# get the name, print the value
if name in recids_breakdown:
out += ["<citationclass>%s<records>%s</records></citationclass>\n"\
% (name, recids_breakdown[name])]
out += ["</citationsummary>"]
return '\n'.join(out)
def calculate_citations(recids):
"""calculates records in classes of citations
defined by thresholds. returns a dictionary that
contains total, avg, records and a dictionary
of threshold names and number corresponding to it"""
total_cites = 0
recids_breakdown = {}
if len(recids) < CFG_WEBSEARCH_CITESUMMARY_SCAN_THRESHOLD:
cites_counts = compute_citations_counts(recids, 'citations_weights')
else:
cites_counts = get_citation_dict('citations_counts')
for recid, numcites in cites_counts:
if recid in recids:
total_cites += numcites
for low, high, name in CFG_CITESUMMARY_FAME_THRESHOLDS:
if low <= numcites <= high:
recids_breakdown.setdefault(name, []).append(recid)
if low == 0:
non_cited = recids - get_citation_dict("citations_keys")
recids_breakdown.setdefault(name, []).extend(non_cited)
return total_cites, recids_breakdown
| gpl-2.0 |
WhireCrow/openwrt-mt7620 | staging_dir/host/lib/python2.7/test/test_doctest.py | 18 | 83664 | # -*- coding: utf-8 -*-
"""
Test script for doctest.
"""
import sys
from test import test_support
import doctest
# NOTE: There are some additional tests relating to interaction with
# zipimport in the test_zipimport_support test module.
######################################################################
## Sample Objects (used by test cases)
######################################################################
def sample_func(v):
"""
Blah blah
>>> print sample_func(22)
44
Yee ha!
"""
return v+v
class SampleClass:
"""
>>> print 1
1
>>> # comments get ignored. so are empty PS1 and PS2 prompts:
>>>
...
Multiline example:
>>> sc = SampleClass(3)
>>> for i in range(10):
... sc = sc.double()
... print sc.get(),
6 12 24 48 96 192 384 768 1536 3072
"""
def __init__(self, val):
"""
>>> print SampleClass(12).get()
12
"""
self.val = val
def double(self):
"""
>>> print SampleClass(12).double().get()
24
"""
return SampleClass(self.val + self.val)
def get(self):
"""
>>> print SampleClass(-5).get()
-5
"""
return self.val
def a_staticmethod(v):
"""
>>> print SampleClass.a_staticmethod(10)
11
"""
return v+1
a_staticmethod = staticmethod(a_staticmethod)
def a_classmethod(cls, v):
"""
>>> print SampleClass.a_classmethod(10)
12
>>> print SampleClass(0).a_classmethod(10)
12
"""
return v+2
a_classmethod = classmethod(a_classmethod)
a_property = property(get, doc="""
>>> print SampleClass(22).a_property
22
""")
class NestedClass:
"""
>>> x = SampleClass.NestedClass(5)
>>> y = x.square()
>>> print y.get()
25
"""
def __init__(self, val=0):
"""
>>> print SampleClass.NestedClass().get()
0
"""
self.val = val
def square(self):
return SampleClass.NestedClass(self.val*self.val)
def get(self):
return self.val
class SampleNewStyleClass(object):
r"""
>>> print '1\n2\n3'
1
2
3
"""
def __init__(self, val):
"""
>>> print SampleNewStyleClass(12).get()
12
"""
self.val = val
def double(self):
"""
>>> print SampleNewStyleClass(12).double().get()
24
"""
return SampleNewStyleClass(self.val + self.val)
def get(self):
"""
>>> print SampleNewStyleClass(-5).get()
-5
"""
return self.val
######################################################################
## Fake stdin (for testing interactive debugging)
######################################################################
class _FakeInput:
"""
A fake input stream for pdb's interactive debugger. Whenever a
line is read, print it (to simulate the user typing it), and then
return it. The set of lines to return is specified in the
constructor; they should not have trailing newlines.
"""
def __init__(self, lines):
self.lines = lines
def readline(self):
line = self.lines.pop(0)
print line
return line+'\n'
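# A minimal usage sketch: _FakeInput(['next', 'continue']) hands pdb the
# line 'next' (echoing it) on the first readline() call, then 'continue'
# on the second.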
######################################################################
## Test Cases
######################################################################
def test_Example(): r"""
Unit tests for the `Example` class.
Example is a simple container class that holds:
- `source`: A source string.
- `want`: An expected output string.
- `exc_msg`: An expected exception message string (or None if no
exception is expected).
- `lineno`: A line number (within the docstring).
- `indent`: The example's indentation in the input string.
- `options`: An option dictionary, mapping option flags to True or
False.
These attributes are set by the constructor. `source` and `want` are
required; the other attributes all have default values:
>>> example = doctest.Example('print 1', '1\n')
>>> (example.source, example.want, example.exc_msg,
... example.lineno, example.indent, example.options)
('print 1\n', '1\n', None, 0, 0, {})
The first three attributes (`source`, `want`, and `exc_msg`) may be
specified positionally; the remaining arguments should be specified as
keyword arguments:
>>> exc_msg = 'IndexError: pop from an empty list'
>>> example = doctest.Example('[].pop()', '', exc_msg,
... lineno=5, indent=4,
... options={doctest.ELLIPSIS: True})
>>> (example.source, example.want, example.exc_msg,
... example.lineno, example.indent, example.options)
('[].pop()\n', '', 'IndexError: pop from an empty list\n', 5, 4, {8: True})
The constructor normalizes the `source` string to end in a newline:
Source spans a single line: no terminating newline.
>>> e = doctest.Example('print 1', '1\n')
>>> e.source, e.want
('print 1\n', '1\n')
>>> e = doctest.Example('print 1\n', '1\n')
>>> e.source, e.want
('print 1\n', '1\n')
Source spans multiple lines: require terminating newline.
>>> e = doctest.Example('print 1;\nprint 2\n', '1\n2\n')
>>> e.source, e.want
('print 1;\nprint 2\n', '1\n2\n')
>>> e = doctest.Example('print 1;\nprint 2', '1\n2\n')
>>> e.source, e.want
('print 1;\nprint 2\n', '1\n2\n')
Empty source string (which should never appear in real examples)
>>> e = doctest.Example('', '')
>>> e.source, e.want
('\n', '')
The constructor normalizes the `want` string to end in a newline,
unless it's the empty string:
>>> e = doctest.Example('print 1', '1\n')
>>> e.source, e.want
('print 1\n', '1\n')
>>> e = doctest.Example('print 1', '1')
>>> e.source, e.want
('print 1\n', '1\n')
>>> e = doctest.Example('print', '')
>>> e.source, e.want
('print\n', '')
The constructor normalizes the `exc_msg` string to end in a newline,
unless it's `None`:
Message spans one line
>>> exc_msg = 'IndexError: pop from an empty list'
>>> e = doctest.Example('[].pop()', '', exc_msg)
>>> e.exc_msg
'IndexError: pop from an empty list\n'
>>> exc_msg = 'IndexError: pop from an empty list\n'
>>> e = doctest.Example('[].pop()', '', exc_msg)
>>> e.exc_msg
'IndexError: pop from an empty list\n'
Message spans multiple lines
>>> exc_msg = 'ValueError: 1\n 2'
>>> e = doctest.Example('raise ValueError("1\n 2")', '', exc_msg)
>>> e.exc_msg
'ValueError: 1\n 2\n'
>>> exc_msg = 'ValueError: 1\n 2\n'
>>> e = doctest.Example('raise ValueError("1\n 2")', '', exc_msg)
>>> e.exc_msg
'ValueError: 1\n 2\n'
Empty (but non-None) exception message (which should never appear
in real examples)
>>> exc_msg = ''
>>> e = doctest.Example('raise X()', '', exc_msg)
>>> e.exc_msg
'\n'
Compare `Example`:
>>> example = doctest.Example('print 1', '1\n')
>>> same_example = doctest.Example('print 1', '1\n')
>>> other_example = doctest.Example('print 42', '42\n')
>>> example == same_example
True
>>> example != same_example
False
>>> hash(example) == hash(same_example)
True
>>> example == other_example
False
>>> example != other_example
True
"""
def test_DocTest(): r"""
Unit tests for the `DocTest` class.
DocTest is a collection of examples, extracted from a docstring, along
with information about where the docstring comes from (a name,
filename, and line number). The docstring is parsed by the `DocTest`
constructor:
>>> docstring = '''
... >>> print 12
... 12
...
... Non-example text.
...
... >>> print 'another\example'
... another
... example
... '''
>>> globs = {} # globals to run the test in.
>>> parser = doctest.DocTestParser()
>>> test = parser.get_doctest(docstring, globs, 'some_test',
... 'some_file', 20)
>>> print test
<DocTest some_test from some_file:20 (2 examples)>
>>> len(test.examples)
2
>>> e1, e2 = test.examples
>>> (e1.source, e1.want, e1.lineno)
('print 12\n', '12\n', 1)
>>> (e2.source, e2.want, e2.lineno)
("print 'another\\example'\n", 'another\nexample\n', 6)
Source information (name, filename, and line number) is available as
attributes on the doctest object:
>>> (test.name, test.filename, test.lineno)
('some_test', 'some_file', 20)
The line number of an example within its containing file is found by
adding the line number of the example and the line number of its
containing test:
>>> test.lineno + e1.lineno
21
>>> test.lineno + e2.lineno
26
    If the docstring contains inconsistent leading whitespace in the
expected output of an example, then `DocTest` will raise a ValueError:
>>> docstring = r'''
... >>> print 'bad\nindentation'
... bad
... indentation
... '''
>>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0)
Traceback (most recent call last):
ValueError: line 4 of the docstring for some_test has inconsistent leading whitespace: 'indentation'
If the docstring contains inconsistent leading whitespace on
continuation lines, then `DocTest` will raise a ValueError:
>>> docstring = r'''
... >>> print ('bad indentation',
... ... 2)
... ('bad', 'indentation')
... '''
>>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0)
Traceback (most recent call last):
ValueError: line 2 of the docstring for some_test has inconsistent leading whitespace: '... 2)'
If there's no blank space after a PS1 prompt ('>>>'), then `DocTest`
will raise a ValueError:
>>> docstring = '>>>print 1\n1'
>>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0)
Traceback (most recent call last):
ValueError: line 1 of the docstring for some_test lacks blank after >>>: '>>>print 1'
If there's no blank space after a PS2 prompt ('...'), then `DocTest`
will raise a ValueError:
>>> docstring = '>>> if 1:\n...print 1\n1'
>>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0)
Traceback (most recent call last):
ValueError: line 2 of the docstring for some_test lacks blank after ...: '...print 1'
Compare `DocTest`:
>>> docstring = '''
... >>> print 12
... 12
... '''
>>> test = parser.get_doctest(docstring, globs, 'some_test',
... 'some_test', 20)
>>> same_test = parser.get_doctest(docstring, globs, 'some_test',
... 'some_test', 20)
>>> test == same_test
True
>>> test != same_test
False
>>> hash(test) == hash(same_test)
True
>>> docstring = '''
... >>> print 42
... 42
... '''
>>> other_test = parser.get_doctest(docstring, globs, 'other_test',
... 'other_file', 10)
>>> test == other_test
False
>>> test != other_test
True
Compare `DocTestCase`:
>>> DocTestCase = doctest.DocTestCase
>>> test_case = DocTestCase(test)
>>> same_test_case = DocTestCase(same_test)
>>> other_test_case = DocTestCase(other_test)
>>> test_case == same_test_case
True
>>> test_case != same_test_case
False
>>> hash(test_case) == hash(same_test_case)
True
>>> test == other_test_case
False
>>> test != other_test_case
True
"""
def test_DocTestFinder(): r"""
Unit tests for the `DocTestFinder` class.
DocTestFinder is used to extract DocTests from an object's docstring
and the docstrings of its contained objects. It can be used with
modules, functions, classes, methods, staticmethods, classmethods, and
properties.
Finding Tests in Functions
~~~~~~~~~~~~~~~~~~~~~~~~~~
For a function whose docstring contains examples, DocTestFinder.find()
will return a single test (for that function's docstring):
>>> finder = doctest.DocTestFinder()
We'll simulate a __file__ attr that ends in pyc:
>>> import test.test_doctest
>>> old = test.test_doctest.__file__
>>> test.test_doctest.__file__ = 'test_doctest.pyc'
>>> tests = finder.find(sample_func)
>>> print tests # doctest: +ELLIPSIS
[<DocTest sample_func from ...:17 (1 example)>]
The exact name depends on how test_doctest was invoked, so allow for
leading path components.
>>> tests[0].filename # doctest: +ELLIPSIS
'...test_doctest.py'
>>> test.test_doctest.__file__ = old
>>> e = tests[0].examples[0]
>>> (e.source, e.want, e.lineno)
('print sample_func(22)\n', '44\n', 3)
By default, tests are created for objects with no docstring:
>>> def no_docstring(v):
... pass
>>> finder.find(no_docstring)
[]
However, the optional argument `exclude_empty` to the DocTestFinder
constructor can be used to exclude tests for objects with empty
docstrings:
>>> def no_docstring(v):
... pass
>>> excl_empty_finder = doctest.DocTestFinder(exclude_empty=True)
>>> excl_empty_finder.find(no_docstring)
[]
If the function has a docstring with no examples, then a test with no
examples is returned. (This lets `DocTestRunner` collect statistics
about which functions have no tests -- but is that useful? And should
an empty test also be created when there's no docstring?)
>>> def no_examples(v):
... ''' no doctest examples '''
>>> finder.find(no_examples) # doctest: +ELLIPSIS
[<DocTest no_examples from ...:1 (no examples)>]
Finding Tests in Classes
~~~~~~~~~~~~~~~~~~~~~~~~
For a class, DocTestFinder will create a test for the class's
docstring, and will recursively explore its contents, including
methods, classmethods, staticmethods, properties, and nested classes.
>>> finder = doctest.DocTestFinder()
>>> tests = finder.find(SampleClass)
>>> for t in tests:
... print '%2s %s' % (len(t.examples), t.name)
3 SampleClass
3 SampleClass.NestedClass
1 SampleClass.NestedClass.__init__
1 SampleClass.__init__
2 SampleClass.a_classmethod
1 SampleClass.a_property
1 SampleClass.a_staticmethod
1 SampleClass.double
1 SampleClass.get
New-style classes are also supported:
>>> tests = finder.find(SampleNewStyleClass)
>>> for t in tests:
... print '%2s %s' % (len(t.examples), t.name)
1 SampleNewStyleClass
1 SampleNewStyleClass.__init__
1 SampleNewStyleClass.double
1 SampleNewStyleClass.get
Finding Tests in Modules
~~~~~~~~~~~~~~~~~~~~~~~~
For a module, DocTestFinder will create a test for the class's
docstring, and will recursively explore its contents, including
functions, classes, and the `__test__` dictionary, if it exists:
>>> # A module
>>> import types
>>> m = types.ModuleType('some_module')
>>> def triple(val):
... '''
... >>> print triple(11)
... 33
... '''
... return val*3
>>> m.__dict__.update({
... 'sample_func': sample_func,
... 'SampleClass': SampleClass,
... '__doc__': '''
... Module docstring.
... >>> print 'module'
... module
... ''',
... '__test__': {
... 'd': '>>> print 6\n6\n>>> print 7\n7\n',
... 'c': triple}})
>>> finder = doctest.DocTestFinder()
>>> # Use module=test.test_doctest, to prevent doctest from
>>> # ignoring the objects since they weren't defined in m.
>>> import test.test_doctest
>>> tests = finder.find(m, module=test.test_doctest)
>>> for t in tests:
... print '%2s %s' % (len(t.examples), t.name)
1 some_module
3 some_module.SampleClass
3 some_module.SampleClass.NestedClass
1 some_module.SampleClass.NestedClass.__init__
1 some_module.SampleClass.__init__
2 some_module.SampleClass.a_classmethod
1 some_module.SampleClass.a_property
1 some_module.SampleClass.a_staticmethod
1 some_module.SampleClass.double
1 some_module.SampleClass.get
1 some_module.__test__.c
2 some_module.__test__.d
1 some_module.sample_func
Duplicate Removal
~~~~~~~~~~~~~~~~~
If a single object is listed twice (under different names), then tests
will only be generated for it once:
>>> from test import doctest_aliases
>>> assert doctest_aliases.TwoNames.f
>>> assert doctest_aliases.TwoNames.g
>>> tests = excl_empty_finder.find(doctest_aliases)
>>> print len(tests)
2
>>> print tests[0].name
test.doctest_aliases.TwoNames
TwoNames.f and TwoNames.g are bound to the same object.
We can't guess which will be found in doctest's traversal of
TwoNames.__dict__ first, so we have to allow for either.
>>> tests[1].name.split('.')[-1] in ['f', 'g']
True
Empty Tests
~~~~~~~~~~~
By default, an object with no doctests doesn't create any tests:
>>> tests = doctest.DocTestFinder().find(SampleClass)
>>> for t in tests:
... print '%2s %s' % (len(t.examples), t.name)
3 SampleClass
3 SampleClass.NestedClass
1 SampleClass.NestedClass.__init__
1 SampleClass.__init__
2 SampleClass.a_classmethod
1 SampleClass.a_property
1 SampleClass.a_staticmethod
1 SampleClass.double
1 SampleClass.get
    By default, objects with no doctests are excluded, as seen above.
    exclude_empty=False tells DocTestFinder to include (empty) tests for
    objects with no doctests. This feature is really to support backward
    compatibility in what doctest.master.summarize() displays.
>>> tests = doctest.DocTestFinder(exclude_empty=False).find(SampleClass)
>>> for t in tests:
... print '%2s %s' % (len(t.examples), t.name)
3 SampleClass
3 SampleClass.NestedClass
1 SampleClass.NestedClass.__init__
0 SampleClass.NestedClass.get
0 SampleClass.NestedClass.square
1 SampleClass.__init__
2 SampleClass.a_classmethod
1 SampleClass.a_property
1 SampleClass.a_staticmethod
1 SampleClass.double
1 SampleClass.get
Turning off Recursion
~~~~~~~~~~~~~~~~~~~~~
DocTestFinder can be told not to look for tests in contained objects
using the `recurse` flag:
>>> tests = doctest.DocTestFinder(recurse=False).find(SampleClass)
>>> for t in tests:
... print '%2s %s' % (len(t.examples), t.name)
3 SampleClass
Line numbers
~~~~~~~~~~~~
DocTestFinder finds the line number of each example:
>>> def f(x):
... '''
... >>> x = 12
...
... some text
...
... >>> # examples are not created for comments & bare prompts.
... >>>
... ...
...
... >>> for x in range(10):
... ... print x,
... 0 1 2 3 4 5 6 7 8 9
... >>> x//2
... 6
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> [e.lineno for e in test.examples]
[1, 9, 12]
"""
def test_DocTestParser(): r"""
Unit tests for the `DocTestParser` class.
DocTestParser is used to parse docstrings containing doctest examples.
The `parse` method divides a docstring into examples and intervening
text:
>>> s = '''
... >>> x, y = 2, 3 # no output expected
... >>> if 1:
... ... print x
... ... print y
... 2
... 3
...
... Some text.
... >>> x+y
... 5
... '''
>>> parser = doctest.DocTestParser()
>>> for piece in parser.parse(s):
... if isinstance(piece, doctest.Example):
... print 'Example:', (piece.source, piece.want, piece.lineno)
... else:
... print ' Text:', `piece`
Text: '\n'
Example: ('x, y = 2, 3 # no output expected\n', '', 1)
Text: ''
Example: ('if 1:\n print x\n print y\n', '2\n3\n', 2)
Text: '\nSome text.\n'
Example: ('x+y\n', '5\n', 9)
Text: ''
The `get_examples` method returns just the examples:
>>> for piece in parser.get_examples(s):
... print (piece.source, piece.want, piece.lineno)
('x, y = 2, 3 # no output expected\n', '', 1)
('if 1:\n print x\n print y\n', '2\n3\n', 2)
('x+y\n', '5\n', 9)
The `get_doctest` method creates a Test from the examples, along with the
given arguments:
>>> test = parser.get_doctest(s, {}, 'name', 'filename', lineno=5)
>>> (test.name, test.filename, test.lineno)
('name', 'filename', 5)
>>> for piece in test.examples:
... print (piece.source, piece.want, piece.lineno)
('x, y = 2, 3 # no output expected\n', '', 1)
('if 1:\n print x\n print y\n', '2\n3\n', 2)
('x+y\n', '5\n', 9)
"""
class test_DocTestRunner:
def basics(): r"""
Unit tests for the `DocTestRunner` class.
DocTestRunner is used to run DocTest test cases, and to accumulate
statistics. Here's a simple DocTest case we can use:
>>> def f(x):
... '''
... >>> x = 12
... >>> print x
... 12
... >>> x//2
... 6
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
The main DocTestRunner interface is the `run` method, which runs a
given DocTest case in a given namespace (globs). It returns a tuple
`(f,t)`, where `f` is the number of failed tests and `t` is the number
of tried tests.
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=3)
If any example produces incorrect output, then the test runner reports
the failure and proceeds to the next example:
>>> def f(x):
... '''
... >>> x = 12
... >>> print x
... 14
... >>> x//2
... 6
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=True).run(test)
... # doctest: +ELLIPSIS
Trying:
x = 12
Expecting nothing
ok
Trying:
print x
Expecting:
14
**********************************************************************
File ..., line 4, in f
Failed example:
print x
Expected:
14
Got:
12
Trying:
x//2
Expecting:
6
ok
TestResults(failed=1, attempted=3)
"""
def verbose_flag(): r"""
The `verbose` flag makes the test runner generate more detailed
output:
>>> def f(x):
... '''
... >>> x = 12
... >>> print x
... 12
... >>> x//2
... 6
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=True).run(test)
Trying:
x = 12
Expecting nothing
ok
Trying:
print x
Expecting:
12
ok
Trying:
x//2
Expecting:
6
ok
TestResults(failed=0, attempted=3)
If the `verbose` flag is unspecified, then the output will be verbose
iff `-v` appears in sys.argv:
>>> # Save the real sys.argv list.
>>> old_argv = sys.argv
>>> # If -v does not appear in sys.argv, then output isn't verbose.
>>> sys.argv = ['test']
>>> doctest.DocTestRunner().run(test)
TestResults(failed=0, attempted=3)
>>> # If -v does appear in sys.argv, then output is verbose.
>>> sys.argv = ['test', '-v']
>>> doctest.DocTestRunner().run(test)
Trying:
x = 12
Expecting nothing
ok
Trying:
print x
Expecting:
12
ok
Trying:
x//2
Expecting:
6
ok
TestResults(failed=0, attempted=3)
>>> # Restore sys.argv
>>> sys.argv = old_argv
In the remaining examples, the test runner's verbosity will be
explicitly set, to ensure that the test behavior is consistent.
"""
def exceptions(): r"""
Tests of `DocTestRunner`'s exception handling.
An expected exception is specified with a traceback message. The
lines between the first line and the type/value may be omitted or
replaced with any other string:
>>> def f(x):
... '''
... >>> x = 12
... >>> print x//0
... Traceback (most recent call last):
... ZeroDivisionError: integer division or modulo by zero
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=2)
An example may not generate output before it raises an exception; if
it does, then the traceback message will not be recognized as
signaling an expected exception, so the example will be reported as an
unexpected exception:
>>> def f(x):
... '''
... >>> x = 12
... >>> print 'pre-exception output', x//0
... pre-exception output
... Traceback (most recent call last):
... ZeroDivisionError: integer division or modulo by zero
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 4, in f
Failed example:
print 'pre-exception output', x//0
Exception raised:
...
ZeroDivisionError: integer division or modulo by zero
TestResults(failed=1, attempted=2)
Exception messages may contain newlines:
>>> def f(x):
... r'''
... >>> raise ValueError, 'multi\nline\nmessage'
... Traceback (most recent call last):
... ValueError: multi
... line
... message
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=1)
If an exception is expected, but an exception with the wrong type or
message is raised, then it is reported as a failure:
>>> def f(x):
... r'''
... >>> raise ValueError, 'message'
... Traceback (most recent call last):
... ValueError: wrong message
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 3, in f
Failed example:
raise ValueError, 'message'
Expected:
Traceback (most recent call last):
ValueError: wrong message
Got:
Traceback (most recent call last):
...
ValueError: message
TestResults(failed=1, attempted=1)
However, IGNORE_EXCEPTION_DETAIL can be used to allow a mismatch in the
detail:
>>> def f(x):
... r'''
... >>> raise ValueError, 'message' #doctest: +IGNORE_EXCEPTION_DETAIL
... Traceback (most recent call last):
... ValueError: wrong message
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=1)
IGNORE_EXCEPTION_DETAIL also ignores difference in exception formatting
between Python versions. For example, in Python 3.x, the module path of
the exception is in the output, but this will fail under Python 2:
>>> def f(x):
... r'''
... >>> from httplib import HTTPException
... >>> raise HTTPException('message')
... Traceback (most recent call last):
... httplib.HTTPException: message
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 4, in f
Failed example:
raise HTTPException('message')
Expected:
Traceback (most recent call last):
httplib.HTTPException: message
Got:
Traceback (most recent call last):
...
HTTPException: message
TestResults(failed=1, attempted=2)
    But in Python 2 the module path is not included, and therefore a test
    must look like the following to succeed in Python 2. That test will in
    turn fail under Python 3.
>>> def f(x):
... r'''
... >>> from httplib import HTTPException
... >>> raise HTTPException('message')
... Traceback (most recent call last):
... HTTPException: message
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=2)
However, with IGNORE_EXCEPTION_DETAIL, the module name of the exception
(if any) will be ignored:
>>> def f(x):
... r'''
... >>> from httplib import HTTPException
... >>> raise HTTPException('message') #doctest: +IGNORE_EXCEPTION_DETAIL
... Traceback (most recent call last):
... HTTPException: message
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=2)
The module path will be completely ignored, so two different module paths will
still pass if IGNORE_EXCEPTION_DETAIL is given. This is intentional, so it can
be used when exceptions have changed module.
>>> def f(x):
... r'''
... >>> from httplib import HTTPException
... >>> raise HTTPException('message') #doctest: +IGNORE_EXCEPTION_DETAIL
... Traceback (most recent call last):
... foo.bar.HTTPException: message
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=2)
But IGNORE_EXCEPTION_DETAIL does not allow a mismatch in the exception type:
>>> def f(x):
... r'''
... >>> raise ValueError, 'message' #doctest: +IGNORE_EXCEPTION_DETAIL
... Traceback (most recent call last):
... TypeError: wrong type
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 3, in f
Failed example:
raise ValueError, 'message' #doctest: +IGNORE_EXCEPTION_DETAIL
Expected:
Traceback (most recent call last):
TypeError: wrong type
Got:
Traceback (most recent call last):
...
ValueError: message
TestResults(failed=1, attempted=1)
If an exception is raised but not expected, then it is reported as an
unexpected exception:
>>> def f(x):
... r'''
... >>> 1//0
... 0
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 3, in f
Failed example:
1//0
Exception raised:
Traceback (most recent call last):
...
ZeroDivisionError: integer division or modulo by zero
TestResults(failed=1, attempted=1)
"""
def displayhook(): r"""
Test that changing sys.displayhook doesn't matter for doctest.
>>> import sys
>>> orig_displayhook = sys.displayhook
>>> def my_displayhook(x):
... print('hi!')
>>> sys.displayhook = my_displayhook
>>> def f():
... '''
... >>> 3
... 3
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> r = doctest.DocTestRunner(verbose=False).run(test)
>>> post_displayhook = sys.displayhook
We need to restore sys.displayhook now, so that we'll be able to test
results.
>>> sys.displayhook = orig_displayhook
Ok, now we can check that everything is ok.
>>> r
TestResults(failed=0, attempted=1)
>>> post_displayhook is my_displayhook
True
"""
def optionflags(): r"""
Tests of `DocTestRunner`'s option flag handling.
Several option flags can be used to customize the behavior of the test
runner. These are defined as module constants in doctest, and passed
to the DocTestRunner constructor (multiple constants should be ORed
together).
The DONT_ACCEPT_TRUE_FOR_1 flag disables matches between True/False
and 1/0:
>>> def f(x):
... '>>> True\n1\n'
>>> # Without the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=1)
>>> # With the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.DONT_ACCEPT_TRUE_FOR_1
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
True
Expected:
1
Got:
True
TestResults(failed=1, attempted=1)
The DONT_ACCEPT_BLANKLINE flag disables the match between blank lines
and the '<BLANKLINE>' marker:
>>> def f(x):
... '>>> print "a\\n\\nb"\na\n<BLANKLINE>\nb\n'
>>> # Without the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=1)
>>> # With the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.DONT_ACCEPT_BLANKLINE
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
print "a\n\nb"
Expected:
a
<BLANKLINE>
b
Got:
a
<BLANKLINE>
b
TestResults(failed=1, attempted=1)
The NORMALIZE_WHITESPACE flag causes all sequences of whitespace to be
treated as equal:
>>> def f(x):
... '>>> print 1, 2, 3\n 1 2\n 3'
>>> # Without the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
print 1, 2, 3
Expected:
1 2
3
Got:
1 2 3
TestResults(failed=1, attempted=1)
>>> # With the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.NORMALIZE_WHITESPACE
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
TestResults(failed=0, attempted=1)
An example from the docs:
>>> print range(20) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
The ELLIPSIS flag causes ellipsis marker ("...") in the expected
output to match any substring in the actual output:
>>> def f(x):
... '>>> print range(15)\n[0, 1, 2, ..., 14]\n'
>>> # Without the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
print range(15)
Expected:
[0, 1, 2, ..., 14]
Got:
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
TestResults(failed=1, attempted=1)
>>> # With the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.ELLIPSIS
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
TestResults(failed=0, attempted=1)
... also matches nothing:
>>> for i in range(100):
... print i**2, #doctest: +ELLIPSIS
0 1...4...9 16 ... 36 49 64 ... 9801
... can be surprising; e.g., this test passes:
>>> for i in range(21): #doctest: +ELLIPSIS
... print i,
0 1 2 ...1...2...0
Examples from the docs:
>>> print range(20) # doctest:+ELLIPSIS
[0, 1, ..., 18, 19]
>>> print range(20) # doctest: +ELLIPSIS
... # doctest: +NORMALIZE_WHITESPACE
[0, 1, ..., 18, 19]
The SKIP flag causes an example to be skipped entirely. I.e., the
example is not run. It can be useful in contexts where doctest
examples serve as both documentation and test cases, and an example
should be included for documentation purposes, but should not be
checked (e.g., because its output is random, or depends on resources
which would be unavailable.) The SKIP flag can also be used for
'commenting out' broken examples.
>>> import unavailable_resource # doctest: +SKIP
>>> unavailable_resource.do_something() # doctest: +SKIP
>>> unavailable_resource.blow_up() # doctest: +SKIP
Traceback (most recent call last):
...
UncheckedBlowUpError: Nobody checks me.
>>> import random
>>> print random.random() # doctest: +SKIP
0.721216923889
The REPORT_UDIFF flag causes failures that involve multi-line expected
and actual outputs to be displayed using a unified diff:
>>> def f(x):
... r'''
... >>> print '\n'.join('abcdefg')
... a
... B
... c
... d
... f
... g
... h
... '''
>>> # Without the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 3, in f
Failed example:
print '\n'.join('abcdefg')
Expected:
a
B
c
d
f
g
h
Got:
a
b
c
d
e
f
g
TestResults(failed=1, attempted=1)
>>> # With the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.REPORT_UDIFF
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 3, in f
Failed example:
print '\n'.join('abcdefg')
Differences (unified diff with -expected +actual):
@@ -1,7 +1,7 @@
a
-B
+b
c
d
+e
f
g
-h
TestResults(failed=1, attempted=1)
The REPORT_CDIFF flag causes failures that involve multi-line expected
and actual outputs to be displayed using a context diff:
>>> # Reuse f() from the REPORT_UDIFF example, above.
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.REPORT_CDIFF
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 3, in f
Failed example:
print '\n'.join('abcdefg')
Differences (context diff with expected followed by actual):
***************
*** 1,7 ****
a
! B
c
d
f
g
- h
--- 1,7 ----
a
! b
c
d
+ e
f
g
TestResults(failed=1, attempted=1)
The REPORT_NDIFF flag causes failures to use the difflib.Differ algorithm
used by the popular ndiff.py utility. This does intraline difference
marking, as well as interline differences.
>>> def f(x):
... r'''
... >>> print "a b c d e f g h i j k l m"
... a b c d e f g h i j k 1 m
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.REPORT_NDIFF
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 3, in f
Failed example:
print "a b c d e f g h i j k l m"
Differences (ndiff with -expected +actual):
- a b c d e f g h i j k 1 m
? ^
+ a b c d e f g h i j k l m
? + ++ ^
TestResults(failed=1, attempted=1)
The REPORT_ONLY_FIRST_FAILURE suppresses result output after the first
failing example:
>>> def f(x):
... r'''
... >>> print 1 # first success
... 1
... >>> print 2 # first failure
... 200
... >>> print 3 # second failure
... 300
... >>> print 4 # second success
... 4
... >>> print 5 # third failure
... 500
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.REPORT_ONLY_FIRST_FAILURE
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 5, in f
Failed example:
print 2 # first failure
Expected:
200
Got:
2
TestResults(failed=3, attempted=5)
However, output from `report_start` is not suppressed:
>>> doctest.DocTestRunner(verbose=True, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
Trying:
print 1 # first success
Expecting:
1
ok
Trying:
print 2 # first failure
Expecting:
200
**********************************************************************
File ..., line 5, in f
Failed example:
print 2 # first failure
Expected:
200
Got:
2
TestResults(failed=3, attempted=5)
For the purposes of REPORT_ONLY_FIRST_FAILURE, unexpected exceptions
count as failures:
>>> def f(x):
... r'''
... >>> print 1 # first success
... 1
... >>> raise ValueError(2) # first failure
... 200
... >>> print 3 # second failure
... 300
... >>> print 4 # second success
... 4
... >>> print 5 # third failure
... 500
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.REPORT_ONLY_FIRST_FAILURE
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 5, in f
Failed example:
raise ValueError(2) # first failure
Exception raised:
...
ValueError: 2
TestResults(failed=3, attempted=5)
New option flags can also be registered, via register_optionflag(). Here
we reach into doctest's internals a bit.
>>> unlikely = "UNLIKELY_OPTION_NAME"
>>> unlikely in doctest.OPTIONFLAGS_BY_NAME
False
>>> new_flag_value = doctest.register_optionflag(unlikely)
>>> unlikely in doctest.OPTIONFLAGS_BY_NAME
True
Before 2.4.4/2.5, registering a name more than once erroneously created
more than one flag value. Here we verify that's fixed:
>>> redundant_flag_value = doctest.register_optionflag(unlikely)
>>> redundant_flag_value == new_flag_value
True
Clean up.
>>> del doctest.OPTIONFLAGS_BY_NAME[unlikely]
"""
def option_directives(): r"""
Tests of `DocTestRunner`'s option directive mechanism.
Option directives can be used to turn option flags on or off for a
single example. To turn an option on for an example, follow that
example with a comment of the form ``# doctest: +OPTION``:
>>> def f(x): r'''
... >>> print range(10) # should fail: no ellipsis
... [0, 1, ..., 9]
...
... >>> print range(10) # doctest: +ELLIPSIS
... [0, 1, ..., 9]
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
print range(10) # should fail: no ellipsis
Expected:
[0, 1, ..., 9]
Got:
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
TestResults(failed=1, attempted=2)
To turn an option off for an example, follow that example with a
comment of the form ``# doctest: -OPTION``:
>>> def f(x): r'''
... >>> print range(10)
... [0, 1, ..., 9]
...
... >>> # should fail: no ellipsis
... >>> print range(10) # doctest: -ELLIPSIS
... [0, 1, ..., 9]
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False,
... optionflags=doctest.ELLIPSIS).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 6, in f
Failed example:
print range(10) # doctest: -ELLIPSIS
Expected:
[0, 1, ..., 9]
Got:
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
TestResults(failed=1, attempted=2)
Option directives affect only the example that they appear with; they
do not change the options for surrounding examples:
>>> def f(x): r'''
... >>> print range(10) # Should fail: no ellipsis
... [0, 1, ..., 9]
...
... >>> print range(10) # doctest: +ELLIPSIS
... [0, 1, ..., 9]
...
... >>> print range(10) # Should fail: no ellipsis
... [0, 1, ..., 9]
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
print range(10) # Should fail: no ellipsis
Expected:
[0, 1, ..., 9]
Got:
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
**********************************************************************
File ..., line 8, in f
Failed example:
print range(10) # Should fail: no ellipsis
Expected:
[0, 1, ..., 9]
Got:
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
TestResults(failed=2, attempted=3)
Multiple options may be modified by a single option directive. They
may be separated by whitespace, commas, or both:
>>> def f(x): r'''
... >>> print range(10) # Should fail
... [0, 1, ..., 9]
... >>> print range(10) # Should succeed
... ... # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
... [0, 1, ..., 9]
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
print range(10) # Should fail
Expected:
[0, 1, ..., 9]
Got:
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
TestResults(failed=1, attempted=2)
>>> def f(x): r'''
... >>> print range(10) # Should fail
... [0, 1, ..., 9]
... >>> print range(10) # Should succeed
... ... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
... [0, 1, ..., 9]
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
print range(10) # Should fail
Expected:
[0, 1, ..., 9]
Got:
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
TestResults(failed=1, attempted=2)
>>> def f(x): r'''
... >>> print range(10) # Should fail
... [0, 1, ..., 9]
... >>> print range(10) # Should succeed
... ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... [0, 1, ..., 9]
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
print range(10) # Should fail
Expected:
[0, 1, ..., 9]
Got:
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
TestResults(failed=1, attempted=2)
The option directive may be put on the line following the source, as
long as a continuation prompt is used:
>>> def f(x): r'''
... >>> print range(10)
... ... # doctest: +ELLIPSIS
... [0, 1, ..., 9]
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=1)
For examples with multi-line source, the option directive may appear
at the end of any line:
>>> def f(x): r'''
... >>> for x in range(10): # doctest: +ELLIPSIS
... ... print x,
... 0 1 2 ... 9
...
... >>> for x in range(10):
... ... print x, # doctest: +ELLIPSIS
... 0 1 2 ... 9
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=2)
If more than one line of an example with multi-line source has an
option directive, then they are combined:
>>> def f(x): r'''
... Should fail (option directive not on the last line):
... >>> for x in range(10): # doctest: +ELLIPSIS
... ... print x, # doctest: +NORMALIZE_WHITESPACE
... 0 1 2...9
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=1)
It is an error to have a comment of the form ``# doctest:`` that is
*not* followed by words of the form ``+OPTION`` or ``-OPTION``, where
``OPTION`` is an option that has been registered with
`register_option`:
>>> # Error: Option not registered
>>> s = '>>> print 12 #doctest: +BADOPTION'
>>> test = doctest.DocTestParser().get_doctest(s, {}, 's', 's.py', 0)
Traceback (most recent call last):
ValueError: line 1 of the doctest for s has an invalid option: '+BADOPTION'
>>> # Error: No + or - prefix
>>> s = '>>> print 12 #doctest: ELLIPSIS'
>>> test = doctest.DocTestParser().get_doctest(s, {}, 's', 's.py', 0)
Traceback (most recent call last):
ValueError: line 1 of the doctest for s has an invalid option: 'ELLIPSIS'
It is an error to use an option directive on a line that contains no
source:
>>> s = '>>> # doctest: +ELLIPSIS'
>>> test = doctest.DocTestParser().get_doctest(s, {}, 's', 's.py', 0)
Traceback (most recent call last):
ValueError: line 0 of the doctest for s has an option directive on a line with no example: '# doctest: +ELLIPSIS'
"""
def test_unicode_output(self): r"""
Check that unicode output works:
>>> u'\xe9'
u'\xe9'
    If we return unicode, SpoofOut's buf variable gets automagically
    converted to unicode. This means all subsequent output gets converted
    to unicode as well, and if that output contained non-ascii characters
    it failed. This state change used to carry over between tests, meaning
    tests would fail if unicode had been output earlier in the test run.
    This test checks that this is no longer so:
>>> print u'abc'
abc
And then return a string with non-ascii characters:
>>> print u'\xe9'.encode('utf-8')
é
"""
def test_testsource(): r"""
Unit tests for `testsource()`.
The testsource() function takes a module and a name, finds the (first)
test with that name in that module, and converts it to a script. The
example code is converted to regular Python code. The surrounding
words and expected output are converted to comments:
>>> import test.test_doctest
>>> name = 'test.test_doctest.sample_func'
>>> print doctest.testsource(test.test_doctest, name)
# Blah blah
#
print sample_func(22)
# Expected:
## 44
#
# Yee ha!
<BLANKLINE>
>>> name = 'test.test_doctest.SampleNewStyleClass'
>>> print doctest.testsource(test.test_doctest, name)
print '1\n2\n3'
# Expected:
## 1
## 2
## 3
<BLANKLINE>
>>> name = 'test.test_doctest.SampleClass.a_classmethod'
>>> print doctest.testsource(test.test_doctest, name)
print SampleClass.a_classmethod(10)
# Expected:
## 12
print SampleClass(0).a_classmethod(10)
# Expected:
## 12
<BLANKLINE>
"""
def test_debug(): r"""
Create a docstring that we want to debug:
>>> s = '''
... >>> x = 12
... >>> print x
... 12
... '''
Create some fake stdin input, to feed to the debugger:
>>> import tempfile
>>> real_stdin = sys.stdin
>>> sys.stdin = _FakeInput(['next', 'print x', 'continue'])
Run the debugger on the docstring, and then restore sys.stdin.
>>> try: doctest.debug_src(s)
... finally: sys.stdin = real_stdin
> <string>(1)<module>()
(Pdb) next
12
--Return--
> <string>(1)<module>()->None
(Pdb) print x
12
(Pdb) continue
"""
def test_pdb_set_trace():
"""Using pdb.set_trace from a doctest.
You can use pdb.set_trace from a doctest. To do so, you must
retrieve the set_trace function from the pdb module at the time
you use it. The doctest module changes sys.stdout so that it can
capture program output. It also temporarily replaces pdb.set_trace
with a version that restores stdout. This is necessary for you to
see debugger output.
>>> doc = '''
... >>> x = 42
... >>> raise Exception('clé')
... Traceback (most recent call last):
... Exception: clé
... >>> import pdb; pdb.set_trace()
... '''
>>> parser = doctest.DocTestParser()
>>> test = parser.get_doctest(doc, {}, "foo-bär@baz", "foo-bä[email protected]", 0)
>>> runner = doctest.DocTestRunner(verbose=False)
To demonstrate this, we'll create a fake standard input that
captures our debugger input:
>>> import tempfile
>>> real_stdin = sys.stdin
>>> sys.stdin = _FakeInput([
... 'print x', # print data defined by the example
... 'continue', # stop debugging
... ''])
>>> try: runner.run(test)
... finally: sys.stdin = real_stdin
--Return--
> <doctest foo-bär@baz[2]>(1)<module>()->None
-> import pdb; pdb.set_trace()
(Pdb) print x
42
(Pdb) continue
TestResults(failed=0, attempted=3)
You can also put pdb.set_trace in a function called from a test:
>>> def calls_set_trace():
... y=2
... import pdb; pdb.set_trace()
>>> doc = '''
... >>> x=1
... >>> calls_set_trace()
... '''
>>> test = parser.get_doctest(doc, globals(), "foo-bär@baz", "foo-bä[email protected]", 0)
>>> real_stdin = sys.stdin
>>> sys.stdin = _FakeInput([
... 'print y', # print data defined in the function
... 'up', # out of function
... 'print x', # print data defined by the example
... 'continue', # stop debugging
... ''])
>>> try:
... runner.run(test)
... finally:
... sys.stdin = real_stdin
--Return--
> <doctest test.test_doctest.test_pdb_set_trace[8]>(3)calls_set_trace()->None
-> import pdb; pdb.set_trace()
(Pdb) print y
2
(Pdb) up
> <doctest foo-bär@baz[1]>(1)<module>()
-> calls_set_trace()
(Pdb) print x
1
(Pdb) continue
TestResults(failed=0, attempted=2)
During interactive debugging, source code is shown, even for
doctest examples:
>>> doc = '''
... >>> def f(x):
... ... g(x*2)
... >>> def g(x):
... ... print x+3
... ... import pdb; pdb.set_trace()
... >>> f(3)
... '''
>>> test = parser.get_doctest(doc, globals(), "foo-bär@baz", "foo-bä[email protected]", 0)
>>> real_stdin = sys.stdin
>>> sys.stdin = _FakeInput([
... 'list', # list source from example 2
... 'next', # return from g()
... 'list', # list source from example 1
... 'next', # return from f()
... 'list', # list source from example 3
... 'continue', # stop debugging
... ''])
>>> try: runner.run(test)
... finally: sys.stdin = real_stdin
... # doctest: +NORMALIZE_WHITESPACE
--Return--
> <doctest foo-bär@baz[1]>(3)g()->None
-> import pdb; pdb.set_trace()
(Pdb) list
1 def g(x):
2 print x+3
3 -> import pdb; pdb.set_trace()
[EOF]
(Pdb) next
--Return--
> <doctest foo-bär@baz[0]>(2)f()->None
-> g(x*2)
(Pdb) list
1 def f(x):
2 -> g(x*2)
[EOF]
(Pdb) next
--Return--
> <doctest foo-bär@baz[2]>(1)<module>()->None
-> f(3)
(Pdb) list
1 -> f(3)
[EOF]
(Pdb) continue
**********************************************************************
File "foo-bä[email protected]", line 7, in foo-bär@baz
Failed example:
f(3)
Expected nothing
Got:
9
TestResults(failed=1, attempted=3)
"""
def test_pdb_set_trace_nested():
"""This illustrates more-demanding use of set_trace with nested functions.
>>> class C(object):
... def calls_set_trace(self):
... y = 1
... import pdb; pdb.set_trace()
... self.f1()
... y = 2
... def f1(self):
... x = 1
... self.f2()
... x = 2
... def f2(self):
... z = 1
... z = 2
>>> calls_set_trace = C().calls_set_trace
>>> doc = '''
... >>> a = 1
... >>> calls_set_trace()
... '''
>>> parser = doctest.DocTestParser()
>>> runner = doctest.DocTestRunner(verbose=False)
>>> test = parser.get_doctest(doc, globals(), "foo-bär@baz", "foo-bä[email protected]", 0)
>>> real_stdin = sys.stdin
>>> sys.stdin = _FakeInput([
... 'print y', # print data defined in the function
... 'step', 'step', 'step', 'step', 'step', 'step', 'print z',
... 'up', 'print x',
... 'up', 'print y',
... 'up', 'print foo',
... 'continue', # stop debugging
... ''])
>>> try:
... runner.run(test)
... finally:
... sys.stdin = real_stdin
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(5)calls_set_trace()
-> self.f1()
(Pdb) print y
1
(Pdb) step
--Call--
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(7)f1()
-> def f1(self):
(Pdb) step
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(8)f1()
-> x = 1
(Pdb) step
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(9)f1()
-> self.f2()
(Pdb) step
--Call--
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(11)f2()
-> def f2(self):
(Pdb) step
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(12)f2()
-> z = 1
(Pdb) step
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(13)f2()
-> z = 2
(Pdb) print z
1
(Pdb) up
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(9)f1()
-> self.f2()
(Pdb) print x
1
(Pdb) up
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(5)calls_set_trace()
-> self.f1()
(Pdb) print y
1
(Pdb) up
> <doctest foo-bär@baz[1]>(1)<module>()
-> calls_set_trace()
(Pdb) print foo
*** NameError: name 'foo' is not defined
(Pdb) continue
TestResults(failed=0, attempted=2)
"""
def test_DocTestSuite():
"""DocTestSuite creates a unittest test suite from a doctest.
We create a Suite by providing a module. A module can be provided
by passing a module object:
>>> import unittest
>>> import test.sample_doctest
>>> suite = doctest.DocTestSuite(test.sample_doctest)
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=9 errors=0 failures=4>
We can also supply the module by name:
>>> suite = doctest.DocTestSuite('test.sample_doctest')
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=9 errors=0 failures=4>
We can use the current module:
>>> suite = test.sample_doctest.test_suite()
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=9 errors=0 failures=4>
We can supply global variables. If we pass globs, they will be
used instead of the module globals. Here we'll pass an empty
globals, triggering an extra error:
>>> suite = doctest.DocTestSuite('test.sample_doctest', globs={})
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=9 errors=0 failures=5>
Alternatively, we can provide extra globals. Here we'll make an
error go away by providing an extra global variable:
>>> suite = doctest.DocTestSuite('test.sample_doctest',
... extraglobs={'y': 1})
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=9 errors=0 failures=3>
You can pass option flags. Here we'll cause an extra error
by disabling the blank-line feature:
>>> suite = doctest.DocTestSuite('test.sample_doctest',
... optionflags=doctest.DONT_ACCEPT_BLANKLINE)
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=9 errors=0 failures=5>
You can supply setUp and tearDown functions:
>>> def setUp(t):
... import test.test_doctest
... test.test_doctest.sillySetup = True
>>> def tearDown(t):
... import test.test_doctest
... del test.test_doctest.sillySetup
Here, we installed a silly variable that the test expects:
>>> suite = doctest.DocTestSuite('test.sample_doctest',
... setUp=setUp, tearDown=tearDown)
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=9 errors=0 failures=3>
But the tearDown restores sanity:
>>> import test.test_doctest
>>> test.test_doctest.sillySetup
Traceback (most recent call last):
...
AttributeError: 'module' object has no attribute 'sillySetup'
The setUp and tearDown functions are passed test objects. Here
we'll use the setUp function to supply the missing variable y:
>>> def setUp(test):
... test.globs['y'] = 1
>>> suite = doctest.DocTestSuite('test.sample_doctest', setUp=setUp)
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=9 errors=0 failures=3>
Here, we didn't need to use a tearDown function because we
modified the test globals, which are a copy of the
sample_doctest module dictionary. The test globals are
automatically cleared for us after a test.
"""
def test_DocFileSuite():
"""We can test tests found in text files using a DocFileSuite.
We create a suite by providing the names of one or more text
files that include examples:
>>> import unittest
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... 'test_doctest2.txt',
... 'test_doctest4.txt')
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=3 errors=0 failures=3>
The test files are looked for in the directory containing the
calling module. A package keyword argument can be provided to
specify a different relative location.
>>> import unittest
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... 'test_doctest2.txt',
... 'test_doctest4.txt',
... package='test')
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=3 errors=0 failures=3>
Support for using a package's __loader__.get_data() is also
provided.
>>> import unittest, pkgutil, test
>>> added_loader = False
>>> if not hasattr(test, '__loader__'):
... test.__loader__ = pkgutil.get_loader(test)
... added_loader = True
>>> try:
... suite = doctest.DocFileSuite('test_doctest.txt',
... 'test_doctest2.txt',
... 'test_doctest4.txt',
... package='test')
... suite.run(unittest.TestResult())
... finally:
... if added_loader:
... del test.__loader__
<unittest.result.TestResult run=3 errors=0 failures=3>
'/' should be used as a path separator. It will be converted
to a native separator at run time:
>>> suite = doctest.DocFileSuite('../test/test_doctest.txt')
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=1 errors=0 failures=1>
If DocFileSuite is used from an interactive session, then files
are resolved relative to the directory of sys.argv[0]:
>>> import types, os.path, test.test_doctest
>>> save_argv = sys.argv
>>> sys.argv = [test.test_doctest.__file__]
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... package=types.ModuleType('__main__'))
>>> sys.argv = save_argv
By setting `module_relative=False`, os-specific paths may be
used (including absolute paths and paths relative to the
working directory):
>>> # Get the absolute path of the test package.
>>> test_doctest_path = os.path.abspath(test.test_doctest.__file__)
>>> test_pkg_path = os.path.split(test_doctest_path)[0]
>>> # Use it to find the absolute path of test_doctest.txt.
>>> test_file = os.path.join(test_pkg_path, 'test_doctest.txt')
>>> suite = doctest.DocFileSuite(test_file, module_relative=False)
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=1 errors=0 failures=1>
It is an error to specify `package` when `module_relative=False`:
>>> suite = doctest.DocFileSuite(test_file, module_relative=False,
... package='test')
Traceback (most recent call last):
ValueError: Package may only be specified for module-relative paths.
You can specify initial global variables:
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... 'test_doctest2.txt',
... 'test_doctest4.txt',
... globs={'favorite_color': 'blue'})
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=3 errors=0 failures=2>
In this case, we supplied a missing favorite color. You can
provide doctest options:
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... 'test_doctest2.txt',
... 'test_doctest4.txt',
... optionflags=doctest.DONT_ACCEPT_BLANKLINE,
... globs={'favorite_color': 'blue'})
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=3 errors=0 failures=3>
And, you can provide setUp and tearDown functions:
>>> def setUp(t):
... import test.test_doctest
... test.test_doctest.sillySetup = True
>>> def tearDown(t):
... import test.test_doctest
... del test.test_doctest.sillySetup
Here, we installed a silly variable that the test expects:
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... 'test_doctest2.txt',
... 'test_doctest4.txt',
... setUp=setUp, tearDown=tearDown)
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=3 errors=0 failures=2>
But the tearDown restores sanity:
>>> import test.test_doctest
>>> test.test_doctest.sillySetup
Traceback (most recent call last):
...
AttributeError: 'module' object has no attribute 'sillySetup'
The setUp and tearDown functions are passed test objects.
Here, we'll use a setUp function to set the favorite color in
test_doctest.txt:
>>> def setUp(test):
... test.globs['favorite_color'] = 'blue'
>>> suite = doctest.DocFileSuite('test_doctest.txt', setUp=setUp)
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=1 errors=0 failures=0>
Here, we didn't need to use a tearDown function because we
modified the test globals. The test globals are
automatically cleared for us after a test.
Tests in a file run using `DocFileSuite` can also access the
`__file__` global, which is set to the name of the file
containing the tests:
>>> suite = doctest.DocFileSuite('test_doctest3.txt')
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=1 errors=0 failures=0>
If the tests contain non-ASCII characters, we have to specify which
encoding the file is encoded with. We do so by using the `encoding`
parameter:
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... 'test_doctest2.txt',
... 'test_doctest4.txt',
... encoding='utf-8')
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=3 errors=0 failures=2>
"""
def test_trailing_space_in_test():
"""
Trailing spaces in expected output are significant:
>>> x, y = 'foo', ''
>>> print x, y
foo \n
"""
def test_unittest_reportflags():
"""Default unittest reporting flags can be set to control reporting
Here, we'll set the REPORT_ONLY_FIRST_FAILURE option so we see
only the first failure of each test. First, we'll look at the
output without the flag. The file test_doctest.txt has two
tests. They both fail if blank lines are disabled:
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... optionflags=doctest.DONT_ACCEPT_BLANKLINE)
>>> import unittest
>>> result = suite.run(unittest.TestResult())
>>> print result.failures[0][1] # doctest: +ELLIPSIS
Traceback ...
Failed example:
favorite_color
...
Failed example:
if 1:
...
Note that we see both failures displayed.
>>> old = doctest.set_unittest_reportflags(
... doctest.REPORT_ONLY_FIRST_FAILURE)
Now, when we run the test:
>>> result = suite.run(unittest.TestResult())
>>> print result.failures[0][1] # doctest: +ELLIPSIS
Traceback ...
Failed example:
favorite_color
Exception raised:
...
NameError: name 'favorite_color' is not defined
<BLANKLINE>
<BLANKLINE>
We get only the first failure.
If we give any reporting options when we set up the tests,
however:
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... optionflags=doctest.DONT_ACCEPT_BLANKLINE | doctest.REPORT_NDIFF)
Then the default reporting options are ignored:
>>> result = suite.run(unittest.TestResult())
>>> print result.failures[0][1] # doctest: +ELLIPSIS
Traceback ...
Failed example:
favorite_color
...
Failed example:
if 1:
print 'a'
print
print 'b'
Differences (ndiff with -expected +actual):
a
- <BLANKLINE>
+
b
<BLANKLINE>
<BLANKLINE>
Test runners can restore the formatting flags after they run:
>>> ignored = doctest.set_unittest_reportflags(old)
"""
def test_testfile(): r"""
Tests for the `testfile()` function. This function runs all the
doctest examples in a given file. In its simple invocation, it is
called with the name of a file, which is taken to be relative to the
calling module. The return value is (#failures, #tests).
We don't want `-v` in sys.argv for these tests.
>>> save_argv = sys.argv
>>> if '-v' in sys.argv:
... sys.argv = [arg for arg in save_argv if arg != '-v']
>>> doctest.testfile('test_doctest.txt') # doctest: +ELLIPSIS
**********************************************************************
File "...", line 6, in test_doctest.txt
Failed example:
favorite_color
Exception raised:
...
NameError: name 'favorite_color' is not defined
**********************************************************************
1 items had failures:
1 of 2 in test_doctest.txt
***Test Failed*** 1 failures.
TestResults(failed=1, attempted=2)
>>> doctest.master = None # Reset master.
(Note: we'll be clearing doctest.master after each call to
`doctest.testfile`, to suppress warnings about multiple tests with the
same name.)
Globals may be specified with the `globs` and `extraglobs` parameters:
>>> globs = {'favorite_color': 'blue'}
>>> doctest.testfile('test_doctest.txt', globs=globs)
TestResults(failed=0, attempted=2)
>>> doctest.master = None # Reset master.
>>> extraglobs = {'favorite_color': 'red'}
>>> doctest.testfile('test_doctest.txt', globs=globs,
... extraglobs=extraglobs) # doctest: +ELLIPSIS
**********************************************************************
File "...", line 6, in test_doctest.txt
Failed example:
favorite_color
Expected:
'blue'
Got:
'red'
**********************************************************************
1 items had failures:
1 of 2 in test_doctest.txt
***Test Failed*** 1 failures.
TestResults(failed=1, attempted=2)
>>> doctest.master = None # Reset master.
The file may be made relative to a given module or package, using the
optional `module_relative` parameter:
>>> doctest.testfile('test_doctest.txt', globs=globs,
... module_relative='test')
TestResults(failed=0, attempted=2)
>>> doctest.master = None # Reset master.
Verbosity can be increased with the optional `verbose` parameter:
>>> doctest.testfile('test_doctest.txt', globs=globs, verbose=True)
Trying:
favorite_color
Expecting:
'blue'
ok
Trying:
if 1:
print 'a'
print
print 'b'
Expecting:
a
<BLANKLINE>
b
ok
1 items passed all tests:
2 tests in test_doctest.txt
2 tests in 1 items.
2 passed and 0 failed.
Test passed.
TestResults(failed=0, attempted=2)
>>> doctest.master = None # Reset master.
The name of the test may be specified with the optional `name`
parameter:
>>> doctest.testfile('test_doctest.txt', name='newname')
... # doctest: +ELLIPSIS
**********************************************************************
File "...", line 6, in newname
...
TestResults(failed=1, attempted=2)
>>> doctest.master = None # Reset master.
The summary report may be suppressed with the optional `report`
parameter:
>>> doctest.testfile('test_doctest.txt', report=False)
... # doctest: +ELLIPSIS
**********************************************************************
File "...", line 6, in test_doctest.txt
Failed example:
favorite_color
Exception raised:
...
NameError: name 'favorite_color' is not defined
TestResults(failed=1, attempted=2)
>>> doctest.master = None # Reset master.
The optional keyword argument `raise_on_error` can be used to raise an
exception on the first error (which may be useful for postmortem
debugging):
>>> doctest.testfile('test_doctest.txt', raise_on_error=True)
... # doctest: +ELLIPSIS
Traceback (most recent call last):
UnexpectedException: ...
>>> doctest.master = None # Reset master.
If the tests contain non-ASCII characters, the tests might fail, since
it's unknown which encoding is used. The encoding can be specified
using the optional keyword argument `encoding`:
>>> doctest.testfile('test_doctest4.txt') # doctest: +ELLIPSIS
**********************************************************************
File "...", line 7, in test_doctest4.txt
Failed example:
u'...'
Expected:
u'f\xf6\xf6'
Got:
u'f\xc3\xb6\xc3\xb6'
**********************************************************************
...
**********************************************************************
1 items had failures:
2 of 4 in test_doctest4.txt
***Test Failed*** 2 failures.
TestResults(failed=2, attempted=4)
>>> doctest.master = None # Reset master.
>>> doctest.testfile('test_doctest4.txt', encoding='utf-8')
TestResults(failed=0, attempted=4)
>>> doctest.master = None # Reset master.
Switch the module encoding to 'utf-8' to test the verbose output without
bothering with the current sys.stdout encoding.
>>> doctest._encoding, saved_encoding = 'utf-8', doctest._encoding
>>> doctest.testfile('test_doctest4.txt', encoding='utf-8', verbose=True)
Trying:
u'föö'
Expecting:
u'f\xf6\xf6'
ok
Trying:
u'bąr'
Expecting:
u'b\u0105r'
ok
Trying:
'föö'
Expecting:
'f\xc3\xb6\xc3\xb6'
ok
Trying:
'bąr'
Expecting:
'b\xc4\x85r'
ok
1 items passed all tests:
4 tests in test_doctest4.txt
4 tests in 1 items.
4 passed and 0 failed.
Test passed.
TestResults(failed=0, attempted=4)
>>> doctest._encoding = saved_encoding
>>> doctest.master = None # Reset master.
>>> sys.argv = save_argv
"""
# old_test1, ... used to live in doctest.py, but cluttered it. Note
# that these use the deprecated doctest.Tester, so should go away (or
# be rewritten) someday.
def old_test1(): r"""
>>> from doctest import Tester
>>> t = Tester(globs={'x': 42}, verbose=0)
>>> t.runstring(r'''
... >>> x = x * 2
... >>> print x
... 42
... ''', 'XYZ')
**********************************************************************
Line 3, in XYZ
Failed example:
print x
Expected:
42
Got:
84
TestResults(failed=1, attempted=2)
>>> t.runstring(">>> x = x * 2\n>>> print x\n84\n", 'example2')
TestResults(failed=0, attempted=2)
>>> t.summarize()
**********************************************************************
1 items had failures:
1 of 2 in XYZ
***Test Failed*** 1 failures.
TestResults(failed=1, attempted=4)
>>> t.summarize(verbose=1)
1 items passed all tests:
2 tests in example2
**********************************************************************
1 items had failures:
1 of 2 in XYZ
4 tests in 2 items.
3 passed and 1 failed.
***Test Failed*** 1 failures.
TestResults(failed=1, attempted=4)
"""
def old_test2(): r"""
>>> from doctest import Tester
>>> t = Tester(globs={}, verbose=1)
>>> test = r'''
... # just an example
... >>> x = 1 + 2
... >>> x
... 3
... '''
>>> t.runstring(test, "Example")
Running string Example
Trying:
x = 1 + 2
Expecting nothing
ok
Trying:
x
Expecting:
3
ok
0 of 2 examples failed in string Example
TestResults(failed=0, attempted=2)
"""
def old_test3(): r"""
>>> from doctest import Tester
>>> t = Tester(globs={}, verbose=0)
>>> def _f():
... '''Trivial docstring example.
... >>> assert 2 == 2
... '''
... return 32
...
>>> t.rundoc(_f) # expect 0 failures in 1 example
TestResults(failed=0, attempted=1)
"""
def old_test4(): """
>>> import types
>>> m1 = types.ModuleType('_m1')
>>> m2 = types.ModuleType('_m2')
>>> test_data = \"""
... def _f():
... '''>>> assert 1 == 1
... '''
... def g():
... '''>>> assert 2 != 1
... '''
... class H:
... '''>>> assert 2 > 1
... '''
... def bar(self):
... '''>>> assert 1 < 2
... '''
... \"""
>>> exec test_data in m1.__dict__
>>> exec test_data in m2.__dict__
>>> m1.__dict__.update({"f2": m2._f, "g2": m2.g, "h2": m2.H})
Tests that objects outside m1 are excluded:
>>> from doctest import Tester
>>> t = Tester(globs={}, verbose=0)
>>> t.rundict(m1.__dict__, "rundict_test", m1) # f2 and g2 and h2 skipped
TestResults(failed=0, attempted=4)
Once more, not excluding stuff outside m1:
>>> t = Tester(globs={}, verbose=0)
>>> t.rundict(m1.__dict__, "rundict_test_pvt") # None are skipped.
TestResults(failed=0, attempted=8)
The exclusion of objects from outside the designated module is
meant to be invoked automagically by testmod.
>>> doctest.testmod(m1, verbose=False)
TestResults(failed=0, attempted=4)
"""
######################################################################
## Main
######################################################################
def test_main():
# Check the doctest cases in doctest itself:
test_support.run_doctest(doctest, verbosity=True)
from test import test_doctest
# Ignore all warnings about the use of class Tester in this module.
deprecations = [("class Tester is deprecated", DeprecationWarning)]
if sys.py3kwarning:
deprecations += [("backquote not supported", SyntaxWarning),
("execfile.. not supported", DeprecationWarning)]
with test_support.check_warnings(*deprecations):
# Check the doctest cases defined here:
test_support.run_doctest(test_doctest, verbosity=True)
import sys
def test_coverage(coverdir):
trace = test_support.import_module('trace')
tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix,],
trace=0, count=1)
tracer.run('reload(doctest); test_main()')
r = tracer.results()
print 'Writing coverage results...'
r.write_results(show_missing=True, summary=True,
coverdir=coverdir)
if __name__ == '__main__':
if '-c' in sys.argv:
test_coverage('/tmp/doctest.cover')
else:
test_main()
| gpl-2.0 |
Amechi101/concepteur-market-app | venv/lib/python2.7/site-packages/account/migrations/0002_initial.py | 2 | 12387 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
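# A brief usage sketch (assuming a standard South setup, with 'south' and
# 'account' in INSTALLED_APPS; the migration labels are inferred from the
# file names): schema migrations like this one are applied and reverted
# with South's migrate management command.
#
#   python manage.py migrate account 0002_initial
#   python manage.py migrate account 0001_initial  # invokes backwards()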
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Account'
db.create_table(u'account_account', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(related_name=u'account', unique=True, to=orm['auth.User'])),
('timezone', self.gf('account.fields.TimeZoneField')(default=u'', max_length=100, blank=True)),
('language', self.gf('django.db.models.fields.CharField')(default='en-us', max_length=10)),
))
db.send_create_signal(u'account', ['Account'])
# Adding model 'SignupCode'
db.create_table(u'account_signupcode', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('code', self.gf('django.db.models.fields.CharField')(unique=True, max_length=64)),
('max_uses', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('expiry', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('inviter', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
('notes', self.gf('django.db.models.fields.TextField')(blank=True)),
('sent', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('use_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
))
db.send_create_signal(u'account', ['SignupCode'])
# Adding model 'SignupCodeResult'
db.create_table(u'account_signupcoderesult', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('signup_code', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['account.SignupCode'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('timestamp', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal(u'account', ['SignupCodeResult'])
# Adding model 'EmailAddress'
db.create_table(u'account_emailaddress', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('email', self.gf('django.db.models.fields.EmailField')(unique=True, max_length=75)),
('verified', self.gf('django.db.models.fields.BooleanField')(default=False)),
('primary', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'account', ['EmailAddress'])
# Adding model 'EmailConfirmation'
db.create_table(u'account_emailconfirmation', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('email_address', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['account.EmailAddress'])),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2014, 3, 23, 0, 0))),
('sent', self.gf('django.db.models.fields.DateTimeField')(null=True)),
('key', self.gf('django.db.models.fields.CharField')(unique=True, max_length=64)),
))
db.send_create_signal(u'account', ['EmailConfirmation'])
# Adding model 'AccountDeletion'
db.create_table(u'account_accountdeletion', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, on_delete=models.SET_NULL, blank=True)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
('date_requested', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('date_expunged', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal(u'account', ['AccountDeletion'])
def backwards(self, orm):
# Deleting model 'Account'
db.delete_table(u'account_account')
# Deleting model 'SignupCode'
db.delete_table(u'account_signupcode')
# Deleting model 'SignupCodeResult'
db.delete_table(u'account_signupcoderesult')
# Deleting model 'EmailAddress'
db.delete_table(u'account_emailaddress')
# Deleting model 'EmailConfirmation'
db.delete_table(u'account_emailconfirmation')
# Deleting model 'AccountDeletion'
db.delete_table(u'account_accountdeletion')
models = {
u'account.account': {
'Meta': {'object_name': 'Account'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en-us'", 'max_length': '10'}),
'timezone': ('account.fields.TimeZoneField', [], {'default': "u''", 'max_length': '100', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'account'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'account.accountdeletion': {
'Meta': {'object_name': 'AccountDeletion'},
'date_expunged': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_requested': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'})
},
u'account.emailaddress': {
'Meta': {'object_name': 'EmailAddress'},
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'account.emailconfirmation': {
'Meta': {'object_name': 'EmailConfirmation'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 3, 23, 0, 0)'}),
'email_address': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['account.EmailAddress']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
u'account.signupcode': {
'Meta': {'object_name': 'SignupCode'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'expiry': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inviter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'max_uses': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'use_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'account.signupcoderesult': {
'Meta': {'object_name': 'SignupCodeResult'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'signup_code': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['account.SignupCode']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['account'] | mit |
wrshoemaker/ffpopsim | examples/genetic_drift.py | 2 | 2160 | '''
author: Richard Neher, Fabio Zanini
date: 11/07/12
content: Example on genetic drift using haploid_highd
'''
# Import modules (setting the path should not be necessary when the module is
# installed in the PYTHONPATH)
import sys
sys.path.insert(0, '../pkg/python')
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.cm as cm
import FFPopSim as h
# specify parameters
L = 256 # simulate 256 loci
# set up population
pop = h.haploid_highd(L) # produce an instance of haploid_highd with L loci
pop.carrying_capacity = 50000 # set the average population size to 50000
pop.outcrossing_rate = 1 # make the species obligate outcrossing
pop.crossover_rate = 0.02 / pop.L # set the crossover rate of the segment to 2 centimorgans
pop.mutation_rate = 0.1 / pop.carrying_capacity # per locus mutation rate equal to 0.1/N
# initialize the population in linkage equilibrium with the specified allele frequencies
initial_allele_frequencies = 0.5*np.ones(pop.L) # define some initial allele frequencies as 1/2
pop.set_allele_frequencies(initial_allele_frequencies, pop.carrying_capacity)
# evolve for 2000 generations and track the allele frequencies
maxgen = 2000
allele_frequencies = [pop.get_allele_frequencies()]
tp = [pop.generation]
print "Illustrate genetic drift on allele frequency trajectories."
pop.status() #print status message
while pop.generation < maxgen:
if (pop.generation % (maxgen / 10) == 0): print pop.generation, "out of", maxgen, "generations"
pop.evolve(10)
# save allele frequencies and time
allele_frequencies.append(pop.get_allele_frequencies())
tp.append(pop.generation)
# convert to an array to enable slicing
allele_frequencies = np.array(allele_frequencies)
# plot the result
plt.figure()
for locus in xrange(5,pop.L,50): # plot a few neutral trajectories
plt.plot(tp, allele_frequencies[:,locus], c=cm.cool(locus), lw=2)
plt.title('Genetic Drift')
plt.xlabel('Time [generations]')
plt.ylabel('Allele frequencies')
plt.ion()
plt.show()
| gpl-3.0 |
ntrrgc/snorky | snorky/services/pubsub.py | 1 | 3143 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from snorky.services.base import RPCService, RPCError, rpc_command
from snorky.types import MultiDict
class PubSubService(RPCService):
"""A service which allows clients to send messages to each other over Pub
Sub channels."""
def __init__(self, name):
super(PubSubService, self).__init__(name)
# channel : str -> set<Client>
self.subscriptions = MultiDict()
# Client -> set<channel : str>
self.subscriptions_by_client = MultiDict()
def do_publish(self, channel, message):
"""Common code for publishing a message."""
for client in self.subscriptions.get_set(channel):
self.send_message_to(client, {
"type": "message",
"channel": channel,
"message": message
})
@rpc_command
def publish(self, req, channel, message):
"""RPC command.
Publish a message to a channel."""
if self.can_publish(req.client, channel):
self.do_publish(channel, message)
else:
raise RPCError("Not authorized")
@rpc_command
def subscribe(self, req, channel):
"""RPC command.
Subscribe to a channel."""
if self.subscriptions.in_set(channel, req.client):
raise RPCError("Already subscribed")
self.subscriptions.add(channel, req.client)
self.subscriptions_by_client.add(req.client, channel)
@rpc_command
def unsubscribe(self, req, channel):
"""RPC command.
Cancel the subscription to a channel."""
try:
self.subscriptions.remove(channel, req.client)
self.subscriptions_by_client.remove(req.client, channel)
except KeyError:
raise RPCError("Not subscribed")
def can_publish(self, client, channel):
"""Whether a client can publish to a certain channel.
By default this always returns ``True``."""
return True
def client_disconnected(self, client):
"""Exececuted when a client disconnects. Cancels all its subscriptions.
"""
for channel in self.subscriptions_by_client.get_set(client):
self.subscriptions.remove(channel, client)
self.subscriptions_by_client.clear_set(client)
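# A rough usage sketch (hypothetical wiring; how services are actually
# registered depends on the surrounding Snorky application):
#
#   pubsub = PubSubService("pubsub")
#   backend = PubSubBackend("pubsub_backend", frontend=pubsub)
#
# A client subscribed to "news" then receives, for each publish, a message
# shaped like the one built in do_publish() above:
#
#   {"type": "message", "channel": "news", "message": <payload>}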
class PubSubBackend(RPCService):
"""Backend service which allows publishing to a Pub Sub service.
:param frontend: The :class:`PubSubService` instance messages will
be published to.
Publishing from this backend will always be allowed regardless of the
policies implemented in :meth:`PubSubService.can_publish`."""
def __init__(self, name, frontend):
""""""
super(PubSubBackend, self).__init__(name)
self.frontend = frontend
@rpc_command
def publish(self, req, channel, message):
"""RPC command.
Publish a message to a channel."""
self.frontend.do_publish(channel, message)
| mpl-2.0 |
palaniyappanBala/rekall | rekall-core/rekall/plugins/darwin/common.py | 1 | 26327 | # Rekall Memory Forensics
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# The code in this directory is based on original code and algorithms by Andrew
# Case ([email protected]).
__author__ = "Michael Cohen <[email protected]>"
import re
from rekall import kb
from rekall import obj
from rekall import plugin
from rekall import scan
from rekall import utils
from rekall.plugins import core
# A few notes on XNU's (64bit) memory layout:
#
# Because of the way the Darwin kernel (XNU) is bootstrapped, a section of its
# virtual address space maps linearly to the base of the physical address space.
# This relationship is basically:
# KERNEL_MIN_ADDRESS + the_physical_address = the_virtual_address
#
# The kernel ensures this when allocating certain data structures, most notably
# the page tables [1]. However, the kernel doesn't actually "know" the value of
# KERNEL_MIN_ADDRESS, which is defined the Makefile [2]. Instead, allocations
# are done by keeping a cursor at the lowest available physical address [3].
#
# Because of this, when the kernel needs to convert an address from the virtual
# address space to the physical address space without relying on the page
# tables, it uses a "safer" variation on the above rule and masks out the first
# 32 bits of the address using a macro called ID_MAP_VTOP [4,5], which is a
# simple bitmask (LOW_4GB_MASK [6]).
#
# We copy/adapt all three #defines below. When we need to bootstrap the virtual
# address space, relying on ID_MAP_VTOP is preferable, because it's less
# fragile. However, KERNEL_MIN_ADDRESS can be a good heuristic for deciding
# whether a particular value is a valid pointer in the kernel virtual address
# space, so I decided to keep it around.
#
# [1]
# github.com/opensource-apple/xnu/blob/10.9/osfmk/i386/i386_init.c#L134
#
# [2]
# github.com/opensource-apple/xnu/blob/10.9/makedefs/MakeInc.def#L258
#
# [3] This is where physfree is defined as the next free page, after a blank
# page, after the last page of the kernel image as determined by the bootloader.
# github.com/opensource-apple/xnu/blob/10.9/osfmk/i386/i386_init.c#L330
#
# [4]
# github.com/opensource-apple/xnu/blob/10.9/osfmk/i386/pmap.h#L353
#
# [5] Example use, to set the physical address of the DTB when switching address
# spaces, knowing the virtual address of the first page table:
# github.com/opensource-apple/xnu/blob/10.9/osfmk/i386/pal_routines.c#L254
#
# [6]
# github.com/opensource-apple/xnu/blob/10.9/osfmk/i386/pmap.h#L119
LOW_4GB_MASK = 0x00000000ffffffff
KERNEL_MIN_ADDRESS = 0xffffff8000000000
def ID_MAP_VTOP(x):
return x & LOW_4GB_MASK
# On x64, only 48 bits of the pointer are addressable.
X64_POINTER_MASK = 0x0000ffffffffffff
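# A worked example of the identity-map relationship described above (the
# addresses are made up for illustration): masking off the top 32 bits of a
# virtual address in the linearly mapped region yields its physical address.
#
#   ID_MAP_VTOP(KERNEL_MIN_ADDRESS + 0x1e5000) == 0x1e5000
#   ID_MAP_VTOP(0xffffff8000200000) == 0x200000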
def MOUNTAIN_LION_OR_LATER(profile):
return bool(profile.get_constant("_BootPML4", False))
class AbstractDarwinCommandPlugin(plugin.PhysicalASMixin,
plugin.ProfileCommand):
"""A base class for all darwin based plugins."""
__abstract = True
@classmethod
def is_active(cls, session):
"""We are only active if the profile is darwin."""
return (session.profile.metadata("os") == "darwin" and
plugin.Command.is_active(session))
class AbstractDarwinParameterHook(kb.ParameterHook):
@classmethod
def is_active(cls, session):
"""We are only active if the profile is Darwin."""
return (super(AbstractDarwinParameterHook, cls).is_active(session) and
session.profile.metadata("os") == 'darwin')
class KernelSlideHook(AbstractDarwinParameterHook):
"""Find the kernel slide if needed."""
name = "vm_kernel_slide"
def calculate(self):
if MOUNTAIN_LION_OR_LATER(self.session.profile):
return DarwinFindKASLR(session=self.session).vm_kernel_slide()
# Kernel slide should be treated as 0 if not relevant.
return 0
class CatfishScanner(scan.BaseScanner):
checks = [
("StringCheck", dict(needle="Catfish \x00\x00"))
]
class CatfishOffsetHook(AbstractDarwinParameterHook):
"""Find the actual offset of the _lowGlo struct."""
name = "catfish_offset"
def calculate(self):
for hit in CatfishScanner(
address_space=self.session.physical_address_space,
session=self.session).scan():
return hit
class DarwinKASLRMixin(object):
"""Ensures that KASLR slide is computed and stored in the session."""
@classmethod
def args(cls, parser):
super(DarwinKASLRMixin, cls).args(parser)
parser.add_argument("--vm_kernel_slide", type="IntParser",
help="OS X 10.8 and later: kernel ASLR slide.")
def __init__(self, vm_kernel_slide=None, **kwargs):
"""A mixin for Darwin plugins that require a valid KASLR slide.
Args:
vm_kernel_slide: The integer KASLR slide used in this image. If not
given it will be computed.
"""
super(DarwinKASLRMixin, self).__init__(**kwargs)
if not MOUNTAIN_LION_OR_LATER(self.profile):
return
if vm_kernel_slide is not None:
self.session.SetCache("vm_kernel_slide", vm_kernel_slide)
class DarwinFindKASLR(AbstractDarwinCommandPlugin):
"""A scanner for KASLR slide values in the Darwin kernel.
The scanner works by looking up a known data structure and comparing
its actual location to its expected location. Verification is a similar
process, using a second constant. This takes advantage of the fact that both
data structures are in a region of kernel memory that maps to the physical
memory in a predictable way (see ID_MAP_VTOP).
Human-readable output includes values of the kernel version string (which is
used for validation) for manual review, in case there are false positives.
"""
__name = "find_kaslr"
@classmethod
def is_active(cls, session):
return (super(DarwinFindKASLR, cls).is_active(session) and
MOUNTAIN_LION_OR_LATER(session.profile))
def all_catfish_hits(self):
"""Yields possible lowGlo offsets, starting with session-cached one.
Because the first hit on the catfish string isn't necessarily the right
one, this function will yield subsequent ones by scanning the physical
address space, starting with the offset of the cached first hit.
The caller is responsible for updating the session cache with the
correct offset.
"""
first_hit = self.session.GetParameter("catfish_offset")
yield first_hit
for hit in CatfishScanner(
address_space=self.session.physical_address_space,
session=self.session).scan(offset=first_hit + 1):
yield hit
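# The slide arithmetic used by vm_kernel_slide_hits() below, with purely
# illustrative offsets: if ID_MAP_VTOP(_lowGlo) puts the expected offset at
# physical 0x002ab000 but the "Catfish " signature is found at 0x052ab000,
# then vm_kernel_slide = 0x052ab000 - 0x002ab000 = 0x05000000.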
def vm_kernel_slide_hits(self):
"""Tries to compute the KASLR slide.
In an ideal scenario, this should return exactly one valid result.
Yields:
(int) semi-validated KASLR value
"""
expected_offset = self.profile.get_constant(
"_lowGlo", is_address=False)
expected_offset = ID_MAP_VTOP(expected_offset)
for hit in self.all_catfish_hits():
vm_kernel_slide = int(hit - expected_offset)
if self._validate_vm_kernel_slide(vm_kernel_slide):
self.session.SetCache("catfish_offset", hit)
yield vm_kernel_slide
def vm_kernel_slide(self):
"""Returns the first result of vm_kernel_slide hits and stops the scan.
This is the idiomatic way of using this plugin if all you need is the
likely KASLR slide value.
Returns:
A value for the KASLR slide that appears sane.
"""
self.session.logging.debug("Searching for KASLR hits.")
for vm_kernel_slide in self.vm_kernel_slide_hits():
return vm_kernel_slide
def _lookup_version_string(self, vm_kernel_slide):
"""Uses vm_kernel_slide to look up kernel version string.
This is used for validation only. Physical address space is
assumed to map to kernel virtual address space as expressed by
ID_MAP_VTOP.
Args:
vm_kernel_slide: KASLR slide to be used for lookup. Overrides whatever
may already be set in session.
Returns:
Kernel version string (should start with "Darwin Kernel").
"""
version_offset = self.profile.get_constant(
"_version", is_address=False)
version_offset += vm_kernel_slide
version_offset = ID_MAP_VTOP(version_offset)
return self.profile.String(vm=self.physical_address_space,
offset=version_offset)
def _validate_vm_kernel_slide(self, vm_kernel_slide):
"""Checks sanity of vm_kernel_slide by looking up kernel version.
If the result is a string that looks like the kernel version string, the
slide value is assumed to be valid. Note that this can theoretically
give false positives.
Args:
vm_kernel_slide: KASLR slide to be used for validation. Overrides
whatever may already be set in session.
Returns:
True if vm_kernel_slide value appears sane. False otherwise.
"""
version_string = self._lookup_version_string(vm_kernel_slide)
return version_string[0:13] == "Darwin Kernel"
def render(self, renderer):
renderer.table_header([
("KASLR Slide", "vm_kernel_slide", "[addrpad]"),
("Kernel Version", "_version", "30"),
])
for vm_kernel_slide in self.vm_kernel_slide_hits():
renderer.table_row(vm_kernel_slide,
self._lookup_version_string(vm_kernel_slide))
class DarwinFindDTB(DarwinKASLRMixin, AbstractDarwinCommandPlugin,
core.FindDTB):
"""Tries to find the DTB address for the Darwin/XNU kernel.
As the XNU kernel developed over the years, the best way of deriving this
information changed. This class now offers multiple methods of finding the
DTB. Calling find_dtb should automatically select the best method for the
job, based on the profile. It will also attempt to fall back on less ideal
ways of getting the DTB if the best way fails.
"""
__name = "find_dtb"
def _dtb_hits_idlepml4(self):
"""On 10.8 and later, x64, tries to determine the DTB using IdlePML4.
IdlePML4 is the address (in Kernel AS) of the kernel DTB [1]. The DTB
itself happens to be located in a section of kernel memory that sits at
the base of the physical address space [2], and its virtual address can
be converted to its physical address using the ID_MAP_VTOP macro
which the kernel defines for this express purpose [3].
Should work on: 10.8 and later.
Best for: 10.9 and later.
Yields:
The physical address of the DTB, not verified.
1:
github.com/opensource-apple/xnu/blob/10.9/osfmk/i386/i386_init.c#L281
Here the kernel initializes the page register at the address IdlePML4
points to (masked using the bitmask macro). The same function switches
to the newly initialized address space right before returning.
// IdlePML4 single entry for kernel space.
fillkpt(IdlePML4 + KERNEL_PML4_INDEX,
INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(IdlePDPT), 0, 1);
2:
The first page of IdlePML4 is allocated by the ALLOCPAGES function
located here:
github.com/opensource-apple/xnu/blob/10.9/osfmk/i386/i386_init.c#L134
3:
ID_MAP_VTOP is defined here, as simple bitmask:
github.com/opensource-apple/xnu/blob/10.9/osfmk/i386/pmap.h#L353
"""
idlepml4 = ID_MAP_VTOP(self.profile.get_constant(
"_IdlePML4", True))
dtb = self.profile.Object("unsigned int", offset=idlepml4,
vm=self.physical_address_space)
yield int(dtb)
def _dtb_hits_legacy(self):
"""The original way of getting the DTB, adapted from Volatility.
I have no idea how or why this is intended to work, but it seems to for
old images.
Should work on: 10.7 and earlier.
Yields:
The physical address of the DTB, not verified.
"""
if self.profile.metadata("arch") == "I386":
result = self.profile.get_constant("_IdlePDPT")
# Since the DTB must be page aligned, if this is not, it is probably
# a pointer to the real DTB.
if result % 0x1000:
result = self.profile.get_constant_object(
"_IdlePDPT", "unsigned int")
yield result
else:
result = self.profile.get_constant("_IdlePML4", is_address=True)
if result > 0xffffff8000000000:
result -= 0xffffff8000000000
yield result
def _dtb_hits_kernel_pmap(self):
"""On 64-bit systems, finds the DTB from the kernel pmap struct.
This is a very easy way of getting the DTB on systems where the kernel
pmap is a static symbol (which seems to be most of them).
Yields:
The physical address of the DTB, not verified.
"""
kernel_pmap_addr = self.profile.get_constant(
"_kernel_pmap_store", is_address=True)
kernel_pmap = self.profile.pmap(offset=ID_MAP_VTOP(kernel_pmap_addr),
vm=self.physical_address_space)
yield int(kernel_pmap.pm_cr3)
def _dtb_methods(self):
"""Determines viable methods of getting the DTB based on profile.
Yields:
Callable object that will yield DTB values.
"""
if MOUNTAIN_LION_OR_LATER(self.profile):
yield self._dtb_hits_idlepml4
else:
yield self._dtb_hits_legacy
if self.profile.metadata("arch") == "AMD64":
yield self._dtb_hits_kernel_pmap
def dtb_hits(self):
for method in self._dtb_methods():
for dtb_hit in method():
yield dtb_hit
def VerifyHit(self, hit):
address_space = self.CreateAS(hit)
if address_space:
address = self.profile.get_constant(
"_version", is_address=True)
if not address_space.is_valid_address(address):
return
if address_space.read(address, 13) != "Darwin Kernel":
return
return address_space
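# A quick interactive sketch of driving this plugin from a session (the
# access pattern mirrors session.plugins.lsmod() used elsewhere in this
# file; treat the exact invocation as illustrative):
#
#   find_dtb = session.plugins.find_dtb()
#   for hit in find_dtb.dtb_hits():
#       if find_dtb.VerifyHit(hit) is not None:
#           print hex(hit)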
def render(self, renderer):
renderer.table_header([("DTB", "dtb", "[addrpad]"),
("Verified", "verified", "8"),
("Source", "method", "15")])
for method in self._dtb_methods():
for dtb_hit in method():
renderer.table_row(
dtb_hit,
self.VerifyHit(dtb_hit) is not None,
method.__name__)
class DarwinPlugin(DarwinKASLRMixin,
plugin.KernelASMixin,
AbstractDarwinCommandPlugin):
"""Plugin which requires the kernel Address space to be loaded."""
__abstract = True
class DarwinProcessFilter(DarwinPlugin):
"""A class for filtering processes."""
__abstract = True
@classmethod
def args(cls, parser):
super(DarwinProcessFilter, cls).args(parser)
parser.add_argument("--pid",
type="ArrayIntParser",
help="One or more pids of processes to select.")
parser.add_argument("--proc_regex", default=None,
help="A regex to select a process by name.")
parser.add_argument("--phys_proc",
type="ArrayIntParser",
help="Physical addresses of proc structs.")
parser.add_argument("--proc", type="ArrayIntParser",
help="Kernel addresses of proc structs.")
parser.add_argument(
"--method", choices=list(cls.METHODS), nargs="+",
help="Method to list processes (Default uses all methods).")
def __init__(self, pid=None, proc_regex=None, phys_proc=None, proc=None,
method=None, **kwargs):
"""Filters processes by parameters.
Args:
phys_proc_struct: One or more proc structs or offsets defined in
the physical AS.
pids: A list of pids.
pid: A single pid.
"""
super(DarwinProcessFilter, self).__init__(**kwargs)
# Per-method cache of procs discovered.
self.cache = {}
self.methods = method or self.METHODS
if isinstance(phys_proc, (int, long)):
phys_proc = [phys_proc]
elif phys_proc is None:
phys_proc = []
if isinstance(proc, (int, long)):
proc = [proc]
elif isinstance(proc, obj.Struct):
proc = [proc.obj_offset]
elif proc is None:
proc = []
self.phys_proc = phys_proc
self.proc = proc
pids = []
if isinstance(pid, list):
pids.extend(pid)
elif isinstance(pid, (int, long)):
pids.append(pid)
if self.session.pid and not pid:
pids.append(self.session.pid)
self.pids = pids
self.proc_regex_text = proc_regex
if isinstance(proc_regex, basestring):
proc_regex = re.compile(proc_regex, re.I)
self.proc_regex = proc_regex
# Sometimes it's important to know if any filtering is specified at all.
self.filtering_requested = (self.pids or self.proc_regex or
self.phys_proc or self.proc)
def list_procs(self, sort=True):
"""Uses a few methods to list the procs."""
seen = set()
for method in self.METHODS:
if method not in self.methods:
continue
procs = self.session.GetParameter(
"darwin_pslist_%s" % method, [])
self.session.logging.debug("Listed %d processes using %s",
len(procs), method)
procs = [self.session.profile.proc(x) for x in procs]
seen.update(procs)
if sort:
return sorted(seen, key=lambda proc: proc.p_pid)
return seen
def filter_processes(self):
"""Filters proc list using phys_proc and pids lists."""
# No filtering required:
if not self.filtering_requested:
for proc in self.list_procs():
yield proc
else:
# We need to filter by phys_proc
for offset in self.phys_proc:
yield self.virtual_process_from_physical_offset(offset)
for offset in self.proc:
yield self.profile.proc(vm=self.kernel_address_space,
offset=int(offset))
# We need to filter by pids
for proc in self.list_procs():
if int(proc.p_pid) in self.pids:
yield proc
elif self.proc_regex and self.proc_regex.match(
utils.SmartUnicode(proc.p_comm)):
yield proc
def virtual_process_from_physical_offset(self, physical_offset):
"""Tries to return an proc in virtual space from a physical offset.
We do this by reflecting off the list elements.
Args:
physical_offset: The physical offset of the process.
Returns:
A proc_struct object or a NoneObject on failure.
"""
physical_proc = self.profile.proc(offset=int(physical_offset),
vm=self.kernel_address_space.base)
# We cast our list entry in the kernel AS by following the next pointer
# into the kernel AS and then the prev pointer. Note the address space
# switch upon dereferencing the pointer.
our_list_entry = physical_proc.procs.next.dereference(
vm=self.kernel_address_space).prev.dereference()
# Now we get the proc_struct object from the list entry.
return our_list_entry.dereference_as("proc_struct", "procs")
METHODS = [
"allproc",
"deadprocs",
"tasks",
"pidhash",
"pgrphash",
]
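# A minimal usage sketch for DarwinProcessFilter (illustrative only; the
# `session` object and plugin wiring are assumed to come from a running
# Rekall session, and registration details are omitted):
#
#   flt = DarwinProcessFilter(session=session, proc_regex="launchd",
#                             method=["allproc", "pidhash"])
#   for proc in flt.filter_processes():
#       print proc.p_pid, proc.p_comm
#
# With no pid/proc_regex/phys_proc/proc arguments, filtering_requested is
# False and filter_processes() simply yields every proc from list_procs().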
class KernelAddressCheckerMixIn(object):
"""A plugin mixin which does kernel address checks."""
def __init__(self, **kwargs):
super(KernelAddressCheckerMixIn, self).__init__(**kwargs)
        # We use the module plugin to help us locate addresses inside kernel
        # modules.
self.module_plugin = self.session.plugins.lsmod(session=self.session)
class PsListAllProcHook(AbstractDarwinParameterHook):
"""List all processes by following the _allproc list head."""
name = "darwin_pslist_allproc"
def calculate(self):
first = self.session.profile.get_constant_object(
"_allproc", target="proclist").lh_first
result = set(first.p_list)
return [x.obj_offset for x in result]
class PsListDeadProcHook(AbstractDarwinParameterHook):
"""List all processes by following the _allproc list head."""
name = "darwin_pslist_deadprocs"
def calculate(self):
"""List deallocated proc structs using the zone allocator."""
# Find the proc zone from the allocator.
proc_zone = self.session.manager.find_first(
"AllocationZone/name is 'proc'")["Struct/base"]
# Walk over the free list and get all the proc objects.
obj_list = proc_zone.free_elements.walk_list("next")
result = []
        for element in obj_list:
            proc = element.cast("proc")
            # Validate the proc. Real procs have a non-zero argc.
if proc.p_argc > 0:
result.append(proc.obj_offset)
return result
class PsListTasksHook(AbstractDarwinParameterHook):
"""List all processes by following the _allproc list head."""
name = "darwin_pslist_tasks"
def calculate(self):
"""List processes using the processor tasks queue.
See
/osfmk/kern/processor.c (processor_set_things)
"""
seen = set()
tasks = self.session.profile.get_constant_object(
"_tasks",
target="queue_entry",
vm=self.session.kernel_address_space)
for task in tasks.list_of_type("task", "tasks"):
proc = task.bsd_info.deref()
if proc:
seen.add(proc.obj_offset)
return seen
class PsListPgrpHashHook(AbstractDarwinParameterHook):
"""List all processes by following the _allproc list head."""
name = "darwin_pslist_pgrphash"
def calculate(self):
"""Process groups are organized in a hash chain.
xnu-1699.26.8/bsd/sys/proc_internal.h
"""
seen = set()
# Note that _pgrphash is initialized through:
# xnu-1699.26.8/bsd/kern/kern_proc.c:195
# hashinit(int elements, int type, u_long *hashmask)
# /xnu-1699.26.8/bsd/kern/kern_subr.c: 327
# hashinit(int elements, int type, u_long *hashmask) {
# ...
# *hashmask = hashsize - 1;
# Hence the value in _pgrphash is one less than the size of the hash
# table.
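        # Example: for a table of 16 buckets, hashinit() stores
        # _pgrphash = 15, so the Array count below is 15 + 1 = 16.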
pgr_hash_table = self.session.profile.get_constant_object(
"_pgrphashtbl",
target="Pointer",
target_args=dict(
target="Array",
target_args=dict(
target="pgrphashhead",
count=self.session.profile.get_constant_object(
"_pgrphash", "unsigned long") + 1
)
)
)
for slot in pgr_hash_table.deref():
for pgrp in slot.lh_first.walk_list("pg_hash.le_next"):
for proc in pgrp.pg_members.lh_first.walk_list(
"p_pglist.le_next"):
seen.add(proc.obj_offset)
return seen
class PsListPidHashHook(AbstractDarwinParameterHook):
"""List all processes by following the _allproc list head."""
name = "darwin_pslist_pidhash"
def calculate(self):
"""Lists processes using pid hash tables.
xnu-1699.26.8/bsd/kern/kern_proc.c:834:
pfind_locked(pid_t pid)
"""
seen = set()
# Note that _pidhash is initialized through:
# xnu-1699.26.8/bsd/kern/kern_proc.c:194
# pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
# /xnu-1699.26.8/bsd/kern/kern_subr.c: 327
# hashinit(int elements, int type, u_long *hashmask) {
# ...
# *hashmask = hashsize - 1;
# Hence the value in pidhash is one less than the size of the hash
# table.
pid_hash_table = self.session.profile.get_constant_object(
"_pidhashtbl",
target="Pointer",
target_args=dict(
target="Array",
target_args=dict(
target="pidhashhead",
count=self.session.profile.get_constant_object(
"_pidhash", "unsigned long") + 1
)
)
)
for plist in pid_hash_table.deref():
for proc in plist.lh_first.walk_list("p_hash.le_next"):
if proc:
seen.add(proc.obj_offset)
return seen
| gpl-2.0 |
swayf/pyLoad | module/plugins/hoster/Xdcc.py | 2 | 8399 | # -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: jeix
"""
from os.path import join
from os.path import exists
from os import makedirs
import re
import sys
import time
import socket, struct
from select import select
from module.utils import save_join
from module.plugins.Hoster import Hoster
class Xdcc(Hoster):
__name__ = "Xdcc"
__version__ = "0.3"
__pattern__ = r'xdcc://.*?(/#?.*?)?/.*?/#?\d+/?' # xdcc://irc.Abjects.net/#channel/[XDCC]|Shit/#0004/
__type__ = "hoster"
__config__ = [
("nick", "str", "Nickname", "pyload"),
("ident", "str", "Ident", "pyloadident"),
("realname", "str", "Realname", "pyloadreal")
]
__description__ = """A Plugin that allows you to download from an IRC XDCC bot"""
__author_name__ = ("jeix")
__author_mail__ = ("[email protected]")
def setup(self):
self.debug = 0 #0,1,2
self.timeout = 30
self.multiDL = False
def process(self, pyfile):
# change request type
self.req = pyfile.m.core.requestFactory.getRequest(self.__name__, type="XDCC")
self.pyfile = pyfile
for i in range(0,3):
try:
nmn = self.doDownload(pyfile.url)
self.log.debug("%s: Download of %s finished." % (self.__name__, nmn))
return
except socket.error, e:
if hasattr(e, "errno"):
errno = e.errno
else:
errno = e.args[0]
if errno in (10054,):
self.log.debug("XDCC: Server blocked our ip, retry in 5 min")
self.setWait(300)
self.wait()
continue
self.fail("Failed due to socket errors. Code: %d" % errno)
self.fail("Server blocked our ip, retry again later manually")
def doDownload(self, url):
        self.pyfile.setStatus("waiting")  # waiting for the real link
download_folder = self.config['general']['download_folder']
location = join(download_folder, self.pyfile.package().folder.decode(sys.getfilesystemencoding()))
if not exists(location):
makedirs(location)
m = re.search(r'xdcc://(.*?)/#?(.*?)/(.*?)/#?(\d+)/?', url)
server = m.group(1)
chan = m.group(2)
bot = m.group(3)
pack = m.group(4)
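        # Example, using the URL form from __pattern__ above:
        #   xdcc://irc.Abjects.net/#channel/[XDCC]|Shit/#0004/
        # yields server='irc.Abjects.net', chan='channel',
        # bot='[XDCC]|Shit', pack='0004'.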
nick = self.getConf('nick')
ident = self.getConf('ident')
real = self.getConf('realname')
temp = server.split(':')
ln = len(temp)
if ln == 2:
host, port = temp
elif ln == 1:
host, port = temp[0], 6667
else:
self.fail("Invalid hostname for IRC Server (%s)" % server)
#######################
# CONNECT TO IRC AND IDLE FOR REAL LINK
dl_time = time.time()
sock = socket.socket()
sock.connect((host, int(port)))
if nick == "pyload":
nick = "pyload-%d" % (time.time() % 1000) # last 3 digits
sock.send("NICK %s\r\n" % nick)
sock.send("USER %s %s bla :%s\r\n" % (ident, host, real))
time.sleep(3)
sock.send("JOIN #%s\r\n" % chan)
sock.send("PRIVMSG %s :xdcc send #%s\r\n" % (bot, pack))
# IRC recv loop
readbuffer = ""
done = False
retry = None
m = None
while True:
# done is set if we got our real link
if done:
break
if retry:
if time.time() > retry:
retry = None
dl_time = time.time()
sock.send("PRIVMSG %s :xdcc send #%s\r\n" % (bot, pack))
else:
if (dl_time + self.timeout) < time.time(): # todo: add in config
sock.send("QUIT :byebye\r\n")
sock.close()
self.fail("XDCC Bot did not answer")
fdset = select([sock], [], [], 0)
if sock not in fdset[0]:
continue
readbuffer += sock.recv(1024)
temp = readbuffer.split("\n")
readbuffer = temp.pop()
for line in temp:
                if self.debug == 2: print "*> " + unicode(line, errors='ignore')
line = line.rstrip()
first = line.split()
if first[0] == "PING":
sock.send("PONG %s\r\n" % first[1])
if first[0] == "ERROR":
self.fail("IRC-Error: %s" % line)
msg = line.split(None, 3)
if len(msg) != 4:
continue
                msg = {
                    "origin": msg[0][1:],
                    "action": msg[1],
                    "target": msg[2],
                    "text": msg[3][1:],
                }
if nick == msg["target"][0:len(nick)] and "PRIVMSG" == msg["action"]:
if msg["text"] == "\x01VERSION\x01":
self.log.debug("XDCC: Sending CTCP VERSION.")
sock.send("NOTICE %s :%s\r\n" % (msg['origin'], "pyLoad! IRC Interface"))
elif msg["text"] == "\x01TIME\x01":
self.log.debug("Sending CTCP TIME.")
sock.send("NOTICE %s :%d\r\n" % (msg['origin'], time.time()))
elif msg["text"] == "\x01LAG\x01":
pass # don't know how to answer
if not (bot == msg["origin"][0:len(bot)]
and nick == msg["target"][0:len(nick)]
and msg["action"] in ("PRIVMSG", "NOTICE")):
continue
                if self.debug == 1:
print "%s: %s" % (msg["origin"], msg["text"])
if "You already requested that pack" in msg["text"]:
retry = time.time() + 300
if "you must be on a known channel to request a pack" in msg["text"]:
self.fail("Wrong channel")
m = re.match('\x01DCC SEND (.*?) (\d+) (\d+)(?: (\d+))?\x01', msg["text"])
if m:
done = True
# get connection data
                    ip = socket.inet_ntoa(struct.pack('!I', int(m.group(2))))
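                    # The DCC SEND message carries the address as a 32-bit
                    # big-endian integer, e.g. 3232235777 -> '192.168.1.1'.
                    # Packing with '!I' keeps this portable; the native 'L'
                    # format is 8 bytes on 64-bit platforms and would make
                    # inet_ntoa fail.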
port = int(m.group(3))
packname = m.group(1)
                    if m.group(4):
self.req.filesize = int(m.group(4))
self.pyfile.name = packname
filename = save_join(location, packname)
self.log.info("XDCC: Downloading %s from %s:%d" % (packname, ip, port))
self.pyfile.setStatus("downloading")
newname = self.req.download(ip, port, filename, sock, self.pyfile.setProgress)
if newname and newname != filename:
self.log.info("%(name)s saved as %(newname)s" % {"name": self.pyfile.name, "newname": newname})
filename = newname
# kill IRC socket
# sock.send("QUIT :byebye\r\n")
sock.close()
self.lastDownload = filename
return self.lastDownload
| agpl-3.0 |
sinkpoint/dipy | dipy/reconst/odf.py | 15 | 2057 | from __future__ import division, print_function, absolute_import
from .base import ReconstModel, ReconstFit
import numpy as np
# Classes OdfModel and OdfFit are using API ReconstModel and ReconstFit from
# .base
class OdfModel(ReconstModel):
"""An abstract class to be sub-classed by specific odf models
    All odf models should provide a fit method which may take data as its
first and only argument.
"""
def __init__(self, gtab):
ReconstModel.__init__(self, gtab)
def fit(self, data):
"""To be implemented by specific odf models"""
raise NotImplementedError("To be implemented in sub classes")
class OdfFit(ReconstFit):
def odf(self, sphere):
"""To be implemented but specific odf models"""
raise NotImplementedError("To be implemented in sub classes")
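# A minimal sketch of how these abstract classes are intended to be
# sub-classed (illustrative only; MyOdfModel and MyOdfFit are not part of
# dipy):
#
#   class MyOdfFit(OdfFit):
#       def odf(self, sphere):
#           return np.ones(len(sphere.vertices))  # flat odf everywhere
#
#   class MyOdfModel(OdfModel):
#       def fit(self, data):
#           return MyOdfFit(self, data)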
def gfa(samples):
"""The general fractional anisotropy of a function evaluated
on the unit sphere"""
diff = samples - samples.mean(-1)[..., None]
n = samples.shape[-1]
numer = n * (diff * diff).sum(-1)
denom = (n - 1) * (samples * samples).sum(-1)
return np.sqrt(numer / denom)
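# Doctest-style sketch of gfa() on synthetic samples (exact values follow
# from the formula above):
#   >>> gfa(np.ones(100))             # constant function: no anisotropy
#   0.0
#   >>> x = np.zeros(100); x[0] = 1.0
#   >>> gfa(x)                        # single spike: maximal anisotropy
#   1.0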
def minmax_normalize(samples, out=None):
"""Min-max normalization of a function evaluated on the unit sphere
Normalizes samples to ``(samples - min(samples)) / (max(samples) -
min(samples))`` for each unit sphere.
Parameters
----------
samples : ndarray (..., N)
N samples on a unit sphere for each point, stored along the last axis
of the array.
    out : ndarray (..., N), optional
An array to store the normalized samples.
Returns
-------
out : ndarray, (..., N)
Normalized samples.
"""
if out is None:
dtype = np.common_type(np.empty(0, 'float32'), samples)
out = np.array(samples, dtype=dtype, copy=True)
else:
out[:] = samples
sample_mins = np.min(samples, -1)[..., None]
sample_maxes = np.max(samples, -1)[..., None]
out -= sample_mins
out /= (sample_maxes - sample_mins)
return out
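# Doctest-style sketch of minmax_normalize() (repr formatting varies with
# the numpy version):
#   >>> minmax_normalize(np.array([1.0, 2.0, 3.0]))
#   array([ 0. ,  0.5,  1. ])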
| bsd-3-clause |
jspraul/bite-project | deps/gdata-python-client/samples/apps/marketplace_sample/gdata/test_data.py | 103 | 348233 | #!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
XML_ENTRY_1 = """<?xml version='1.0'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:g='http://base.google.com/ns/1.0'>
<category scheme="http://base.google.com/categories/itemtypes"
term="products"/>
<id> http://www.google.com/test/id/url </id>
<title type='text'>Testing 2000 series laptop</title>
<content type='xhtml'>
<div xmlns='http://www.w3.org/1999/xhtml'>A Testing Laptop</div>
</content>
<link rel='alternate' type='text/html'
href='http://www.provider-host.com/123456789'/>
<link rel='license'
href='http://creativecommons.org/licenses/by-nc/2.5/rdf'/>
<g:label>Computer</g:label>
<g:label>Laptop</g:label>
<g:label>testing laptop</g:label>
<g:item_type>products</g:item_type>
</entry>"""
TEST_BASE_ENTRY = """<?xml version='1.0'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:g='http://base.google.com/ns/1.0'>
<category scheme="http://base.google.com/categories/itemtypes"
term="products"/>
<title type='text'>Testing 2000 series laptop</title>
<content type='xhtml'>
<div xmlns='http://www.w3.org/1999/xhtml'>A Testing Laptop</div>
</content>
<app:control xmlns:app='http://purl.org/atom/app#'>
<app:draft>yes</app:draft>
<gm:disapproved xmlns:gm='http://base.google.com/ns-metadata/1.0'/>
</app:control>
<link rel='alternate' type='text/html'
href='http://www.provider-host.com/123456789'/>
<g:label>Computer</g:label>
<g:label>Laptop</g:label>
<g:label>testing laptop</g:label>
<g:item_type>products</g:item_type>
</entry>"""
BIG_FEED = """<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<title type="text">dive into mark</title>
<subtitle type="html">
A <em>lot</em> of effort
went into making this effortless
</subtitle>
<updated>2005-07-31T12:29:29Z</updated>
<id>tag:example.org,2003:3</id>
<link rel="alternate" type="text/html"
hreflang="en" href="http://example.org/"/>
<link rel="self" type="application/atom+xml"
href="http://example.org/feed.atom"/>
<rights>Copyright (c) 2003, Mark Pilgrim</rights>
<generator uri="http://www.example.com/" version="1.0">
Example Toolkit
</generator>
<entry>
<title>Atom draft-07 snapshot</title>
<link rel="alternate" type="text/html"
href="http://example.org/2005/04/02/atom"/>
<link rel="enclosure" type="audio/mpeg" length="1337"
href="http://example.org/audio/ph34r_my_podcast.mp3"/>
<id>tag:example.org,2003:3.2397</id>
<updated>2005-07-31T12:29:29Z</updated>
<published>2003-12-13T08:29:29-04:00</published>
<author>
<name>Mark Pilgrim</name>
<uri>http://example.org/</uri>
<email>[email protected]</email>
</author>
<contributor>
<name>Sam Ruby</name>
</contributor>
<contributor>
<name>Joe Gregorio</name>
</contributor>
<content type="xhtml" xml:lang="en"
xml:base="http://diveintomark.org/">
<div xmlns="http://www.w3.org/1999/xhtml">
<p><i>[Update: The Atom draft is finished.]</i></p>
</div>
</content>
</entry>
</feed>
"""
SMALL_FEED = """<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<title>Example Feed</title>
<link href="http://example.org/"/>
<updated>2003-12-13T18:30:02Z</updated>
<author>
<name>John Doe</name>
</author>
<id>urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6</id>
<entry>
<title>Atom-Powered Robots Run Amok</title>
<link href="http://example.org/2003/12/13/atom03"/>
<id>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</id>
<updated>2003-12-13T18:30:02Z</updated>
<summary>Some text.</summary>
</entry>
</feed>
"""
GBASE_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:g='http://base.google.com/ns/1.0' xmlns:batch='http://schemas.google.com/gdata/batch'>
<id>http://www.google.com/base/feeds/snippets</id>
<updated>2007-02-08T23:18:21.935Z</updated>
<title type='text'>Items matching query: digital camera</title>
<link rel='alternate' type='text/html' href='http://base.google.com'>
</link>
<link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml' href='http://www.google.com/base/feeds/snippets'>
</link>
<link rel='self' type='application/atom+xml' href='http://www.google.com/base/feeds/snippets?start-index=1&max-results=25&bq=digital+camera'>
</link>
<link rel='next' type='application/atom+xml' href='http://www.google.com/base/feeds/snippets?start-index=26&max-results=25&bq=digital+camera'>
</link>
<generator version='1.0' uri='http://base.google.com'>GoogleBase </generator>
<openSearch:totalResults>2171885</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<entry>
<id>http://www.google.com/base/feeds/snippets/13246453826751927533</id>
<published>2007-02-08T13:23:27.000Z</published>
<updated>2007-02-08T16:40:57.000Z</updated>
<category scheme='http://base.google.com/categories/itemtypes' term='Products'>
</category>
<title type='text'>Digital Camera Battery Notebook Computer 12v DC Power Cable - 5.5mm x 2.5mm (Center +) Camera Connecting Cables</title>
<content type='html'>Notebook Computer 12v DC Power Cable - 5.5mm x 2.1mm (Center +) This connection cable will allow any Digital Pursuits battery pack to power portable computers that operate with 12v power and have a 2.1mm power connector (center +) Digital ...</content>
<link rel='alternate' type='text/html' href='http://www.bhphotovideo.com/bnh/controller/home?O=productlist&A=details&Q=&sku=305668&is=REG&kw=DIDCB5092&BI=583'>
</link>
<link rel='self' type='application/atom+xml' href='http://www.google.com/base/feeds/snippets/13246453826751927533'>
</link>
<author>
<name>B&H Photo-Video</name>
<email>[email protected]</email>
</author>
<g:payment_notes type='text'>PayPal & Bill Me Later credit available online only.</g:payment_notes>
<g:condition type='text'>new</g:condition>
<g:location type='location'>420 9th Ave. 10001</g:location>
<g:id type='text'>305668-REG</g:id>
<g:item_type type='text'>Products</g:item_type>
<g:brand type='text'>Digital Camera Battery</g:brand>
<g:expiration_date type='dateTime'>2007-03-10T13:23:27.000Z</g:expiration_date>
<g:customer_id type='int'>1172711</g:customer_id>
<g:price type='floatUnit'>34.95 usd</g:price>
<g:product_type type='text'>Digital Photography>Camera Connecting Cables</g:product_type>
<g:item_language type='text'>EN</g:item_language>
<g:manufacturer_id type='text'>DCB5092</g:manufacturer_id>
<g:target_country type='text'>US</g:target_country>
<g:weight type='float'>1.0</g:weight>
<g:image_link type='url'>http://base.google.com/base_image?q=http%3A%2F%2Fwww.bhphotovideo.com%2Fimages%2Fitems%2F305668.jpg&dhm=ffffffff84c9a95e&size=6</g:image_link>
</entry>
<entry>
<id>http://www.google.com/base/feeds/snippets/10145771037331858608</id>
<published>2007-02-08T13:23:27.000Z</published>
<updated>2007-02-08T16:40:57.000Z</updated>
<category scheme='http://base.google.com/categories/itemtypes' term='Products'>
</category>
<title type='text'>Digital Camera Battery Electronic Device 5v DC Power Cable - 5.5mm x 2.5mm (Center +) Camera Connecting Cables</title>
<content type='html'>Electronic Device 5v DC Power Cable - 5.5mm x 2.5mm (Center +) This connection cable will allow any Digital Pursuits battery pack to power any electronic device that operates with 5v power and has a 2.5mm power connector (center +) Digital ...</content>
<link rel='alternate' type='text/html' href='http://www.bhphotovideo.com/bnh/controller/home?O=productlist&A=details&Q=&sku=305656&is=REG&kw=DIDCB5108&BI=583'>
</link>
<link rel='self' type='application/atom+xml' href='http://www.google.com/base/feeds/snippets/10145771037331858608'>
</link>
<author>
<name>B&H Photo-Video</name>
<email>[email protected]</email>
</author>
<g:location type='location'>420 9th Ave. 10001</g:location>
<g:condition type='text'>new</g:condition>
<g:weight type='float'>0.18</g:weight>
<g:target_country type='text'>US</g:target_country>
<g:product_type type='text'>Digital Photography>Camera Connecting Cables</g:product_type>
<g:payment_notes type='text'>PayPal & Bill Me Later credit available online only.</g:payment_notes>
<g:id type='text'>305656-REG</g:id>
<g:image_link type='url'>http://base.google.com/base_image?q=http%3A%2F%2Fwww.bhphotovideo.com%2Fimages%2Fitems%2F305656.jpg&dhm=7315bdc8&size=6</g:image_link>
<g:manufacturer_id type='text'>DCB5108</g:manufacturer_id>
<g:upc type='text'>838098005108</g:upc>
<g:price type='floatUnit'>34.95 usd</g:price>
<g:item_language type='text'>EN</g:item_language>
<g:brand type='text'>Digital Camera Battery</g:brand>
<g:customer_id type='int'>1172711</g:customer_id>
<g:item_type type='text'>Products</g:item_type>
<g:expiration_date type='dateTime'>2007-03-10T13:23:27.000Z</g:expiration_date>
</entry>
<entry>
<id>http://www.google.com/base/feeds/snippets/3128608193804768644</id>
<published>2007-02-08T02:21:27.000Z</published>
<updated>2007-02-08T15:40:13.000Z</updated>
<category scheme='http://base.google.com/categories/itemtypes' term='Products'>
</category>
<title type='text'>Digital Camera Battery Power Cable for Kodak 645 Pro-Back ProBack & DCS-300 Series Camera Connecting Cables</title>
<content type='html'>Camera Connection Cable - to Power Kodak 645 Pro-Back DCS-300 Series Digital Cameras This connection cable will allow any Digital Pursuits battery pack to power the following digital cameras: Kodak DCS Pro Back 645 DCS-300 series Digital Photography ...</content>
<link rel='alternate' type='text/html' href='http://www.bhphotovideo.com/bnh/controller/home?O=productlist&A=details&Q=&sku=305685&is=REG&kw=DIDCB6006&BI=583'>
</link>
<link rel='self' type='application/atom+xml' href='http://www.google.com/base/feeds/snippets/3128608193804768644'>
</link>
<author>
<name>B&H Photo-Video</name>
<email>[email protected]</email>
</author>
<g:weight type='float'>0.3</g:weight>
<g:manufacturer_id type='text'>DCB6006</g:manufacturer_id>
<g:image_link type='url'>http://base.google.com/base_image?q=http%3A%2F%2Fwww.bhphotovideo.com%2Fimages%2Fitems%2F305685.jpg&dhm=72f0ca0a&size=6</g:image_link>
<g:location type='location'>420 9th Ave. 10001</g:location>
<g:payment_notes type='text'>PayPal & Bill Me Later credit available online only.</g:payment_notes>
<g:item_type type='text'>Products</g:item_type>
<g:target_country type='text'>US</g:target_country>
<g:accessory_for type='text'>digital kodak camera</g:accessory_for>
<g:brand type='text'>Digital Camera Battery</g:brand>
<g:expiration_date type='dateTime'>2007-03-10T02:21:27.000Z</g:expiration_date>
<g:item_language type='text'>EN</g:item_language>
<g:condition type='text'>new</g:condition>
<g:price type='floatUnit'>34.95 usd</g:price>
<g:customer_id type='int'>1172711</g:customer_id>
<g:product_type type='text'>Digital Photography>Camera Connecting Cables</g:product_type>
<g:id type='text'>305685-REG</g:id>
</entry>
</feed>"""
EXTENSION_TREE = """<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<g:author xmlns:g="http://www.google.com">
<g:name>John Doe
<g:foo yes="no" up="down">Bar</g:foo>
</g:name>
</g:author>
</feed>
"""
TEST_AUTHOR = """<?xml version="1.0" encoding="utf-8"?>
<author xmlns="http://www.w3.org/2005/Atom">
<name xmlns="http://www.w3.org/2005/Atom">John Doe</name>
<email xmlns="http://www.w3.org/2005/Atom">[email protected]</email>
<uri xmlns="http://www.w3.org/2005/Atom">http://www.google.com</uri>
</author>
"""
TEST_LINK = """<?xml version="1.0" encoding="utf-8"?>
<link xmlns="http://www.w3.org/2005/Atom" href="http://www.google.com"
rel="test rel" foo1="bar" foo2="rab"/>
"""
TEST_GBASE_ATTRIBUTE = """<?xml version="1.0" encoding="utf-8"?>
<g:brand type='text' xmlns:g="http://base.google.com/ns/1.0">Digital Camera Battery</g:brand>
"""
CALENDAR_FEED = """<?xml version='1.0' encoding='utf-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:gd='http://schemas.google.com/g/2005'
xmlns:gCal='http://schemas.google.com/gCal/2005'>
<id>http://www.google.com/calendar/feeds/default</id>
<updated>2007-03-20T22:48:57.833Z</updated>
<title type='text'>GData Ops Demo's Calendar List</title>
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default'></link>
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default'></link>
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<generator version='1.0' uri='http://www.google.com/calendar'>
Google Calendar</generator>
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>
http://www.google.com/calendar/feeds/default/gdata.ops.demo%40gmail.com</id>
<published>2007-03-20T22:48:57.837Z</published>
<updated>2007-03-20T22:48:52.000Z</updated>
<title type='text'>GData Ops Demo</title>
<link rel='alternate' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/gdata.ops.demo%40gmail.com/private/full'>
</link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/gdata.ops.demo%40gmail.com'>
</link>
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<gCal:color value='#2952A3'></gCal:color>
<gCal:accesslevel value='owner'></gCal:accesslevel>
<gCal:hidden value='false'></gCal:hidden>
<gCal:timezone value='America/Los_Angeles'></gCal:timezone>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/jnh21ovnjgfph21h32gvms2758%40group.calendar.google.com</id>
<published>2007-03-20T22:48:57.837Z</published>
<updated>2007-03-20T22:48:53.000Z</updated>
<title type='text'>GData Ops Demo Secondary Calendar</title>
<summary type='text'></summary>
<link rel='alternate' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/jnh21ovnjgfph21h32gvms2758%40group.calendar.google.com/private/full'>
</link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/jnh21ovnjgfph21h32gvms2758%40group.calendar.google.com'>
</link>
<author>
<name>GData Ops Demo Secondary Calendar</name>
</author>
<gCal:color value='#528800'></gCal:color>
<gCal:accesslevel value='owner'></gCal:accesslevel>
<gCal:hidden value='false'></gCal:hidden>
<gCal:timezone value='America/Los_Angeles'></gCal:timezone>
<gd:where valueString=''></gd:where>
</entry>
</feed>
"""
CALENDAR_FULL_EVENT_FEED = """<?xml version='1.0' encoding='utf-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:gd='http://schemas.google.com/g/2005'
xmlns:gCal='http://schemas.google.com/gCal/2005'>
<id>
http://www.google.com/calendar/feeds/default/private/full</id>
<updated>2007-03-20T21:29:57.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>GData Ops Demo</title>
<subtitle type='text'>GData Ops Demo</subtitle>
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full'>
</link>
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full'>
</link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full?updated-min=2001-01-01&max-results=25'>
</link>
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<generator version='1.0' uri='http://www.google.com/calendar'>
Google Calendar</generator>
<openSearch:totalResults>10</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<gCal:timezone value='America/Los_Angeles'></gCal:timezone>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/o99flmgmkfkfrr8u745ghr3100</id>
<published>2007-03-20T21:29:52.000Z</published>
<updated>2007-03-20T21:29:57.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>test deleted</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=bzk5ZmxtZ21rZmtmcnI4dTc0NWdocjMxMDAgZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/o99flmgmkfkfrr8u745ghr3100'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/o99flmgmkfkfrr8u745ghr3100/63310109397'>
</link>
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.canceled'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/o99flmgmkfkfrr8u745ghr3100/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:when startTime='2007-03-23T12:00:00.000-07:00'
endTime='2007-03-23T13:00:00.000-07:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/2qt3ao5hbaq7m9igr5ak9esjo0</id>
<published>2007-03-20T21:26:04.000Z</published>
<updated>2007-03-20T21:28:46.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>Afternoon at Dolores Park with Kim</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=MnF0M2FvNWhiYXE3bTlpZ3I1YWs5ZXNqbzAgZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/2qt3ao5hbaq7m9igr5ak9esjo0'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/2qt3ao5hbaq7m9igr5ak9esjo0/63310109326'>
</link>
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/2qt3ao5hbaq7m9igr5ak9esjo0/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.private'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:who rel='http://schemas.google.com/g/2005#event.organizer'
valueString='GData Ops Demo' email='[email protected]'>
<gd:attendeeStatus value='http://schemas.google.com/g/2005#event.accepted'>
</gd:attendeeStatus>
</gd:who>
<gd:who rel='http://schemas.google.com/g/2005#event.attendee'
valueString='Ryan Boyd (API)' email='[email protected]'>
<gd:attendeeStatus value='http://schemas.google.com/g/2005#event.invited'>
</gd:attendeeStatus>
</gd:who>
<gd:when startTime='2007-03-24T12:00:00.000-07:00'
endTime='2007-03-24T15:00:00.000-07:00'>
<gd:reminder minutes='20'></gd:reminder>
</gd:when>
<gd:where valueString='Dolores Park with Kim'></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/uvsqhg7klnae40v50vihr1pvos</id>
<published>2007-03-20T21:28:37.000Z</published>
<updated>2007-03-20T21:28:37.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>Team meeting</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=dXZzcWhnN2tsbmFlNDB2NTB2aWhyMXB2b3NfMjAwNzAzMjNUMTYwMDAwWiBnZGF0YS5vcHMuZGVtb0Bt'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/uvsqhg7klnae40v50vihr1pvos'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/uvsqhg7klnae40v50vihr1pvos/63310109317'>
</link>
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<gd:recurrence>DTSTART;TZID=America/Los_Angeles:20070323T090000
DTEND;TZID=America/Los_Angeles:20070323T100000
RRULE:FREQ=WEEKLY;BYDAY=FR;UNTIL=20070817T160000Z;WKST=SU
BEGIN:VTIMEZONE TZID:America/Los_Angeles
X-LIC-LOCATION:America/Los_Angeles BEGIN:STANDARD
TZOFFSETFROM:-0700 TZOFFSETTO:-0800 TZNAME:PST
DTSTART:19701025T020000 RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU
END:STANDARD BEGIN:DAYLIGHT TZOFFSETFROM:-0800 TZOFFSETTO:-0700
TZNAME:PDT DTSTART:19700405T020000
RRULE:FREQ=YEARLY;BYMONTH=4;BYDAY=1SU END:DAYLIGHT
END:VTIMEZONE</gd:recurrence>
<gCal:sendEventNotifications value='true'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:visibility value='http://schemas.google.com/g/2005#event.public'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:reminder minutes='10'></gd:reminder>
<gd:where valueString=''></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/st4vk9kiffs6rasrl32e4a7alo</id>
<published>2007-03-20T21:25:46.000Z</published>
<updated>2007-03-20T21:25:46.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>Movie with Kim and danah</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=c3Q0dms5a2lmZnM2cmFzcmwzMmU0YTdhbG8gZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/st4vk9kiffs6rasrl32e4a7alo'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/st4vk9kiffs6rasrl32e4a7alo/63310109146'>
</link>
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/st4vk9kiffs6rasrl32e4a7alo/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:when startTime='2007-03-24T20:00:00.000-07:00'
endTime='2007-03-24T21:00:00.000-07:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/ofl1e45ubtsoh6gtu127cls2oo</id>
<published>2007-03-20T21:24:43.000Z</published>
<updated>2007-03-20T21:25:08.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>Dinner with Kim and Sarah</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=b2ZsMWU0NXVidHNvaDZndHUxMjdjbHMyb28gZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/ofl1e45ubtsoh6gtu127cls2oo'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/ofl1e45ubtsoh6gtu127cls2oo/63310109108'>
</link>
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/ofl1e45ubtsoh6gtu127cls2oo/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:when startTime='2007-03-20T19:00:00.000-07:00'
endTime='2007-03-20T21:30:00.000-07:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/b69s2avfi2joigsclecvjlc91g</id>
<published>2007-03-20T21:24:19.000Z</published>
<updated>2007-03-20T21:25:05.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>Dinner with Jane and John</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=YjY5czJhdmZpMmpvaWdzY2xlY3ZqbGM5MWcgZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/b69s2avfi2joigsclecvjlc91g'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/b69s2avfi2joigsclecvjlc91g/63310109105'>
</link>
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/b69s2avfi2joigsclecvjlc91g/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:when startTime='2007-03-22T17:00:00.000-07:00'
endTime='2007-03-22T19:30:00.000-07:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/u9p66kkiotn8bqh9k7j4rcnjjc</id>
<published>2007-03-20T21:24:33.000Z</published>
<updated>2007-03-20T21:24:33.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>Tennis with Elizabeth</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=dTlwNjZra2lvdG44YnFoOWs3ajRyY25qamMgZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/u9p66kkiotn8bqh9k7j4rcnjjc'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/u9p66kkiotn8bqh9k7j4rcnjjc/63310109073'>
</link>
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/u9p66kkiotn8bqh9k7j4rcnjjc/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:when startTime='2007-03-24T10:00:00.000-07:00'
endTime='2007-03-24T11:00:00.000-07:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/76oj2kceidob3s708tvfnuaq3c</id>
<published>2007-03-20T21:24:00.000Z</published>
<updated>2007-03-20T21:24:00.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>Lunch with Jenn</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=NzZvajJrY2VpZG9iM3M3MDh0dmZudWFxM2MgZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/76oj2kceidob3s708tvfnuaq3c'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/76oj2kceidob3s708tvfnuaq3c/63310109040'>
</link>
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/76oj2kceidob3s708tvfnuaq3c/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:when startTime='2007-03-20T11:30:00.000-07:00'
endTime='2007-03-20T12:30:00.000-07:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/5np9ec8m7uoauk1vedh5mhodco</id>
<published>2007-03-20T07:50:02.000Z</published>
<updated>2007-03-20T20:39:26.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>test entry</title>
<content type='text'>test desc</content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=NW5wOWVjOG03dW9hdWsxdmVkaDVtaG9kY28gZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/5np9ec8m7uoauk1vedh5mhodco'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/5np9ec8m7uoauk1vedh5mhodco/63310106366'>
</link>
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/5np9ec8m7uoauk1vedh5mhodco/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.private'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:who rel='http://schemas.google.com/g/2005#event.attendee'
valueString='Vivian Li' email='[email protected]'>
<gd:attendeeStatus value='http://schemas.google.com/g/2005#event.declined'>
</gd:attendeeStatus>
</gd:who>
<gd:who rel='http://schemas.google.com/g/2005#event.organizer'
valueString='GData Ops Demo' email='[email protected]'>
<gd:attendeeStatus value='http://schemas.google.com/g/2005#event.accepted'>
</gd:attendeeStatus>
</gd:who>
<gd:when startTime='2007-03-21T08:00:00.000-07:00'
endTime='2007-03-21T09:00:00.000-07:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where valueString='anywhere'></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/fu6sl0rqakf3o0a13oo1i1a1mg</id>
<published>2007-02-14T23:23:37.000Z</published>
<updated>2007-02-14T23:25:30.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>test</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=ZnU2c2wwcnFha2YzbzBhMTNvbzFpMWExbWcgZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/fu6sl0rqakf3o0a13oo1i1a1mg'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/fu6sl0rqakf3o0a13oo1i1a1mg/63307178730'>
</link>
<link rel="http://schemas.google.com/gCal/2005/webContent" title="World Cup" href="http://www.google.com/calendar/images/google-holiday.gif" type="image/gif">
<gCal:webContent width="276" height="120" url="http://www.google.com/logos/worldcup06.gif" />
</link>
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/fu6sl0rqakf3o0a13oo1i1a1mg/comments'>
</gd:feedLink>
</gd:comments>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:when startTime='2007-02-15T08:30:00.000-08:00'
endTime='2007-02-15T09:30:00.000-08:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where></gd:where>
</entry>
<entry>
<id>
http://www.google.com/calendar/feeds/default/private/full/h7a0haa4da8sil3rr19ia6luvc</id>
<published>2007-07-16T22:13:28.000Z</published>
<updated>2007-07-16T22:13:29.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event' />
<title type='text'></title>
<content type='text' />
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=aDdhMGhhYTRkYThzaWwzcnIxOWlhNmx1dmMgZ2RhdGEub3BzLmRlbW9AbQ'
title='alternate' />
<link rel='http://schemas.google.com/gCal/2005/webContent'
type='application/x-google-gadgets+xml'
href='http://gdata.ops.demo.googlepages.com/birthdayicon.gif'
title='Date and Time Gadget'>
<gCal:webContent width='300' height='136'
url='http://google.com/ig/modules/datetime.xml'>
<gCal:webContentGadgetPref name='color' value='green' />
</gCal:webContent>
</link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/h7a0haa4da8sil3rr19ia6luvc' />
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/h7a0haa4da8sil3rr19ia6luvc/63320307209' />
<author>
<name>GData Ops Demo</name>
<email>[email protected]</email>
</author>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/h7a0haa4da8sil3rr19ia6luvc/comments' />
</gd:comments>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed' />
<gd:visibility value='http://schemas.google.com/g/2005#event.default' />
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque' />
<gd:when startTime='2007-03-14' endTime='2007-03-15' />
<gd:where />
</entry>
</feed>
"""
CALENDAR_BATCH_REQUEST = """<?xml version='1.0' encoding='utf-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:batch='http://schemas.google.com/gdata/batch'
xmlns:gCal='http://schemas.google.com/gCal/2005'>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<entry>
<batch:id>1</batch:id>
<batch:operation type='insert' />
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<title type='text'>Event inserted via batch</title>
</entry>
<entry>
<batch:id>2</batch:id>
<batch:operation type='query' />
<id>http://www.google.com/calendar/feeds/default/private/full/glcs0kv2qqa0gf52qi1jo018gc</id>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<title type='text'>Event queried via batch</title>
</entry>
<entry>
<batch:id>3</batch:id>
<batch:operation type='update' />
<id>http://www.google.com/calendar/feeds/default/private/full/ujm0go5dtngdkr6u91dcqvj0qs</id>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<title type='text'>Event updated via batch</title>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=dWptMGdvNWR0bmdka3I2dTkxZGNxdmowcXMgaGFyaXNodi50ZXN0QG0' title='alternate' />
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/ujm0go5dtngdkr6u91dcqvj0qs' />
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/ujm0go5dtngdkr6u91dcqvj0qs/63326098791' />
</entry>
<entry>
<batch:id>4</batch:id>
<batch:operation type='delete' />
<id>http://www.google.com/calendar/feeds/default/private/full/d8qbg9egk1n6lhsgq1sjbqffqc</id>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<title type='text'>Event deleted via batch</title>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=ZDhxYmc5ZWdrMW42bGhzZ3Exc2picWZmcWMgaGFyaXNodi50ZXN0QG0' title='alternate' />
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/d8qbg9egk1n6lhsgq1sjbqffqc' />
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/d8qbg9egk1n6lhsgq1sjbqffqc/63326018324' />
</entry>
</feed>
"""
CALENDAR_BATCH_RESPONSE = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:batch='http://schemas.google.com/gdata/batch'
xmlns:gCal='http://schemas.google.com/gCal/2005'>
<id>http://www.google.com/calendar/feeds/default/private/full</id>
<updated>2007-09-21T23:01:00.380Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>Batch Feed</title>
<link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full' />
<link rel='http://schemas.google.com/g/2005#post' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full' />
<link rel='http://schemas.google.com/g/2005#batch' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/batch' />
<entry>
<batch:id>1</batch:id>
<batch:status code='201' reason='Created' />
<batch:operation type='insert' />
<id>http://www.google.com/calendar/feeds/default/private/full/n9ug78gd9tv53ppn4hdjvk68ek</id>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<title type='text'>Event inserted via batch</title>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=bjl1Zzc4Z2Q5dHY1M3BwbjRoZGp2azY4ZWsgaGFyaXNodi50ZXN0QG0' title='alternate' />
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/n9ug78gd9tv53ppn4hdjvk68ek' />
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/n9ug78gd9tv53ppn4hdjvk68ek/63326098860' />
</entry>
<entry>
<batch:id>2</batch:id>
<batch:status code='200' reason='Success' />
<batch:operation type='query' />
<id>http://www.google.com/calendar/feeds/default/private/full/glsc0kv2aqa0ff52qi1jo018gc</id>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<title type='text'>Event queried via batch</title>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=Z2xzYzBrdjJhcWEwZmY1MnFpMWpvMDE4Z2MgaGFyaXNodi50ZXN0QG0' title='alternate' />
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/glsc0kv2aqa0ff52qi1jo018gc' />
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/glsc0kv2aqa0ff52qi1jo018gc/63326098791' />
</entry>
<entry xmlns:gCal='http://schemas.google.com/gCal/2005'>
<batch:id>3</batch:id>
<batch:status code='200' reason='Success' />
<batch:operation type='update' />
<id>http://www.google.com/calendar/feeds/default/private/full/ujm0go5dtngdkr6u91dcqvj0qs</id>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<title type='text'>Event updated via batch</title>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=dWptMGdvNWR0bmdka3I2dTkxZGNxdmowcXMgaGFyaXNodi50ZXN0QG0' title='alternate' />
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/ujm0go5dtngdkr6u91dcqvj0qs' />
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/full/ujm0go5dtngdkr6u91dcqvj0qs/63326098860' />
<batch:id>3</batch:id>
<batch:status code='200' reason='Success' />
<batch:operation type='update' />
</entry>
<entry>
<batch:id>4</batch:id>
<batch:status code='200' reason='Success' />
<batch:operation type='delete' />
<id>http://www.google.com/calendar/feeds/default/private/full/d8qbg9egk1n6lhsgq1sjbqffqc</id>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/g/2005#event' />
<title type='text'>Event deleted via batch</title>
<content type='text'>Deleted</content>
</entry>
</feed>
"""
GBASE_ATTRIBUTE_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:gm='http://base.google.com/ns-metadata/1.0'>
<id>http://www.google.com/base/feeds/attributes</id>
<updated>2006-11-01T20:35:59.578Z</updated>
<category scheme='http://base.google.com/categories/itemtypes' term='online jobs'></category>
<category scheme='http://base.google.com/categories/itemtypes' term='jobs'></category>
<title type='text'>Attribute histogram for query: [item type:jobs]</title>
<link rel='alternate' type='text/html' href='http://base.google.com'></link>
<link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml' href='http://www.google.com/base/feeds
/attributes'></link>
<link rel='self' type='application/atom+xml' href='http://www.google.com/base/feeds/attributes/-/jobs'></link>
<generator version='1.0' uri='http://base.google.com'>GoogleBase</generator>
<openSearch:totalResults>16</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>16</openSearch:itemsPerPage>
<entry>
<id>http://www.google.com/base/feeds/attributes/job+industry%28text%29N%5Bitem+type%3Ajobs%5D</id>
<updated>2006-11-01T20:36:00.100Z</updated>
<title type='text'>job industry(text)</title>
<content type='text'>Attribute"job industry" of type text.
</content>
<link rel='self' type='application/atom+xml' href='http://www.google.com/base/feeds/attributes/job+industry%28text
%29N%5Bitem+type%3Ajobs%5D'></link>
<gm:attribute name='job industry' type='text' count='4416629'>
<gm:value count='380772'>it internet</gm:value>
<gm:value count='261565'>healthcare</gm:value>
<gm:value count='142018'>information technology</gm:value>
<gm:value count='124622'>accounting</gm:value>
<gm:value count='111311'>clerical and administrative</gm:value>
<gm:value count='82928'>other</gm:value>
<gm:value count='77620'>sales and sales management</gm:value>
<gm:value count='68764'>information systems</gm:value>
<gm:value count='65859'>engineering and architecture</gm:value>
<gm:value count='64757'>sales</gm:value>
</gm:attribute>
</entry>
</feed>
"""
GBASE_ATTRIBUTE_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:gm='http://base.google.com/ns-metadata/1.0'>
<id>http://www.google.com/base/feeds/attributes/job+industry%28text%29N%5Bitem+type%3Ajobs%5D</id>
<updated>2006-11-01T20:36:00.100Z</updated>
<title type='text'>job industry(text)</title>
<content type='text'>Attribute"job industry" of type text.
</content>
<link rel='self' type='application/atom+xml' href='http://www.google.com/base/feeds/attributes/job+industry%28text%29N%5Bitem+type%3Ajobs%5D'></link>
<gm:attribute name='job industry' type='text' count='4416629'>
<gm:value count='380772'>it internet</gm:value>
<gm:value count='261565'>healthcare</gm:value>
<gm:value count='142018'>information technology</gm:value>
<gm:value count='124622'>accounting</gm:value>
<gm:value count='111311'>clerical and administrative</gm:value>
<gm:value count='82928'>other</gm:value>
<gm:value count='77620'>sales and sales management</gm:value>
<gm:value count='68764'>information systems</gm:value>
<gm:value count='65859'>engineering and architecture</gm:value>
<gm:value count='64757'>sales</gm:value>
</gm:attribute>
</entry>
"""
GBASE_LOCALES_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:gm='http://base.google.com/ns-metadata/1.0'>
<id>http://www.google.com/base/feeds/locales/</id>
<updated>2006-06-13T18:11:40.120Z</updated>
<title type="text">Locales</title>
<link rel="alternate" type="text/html" href="http://base.google.com"/>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml"
href="http://www.google.com/base/feeds/locales/"/>
<link rel="self" type="application/atom+xml" href="http://www.google.com/base/feeds/locales/"/>
<author>
<name>Google Inc.</name>
<email>[email protected]</email>
</author>
<generator version="1.0" uri="http://base.google.com">GoogleBase</generator>
<openSearch:totalResults>3</openSearch:totalResults>
<openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<entry>
<id>http://www.google.com/base/feeds/locales/en_US</id>
<updated>2006-03-27T22:27:36.658Z</updated>
<category scheme="http://base.google.com/categories/locales" term="en_US"/>
<title type="text">en_US</title>
<content type="text">en_US</content>
<link rel="self" type="application/atom+xml"
href="http://www.google.com/base/feeds/locales/en_US"></link>
<link rel="related" type="application/atom+xml"
href="http://www.google.com/base/feeds/itemtypes/en_US" title="Item types in en_US"/>
</entry>
<entry>
<id>http://www.google.com/base/feeds/locales/en_GB</id>
<updated>2006-06-13T18:14:18.601Z</updated>
<category scheme="http://base.google.com/categories/locales" term="en_GB"/>
<title type="text">en_GB</title>
<content type="text">en_GB</content>
<link rel="related" type="application/atom+xml"
href="http://www.google.com/base/feeds/itemtypes/en_GB" title="Item types in en_GB"/>
<link rel="self" type="application/atom+xml"
href="http://www.google.com/base/feeds/locales/en_GB"/>
</entry>
<entry>
<id>http://www.google.com/base/feeds/locales/de_DE</id>
<updated>2006-06-13T18:14:18.601Z</updated>
<category scheme="http://base.google.com/categories/locales" term="de_DE"/>
<title type="text">de_DE</title>
<content type="text">de_DE</content>
<link rel="related" type="application/atom+xml"
href="http://www.google.com/base/feeds/itemtypes/de_DE" title="Item types in de_DE"/>
<link rel="self" type="application/atom+xml"
href="http://www.google.com/base/feeds/locales/de_DE"/>
</entry>
</feed>"""
GBASE_STRING_ENCODING_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns='http://www.w3.org/2005/Atom' xmlns:gm='http://base.google.com/ns-metadata/1.0'
xmlns:g='http://base.google.com/ns/1.0' xmlns:batch='http://schemas.google.com/gdata/batch'>
<id>http://www.google.com/base/feeds/snippets/17495780256183230088</id>
<published>2007-12-09T03:13:07.000Z</published>
<updated>2008-01-07T03:26:46.000Z</updated>
<category scheme='http://base.google.com/categories/itemtypes' term='Products'/>
<title type='text'>Digital Camera Cord Fits SONY Cybershot DSC-R1 S40</title>
<content type='html'>SONY \xC2\xB7 Cybershot Digital Camera Usb Cable DESCRIPTION
This is a 2.5 USB 2.0 A to Mini B (5 Pin) high quality digital camera
cable used for connecting your Sony Digital Cameras and Camcoders. Backward
Compatible with USB 2.0, 1.0 and 1.1. Fully ...</content>
<link rel='alternate' type='text/html'
href='http://adfarm.mediaplex.com/ad/ck/711-5256-8196-2?loc=http%3A%2F%2Fcgi.ebay.com%2FDigital-Camera-Cord-Fits-SONY-Cybershot-DSC-R1-S40_W0QQitemZ270195049057QQcmdZViewItem'/>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/base/feeds/snippets/17495780256183230088'/>
<author>
<name>eBay</name>
</author>
<g:item_type type='text'>Products</g:item_type>
<g:item_language type='text'>EN</g:item_language>
<g:target_country type='text'>US</g:target_country>
<g:price type='floatUnit'>0.99 usd</g:price>
<g:image_link type='url'>http://thumbs.ebaystatic.com/pict/270195049057_1.jpg</g:image_link>
<g:category type='text'>Cameras &amp; Photo>Digital Camera Accessories>Cables</g:category>
<g:category type='text'>Cords &amp; Connectors>USB Cables>For Other Brands</g:category>
<g:customer_id type='int'>11729</g:customer_id>
<g:id type='text'>270195049057</g:id>
<g:expiration_date type='dateTime'>2008-02-06T03:26:46Z</g:expiration_date>
</entry>"""
RECURRENCE_EXCEPTION_ENTRY = """<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:gd='http://schemas.google.com/g/2005'
xmlns:gCal='http://schemas.google.com/gCal/2005'>
<id>
http://www.google.com/calendar/feeds/default/private/composite/i7lgfj69mjqjgnodklif3vbm7g</id>
<published>2007-04-05T21:51:49.000Z</published>
<updated>2007-04-05T21:51:49.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>testDavid</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=aTdsZ2ZqNjltanFqZ25vZGtsaWYzdmJtN2dfMjAwNzA0MDNUMTgwMDAwWiBnZGF0YS5vcHMudGVzdEBt'
title='alternate'></link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/default/private/composite/i7lgfj69mjqjgnodklif3vbm7g'>
</link>
<author>
<name>gdata ops</name>
<email>[email protected]</email>
</author>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gCal:sendEventNotifications value='true'>
</gCal:sendEventNotifications>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.confirmed'>
</gd:eventStatus>
<gd:recurrence>DTSTART;TZID=America/Anchorage:20070403T100000
DTEND;TZID=America/Anchorage:20070403T110000
RRULE:FREQ=DAILY;UNTIL=20070408T180000Z;WKST=SU
EXDATE;TZID=America/Anchorage:20070407T100000
EXDATE;TZID=America/Anchorage:20070405T100000
EXDATE;TZID=America/Anchorage:20070404T100000 BEGIN:VTIMEZONE
TZID:America/Anchorage X-LIC-LOCATION:America/Anchorage
BEGIN:STANDARD TZOFFSETFROM:-0800 TZOFFSETTO:-0900 TZNAME:AKST
DTSTART:19701025T020000 RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU
END:STANDARD BEGIN:DAYLIGHT TZOFFSETFROM:-0900 TZOFFSETTO:-0800
TZNAME:AKDT DTSTART:19700405T020000
RRULE:FREQ=YEARLY;BYMONTH=4;BYDAY=1SU END:DAYLIGHT
END:VTIMEZONE</gd:recurrence>
<gd:where valueString=''></gd:where>
<gd:reminder minutes='10'></gd:reminder>
<gd:recurrenceException specialized='true'>
<gd:entryLink>
<entry>
<id>i7lgfj69mjqjgnodklif3vbm7g_20070407T180000Z</id>
<published>2007-04-05T21:51:49.000Z</published>
<updated>2007-04-05T21:52:58.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#event'></category>
<title type='text'>testDavid</title>
<content type='text'></content>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/event?eid=aTdsZ2ZqNjltanFqZ25vZGtsaWYzdmJtN2dfMjAwNzA0MDdUMTgwMDAwWiBnZGF0YS5vcHMudGVzdEBt'
title='alternate'></link>
<author>
<name>gdata ops</name>
<email>[email protected]</email>
</author>
<gd:visibility value='http://schemas.google.com/g/2005#event.default'>
</gd:visibility>
<gd:originalEvent id='i7lgfj69mjqjgnodklif3vbm7g'
href='http://www.google.com/calendar/feeds/default/private/composite/i7lgfj69mjqjgnodklif3vbm7g'>
<gd:when startTime='2007-04-07T13:00:00.000-05:00'>
</gd:when>
</gd:originalEvent>
<gCal:sendEventNotifications value='false'>
</gCal:sendEventNotifications>
<gd:transparency value='http://schemas.google.com/g/2005#event.opaque'>
</gd:transparency>
<gd:eventStatus value='http://schemas.google.com/g/2005#event.canceled'>
</gd:eventStatus>
<gd:comments>
<gd:feedLink href='http://www.google.com/calendar/feeds/default/private/full/i7lgfj69mjqjgnodklif3vbm7g_20070407T180000Z/comments'>
<feed>
<updated>2007-04-05T21:54:09.285Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/g/2005#message'>
</category>
<title type='text'>Comments for: testDavid</title>
<link rel='alternate' type='text/html'
href='http://www.google.com/calendar/feeds/default/private/full/i7lgfj69mjqjgnodklif3vbm7g_20070407T180000Z/comments'
title='alternate'></link>
</feed>
</gd:feedLink>
</gd:comments>
<gd:when startTime='2007-04-07T13:00:00.000-05:00'
endTime='2007-04-07T14:00:00.000-05:00'>
<gd:reminder minutes='10'></gd:reminder>
</gd:when>
<gd:where valueString=''></gd:where>
</entry>
</gd:entryLink>
</gd:recurrenceException>
</entry>"""
NICK_ENTRY = """<?xml version="1.0" encoding="UTF-8"?>
<atom:entry xmlns:atom="http://www.w3.org/2005/Atom"
xmlns:apps="http://schemas.google.com/apps/2006"
xmlns:gd="http://schemas.google.com/g/2005">
<atom:id>https://apps-apis.google.com/a/feeds/example.com/nickname/2.0/Foo</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#nickname'/>
<atom:title type="text">Foo</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/example.com/nickname/2.0/Foo"/>
<atom:link rel="edit" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/example.com/nickname/2.0/Foo"/>
<apps:nickname name="Foo"/>
<apps:login userName="TestUser"/>
</atom:entry>"""
NICK_FEED = """<?xml version="1.0" encoding="UTF-8"?>
<atom:feed xmlns:atom="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:apps="http://schemas.google.com/apps/2006">
<atom:id>
http://apps-apis.google.com/a/feeds/example.com/nickname/2.0
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#nickname'/>
<atom:title type="text">Nicknames for user SusanJones</atom:title>
<atom:link rel='http://schemas.google.com/g/2005#feed'
type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/nickname/2.0"/>
<atom:link rel='http://schemas.google.com/g/2005#post'
type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/nickname/2.0"/>
<atom:link rel="self" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/nickname/2.0?username=TestUser"/>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>2</openSearch:itemsPerPage>
<atom:entry>
<atom:id>
http://apps-apis.google.com/a/feeds/example.com/nickname/2.0/Foo
</atom:id>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#nickname'/>
<atom:title type="text">Foo</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/nickname/2.0/Foo"/>
<atom:link rel="edit" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/nickname/2.0/Foo"/>
<apps:nickname name="Foo"/>
<apps:login userName="TestUser"/>
</atom:entry>
<atom:entry>
<atom:id>
http://apps-apis.google.com/a/feeds/example.com/nickname/2.0/suse
</atom:id>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#nickname'/>
<atom:title type="text">suse</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/nickname/2.0/Bar"/>
<atom:link rel="edit" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/nickname/2.0/Bar"/>
<apps:nickname name="Bar"/>
<apps:login userName="TestUser"/>
</atom:entry>
</atom:feed>"""
USER_ENTRY = """<?xml version="1.0" encoding="UTF-8"?>
<atom:entry xmlns:atom="http://www.w3.org/2005/Atom"
xmlns:apps="http://schemas.google.com/apps/2006"
xmlns:gd="http://schemas.google.com/g/2005">
<atom:id>https://apps-apis.google.com/a/feeds/example.com/user/2.0/TestUser</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#user'/>
<atom:title type="text">TestUser</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/example.com/user/2.0/TestUser"/>
<atom:link rel="edit" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/example.com/user/2.0/TestUser"/>
<apps:login userName="TestUser" password="password" suspended="false"
ipWhitelisted='false' hashFunctionName="SHA-1"/>
<apps:name familyName="Test" givenName="User"/>
<apps:quota limit="1024"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#user.nicknames'
href="https://apps-apis.google.com/a/feeds/example.com/nickname/2.0?username=Test-3121"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#user.emailLists'
href="https://apps-apis.google.com/a/feeds/example.com/emailList/[email protected]"/>
</atom:entry>"""
USER_FEED = """<?xml version="1.0" encoding="UTF-8"?>
<atom:feed xmlns:atom="http://www.w3.org/2005/Atom"
xmlns:apps="http://schemas.google.com/apps/2006"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:gd="http://schemas.google.com/g/2005">
<atom:id>
http://apps-apis.google.com/a/feeds/example.com/user/2.0
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#user'/>
<atom:title type="text">Users</atom:title>
<atom:link rel="next" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/user/2.0?startUsername=john"/>
<atom:link rel='http://schemas.google.com/g/2005#feed'
type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/user/2.0"/>
<atom:link rel='http://schemas.google.com/g/2005#post'
type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/user/2.0"/>
<atom:link rel="self" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/user/2.0"/>
<openSearch:startIndex>1</openSearch:startIndex>
<atom:entry>
<atom:id>
http://apps-apis.google.com/a/feeds/example.com/user/2.0/TestUser
</atom:id>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#user'/>
<atom:title type="text">TestUser</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/user/2.0/TestUser"/>
<atom:link rel="edit" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/user/2.0/TestUser"/>
<gd:who rel='http://schemas.google.com/apps/2006#user.recipient'
email="[email protected]"/>
<apps:login userName="TestUser" suspended="false"/>
<apps:quota limit="2048"/>
<apps:name familyName="Test" givenName="User"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#user.nicknames'
href="http://apps-apis.google.com/a/feeds/example.com/nickname/2.0?username=TestUser"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#user.emailLists'
href="http://apps-apis.google.com/a/feeds/example.com/emailList/[email protected]"/>
</atom:entry>
<atom:entry>
<atom:id>
http://apps-apis.google.com/a/feeds/example.com/user/2.0/JohnSmith
</atom:id>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#user'/>
<atom:title type="text">JohnSmith</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/user/2.0/JohnSmith"/>
<atom:link rel="edit" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/user/2.0/JohnSmith"/>
<gd:who rel='http://schemas.google.com/apps/2006#user.recipient'
email="[email protected]"/>
<apps:login userName="JohnSmith" suspended="false"/>
<apps:quota limit="2048"/>
<apps:name familyName="Smith" givenName="John"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#user.nicknames'
href="http://apps-apis.google.com/a/feeds/example.com/nickname/2.0?username=JohnSmith"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#user.emailLists'
href="http://apps-apis.google.com/a/feeds/example.com/emailList/[email protected]"/>
</atom:entry>
</atom:feed>"""
EMAIL_LIST_ENTRY = """<?xml version="1.0" encoding="UTF-8"?>
<atom:entry xmlns:atom="http://www.w3.org/2005/Atom"
xmlns:apps="http://schemas.google.com/apps/2006"
xmlns:gd="http://schemas.google.com/g/2005">
<atom:id>
https://apps-apis.google.com/a/feeds/example.com/emailList/2.0/testlist
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#emailList'/>
<atom:title type="text">testlist</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/example.com/emailList/2.0/testlist"/>
<atom:link rel="edit" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/example.com/emailList/2.0/testlist"/>
<apps:emailList name="testlist"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#emailList.recipients'
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/testlist/recipient/"/>
</atom:entry>"""
EMAIL_LIST_FEED = """<?xml version="1.0" encoding="UTF-8"?>
<atom:feed xmlns:atom="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:apps="http://schemas.google.com/apps/2006"
xmlns:gd="http://schemas.google.com/g/2005">
<atom:id>
http://apps-apis.google.com/a/feeds/example.com/emailList/2.0
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#emailList'/>
<atom:title type="text">EmailLists</atom:title>
<atom:link rel="next" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0?startEmailListName=john"/>
<atom:link rel='http://schemas.google.com/g/2005#feed'
type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0"/>
<atom:link rel='http://schemas.google.com/g/2005#post'
type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0"/>
<atom:link rel="self" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0"/>
<openSearch:startIndex>1</openSearch:startIndex>
<atom:entry>
<atom:id>
http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#emailList'/>
<atom:title type="text">us-sales</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales"/>
<atom:link rel="edit" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales"/>
<apps:emailList name="us-sales"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#emailList.recipients'
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/"/>
</atom:entry>
<atom:entry>
<atom:id>
http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-eng
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#emailList'/>
<atom:title type="text">us-eng</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-eng"/>
<atom:link rel="edit" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-eng"/>
<apps:emailList name="us-eng"/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#emailList.recipients'
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-eng/recipient/"/>
</atom:entry>
</atom:feed>"""
EMAIL_LIST_RECIPIENT_ENTRY = """<?xml version="1.0" encoding="UTF-8"?>
<atom:entry xmlns:atom="http://www.w3.org/2005/Atom"
xmlns:apps="http://schemas.google.com/apps/2006"
xmlns:gd="http://schemas.google.com/g/2005">
<atom:id>https://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/TestUser%40example.com</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#emailList.recipient'/>
<atom:title type="text">TestUser</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/TestUser%40example.com"/>
<atom:link rel="edit" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/TestUser%40example.com"/>
<gd:who email="[email protected]"/>
</atom:entry>"""
EMAIL_LIST_RECIPIENT_FEED = """<?xml version="1.0" encoding="UTF-8"?>
<atom:feed xmlns:atom="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:gd="http://schemas.google.com/g/2005">
<atom:id>
http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#emailList.recipient'/>
<atom:title type="text">Recipients for email list us-sales</atom:title>
<atom:link rel="next" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/[email protected]"/>
<atom:link rel='http://schemas.google.com/g/2005#feed'
type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient"/>
<atom:link rel='http://schemas.google.com/g/2005#post'
type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient"/>
<atom:link rel="self" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient"/>
<openSearch:startIndex>1</openSearch:startIndex>
<atom:entry>
<atom:id>
http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/joe%40example.com
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#emailList.recipient'/>
<atom:title type="text">[email protected]</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/joe%40example.com"/>
<atom:link rel="edit" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/joe%40example.com"/>
<gd:who email="[email protected]"/>
</atom:entry>
<atom:entry>
<atom:id>
http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/susan%40example.com
</atom:id>
<atom:updated>1970-01-01T00:00:00.000Z</atom:updated>
<atom:category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#emailList.recipient'/>
<atom:title type="text">[email protected]</atom:title>
<atom:link rel="self" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/susan%40example.com"/>
<atom:link rel="edit" type="application/atom+xml"
href="http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/susan%40example.com"/>
<gd:who email="[email protected]"/>
</atom:entry>
</atom:feed>"""
ACL_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:gAcl='http://schemas.google.com/acl/2007'>
<id>http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full</id>
<updated>2007-04-21T00:52:04.000Z</updated>
<title type='text'>Elizabeth Bennet's access control list</title>
<link rel='http://schemas.google.com/acl/2007#controlledObject'
type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/private/full'>
</link>
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full'>
</link>
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full'>
</link>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full'>
</link>
<generator version='1.0'
uri='http://www.google.com/calendar'>Google Calendar</generator>
<openSearch:totalResults>2</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/user%3Aliz%40gmail.com</id>
<updated>2007-04-21T00:52:04.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/acl/2007#accessRule'>
</category>
<title type='text'>owner</title>
<content type='text'></content>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/user%3Aliz%40gmail.com'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/user%3Aliz%40gmail.com'>
</link>
<author>
<name>Elizabeth Bennet</name>
<email>[email protected]</email>
</author>
<gAcl:scope type='user' value='[email protected]'></gAcl:scope>
<gAcl:role value='http://schemas.google.com/gCal/2005#owner'>
</gAcl:role>
</entry>
<entry>
<id>http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/default</id>
<updated>2007-04-21T00:52:04.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/acl/2007#accessRule'>
</category>
<title type='text'>read</title>
<content type='text'></content>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/default'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/default'>
</link>
<author>
<name>Elizabeth Bennet</name>
<email>[email protected]</email>
</author>
<gAcl:scope type='default'></gAcl:scope>
<gAcl:role value='http://schemas.google.com/gCal/2005#read'>
</gAcl:role>
</entry>
</feed>"""
ACL_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:gd='http://schemas.google.com/g/2005' xmlns:gCal='http://schemas.google.com/gCal/2005' xmlns:gAcl='http://schemas.google.com/acl/2007'>
<id>http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/user%3Aliz%40gmail.com</id>
<updated>2007-04-21T00:52:04.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/acl/2007#accessRule'>
</category>
<title type='text'>owner</title>
<content type='text'></content>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/user%3Aliz%40gmail.com'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/user%3Aliz%40gmail.com'>
</link>
<author>
<name>Elizabeth Bennet</name>
<email>[email protected]</email>
</author>
<gAcl:scope type='user' value='[email protected]'></gAcl:scope>
<gAcl:role value='http://schemas.google.com/gCal/2005#owner'>
</gAcl:role>
</entry>"""
DOCUMENT_LIST_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<ns0:feed xmlns:ns0="http://www.w3.org/2005/Atom" xmlns:ns2="http://schemas.google.com/g/2005" xmlns:ns3="http://schemas.google.com/docs/2007"><ns1:totalResults
xmlns:ns1="http://a9.com/-/spec/opensearchrss/1.0/">2</ns1:totalResults><ns1:startIndex
xmlns:ns1="http://a9.com/-/spec/opensearchrss/1.0/">1</ns1:startIndex><ns0:entry><ns0:content
src="https://foo.com/fm?fmcmd=102&key=supercalifragilisticexpeadocious"
type="text/html"
/><ns0:author><ns0:name>test.user</ns0:name><ns0:email>[email protected]</ns0:email></ns0:author><ns0:category
label="spreadsheet" scheme="http://schemas.google.com/g/2005#kind"
term="http://schemas.google.com/docs/2007#spreadsheet"
/><ns0:id>https://docs.google.com/feeds/documents/private/full/spreadsheet%3Asupercalifragilisticexpeadocious</ns0:id><ns0:link
href="https://foo.com/ccc?key=supercalifragilisticexpeadocious" rel="alternate"
type="text/html" /><ns0:link
href="https://foo.com/feeds/worksheets/supercalifragilisticexpeadocious/private/full"
rel="http://schemas.google.com/spreadsheets/2006#worksheetsfeed"
type="application/atom+xml" /><ns0:link
href="https://docs.google.com/feeds/documents/private/full/spreadsheet%3Asupercalifragilisticexpeadocious"
rel="self" type="application/atom+xml" /><ns0:title type="text">Test Spreadsheet</ns0:title><ns0:updated>2007-07-03T18:03:32.045Z</ns0:updated>
<ns2:feedLink href="http://docs.google.com/feeds/acl/private/full/spreadsheet%3Afoofoofoo" rel="http://schemas.google.com/acl/2007#accessControlList"/>
<ns2:resourceId>document:dfrkj84g_3348jbxpxcd</ns2:resourceId>
<ns2:lastModifiedBy>
<ns0:name>test.user</ns0:name>
<ns0:email>[email protected]</ns0:email>
</ns2:lastModifiedBy>
<ns2:lastViewed>2009-03-05T07:48:21.493Z</ns2:lastViewed>
<ns3:writersCanInvite value='true'/>
</ns0:entry><ns0:entry><ns0:content
src="http://docs.google.com/RawDocContents?action=fetch&docID=gr00vy"
type="text/html"
/><ns0:author><ns0:name>test.user</ns0:name><ns0:email>[email protected]</ns0:email></ns0:author><ns0:category
label="document" scheme="http://schemas.google.com/g/2005#kind"
term="http://schemas.google.com/docs/2007#document"
/><ns0:id>http://docs.google.com/feeds/documents/private/full/document%3Agr00vy</ns0:id><ns0:link
href="http://foobar.com/Doc?id=gr00vy" rel="alternate" type="text/html"
/><ns0:link
href="http://docs.google.com/feeds/documents/private/full/document%3Agr00vy"
rel="self" type="application/atom+xml" /><ns0:title type="text">Test Document</ns0:title><ns0:updated>2007-07-03T18:02:50.338Z</ns0:updated>
<ns2:feedLink href="http://docs.google.com/feeds/acl/private/full/document%3Afoofoofoo" rel="http://schemas.google.com/acl/2007#accessControlList"/>
<ns2:lastModifiedBy>
<ns0:name>test.user</ns0:name>
<ns0:email>[email protected]</ns0:email>
</ns2:lastModifiedBy>
<ns3:writersCanInvite value='false'/>
<ns2:lastViewed>2009-03-05T07:48:21.493Z</ns2:lastViewed>
</ns0:entry><ns0:id>http://docs.google.com/feeds/documents/private/full</ns0:id><ns0:link
href="http://docs.google.com" rel="alternate" type="text/html" /><ns0:link
href="http://docs.google.com/feeds/documents/private/full"
rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml"
/><ns0:link href="http://docs.google.com/feeds/documents/private/full"
rel="http://schemas.google.com/g/2005#post" type="application/atom+xml"
/><ns0:link href="http://docs.google.com/feeds/documents/private/full"
rel="self" type="application/atom+xml" /><ns0:title type="text">Available
Documents -
[email protected]</ns0:title><ns0:updated>2007-07-09T23:07:21.898Z</ns0:updated>
</ns0:feed>
"""
DOCUMENT_LIST_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>
<ns0:entry xmlns:ns0="http://www.w3.org/2005/Atom" xmlns:ns1="http://schemas.google.com/g/2005" xmlns:ns2="http://schemas.google.com/docs/2007"><ns0:content
src="https://foo.com/fm?fmcmd=102&key=supercalifragilisticexpealidocious" type="text/html"/>
<ns0:author><ns0:name>test.user</ns0:name><ns0:email>[email protected]</ns0:email></ns0:author>
<ns0:category label="spreadsheet" scheme="http://schemas.google.com/g/2005#kind"
term="http://schemas.google.com/docs/2007#spreadsheet"
/><ns0:id>https://docs.google.com/feeds/documents/private/full/spreadsheet%3Asupercalifragilisticexpealidocious</ns0:id>
<ns0:link href="https://foo.com/ccc?key=supercalifragilisticexpealidocious"
rel="alternate" type="text/html" /><ns0:link
href="https://foo.com/feeds/worksheets/supercalifragilisticexpealidocious/private/full"
rel="http://schemas.google.com/spreadsheets/2006#worksheetsfeed"
type="application/atom+xml" /><ns0:link
href="https://docs.google.com/feeds/documents/private/full/spreadsheet%3Asupercalifragilisticexpealidocious"
rel="self" type="application/atom+xml" />
<ns0:title type="text">Test Spreadsheet</ns0:title><ns0:updated>2007-07-03T18:03:32.045Z</ns0:updated>
<ns1:resourceId>spreadsheet:supercalifragilisticexpealidocious</ns1:resourceId>
<ns1:lastModifiedBy>
<ns0:name>test.user</ns0:name>
<ns0:email>[email protected]</ns0:email>
</ns1:lastModifiedBy>
<ns1:lastViewed>2009-03-05T07:48:21.493Z</ns1:lastViewed>
<ns2:writersCanInvite value='true'/>
</ns0:entry>
"""
DOCUMENT_LIST_ENTRY_V3 = """<?xml version='1.0' encoding='UTF-8'?>
<ns0:entry xmlns:ns0="http://www.w3.org/2005/Atom" xmlns:ns1="http://schemas.google.com/g/2005" xmlns:ns2="http://schemas.google.com/docs/2007"><ns0:content
src="https://foo.com/fm?fmcmd=102&key=supercalifragilisticexpealidocious" type="text/html"/>
<ns0:author><ns0:name>test.user</ns0:name><ns0:email>[email protected]</ns0:email></ns0:author>
<ns0:category label="spreadsheet" scheme="http://schemas.google.com/g/2005#kind"
term="http://schemas.google.com/docs/2007#spreadsheet"
/><ns0:id>https://docs.google.com/feeds/documents/private/full/spreadsheet%3Asupercalifragilisticexpealidocious</ns0:id>
<ns0:link href="https://foo.com/ccc?key=supercalifragilisticexpealidocious"
rel="alternate" type="text/html" /><ns0:link
href="https://foo.com/feeds/worksheets/supercalifragilisticexpealidocious/private/full"
rel="http://schemas.google.com/spreadsheets/2006#worksheetsfeed"
type="application/atom+xml" /><ns0:link
href="https://docs.google.com/feeds/documents/private/full/spreadsheet%3Asupercalifragilisticexpealidocious"
rel="self" type="application/atom+xml" />
<ns0:link rel="http://schemas.google.com/docs/2007#parent" type="application/atom+xml"
href="http://docs.google.com/feeds/default/private/full/folder%3A12345" title="AFolderName" />
<ns0:title type="text">Test Spreadsheet</ns0:title><ns0:updated>2007-07-03T18:03:32.045Z</ns0:updated>
<ns1:resourceId>spreadsheet:supercalifragilisticexpealidocious</ns1:resourceId>
<ns1:lastModifiedBy>
<ns0:name>test.user</ns0:name>
<ns0:email>[email protected]</ns0:email>
</ns1:lastModifiedBy>
<ns1:lastViewed>2009-03-05T07:48:21.493Z</ns1:lastViewed>
<ns2:writersCanInvite value='true'/>
<ns1:quotaBytesUsed>1000</ns1:quotaBytesUsed>
<ns1:feedLink rel="http://schemas.google.com/acl/2007#accessControlList" href="https://docs.google.com/feeds/default/private/full/spreadsheet%3Asupercalifragilisticexpealidocious/acl" />
<ns1:feedLink rel="http://schemas.google.com/docs/2007/revisions" href="https://docs.google.com/feeds/default/private/full/spreadsheet%3Asupercalifragilisticexpealidocious/revisions" />
<ns0:category scheme="http://schemas.google.com/g/2005/labels" term="http://schemas.google.com/g/2005/labels#starred" label="starred"/>
<ns0:category scheme="http://schemas.google.com/g/2005/labels" term="http://schemas.google.com/g/2005/labels#viewed" label="viewed"/>
<ns0:category scheme="http://schemas.google.com/g/2005/labels" term="http://schemas.google.com/g/2005/labels#hidden" label="hidden"/>
<ns0:category scheme="http://schemas.google.com/g/2005/labels" term="http://schemas.google.com/g/2005/labels#trashed" label="trashed"/>
<ns0:category scheme="http://schemas.google.com/g/2005/labels" term="http://schemas.google.com/g/2005/labels#mine" label="mine"/>
<ns0:category scheme="http://schemas.google.com/g/2005/labels" term="http://schemas.google.com/g/2005/labels#private" label="private"/>
<ns0:category scheme="http://schemas.google.com/g/2005/labels" term="http://schemas.google.com/g/2005/labels#shared-with-domain" label="shared-with-domain"/>
<ns0:category scheme="http://schemas.google.com/g/2005/labels" term="http://schemas.google.com/g/2005/labels#restricted-download" label="restricted-download"/>
</ns0:entry>
"""
DOCUMENT_LIST_ACL_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns="http://www.w3.org/2005/Atom"
xmlns:gAcl='http://schemas.google.com/acl/2007'>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/acl/2007#accessRule'/>
<gAcl:role value='writer'/>
<gAcl:scope type='user' value='[email protected]'/>
</entry>"""
DOCUMENT_LIST_ACL_WITHKEY_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns="http://www.w3.org/2005/Atom"
xmlns:gAcl='http://schemas.google.com/acl/2007'>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/acl/2007#accessRule'/>
<gAcl:withKey key='somekey'><gAcl:role value='writer' /></gAcl:withKey>
<gAcl:scope type='domain' value='example.com' />
</entry>"""
DOCUMENT_LIST_ACL_ADDITIONAL_ROLE_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns="http://www.w3.org/2005/Atom"
xmlns:gAcl='http://schemas.google.com/acl/2007'>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/acl/2007#accessRule'/>
<gAcl:additionalRole value='commenter' />
<gAcl:withKey key='somekey'>
<gAcl:role value='writer' />
<gAcl:additionalRole value='commenter' />
</gAcl:withKey>
<gAcl:scope type='domain' value='example.com' />
</entry>"""
DOCUMENT_LIST_ACL_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:gAcl="http://schemas.google.com/acl/2007"
xmlns:batch="http://schemas.google.com/gdata/batch">
<id>http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8feTQYCgZpwUQ</id>
<updated>2009-02-22T03:48:25.895Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/acl/2007#accessRule"/>
<title type="text">Document Permissions</title>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8feTQYCgZpwUQ"/>
<link rel="http://schemas.google.com/g/2005#post" type="application/atom+xml" href="http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8feTQYCgZpwUQ"/>
<link rel="http://schemas.google.com/g/2005#batch" type="application/atom+xml" href="http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8feTQYCgZpwUQ/batch"/>
<link rel="self" type="application/atom+xml" href="http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8feTQYCgZpwUQ"/>
<openSearch:totalResults>2</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8feTQp4pwUwUQ/user%3Auser%40gmail.com</id>
<updated>2009-02-22T03:48:25.896Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/acl/2007#accessRule"/>
<title type="text">Document Permission - [email protected]</title>
<link rel="self" type="application/atom+xml" href="http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8feTQp4pwUwUQ/user%3Auser%40gmail.com"/>
<link rel="edit" type="application/atom+xml" href="http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8feTQp4pwUwUQ/user%3Auser%40gmail.com"/>
<gAcl:role value="owner"/>
<gAcl:scope type="user" value="[email protected]"/>
</entry>
<entry>
<id>http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8fCgZp4pwUwUQ/user%3Auser2%40google.com</id>
<updated>2009-02-22T03:48:26.257Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/acl/2007#accessRule"/>
<title type="text">Document Permission - [email protected]</title>
<link rel="self" type="application/atom+xml" href="http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8feTQYCgZp4pwUwUQ/user%3Auser2%40google.com"/>
<link rel="edit" type="application/atom+xml" href="http://docs.google.com/feeds/acl/private/full/spreadsheet%3ApFrmMi8feTQYCgZp4pwUwUQ/user%3Auser2%40google.com"/>
<gAcl:role value="writer"/>
<gAcl:scope type="domain" value="google.com"/>
</entry>
</feed>"""
DOCUMENT_LIST_REVISION_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/"
xmlns:batch="http://schemas.google.com/gdata/batch" xmlns:gd="http://schemas.google.com/g/2005"
xmlns:docs="http://schemas.google.com/docs/2007"
gd:etag="W/"CE4HQX08cCt7ImA9WxNTFEU."">
<id>https://docs.google.com/feeds/default/private/full/resource_id/revisions</id>
<updated>2009-08-17T04:22:10.378Z</updated>
<title>Document Revisions</title>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml"
href="https://docs.google.com/feeds/default/private/full/resource_id/revisions"/>
<link rel="http://schemas.google.com/g/2005#batch" type="application/atom+xml"
href="https://docs.google.com/feeds/default/private/full/resource_id/revisions/batch"/>
<link rel="self" type="application/atom+xml"
href="https://docs.google.com/feeds/default/private/full/resource_id/revisions"/>
<openSearch:totalResults>6</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>https://docs.google.com/feeds/id/resource_id/revisions/2</id>
<updated>2009-08-17T04:22:10.440Z</updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">2009-08-14T07:11:34.197Z</app:edited>
<title>Revision 2</title>
<content type="text/html" src="https://docs.google.com/feeds/download/documents/Export?docId=doc_id&revision=2"/>
<link rel="alternate" type="text/html"
href="https://docs.google.com/Doc?id=doc_id&revision=2"/>
<link rel="self" type="application/atom+xml"
href="https://docs.google.com/feeds/default/private/full/resource_id/revisions/2"/>
<link rel='http://schemas.google.com/docs/2007#publish' type='text/html' href='https://docs.google.com/View?docid=dfr4&amp;pageview=1&amp;hgd=1'/>
<author>
<name>another_user</name>
<email>[email protected]</email>
</author>
<docs:publish value="true"/>
<docs:publishAuto value="true"/>
<docs:publishOutsideDomain value="false"/>
</entry>
</feed>
"""
DOCUMENT_LIST_METADATA = """
<entry xmlns="http://www.w3.org/2005/Atom"
xmlns:docs="http://schemas.google.com/docs/2007"
xmlns:gd="http://schemas.google.com/g/2005"
xmlns:gAcl="http://schemas.google.com/acl/2007"
gd:etag="W/"AkYNRnc_eSt7ImA9WxBUFks."">
<docs:additionalRoleInfo kind='document'>
<docs:additionalRoleSet primaryRole='reader'>
<gAcl:additionalRole value='commenter' />
</docs:additionalRoleSet>
</docs:additionalRoleInfo>
</entry>
"""
BATCH_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns="http://www.w3.org/2005/Atom"
xmlns:batch="http://schemas.google.com/gdata/batch"
xmlns:g="http://base.google.com/ns/1.0">
<id>http://www.google.com/base/feeds/items/2173859253842813008</id>
<published>2006-07-11T14:51:43.560Z</published>
<updated>2006-07-11T14:51:43.560Z</updated>
<title type="text">title</title>
<content type="html">content</content>
<link rel="self"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items/2173859253842813008"/>
<link rel="edit"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items/2173859253842813008"/>
<g:item_type>recipes</g:item_type>
<batch:operation type="insert"/>
<batch:id>itemB</batch:id>
<batch:status code="201" reason="Created"/>
</entry>"""
BATCH_FEED_REQUEST = """<?xml version="1.0" encoding="UTF-8"?>
<feed
xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:g="http://base.google.com/ns/1.0"
xmlns:batch="http://schemas.google.com/gdata/batch">
<title type="text">My Batch Feed</title>
<entry>
<id>http://www.google.com/base/feeds/items/13308004346459454600</id>
<batch:operation type="delete"/>
</entry>
<entry>
<id>http://www.google.com/base/feeds/items/17437536661927313949</id>
<batch:operation type="delete"/>
</entry>
<entry>
<title type="text">...</title>
<content type="html">...</content>
<batch:id>itemA</batch:id>
<batch:operation type="insert"/>
<g:item_type>recipes</g:item_type>
</entry>
<entry>
<title type="text">...</title>
<content type="html">...</content>
<batch:id>itemB</batch:id>
<batch:operation type="insert"/>
<g:item_type>recipes</g:item_type>
</entry>
</feed>"""
BATCH_FEED_RESULT = """<?xml version="1.0" encoding="UTF-8"?>
<feed
xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:g="http://base.google.com/ns/1.0"
xmlns:batch="http://schemas.google.com/gdata/batch">
<id>http://www.google.com/base/feeds/items</id>
<updated>2006-07-11T14:51:42.894Z</updated>
<title type="text">My Batch</title>
<link rel="http://schemas.google.com/g/2005#feed"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items"/>
<link rel="http://schemas.google.com/g/2005#post"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items"/>
<link rel=" http://schemas.google.com/g/2005#batch"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items/batch"/>
<entry>
<id>http://www.google.com/base/feeds/items/2173859253842813008</id>
<published>2006-07-11T14:51:43.560Z</published>
<updated>2006-07-11T14:51:43.560Z</updated>
<title type="text">...</title>
<content type="html">...</content>
<link rel="self"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items/2173859253842813008"/>
<link rel="edit"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items/2173859253842813008"/>
<g:item_type>recipes</g:item_type>
<batch:operation type="insert"/>
<batch:id>itemB</batch:id>
<batch:status code="201" reason="Created"/>
</entry>
<entry>
<id>http://www.google.com/base/feeds/items/11974645606383737963</id>
<published>2006-07-11T14:51:43.247Z</published>
<updated>2006-07-11T14:51:43.247Z</updated>
<title type="text">...</title>
<content type="html">...</content>
<link rel="self"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items/11974645606383737963"/>
<link rel="edit"
type="application/atom+xml"
href="http://www.google.com/base/feeds/items/11974645606383737963"/>
<g:item_type>recipes</g:item_type>
<batch:operation type="insert"/>
<batch:id>itemA</batch:id>
<batch:status code="201" reason="Created"/>
</entry>
<entry>
<id>http://www.google.com/base/feeds/items/13308004346459454600</id>
<updated>2006-07-11T14:51:42.894Z</updated>
<title type="text">Error</title>
<content type="text">Bad request</content>
<batch:status code="404"
reason="Bad request"
content-type="application/xml">
<errors>
<error type="request" reason="Cannot find item"/>
</errors>
</batch:status>
</entry>
<entry>
<id>http://www.google.com/base/feeds/items/17437536661927313949</id>
<updated>2006-07-11T14:51:43.246Z</updated>
<content type="text">Deleted</content>
<batch:operation type="delete"/>
<batch:status code="200" reason="Success"/>
</entry>
</feed>"""
ALBUM_FEED = """<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/" xmlns:exif="http://schemas.google.com/photos/exif/2007" xmlns:geo="http://www.w3.org/2003/01/geo/wgs84_pos#" xmlns:gml="http://www.opengis.net/gml" xmlns:georss="http://www.georss.org/georss" xmlns:photo="http://www.pheed.com/pheed/" xmlns:media="http://search.yahoo.com/mrss/" xmlns:batch="http://schemas.google.com/gdata/batch" xmlns:gphoto="http://schemas.google.com/photos/2007">
<id>http://picasaweb.google.com/data/feed/api/user/sample.user/albumid/1</id>
<updated>2007-09-21T18:23:05.000Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/photos/2007#album"/>
<title type="text">Test</title>
<subtitle type="text"/>
<rights type="text">public</rights>
<icon>http://lh6.google.com/sample.user/Rt8WNoDZEJE/AAAAAAAAABk/HQGlDhpIgWo/s160-c/Test.jpg</icon>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="http://picasaweb.google.com/data/feed/api/user/sample.user/albumid/1"/>
<link rel="alternate" type="text/html" href="http://picasaweb.google.com/sample.user/Test"/>
<link rel="http://schemas.google.com/photos/2007#slideshow" type="application/x-shockwave-flash" href="http://picasaweb.google.com/s/c/bin/slideshow.swf?host=picasaweb.google.com&RGB=0x000000&feed=http%3A%2F%2Fpicasaweb.google.com%2Fdata%2Ffeed%2Fapi%2Fuser%2Fsample.user%2Falbumid%2F1%3Falt%3Drss"/>
<link rel="self" type="application/atom+xml" href="http://picasaweb.google.com/data/feed/api/user/sample.user/albumid/1?start-index=1&max-results=500&kind=photo%2Ctag"/>
<author>
<name>sample</name>
<uri>http://picasaweb.google.com/sample.user</uri>
</author>
<generator version="1.00" uri="http://picasaweb.google.com/">Picasaweb</generator> <openSearch:totalResults>4</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>500</openSearch:itemsPerPage>
<gphoto:id>1</gphoto:id>
<gphoto:name>Test</gphoto:name>
<gphoto:location/>
<gphoto:access>public</gphoto:access> <gphoto:timestamp>1188975600000</gphoto:timestamp>
<gphoto:numphotos>2</gphoto:numphotos>
<gphoto:user>sample.user</gphoto:user>
<gphoto:nickname>sample</gphoto:nickname>
<gphoto:commentingEnabled>true</gphoto:commentingEnabled>
<gphoto:commentCount>0</gphoto:commentCount>
<entry> <id>http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/photoid/2</id>
<published>2007-09-05T20:49:23.000Z</published>
<updated>2007-09-21T18:23:05.000Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/photos/2007#photo"/>
<title type="text">Aqua Blue.jpg</title>
<summary type="text">Blue</summary>
<content type="image/jpeg" src="http://lh4.google.com/sample.user/Rt8WU4DZEKI/AAAAAAAAABY/IVgLqmnzJII/Aqua%20Blue.jpg"/> <link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="http://picasaweb.google.com/data/feed/api/user/sample.user/albumid/1/photoid/2"/>
<link rel="alternate" type="text/html" href="http://picasaweb.google.com/sample.user/Test/photo#2"/>
<link rel="self" type="application/atom+xml" href="http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/photoid/2"/>
<gphoto:id>2</gphoto:id>
<gphoto:version>1190398985145172</gphoto:version>
<gphoto:position>0.0</gphoto:position>
<gphoto:albumid>1</gphoto:albumid> <gphoto:width>2560</gphoto:width>
<gphoto:height>1600</gphoto:height>
<gphoto:size>883405</gphoto:size>
<gphoto:client/>
<gphoto:checksum/>
<gphoto:timestamp>1189025362000</gphoto:timestamp>
<exif:tags> <exif:flash>true</exif:flash>
<exif:imageUniqueID>c041ce17aaa637eb656c81d9cf526c24</exif:imageUniqueID>
</exif:tags>
<gphoto:commentingEnabled>true</gphoto:commentingEnabled>
<gphoto:commentCount>1</gphoto:commentCount>
<media:group>
<media:title type="plain">Aqua Blue.jpg</media:title> <media:description type="plain">Blue</media:description>
<media:keywords>tag, test</media:keywords>
<media:content url="http://lh4.google.com/sample.user/Rt8WU4DZEKI/AAAAAAAAABY/IVgLqmnzJII/Aqua%20Blue.jpg" height="1600" width="2560" type="image/jpeg" medium="image"/>
<media:thumbnail url="http://lh4.google.com/sample.user/Rt8WU4DZEKI/AAAAAAAAABY/IVgLqmnzJII/s72/Aqua%20Blue.jpg" height="45" width="72"/>
<media:thumbnail url="http://lh4.google.com/sample.user/Rt8WU4DZEKI/AAAAAAAAABY/IVgLqmnzJII/s144/Aqua%20Blue.jpg" height="90" width="144"/>
<media:thumbnail url="http://lh4.google.com/sample.user/Rt8WU4DZEKI/AAAAAAAAABY/IVgLqmnzJII/s288/Aqua%20Blue.jpg" height="180" width="288"/>
<media:credit>sample</media:credit>
</media:group>
</entry>
<entry>
<id>http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/photoid/3</id>
<published>2007-09-05T20:49:24.000Z</published>
<updated>2007-09-21T18:19:38.000Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/photos/2007#photo"/>
<title type="text">Aqua Graphite.jpg</title>
<summary type="text">Gray</summary>
<content type="image/jpeg" src="http://lh5.google.com/sample.user/Rt8WVIDZELI/AAAAAAAAABg/d7e0i7gvhNU/Aqua%20Graphite.jpg"/>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="http://picasaweb.google.com/data/feed/api/user/sample.user/albumid/1/photoid/3"/>
<link rel="alternate" type="text/html" href="http://picasaweb.google.com/sample.user/Test/photo#3"/>
<link rel="self" type="application/atom+xml" href="http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/photoid/3"/>
<gphoto:id>3</gphoto:id>
<gphoto:version>1190398778006402</gphoto:version>
<gphoto:position>1.0</gphoto:position>
<gphoto:albumid>1</gphoto:albumid>
<gphoto:width>2560</gphoto:width>
<gphoto:height>1600</gphoto:height>
<gphoto:size>798334</gphoto:size>
<gphoto:client/>
<gphoto:checksum/>
<gphoto:timestamp>1189025363000</gphoto:timestamp>
<exif:tags>
<exif:flash>true</exif:flash>
<exif:imageUniqueID>a5ce2e36b9df7d3cb081511c72e73926</exif:imageUniqueID>
</exif:tags>
<gphoto:commentingEnabled>true</gphoto:commentingEnabled>
<gphoto:commentCount>0</gphoto:commentCount>
<media:group>
<media:title type="plain">Aqua Graphite.jpg</media:title>
<media:description type="plain">Gray</media:description>
<media:keywords/>
<media:content url="http://lh5.google.com/sample.user/Rt8WVIDZELI/AAAAAAAAABg/d7e0i7gvhNU/Aqua%20Graphite.jpg" height="1600" width="2560" type="image/jpeg" medium="image"/>
<media:thumbnail url="http://lh5.google.com/sample.user/Rt8WVIDZELI/AAAAAAAAABg/d7e0i7gvhNU/s72/Aqua%20Graphite.jpg" height="45" width="72"/>
<media:thumbnail url="http://lh5.google.com/sample.user/Rt8WVIDZELI/AAAAAAAAABg/d7e0i7gvhNU/s144/Aqua%20Graphite.jpg" height="90" width="144"/>
<media:thumbnail url="http://lh5.google.com/sample.user/Rt8WVIDZELI/AAAAAAAAABg/d7e0i7gvhNU/s288/Aqua%20Graphite.jpg" height="180" width="288"/>
<media:credit>sample</media:credit>
</media:group>
</entry>
<entry>
<id>http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/tag/tag</id>
<updated>2007-09-05T20:49:24.000Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/photos/2007#tag"/>
<title type="text">tag</title>
<summary type="text">tag</summary>
<link rel="alternate" type="text/html" href="http://picasaweb.google.com/lh/searchbrowse?q=tag&psc=G&uname=sample.user&filter=0"/>
<link rel="self" type="application/atom+xml" href="http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/tag/tag"/>
<author>
<name>sample</name>
<uri>http://picasaweb.google.com/sample.user</uri>
</author>
</entry>
<entry>
<id>http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/tag/test</id>
<updated>2007-09-05T20:49:24.000Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/photos/2007#tag"/>
<title type="text">test</title>
<summary type="text">test</summary>
<link rel="alternate" type="text/html" href="http://picasaweb.google.com/lh/searchbrowse?q=test&psc=G&uname=sample.user&filter=0"/>
<link rel="self" type="application/atom+xml" href="http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/tag/test"/>
<author>
<name>sample</name>
<uri>http://picasaweb.google.com/sample.user</uri>
</author>
</entry>
</feed>"""
CODE_SEARCH_FEED = """<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:opensearch="http://a9.com/-/spec/opensearchrss/1.0/" xmlns:gcs="http://schemas.google.com/codesearch/2006" xml:base="http://www.google.com">
<id>http://www.google.com/codesearch/feeds/search?q=malloc</id>
<updated>2007-12-19T16:08:04Z</updated>
<title type="text">Google Code Search</title>
<generator version="1.0" uri="http://www.google.com/codesearch">Google Code Search</generator>
<opensearch:totalResults>2530000</opensearch:totalResults>
<opensearch:startIndex>1</opensearch:startIndex>
<author>
<name>Google Code Search</name>
<uri>http://www.google.com/codesearch</uri>
</author>
<link rel="http://schemas.google.com/g/2006#feed" type="application/atom+xml" href="http://schemas.google.com/codesearch/2006"/>
<link rel="self" type="application/atom+xml" href="http://www.google.com/codesearch/feeds/search?q=malloc"/>
<link rel="next" type="application/atom+xml" href="http://www.google.com/codesearch/feeds/search?q=malloc&start-index=11"/>
<link rel="alternate" type="text/html" href="http://www.google.com/codesearch?q=malloc"/>
<entry><id>http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:LDjwp-Iqc7U:84hEYaYsZk8:xDGReDhvNi0&amp;sa=N&amp;ct=rx&amp;cd=1&amp;cs_p=http://www.gnu.org&amp;cs_f=software/autoconf/manual/autoconf-2.60/autoconf.html-002&amp;cs_p=http://www.gnu.org&amp;cs_f=software/autoconf/manual/autoconf-2.60/autoconf.html-002#first</id><updated>2007-12-19T16:08:04Z</updated><author><name>Code owned by external author.</name></author><title type="text">software/autoconf/manual/autoconf-2.60/autoconf.html</title><link rel="alternate" type="text/html" href="http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:LDjwp-Iqc7U:84hEYaYsZk8:xDGReDhvNi0&amp;sa=N&amp;ct=rx&amp;cd=1&amp;cs_p=http://www.gnu.org&amp;cs_f=software/autoconf/manual/autoconf-2.60/autoconf.html-002&amp;cs_p=http://www.gnu.org&amp;cs_f=software/autoconf/manual/autoconf-2.60/autoconf.html-002#first"/><gcs:package name="http://www.gnu.org" uri="http://www.gnu.org"></gcs:package><gcs:file name="software/autoconf/manual/autoconf-2.60/autoconf.html-002"></gcs:file><content type="text/html"><pre> 8: void *<b>malloc</b> ();
</pre></content><gcs:match lineNumber="4" type="text/html"><pre> #undef <b>malloc</b>
</pre></gcs:match><gcs:match lineNumber="8" type="text/html"><pre> void *<b>malloc</b> ();
</pre></gcs:match><gcs:match lineNumber="14" type="text/html"><pre> rpl_<b>malloc</b> (size_t n)
</pre></gcs:match><gcs:match lineNumber="18" type="text/html"><pre> return <b>malloc</b> (n);
</pre></gcs:match></entry>
<entry><id>http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:h4hfh-fV-jI:niBq_bwWZNs:H0OhClf0HWQ&amp;sa=N&amp;ct=rx&amp;cd=2&amp;cs_p=ftp://ftp.gnu.org/gnu/guile/guile-1.6.8.tar.gz&amp;cs_f=guile-1.6.8/libguile/mallocs.c&amp;cs_p=ftp://ftp.gnu.org/gnu/guile/guile-1.6.8.tar.gz&amp;cs_f=guile-1.6.8/libguile/mallocs.c#first</id><updated>2007-12-19T16:08:04Z</updated><author><name>Code owned by external author.</name></author><title type="text">guile-1.6.8/libguile/mallocs.c</title><link rel="alternate" type="text/html" href="http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:h4hfh-fV-jI:niBq_bwWZNs:H0OhClf0HWQ&amp;sa=N&amp;ct=rx&amp;cd=2&amp;cs_p=ftp://ftp.gnu.org/gnu/guile/guile-1.6.8.tar.gz&amp;cs_f=guile-1.6.8/libguile/mallocs.c&amp;cs_p=ftp://ftp.gnu.org/gnu/guile/guile-1.6.8.tar.gz&amp;cs_f=guile-1.6.8/libguile/mallocs.c#first"/><gcs:package name="ftp://ftp.gnu.org/gnu/guile/guile-1.6.8.tar.gz" uri="ftp://ftp.gnu.org/gnu/guile/guile-1.6.8.tar.gz"></gcs:package><gcs:file name="guile-1.6.8/libguile/mallocs.c"></gcs:file><content type="text/html"><pre> 86: {
scm_t_bits mem = n ? (scm_t_bits) <b>malloc</b> (n) : 0;
if (n &amp;&amp; !mem)
</pre></content><gcs:match lineNumber="54" type="text/html"><pre>#include &lt;<b>malloc</b>.h&gt;
</pre></gcs:match><gcs:match lineNumber="62" type="text/html"><pre>scm_t_bits scm_tc16_<b>malloc</b>;
</pre></gcs:match><gcs:match lineNumber="66" type="text/html"><pre><b>malloc</b>_free (SCM ptr)
</pre></gcs:match><gcs:match lineNumber="75" type="text/html"><pre><b>malloc</b>_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
</pre></gcs:match><gcs:match lineNumber="77" type="text/html"><pre> scm_puts(&quot;#&lt;<b>malloc</b> &quot;, port);
</pre></gcs:match><gcs:match lineNumber="87" type="text/html"><pre> scm_t_bits mem = n ? (scm_t_bits) <b>malloc</b> (n) : 0;
</pre></gcs:match><gcs:match lineNumber="90" type="text/html"><pre> SCM_RETURN_NEWSMOB (scm_tc16_<b>malloc</b>, mem);
</pre></gcs:match><gcs:match lineNumber="98" type="text/html"><pre> scm_tc16_<b>malloc</b> = scm_make_smob_type (&quot;<b>malloc</b>&quot;, 0);
</pre></gcs:match><gcs:match lineNumber="99" type="text/html"><pre> scm_set_smob_free (scm_tc16_<b>malloc</b>, <b>malloc</b>_free);
</pre></gcs:match><rights>GPL</rights></entry>
<entry><id>http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:9wyZUG-N_30:7_dFxoC1ZrY:C0_iYbFj90M&amp;sa=N&amp;ct=rx&amp;cd=3&amp;cs_p=http://ftp.gnu.org/gnu/bash/bash-3.0.tar.gz&amp;cs_f=bash-3.0/lib/malloc/alloca.c&amp;cs_p=http://ftp.gnu.org/gnu/bash/bash-3.0.tar.gz&amp;cs_f=bash-3.0/lib/malloc/alloca.c#first</id><updated>2007-12-19T16:08:04Z</updated><author><name>Code owned by external author.</name></author><title type="text">bash-3.0/lib/malloc/alloca.c</title><link rel="alternate" type="text/html" href="http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:9wyZUG-N_30:7_dFxoC1ZrY:C0_iYbFj90M&amp;sa=N&amp;ct=rx&amp;cd=3&amp;cs_p=http://ftp.gnu.org/gnu/bash/bash-3.0.tar.gz&amp;cs_f=bash-3.0/lib/malloc/alloca.c&amp;cs_p=http://ftp.gnu.org/gnu/bash/bash-3.0.tar.gz&amp;cs_f=bash-3.0/lib/malloc/alloca.c#first"/><gcs:package name="http://ftp.gnu.org/gnu/bash/bash-3.0.tar.gz" uri="http://ftp.gnu.org/gnu/bash/bash-3.0.tar.gz"></gcs:package><gcs:file name="bash-3.0/lib/malloc/alloca.c"></gcs:file><content type="text/html"><pre> 78: #ifndef emacs
#define <b>malloc</b> x<b>malloc</b>
extern pointer x<b>malloc</b> ();
</pre></content><gcs:match lineNumber="69" type="text/html"><pre> <b>malloc</b>. The Emacs executable needs alloca to call x<b>malloc</b>, because
</pre></gcs:match><gcs:match lineNumber="70" type="text/html"><pre> ordinary <b>malloc</b> isn&#39;t protected from input signals. On the other
</pre></gcs:match><gcs:match lineNumber="71" type="text/html"><pre> hand, the utilities in lib-src need alloca to call <b>malloc</b>; some of
</pre></gcs:match><gcs:match lineNumber="72" type="text/html"><pre> them are very simple, and don&#39;t have an x<b>malloc</b> routine.
</pre></gcs:match><gcs:match lineNumber="76" type="text/html"><pre> Callers below should use <b>malloc</b>. */
</pre></gcs:match><gcs:match lineNumber="79" type="text/html"><pre>#define <b>malloc</b> x<b>malloc</b>
</pre></gcs:match><gcs:match lineNumber="80" type="text/html"><pre>extern pointer x<b>malloc</b> ();
</pre></gcs:match><gcs:match lineNumber="132" type="text/html"><pre> It is very important that sizeof(header) agree with <b>malloc</b>
</pre></gcs:match><gcs:match lineNumber="198" type="text/html"><pre> register pointer new = <b>malloc</b> (sizeof (header) + size);
</pre></gcs:match><rights>GPL</rights></entry>
<entry><id>http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:uhVCKyPcT6k:8juMxxzmUJw:H7_IDsTB2L4&amp;sa=N&amp;ct=rx&amp;cd=4&amp;cs_p=http://ftp.mozilla.org/pub/mozilla.org/mozilla/releases/mozilla1.7b/src/mozilla-source-1.7b-source.tar.bz2&amp;cs_f=mozilla/xpcom/build/malloc.c&amp;cs_p=http://ftp.mozilla.org/pub/mozilla.org/mozilla/releases/mozilla1.7b/src/mozilla-source-1.7b-source.tar.bz2&amp;cs_f=mozilla/xpcom/build/malloc.c#first</id><updated>2007-12-19T16:08:04Z</updated><author><name>Code owned by external author.</name></author><title type="text">mozilla/xpcom/build/malloc.c</title><link rel="alternate" type="text/html" href="http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:uhVCKyPcT6k:8juMxxzmUJw:H7_IDsTB2L4&amp;sa=N&amp;ct=rx&amp;cd=4&amp;cs_p=http://ftp.mozilla.org/pub/mozilla.org/mozilla/releases/mozilla1.7b/src/mozilla-source-1.7b-source.tar.bz2&amp;cs_f=mozilla/xpcom/build/malloc.c&amp;cs_p=http://ftp.mozilla.org/pub/mozilla.org/mozilla/releases/mozilla1.7b/src/mozilla-source-1.7b-source.tar.bz2&amp;cs_f=mozilla/xpcom/build/malloc.c#first"/><gcs:package name="http://ftp.mozilla.org/pub/mozilla.org/mozilla/releases/mozilla1.7b/src/mozilla-source-1.7b-source.tar.bz2" uri="http://ftp.mozilla.org/pub/mozilla.org/mozilla/releases/mozilla1.7b/src/mozilla-source-1.7b-source.tar.bz2"></gcs:package><gcs:file name="mozilla/xpcom/build/malloc.c"></gcs:file><content type="text/html"><pre> 54: http://gee.cs.oswego.edu/dl/html/<b>malloc</b>.html
You may already by default be using a c library containing a <b>malloc</b>
</pre></content><gcs:match lineNumber="4" type="text/html"><pre>/* ---------- To make a <b>malloc</b>.h, start cutting here ------------ */
</pre></gcs:match><gcs:match lineNumber="22" type="text/html"><pre> Note: There may be an updated version of this <b>malloc</b> obtainable at
</pre></gcs:match><gcs:match lineNumber="23" type="text/html"><pre> ftp://gee.cs.oswego.edu/pub/misc/<b>malloc</b>.c
</pre></gcs:match><gcs:match lineNumber="34" type="text/html"><pre>* Why use this <b>malloc</b>?
</pre></gcs:match><gcs:match lineNumber="37" type="text/html"><pre> most tunable <b>malloc</b> ever written. However it is among the fastest
</pre></gcs:match><gcs:match lineNumber="40" type="text/html"><pre> allocator for <b>malloc</b>-intensive programs.
</pre></gcs:match><gcs:match lineNumber="54" type="text/html"><pre> http://gee.cs.oswego.edu/dl/html/<b>malloc</b>.html
</pre></gcs:match><gcs:match lineNumber="56" type="text/html"><pre> You may already by default be using a c library containing a <b>malloc</b>
</pre></gcs:match><gcs:match lineNumber="57" type="text/html"><pre> that is somehow based on some version of this <b>malloc</b> (for example in
</pre></gcs:match><rights>Mozilla</rights></entry>
<entry><id>http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:4n1P2HVOISs:Ybbpph0wR2M:OhIN_sDrG0U&amp;sa=N&amp;ct=rx&amp;cd=5&amp;cs_p=http://regexps.srparish.net/src/hackerlab/hackerlab-1.0pre2.tar.gz&amp;cs_f=hackerlab-1.0pre2/src/hackerlab/tests/mem-tests/unit-must-malloc.sh&amp;cs_p=http://regexps.srparish.net/src/hackerlab/hackerlab-1.0pre2.tar.gz&amp;cs_f=hackerlab-1.0pre2/src/hackerlab/tests/mem-tests/unit-must-malloc.sh#first</id><updated>2007-12-19T16:08:04Z</updated><author><name>Code owned by external author.</name></author><title type="text">hackerlab-1.0pre2/src/hackerlab/tests/mem-tests/unit-must-malloc.sh</title><link rel="alternate" type="text/html" href="http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:4n1P2HVOISs:Ybbpph0wR2M:OhIN_sDrG0U&amp;sa=N&amp;ct=rx&amp;cd=5&amp;cs_p=http://regexps.srparish.net/src/hackerlab/hackerlab-1.0pre2.tar.gz&amp;cs_f=hackerlab-1.0pre2/src/hackerlab/tests/mem-tests/unit-must-malloc.sh&amp;cs_p=http://regexps.srparish.net/src/hackerlab/hackerlab-1.0pre2.tar.gz&amp;cs_f=hackerlab-1.0pre2/src/hackerlab/tests/mem-tests/unit-must-malloc.sh#first"/><gcs:package name="http://regexps.srparish.net/src/hackerlab/hackerlab-1.0pre2.tar.gz" uri="http://regexps.srparish.net/src/hackerlab/hackerlab-1.0pre2.tar.gz"></gcs:package><gcs:file name="hackerlab-1.0pre2/src/hackerlab/tests/mem-tests/unit-must-malloc.sh"></gcs:file><content type="text/html"><pre> 11: echo ================ unit-must-<b>malloc</b> tests ================
./unit-must-<b>malloc</b>
echo ...passed
</pre></content><gcs:match lineNumber="2" type="text/html"><pre># tag: Tom Lord Tue Dec 4 14:54:29 2001 (mem-tests/unit-must-<b>malloc</b>.sh)
</pre></gcs:match><gcs:match lineNumber="11" type="text/html"><pre>echo ================ unit-must-<b>malloc</b> tests ================
</pre></gcs:match><gcs:match lineNumber="12" type="text/html"><pre>./unit-must-<b>malloc</b>
</pre></gcs:match><rights>GPL</rights></entry>
<entry><id>http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:GzkwiWG266M:ykuz3bG00ws:2sTvVSif08g&amp;sa=N&amp;ct=rx&amp;cd=6&amp;cs_p=http://ftp.gnu.org/gnu/tar/tar-1.14.tar.bz2&amp;cs_f=tar-1.14/lib/malloc.c&amp;cs_p=http://ftp.gnu.org/gnu/tar/tar-1.14.tar.bz2&amp;cs_f=tar-1.14/lib/malloc.c#first</id><updated>2007-12-19T16:08:04Z</updated><author><name>Code owned by external author.</name></author><title type="text">tar-1.14/lib/malloc.c</title><link rel="alternate" type="text/html" href="http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:GzkwiWG266M:ykuz3bG00ws:2sTvVSif08g&amp;sa=N&amp;ct=rx&amp;cd=6&amp;cs_p=http://ftp.gnu.org/gnu/tar/tar-1.14.tar.bz2&amp;cs_f=tar-1.14/lib/malloc.c&amp;cs_p=http://ftp.gnu.org/gnu/tar/tar-1.14.tar.bz2&amp;cs_f=tar-1.14/lib/malloc.c#first"/><gcs:package name="http://ftp.gnu.org/gnu/tar/tar-1.14.tar.bz2" uri="http://ftp.gnu.org/gnu/tar/tar-1.14.tar.bz2"></gcs:package><gcs:file name="tar-1.14/lib/malloc.c"></gcs:file><content type="text/html"><pre> 22: #endif
#undef <b>malloc</b>
</pre></content><gcs:match lineNumber="1" type="text/html"><pre>/* Work around bug on some systems where <b>malloc</b> (0) fails.
</pre></gcs:match><gcs:match lineNumber="23" type="text/html"><pre>#undef <b>malloc</b>
</pre></gcs:match><gcs:match lineNumber="31" type="text/html"><pre>rpl_<b>malloc</b> (size_t n)
</pre></gcs:match><gcs:match lineNumber="35" type="text/html"><pre> return <b>malloc</b> (n);
</pre></gcs:match><rights>GPL</rights></entry>
<entry><id>http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:o_TFIeBY6dY:ktI_dt8wPao:AI03BD1Dz0Y&amp;sa=N&amp;ct=rx&amp;cd=7&amp;cs_p=http://ftp.gnu.org/gnu/tar/tar-1.16.1.tar.gz&amp;cs_f=tar-1.16.1/lib/malloc.c&amp;cs_p=http://ftp.gnu.org/gnu/tar/tar-1.16.1.tar.gz&amp;cs_f=tar-1.16.1/lib/malloc.c#first</id><updated>2007-12-19T16:08:04Z</updated><author><name>Code owned by external author.</name></author><title type="text">tar-1.16.1/lib/malloc.c</title><link rel="alternate" type="text/html" href="http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:o_TFIeBY6dY:ktI_dt8wPao:AI03BD1Dz0Y&amp;sa=N&amp;ct=rx&amp;cd=7&amp;cs_p=http://ftp.gnu.org/gnu/tar/tar-1.16.1.tar.gz&amp;cs_f=tar-1.16.1/lib/malloc.c&amp;cs_p=http://ftp.gnu.org/gnu/tar/tar-1.16.1.tar.gz&amp;cs_f=tar-1.16.1/lib/malloc.c#first"/><gcs:package name="http://ftp.gnu.org/gnu/tar/tar-1.16.1.tar.gz" uri="http://ftp.gnu.org/gnu/tar/tar-1.16.1.tar.gz"></gcs:package><gcs:file name="tar-1.16.1/lib/malloc.c"></gcs:file><content type="text/html"><pre> 21: #include &lt;config.h&gt;
#undef <b>malloc</b>
</pre></content><gcs:match lineNumber="1" type="text/html"><pre>/* <b>malloc</b>() function that is glibc compatible.
</pre></gcs:match><gcs:match lineNumber="22" type="text/html"><pre>#undef <b>malloc</b>
</pre></gcs:match><gcs:match lineNumber="30" type="text/html"><pre>rpl_<b>malloc</b> (size_t n)
</pre></gcs:match><gcs:match lineNumber="34" type="text/html"><pre> return <b>malloc</b> (n);
</pre></gcs:match><rights>GPL</rights></entry>
<entry><id>http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:_ibw-VLkMoI:jBOtIJSmFd4:-0NUEVeCwfY&amp;sa=N&amp;ct=rx&amp;cd=8&amp;cs_p=http://freshmeat.net/redir/uclibc/20616/url_bz2/uClibc-0.9.28.1.tar.bz2&amp;cs_f=uClibc-0.9.29/include/malloc.h&amp;cs_p=http://freshmeat.net/redir/uclibc/20616/url_bz2/uClibc-0.9.28.1.tar.bz2&amp;cs_f=uClibc-0.9.29/include/malloc.h#first</id><updated>2007-12-19T16:08:04Z</updated><author><name>Code owned by external author.</name></author><title type="text">uClibc-0.9.29/include/malloc.h</title><link rel="alternate" type="text/html" href="http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:_ibw-VLkMoI:jBOtIJSmFd4:-0NUEVeCwfY&amp;sa=N&amp;ct=rx&amp;cd=8&amp;cs_p=http://freshmeat.net/redir/uclibc/20616/url_bz2/uClibc-0.9.28.1.tar.bz2&amp;cs_f=uClibc-0.9.29/include/malloc.h&amp;cs_p=http://freshmeat.net/redir/uclibc/20616/url_bz2/uClibc-0.9.28.1.tar.bz2&amp;cs_f=uClibc-0.9.29/include/malloc.h#first"/><gcs:package name="http://freshmeat.net/redir/uclibc/20616/url_bz2/uClibc-0.9.28.1.tar.bz2" uri="http://freshmeat.net/redir/uclibc/20616/url_bz2/uClibc-0.9.28.1.tar.bz2"></gcs:package><gcs:file name="uClibc-0.9.29/include/malloc.h"></gcs:file><content type="text/html"><pre> 1: /* Prototypes and definition for <b>malloc</b> implementation.
Copyright (C) 1996, 1997, 1999, 2000 Free Software Foundation, Inc.
</pre></content><gcs:match lineNumber="1" type="text/html"><pre>/* Prototypes and definition for <b>malloc</b> implementation.
</pre></gcs:match><gcs:match lineNumber="26" type="text/html"><pre> `pt<b>malloc</b>&#39;, a <b>malloc</b> implementation for multiple threads without
</pre></gcs:match><gcs:match lineNumber="28" type="text/html"><pre> See the files `pt<b>malloc</b>.c&#39; or `COPYRIGHT&#39; for copying conditions.
</pre></gcs:match><gcs:match lineNumber="32" type="text/html"><pre> This work is mainly derived from <b>malloc</b>-2.6.4 by Doug Lea
</pre></gcs:match><gcs:match lineNumber="35" type="text/html"><pre> ftp://g.oswego.edu/pub/misc/<b>malloc</b>.c
</pre></gcs:match><gcs:match lineNumber="40" type="text/html"><pre> `pt<b>malloc</b>.c&#39;.
</pre></gcs:match><gcs:match lineNumber="45" type="text/html"><pre># define __<b>malloc</b>_ptr_t void *
</pre></gcs:match><gcs:match lineNumber="51" type="text/html"><pre># define __<b>malloc</b>_ptr_t char *
</pre></gcs:match><gcs:match lineNumber="56" type="text/html"><pre># define __<b>malloc</b>_size_t size_t
</pre></gcs:match><rights>LGPL</rights></entry>
<entry><id>http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:F6qHcZ9vefo:bTX7o9gKfks:hECF4r_eKC0&amp;sa=N&amp;ct=rx&amp;cd=9&amp;cs_p=http://ftp.gnu.org/gnu/glibc/glibc-2.0.1.tar.gz&amp;cs_f=glibc-2.0.1/hurd/hurdmalloc.h&amp;cs_p=http://ftp.gnu.org/gnu/glibc/glibc-2.0.1.tar.gz&amp;cs_f=glibc-2.0.1/hurd/hurdmalloc.h#first</id><updated>2007-12-19T16:08:04Z</updated><author><name>Code owned by external author.</name></author><title type="text">glibc-2.0.1/hurd/hurdmalloc.h</title><link rel="alternate" type="text/html" href="http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:F6qHcZ9vefo:bTX7o9gKfks:hECF4r_eKC0&amp;sa=N&amp;ct=rx&amp;cd=9&amp;cs_p=http://ftp.gnu.org/gnu/glibc/glibc-2.0.1.tar.gz&amp;cs_f=glibc-2.0.1/hurd/hurdmalloc.h&amp;cs_p=http://ftp.gnu.org/gnu/glibc/glibc-2.0.1.tar.gz&amp;cs_f=glibc-2.0.1/hurd/hurdmalloc.h#first"/><gcs:package name="http://ftp.gnu.org/gnu/glibc/glibc-2.0.1.tar.gz" uri="http://ftp.gnu.org/gnu/glibc/glibc-2.0.1.tar.gz"></gcs:package><gcs:file name="glibc-2.0.1/hurd/hurdmalloc.h"></gcs:file><content type="text/html"><pre> 15: #define <b>malloc</b> _hurd_<b>malloc</b>
#define realloc _hurd_realloc
</pre></content><gcs:match lineNumber="3" type="text/html"><pre> All hurd-internal code which uses <b>malloc</b> et al includes this file so it
</pre></gcs:match><gcs:match lineNumber="4" type="text/html"><pre> will use the internal <b>malloc</b> routines _hurd_{<b>malloc</b>,realloc,free}
</pre></gcs:match><gcs:match lineNumber="7" type="text/html"><pre> of <b>malloc</b> et al is the unixoid one using sbrk.
</pre></gcs:match><gcs:match lineNumber="11" type="text/html"><pre>extern void *_hurd_<b>malloc</b> (size_t);
</pre></gcs:match><gcs:match lineNumber="15" type="text/html"><pre>#define <b>malloc</b> _hurd_<b>malloc</b>
</pre></gcs:match><rights>GPL</rights></entry>
<entry><id>http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:CHUvHYzyLc8:pdcAfzDA6lY:wjofHuNLTHg&amp;sa=N&amp;ct=rx&amp;cd=10&amp;cs_p=ftp://apache.mirrors.pair.com/httpd/httpd-2.2.4.tar.bz2&amp;cs_f=httpd-2.2.4/srclib/apr/include/arch/netware/apr_private.h&amp;cs_p=ftp://apache.mirrors.pair.com/httpd/httpd-2.2.4.tar.bz2&amp;cs_f=httpd-2.2.4/srclib/apr/include/arch/netware/apr_private.h#first</id><updated>2007-12-19T16:08:04Z</updated><author><name>Code owned by external author.</name></author><title type="text">httpd-2.2.4/srclib/apr/include/arch/netware/apr_private.h</title><link rel="alternate" type="text/html" href="http://www.google.com/codesearch?hl=en&amp;q=+malloc+show:CHUvHYzyLc8:pdcAfzDA6lY:wjofHuNLTHg&amp;sa=N&amp;ct=rx&amp;cd=10&amp;cs_p=ftp://apache.mirrors.pair.com/httpd/httpd-2.2.4.tar.bz2&amp;cs_f=httpd-2.2.4/srclib/apr/include/arch/netware/apr_private.h&amp;cs_p=ftp://apache.mirrors.pair.com/httpd/httpd-2.2.4.tar.bz2&amp;cs_f=httpd-2.2.4/srclib/apr/include/arch/netware/apr_private.h#first"/><gcs:package name="ftp://apache.mirrors.pair.com/httpd/httpd-2.2.4.tar.bz2" uri="ftp://apache.mirrors.pair.com/httpd/httpd-2.2.4.tar.bz2"></gcs:package><gcs:file name="httpd-2.2.4/srclib/apr/include/arch/netware/apr_private.h"></gcs:file><content type="text/html"><pre> 173: #undef <b>malloc</b>
#define <b>malloc</b>(x) library_<b>malloc</b>(gLibHandle,x)
</pre></content><gcs:match lineNumber="170" type="text/html"><pre>/* Redefine <b>malloc</b> to use the library <b>malloc</b> call so
</pre></gcs:match><gcs:match lineNumber="173" type="text/html"><pre>#undef <b>malloc</b>
</pre></gcs:match><gcs:match lineNumber="174" type="text/html"><pre>#define <b>malloc</b>(x) library_<b>malloc</b>(gLibHandle,x)
</pre></gcs:match><rights>Apache</rights></entry>
</feed>"""
YOUTUBE_VIDEO_FEED = """<?xml version='1.0' encoding='UTF-8'?><feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:gml='http://www.opengis.net/gml' xmlns:georss='http://www.georss.org/georss' xmlns:media='http://search.yahoo.com/mrss/' xmlns:yt='http://gdata.youtube.com/schemas/2007' xmlns:gd='http://schemas.google.com/g/2005'><id>http://gdata.youtube.com/feeds/api/standardfeeds/top_rated</id><updated>2008-05-14T02:24:07.000-07:00</updated><category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#video'/><title type='text'>Top Rated</title><logo>http://www.youtube.com/img/pic_youtubelogo_123x63.gif</logo><link rel='alternate' type='text/html' href='http://www.youtube.com/browse?s=tr'/><link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/standardfeeds/top_rated'/><link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/standardfeeds/top_rated?start-index=1&max-results=25'/><link rel='next' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/standardfeeds/top_rated?start-index=26&max-results=25'/><author><name>YouTube</name><uri>http://www.youtube.com/</uri></author><generator version='beta' uri='http://gdata.youtube.com/'>YouTube data API</generator><openSearch:totalResults>100</openSearch:totalResults><openSearch:startIndex>1</openSearch:startIndex><openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<entry><id>http://gdata.youtube.com/feeds/api/videos/C71ypXYGho8</id><published>2008-03-20T10:17:27.000-07:00</published><updated>2008-05-14T04:26:37.000-07:00</updated><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='karyn'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='garcia'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='me'/><category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#video'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='boyfriend'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='por'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='te'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='odeio'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='amar'/><category scheme='http://gdata.youtube.com/schemas/2007/categories.cat' term='Music' label='Music'/><title type='text'>Me odeio por te amar - KARYN GARCIA</title><content type='text'>http://www.karyngarcia.com.br</content><link rel='alternate' type='text/html' href='http://www.youtube.com/watch?v=C71ypXYGho8'/><link rel='http://gdata.youtube.com/schemas/2007#video.related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/videos/C71ypXYGho8/related'/><link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/standardfeeds/top_rated/C71ypXYGho8'/><author><name>TvKarynGarcia</name><uri>http://gdata.youtube.com/feeds/api/users/tvkaryngarcia</uri></author><media:group><media:title type='plain'>Me odeio por te amar - KARYN GARCIA</media:title><media:description type='plain'>http://www.karyngarcia.com.br</media:description><media:keywords>amar, boyfriend, garcia, karyn, me, odeio, por, te</media:keywords><yt:duration seconds='203'/><media:category label='Music' scheme='http://gdata.youtube.com/schemas/2007/categories.cat'>Music</media:category><media:category label='test111' scheme='http://gdata.youtube.com/schemas/2007/developertags.cat'>test111</media:category><media:category label='test222' scheme='http://gdata.youtube.com/schemas/2007/developertags.cat'>test222</media:category><media:content url='http://www.youtube.com/v/C71ypXYGho8' type='application/x-shockwave-flash' medium='video' isDefault='true' expression='full' duration='203' yt:format='5'/><media:content url='rtsp://rtsp2.youtube.com/ChoLENy73wIaEQmPhgZ2pXK9CxMYDSANFEgGDA==/0/0/0/video.3gp' type='video/3gpp' medium='video' expression='full' duration='203' yt:format='1'/><media:content url='rtsp://rtsp2.youtube.com/ChoLENy73wIaEQmPhgZ2pXK9CxMYESARFEgGDA==/0/0/0/video.3gp' type='video/3gpp' medium='video' expression='full' duration='203' yt:format='6'/><media:player url='http://www.youtube.com/watch?v=C71ypXYGho8'/><media:thumbnail url='http://img.youtube.com/vi/C71ypXYGho8/2.jpg' height='97' width='130' time='00:01:41.500'/><media:thumbnail url='http://img.youtube.com/vi/C71ypXYGho8/1.jpg' height='97' width='130' time='00:00:50.750'/><media:thumbnail url='http://img.youtube.com/vi/C71ypXYGho8/3.jpg' height='97' width='130' time='00:02:32.250'/><media:thumbnail url='http://img.youtube.com/vi/C71ypXYGho8/0.jpg' height='240' width='320' time='00:01:41.500'/></media:group><yt:statistics viewCount='138864' favoriteCount='2474'/><gd:rating min='1' max='5' numRaters='4626' average='4.95'/><gd:comments><gd:feedLink 
href='http://gdata.youtube.com/feeds/api/videos/C71ypXYGho8/comments' countHint='27'/></gd:comments></entry>
<entry><id>http://gdata.youtube.com/feeds/api/videos/gsVaTyb1tBw</id><published>2008-02-15T04:31:45.000-08:00</published><updated>2008-05-14T05:09:42.000-07:00</updated><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='extreme'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='cam'/><category scheme='http://gdata.youtube.com/schemas/2007/categories.cat' term='Sports' label='Sports'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='alcala'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='kani'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='helmet'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='campillo'/><category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#video'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='pato'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='dirt'/><title type='text'>extreme helmet cam Kani, Keil and Pato</title><content type='text'>trimmed</content><link rel='alternate' type='text/html' href='http://www.youtube.com/watch?v=gsVaTyb1tBw'/><link rel='http://gdata.youtube.com/schemas/2007#video.responses' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/videos/gsVaTyb1tBw/responses'/><link rel='http://gdata.youtube.com/schemas/2007#video.related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/videos/gsVaTyb1tBw/related'/><link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/standardfeeds/recently_featured/gsVaTyb1tBw'/><author><name>peraltamagic</name><uri>http://gdata.youtube.com/feeds/api/users/peraltamagic</uri></author><media:group><media:title type='plain'>extreme helmet cam Kani, Keil and Pato</media:title><media:description type='plain'>trimmed</media:description><media:keywords>alcala, cam, campillo, dirt, extreme, helmet, kani, pato</media:keywords><yt:duration seconds='31'/><media:category label='Sports' scheme='http://gdata.youtube.com/schemas/2007/categories.cat'>Sports</media:category><media:content url='http://www.youtube.com/v/gsVaTyb1tBw' type='application/x-shockwave-flash' medium='video' isDefault='true' expression='full' duration='31' yt:format='5'/><media:content url='rtsp://rtsp2.youtube.com/ChoLENy73wIaEQkctPUmT1rFghMYDSANFEgGDA==/0/0/0/video.3gp' type='video/3gpp' medium='video' expression='full' duration='31' yt:format='1'/><media:content url='rtsp://rtsp2.youtube.com/ChoLENy73wIaEQkctPUmT1rFghMYESARFEgGDA==/0/0/0/video.3gp' type='video/3gpp' medium='video' expression='full' duration='31' yt:format='6'/><media:player url='http://www.youtube.com/watch?v=gsVaTyb1tBw'/><media:thumbnail url='http://img.youtube.com/vi/gsVaTyb1tBw/2.jpg' height='97' width='130' time='00:00:15.500'/><media:thumbnail url='http://img.youtube.com/vi/gsVaTyb1tBw/1.jpg' height='97' width='130' time='00:00:07.750'/><media:thumbnail url='http://img.youtube.com/vi/gsVaTyb1tBw/3.jpg' height='97' width='130' time='00:00:23.250'/><media:thumbnail url='http://img.youtube.com/vi/gsVaTyb1tBw/0.jpg' height='240' width='320' time='00:00:15.500'/></media:group><yt:statistics viewCount='489941' favoriteCount='561'/><gd:rating min='1' max='5' numRaters='1255' average='4.11'/><gd:comments><gd:feedLink href='http://gdata.youtube.com/feeds/api/videos/gsVaTyb1tBw/comments' countHint='1116'/></gd:comments></entry>
</feed>"""
YOUTUBE_ENTRY_PRIVATE = """<?xml version='1.0' encoding='utf-8'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:media='http://search.yahoo.com/mrss/'
xmlns:gd='http://schemas.google.com/g/2005'
xmlns:yt='http://gdata.youtube.com/schemas/2007'
xmlns:gml='http://www.opengis.net/gml'
xmlns:georss='http://www.georss.org/georss'
xmlns:app='http://purl.org/atom/app#'>
<id>http://gdata.youtube.com/feeds/videos/UMFI1hdm96E</id>
<published>2007-01-07T01:50:15.000Z</published>
<updated>2007-01-07T01:50:15.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://gdata.youtube.com/schemas/2007#video' />
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat'
term='barkley' />
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat'
term='singing' />
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat'
term='acoustic' />
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat'
term='cover' />
<category scheme='http://gdata.youtube.com/schemas/2007/categories.cat'
term='Music' label='Music' />
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat'
term='gnarls' />
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat'
term='music' />
<title type='text'>"Crazy (Gnarles Barkley)" - Acoustic Cover</title>
  <content type='html'>&lt;div style="color: #000000;font-family:
    Arial, Helvetica, sans-serif; font-size:12px; font-size: 12px;
    width: 555px;"&gt;&lt;table cellspacing="0" cellpadding="0"
    border="0"&gt;&lt;tbody&gt;&lt;tr&gt;&lt;td width="140"
    valign="top" rowspan="2"&gt;&lt;div style="border: 1px solid
    #999999; margin: 0px 10px 5px 0px;"&gt;&lt;a
    href="http://www.youtube.com/watch?v=UMFI1hdm96E"&gt;&lt;img
    alt=""
    src="http://img.youtube.com/vi/UMFI1hdm96E/2.jpg"&gt;&lt;/a&gt;&lt;/div&gt;&lt;/td&gt;
    &lt;td width="256" valign="top"&gt;&lt;div style="font-size:
    12px; font-weight: bold;"&gt;&lt;a style="font-size: 15px;
    font-weight: bold; font-decoration: none;"
    href="http://www.youtube.com/watch?v=UMFI1hdm96E"&gt;&amp;quot;Crazy
    (Gnarles Barkley)&amp;quot; - Acoustic Cover&lt;/a&gt;
    &lt;br&gt;&lt;/div&gt; &lt;div style="font-size: 12px; margin:
    3px 0px;"&gt;&lt;span&gt;Gnarles Barkley acoustic cover
    http://www.myspace.com/davidchoimusic&lt;/span&gt;&lt;/div&gt;&lt;/td&gt;
    &lt;td style="font-size: 11px; line-height: 1.4em; padding-left:
    20px; padding-top: 1px;" width="146"
    valign="top"&gt;&lt;div&gt;&lt;span style="color: #666666;
    font-size: 11px;"&gt;From:&lt;/span&gt; &lt;a
    href="http://www.youtube.com/profile?user=davidchoimusic"&gt;davidchoimusic&lt;/a&gt;&lt;/div&gt;
    &lt;div&gt;&lt;span style="color: #666666; font-size:
    11px;"&gt;Views:&lt;/span&gt; 113321&lt;/div&gt; &lt;div
    style="white-space: nowrap;text-align: left"&gt;&lt;img
    style="border: 0px none; margin: 0px; padding: 0px;
    vertical-align: middle; font-size: 11px;" align="top" alt=""
    src="http://gdata.youtube.com/static/images/icn_star_full_11x11.gif"&gt;
    &lt;img style="border: 0px none; margin: 0px; padding: 0px;
    vertical-align: middle; font-size: 11px;" align="top" alt=""
    src="http://gdata.youtube.com/static/images/icn_star_full_11x11.gif"&gt;
    &lt;img style="border: 0px none; margin: 0px; padding: 0px;
    vertical-align: middle; font-size: 11px;" align="top" alt=""
    src="http://gdata.youtube.com/static/images/icn_star_full_11x11.gif"&gt;
    &lt;img style="border: 0px none; margin: 0px; padding: 0px;
    vertical-align: middle; font-size: 11px;" align="top" alt=""
    src="http://gdata.youtube.com/static/images/icn_star_full_11x11.gif"&gt;
    &lt;img style="border: 0px none; margin: 0px; padding: 0px;
    vertical-align: middle; font-size: 11px;" align="top" alt=""
    src="http://gdata.youtube.com/static/images/icn_star_half_11x11.gif"&gt;&lt;/div&gt;
    &lt;div style="font-size: 11px;"&gt;1005 &lt;span style="color:
    #666666; font-size:
    11px;"&gt;ratings&lt;/span&gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
    &lt;tr&gt;&lt;td&gt;&lt;span style="color: #666666; font-size:
    11px;"&gt;Time:&lt;/span&gt; &lt;span style="color: #000000;
    font-size: 11px; font-weight:
    bold;"&gt;04:15&lt;/span&gt;&lt;/td&gt; &lt;td style="font-size:
    11px; padding-left: 20px;"&gt;&lt;span style="color: #666666;
    font-size: 11px;"&gt;More in&lt;/span&gt; &lt;a
    href="http://www.youtube.com/categories_portal?c=10"&gt;Music&lt;/a&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt;&lt;/div&gt;</content>
<link rel='self' type='application/atom+xml'
href='http://gdata.youtube.com/feeds/videos/UMFI1hdm96E' />
<link rel='alternate' type='text/html'
href='http://www.youtube.com/watch?v=UMFI1hdm96E' />
<link rel='http://gdata.youtube.com/schemas/2007#video.responses'
type='application/atom+xml'
href='http://gdata.youtube.com/feeds/videos/UMFI1hdm96E/responses' />
<link rel='http://gdata.youtube.com/schemas/2007#video.related'
type='application/atom+xml'
href='http://gdata.youtube.com/feeds/videos/UMFI1hdm96E/related' />
<author>
<name>davidchoimusic</name>
<uri>http://gdata.youtube.com/feeds/users/davidchoimusic</uri>
</author>
<media:group>
<media:title type='plain'>"Crazy (Gnarles Barkley)" - Acoustic Cover</media:title>
<media:description type='plain'>Gnarles Barkley acoustic cover http://www.myspace.com/davidchoimusic</media:description>
<media:keywords>music, singing, gnarls, barkley, acoustic, cover</media:keywords>
<yt:duration seconds='255' />
<media:category label='Music'
scheme='http://gdata.youtube.com/schemas/2007/categories.cat'>
Music</media:category>
<media:category
scheme='http://gdata.youtube.com/schemas/2007/developertags.cat'>
DeveloperTag1</media:category>
<media:content url='http://www.youtube.com/v/UMFI1hdm96E'
type='application/x-shockwave-flash' medium='video'
isDefault='true' expression='full' duration='255'
yt:format='5' />
<media:player url='http://www.youtube.com/watch?v=UMFI1hdm96E' />
<media:thumbnail url='http://img.youtube.com/vi/UMFI1hdm96E/2.jpg'
height='97' width='130' time='00:02:07.500' />
<media:thumbnail url='http://img.youtube.com/vi/UMFI1hdm96E/1.jpg'
height='97' width='130' time='00:01:03.750' />
<media:thumbnail url='http://img.youtube.com/vi/UMFI1hdm96E/3.jpg'
height='97' width='130' time='00:03:11.250' />
<media:thumbnail url='http://img.youtube.com/vi/UMFI1hdm96E/0.jpg'
height='240' width='320' time='00:02:07.500' />
<yt:private />
</media:group>
<yt:statistics viewCount='113321' />
<gd:rating min='1' max='5' numRaters='1005' average='4.77' />
<georss:where>
<gml:Point>
<gml:pos>37.398529052734375 -122.0635986328125</gml:pos>
</gml:Point>
</georss:where>
<gd:comments>
<gd:feedLink href='http://gdata.youtube.com/feeds/videos/UMFI1hdm96E/comments' />
</gd:comments>
<yt:noembed />
<app:control>
<app:draft>yes</app:draft>
<yt:state
name="rejected"
reasonCode="inappropriate"
helpUrl="http://www.youtube.com/t/community_guidelines">
The content of this video may violate the terms of use.</yt:state>
</app:control>
</entry>"""
YOUTUBE_COMMENT_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'><id>http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments</id><updated>2008-05-19T21:45:45.261Z</updated><category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#comment'/><title type='text'>Comments</title><logo>http://www.youtube.com/img/pic_youtubelogo_123x63.gif</logo><link rel='related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU'/><link rel='alternate' type='text/html' href='http://www.youtube.com/watch?v=2Idhz9ef5oU'/><link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments'/><link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments?start-index=1&amp;max-results=25'/><author><name>YouTube</name><uri>http://www.youtube.com/</uri></author><generator version='beta' uri='http://gdata.youtube.com/'>YouTube data API</generator><openSearch:totalResults>0</openSearch:totalResults><openSearch:startIndex>1</openSearch:startIndex><openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<entry>
<id>http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments/91F809A3DE2EB81B</id>
<published>2008-02-22T15:27:15.000-08:00</published><updated>2008-02-22T15:27:15.000-08:00</updated>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#comment'/>
<title type='text'>test66</title>
<content type='text'>test66</content>
<link rel='related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU'/>
<link rel='alternate' type='text/html' href='http://www.youtube.com/watch?v=2Idhz9ef5oU'/>
<link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments/91F809A3DE2EB81B'/>
<author><name>apitestjhartmann</name><uri>http://gdata.youtube.com/feeds/users/apitestjhartmann</uri></author>
</entry>
<entry>
<id>http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments/A261AEEFD23674AA</id>
<published>2008-02-22T15:27:01.000-08:00</published><updated>2008-02-22T15:27:01.000-08:00</updated>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#comment'/>
<title type='text'>test333</title>
<content type='text'>test333</content>
<link rel='related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU'/>
<link rel='alternate' type='text/html' href='http://www.youtube.com/watch?v=2Idhz9ef5oU'/>
<link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments/A261AEEFD23674AA'/>
<author><name>apitestjhartmann</name><uri>http://gdata.youtube.com/feeds/users/apitestjhartmann</uri></author>
</entry>
<entry>
<id>http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments/0DCF1E3531B3FF85</id>
<published>2008-02-22T15:11:06.000-08:00</published><updated>2008-02-22T15:11:06.000-08:00</updated>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#comment'/>
<title type='text'>test2</title>
<content type='text'>test2</content>
<link rel='related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU'/>
<link rel='alternate' type='text/html' href='http://www.youtube.com/watch?v=2Idhz9ef5oU'/>
<link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments/0DCF1E3531B3FF85'/>
<author><name>apitestjhartmann</name><uri>http://gdata.youtube.com/feeds/users/apitestjhartmann</uri></author>
</entry>
</feed>"""
YOUTUBE_PLAYLIST_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:media='http://search.yahoo.com/mrss/'
xmlns:yt='http://gdata.youtube.com/schemas/2007'
xmlns:gd='http://schemas.google.com/g/2005'>
  <id>http://gdata.youtube.com/feeds/users/andyland74/playlists?start-index=1&amp;max-results=25</id>
<updated>2008-02-26T00:26:15.635Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#playlistLink'/>
<title type='text'>andyland74's Playlists</title>
<logo>http://www.youtube.com/img/pic_youtubelogo_123x63.gif</logo>
<link rel='related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/andyland74'/>
<link rel='alternate' type='text/html' href='http://www.youtube.com/profile_play_list?user=andyland74'/>
<link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/andyland74/playlists'/>
  <link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/andyland74/playlists?start-index=1&amp;max-results=25'/>
<author>
<name>andyland74</name>
<uri>http://gdata.youtube.com/feeds/users/andyland74</uri>
</author>
<generator version='beta' uri='http://gdata.youtube.com/'>YouTube data API</generator>
<openSearch:totalResults>1</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<entry>
<yt:description>My new playlist Description</yt:description>
<gd:feedLink rel='http://gdata.youtube.com/schemas/2007#playlist' href='http://gdata.youtube.com/feeds/playlists/8BCDD04DE8F771B2'/>
<id>http://gdata.youtube.com/feeds/users/andyland74/playlists/8BCDD04DE8F771B2</id>
<published>2007-11-04T17:30:27.000-08:00</published>
<updated>2008-02-22T09:55:14.000-08:00</updated>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#playlistLink'/>
<title type='text'>My New Playlist Title</title>
<content type='text'>My new playlist Description</content>
<link rel='related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/andyland74'/>
<link rel='alternate' type='text/html' href='http://www.youtube.com/view_play_list?p=8BCDD04DE8F771B2'/>
<link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/andyland74/playlists/8BCDD04DE8F771B2'/>
<author>
<name>andyland74</name>
<uri>http://gdata.youtube.com/feeds/users/andyland74</uri>
</author>
</entry>
</feed>"""
YOUTUBE_PLAYLIST_VIDEO_FEED = """<?xml version='1.0' encoding='UTF-8'?><feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:gml='http://www.opengis.net/gml' xmlns:georss='http://www.georss.org/georss' xmlns:media='http://search.yahoo.com/mrss/' xmlns:yt='http://gdata.youtube.com/schemas/2007' xmlns:gd='http://schemas.google.com/g/2005'><id>http://gdata.youtube.com/feeds/api/playlists/BCB3BB96DF51B505</id><updated>2008-05-16T12:03:17.000-07:00</updated><category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#playlist'/><category scheme='http://gdata.youtube.com/schemas/2007/tags.cat' term='videos'/><category scheme='http://gdata.youtube.com/schemas/2007/tags.cat' term='python'/><title type='text'>Test Playlist</title><subtitle type='text'>Test playlist 1</subtitle><logo>http://www.youtube.com/img/pic_youtubelogo_123x63.gif</logo><link rel='alternate' type='text/html' href='http://www.youtube.com/view_play_list?p=BCB3BB96DF51B505'/><link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/playlists/BCB3BB96DF51B505'/><link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/playlists/BCB3BB96DF51B505?start-index=1&max-results=25'/><author><name>gdpython</name><uri>http://gdata.youtube.com/feeds/api/users/gdpython</uri></author><generator version='beta' uri='http://gdata.youtube.com/'>YouTube data API</generator><openSearch:totalResults>1</openSearch:totalResults><openSearch:startIndex>1</openSearch:startIndex><openSearch:itemsPerPage>25</openSearch:itemsPerPage><media:group><media:title type='plain'>Test Playlist</media:title><media:description type='plain'>Test playlist 1</media:description><media:content url='http://www.youtube.com/ep.swf?id=BCB3BB96DF51B505' type='application/x-shockwave-flash' yt:format='5'/></media:group><entry><id>http://gdata.youtube.com/feeds/api/playlists/BCB3BB96DF51B505/B0F29389E537F888</id><updated>2008-05-16T20:54:08.520Z</updated><category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#playlist'/><title type='text'>Uploading YouTube Videos with the PHP Client Library</title><content type='text'>Jochen Hartmann demonstrates the basics of how to use the PHP Client Library with the YouTube Data API.
PHP Developer's Guide:
http://code.google.com/apis/youtube/developers_guide_php.html
Other documentation:
http://code.google.com/apis/youtube/</content><link rel='alternate' type='text/html' href='http://www.youtube.com/watch?v=iIp7OnHXBlo'/><link rel='http://gdata.youtube.com/schemas/2007#video.responses' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/videos/iIp7OnHXBlo/responses'/><link rel='http://gdata.youtube.com/schemas/2007#video.related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/videos/iIp7OnHXBlo/related'/><link rel='related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/videos/iIp7OnHXBlo'/><link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/api/playlists/BCB3BB96DF51B505/B0F29389E537F888'/><author><name>GoogleDevelopers</name><uri>http://gdata.youtube.com/feeds/api/users/googledevelopers</uri></author><media:group><media:title type='plain'>Uploading YouTube Videos with the PHP Client Library</media:title><media:description type='plain'>Jochen Hartmann demonstrates the basics of how to use the PHP Client Library with the YouTube Data API.
PHP Developer's Guide:
http://code.google.com/apis/youtube/developers_guide_php.html
Other documentation:
http://code.google.com/apis/youtube/</media:description><media:keywords>api, data, demo, php, screencast, tutorial, uploading, walkthrough, youtube</media:keywords><yt:duration seconds='466'/><media:category label='Education' scheme='http://gdata.youtube.com/schemas/2007/categories.cat'>Education</media:category><media:content url='http://www.youtube.com/v/iIp7OnHXBlo' type='application/x-shockwave-flash' medium='video' isDefault='true' expression='full' duration='466' yt:format='5'/><media:content url='rtsp://rtsp2.youtube.com/ChoLENy73wIaEQlaBtdxOnuKiBMYDSANFEgGDA==/0/0/0/video.3gp' type='video/3gpp' medium='video' expression='full' duration='466' yt:format='1'/><media:content url='rtsp://rtsp2.youtube.com/ChoLENy73wIaEQlaBtdxOnuKiBMYESARFEgGDA==/0/0/0/video.3gp' type='video/3gpp' medium='video' expression='full' duration='466' yt:format='6'/><media:player url='http://www.youtube.com/watch?v=iIp7OnHXBlo'/><media:thumbnail url='http://img.youtube.com/vi/iIp7OnHXBlo/2.jpg' height='97' width='130' time='00:03:53'/><media:thumbnail url='http://img.youtube.com/vi/iIp7OnHXBlo/1.jpg' height='97' width='130' time='00:01:56.500'/><media:thumbnail url='http://img.youtube.com/vi/iIp7OnHXBlo/3.jpg' height='97' width='130' time='00:05:49.500'/><media:thumbnail url='http://img.youtube.com/vi/iIp7OnHXBlo/0.jpg' height='240' width='320' time='00:03:53'/></media:group><yt:statistics viewCount='1550' favoriteCount='5'/><gd:rating min='1' max='5' numRaters='3' average='4.67'/><yt:location>undefined</yt:location><gd:comments><gd:feedLink href='http://gdata.youtube.com/feeds/api/videos/iIp7OnHXBlo/comments' countHint='2'/></gd:comments><yt:position>1</yt:position></entry></feed>"""
YOUTUBE_SUBSCRIPTION_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:media='http://search.yahoo.com/mrss/'
xmlns:yt='http://gdata.youtube.com/schemas/2007'
xmlns:gd='http://schemas.google.com/g/2005'>
  <id>http://gdata.youtube.com/feeds/users/andyland74/subscriptions?start-index=1&amp;max-results=25</id>
<updated>2008-02-26T00:26:15.635Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://gdata.youtube.com/schemas/2007#subscription'/>
<title type='text'>andyland74's Subscriptions</title>
<logo>http://www.youtube.com/img/pic_youtubelogo_123x63.gif</logo>
<link rel='related' type='application/atom+xml'
href='http://gdata.youtube.com/feeds/users/andyland74'/>
<link rel='alternate' type='text/html'
href='http://www.youtube.com/profile_subscriptions?user=andyland74'/>
<link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml'
href='http://gdata.youtube.com/feeds/users/andyland74/subscriptions'/>
<link rel='self' type='application/atom+xml'
    href='http://gdata.youtube.com/feeds/users/andyland74/subscriptions?start-index=1&amp;max-results=25'/>
<author>
<name>andyland74</name>
<uri>http://gdata.youtube.com/feeds/users/andyland74</uri>
</author>
<generator version='beta' uri='http://gdata.youtube.com/'>YouTube data API</generator>
<openSearch:totalResults>1</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<entry>
<id>http://gdata.youtube.com/feeds/users/andyland74/subscriptions/d411759045e2ad8c</id>
<published>2007-11-04T17:30:27.000-08:00</published>
<updated>2008-02-22T09:55:14.000-08:00</updated>
<category scheme='http://gdata.youtube.com/schemas/2007/subscriptiontypes.cat'
term='channel'/>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://gdata.youtube.com/schemas/2007#subscription'/>
<title type='text'>Videos published by : NBC</title>
<link rel='related' type='application/atom+xml'
href='http://gdata.youtube.com/feeds/users/andyland74'/>
<link rel='alternate' type='text/html'
href='http://www.youtube.com/profile_videos?user=NBC'/>
<link rel='self' type='application/atom+xml'
href='http://gdata.youtube.com/feeds/users/andyland74/subscriptions/d411759045e2ad8c'/>
<author>
<name>andyland74</name>
<uri>http://gdata.youtube.com/feeds/users/andyland74</uri>
</author>
<yt:username>NBC</yt:username>
<gd:feedLink rel='http://gdata.youtube.com/schemas/2007#user.uploads'
href='http://gdata.youtube.com/feeds/api/users/nbc/uploads'/>
</entry>
</feed>"""
YOUTUBE_VIDEO_RESPONSE_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:gml='http://www.opengis.net/gml' xmlns:georss='http://www.georss.org/georss' xmlns:media='http://search.yahoo.com/mrss/' xmlns:yt='http://gdata.youtube.com/schemas/2007' xmlns:gd='http://schemas.google.com/g/2005'>
<id>http://gdata.youtube.com/feeds/videos/2c3q9K4cHzY/responses</id><updated>2008-05-19T22:37:34.076Z</updated><category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#video'/><title type='text'>Videos responses to 'Giant NES controller coffee table'</title><logo>http://www.youtube.com/img/pic_youtubelogo_123x63.gif</logo><link rel='related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/2c3q9K4cHzY'/><link rel='alternate' type='text/html' href='http://www.youtube.com/video_response_view_all?v=2c3q9K4cHzY'/><link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/2c3q9K4cHzY/responses'/><link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/2c3q9K4cHzY/responses?start-index=1&amp;max-results=25'/><author><name>YouTube</name><uri>http://www.youtube.com/</uri></author><generator version='beta' uri='http://gdata.youtube.com/'>YouTube data API</generator><openSearch:totalResults>8</openSearch:totalResults><openSearch:startIndex>1</openSearch:startIndex><openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<entry>
<id>http://gdata.youtube.com/feeds/videos/7b9EnRI9VbY</id><published>2008-03-11T19:08:53.000-07:00</published><updated>2008-05-18T21:33:10.000-07:00</updated>
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='OD'/><category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#video'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='chat'/>
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='Uncle'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='sex'/>
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='catmint'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='kato'/>
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='kissa'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='katt'/>
<category scheme='http://gdata.youtube.com/schemas/2007/categories.cat' term='Animals' label='Pets &amp; Animals'/>
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='kat'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='cat'/>
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='cats'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='kedi'/>
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='gato'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='Brattman'/>
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='drug'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='overdose'/>
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='catnip'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='party'/>
<category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='Katze'/><category scheme='http://gdata.youtube.com/schemas/2007/keywords.cat' term='gatto'/>
<title type='text'>Catnip Party</title><content type='html'>snipped</content>
<link rel='alternate' type='text/html' href='http://www.youtube.com/watch?v=7b9EnRI9VbY'/>
<link rel='http://gdata.youtube.com/schemas/2007#video.responses' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/7b9EnRI9VbY/responses'/>
<link rel='http://gdata.youtube.com/schemas/2007#video.related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/7b9EnRI9VbY/related'/>
<link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/videos/2c3q9K4cHzY/responses/7b9EnRI9VbY'/>
<author><name>PismoBeach</name><uri>http://gdata.youtube.com/feeds/users/pismobeach</uri></author>
<media:group>
<media:title type='plain'>Catnip Party</media:title>
<media:description type='plain'>Uncle, Hillary, Hankette, and B4 all but overdose on the patio</media:description><media:keywords>Brattman, cat, catmint, catnip, cats, chat, drug, gato, gatto, kat, kato, katt, Katze, kedi, kissa, OD, overdose, party, sex, Uncle</media:keywords>
<yt:duration seconds='139'/>
<media:category label='Pets &amp; Animals' scheme='http://gdata.youtube.com/schemas/2007/categories.cat'>Animals</media:category>
<media:content url='http://www.youtube.com/v/7b9EnRI9VbY' type='application/x-shockwave-flash' medium='video' isDefault='true' expression='full' duration='139' yt:format='5'/>
<media:content url='rtsp://rtsp2.youtube.com/ChoLENy73wIaEQm2VT0SnUS_7RMYDSANFEgGDA==/0/0/0/video.3gp' type='video/3gpp' medium='video' expression='full' duration='139' yt:format='1'/>
<media:content url='rtsp://rtsp2.youtube.com/ChoLENy73wIaEQm2VT0SnUS_7RMYESARFEgGDA==/0/0/0/video.3gp' type='video/3gpp' medium='video' expression='full' duration='139' yt:format='6'/>
<media:player url='http://www.youtube.com/watch?v=7b9EnRI9VbY'/>
<media:thumbnail url='http://img.youtube.com/vi/7b9EnRI9VbY/2.jpg' height='97' width='130' time='00:01:09.500'/>
<media:thumbnail url='http://img.youtube.com/vi/7b9EnRI9VbY/1.jpg' height='97' width='130' time='00:00:34.750'/>
<media:thumbnail url='http://img.youtube.com/vi/7b9EnRI9VbY/3.jpg' height='97' width='130' time='00:01:44.250'/>
<media:thumbnail url='http://img.youtube.com/vi/7b9EnRI9VbY/0.jpg' height='240' width='320' time='00:01:09.500'/>
</media:group>
<yt:statistics viewCount='4235' favoriteCount='3'/>
<gd:rating min='1' max='5' numRaters='24' average='3.54'/>
<gd:comments>
<gd:feedLink href='http://gdata.youtube.com/feeds/videos/7b9EnRI9VbY/comments' countHint='14'/>
</gd:comments>
</entry>
</feed>
"""
YOUTUBE_PROFILE = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:media='http://search.yahoo.com/mrss/'
xmlns:yt='http://gdata.youtube.com/schemas/2007'
xmlns:gd='http://schemas.google.com/g/2005'>
<id>http://gdata.youtube.com/feeds/users/andyland74</id>
<published>2006-10-16T00:09:45.000-07:00</published>
<updated>2008-02-26T11:48:21.000-08:00</updated>
<category scheme='http://gdata.youtube.com/schemas/2007/channeltypes.cat'
term='Standard'/>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://gdata.youtube.com/schemas/2007#userProfile'/>
<title type='text'>andyland74 Channel</title>
<link rel='alternate' type='text/html'
href='http://www.youtube.com/profile?user=andyland74'/>
<link rel='self' type='application/atom+xml'
href='http://gdata.youtube.com/feeds/users/andyland74'/>
<author>
<name>andyland74</name>
<uri>http://gdata.youtube.com/feeds/users/andyland74</uri>
</author>
<yt:age>33</yt:age>
<yt:username>andyland74</yt:username>
<yt:firstName>andy</yt:firstName>
<yt:lastName>example</yt:lastName>
<yt:books>Catch-22</yt:books>
<yt:gender>m</yt:gender>
<yt:company>Google</yt:company>
<yt:hobbies>Testing YouTube APIs</yt:hobbies>
<yt:hometown>Somewhere</yt:hometown>
<yt:location>US</yt:location>
<yt:movies>Aqua Teen Hungerforce</yt:movies>
<yt:music>Elliott Smith</yt:music>
<yt:occupation>Technical Writer</yt:occupation>
<yt:school>University of North Carolina</yt:school>
<media:thumbnail url='http://i.ytimg.com/vi/YFbSxcdOL-w/default.jpg'/>
<yt:statistics viewCount='9' videoWatchCount='21' subscriberCount='1'
lastWebAccess='2008-02-25T16:03:38.000-08:00'/>
<gd:feedLink rel='http://gdata.youtube.com/schemas/2007#user.favorites'
href='http://gdata.youtube.com/feeds/users/andyland74/favorites' countHint='4'/>
<gd:feedLink rel='http://gdata.youtube.com/schemas/2007#user.contacts'
href='http://gdata.youtube.com/feeds/users/andyland74/contacts' countHint='1'/>
<gd:feedLink rel='http://gdata.youtube.com/schemas/2007#user.inbox'
href='http://gdata.youtube.com/feeds/users/andyland74/inbox' countHint='0'/>
<gd:feedLink rel='http://gdata.youtube.com/schemas/2007#user.playlists'
href='http://gdata.youtube.com/feeds/users/andyland74/playlists'/>
<gd:feedLink rel='http://gdata.youtube.com/schemas/2007#user.subscriptions'
href='http://gdata.youtube.com/feeds/users/andyland74/subscriptions' countHint='4'/>
<gd:feedLink rel='http://gdata.youtube.com/schemas/2007#user.uploads'
href='http://gdata.youtube.com/feeds/users/andyland74/uploads' countHint='1'/>
</entry>"""
YOUTUBE_CONTACTS_FEED = """<?xml version='1.0' encoding='UTF-8'?><feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:yt='http://gdata.youtube.com/schemas/2007' xmlns:gd='http://schemas.google.com/g/2005'>
<id>http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts</id><updated>2008-05-16T19:24:34.916Z</updated><category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#friend'/><title type='text'>apitestjhartmann's Contacts</title><logo>http://www.youtube.com/img/pic_youtubelogo_123x63.gif</logo><link rel='alternate' type='text/html' href='http://www.youtube.com/profile_friends?user=apitestjhartmann'/><link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts'/><link rel='http://schemas.google.com/g/2005#post' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts'/><link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts?start-index=1&amp;max-results=25'/><author><name>apitestjhartmann</name><uri>http://gdata.youtube.com/feeds/users/apitestjhartmann</uri></author><generator version='beta' uri='http://gdata.youtube.com/'>YouTube data API</generator><openSearch:totalResults>2</openSearch:totalResults><openSearch:startIndex>1</openSearch:startIndex><openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<entry>
<id>http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts/test89899090</id><published>2008-02-04T11:27:54.000-08:00</published><updated>2008-05-16T19:24:34.916Z</updated><category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#friend'/><title type='text'>test89899090</title><link rel='related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/test89899090'/><link rel='alternate' type='text/html' href='http://www.youtube.com/profile?user=test89899090'/><link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts/test89899090'/><link rel='edit' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts/test89899090'/><author><name>apitestjhartmann</name><uri>http://gdata.youtube.com/feeds/users/apitestjhartmann</uri></author><yt:username>test89899090</yt:username><yt:status>requested</yt:status></entry>
<entry>
<id>http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts/testjfisher</id><published>2008-02-26T14:13:03.000-08:00</published><updated>2008-05-16T19:24:34.916Z</updated><category scheme='http://schemas.google.com/g/2005#kind' term='http://gdata.youtube.com/schemas/2007#friend'/><title type='text'>testjfisher</title><link rel='related' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/testjfisher'/><link rel='alternate' type='text/html' href='http://www.youtube.com/profile?user=testjfisher'/><link rel='self' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts/testjfisher'/><link rel='edit' type='application/atom+xml' href='http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts/testjfisher'/><author><name>apitestjhartmann</name><uri>http://gdata.youtube.com/feeds/users/apitestjhartmann</uri></author><yt:username>testjfisher</yt:username><yt:status>pending</yt:status></entry>
</feed>"""
NEW_CONTACT = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:gd='http://schemas.google.com/g/2005'
xmlns:gContact='http://schemas.google.com/contact/2008'>
<id>http://www.google.com/m8/feeds/contacts/liz%40gmail.com/base/8411573</id>
<updated>2008-02-28T18:47:02.303Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/contact/2008#contact' />
<title type='text'>Fitzgerald</title>
<content type='text'>Notes</content>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/m8/feeds/contacts/liz%40gmail.com/full/8411573' />
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/m8/feeds/contacts/liz%40gmail.com/full/8411573/1204224422303000' />
<gd:email rel='http://schemas.google.com/g/2005#work'
address='[email protected]' />
<gd:email rel='http://schemas.google.com/g/2005#home'
address='[email protected]' />
<gd:phoneNumber rel='http://schemas.google.com/g/2005#work'
primary='true'>(206)555-1212</gd:phoneNumber>
<gd:phoneNumber rel='http://schemas.google.com/g/2005#other'
primary='true'>456-123-2133</gd:phoneNumber>
<gd:phoneNumber rel='http://schemas.google.com/g/2005#home'>(206)555-1213</gd:phoneNumber>
<gd:extendedProperty name="pet" value="hamster" />
<gd:extendedProperty name="cousine">
<italian />
</gd:extendedProperty>
<gContact:groupMembershipInfo deleted="false" href="http://google.com/m8/feeds/groups/liz%40gmail.com/base/270f" />
<gd:im address='[email protected]'
protocol='http://schemas.google.com/g/2005#GOOGLE_TALK'
rel='http://schemas.google.com/g/2005#home' />
<gd:postalAddress rel='http://schemas.google.com/g/2005#work'
primary='true'>1600 Amphitheatre Pkwy Mountain View</gd:postalAddress>
</entry>"""
CONTACTS_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:gd='http://schemas.google.com/g/2005'
xmlns:gContact='http://schemas.google.com/contact/2008'
xmlns:batch='http://schemas.google.com/gdata/batch'>
<id>http://www.google.com/m8/feeds/contacts/liz%40gmail.com/base</id>
<updated>2008-03-05T12:36:38.836Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/contact/2008#contact' />
<title type='text'>Contacts</title>
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='http://www.google.com/m8/feeds/contacts/liz%40gmail.com/full' />
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='http://www.google.com/m8/feeds/contacts/liz%40gmail.com/full' />
<link rel='http://schemas.google.com/g/2005#batch'
type='application/atom+xml'
href='http://www.google.com/m8/feeds/contacts/liz%40gmail.com/full/batch' />
<link rel='self' type='application/atom+xml'
href='http://www.google.com/m8/feeds/contacts/liz%40gmail.com/full?max-results=25' />
<author>
<name>Elizabeth Bennet</name>
<email>[email protected]</email>
</author>
<generator version='1.0' uri='http://www.google.com/m8/feeds/contacts'>
Contacts
</generator>
<openSearch:totalResults>1</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<entry>
<id>
http://www.google.com/m8/feeds/contacts/liz%40gmail.com/base/c9012de
</id>
<updated>2008-03-05T12:36:38.835Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/contact/2008#contact' />
<title type='text'>Fitzgerald</title>
<link rel="http://schemas.google.com/contacts/2008/rel#photo" type="image/*"
href="http://google.com/m8/feeds/photos/media/liz%40gmail.com/c9012de"/>
<link rel="http://schemas.google.com/contacts/2008/rel#edit-photo" type="image/*"
href="http://www.google.com/m8/feeds/photos/media/liz%40gmail.com/c9012de/photo4524"/>
<link rel='self' type='application/atom+xml'
href='http://www.google.com/m8/feeds/contacts/liz%40gmail.com/full/c9012de' />
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/m8/feeds/contacts/liz%40gmail.com/full/c9012de/1204720598835000' />
<gd:phoneNumber rel='http://schemas.google.com/g/2005#home'
primary='true'>
456
</gd:phoneNumber>
<gd:extendedProperty name="pet" value="hamster" />
<gContact:groupMembershipInfo deleted="false" href="http://google.com/m8/feeds/groups/liz%40gmail.com/base/270f" />
</entry>
</feed>"""
CONTACT_GROUPS_FEED = """<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:gContact="http://schemas.google.com/contact/2008"
xmlns:batch="http://schemas.google.com/gdata/batch"
xmlns:gd="http://schemas.google.com/g/2005">
<id>[email protected]</id>
<updated>2008-05-21T21:11:25.237Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/contact/2008#group"/>
<title type="text">Jo's Contact Groups</title>
<link rel="alternate" type="text/html" href="http://www.google.com/"/>
<link rel="http://schemas.google.com/g/2005#feed"
type="application/atom+xml"
href="http://google.m/m8/feeds/groups/jo%40gmail.com/thin"/>
<link rel="http://schemas.google.com/g/2005#post"
type="application/atom+xml"
href="http://google.m/m8/feeds/groups/jo%40gmail.com/thin"/>
<link rel="http://schemas.google.com/g/2005#batch"
type="application/atom+xml"
href="http://googleom/m8/feeds/groups/jo%40gmail.com/thin/batch"/>
<link rel="self"
type="application/atom+xml"
href="http://google.com/m8/feeds/groups/jo%40gmail.com/thin?max-results=25"/>
<author>
<name>Jo Brown</name>
<email>[email protected]</email>
</author>
<generator version="1.0" uri="http://google.com/m8/feeds">Contacts</generator>
<openSearch:totalResults>3</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>25</openSearch:itemsPerPage>
<entry>
<id>http://google.com/m8/feeds/groups/jo%40gmail.com/base/270f</id>
<updated>2008-05-14T13:10:19.070Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/contact/2008#group"/>
<title type="text">joggers</title>
<content type="text">joggers</content>
<link rel="self" type="application/atom+xml"
href="http://google.com/m8/feeds/groups/jo%40gmail.com/thin/270f"/>
<link rel="edit" type="application/atom+xml"
href="http://google.com/m8/feeds/groups/jo%40gmail.com/thin/270f/1210770619070000"/>
</entry>
</feed>"""
CONTACT_GROUP_ENTRY = """<?xml version="1.0" encoding="UTF-8"?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:gd="http://schemas.google.com/g/2005">
<category scheme="http://schemas.google.com/g/2005#kind"
term="http://schemas.google.com/g/2005#group"/>
<id>http://www.google.com/feeds/groups/jo%40gmail.com/base/1234</id>
<published>2005-01-18T21:00:00Z</published>
<updated>2006-01-01T00:00:00Z</updated>
<title type="text">Salsa group</title>
<content type="text">Salsa group</content>
<link rel='self' type='application/atom+xml'
href= 'http://www.google.com/m8/feeds/groups/jo%40gmail.com/full/2' />
<link rel='edit' type='application/atom+xml'
href='http://www.google.com/m8/feeds/groups/jo%40gmail.com/full/2/0'/>
<gd:extendedProperty name="more info about the group">
<info>Very nice people.</info>
</gd:extendedProperty>
</entry>"""
CALENDAR_RESOURCE_ENTRY = """<?xml version="1.0"?>
<atom:entry xmlns:atom="http://www.w3.org/2005/Atom" xmlns:apps="http://schemas.google.com/apps/2006">
<apps:property name="resourceId" value="CR-NYC-14-12-BR"/>
<apps:property name="resourceCommonName" value="Boardroom"/>
<apps:property name="resourceDescription" value="This conference room is in New York City, building 14, floor 12, Boardroom"/>
<apps:property name="resourceType" value="CR"/>
</atom:entry>"""
CALENDAR_RESOURCES_FEED = """<?xml version="1.0"?>
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/" xmlns:apps="http://schemas.google.com/apps/2006">
<id>https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com</id>
<updated>2008-10-17T15:29:21.064Z</updated>
<link rel="next" type="application/atom+xml" href="https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com/?start=the next resourceId"/>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com"/>
<link rel="http://schemas.google.com/g/2005#post" type="application/atom+xml" href="https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com"/>
<link rel="self" type="application/atom+xml" href="https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com?start=CR-NYC-14-12-BR"/>
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com/CR-NYC-14-12-BR</id>
<updated>2008-10-17T15:29:21.064Z</updated>
<link rel="self" type="application/atom+xml" href="https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com/CR-NYC-14-12-BR"/>
<link rel="edit" type="application/atom+xml" href="https://apps-apis.google.com/feeds/calendar/resource/2.0/yourdomain.com/CR-NYC-14-12-BR"/>
<apps:property name="resourceId" value="CR-NYC-14-12-BR"/>
<apps:property name="resourceCommonName" value="Boardroom"/>
<apps:property name="resourceEmail" value="[email protected]"/>
<apps:property name="resourceDescription" value="This conference room is in New York City, building 14, floor 12, Boardroom"/>
<apps:property name="resourceType" value="CR"/>
</entry>
<entry>
<id>https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com/?start=(Bike)-London-43-Lobby-Bike-1</id>
<updated>2008-10-17T15:29:21.064Z</updated>
<link rel="self" type="application/atom+xml" href="https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com/(Bike)-London-43-Lobby-Bike-1"/>
<link rel="edit" type="application/atom+xml" href="https://apps-apis.google.com/a/feeds/calendar/resource/2.0/yourdomain.com/(Bike)-London-43-Lobby-Bike-1"/>
<apps:property name="resourceId" value="(Bike)-London-43-Lobby-Bike-1"/>
<apps:property name="resourceCommonName" value="London bike-1"/>
<apps:property name="resourceEmail" value="[email protected]"/>
<apps:property name="resourceDescription" value="Bike is in London at building 43's lobby."/>
<apps:property name="resourceType" value="(Bike)"/>
</entry>
</feed>"""
BLOG_ENTRY = """<entry xmlns='http://www.w3.org/2005/Atom'>
<id>tag:blogger.com,1999:blog-blogID.post-postID</id>
<published>2006-08-02T18:44:43.089-07:00</published>
<updated>2006-11-08T18:10:23.020-08:00</updated>
<title type='text'>Lizzy's Diary</title>
<summary type='html'>Being the journal of Elizabeth Bennet</summary>
<link rel='alternate' type='text/html'
href='http://blogName.blogspot.com/'>
</link>
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='http://blogName.blogspot.com/feeds/posts/default'>
</link>
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='http://www.blogger.com/feeds/blogID/posts/default'>
</link>
<link rel='self' type='application/atom+xml'
href='http://www.blogger.com/feeds/userID/blogs/blogID'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.blogger.com/feeds/userID/blogs/blogID'>
</link>
<author>
<name>Elizabeth Bennet</name>
<email>[email protected]</email>
</author>
</entry>"""
BLOG_POST = """<entry xmlns='http://www.w3.org/2005/Atom'>
<title type='text'>Marriage!</title>
<content type='xhtml'>
<div xmlns="http://www.w3.org/1999/xhtml">
<p>Mr. Darcy has <em>proposed marriage</em> to me!</p>
<p>He is the last man on earth I would ever desire to marry.</p>
<p>Whatever shall I do?</p>
</div>
</content>
<author>
<name>Elizabeth Bennet</name>
<email>[email protected]</email>
</author>
</entry>"""
BLOG_POSTS_FEED = """<feed xmlns='http://www.w3.org/2005/Atom'>
<id>tag:blogger.com,1999:blog-blogID</id>
<updated>2006-11-08T18:10:23.020-08:00</updated>
<title type='text'>Lizzy's Diary</title>
<link rel='alternate' type='text/html'
href='http://blogName.blogspot.com/index.html'>
</link>
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='http://blogName.blogspot.com/feeds/posts/default'>
</link>
<link rel='self' type='application/atom+xml'
href='http://blogName.blogspot.com/feeds/posts/default'>
</link>
<author>
<name>Elizabeth Bennet</name>
<email>[email protected]</email>
</author>
<generator version='7.00' uri='http://www2.blogger.com'>Blogger</generator>
<entry>
<id>tag:blogger.com,1999:blog-blogID.post-postID</id>
<published>2006-11-08T18:10:00.000-08:00</published>
<updated>2006-11-08T18:10:14.954-08:00</updated>
<title type='text'>Quite disagreeable</title>
  <content type='html'>&lt;p&gt;I met Mr. Bingley's friend Mr. Darcy
  this evening. I found him quite disagreeable.&lt;/p&gt;</content>
<link rel='alternate' type='text/html'
href='http://blogName.blogspot.com/2006/11/quite-disagreeable.html'>
</link>
<link rel='self' type='application/atom+xml'
href='http://blogName.blogspot.com/feeds/posts/default/postID'>
</link>
<link rel='edit' type='application/atom+xml'
href='http://www.blogger.com/feeds/blogID/posts/default/postID'>
</link>
<author>
<name>Elizabeth Bennet</name>
<email>[email protected]</email>
</author>
</entry>
</feed>"""
BLOG_COMMENTS_FEED = """<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/">
<id>tag:blogger.com,1999:blog-blogID.postpostID..comments</id>
<updated>2007-04-04T21:56:29.803-07:00</updated>
<title type="text">My Blog : Time to relax</title>
<link rel="alternate" type="text/html" href="http://blogName.blogspot.com/2007/04/first-post.html"/>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="http://blogName.blogspot.com/feeds/postID/comments/default"/>
<link rel="self" type="application/atom+xml" href="http://blogName.blogspot.com/feeds/postID/comments/default"/>
<author>
<name>Blog Author name</name>
</author>
<generator version="7.00" uri="http://www2.blogger.com">Blogger</generator>
<openSearch:totalResults>1</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>tag:blogger.com,1999:blog-blogID.post-commentID</id>
<published>2007-04-04T21:56:00.000-07:00</published>
<updated>2007-04-04T21:56:29.803-07:00</updated>
<title type="text">This is my first comment</title>
<content type="html">This is my first comment</content>
<link rel="alternate" type="text/html" href="http://a-blogName.blogspot.com/2007/04/first-post.html#commentID"/>
<link rel="self" type="application/atom+xml" href="http://blogName.blogspot.com/feeds/postID/comments/default/commentID"/>
<link rel="edit" type="application/atom+xml" href="http://www.blogger.com/feeds/blogID/postID/comments/default/commentID"/>
<author>
<name>Blog Author name</name>
</author>
<thr:in-reply-to xmlns:thr='http://purl.org/syndication/thread/1.0'
href='http://blogName.blogspot.com/2007/04/first-post.html'
ref='tag:blogger.com,1999:blog-blogID.post-postID'
source='http://blogName.blogspot.com/feeds/posts/default/postID'
type='text/html' />
</entry>
</feed>"""
SITES_FEED = """<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:gd="http://schemas.google.com/g/2005"
xmlns:wt="http://schemas.google.com/webmasters/tools/2007">
<id>https://www.google.com/webmasters/tools/feeds/sites</id>
<title>Sites</title>
<openSearch:startIndex>1</openSearch:startIndex>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/webmasters/tools/2007#sites-feed" />
<link href="http://www.google.com/webmasters/tools/feeds/sites" rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" />
<link href="http://www.google.com/webmasters/tools/feeds/sites" rel="http://schemas.google.com/g/2005#post" type="application/atom+xml" />
<link href="http://www.google.com/webmasters/tools/feeds/sites" rel="self" type="application/atom+xml" />
<updated>2008-10-02T07:26:51.833Z</updated>
<entry>
<id>http://www.example.com</id>
<title type="text">http://www.example.com</title>
<link href="http://www.google.com/webmasters/tools/feeds/sites/http%3A%2F%2Fwww.example.com%2F" rel="self" type="application/atom+xml"/>
<link href="http://www.google.com/webmasters/tools/feeds/sites/http%3A%2F%2Fwww.example.com%2F" rel="edit" type="application/atom+xml"/>
<content src="http://www.example.com"/>
<updated>2007-11-17T18:27:32.543Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/webmasters/tools/2007#site-info"/>
<gd:entryLink rel="http://schemas.google.com/webmasters/tools/2007#verification"
href="https://www.google.com/webmasters/tools/feeds/http%3A%2F%2Fwww%2Eexample%2Ecom%2F/verification" />
<gd:entryLink rel="http://schemas.google.com/webmasters/tools/2007#sitemaps"
href="https://www.google.com/webmasters/tools/feeds/http%3A%2F%2Fwww%2Eexample%2Ecom%2F/sitemaps" />
<wt:indexed>true</wt:indexed>
<wt:crawled>2008-09-14T08:59:28.000</wt:crawled>
<wt:geolocation>US</wt:geolocation>
<wt:preferred-domain>none</wt:preferred-domain>
<wt:crawl-rate>normal</wt:crawl-rate>
<wt:enhanced-image-search>true</wt:enhanced-image-search>
<wt:verified>false</wt:verified>
<wt:verification-method type="metatag" in-use="false"><meta name="verify-v1" content="a2Ai"/>
</wt:verification-method>
<wt:verification-method type="htmlpage" in-use="false">456456-google.html</wt:verification-method>
</entry>
</feed>"""
SITEMAPS_FEED = """<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:wt="http://schemas.google.com/webmasters/tools/2007">
<id>http://www.example.com</id>
<title type="text">http://www.example.com/</title>
<updated>2006-11-17T18:27:32.543Z</updated>
<link rel="self" type="application/atom+xml"
href="https://www.google.com/webmasters/tools/feeds/http%3A%2F%2Fwww%2Eexample%2Ecom%2F/sitemaps" />
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/webmasters/tools/2007#sitemaps-feed'/>
<wt:sitemap-mobile>
<wt:markup-language>HTML</wt:markup-language>
<wt:markup-language>WAP</wt:markup-language>
</wt:sitemap-mobile>
<wt:sitemap-news>
<wt:publication-label>Value1</wt:publication-label>
<wt:publication-label>Value2</wt:publication-label>
<wt:publication-label>Value3</wt:publication-label>
</wt:sitemap-news>
<entry>
<id>http://www.example.com/sitemap-index.xml</id>
<title type="text">http://www.example.com/sitemap-index.xml</title>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/webmasters/tools/2007#sitemap-regular'/>
<updated>2006-11-17T18:27:32.543Z</updated>
<wt:sitemap-type>WEB</wt:sitemap-type>
<wt:sitemap-status>StatusValue</wt:sitemap-status>
<wt:sitemap-last-downloaded>2006-11-18T19:27:32.543Z</wt:sitemap-last-downloaded>
<wt:sitemap-url-count>102</wt:sitemap-url-count>
</entry>
<entry>
<id>http://www.example.com/mobile/sitemap-index.xml</id>
<title type="text">http://www.example.com/mobile/sitemap-index.xml</title>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/webmasters/tools/2007#sitemap-mobile'/>
<updated>2006-11-17T18:27:32.543Z</updated>
<wt:sitemap-status>StatusValue</wt:sitemap-status>
<wt:sitemap-last-downloaded>2006-11-18T19:27:32.543Z</wt:sitemap-last-downloaded>
<wt:sitemap-url-count>102</wt:sitemap-url-count>
<wt:sitemap-mobile-markup-language>HTML</wt:sitemap-mobile-markup-language>
</entry>
<entry>
<id>http://www.example.com/news/sitemap-index.xml</id>
<title type="text">http://www.example.com/news/sitemap-index.xml</title>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/webmasters/tools/2007#sitemap-news'/>
<updated>2006-11-17T18:27:32.543Z</updated>
<wt:sitemap-status>StatusValue</wt:sitemap-status>
<wt:sitemap-last-downloaded>2006-11-18T19:27:32.543Z</wt:sitemap-last-downloaded>
<wt:sitemap-url-count>102</wt:sitemap-url-count>
<wt:sitemap-news-publication-label>LabelValue</wt:sitemap-news-publication-label>
</entry>
</feed>"""
HEALTH_CCR_NOTICE_PAYLOAD = """<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<Body>
<Problems>
<Problem>
<DateTime>
<Type><Text>Start date</Text></Type>
<ExactDateTime>2007-04-04T07:00:00Z</ExactDateTime>
</DateTime>
<Description>
<Text>Aortic valve disorders</Text>
<Code>
<Value>410.10</Value>
<CodingSystem>ICD9</CodingSystem>
<Version>2004</Version>
</Code>
</Description>
<Status><Text>Active</Text></Status>
</Problem>
</Problems>
</Body>
</ContinuityOfCareRecord>"""
HEALTH_PROFILE_ENTRY_DIGEST = """<?xml version="1.0" encoding="UTF-8"?>
<entry xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/" xmlns:ccr="urn:astm-org:CCR" xmlns:batch="http://schemas.google.com/gdata/batch" xmlns:h9m="http://schemas.google.com/health/metadata">
<id>https://www.google.com/health/feeds/profile/default/vneCn5qdEIY_digest</id>
<updated>2008-09-29T07:52:17.176Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile" />
<link rel="alternate" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default?digest=true" />
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/vneCn5qdEIY_digest" />
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/vneCn5qdEIY_digest" />
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>vneCn5qdEIY</CCRDocumentObjectID>
<Language>
<Text>English</Text>
<Code>
<Value>en</Value>
<CodingSystem>ISO-639-1</CodingSystem>
</Code>
</Language>
<Version>V1.0</Version>
<DateTime>
<ExactDateTime>2008-09-29T07:52:17.176Z</ExactDateTime>
</DateTime>
<Patient>
<ActorID>Google Health Profile</ActorID>
</Patient>
<Body>
<FunctionalStatus>
<Function>
<Type>
<Text>Pregnancy status</Text>
</Type>
<Description>
<Text>Not pregnant</Text>
</Description>
<Status />
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
</Function>
<Function>
<Type>
<Text>Breastfeeding status</Text>
</Type>
<Description>
<Text>Not breastfeeding</Text>
</Description>
<Status />
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
</Function>
</FunctionalStatus>
<Problems>
<Problem>
<CCRDataObjectID>Hn0FE0IlcY-FMFFgSTxkvA/CONDITION/0</CCRDataObjectID>
<DateTime>
<Type>
<Text>Start date</Text>
</Type>
<ExactDateTime>2007-04-04T07:00:00Z</ExactDateTime>
</DateTime>
<Description>
<Text>Aortic valve disorders</Text>
<Code>
<Value>410.10</Value>
<CodingSystem>ICD9</CodingSystem>
<Version>2004</Version>
</Code>
</Description>
<Status>
<Text>Active</Text>
</Status>
<Source>
<Actor>
<ActorID>example.com</ActorID>
<ActorRole>
<Text>Information Provider</Text>
</ActorRole>
</Actor>
</Source>
</Problem>
<Problem>
<Type />
<Description>
<Text>Malaria</Text>
<Code>
<Value>136.9</Value>
<CodingSystem>ICD9_Broader</CodingSystem>
</Code>
<Code>
<Value>084.6</Value>
<CodingSystem>ICD9</CodingSystem>
</Code>
</Description>
<Status>
<Text>ACTIVE</Text>
</Status>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<HealthStatus>
<Description />
</HealthStatus>
</Problem>
</Problems>
<SocialHistory>
<SocialHistoryElement>
<Type>
<Text>Race</Text>
<Code>
<Value>S15814</Value>
<CodingSystem>HL7</CodingSystem>
</Code>
</Type>
<Description>
<Text>White</Text>
</Description>
<Status />
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Episodes>
<Frequency>
<Units />
</Frequency>
</Episodes>
</SocialHistoryElement>
</SocialHistory>
<Alerts>
<Alert>
<Type>
<Text>Allergy</Text>
</Type>
<Description>
<Text>A-Fil</Text>
</Description>
<Status>
<Text>ACTIVE</Text>
</Status>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Reaction>
<Description />
<Severity>
<Text>Severe</Text>
</Severity>
</Reaction>
</Alert>
<Alert>
<Type>
<Text>Allergy</Text>
</Type>
<Description>
<Text>A.E.R Traveler</Text>
</Description>
<Status>
<Text>ACTIVE</Text>
</Status>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Reaction>
<Description />
<Severity>
<Text>Severe</Text>
</Severity>
</Reaction>
</Alert>
</Alerts>
<Medications>
<Medication>
<Type />
<Description />
<Status>
<Text>ACTIVE</Text>
</Status>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Product>
<ProductName>
            <Text>A&amp; D</Text>
</ProductName>
<Strength>
<Units />
<StrengthSequencePosition>0</StrengthSequencePosition>
<VariableStrengthModifier />
</Strength>
</Product>
<Directions>
<Direction>
<Description />
<DeliveryMethod />
<Dose>
<Units />
<DoseSequencePosition>0</DoseSequencePosition>
<VariableDoseModifier />
</Dose>
<Route>
<Text>To skin</Text>
<Code>
<Value>C38305</Value>
<CodingSystem>FDA</CodingSystem>
</Code>
<RouteSequencePosition>0</RouteSequencePosition>
<MultipleRouteModifier />
</Route>
</Direction>
</Directions>
<Refills />
</Medication>
<Medication>
<Type />
<Description />
<Status>
<Text>ACTIVE</Text>
</Status>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Product>
<ProductName>
<Text>A-Fil</Text>
</ProductName>
<Strength>
<Units />
<StrengthSequencePosition>0</StrengthSequencePosition>
<VariableStrengthModifier />
</Strength>
</Product>
<Directions>
<Direction>
<Description />
<DeliveryMethod />
<Dose>
<Units />
<DoseSequencePosition>0</DoseSequencePosition>
<VariableDoseModifier />
</Dose>
<Route>
<Text>To skin</Text>
<Code>
<Value>C38305</Value>
<CodingSystem>FDA</CodingSystem>
</Code>
<RouteSequencePosition>0</RouteSequencePosition>
<MultipleRouteModifier />
</Route>
</Direction>
</Directions>
<Refills />
</Medication>
<Medication>
<Type />
<Description />
<Status>
<Text>ACTIVE</Text>
</Status>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Product>
<ProductName>
<Text>Lipitor</Text>
</ProductName>
<Strength>
<Units />
<StrengthSequencePosition>0</StrengthSequencePosition>
<VariableStrengthModifier />
</Strength>
</Product>
<Directions>
<Direction>
<Description />
<DeliveryMethod />
<Dose>
<Units />
<DoseSequencePosition>0</DoseSequencePosition>
<VariableDoseModifier />
</Dose>
<Route>
<Text>By mouth</Text>
<Code>
<Value>C38288</Value>
<CodingSystem>FDA</CodingSystem>
</Code>
<RouteSequencePosition>0</RouteSequencePosition>
<MultipleRouteModifier />
</Route>
</Direction>
</Directions>
<Refills />
</Medication>
</Medications>
<Immunizations>
<Immunization>
<Type />
<Description />
<Status />
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Product>
<ProductName>
<Text>Chickenpox Vaccine</Text>
<Code>
<Value>21</Value>
<CodingSystem>HL7</CodingSystem>
</Code>
</ProductName>
</Product>
<Directions>
<Direction>
<Description />
<DeliveryMethod />
</Direction>
</Directions>
<Refills />
</Immunization>
</Immunizations>
<VitalSigns>
<Result>
<Type />
<Description />
<Status />
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Substance />
<Test>
<Type />
<Description>
<Text>Height</Text>
</Description>
<Status />
<TestResult>
<ResultSequencePosition>0</ResultSequencePosition>
<VariableResultModifier />
<Value>70</Value>
<Units>
<Unit>inches</Unit>
</Units>
</TestResult>
<ConfidenceValue />
</Test>
</Result>
<Result>
<Type />
<Description />
<Status />
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Substance />
<Test>
<Type />
<Description>
<Text>Weight</Text>
</Description>
<Status />
<TestResult>
<ResultSequencePosition>0</ResultSequencePosition>
<VariableResultModifier />
<Value>2480</Value>
<Units>
<Unit>ounces</Unit>
</Units>
</TestResult>
<ConfidenceValue />
</Test>
</Result>
<Result>
<Type />
<Description />
<Status />
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Substance />
<Test>
<Type />
<Description>
<Text>Blood Type</Text>
</Description>
<Status />
<TestResult>
<ResultSequencePosition>0</ResultSequencePosition>
<VariableResultModifier />
<Value>O+</Value>
<Units />
</TestResult>
<ConfidenceValue />
</Test>
</Result>
</VitalSigns>
<Results>
<Result>
<Type />
<Description />
<Status />
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Substance />
<Test>
<DateTime>
<Type>
<Text>Collection start date</Text>
</Type>
<ExactDateTime>2008-09-03</ExactDateTime>
</DateTime>
<Type />
<Description>
<Text>Acetaldehyde - Blood</Text>
</Description>
<Status />
<TestResult>
<ResultSequencePosition>0</ResultSequencePosition>
<VariableResultModifier />
<Units />
</TestResult>
<ConfidenceValue />
</Test>
</Result>
</Results>
<Procedures>
<Procedure>
<Type />
<Description>
<Text>Abdominal Ultrasound</Text>
</Description>
<Status />
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
</Procedure>
<Procedure>
<Type />
<Description>
<Text>Abdominoplasty</Text>
</Description>
<Status />
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
</Procedure>
</Procedures>
</Body>
<Actors>
<Actor>
<ActorObjectID>Google Health Profile</ActorObjectID>
<Person>
<Name>
<BirthName />
<CurrentName />
</Name>
<DateOfBirth>
<Type />
<ExactDateTime>1984-07-22</ExactDateTime>
</DateOfBirth>
<Gender>
<Text>Male</Text>
</Gender>
</Person>
<Status />
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
</Actor>
</Actors>
</ContinuityOfCareRecord>
</entry>"""
HEALTH_PROFILE_FEED = """<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/"
xmlns:ccr="urn:astm-org:CCR" xmlns:batch="http://schemas.google.com/gdata/batch" xmlns:h9m="http://schemas.google.com/health/metadata">
<id>https://www.google.com/health/feeds/profile/default</id>
<updated>2008-09-30T01:07:17.888Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<title type="text">Profile Feed</title>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default"/>
<link rel="http://schemas.google.com/g/2005#post" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default"/>
<link rel="http://schemas.google.com/g/2005#batch" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/batch"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default?digest=false"/>
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>https://www.google.com/health/feeds/profile/default/DysasdfARnFAao</id>
<published>2008-09-29T03:12:50.850Z</published>
<updated>2008-09-29T03:12:50.850Z</updated>
<category term="MEDICATION"/>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category scheme="http://schemas.google.com/health/item" term="A& D"/>
<title type="text"/>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/MEDICATION/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DA%26+D"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/DysasdfARnFAao"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/DysasdfARnFAao"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>hiD9sEigSzdk8nNT0evR4g</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body>
<Medications>
<Medication>
<Type/>
<Description/>
<Status>
<Text>ACTIVE</Text>
</Status>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Product>
<ProductName>
            <Text>A&amp; D</Text>
</ProductName>
<Strength>
<Units/>
<StrengthSequencePosition>0</StrengthSequencePosition>
<VariableStrengthModifier/>
</Strength>
</Product>
<Directions>
<Direction>
<Description/>
<DeliveryMethod/>
<Dose>
<Units/>
<DoseSequencePosition>0</DoseSequencePosition>
<VariableDoseModifier/>
</Dose>
<Route>
<Text>To skin</Text>
<Code>
<Value>C38305</Value>
<CodingSystem>FDA</CodingSystem>
</Code>
<RouteSequencePosition>0</RouteSequencePosition>
<MultipleRouteModifier/>
</Route>
</Direction>
</Directions>
<Refills/>
</Medication>
</Medications>
</Body>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/7I1WQzZrgp4</id>
<published>2008-09-29T03:27:14.909Z</published>
<updated>2008-09-29T03:27:14.909Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category scheme="http://schemas.google.com/health/item" term="A-Fil"/>
<category term="ALLERGY"/>
<title type="text"/>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DA-Fil/ALLERGY"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/7I1WQzZrgp4"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/7I1WQzZrgp4"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>YOyHDxQUiECCPgnsjV8SlQ</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body>
<Alerts>
<Alert>
<Type>
<Text>Allergy</Text>
</Type>
<Description>
<Text>A-Fil</Text>
</Description>
<Status>
<Text>ACTIVE</Text>
</Status>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Reaction>
<Description/>
<Severity>
<Text>Severe</Text>
</Severity>
</Reaction>
</Alert>
</Alerts>
</Body>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/Dz9wV83sKFg</id>
<published>2008-09-29T03:12:52.166Z</published>
<updated>2008-09-29T03:12:52.167Z</updated>
<category term="MEDICATION"/>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category scheme="http://schemas.google.com/health/item" term="A-Fil"/>
<title type="text"/>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/MEDICATION/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DA-Fil"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/Dz9wV83sKFg"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/Dz9wV83sKFg"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>7w.XFEPeuIYN3Rn32pUiUw</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body>
<Medications>
<Medication>
<Type/>
<Description/>
<Status>
<Text>ACTIVE</Text>
</Status>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Product>
<ProductName>
<Text>A-Fil</Text>
</ProductName>
<Strength>
<Units/>
<StrengthSequencePosition>0</StrengthSequencePosition>
<VariableStrengthModifier/>
</Strength>
</Product>
<Directions>
<Direction>
<Description/>
<DeliveryMethod/>
<Dose>
<Units/>
<DoseSequencePosition>0</DoseSequencePosition>
<VariableDoseModifier/>
</Dose>
<Route>
<Text>To skin</Text>
<Code>
<Value>C38305</Value>
<CodingSystem>FDA</CodingSystem>
</Code>
<RouteSequencePosition>0</RouteSequencePosition>
<MultipleRouteModifier/>
</Route>
</Direction>
</Directions>
<Refills/>
</Medication>
</Medications>
</Body>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/lzsxVzqZUyw</id>
<published>2008-09-29T03:13:07.496Z</published>
<updated>2008-09-29T03:13:07.497Z</updated>
<category scheme="http://schemas.google.com/health/item" term="A.E.R Traveler"/>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category term="ALLERGY"/>
<title type="text"/>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DA.E.R+Traveler/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/ALLERGY"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/lzsxVzqZUyw"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/lzsxVzqZUyw"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>5efFB0J2WgEHNUvk2z3A1A</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body>
<Alerts>
<Alert>
<Type>
<Text>Allergy</Text>
</Type>
<Description>
<Text>A.E.R Traveler</Text>
</Description>
<Status>
<Text>ACTIVE</Text>
</Status>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Reaction>
<Description/>
<Severity>
<Text>Severe</Text>
</Severity>
</Reaction>
</Alert>
</Alerts>
</Body>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/6PvhfKAXyYw</id>
<published>2008-09-29T03:13:02.123Z</published>
<updated>2008-09-29T03:13:02.124Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category term="PROCEDURE"/>
<category scheme="http://schemas.google.com/health/item" term="Abdominal Ultrasound"/>
<title type="text"/>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/PROCEDURE/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DAbdominal+Ultrasound"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/6PvhfKAXyYw"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/6PvhfKAXyYw"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>W3Wbvx_QHwG5pxVchpuF1A</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body>
<Procedures>
<Procedure>
<Type/>
<Description>
<Text>Abdominal Ultrasound</Text>
</Description>
<Status/>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
</Procedure>
</Procedures>
</Body>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/r2zGPGewCeU</id>
<published>2008-09-29T03:13:03.434Z</published>
<updated>2008-09-29T03:13:03.435Z</updated>
<category scheme="http://schemas.google.com/health/item" term="Abdominoplasty"/>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category term="PROCEDURE"/>
<title type="text"/>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DAbdominoplasty/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/PROCEDURE"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/r2zGPGewCeU"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/r2zGPGewCeU"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>OUKgj5X0KMnbkC5sDL.yHA</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body>
<Procedures>
<Procedure>
<Type/>
<Description>
<Text>Abdominoplasty</Text>
</Description>
<Status/>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
</Procedure>
</Procedures>
</Body>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/_cCCbQ0O3ug</id>
<published>2008-09-29T03:13:29.041Z</published>
<updated>2008-09-29T03:13:29.042Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category scheme="http://schemas.google.com/health/item" term="Acetaldehyde - Blood"/>
<category term="LABTEST"/>
<title type="text"/>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DAcetaldehyde+-+Blood/LABTEST"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/_cCCbQ0O3ug"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/_cCCbQ0O3ug"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>YWtomFb8aG.DueZ7z7fyug</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body>
<Results>
<Result>
<Type/>
<Description/>
<Status/>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Substance/>
<Test>
<DateTime>
<Type>
<Text>Collection start date</Text>
</Type>
<ExactDateTime>2008-09-03</ExactDateTime>
</DateTime>
<Type/>
<Description>
<Text>Acetaldehyde - Blood</Text>
</Description>
<Status/>
<TestResult>
<ResultSequencePosition>0</ResultSequencePosition>
<VariableResultModifier/>
<Units/>
</TestResult>
<ConfidenceValue/>
</Test>
</Result>
</Results>
</Body>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/BdyA3iJZyCc</id>
<published>2008-09-29T03:00:45.915Z</published>
<updated>2008-09-29T03:00:45.915Z</updated>
<category scheme="http://schemas.google.com/health/item" term="Aortic valve disorders"/>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category term="CONDITION"/>
<title type="text">Aortic valve disorders</title>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DAortic+valve+disorders/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/CONDITION"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/BdyA3iJZyCc"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/BdyA3iJZyCc"/>
<author>
<name>example.com</name>
<uri>example.com</uri>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>h1ljpoeKJ85li.1FHsG9Gw</CCRDocumentObjectID>
<Body>
<Problems>
<Problem>
<CCRDataObjectID>Hn0FE0IlcY-FMFFgSTxkvA/CONDITION/0</CCRDataObjectID>
<DateTime>
<Type>
<Text>Start date</Text>
</Type>
<ExactDateTime>2007-04-04T07:00:00Z</ExactDateTime>
</DateTime>
<Description>
<Text>Aortic valve disorders</Text>
<Code>
<Value>410.10</Value>
<CodingSystem>ICD9</CodingSystem>
<Version>2004</Version>
</Code>
</Description>
<Status>
<Text>Active</Text>
</Status>
<Source>
<Actor>
<ActorID>example.com</ActorID>
<ActorRole>
<Text>Information Provider</Text>
</ActorRole>
</Actor>
</Source>
</Problem>
</Problems>
</Body>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/Cl.aMWIH5VA</id>
<published>2008-09-29T03:13:34.996Z</published>
<updated>2008-09-29T03:13:34.997Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category scheme="http://schemas.google.com/health/item" term="Chickenpox Vaccine"/>
<category term="IMMUNIZATION"/>
<title type="text"/>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DChickenpox+Vaccine/IMMUNIZATION"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/Cl.aMWIH5VA"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/Cl.aMWIH5VA"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>KlhUqfftgELIitpKbqYalw</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body>
<Immunizations>
<Immunization>
<Type/>
<Description/>
<Status/>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Product>
<ProductName>
<Text>Chickenpox Vaccine</Text>
<Code>
<Value>21</Value>
<CodingSystem>HL7</CodingSystem>
</Code>
</ProductName>
</Product>
<Directions>
<Direction>
<Description/>
<DeliveryMethod/>
</Direction>
</Directions>
<Refills/>
</Immunization>
</Immunizations>
</Body>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/l0a7.FlX3_0</id>
<published>2008-09-29T03:14:47.461Z</published>
<updated>2008-09-29T03:14:47.461Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category term="DEMOGRAPHICS"/>
<category scheme="http://schemas.google.com/health/item" term="Demographics"/>
<title type="text">Demographics</title>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/DEMOGRAPHICS/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DDemographics"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/l0a7.FlX3_0"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/l0a7.FlX3_0"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>U5GDAVOxFbexQw3iyvqPYg</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body/>
<Actors>
<Actor>
<Person>
<Name>
<BirthName/>
<CurrentName/>
</Name>
<DateOfBirth>
<Type/>
<ExactDateTime>1984-07-22</ExactDateTime>
</DateOfBirth>
<Gender>
<Text>Male</Text>
</Gender>
</Person>
<Status/>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
</Actor>
</Actors>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/oIBDdgwFLyo</id>
<published>2008-09-29T03:14:47.690Z</published>
<updated>2008-09-29T03:14:47.691Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category term="DEMOGRAPHICS"/>
<category scheme="http://schemas.google.com/health/item" term="FunctionalStatus"/>
<title type="text">FunctionalStatus</title>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/DEMOGRAPHICS/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DFunctionalStatus"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/oIBDdgwFLyo"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/oIBDdgwFLyo"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>W.EJcnhxb7W5M4eR4Tr1YA</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body>
<FunctionalStatus>
<Function>
<Type>
<Text>Pregnancy status</Text>
</Type>
<Description>
<Text>Not pregnant</Text>
</Description>
<Status/>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
</Function>
<Function>
<Type>
<Text>Breastfeeding status</Text>
</Type>
<Description>
<Text>Not breastfeeding</Text>
</Description>
<Status/>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
</Function>
</FunctionalStatus>
</Body>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/wwljIlXuTVg</id>
<published>2008-09-29T03:26:10.080Z</published>
<updated>2008-09-29T03:26:10.081Z</updated>
<category term="MEDICATION"/>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category scheme="http://schemas.google.com/health/item" term="Lipitor"/>
<title type="text"/>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/MEDICATION/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DLipitor"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/wwljIlXuTVg"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/wwljIlXuTVg"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>OrpghzvvbG_YaO5koqT2ug</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body>
<Medications>
<Medication>
<Type/>
<Description/>
<Status>
<Text>ACTIVE</Text>
</Status>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Product>
<ProductName>
<Text>Lipitor</Text>
</ProductName>
<Strength>
<Units/>
<StrengthSequencePosition>0</StrengthSequencePosition>
<VariableStrengthModifier/>
</Strength>
</Product>
<Directions>
<Direction>
<Description/>
<DeliveryMethod/>
<Dose>
<Units/>
<DoseSequencePosition>0</DoseSequencePosition>
<VariableDoseModifier/>
</Dose>
<Route>
<Text>By mouth</Text>
<Code>
<Value>C38288</Value>
<CodingSystem>FDA</CodingSystem>
</Code>
<RouteSequencePosition>0</RouteSequencePosition>
<MultipleRouteModifier/>
</Route>
</Direction>
</Directions>
<Refills/>
</Medication>
</Medications>
</Body>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/dd09TR12SiY</id>
<published>2008-09-29T07:52:17.175Z</published>
<updated>2008-09-29T07:52:17.176Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category scheme="http://schemas.google.com/health/item" term="Malaria"/>
<category term="CONDITION"/>
<title type="text"/>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DMalaria/CONDITION"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/dd09TR12SiY"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/dd09TR12SiY"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>XF99N6X4lpy.jfPUPLMMSQ</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body>
<Problems>
<Problem>
<Type/>
<Description>
<Text>Malaria</Text>
<Code>
<Value>136.9</Value>
<CodingSystem>ICD9_Broader</CodingSystem>
</Code>
<Code>
<Value>084.6</Value>
<CodingSystem>ICD9</CodingSystem>
</Code>
</Description>
<Status>
<Text>ACTIVE</Text>
</Status>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<HealthStatus>
<Description/>
</HealthStatus>
</Problem>
</Problems>
</Body>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/aS0Cf964DPs</id>
<published>2008-09-29T03:14:47.463Z</published>
<updated>2008-09-29T03:14:47.463Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category term="DEMOGRAPHICS"/>
<category scheme="http://schemas.google.com/health/item" term="SocialHistory (Drinking, Smoking)"/>
<title type="text">SocialHistory (Drinking, Smoking)</title>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/DEMOGRAPHICS/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DSocialHistory+%28Drinking%2C+Smoking%29"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/aS0Cf964DPs"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/aS0Cf964DPs"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>kXylGU5YXLBzriv61xPGZQ</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body>
<SocialHistory>
<SocialHistoryElement>
<Type>
<Text>Race</Text>
<Code>
<Value>S15814</Value>
<CodingSystem>HL7</CodingSystem>
</Code>
</Type>
<Description>
<Text>White</Text>
</Description>
<Status/>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Episodes>
<Frequency>
<Units/>
</Frequency>
</Episodes>
</SocialHistoryElement>
</SocialHistory>
</Body>
</ContinuityOfCareRecord>
</entry>
<entry>
<id>https://www.google.com/health/feeds/profile/default/s5lII5xfj_g</id>
<published>2008-09-29T03:14:47.544Z</published>
<updated>2008-09-29T03:14:47.545Z</updated>
<category scheme="http://schemas.google.com/health/item" term="VitalSigns"/>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/health/kinds#profile"/>
<category term="DEMOGRAPHICS"/>
<title type="text">VitalSigns</title>
<content type="html"/>
<link rel="http://schemas.google.com/health/data#complete" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/-/%7Bhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fitem%7DVitalSigns/%7Bhttp%3A%2F%2Fschemas.google.com%2Fg%2F2005%23kind%7Dhttp%3A%2F%2Fschemas.google.com%2Fhealth%2Fkinds%23profile/DEMOGRAPHICS"/>
<link rel="self" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/s5lII5xfj_g"/>
<link rel="edit" type="application/atom+xml" href="https://www.google.com/health/feeds/profile/default/s5lII5xfj_g"/>
<author>
<name>User Name</name>
<email>[email protected]</email>
</author>
<ContinuityOfCareRecord xmlns="urn:astm-org:CCR">
<CCRDocumentObjectID>FTTIiY0TVVj35kZqFFjPjQ</CCRDocumentObjectID>
<Language/>
<DateTime>
<Type/>
</DateTime>
<Patient/>
<Body>
<VitalSigns>
<Result>
<Type/>
<Description/>
<Status/>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Substance/>
<Test>
<Type/>
<Description>
<Text>Height</Text>
</Description>
<Status/>
<TestResult>
<ResultSequencePosition>0</ResultSequencePosition>
<VariableResultModifier/>
<Value>70</Value>
<Units>
<Unit>inches</Unit>
</Units>
</TestResult>
<ConfidenceValue/>
</Test>
</Result>
<Result>
<Type/>
<Description/>
<Status/>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Substance/>
<Test>
<Type/>
<Description>
<Text>Weight</Text>
</Description>
<Status/>
<TestResult>
<ResultSequencePosition>0</ResultSequencePosition>
<VariableResultModifier/>
<Value>2480</Value>
<Units>
<Unit>ounces</Unit>
</Units>
</TestResult>
<ConfidenceValue/>
</Test>
</Result>
<Result>
<Type/>
<Description/>
<Status/>
<Source>
<Actor>
<ActorID>[email protected]</ActorID>
<ActorRole>
<Text>Patient</Text>
</ActorRole>
</Actor>
</Source>
<Substance/>
<Test>
<Type/>
<Description>
<Text>Blood Type</Text>
</Description>
<Status/>
<TestResult>
<ResultSequencePosition>0</ResultSequencePosition>
<VariableResultModifier/>
<Value>O+</Value>
<Units/>
</TestResult>
<ConfidenceValue/>
</Test>
</Result>
</VitalSigns>
</Body>
</ContinuityOfCareRecord>
</entry>
</feed>"""
HEALTH_PROFILE_LIST_ENTRY = """ <entry xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'>
<id>
https://www.google.com/health/feeds/profile/list/vndCn5sdfwdEIY</id>
<updated>1970-01-01T00:00:00.000Z</updated>
<title type='text'>profile name</title>
<content type='text'>vndCn5sdfwdEIY</content>
<link rel='self' type='application/atom+xml'
href='https://www.google.com/health/feeds/profile/list/vndCn5sdfwdEIY' />
<link rel='edit' type='application/atom+xml'
href='https://www.google.com/health/feeds/profile/list/vndCn5sdfwdEIY' />
<author>
<name>[email protected]</name>
</author>
</entry>"""
BOOK_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>"""\
"""<entry xmlns='http://www.w3.org/2005/Atom' xmlns:gbs='http://schemas.google.com/books/2008' xmlns:dc='http://purl.org/dc/terms' xmlns:gd='http://schemas.google.com/g/2005'>"""\
"""<id>http://www.google.com/books/feeds/volumes/b7GZr5Btp30C</id>"""\
"""<updated>2009-04-24T23:35:16.000Z</updated>"""\
"""<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/books/2008#volume'/>"""\
"""<title type='text'>A theory of justice</title>"""\
"""<link rel='http://schemas.google.com/books/2008/thumbnail' type='image/x-unknown' href='http://bks0.books.google.com/books?id=b7GZr5Btp30C&printsec=frontcover&img=1&zoom=5&sig=ACfU3U121bWZsbjBfVwVRSK2o982jJTd1w&source=gbs_gdata'/>"""\
"""<link rel='http://schemas.google.com/books/2008/info' type='text/html' href='http://books.google.com/books?id=b7GZr5Btp30C&ie=ISO-8859-1&source=gbs_gdata'/>"""\
"""<link rel='http://schemas.google.com/books/2008/annotation' type='application/atom+xml' href='http://www.google.com/books/feeds/users/me/volumes'/>"""\
"""<link rel='alternate' type='text/html' href='http://books.google.com/books?id=b7GZr5Btp30C&ie=ISO-8859-1'/>"""\
"""<link rel='self' type='application/atom+xml' href='http://www.google.com/books/feeds/volumes/b7GZr5Btp30C'/>"""\
"""<gbs:embeddability value='http://schemas.google.com/books/2008#embeddable'/>"""\
"""<gbs:openAccess value='http://schemas.google.com/books/2008#disabled'/>"""\
"""<gd:rating min='1' max='5' average='4.00'/>"""\
"""<gbs:viewability value='http://schemas.google.com/books/2008#view_partial'/>"""\
"""<dc:creator>John Rawls</dc:creator>"""\
"""<dc:date>1999</dc:date>"""\
"""<dc:description>p Since it appeared in 1971, John Rawls's i A Theory of Justice /i has become a classic. The author has now revised the original edition to clear up a number of difficulties he and others have found in the original book. /p p Rawls aims to express an essential part of the common core of the democratic tradition--justice as fairness--and to provide an alternative to utilitarianism, which had dominated the Anglo-Saxon tradition of political thought since the nineteenth century. Rawls substitutes the ideal of the social contract as a more satisfactory account of the basic rights and liberties of citizens as free and equal persons. "Each person," writes Rawls, "possesses an inviolability founded on justice that even the welfare of society as a whole cannot override." Advancing the ideas of Rousseau, Kant, Emerson, and Lincoln, Rawls's theory is as powerful today as it was when first published. /p</dc:description>"""\
"""<dc:format>538 pages</dc:format>"""\
"""<dc:identifier>b7GZr5Btp30C</dc:identifier>"""\
"""<dc:identifier>ISBN:0198250541</dc:identifier>"""\
"""<dc:identifier>ISBN:9780198250548</dc:identifier>"""\
"""<dc:language>en</dc:language>"""\
"""<dc:publisher>Oxford University Press</dc:publisher>"""\
"""<dc:title>A theory of justice</dc:title>"""\
"""</entry>"""
BOOK_FEED = """<?xml version='1.0' encoding='UTF-8'?>"""\
"""<feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:gbs='http://schemas.google.com/books/2008' xmlns:dc='http://purl.org/dc/terms' xmlns:gd='http://schemas.google.com/g/2005'>"""\
"""<id>http://www.google.com/books/feeds/volumes</id>"""\
"""<updated>2009-04-24T23:39:47.000Z</updated>"""\
"""<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/books/2008#volume'/>"""\
"""<title type='text'>Search results for 9780198250548</title>"""\
"""<link rel='alternate' type='text/html' href='http://www.google.com'/>"""\
"""<link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml' href='http://www.google.com/books/feeds/volumes'/>"""\
"""<link rel='self' type='application/atom+xml' href='http://www.google.com/books/feeds/volumes?q=9780198250548'/>"""\
"""<author>"""\
"""<name>Google Books Search</name>"""\
"""<uri>http://www.google.com</uri>"""\
"""</author>"""\
"""<generator version='beta'>Google Book Search data API</generator>"""\
"""<openSearch:totalResults>1</openSearch:totalResults>"""\
"""<openSearch:startIndex>1</openSearch:startIndex>"""\
"""<openSearch:itemsPerPage>20</openSearch:itemsPerPage>"""\
"""<entry>"""\
"""<id>http://www.google.com/books/feeds/volumes/b7GZr5Btp30C</id>"""\
"""<updated>2009-04-24T23:39:47.000Z</updated>"""\
"""<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/books/2008#volume'/>"""\
"""<title type='text'>A theory of justice</title>"""\
"""<link rel='http://schemas.google.com/books/2008/thumbnail' type='image/x-unknown' href='http://bks9.books.google.com/books?id=b7GZr5Btp30C&printsec=frontcover&img=1&zoom=5&sig=ACfU3U121bWZsbjBfVwVRSK2o982jJTd1w&source=gbs_gdata'/>"""\
"""<link rel='http://schemas.google.com/books/2008/info' type='text/html' href='http://books.google.com/books?id=b7GZr5Btp30C&dq=9780198250548&ie=ISO-8859-1&source=gbs_gdata'/>"""\
"""<link rel='http://schemas.google.com/books/2008/preview' type='text/html' href='http://books.google.com/books?id=b7GZr5Btp30C&pg=PA494&dq=9780198250548&ie=ISO-8859-1&source=gbs_gdata'/>"""\
"""<link rel='http://schemas.google.com/books/2008/annotation' type='application/atom+xml' href='http://www.google.com/books/feeds/users/me/volumes'/>"""\
"""<link rel='alternate' type='text/html' href='http://books.google.com/books?id=b7GZr5Btp30C&dq=9780198250548&ie=ISO-8859-1'/>"""\
"""<link rel='self' type='application/atom+xml' href='http://www.google.com/books/feeds/volumes/b7GZr5Btp30C'/>"""\
"""<gbs:embeddability value='http://schemas.google.com/books/2008#embeddable'/>"""\
"""<gbs:openAccess value='http://schemas.google.com/books/2008#disabled'/>"""\
"""<gbs:viewability value='http://schemas.google.com/books/2008#view_partial'/>"""\
"""<dc:creator>John Rawls</dc:creator>"""\
"""<dc:date>1999</dc:date>"""\
"""<dc:description>... 9780198250548 ...</dc:description>"""\
"""<dc:format>538 pages</dc:format>"""\
"""<dc:identifier>b7GZr5Btp30C</dc:identifier>"""\
"""<dc:identifier>ISBN:0198250541</dc:identifier>"""\
"""<dc:identifier>ISBN:9780198250548</dc:identifier>"""\
"""<dc:subject>Law</dc:subject>"""\
"""<dc:title>A theory of justice</dc:title>"""\
"""</entry>"""\
"""</feed>"""
MAP_FEED = """<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/" xmlns:batch="http://schemas.google.com/gdata/batch" xmlns:gd="http://schemas.google.com/g/2005" gd:etag="W/"CkIESHg4eSp7ImA9WxJbF08."">
<id>http://maps.google.com/maps/feeds/maps/208825816854482607313</id>
<updated>2009-07-27T18:48:29.631Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/maps/2008#map"/>
<title>My maps</title>
<link rel="alternate" type="text/html" href="http://maps.google.com/maps/ms?msa=1"/>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="http://maps.google.com/maps/feeds/maps/208825816854482607313/full"/>
<link rel="self" type="application/atom+xml" href="http://maps.google.com/maps/feeds/maps/208825816854482607313/full"/>
<link rel="http://schemas.google.com/g/2005#batch" type="application/atom+xml" href="http://maps.google.com/maps/feeds/maps/208825816854482607313/full/batch"/>
<link rel="http://schemas.google.com/g/2005#post" type="application/atom+xml" href="http://maps.google.com/maps/feeds/maps/208825816854482607313/full"/>
<author>
<name>Roman</name>
</author>
<openSearch:totalResults>1</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>1</openSearch:itemsPerPage>
<entry gd:etag="W/"CkIESHg4eSp7ImA9WxJbF08."">
<id>http://maps.google.com/maps/feeds/maps/208825816854482607313/00046fb45f88fa910bcea</id>
<published>2009-07-27T18:46:34.451Z</published>
<updated>2009-07-27T18:48:29.631Z</updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">2009-07-27T18:48:29.631Z</app:edited>
<app:control xmlns:app="http://www.w3.org/2007/app">
<app:draft>yes</app:draft>
</app:control>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/maps/2008#map"/>
<title>Untitled</title>
<summary/>
<content src="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full"/>
<link rel="self" type="application/atom+xml" href="http://maps.google.com/maps/feeds/maps/208825816854482607313/full/00046fb45f88fa910bcea"/>
<link rel="alternate" type="text/html" href="http://maps.google.com/maps/ms?msa=0&msid=208825816854482607313.00046fb45f88fa910bcea"/>
<link rel="edit" type="application/atom+xml" href="http://maps.google.com/maps/feeds/maps/208825816854482607313/full/00046fb45f88fa910bcea"/>
<author>
<name>Roman</name>
</author>
</entry>
</feed>
"""
MAP_ENTRY = """<?xml version="1.0" encoding="UTF-8"?>
<entry xmlns="http://www.w3.org/2005/Atom" xmlns:batch="http://schemas.google.com/gdata/batch" xmlns:gd="http://schemas.google.com/g/2005" gd:etag="W/"CkIESHg4eSp7ImA9WxJbF08."">
<id>http://maps.google.com/maps/feeds/maps/208825816854482607313/00046fb45f88fa910bcea</id>
<published>2009-07-27T18:46:34.451Z</published>
<updated>2009-07-27T18:48:29.631Z</updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">2009-07-27T18:48:29.631Z</app:edited>
<app:control xmlns:app="http://www.w3.org/2007/app">
<app:draft>yes</app:draft>
</app:control>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/maps/2008#map"/>
<title>Untitled</title>
<summary/>
<content src="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full"/>
<link rel="self" type="application/atom+xml" href="http://maps.google.com/maps/feeds/maps/208825816854482607313/full/00046fb45f88fa910bcea"/>
<link rel="alternate" type="text/html" href="http://maps.google.com/maps/ms?msa=0&msid=208825816854482607313.00046fb45f88fa910bcea"/>
<link rel="edit" type="application/atom+xml" href="http://maps.google.com/maps/feeds/maps/208825816854482607313/full/00046fb45f88fa910bcea"/>
<author>
<name>Roman</name>
</author>
</entry>
"""
MAP_FEATURE_FEED = """<?xml version="1.0" encoding="UTF-8"?>
<atom:feed xmlns="http://www.opengis.net/kml/2.2" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/" xmlns:batch="http://schemas.google.com/gdata/batch" xmlns:gd="http://schemas.google.com/g/2005" gd:etag="W/"CkIESHg4eSp7ImA9WxJbF08."">
<atom:id>http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea</atom:id>
<atom:updated>2009-07-27T18:48:29.631Z</atom:updated>
<atom:category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/maps/2008#feature"/>
<atom:title>Untitled</atom:title>
<atom:link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full"/>
<atom:link rel="self" type="application/atom+xml" href="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full"/>
<atom:link rel="http://schemas.google.com/g/2005#batch" type="application/atom+xml" href="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full/batch"/>
<atom:link rel="http://schemas.google.com/g/2005#post" type="application/atom+xml" href="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full"/>
<openSearch:totalResults>4</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>4</openSearch:itemsPerPage>
<atom:entry gd:etag="W/"CkMBRH44fyp7ImA9WxJbF08."">
<atom:id>http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/00046fb4632573b19e0b7</atom:id>
<atom:published>2009-07-27T18:47:35.037Z</atom:published>
<atom:updated>2009-07-27T18:47:35.037Z</atom:updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">2009-07-27T18:47:35.037Z</app:edited>
<atom:category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/maps/2008#feature"/>
<atom:title>Some feature title</atom:title>
<atom:content type="application/vnd.google-earth.kml+xml">
<Placemark>
<name>Some feature title</name>
<description><![CDATA[<div dir="ltr">Some feature content</div>]]></description>
<Style>
<IconStyle>
<Icon>
<href>http://maps.gstatic.com/intl/en_us/mapfiles/ms/micons/ylw-pushpin.png</href>
</Icon>
</IconStyle>
</Style>
<Point>
<coordinates>-113.818359,41.442726,0.0</coordinates>
</Point>
</Placemark>
</atom:content>
<atom:link rel="self" type="application/atom+xml" href="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full/00046fb4632573b19e0b7"/>
<atom:link rel="edit" type="application/atom+xml" href="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full/00046fb4632573b19e0b7"/>
<atom:author>
<atom:name>Roman</atom:name>
</atom:author>
<atom:contributor>
<atom:name>Roman</atom:name>
</atom:contributor>
</atom:entry>
<atom:entry gd:etag="W/"CkIEQ38zfCp7ImA9WxJbF08."">
<atom:id>http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/00046fb46325e839a11e6</atom:id>
<atom:published>2009-07-27T18:47:35.067Z</atom:published>
<atom:updated>2009-07-27T18:48:22.184Z</atom:updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">2009-07-27T18:48:22.184Z</app:edited>
<atom:category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/maps/2008#feature"/>
<atom:title>A cool poly!</atom:title>
<atom:content type="application/vnd.google-earth.kml+xml">
<Placemark>
<name>A cool poly!</name>
<description><![CDATA[<div dir="ltr">And a description</div>]]></description>
<Style>
<LineStyle>
<color>FF0066FF</color>
<width>3</width>
</LineStyle>
<PolyStyle>
<color>730099FF</color>
<fill>1</fill>
<outline>1</outline>
</PolyStyle>
</Style>
<Polygon>
<outerBoundaryIs>
<LinearRing>
<tessellate>1</tessellate>
<coordinates>-109.775391,47.457809,0.0 -99.755859,51.508742,0.0 -92.900391,48.04871,0.0 -92.8125,44.339565,0.0 -95.273437,44.402392,0.0 -97.207031,46.619261,0.0 -100.898437,46.073231,0.0 -102.480469,43.068888,0.0 -110.742187,45.274886,0.0 -109.775391,47.457809,0.0 </coordinates>
</LinearRing>
</outerBoundaryIs>
</Polygon>
</Placemark>
</atom:content>
<atom:link rel="self" type="application/atom+xml" href="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full/00046fb46325e839a11e6"/>
<atom:link rel="edit" type="application/atom+xml" href="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full/00046fb46325e839a11e6"/>
<atom:author>
<atom:name>Roman</atom:name>
</atom:author>
<atom:contributor>
<atom:name>Roman</atom:name>
</atom:contributor>
</atom:entry>
<atom:entry gd:etag="W/"CkIEQ38yfCp7ImA9WxJbF08."">
<atom:id>http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/00046fb465f5002e56b7a</atom:id>
<atom:published>2009-07-27T18:48:22.194Z</atom:published>
<atom:updated>2009-07-27T18:48:22.194Z</atom:updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">2009-07-27T18:48:22.194Z</app:edited>
<atom:category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/maps/2008#feature"/>
<atom:title>New Mexico</atom:title>
<atom:content type="application/vnd.google-earth.kml+xml">
<Placemark>
<name>New Mexico</name>
<description><![CDATA[<div dir="ltr">Word.</div>]]></description>
<Style>
<LineStyle>
<color>73009900</color>
<width>5</width>
</LineStyle>
</Style>
<LineString>
<tessellate>1</tessellate>
<coordinates>-110.039062,37.788081,0.0 -103.183594,37.926868,0.0 -103.183594,32.472695,0.0 -108.896484,32.026706,0.0 -109.863281,31.203405,0.0 -110.039062,37.788081,0.0 </coordinates>
</LineString>
</Placemark>
</atom:content>
<atom:link rel="self" type="application/atom+xml" href="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full/00046fb465f5002e56b7a"/>
<atom:link rel="edit" type="application/atom+xml" href="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full/00046fb465f5002e56b7a"/>
<atom:author>
<atom:name>Roman</atom:name>
</atom:author>
<atom:contributor>
<atom:name>Roman</atom:name>
</atom:contributor>
</atom:entry>
</atom:feed>
"""
MAP_FEATURE_ENTRY = """<?xml version="1.0" encoding="UTF-8"?>
<atom:entry xmlns="http://www.opengis.net/kml/2.2" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:batch="http://schemas.google.com/gdata/batch" xmlns:gd="http://schemas.google.com/g/2005" gd:etag="W/"CkMBRH44fyp7ImA9WxJbF08."">
<atom:id>http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/00046fb4632573b19e0b7</atom:id>
<atom:published>2009-07-27T18:47:35.037Z</atom:published>
<atom:updated>2009-07-27T18:47:35.037Z</atom:updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">2009-07-27T18:47:35.037Z</app:edited>
<atom:category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/maps/2008#feature"/>
<atom:title>Some feature title</atom:title>
<atom:content type="application/vnd.google-earth.kml+xml">
<Placemark>
<name>Some feature title</name>
<description><![CDATA[<div dir="ltr">Some feature content</div>]]></description>
<Style>
<IconStyle>
<Icon>
<href>http://maps.gstatic.com/intl/en_us/mapfiles/ms/micons/ylw-pushpin.png</href>
</Icon>
</IconStyle>
</Style>
<Point>
<coordinates>-113.818359,41.442726,0.0</coordinates>
</Point>
</Placemark>
</atom:content>
<atom:link rel="self" type="application/atom+xml" href="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full/00046fb4632573b19e0b7"/>
<atom:link rel="edit" type="application/atom+xml" href="http://maps.google.com/maps/feeds/features/208825816854482607313/00046fb45f88fa910bcea/full/00046fb4632573b19e0b7"/>
<atom:author>
<atom:name>Roman</atom:name>
</atom:author>
<atom:contributor>
<atom:name>Roman</atom:name>
</atom:contributor>
</atom:entry>
"""
MAP_FEATURE_KML = """<Placemark>
<name>Some feature title</name>
<description><![CDATA[<div dir="ltr">Some feature content</div>]]></description>
<Style>
<IconStyle>
<Icon>
<href>http://maps.gstatic.com/intl/en_us/mapfiles/ms/micons/ylw-pushpin.png</href>
</Icon>
</IconStyle>
</Style>
<Point>
<coordinates>-113.818359,41.442726,0.0</coordinates>
</Point>
</Placemark>
"""
SITES_LISTPAGE_ENTRY = '''<?xml version="1.0" encoding="UTF-8"?>
<entry xmlns="http://www.w3.org/2005/Atom">
<id>http:///sites.google.com/feeds/content/site/gdatatestsite/1712987567114738703</id>
<updated>2009-06-16T00:37:37.393Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#listpage"/>
<title type="text">ListPagesTitle</title>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
<jot:section xmlns:jot="http://www.google.com/ns/jotspot/srvtmpl/" target="content-1">
<div dir="ltr">stuff go here<div>asdf</div>
<div>sdf</div>
<div>
<br/>
</div>
</div>
</jot:section>
</div>
</content>
<link rel="self" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/1712987567114738703"/>
<link rel="edit" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/1712987567114738703"/>
<author>
<name>Test User</name>
<email>[email protected]</email>
</author>
<gs:worksheet xmlns:gs="http://schemas.google.com/spreadsheets/2006" name="listpage"/>
<gs:header xmlns:gs="http://schemas.google.com/spreadsheets/2006" row="1"/>
<gs:data xmlns:gs="http://schemas.google.com/spreadsheets/2006" startRow="2">
<gs:column index="A" name="Owner"/>
<gs:column index="B" name="Description"/>
<gs:column index="C" name="Resolution"/>
<gs:column index="D" name="Complete"/>
<gs:column index="E" name="MyCo"/>
</gs:data>
<gd:feedLink xmlns:gd="http://schemas.google.com/g/2005" href="http:///sites.google.com/feeds/content/site/gdatatestsite?parent=abc"/>
</entry>'''
SITES_COMMENT_ENTRY = '''<?xml version="1.0" encoding="UTF-8"?>
<entry xmlns="http://www.w3.org/2005/Atom">
<id>http://sites.google.com/feeds/content/site/gdatatestsite/abc123</id>
<updated>2009-06-15T18:40:22.407Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#comment"/>
<title type="text"/>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">first comment</div>
</content>
<link rel="http://schemas.google.com/sites/2008#parent" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123parent"/>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="edit" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<author>
<name>Test User</name>
<email>[email protected]</email>
</author>
<thr:in-reply-to xmlns:thr="http://purl.org/syndication/thread/1.0" href="http://sites.google.com/site/gdatatestsite/annoucment/testpost" ref="http://sites.google.com/feeds/content/site/gdatatestsite/abc123" source="http://sites.google.com/feeds/content/site/gdatatestsite" type="text/html"/>
</entry>'''
SITES_LISTITEM_ENTRY = '''<?xml version="1.0" encoding="UTF-8"?>
<entry xmlns="http://www.w3.org/2005/Atom">
<id>http://sites.google.com/feeds/content/site/gdatatestsite/abc123</id>
<updated>2009-06-16T00:34:55.633Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#listitem"/>
<title type="text"/>
<link rel="http://schemas.google.com/sites/2008#parent" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123def"/>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="edit" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<author>
<name>Test User</name>
<email>[email protected]</email>
</author>
<gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="A" name="Owner">test value</gs:field>
<gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="B" name="Description">test</gs:field>
<gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="C" name="Resolution">90</gs:field>
<gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="D" name="Complete"/>
<gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="E" name="MyCo">2009-05-31</gs:field>
</entry>'''
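# A minimal illustrative helper (not in the original module) mapping the
# gs:field elements of a listitem entry such as SITES_LISTITEM_ENTRY to a
# {column name: cell text} dict; empty cells come back as None.
def _listitem_fields(entry_xml):
    import xml.etree.ElementTree as ElementTree
    root = ElementTree.fromstring(entry_xml)
    return dict((field.get('name'), field.text) for field in
                root.findall('{http://schemas.google.com/spreadsheets/2006}field'))
# e.g. _listitem_fields(SITES_LISTITEM_ENTRY)['Owner'] == 'test value'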
SITES_CONTENT_FEED = '''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/"
xmlns:sites="http://schemas.google.com/sites/2008" xmlns:gs="http://schemas.google.com/spreadsheets/2006"
xmlns:dc="http://purl.org/dc/terms" xmlns:batch="http://schemas.google.com/gdata/batch"
xmlns:gd="http://schemas.google.com/g/2005" xmlns:thr="http://purl.org/syndication/thread/1.0">
<id>http://sites.google.com/feeds/content/site/gdatatestsite</id>
<updated>2009-06-15T21:35:43.282Z</updated>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite"/>
<link rel="http://schemas.google.com/g/2005#post" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite"/>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite"/>
<generator version="1" uri="http://sites.google.com">Google Sites</generator>
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>http:///sites.google.com/feeds/content/site/gdatatestsite/1712987567114738703</id>
<updated>2009-06-16T00:37:37.393Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#listpage"/>
<title type="text">ListPagesTitle</title>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
<jot:section xmlns:jot="http://www.google.com/ns/jotspot/srvtmpl/" target="content-1">
<div dir="ltr">stuff go here<div>asdf</div>
<div>sdf</div>
<div>
<br/>
</div>
</div>
</jot:section>
</div>
</content>
<link rel="alternate" type="text/html" href="http:///sites.google.com/site/gdatatestsite/asdfsdfsdf"/>
<link rel="self" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/1712987567114738703"/>
<link rel="edit" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/1712987567114738703"/>
<link rel="http://schemas.google.com/sites/2008#revision" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/12345"/>
<author>
<name>Test User</name>
<email>[email protected]</email>
</author>
<gs:worksheet xmlns:gs="http://schemas.google.com/spreadsheets/2006" name="listpage"/>
<gs:header xmlns:gs="http://schemas.google.com/spreadsheets/2006" row="1"/>
<gs:data xmlns:gs="http://schemas.google.com/spreadsheets/2006" startRow="2">
<gs:column index="A" name="Owner"/>
<gs:column index="B" name="Description"/>
<gs:column index="C" name="Resolution"/>
<gs:column index="D" name="Complete"/>
<gs:column index="E" name="MyCo"/>
</gs:data>
<sites:revision>2</sites:revision>
<gd:deleted/>
<sites:pageName>home</sites:pageName>
<gd:feedLink xmlns:gd="http://schemas.google.com/g/2005" href="http://sites.google.com/feeds/content/site/gdatatestsite?parent=abc"/>
</entry>
<entry>
<id>http://sites.google.com/feeds/content/site/gdatatestsite/abc123</id>
<updated>2009-06-17T00:40:37.082Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#filecabinet"/>
<title type="text">filecabinet</title>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
<jot:section xmlns:jot="http://www.google.com/ns/jotspot/srvtmpl/" target="content-1">
<div dir="ltr">sdf</div>
</jot:section>
</div>
</content>
<link rel="http://schemas.google.com/sites/2008#parent" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="edit" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="http://schemas.google.com/sites/2008#revision" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<author>
<name>Test User</name>
<email>[email protected]</email>
</author>
<gd:feedLink xmlns:gd="http://schemas.google.com/g/2005" href="http://sites.google.com/feeds/content/site/gdatatestsite?parent=8472761212299270332"/>
</entry>
<entry>
<id>http://sites.google.com/feeds/content/site/gdatatestsite/abc123</id>
<updated>2009-06-16T00:34:55.633Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#listitem"/>
<title type="text"/>
<link rel="http://schemas.google.com/sites/2008#parent" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123def"/>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="edit" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="http://schemas.google.com/sites/2008#revision" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<author>
<name>Test User</name>
<email>[email protected]</email>
</author>
<gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="A" name="Owner">test value</gs:field>
<gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="B" name="Description">test</gs:field>
<gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="C" name="Resolution">90</gs:field>
<gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="D" name="Complete"/>
<gs:field xmlns:gs="http://schemas.google.com/spreadsheets/2006" index="E" name="MyCo">2009-05-31</gs:field>
</entry>
<entry>
<id>http://sites.google.com/feeds/content/site/gdatatestsite/abc123</id>
<updated>2009-06-15T18:40:32.922Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#attachment"/>
<title type="text">testFile.ods</title>
<link rel="alternate" type="application/vnd.oasis.opendocument.spreadsheet" href="http://sites.google.com/feeds/SOMELONGURL"/>
<link rel="http://schemas.google.com/sites/2008#parent" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="edit" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="http://schemas.google.com/sites/2008#revision" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<author>
<name>Test User</name>
<email>[email protected]</email>
</author>
<gd:deleted/>
<sites:pageName>something else</sites:pageName>
</entry>
<entry>
<id>http://sites.google.com/feeds/content/site/gdatatestsite/abc123</id>
<updated>2009-06-15T18:40:22.407Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#comment"/>
<title type="text"/>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">first comment</div>
</content>
<link rel="http://schemas.google.com/sites/2008#parent" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="edit" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="http://schemas.google.com/sites/2008#revision" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<author>
<name>Test User</name>
<email>[email protected]</email>
</author>
<thr:in-reply-to xmlns:thr="http://purl.org/syndication/thread/1.0" href="http://sites.google.com/site/gdatatestsite/annoucment/testpost" ref="http://sites.google.com/feeds/content/site/gdatatestsite/abc123" source="http://sites.google.com/feeds/content/site/gdatatestsite" type="text/html"/>
</entry>
<entry>
<id>http://sites.google.com/feeds/content/site/gdatatestsite/abc123</id>
<updated>2009-06-15T18:40:16.388Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#announcement"/>
<title type="text">TestPost</title>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
<jot:section xmlns:jot="http://www.google.com/ns/jotspot/srvtmpl/" target="content-1">
<div dir="ltr">content goes here</div>
</jot:section>
</div>
</content>
<link rel="http://schemas.google.com/sites/2008#parent" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="edit" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="http://schemas.google.com/sites/2008#revision" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<author>
<name>Test User</name>
<email>[email protected]</email>
</author>
</entry>
<entry>
<id>http://sites.google.com/feeds/content/site/gdatatestsite/abc123</id>
<updated>2009-06-12T23:37:59.417Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#webpage"/>
<title type="text">Home</title>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
<jot:section xmlns:jot="http://www.google.com/ns/jotspot/srvtmpl/" target="content-1">
<div dir="ltr">Some Content goes here<div>
<br/>
</div>
<div>
<jot:embed height="300" id="4981865780428052" props="align:left;width:250;maxDepth:6" src="http://www.google.com/chart?SOMELONGURL" style="display: block; text-align: left; " type="toc" width="250"/>
<br/>
</div>
</div>
</jot:section>
</div>
</content>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="edit" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="http://schemas.google.com/sites/2008#revision" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<author>
<name>Test User</name>
<email>[email protected]</email>
</author>
</entry>
<entry>
<id>http://sites.google.com/feeds/content/site/gdatatestsite/2639323850129333500</id>
<updated>2009-06-12T23:32:09.191Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#announcementspage"/>
<title type="text">annoucment</title>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
</div>
</content>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="edit" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<link rel="http://schemas.google.com/sites/2008#revision" type="application/atom+xml" href="http:///sites.google.com/feeds/content/site/gdatatestsite/abc123"/>
<author>
<name>Test User</name>
<email>[email protected]</email>
</author>
<gd:feedLink xmlns:gd="http://schemas.google.com/g/2005" href="http://sites.google.com/feeds/content/site/gdatatestsite?parent=abc123"/>
</entry>
</feed>'''
SITES_ACTIVITY_FEED = '''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/">
<id>http://sites.google.com/feeds/activity/site/siteName</id>
<updated>2009-08-19T05:46:01.503Z</updated>
<title>Activity</title>
<link rel="alternate" type="text/html" href="http://sites.google.com/a/site/siteName/system/app/pages/recentChanges"/>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="http://sites.google.com/feeds/activity/site/siteName"/>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/activity/site/siteName"/>
<generator version="1" uri="http://sites.google.com">Google Sites</generator>
<openSearch:startIndex>1</openSearch:startIndex>
<entry xmlns:gd="http://schemas.google.com/g/2005" gd:etag="W/"DUENSH0zfyl7ImA9WxNTFEs."">
<id>http://sites.google.com/feeds/activity/site/siteName/197441951793148343</id>
<updated>2009-08-17T00:08:19.387Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#deletion" label="deletion"/>
<title>NewWebpage3</title>
<summary type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">User deleted <a href="http://sites.google.com/site/siteName/newwebpage">NewWebpage3</a>
</div>
</summary>
<link rel="http://schemas.google.com/sites/2008#revision" type="application/atom+xml" href="http://sites.google.com/feeds/revision/site/siteName/6397361387376148502"/>
<link rel="http://schemas.google.com/sites/2008#current" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/siteName/6397361387376148502"/>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/activity/site/siteName/197441951793148343"/>
<author>
<name>User</name>
<email>[email protected]</email>
</author>
</entry>
<entry xmlns:gd="http://schemas.google.com/g/2005" gd:etag="W/"DUEMQnk6eSl7ImA9WxNTFEs."">
<id>http://sites.google.com/feeds/activity/site/siteName/7299542210274956360</id>
<updated>2009-08-17T00:08:03.711Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#edit" label="edit"/>
<title>NewWebpage3</title>
<summary type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">User edited <a href="http://sites.google.com/site/siteName/newwebpage">NewWebpage3</a>
</div>
</summary>
<link rel="http://schemas.google.com/sites/2008#revision" type="application/atom+xml" href="http://sites.google.com/feeds/revision/site/siteName/6397361387376148502"/>
<link rel="http://schemas.google.com/sites/2008#current" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/siteName/6397361387376148502"/>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/activity/site/siteName/7299542210274956360"/>
<author>
<name>User</name>
<email>[email protected]</email>
</author>
</entry>
</feed>'''
SITES_REVISION_FEED = '''
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/" xmlns:sites="http://schemas.google.com/sites/2008" xmlns:gs="http://schemas.google.com/spreadsheets/2006" xmlns:dc="http://purl.org/dc/terms" xmlns:batch="http://schemas.google.com/gdata/batch" xmlns:gd="http://schemas.google.com/g/2005" xmlns:thr="http://purl.org/syndication/thread/1.0">
<id>http://sites.google.com/feeds/revision/site/siteName/2947510322163358574</id>
<updated>2009-08-19T06:20:18.151Z</updated>
<title>Revisions</title>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="http://sites.google.com/feeds/revision/2947510322163358574"/>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/revision/site/siteName/2947510322163358574"/>
<generator version="1" uri="http://sites.google.com">Google Sites</generator>
<openSearch:startIndex>1</openSearch:startIndex>
<entry gd:etag="W/"DEQNRXY-fil7ImA9WxNTFkg."">
<id>http://sites.google.com/feeds/revision/site/siteName/2947510322163358574/1</id>
<updated>2009-08-19T04:33:14.856Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/sites/2008#comment" label="comment"/>
<title/>
<content type="xhtml">
<div xmlns="http://www.w3.org/1999/xhtml">
<table cellspacing="0" class="sites-layout-name-one-column sites-layout-hbox">
<tbody>
<tr>
<td class="sites-layout-tile sites-tile-name-content-1">testcomment</td>
</tr>
</tbody>
</table>
</div>
</content>
<link rel="http://schemas.google.com/sites/2008#parent" type="application/atom+xml" href="http://sites.google.com/feeds/content/site/siteName/54395424125706119"/>
<link rel="alternate" type="text" href="http://sites.google.com/site/system/app/pages/admin/compare?wuid=wuid%3Agx%3A28e7a9057c581b6e&rev1=1"/>
<link rel="self" type="application/atom+xml" href="http://sites.google.com/feeds/revision/site/siteName/2947510322163358574/1"/>
<author>
<name>User</name>
<email>[email protected]</email>
</author>
<thr:in-reply-to href="http://sites.google.com/site/siteName/code/js" ref="http://sites.google.com/feeds/content/site/siteName/54395424125706119" source="http://sites.google.com/feeds/content/google.com/siteName" type="text/html;charset=UTF-8"/>
<sites:revision>1</sites:revision>
</entry>
</feed>'''
SITES_SITE_FEED = '''
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/" xmlns:gAcl="http://schemas.google.com/acl/2007" xmlns:sites="http://schemas.google.com/sites/2008" xmlns:gs="http://schemas.google.com/spreadsheets/2006" xmlns:dc="http://purl.org/dc/terms" xmlns:batch="http://schemas.google.com/gdata/batch" xmlns:gd="http://schemas.google.com/g/2005" xmlns:thr="http://purl.org/syndication/thread/1.0">
<id>https://sites.google.com/feeds/site/example.com</id>
<updated>2009-12-09T01:05:54.631Z</updated>
<title>Site</title>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="https://sites.google.com/feeds/site/example.com"/>
<link rel="http://schemas.google.com/g/2005#post" type="application/atom+xml" href="https://sites.google.com/feeds/site/example.com"/>
<link rel="self" type="application/atom+xml" href="https://sites.google.com/feeds/site/example.com"/>
<generator version="1" uri="http://sites.google.com">Google Sites</generator>
<openSearch:startIndex>1</openSearch:startIndex>
<entry gd:etag="W/"DkIHQH4_eCl7I2A9WxNaF0Q."">
<id>https://sites.google.com/feeds/site/example.com/new-test-site</id>
<updated>2009-12-02T22:55:31.040Z</updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">2009-12-02T22:55:31.040Z</app:edited>
<title>New Test Site</title>
<summary>A new site to hold memories</summary>
<link rel="alternate" type="text/html" href="http://sites.google.com/a/example.com/new-test-site/"/>
<link rel="http://schemas.google.com/sites/2008#source" type="application/atom+xml" href="http://sites.google.com/feeds/site/example.com/source-site"/>
<link rel="http://schemas.google.com/acl/2007#accessControlList" type="application/atom+xml" href="http://sites.google.com/feeds/acl/site/example.com/new-test-site"/>
<link rel="self" type="application/atom+xml" href="https://sites.google.com/feeds/site/example.com/new-test-site"/>
<link rel="edit" type="application/atom+xml" href="https://sites.google.com/feeds/site/example.com/new-test-site"/>
<sites:siteName>new-test-site</sites:siteName>
<sites:theme>iceberg</sites:theme>
</entry>
<entry gd:etag="W/"CE8MQH48fyl7I2A9WxNaGUo."">
<id>https://sites.google.com/feeds/site/example.com/newautosite2</id>
<updated>2009-12-05T00:28:01.077Z</updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">2009-12-05T00:28:01.077Z</app:edited>
<title>newAutoSite3</title>
<summary>A new site to hold memories2</summary>
<link rel="alternate" type="text/html" href="http://sites.google.com/a/example.com/newautosite2/"/>
<link rel="http://schemas.google.com/acl/2007#accessControlList" type="application/atom+xml" href="http://sites.google.com/feeds/acl/site/examp.e.com/newautosite2"/>
<link rel="self" type="application/atom+xml" href="https://sites.google.com/feeds/site/example.com/newautosite2"/>
<link rel="edit" type="application/atom+xml" href="https://sites.google.com/feeds/site/example.com/newautosite2"/>
<sites:siteName>newautosite2</sites:siteName>
<sites:theme>default</sites:theme>
</entry>
</feed>'''
SITES_ACL_FEED = '''
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearch/1.1/" xmlns:gAcl="http://schemas.google.com/acl/2007" xmlns:sites="http://schemas.google.com/sites/2008" xmlns:gs="http://schemas.google.com/spreadsheets/2006" xmlns:dc="http://purl.org/dc/terms" xmlns:batch="http://schemas.google.com/gdata/batch" xmlns:gd="http://schemas.google.com/g/2005" xmlns:thr="http://purl.org/syndication/thread/1.0">
<id>https://sites.google.com/feeds/acl/site/example.com/new-test-site</id>
<updated>2009-12-09T01:24:59.080Z</updated>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/acl/2007#accessRule"/>
<title>Acl</title>
<link rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" href="https://sites.google.com/feeds/acl/site/example.com/new-test-site"/>
<link rel="http://schemas.google.com/g/2005#post" type="application/atom+xml" href="https://sites.google.com/feeds/acl/site/example.com/new-test-site"/>
<link rel="self" type="application/atom+xml" href="https://sites.google.com/feeds/acl/site/example.com/new-test-site"/>
<generator version="1" uri="http://sites.google.com">Google Sites</generator>
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>https://sites.google.com/feeds/acl/site/google.com/new-test-site/user%3Auser%40example.com</id>
<updated>2009-12-09T01:24:59.080Z</updated>
<app:edited xmlns:app="http://www.w3.org/2007/app">2009-12-09T01:24:59.080Z</app:edited>
<category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/acl/2007#accessRule"/>
<link rel="self" type="application/atom+xml" href="https://sites.google.com/feeds/acl/site/example.com/new-test-site/user%3Auser%40example.com"/>
<link rel="edit" type="application/atom+xml" href="https://sites.google.com/feeds/acl/site/example.com/new-test-site/user%3Auser%40example.com"/>
<gAcl:scope type="user" value="[email protected]"/>
<gAcl:role value="owner"/>
</entry>
</feed>'''
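# A minimal sketch (illustrative, not part of the original fixtures) of how a
# test might flatten the access-control rules in SITES_ACL_FEED into
# (scope type, scope value, role) tuples.
def _acl_rules(feed_xml):
    import xml.etree.ElementTree as ElementTree
    atom = '{http://www.w3.org/2005/Atom}'
    gacl = '{http://schemas.google.com/acl/2007}'
    root = ElementTree.fromstring(feed_xml)
    rules = []
    for entry in root.findall(atom + 'entry'):
        scope = entry.find(gacl + 'scope')
        role = entry.find(gacl + 'role')
        rules.append((scope.get('type'), scope.get('value'), role.get('value')))
    return rules
# e.g. _acl_rules(SITES_ACL_FEED) == [('user', '[email protected]', 'owner')]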
ANALYTICS_ACCOUNT_FEED_old = '''
<feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:dxp='http://schemas.google.com/analytics/2009'>
<id>http://www.google.com/analytics/feeds/accounts/[email protected]</id>
<updated>2009-06-25T03:55:22.000-07:00</updated>
<title type='text'>Profile list for [email protected]</title>
<link rel='self' type='application/atom+xml' href='http://www.google.com/analytics/feeds/accounts/default'/>
<author>
<name>Google Analytics</name>
</author>
<generator version='1.0'>Google Analytics</generator>
<openSearch:totalResults>12</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>12</openSearch:itemsPerPage>
<entry>
<id>http://www.google.com/analytics/feeds/accounts/ga:1174</id>
<updated>2009-06-25T03:55:22.000-07:00</updated>
<title type='text'>www.googlestore.com</title>
<link rel='alternate' type='text/html' href='http://www.google.com/analytics'/>
<dxp:tableId>ga:1174</dxp:tableId>
<dxp:property name='ga:accountId' value='30481'/>
<dxp:property name='ga:accountName' value='Google Store'/>
<dxp:property name='ga:profileId' value='1174'/>
<dxp:property name='ga:webPropertyId' value='UA-30481-1'/>
<dxp:property name='ga:currency' value='USD'/>
<dxp:property name='ga:timezone' value='America/Los_Angeles'/>
</entry>
</feed>'''
ANALYTICS_ACCOUNT_FEED = '''
<feed xmlns='http://www.w3.org/2005/Atom' xmlns:dxp='http://schemas.google.com/analytics/2009' xmlns:ga='http://schemas.google.com/ga/2009' xmlns:openSearch='http://a9.com/-/spec/opensearch/1.1/' xmlns:gd='http://schemas.google.com/g/2005' gd:etag='W/&quot;DE8CRH47eCp7I2A9WxNWFU4.&quot;' gd:kind='analytics#accounts'>
<id>http://www.google.com/analytics/feeds/accounts/[email protected]</id>
<updated>2009-10-14T09:14:25.000-07:00</updated>
<title>Profile list for [email protected]</title>
<link rel='self' type='application/atom+xml' href='http://www.google.com/analytics/feeds/accounts/default?v=2'/>
<author>
<name>Google Analytics</name>
</author>
<generator version='1.0'>Google Analytics</generator>
<openSearch:totalResults>37</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>37</openSearch:itemsPerPage>
<dxp:segment id='gaid::-11' name='Visits from iPhones'>
<dxp:definition>ga:operatingSystem==iPhone</dxp:definition>
</dxp:segment>
<entry gd:etag='W/"DE8CRH47eCp7I2A9WxNWFU4."' gd:kind='analytics#account'>
<id>http://www.google.com/analytics/feeds/accounts/ga:1174</id>
<updated>2009-10-14T09:14:25.000-07:00</updated>
<title>www.googlestore.com</title>
<link rel='alternate' type='text/html' href='http://www.google.com/analytics'/>
<ga:goal active='true' name='Completing Order' number='1' value='10.0'>
<ga:destination caseSensitive='false' expression='/purchaseComplete.html' matchType='regex' step1Required='false'>
<ga:step name='View Product Categories' number='1' path='/Apps|Accessories|Fun|Kid\+s|Office'/>
<ga:step name='View Product' number='2' path='/Apps|Accessories|Fun|Kid\+s|Office|Wearables'/>
</ga:destination>
</ga:goal>
<ga:goal active='true' name='Browsed my site over 5 minutes' number='6' value='0.0'>
<ga:engagement comparison='&gt;' thresholdValue='300' type='timeOnSite'/>
</ga:goal>
<ga:goal active='true' name='Visited &gt; 4 pages' number='7' value='0.25'>
<ga:engagement comparison='&gt;' thresholdValue='4' type='pagesVisited'/>
</ga:goal>
<ga:customVariable index='1' name='My Custom Variable' scope='3'/>
<ga:customVariable index='2' name='My Seconds Variable' scope='1'/>
<dxp:property name='ga:accountId' value='30481'/>
<dxp:property name='ga:accountName' value='Google Store'/>
<dxp:property name='ga:profileId' value='1174'/>
<dxp:property name='ga:webPropertyId' value='UA-30481-1'/>
<dxp:property name='ga:currency' value='USD'/>
<dxp:property name='ga:timezone' value='America/Los_Angeles'/>
<dxp:tableId>ga:1174</dxp:tableId>
</entry>
</feed>'''
ANALYTICS_DATA_FEED = '''
<feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:dxp='http://schemas.google.com/analytics/2009'>
<id>http://www.google.com/analytics/feeds/data?ids=ga:1174&amp;dimensions=ga:medium,ga:source&amp;metrics=ga:bounces,ga:visits&amp;filters=ga:medium%3D%3Dreferral&amp;start-date=2008-10-01&amp;end-date=2008-10-31</id>
<updated>2008-10-31T16:59:59.999-07:00</updated>
<title type='text'>Google Analytics Data for Profile 1174</title>
<link rel='self' type='application/atom+xml' href='http://www.google.com/analytics/feeds/data?max-results=5&amp;sort=-ga%3Avisits&amp;end-date=2008-10-31&amp;start-date=2008-10-01&amp;metrics=ga%3Avisits%2Cga%3Abounces&amp;ids=ga%3A1174&amp;dimensions=ga%3Asource%2Cga%3Amedium&amp;filters=ga%3Amedium%3D%3Dreferral'/>
<link rel='next' type='application/atom+xml' href='http://www.google.com/analytics/feeds/data?start-index=6&amp;max-results=5&amp;sort=-ga%3Avisits&amp;end-date=2008-10-31&amp;start-date=2008-10-01&amp;metrics=ga%3Avisits%2Cga%3Abounces&amp;ids=ga%3A1174&amp;dimensions=ga%3Asource%2Cga%3Amedium&amp;filters=ga%3Amedium%3D%3Dreferral'/>
<author>
<name>Google Analytics</name>
</author>
<generator version='1.0'>Google Analytics</generator>
<openSearch:totalResults>6451</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>2</openSearch:itemsPerPage>
<dxp:startDate>2008-10-01</dxp:startDate>
<dxp:endDate>2008-10-31</dxp:endDate>
<dxp:segment id='gaid::-11' name='Visits from iPhones'>
<dxp:definition>ga:operatingSystem==iPhone</dxp:definition>
</dxp:segment>
<dxp:aggregates>
<dxp:metric confidenceInterval='0.0' name='ga:visits' type='integer' value='136540'/>
<dxp:metric confidenceInterval='0.0' name='ga:bounces' type='integer' value='101535'/>
</dxp:aggregates>
<dxp:containsSampledData>true</dxp:containsSampledData>
<dxp:dataSource>
<dxp:tableId>ga:1174</dxp:tableId>
<dxp:tableName>www.googlestore.com</dxp:tableName>
<dxp:property name='ga:profileId' value='1174'/>
<dxp:property name='ga:webPropertyId' value='UA-30481-1'/>
<dxp:property name='ga:accountName' value='Google Store'/>
</dxp:dataSource>
<entry>
<id>http://www.google.com/analytics/feeds/data?ids=ga:1174&ga:medium=referral&ga:source=blogger.com&filters=ga:medium%3D%3Dreferral&start-date=2008-10-01&end-date=2008-10-31</id>
<updated>2008-10-30T17:00:00.001-07:00</updated>
<title type='text'>ga:source=blogger.com | ga:medium=referral</title>
<link rel='alternate' type='text/html' href='http://www.google.com/analytics'/>
<dxp:dimension name='ga:source' value='blogger.com'/>
<dxp:dimension name='ga:medium' value='referral'/>
<dxp:metric confidenceInterval='0.0' name='ga:visits' type='integer' value='68140'/>
<dxp:metric confidenceInterval='0.0' name='ga:bounces' type='integer' value='61095'/>
</entry>
</feed>'''
ANALYTICS_MGMT_PROFILE_FEED = '''
<feed xmlns='http://www.w3.org/2005/Atom' xmlns:dxp='http://schemas.google.com/analytics/2009' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:gd='http://schemas.google.com/g/2005' gd:kind='analytics#profiles'>
<id>https://www.google.com/analytics/feeds/datasources/ga/accounts/~all/webproperties/~all/profiles</id>
<updated>2010-06-14T22:18:48.676Z</updated>
<title type='text'>Google Analytics Profiles for [email protected]</title>
<link rel='self' type='application/atom+xml' href='https://www.google.com/analytics/feeds/datasources/ga/accounts/~all/webproperties/~all/profiles'/>
<author>
<name>Google Analytics</name>
</author>
<generator version='1.0'>Google Analytics</generator>
<openSearch:totalResults>1</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>1000</openSearch:itemsPerPage>
<entry gd:etag='W/"CkQAQ3Y-fSp7I2A9WxFXGEU."' gd:kind='analytics#profile'>
<id>https://www.google.com/analytics/feeds/datasources/ga/accounts/30481/webproperties/UA-30481-1/profiles/1174</id>
<updated>2010-06-09T05:58:15.436-07:00</updated>
<title type='text'>Google Analytics Profile www.googlestore.com</title>
<link rel='self' type='application/atom+xml' href='https://www.google.com/analytics/feeds/datasources/ga/accounts/30481/webproperties/UA-30481-1/profiles/1174'/>
<link rel='http://schemas.google.com/ga/2009#parent' type='application/atom+xml' href='https://www.google.com/analytics/feeds/datasources/ga/accounts/30481/webproperties/UA-30481-1' gd:targetKind='analytics#webproperty'/>
<link rel='http://schemas.google.com/ga/2009#child' type='application/atom+xml' href='https://www.google.com/analytics/feeds/datasources/ga/accounts/30481/webproperties/UA-30481-1/profiles/1174/goals' gd:targetKind='analytics#goals'/>
<dxp:property name='ga:accountId' value='30481'/>
<dxp:property name='ga:webPropertyId' value='UA-30481-1'/>
<dxp:property name='ga:profileName' value='www.googlestore.com'/>
<dxp:property name='ga:profileId' value='1174'/>
<dxp:property name='dxp:tableId' value='ga:1174'/>
<dxp:property name='ga:currency' value='USD'/>
<dxp:property name='ga:timezone' value='America/Los_Angeles'/>
</entry>
</feed>
'''
ANALYTICS_MGMT_GOAL_FEED = '''
<feed xmlns='http://www.w3.org/2005/Atom' xmlns:dxp='http://schemas.google.com/analytics/2009' xmlns:ga='http://schemas.google.com/ga/2009' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:gd='http://schemas.google.com/g/2005' gd:kind='analytics#goals'>
<id>https://www.google.com/analytics/feeds/datasources/ga/accounts/~all/webproperties/~all/profiles/~all/goals</id>
<updated>2010-06-14T22:21:18.485Z</updated>
<title type='text'>Google Analytics Goals for [email protected]</title>
<link rel='self' type='application/atom+xml' href='https://www.google.com/analytics/feeds/datasources/ga/accounts/~all/webproperties/~all/profiles/~all/goals'/>
<author>
<name>Google Analytics</name>
</author>
<generator version='1.0'>Google Analytics</generator>
<openSearch:totalResults>3</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>1000</openSearch:itemsPerPage>
<entry gd:etag='W/"DUYCQn08fip7I2A9WxBWFUo."' gd:kind='analytics#goal'>
<id>https://www.google.com/analytics/feeds/datasources/ga/accounts/30481/webproperties/UA-30481-1/profiles/1174/goals/1</id>
<updated>2010-02-07T13:12:43.377-08:00</updated>
<title type='text'>Google Analytics Goal 1</title>
<link rel='self' type='application/atom+xml' href='https://www.google.com/analytics/feeds/datasources/ga/accounts/30481/webproperties/UA-30481-1/profiles/1174/goals/1'/>
<link rel='http://schemas.google.com/ga/2009#parent' type='application/atom+xml' href='https://www.google.com/analytics/feeds/datasources/ga/accounts/30481/webproperties/UA-30481-1/profiles/1174' gd:targetKind='analytics#profile'/>
<ga:goal active='true' name='Completing Order' number='1' value='10.0'>
<ga:destination caseSensitive='false' expression='/purchaseComplete.html' matchType='regex' step1Required='false'>
<ga:step name='View Product Categories' number='1' path='/Apps|Accessories'/>
</ga:destination>
</ga:goal>
<dxp:property name='ga:profileId' value='1174'/>
</entry>
<entry gd:etag='W/"DUYCQn08fip7I2A9WxBWFUo."' gd:kind='analytics#goal'>
<id>https://www.google.com/analytics/feeds/datasources/ga/accounts/30481/webproperties/UA-30481-1/profiles/1174/goals/2</id>
<updated>2010-02-07T13:12:43.376-08:00</updated>
<title type='text'>Google Analytics Goal 2</title>
<link rel='self' type='application/atom+xml' href='https://www.google.com/analytics/feeds/datasources/ga/accounts/30481/webproperties/UA-30481-1/profiles/1174/goals/2'/>
<link rel='http://schemas.google.com/ga/2009#parent' type='application/atom+xml' href='https://www.google.com/analytics/feeds/datasources/ga/accounts/30481/webproperties/UA-30481-1/profiles/1174' gd:targetKind='analytics#profile'/>
<ga:goal active='true' name='Browsed my site over 5 minutes' number='2' value='0.0'>
<ga:engagement comparison='>' thresholdValue='300' type='timeOnSite'/>
</ga:goal>
<dxp:property name='ga:profileId' value='1174'/>
</entry>
</feed>
'''
ANALYTICS_MGMT_ADV_SEGMENT_FEED = '''
<feed xmlns='http://www.w3.org/2005/Atom' xmlns:dxp='http://schemas.google.com/analytics/2009' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:gd='http://schemas.google.com/g/2005' gd:kind='analytics#segments'>
<id>https://www.google.com/analytics/feeds/datasources/ga/segments</id>
<updated>2010-06-14T22:22:02.728Z</updated>
<title type='text'>Google Analytics Advanced Segments for [email protected]</title>
<link rel='self' type='application/atom+xml' href='https://www.google.com/analytics/feeds/datasources/ga/segments'/>
<author>
<name>Google Analytics</name>
</author>
<generator version='1.0'>Google Analytics</generator>
<openSearch:totalResults>2</openSearch:totalResults>
<openSearch:startIndex>1</openSearch:startIndex>
<openSearch:itemsPerPage>1000</openSearch:itemsPerPage>
<entry gd:etag='W/"YDwqeyM."' gd:kind='analytics#segment'>
<id>https://www.google.com/analytics/feeds/datasources/ga/segments/gaid::0</id>
<updated>2009-10-26T13:00:44.915-07:00</updated>
<title type='text'>Google Analytics Advanced Segment Sources From Google</title>
<link rel='self' type='application/atom+xml' href='https://www.google.com/analytics/feeds/datasources/ga/segments/gaid::0'/>
<dxp:segment id='gaid::0' name='Sources From Google'>
<dxp:definition>ga:source=~^\Qgoogle\E</dxp:definition>
</dxp:segment>
</entry>
</feed>
'''
MULTIDOMAIN_USER_ENTRY = """<?xml version="1.0"?>
<atom:entry xmlns:atom='http://www.w3.org/2005/Atom' xmlns:apps='http://schemas.google.com/apps/2006'>
<apps:property name="password" value="51eea05d46317fadd5cad6787a8f562be90b4446"/>
<apps:property name="hashFunction" value="SHA-1"/>
<apps:property name="userEmail" value="[email protected]"/>
<apps:property name="firstName" value="Liz"/>
<apps:property name="lastName" value="Smith"/>
<apps:property name="isAdmin" value="true"/>
</atom:entry>"""
MULTIDOMAIN_USER_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensesearchrss/1.0/"
xmlns:apps="http://schemas.google.com/apps/2006">
<id>https://apps-apis.google.com/a/feeds/user/2.0/example.com</id>
<updated>2010-01-26T23:38:13.215Z</updated>
<link rel="http://schemas.google.com/g/2005#feed"
type="application/atom+xml" href="https://apps-apis.google.com/a/feeds/user/2.0/example.com" />
<link rel="http://schemas.google.com/g/2005#post"
type="application/atom+xml" href="https://apps-apis.google.com/a/feeds/user/2.0/example.com" />
<link rel="self" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/user/2.0/example.com?start=admin%40example.com" />
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>https://apps-apis.google.com/a/feeds/user/2.0/example.com/admin%40example.com</id>
<updated>2010-01-26T23:38:13.210Z</updated>
<link rel="self" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/user/2.0/example.com/admin%40example.com" />
<link rel="edit" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/user/2.0/example.com/admin%40example.com" />
<apps:property name="lastName" value="Brown" />
<apps:property name="isChangePasswordAtNextLogin" value="false" />
<apps:property name="isSuspended" value="false" />
<apps:property name="userEmail" value="[email protected]" />
<apps:property name="isAdmin" value="true" />
<apps:property name="firstName" value="Joe" />
<apps:property name="ipWhitelisted" value="false" />
</entry>
<entry>
<id>https://apps-apis.google.com/a/feeds/user/2.0/example.com/liz%40example.com</id>
<updated>2010-01-26T23:38:13.210Z</updated>
<link rel="self" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/user/2.0/example.com/liz%40example.com" />
<link rel="edit" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/user/2.0/example.com/liz%40example.com" />
<apps:property name="lastName" value="Smith" />
<apps:property name="isChangePasswordAtNextLogin" value="false" />
<apps:property name="isSuspended" value="false" />
<apps:property name="userEmail" value="[email protected]" />
<apps:property name="isAdmin" value="true" />
<apps:property name="firstName" value="Elizabeth" />
<apps:property name="ipWhitelisted" value="false" />
</entry>
</feed>"""
MULTIDOMAIN_USER_RENAME_REQUEST = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns='http://www.w3.org/2005/Atom' xmlns:apps='http://schemas.google.com/apps/2006'>
<apps:property name='newEmail' value='[email protected]'/>
</entry>"""
MULTIDOMAIN_ALIAS_ENTRY = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns='http://www.w3.org/2005/Atom' xmlns:apps='http://schemas.google.com/apps/2006'>
<id>https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com/helpdesk%40gethelp_example.com</id>
<updated>2008-10-17T15:02:45.646Z</updated>
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com/helpdesk%40gethelp_example.com'/>
<link rel='edit' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com/helpdesk%40gethelp_example.com'/>
<apps:property name="userEmail" value="[email protected]" />
<apps:property name="aliasEmail" value="helpdesk@gethelp_example.com" />
</entry>"""
MULTIDOMAIN_ALIAS_FEED = """<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/" xmlns:apps="http://schemas.google.com/apps/2006">
<id>https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com</id>
<updated>2010-01-26T23:38:13.215Z</updated>
<link rel="http://schemas.google.com/g/2005#feed"
type="application/atom+xml" href="https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com" />
<link rel="http://schemas.google.com/g/2005#post"
type="application/atom+xml" href="https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com" />
<link rel="self" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com?start=helpdesk%40gethelp_example.com" />
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com/helpdesk%40gethelp_example.com</id>
<updated>2010-01-26T23:38:13.210Z</updated>
<link rel="self" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com/helpdesk%40gethelp_example.com" />
<link rel="edit" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com/helpdesk%40gethelp_example.com" />
<apps:property name="userEmail" value="[email protected]" />
<apps:property name="aliasEmail" value="helpdesk@gethelp_example.com" />
</entry>
<entry>
<id>https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com/support%40gethelp_example.com</id>
<updated>2010-01-26T23:38:13.210Z</updated>
<link rel="self" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com/support%40gethelp_example.com" />
<link rel="edit" type="application/atom+xml"
href="https://apps-apis.google.com/a/feeds/alias/2.0/gethelp_example.com/support%40gethelp_example.com" />
<apps:property name="userEmail" value="[email protected]" />
<apps:property name="aliasEmail" value="support@gethelp_example.com" />
</entry>
</feed>"""
USER_ENTRY1 = """<?xml version='1.0' encoding='UTF-8'?>
<entry xmlns='http://www.w3.org/2005/Atom' xmlns:apps='http://schemas.google.com/apps/2006' xmlns:gd='http://schemas.google.com/g/2005'>
<id>http://apps-apis.google.com/a/feeds/srkapps.com/user/2.0/abcd12310</id>
<updated>1970-01-01T00:00:00.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind' term='http://schemas.google.com/apps/2006#user'/>
<title type='text'>abcd12310</title>
<link rel='self' type='application/atom+xml' href='http://apps-apis.google.com/a/feeds/srkapps.com/user/2.0/abcd12310'/>
<link rel='edit' type='application/atom+xml' href='http://apps-apis.google.com/a/feeds/srkapps.com/user/2.0/abcd12310'/>
<apps:login userName='abcd12310' suspended='false' ipWhitelisted='false' admin='false' changePasswordAtNextLogin='false' agreedToTerms='false'/><apps:quota limit='25600'/>
<apps:name familyName='efgh3' givenName='abcd33'/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#user.nicknames' href='http://apps-apis.google.com/a/feeds/srkapps.com/nickname/2.0?username=abcd12310'/>
<gd:feedLink rel='http://schemas.google.com/apps/2006#user.emailLists' href='http://apps-apis.google.com/a/feeds/srkapps.com/emailList/2.0?recipient=abcd12310%40srkapps.com'/>
</entry>"""
USER_FEED1 = """<?xml version='1.0' encoding='utf-8'?>
<ns0:feed xmlns:ns0="http://www.w3.org/2005/Atom">
<ns0:category scheme="http://schemas.google.com/g/2005#kind"
term="http://schemas.google.com/apps/2006#user" />
<ns0:id>
https://apps-apis.google.com/a/feeds/srkapps.com/user/2.0</ns0:id>
<ns1:startIndex xmlns:ns1="http://a9.com/-/spec/opensearchrss/1.0/">
1</ns1:startIndex>
<ns0:title type="text">Users</ns0:title>
<ns0:link href="https://apps-apis.google.com/a/feeds/srkapps.com/user/2.0"
rel="http://schemas.google.com/g/2005#feed"
type="application/atom+xml" />
<ns0:link href="https://apps-apis.google.com/a/feeds/srkapps.com/user/2.0"
rel="http://schemas.google.com/g/2005#post"
type="application/atom+xml" />
<ns0:link href="https://apps-apis.google.com/a/feeds/srkapps.com/user/2.0?startUsername=user8306"
rel="self" type="application/atom+xml" />
<ns0:updated>1970-01-01T00:00:00.000Z</ns0:updated>
<ns0:entry>
<ns1:name familyName="LastName8306"
givenName="FirstName8306"
xmlns:ns1="http://schemas.google.com/apps/2006" />
<ns0:category scheme="http://schemas.google.com/g/2005#kind"
term="http://schemas.google.com/apps/2006#user" />
<ns0:id>
https://apps-apis.google.com/a/feeds/srkapps.com/user/2.0/user8306</ns0:id>
<ns0:updated>1970-01-01T00:00:00.000Z</ns0:updated>
<ns1:quota limit="25600"
xmlns:ns1="http://schemas.google.com/apps/2006" />
<ns1:feedLink href="https://apps-apis.google.com/a/feeds/srkapps.com/nickname/2.0?username=user8306"
rel="http://schemas.google.com/apps/2006#user.nicknames"
xmlns:ns1="http://schemas.google.com/g/2005" />
<ns1:feedLink href="https://apps-apis.google.com/a/feeds/srkapps.com/emailList/2.0?recipient=user8306%40srkapps.com"
rel="http://schemas.google.com/apps/2006#user.emailLists"
xmlns:ns1="http://schemas.google.com/g/2005" />
<ns0:title type="text">user8306</ns0:title>
<ns0:link href="https://apps-apis.google.com/a/feeds/srkapps.com/user/2.0/user8306"
rel="self" type="application/atom+xml" />
<ns0:link href="https://apps-apis.google.com/a/feeds/srkapps.com/user/2.0/user8306"
rel="edit" type="application/atom+xml" />
<ns1:login admin="false" agreedToTerms="false"
changePasswordAtNextLogin="false" ipWhitelisted="false"
suspended="false" userName="user8306"
xmlns:ns1="http://schemas.google.com/apps/2006" />
</ns0:entry>
<ns0:entry>
<ns1:name familyName="LastName8307"
givenName="FirstName8307"
xmlns:ns1="http://schemas.google.com/apps/2006" />
<ns0:category scheme="http://schemas.google.com/g/2005#kind"
term="http://schemas.google.com/apps/2006#user" />
<ns0:id>
https://apps-apis.google.com/a/feeds/srkapps.com/user/2.0/user8307</ns0:id>
<ns0:updated>1970-01-01T00:00:00.000Z</ns0:updated>
<ns1:quota limit="25600"
xmlns:ns1="http://schemas.google.com/apps/2006" />
<ns1:feedLink href="https://apps-apis.google.com/a/feeds/srkapps.com/nickname/2.0?username=user8307"
rel="http://schemas.google.com/apps/2006#user.nicknames"
xmlns:ns1="http://schemas.google.com/g/2005" />
<ns1:feedLink href="https://apps-apis.google.com/a/feeds/srkapps.com/emailList/2.0?recipient=user8307%40srkapps.com"
rel="http://schemas.google.com/apps/2006#user.emailLists"
xmlns:ns1="http://schemas.google.com/g/2005" />
<ns0:title type="text">user8307</ns0:title>
<ns0:link href="https://apps-apis.google.com/a/feeds/srkapps.com/user/2.0/user8307"
rel="self" type="application/atom+xml" />
<ns0:link href="https://apps-apis.google.com/a/feeds/srkapps.com/user/2.0/user8307"
rel="edit" type="application/atom+xml" />
<ns1:login admin="false" agreedToTerms="false"
changePasswordAtNextLogin="false" ipWhitelisted="false"
suspended="false" userName="user8307"
xmlns:ns1="http://schemas.google.com/apps/2006" />
</ns0:entry>
</ns0:feed>"""
NICKNAME_ENTRY = """<?xml version='1.0' encoding='utf-8'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:apps='http://schemas.google.com/apps/2006'>
<id>
https://apps-apis.google.com/a/feeds/srkapps.net/nickname/2.0/nehag</id>
<updated>1970-01-01T00:00:00.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#nickname' />
<title type='text'>nehag</title>
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/srkapps.net/nickname/2.0/nehag' />
<link rel='edit' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/srkapps.net/nickname/2.0/nehag' />
<apps:nickname name='nehag' />
<apps:login userName='neha' />
</entry>"""
NICKNAME_FEED = """<?xml version='1.0' encoding='utf-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:apps='http://schemas.google.com/apps/2006'>
<id>
https://apps-apis.google.com/a/feeds/srkapps.net/nickname/2.0</id>
<updated>1970-01-01T00:00:00.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#nickname' />
<title type='text'>Nicknames</title>
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/srkapps.net/nickname/2.0' />
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/srkapps.net/nickname/2.0' />
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/srkapps.net/nickname/2.0' />
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>
https://apps-apis.google.com/a/feeds/srkapps.net/nickname/2.0/nehag</id>
<updated>1970-01-01T00:00:00.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#nickname' />
<title type='text'>nehag</title>
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/srkapps.net/nickname/2.0/nehag' />
<link rel='edit' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/srkapps.net/nickname/2.0/nehag' />
<apps:nickname name='nehag' />
<apps:login userName='neha' />
</entry>
<entry>
<id>
https://apps-apis.google.com/a/feeds/srkapps.net/nickname/2.0/richag</id>
<updated>1970-01-01T00:00:00.000Z</updated>
<category scheme='http://schemas.google.com/g/2005#kind'
term='http://schemas.google.com/apps/2006#nickname' />
<title type='text'>richag</title>
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/srkapps.net/nickname/2.0/richag' />
<link rel='edit' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/srkapps.net/nickname/2.0/richag' />
<apps:nickname name='richag' />
<apps:login userName='richa' />
</entry>
</feed>"""
GROUP_ENTRY = """<?xml version='1.0' encoding='utf-8'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:apps='http://schemas.google.com/apps/2006'>
<id>
http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial%40srkapps.com</id>
<updated>2011-11-10T16:54:56.784Z</updated>
<link rel='self' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial%40srkapps.com' />
<link rel='edit' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial%40srkapps.com' />
<apps:property name='groupId' value='[email protected]' />
<apps:property name='groupName' value='Trial' />
<apps:property name='emailPermission' value='Domain' />
<apps:property name='permissionPreset' value='Custom' />
<apps:property name='description' value='For try' />
</entry>"""
GROUP_FEED= """<?xml version='1.0' encoding='utf-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:apps='http://schemas.google.com/apps/2006'>
<id>
http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com</id>
<updated>2011-11-10T16:56:03.830Z</updated>
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com' />
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com' />
<link rel='self' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com' />
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>
http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/firstgroup%40srkapps.com</id>
<updated>2011-11-10T16:56:03.830Z</updated>
<link rel='self' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/firstgroup%40srkapps.com' />
<link rel='edit' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/firstgroup%40srkapps.com' />
<apps:property name='groupId' value='[email protected]' />
<apps:property name='groupName' value='FirstGroup' />
<apps:property name='emailPermission' value='Domain' />
<apps:property name='permissionPreset' value='Custom' />
<apps:property name='description' value='First group' />
</entry>
<entry>
<id>
http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial%40srkapps.com</id>
<updated>2011-11-10T16:56:03.830Z</updated>
<link rel='self' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial%40srkapps.com' />
<link rel='edit' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial%40srkapps.com' />
<apps:property name='groupId' value='[email protected]' />
<apps:property name='groupName' value='Trial' />
<apps:property name='emailPermission' value='Domain' />
<apps:property name='permissionPreset' value='Custom' />
<apps:property name='description' value='For try' />
</entry>
</feed>"""
GROUP_MEMBER_ENTRY = """<?xml version='1.0' encoding='utf-8'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:apps='http://schemas.google.com/apps/2006'>
<id>
http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial/member/abcd12310%40srkapps.com</id>
<updated>2011-11-10T16:58:40.804Z</updated>
<link rel='self' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial/member/abcd12310%40srkapps.com' />
<link rel='edit' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial/member/abcd12310%40srkapps.com' />
<apps:property name='memberType' value='User' />
<apps:property name='memberId' value='[email protected]' />
<apps:property name='directMember' value='true' />
</entry>"""
GROUP_MEMBER_FEED = """<?xml version='1.0' encoding='utf-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:apps='http://schemas.google.com/apps/2006'>
<id>
http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial/member</id>
<updated>2011-11-10T16:57:15.574Z</updated>
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial/member' />
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial/member' />
<link rel='self' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial/member' />
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>
http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial/member/abcd12310%40srkapps.com</id>
<updated>2011-11-10T16:57:15.574Z</updated>
<link rel='self' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial/member/abcd12310%40srkapps.com' />
<link rel='edit' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial/member/abcd12310%40srkapps.com' />
<apps:property name='memberType' value='User' />
<apps:property name='memberId' value='[email protected]' />
<apps:property name='directMember' value='true' />
</entry>
<entry>
<id>
http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial/member/neha.technocrat%40srkapps.com</id>
<updated>2011-11-10T16:57:15.574Z</updated>
<link rel='self' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial/member/neha.technocrat%40srkapps.com' />
<link rel='edit' type='application/atom+xml'
href='http://apps-apis.google.com/a/feeds/group/2.0/srkapps.com/trial/member/neha.technocrat%40srkapps.com' />
<apps:property name='memberType' value='User' />
<apps:property name='memberId' value='[email protected]' />
<apps:property name='directMember' value='true' />
</entry>
</feed>"""
ORGANIZATION_UNIT_CUSTOMER_ID_ENTRY = """<?xml version='1.0' encoding='utf-8'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:apps='http://schemas.google.com/apps/2006'>
<id>
https://apps-apis.google.com/a/feeds/customer/2.0/C123A456B</id>
<updated>2011-11-21T13:17:02.274Z</updated>
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/customer/2.0/C123A456B' />
<link rel='edit' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/customer/2.0/C123A456B' />
<apps:property name='customerOrgUnitDescription'
value='example.com' />
<apps:property name='customerId' value='C123A456B' />
<apps:property name='customerOrgUnitName' value='example.com' />
<apps:property name='description' value='tempdescription' />
<apps:property name='name' value='example.com' />
</entry>"""
ORGANIZATION_UNIT_ORGUNIT_ENTRY = """<?xml version='1.0' encoding='utf-8'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:apps='http://schemas.google.com/apps/2006'>
<id>
https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B/Test+Organization</id>
<updated>2011-11-21T13:32:12.334Z</updated>
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B/Test+Organization' />
<link rel='edit' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B/Test+Organization' />
<apps:property name='description' value='New Test Org' />
<apps:property name='parentOrgUnitPath' value='Test' />
<apps:property name='name' value='Test Organization' />
<apps:property name='orgUnitPath' value='Test/Test+Organization' />
<apps:property name='blockInheritance' value='false' />
</entry>"""
ORGANIZATION_UNIT_ORGUNIT_FEED = """<?xml version='1.0' encoding='utf-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:apps='http://schemas.google.com/apps/2006'>
<id>https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B</id>
<updated>2011-11-21T13:47:12.551Z</updated>
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B' />
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B' />
<link rel='http://schemas.google.com/g/2005#batch'
type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B/batch' />
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B?get=all' />
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>
https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B/testOrgUnit92</id>
<updated>2011-11-21T13:42:45.349Z</updated>
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B/testOrgUnit92' />
<link rel='edit' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B/testOrgUnit92' />
<apps:property name='description' value='test92' />
<apps:property name='parentOrgUnitPath' value='Test' />
<apps:property name='name' value='testOrgUnit92' />
<apps:property name='orgUnitPath' value='Test/testOrgUnit92' />
<apps:property name='blockInheritance' value='false' />
</entry>
<entry>
<id>
https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B/testOrgUnit93</id>
<updated>2011-11-21T13:42:45.349Z</updated>
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B/testOrgUnit93' />
<link rel='edit' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orgunit/2.0/C123A456B/testOrgUnit93' />
<apps:property name='description' value='test93' />
<apps:property name='parentOrgUnitPath' value='Test' />
<apps:property name='name' value='testOrgUnit93' />
<apps:property name='orgUnitPath' value='Test/testOrgUnit93' />
<apps:property name='blockInheritance' value='false' />
</entry>
</feed>"""
ORGANIZATION_UNIT_ORGUSER_ENTRY = """<?xml version='1.0' encoding='utf-8'?>
<entry xmlns='http://www.w3.org/2005/Atom'
xmlns:apps='http://schemas.google.com/apps/2006'>
<id>
https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B/admin%40example.com</id>
<updated>2011-11-21T14:05:17.734Z</updated>
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B/admin%40example.com' />
<link rel='edit' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B/admin%40example.com' />
<apps:property name='orgUserEmail' value='[email protected]' />
<apps:property name='orgUnitPath' value='Test' />
</entry>"""
ORGANIZATION_UNIT_ORGUSER_FEED = """<?xml version='1.0' encoding='utf-8'?>
<feed xmlns='http://www.w3.org/2005/Atom'
xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/'
xmlns:apps='http://schemas.google.com/apps/2006'>
<id>https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B</id>
<updated>2011-11-21T14:10:48.206Z</updated>
<link rel='http://schemas.google.com/g/2005#feed'
type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B' />
<link rel='http://schemas.google.com/g/2005#post'
type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B' />
<link rel='http://schemas.google.com/g/2005#batch'
type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B/batch' />
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B?get=all' />
<openSearch:startIndex>1</openSearch:startIndex>
<entry>
<id>
https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B/user720430%40example.com</id>
<updated>2011-11-21T14:09:16.600Z</updated>
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B/user720430%40example.com' />
<link rel='edit' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B/user720430%40example.com' />
<apps:property name='orgUserEmail'
value='[email protected]' />
<apps:property name='orgUnitPath' value='Test' />
</entry>
<entry>
<id>
https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B/user832648%40example.com</id>
<updated>2011-11-21T14:09:16.600Z</updated>
<link rel='self' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B/user832648%40example.com' />
<link rel='edit' type='application/atom+xml'
href='https://apps-apis.google.com/a/feeds/orguser/2.0/C123A456B/user832648%40example.com' />
<apps:property name='orgUserEmail'
value='[email protected]' />
<apps:property name='orgUnitPath' value='Test' />
</entry>
</feed>"""
| apache-2.0 |
acsone/purchase-workflow | purchase_fiscal_position_update/purchase.py | 23 | 2878 | # -*- coding: utf-8 -*-
#############################################################################
#
# Purchase Fiscal Position Update module for Odoo
# Copyright (C) 2011-2014 Julius Network Solutions SARL <[email protected]>
# Copyright (C) 2014 Akretion (http://www.akretion.com)
# @author Mathieu Vatel <mathieu _at_ julius.fr>
# @author Alexis de Lattre <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, api, _
class purchase_order(models.Model):
_inherit = "purchase.order"
@api.onchange('fiscal_position')
def fiscal_position_change(self):
'''Function executed by the on_change on the fiscal_position field
of a purchase order ; it updates taxes on all order lines'''
res = {'value': {}}
lines_without_product = []
if self.order_line:
for line in self.order_line:
fp = self.fiscal_position
if line.product_id:
taxes = line.product_id.supplier_taxes_id
if fp:
taxes = fp.map_tax(taxes)
line.taxes_id = [(6, 0, taxes.ids)]
else:
lines_without_product.append(line.name)
if lines_without_product:
res['warning'] = {'title': _('Warning')}
if len(lines_without_product) == len(self.order_line):
res['warning']['message'] = _(
"The Purchase Order Lines were not updated to the new "
"Fiscal Position because they don't have Products.\n"
"You should update the Taxes of each "
"Purchase Order Line manually.")
else:
res['warning']['message'] = _(
"The following Purchase Order Lines were not updated "
"to the new Fiscal Position because they don't have a "
"Product:\n- %s\nYou should update the "
"Taxes of these Purchase Order Lines manually."
) % ('\n- '.join(lines_without_product))
return res
| agpl-3.0 |
bblay/iris | lib/iris/experimental/animate.py | 3 | 4670 | # (C) British Crown Copyright 2013, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Wrapper for animating iris cubes using iris or matplotlib plotting functions.
"""
import warnings
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import iris
def animate(cube_iterator, plot_func, fig=None, **kwargs):
"""
Animates the given cube iterator.
Args:
* cube_iterator (iterable of :class:`iris.cube.Cube` objects):
Each animation frame corresponds to each :class:`iris.cube.Cube`
object. See :meth:`iris.cube.Cube.slices`.
* plot_func (:mod:`~iris.plot` or :mod:`~iris.quickplot` plot):
Plotting function used to animate.
Kwargs:
* fig (:class:`matplotlib.figure.Figure` instance):
By default, the current figure will be used or a new figure instance
created if no figure is available. See :func:`matplotlib.pyplot.gcf`.
* coords (list of :class:`~iris.coords.Coord` objects or coordinate names):
Use the given coordinates as the axes for the plot. The order of the
given coordinates indicates which axis to use for each, where the first
element is the horizontal axis of the plot and the second element is
the vertical axis of the plot.
* interval (int, float or long):
Defines the time interval in milliseconds between successive frames.
A default interval of 100ms is set.
* vmin, vmax (int, float or long):
Color scaling values, see :class:`matplotlib.colors.Normalize` for
further details. Default values are determined by the min-max across
the data set over the entire sequence.
See :class:`matplotlib.animation.FuncAnimation` for details of other valid
keyword arguments.
Returns:
:class:`~matplotlib.animation.FuncAnimation` object suitable for
saving and or plotting.
For example, to animate along a set of cube slices::
cube_iter = cubes.slices(('grid_longitude', 'grid_latitude'))
ani = animate(cube_iter, qplt.contourf)
plt.show()
"""
kwargs.setdefault('interval', 100)
coords = kwargs.pop('coords', None)
if fig is None:
fig = plt.gcf()
def update_animation_iris(i, cubes, vmin, vmax, coords):
# Clearing the figure is currently necessary for compatibility with
        # the iris quickplot module - due to the colorbar.
plt.gcf().clf()
plot_func(cubes[i], vmin=vmin, vmax=vmax, coords=coords)
# Turn cube iterator into a list to determine plot ranges.
# NOTE: we check that we are not providing a cube as this has a deprecated
# iter special method.
if (hasattr(cube_iterator, '__iter__') and not
isinstance(cube_iterator, iris.cube.Cube)):
cubes = iris.cube.CubeList(cube_iterator)
else:
msg = 'iterable type object required for animation, {} given'.format(
type(cube_iterator))
raise TypeError(msg)
supported = ['iris.plot', 'iris.quickplot']
if plot_func.__module__ not in supported:
msg = ('Given plotting module "{}" may not be supported, intended '
'use: {}.')
msg = msg.format(plot_func.__module__, supported)
warnings.warn(msg, UserWarning)
supported = ['contour', 'contourf', 'pcolor', 'pcolormesh']
if plot_func.__name__ not in supported:
msg = ('Given plotting function "{}" may not be supported, intended '
'use: {}.')
msg = msg.format(plot_func.__name__, supported)
warnings.warn(msg, UserWarning)
# Determine plot range.
vmin = kwargs.pop('vmin', min([cc.data.min() for cc in cubes]))
vmax = kwargs.pop('vmax', max([cc.data.max() for cc in cubes]))
update = update_animation_iris
frames = xrange(len(cubes))
return animation.FuncAnimation(fig, update,
frames=frames,
fargs=(cubes, vmin, vmax, coords),
**kwargs)
| gpl-3.0 |
amenonsen/ansible | lib/ansible/module_utils/network/frr/providers/cli/config/bgp/process.py | 38 | 5183 | #
# (c) 2019, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
import re
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.common.utils import to_list
from ansible.module_utils.network.frr.providers.providers import register_provider
from ansible.module_utils.network.frr.providers.providers import CliProvider
from ansible.module_utils.network.frr.providers.cli.config.bgp.neighbors import Neighbors
from ansible.module_utils.network.frr.providers.cli.config.bgp.address_family import AddressFamily
REDISTRIBUTE_PROTOCOLS = frozenset(['ospf', 'ospf6', 'eigrp', 'isis', 'table',
'static', 'connected', 'sharp', 'nhrp', 'kernel', 'babel', 'rip'])
@register_provider('frr', 'frr_bgp')
class Provider(CliProvider):
def render(self, config=None):
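        """Render the desired BGP process state as a list of CLI
        commands; the requested operation ('delete', 'replace',
        'override', or the default merge) and the existing device
        config, when supplied, determine which commands are emitted."""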
commands = list()
existing_as = None
if config:
match = re.search(r'router bgp (\d+)', config, re.M)
if match:
existing_as = match.group(1)
operation = self.params['operation']
context = None
if self.params['config']:
context = 'router bgp %s' % self.get_value('config.bgp_as')
if operation == 'delete':
if existing_as:
commands.append('no router bgp %s' % existing_as)
elif context:
commands.append('no %s' % context)
else:
self._validate_input(config)
if operation == 'replace':
if existing_as and int(existing_as) != self.get_value('config.bgp_as'):
commands.append('no router bgp %s' % existing_as)
config = None
elif operation == 'override':
if existing_as:
commands.append('no router bgp %s' % existing_as)
config = None
context_commands = list()
for key, value in iteritems(self.get_value('config')):
if value is not None:
meth = getattr(self, '_render_%s' % key, None)
if meth:
resp = meth(config)
if resp:
context_commands.extend(to_list(resp))
if context and context_commands:
commands.append(context)
commands.extend(context_commands)
commands.append('exit')
return commands
def _render_router_id(self, config=None):
cmd = 'bgp router-id %s' % self.get_value('config.router_id')
if not config or cmd not in config:
return cmd
def _render_log_neighbor_changes(self, config=None):
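        """Emit 'bgp log-neighbor-changes' (or its 'no' form) only when
        the desired boolean differs from the device configuration."""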
cmd = 'bgp log-neighbor-changes'
log_neighbor_changes = self.get_value('config.log_neighbor_changes')
if log_neighbor_changes is True:
if not config or cmd not in config:
return cmd
elif log_neighbor_changes is False:
if config and cmd in config:
return 'no %s' % cmd
def _render_networks(self, config=None):
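        """Generate 'network' statements for the desired prefixes; under
        the 'replace' operation, prefixes present on the device but no
        longer desired are removed with 'no network'."""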
commands = list()
safe_list = list()
for entry in self.get_value('config.networks'):
network = entry['prefix']
if entry['masklen']:
network = '%s/%s' % (entry['prefix'], entry['masklen'])
safe_list.append(network)
cmd = 'network %s' % network
if entry['route_map']:
cmd += ' route-map %s' % entry['route_map']
if not config or cmd not in config:
commands.append(cmd)
if self.params['operation'] == 'replace':
if config:
matches = re.findall(r'network (\S+)', config, re.M)
for entry in set(matches).difference(safe_list):
commands.append('no network %s' % entry)
return commands
def _render_neighbors(self, config):
""" generate bgp neighbor configuration
"""
return Neighbors(self.params).render(config)
def _render_address_family(self, config):
""" generate address-family configuration
"""
return AddressFamily(self.params).render(config)
def _validate_input(self, config):
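        """Reject a 'replace' operation that supplies root-level
        networks while networks are also defined under an address
        family, or while the device already has one activated."""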
def device_has_AF(config):
return re.search(r'address-family (?:.*)', config)
address_family = self.get_value('config.address_family')
root_networks = self.get_value('config.networks')
operation = self.params['operation']
if root_networks and operation == 'replace':
if address_family:
for item in address_family:
if item['networks']:
raise ValueError('operation is replace but provided both root level networks and networks under %s %s address family'
% (item['afi'], item['safi']))
if config and device_has_AF(config):
raise ValueError('operation is replace and device has one or more address family activated but root level network(s) provided')
| gpl-3.0 |
7WebPages/coveragepy | tests/test_results.py | 3 | 3022 | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Tests for coverage.py's results analysis."""
from coverage.results import Numbers
from tests.coveragetest import CoverageTest
class NumbersTest(CoverageTest):
"""Tests for coverage.py's numeric measurement summaries."""
run_in_temp_dir = False
def test_basic(self):
n1 = Numbers(n_files=1, n_statements=200, n_missing=20)
self.assertEqual(n1.n_statements, 200)
self.assertEqual(n1.n_executed, 180)
self.assertEqual(n1.n_missing, 20)
self.assertEqual(n1.pc_covered, 90)
def test_addition(self):
n1 = Numbers(n_files=1, n_statements=200, n_missing=20)
n2 = Numbers(n_files=1, n_statements=10, n_missing=8)
n3 = n1 + n2
self.assertEqual(n3.n_files, 2)
self.assertEqual(n3.n_statements, 210)
self.assertEqual(n3.n_executed, 182)
self.assertEqual(n3.n_missing, 28)
self.assertAlmostEqual(n3.pc_covered, 86.666666666)
def test_sum(self):
n1 = Numbers(n_files=1, n_statements=200, n_missing=20)
n2 = Numbers(n_files=1, n_statements=10, n_missing=8)
n3 = sum([n1, n2])
self.assertEqual(n3.n_files, 2)
self.assertEqual(n3.n_statements, 210)
self.assertEqual(n3.n_executed, 182)
self.assertEqual(n3.n_missing, 28)
self.assertAlmostEqual(n3.pc_covered, 86.666666666)
def test_pc_covered_str(self):
n0 = Numbers(n_files=1, n_statements=1000, n_missing=0)
n1 = Numbers(n_files=1, n_statements=1000, n_missing=1)
n999 = Numbers(n_files=1, n_statements=1000, n_missing=999)
n1000 = Numbers(n_files=1, n_statements=1000, n_missing=1000)
self.assertEqual(n0.pc_covered_str, "100")
self.assertEqual(n1.pc_covered_str, "99")
self.assertEqual(n999.pc_covered_str, "1")
self.assertEqual(n1000.pc_covered_str, "0")
def test_pc_covered_str_precision(self):
assert Numbers._precision == 0
Numbers.set_precision(1)
n0 = Numbers(n_files=1, n_statements=10000, n_missing=0)
n1 = Numbers(n_files=1, n_statements=10000, n_missing=1)
n9999 = Numbers(n_files=1, n_statements=10000, n_missing=9999)
n10000 = Numbers(n_files=1, n_statements=10000, n_missing=10000)
self.assertEqual(n0.pc_covered_str, "100.0")
self.assertEqual(n1.pc_covered_str, "99.9")
self.assertEqual(n9999.pc_covered_str, "0.1")
self.assertEqual(n10000.pc_covered_str, "0.0")
Numbers.set_precision(0)
def test_covered_ratio(self):
n = Numbers(n_files=1, n_statements=200, n_missing=47)
self.assertEqual(n.ratio_covered, (153, 200))
n = Numbers(
n_files=1, n_statements=200, n_missing=47,
n_branches=10, n_missing_branches=3, n_partial_branches=1000,
)
self.assertEqual(n.ratio_covered, (160, 210))
| apache-2.0 |
MichaelNedzelsky/intellij-community | python/testData/inspections/PyStringFormatInspection/src/string-format.py | 39 | 3852 | '#%(language)s has %(#)03d quote types.' % {'language': "Python", "#": 2} #ok
'%d %s' % 5 #Too few arguments for format string
'Hello world' % 25 #Too many arguments for format string
"%(name)f(name)" % {'name': 23.2} #ok
"%()s" % {'': "name"} #ok
'test%(name)' % {'name': 23} #There is no format specifier character
'work%*d' % (2, 34) #ok
'work%(name)*d' % (12, 32) #Can't use '*' in formats when using a mapping
'%*.*d' % (2, 5, 5) #ok
'%*.*d' % (2, 4) #Too few arguments for format string
'%*.*d' % (2, 4, 5, 6) #Too many arguments for format string
'%**d' % (2, 5) #There is no format specifier character
'%(name1)s %(name2)s (name3) %s' % {'name1': 'a', 'name2': 'b', 'name3': 'c'} #Too few mapping keys
'%(name1s' % {'name1': 'a'} #Too few mapping keys
'%%%(name)ld' % {'name': 12} #ok
"%(name)f(name)" % 23.2 #Format requires a mapping
"%(name)f(name)" % (23.2) #Format requires a mapping
'%d%d' % {'name1': 2, 'name2': 3} #Format doesn't require a mapping
'%12.2f' % 2.74 #ok
'Hello world' % () #ok
'Hello world' % [] #ok
'Hello world' % {} #ok
'%d%d' % ((5), (5)) #ok
"%(name)d %(name)d" % {"name": 43} #ok
"%(name)d" % {'a': 4, "name": 5} #ok
'%% name %(name)c' % {'a': 4} #Key 'name' has no following argument
'%d %u %f %F %s %r' % (2, 3, 4.1, 4.0, "name", "str") #ok
'%d %d %d' % (4, "a", "b") #Unexpected type
'%f %f %f' % (4, 5, "test") #Unexpected type
'%d' % "name" #Unexpected type
m = {'language': "Python", "#": 2}
'#%(language)s has %(#)03d quote types.' % m #ok
i = "test"
'%(name)s' % {'name': i} #ok
'%s' % i #ok
'%f' % i #Unexpected type
'%f' % (2 * 3 + 5) #ok
s = "%s" % "a".upper() #ok
x = ['a', 'b', 'c']
print "%d: %s" % (len(x), ", ".join(x)) #ok
m = [1, 2, 3, 4, 5]
"%d" % m[0] #ok
"%d %s" % (m[0], m[4]) #ok
"%s" % m #ok
"%s" % m[1:3] #ok
"%d" % m[1:2] #ok
"%d" % m #Unexpected type
"%d" % [] #Unexpected type
def greet(all):
print "Hello %s" % ("World" if all else "Human") #ok
"%s" % [x + 1 for x in [1, 2, 3, 4]] #ok
"%s" % [x + y for x in []] #ok
"%s" % [] #ok
"%f" % [x + 1 for x in [1, 2, 3, 4]] #Unexpected type
"%d %d" % (3, 5) #ok
"Hello %s %s" % tuple(['world', '!']) #ok
def foo(a):
if a == 1:
return "a", "b"
else:
return "c", "d"
print "%s" % foo(1) #Too many arguments for format string
print("| [%(issue_id)s|http://youtrack.jetbrains.net/issue/%(issue_id)s] (%(issue_type)s)|%(summary)s|" % (issue_id, issue_type, summary)) #Format requires a mapping (PY-704)
my_list = list()
for i in range(0,3):
my_list.append( ("hey", "you") )
for item in my_list:
print '%s %s' % item # ok (PY-734)
def bar():
return None
"%s %s" % bar() #Too few arguments for format string
"%s" % {} # ok; str() works
"%s" % {'a': 1, 'b': 2} # ok, no names in template and arg counts don't match
"%s" % object() # ok, str() works
"foo" % {'bar':1, 'baz':2} # ok: empty template that could use names
a = ('a', 1) if 1 else ('b', 2)
"%s is %d" % a # ok, must infer unified tuple type
#PY-3064, because original type of a is tuple, not list
a = (1,2,3)
print '%d:%d' % a[:2]
print '%d:%d' % a[1:2]
string = "qwerty"
print '%d:%d' % string[:2]
print '%s:%s' % string[:2]
print '%s' % string[:2]
print '%d' % string[:2]
my_tuple = (1,2,3,4,5,6,7,8)
print '%d, %d' % my_tuple[:7:3]
print '%d, %d, %d' % my_tuple[:7:3]
print '%d, %d, %d, %d' % my_tuple[:7:3]
# PY-12801
print '%d %s' % ((42,) + ('spam',))
print '%d %s' % (('ham',) + ('spam',))
print '%d %s' % ((42,) + ())
print '%d' % ((42,) + ('spam',))
# PY-11274
import collections
print '%(foo)s' % collections.OrderedDict(foo=None)
class MyDict(collections.Mapping):
def __getitem__(self, key):
return 'spam'
def __iter__(self):
yield 'spam'
def __len__(self):
return 1
print '%(foo)s' % MyDict()
foo = {1, 2, 3}
print('%s %s %s' % foo)
'%s %s %s' % (x for x in range(10))
| apache-2.0 |
pleaseproject/python-for-android | python3-alpha/python3-src/Lib/pipes.py | 46 | 9377 | """Conversion pipeline templates.
The problem:
------------
Suppose you have some data that you want to convert to another format,
such as from GIF image format to PPM image format. Maybe the
conversion involves several steps (e.g. piping it through compress or
uuencode). Some of the conversion steps may require that their input
is a disk file, others may be able to read standard input; similar for
their output. The input to the entire conversion may also be read
from a disk file or from an open file, and similar for its output.
The module lets you construct a pipeline template by sticking one or
more conversion steps together. It will take care of creating and
removing temporary files if they are necessary to hold intermediate
data. You can then use the template to do conversions from many
different sources to many different destinations. The temporary
file names used are different each time the template is used.
The templates are objects so you can create templates for many
different conversion steps and store them in a dictionary, for
instance.
Directions:
-----------
To create a template:
t = Template()
To add a conversion step to a template:
t.append(command, kind)
where kind is a string of two characters: the first is '-' if the
command reads its standard input or 'f' if it requires a file; the
second likewise for the output. The command must be valid /bin/sh
syntax. If input or output files are required, they are passed as
$IN and $OUT; otherwise, it must be possible to use the command in
a pipeline.
To add a conversion step at the beginning:
t.prepend(command, kind)
To convert a file to another file using a template:
sts = t.copy(infile, outfile)
If infile or outfile are the empty string, standard input is read or
standard output is written, respectively. The return value is the
exit status of the conversion pipeline.
To open a file for reading or writing through a conversion pipeline:
fp = t.open(file, mode)
where mode is 'r' to read the file, or 'w' to write it -- just like
for the built-in function open() or for os.popen().
To create a new template object initialized to a given one:
t2 = t.clone()
For an example, see the usage sketch just below this docstring.
""" # '
import re
import os
import tempfile
import string
__all__ = ["Template"]
# Conversion step kinds
FILEIN_FILEOUT = 'ff' # Must read & write real files
STDIN_FILEOUT = '-f' # Must write a real file
FILEIN_STDOUT = 'f-' # Must read a real file
STDIN_STDOUT = '--' # Normal pipeline element
SOURCE = '.-' # Must be first, writes stdout
SINK = '-.' # Must be last, reads stdin
stepkinds = [FILEIN_FILEOUT, STDIN_FILEOUT, FILEIN_STDOUT, STDIN_STDOUT, \
SOURCE, SINK]
class Template:
"""Class representing a pipeline template."""
def __init__(self):
"""Template() returns a fresh pipeline template."""
self.debugging = 0
self.reset()
def __repr__(self):
"""t.__repr__() implements repr(t)."""
return '<Template instance, steps=%r>' % (self.steps,)
def reset(self):
"""t.reset() restores a pipeline template to its initial state."""
self.steps = []
def clone(self):
"""t.clone() returns a new pipeline template with identical
initial state as the current one."""
t = Template()
t.steps = self.steps[:]
t.debugging = self.debugging
return t
def debug(self, flag):
"""t.debug(flag) turns debugging on or off."""
self.debugging = flag
def append(self, cmd, kind):
"""t.append(cmd, kind) adds a new step at the end."""
if type(cmd) is not type(''):
raise TypeError('Template.append: cmd must be a string')
if kind not in stepkinds:
raise ValueError('Template.append: bad kind %r' % (kind,))
if kind == SOURCE:
raise ValueError('Template.append: SOURCE can only be prepended')
if self.steps and self.steps[-1][1] == SINK:
raise ValueError('Template.append: already ends with SINK')
if kind[0] == 'f' and not re.search(r'\$IN\b', cmd):
raise ValueError('Template.append: missing $IN in cmd')
if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd):
raise ValueError('Template.append: missing $OUT in cmd')
self.steps.append((cmd, kind))
def prepend(self, cmd, kind):
"""t.prepend(cmd, kind) adds a new step at the front."""
if type(cmd) is not type(''):
raise TypeError('Template.prepend: cmd must be a string')
if kind not in stepkinds:
raise ValueError('Template.prepend: bad kind %r' % (kind,))
if kind == SINK:
raise ValueError('Template.prepend: SINK can only be appended')
if self.steps and self.steps[0][1] == SOURCE:
raise ValueError('Template.prepend: already begins with SOURCE')
if kind[0] == 'f' and not re.search(r'\$IN\b', cmd):
raise ValueError('Template.prepend: missing $IN in cmd')
if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd):
raise ValueError('Template.prepend: missing $OUT in cmd')
self.steps.insert(0, (cmd, kind))
def open(self, file, rw):
"""t.open(file, rw) returns a pipe or file object open for
reading or writing; the file is the other end of the pipeline."""
if rw == 'r':
return self.open_r(file)
if rw == 'w':
return self.open_w(file)
raise ValueError('Template.open: rw must be \'r\' or \'w\', not %r'
% (rw,))
def open_r(self, file):
"""t.open_r(file) and t.open_w(file) implement
t.open(file, 'r') and t.open(file, 'w') respectively."""
if not self.steps:
return open(file, 'r')
if self.steps[-1][1] == SINK:
            raise ValueError('Template.open_r: pipeline ends with SINK')
cmd = self.makepipeline(file, '')
return os.popen(cmd, 'r')
def open_w(self, file):
if not self.steps:
return open(file, 'w')
if self.steps[0][1] == SOURCE:
raise ValueError('Template.open_w: pipeline begins with SOURCE')
cmd = self.makepipeline('', file)
return os.popen(cmd, 'w')
def copy(self, infile, outfile):
return os.system(self.makepipeline(infile, outfile))
def makepipeline(self, infile, outfile):
cmd = makepipeline(infile, self.steps, outfile)
if self.debugging:
print(cmd)
cmd = 'set -x; ' + cmd
return cmd
def makepipeline(infile, steps, outfile):
    # Build a list containing, for each command:
# [input filename or '', command string, kind, output filename or '']
list = []
for cmd, kind in steps:
list.append(['', cmd, kind, ''])
#
# Make sure there is at least one step
#
if not list:
list.append(['', 'cat', '--', ''])
#
# Take care of the input and output ends
#
[cmd, kind] = list[0][1:3]
if kind[0] == 'f' and not infile:
list.insert(0, ['', 'cat', '--', ''])
list[0][0] = infile
#
[cmd, kind] = list[-1][1:3]
if kind[1] == 'f' and not outfile:
list.append(['', 'cat', '--', ''])
list[-1][-1] = outfile
#
# Invent temporary files to connect stages that need files
#
garbage = []
for i in range(1, len(list)):
lkind = list[i-1][2]
rkind = list[i][2]
if lkind[1] == 'f' or rkind[0] == 'f':
(fd, temp) = tempfile.mkstemp()
os.close(fd)
garbage.append(temp)
list[i-1][-1] = list[i][0] = temp
#
for item in list:
[inf, cmd, kind, outf] = item
if kind[1] == 'f':
cmd = 'OUT=' + quote(outf) + '; ' + cmd
if kind[0] == 'f':
cmd = 'IN=' + quote(inf) + '; ' + cmd
if kind[0] == '-' and inf:
cmd = cmd + ' <' + quote(inf)
if kind[1] == '-' and outf:
cmd = cmd + ' >' + quote(outf)
item[1] = cmd
#
cmdlist = list[0][1]
for item in list[1:]:
[cmd, kind] = item[1:3]
if item[0] == '':
if 'f' in kind:
cmd = '{ ' + cmd + '; }'
cmdlist = cmdlist + ' |\n' + cmd
else:
cmdlist = cmdlist + '\n' + cmd
#
if garbage:
rmcmd = 'rm -f'
for file in garbage:
rmcmd = rmcmd + ' ' + quote(file)
trapcmd = 'trap ' + quote(rmcmd + '; exit') + ' 1 2 3 13 14 15'
cmdlist = trapcmd + '\n' + cmdlist + '\n' + rmcmd
#
return cmdlist
# Reliably quote a string as a single argument for /bin/sh
# Safe unquoted
_safechars = frozenset(string.ascii_letters + string.digits + '@%_-+=:,./')
def quote(file):
"""Return a shell-escaped version of the file string."""
for c in file:
if c not in _safechars:
break
else:
if not file:
return "''"
return file
# use single quotes, and put single quotes into double quotes
# the string $'b is then quoted as '$'"'"'b'
return "'" + file.replace("'", "'\"'\"'") + "'"
| apache-2.0 |
SlimSaber/android_external_gtest | test/gtest_test_utils.py | 397 | 10437 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = '[email protected] (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
"""Parses and strips Google Test flags from argv. This is idempotent."""
# Suppresses the lint complaint about a global variable since we need it
# here to maintain module-wide state.
global _gtest_flags_are_parsed # pylint: disable-msg=W0603
if _gtest_flags_are_parsed:
return
_gtest_flags_are_parsed = True
for flag in _flag_map:
# The environment variable overrides the default value.
if flag.upper() in os.environ:
_flag_map[flag] = os.environ[flag.upper()]
# The command line flag overrides the environment variable.
i = 1 # Skips the program name.
while i < len(argv):
prefix = '--' + flag + '='
if argv[i].startswith(prefix):
_flag_map[flag] = argv[i][len(prefix):]
del argv[i]
break
else:
# We don't increment i in case we just found a --gtest_* flag
# and removed it from argv.
i += 1
def GetFlag(flag):
"""Returns the value of the given flag."""
# In case GetFlag() is called before Main(), we always call
# _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
# are parsed.
_ParseAndStripGTestFlags(sys.argv)
return _flag_map[flag]
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
"""Returns the absolute path of the directory where the test binaries are."""
return os.path.abspath(GetFlag('build_dir'))
_temp_dir = None
def _RemoveTempDir():
if _temp_dir:
shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
"""Returns a directory for temporary files."""
global _temp_dir
if not _temp_dir:
_temp_dir = tempfile.mkdtemp()
return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
build_dir: directory where to look for executables, by default
the result of GetBuildDir().
Returns:
The absolute path of the test binary.
"""
path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
executable_name))
if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
path += '.exe'
if not os.path.exists(path):
message = (
'Unable to find the test binary. Please make sure to provide path\n'
'to the binary via the --build_dir flag or the BUILD_DIR\n'
'environment variable.')
print >> sys.stderr, message
sys.exit(1)
return path
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
class Subprocess:
def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
"""Changes into a specified directory, if provided, and executes a command.
Restores the old directory afterwards.
Args:
command: The command to run, in the form of sys.argv.
working_dir: The directory to change into.
capture_stderr: Determines whether to capture stderr in the output member
or to discard it.
env: Dictionary with environment to pass to the subprocess.
Returns:
An object that represents outcome of the executed process. It has the
following attributes:
terminated_by_signal True iff the child process has been terminated
by a signal.
      signal                 Signal that terminated the child process.
exited True iff the child process exited normally.
exit_code The code with which the child process exited.
output Child process's stdout and stderr output
combined in a string.
"""
    # The subprocess module is the preferable way of running programs
# since it is available and behaves consistently on all platforms,
# including Windows. But it is only available starting in python 2.4.
# In earlier python versions, we revert to the popen2 module, which is
# available in python 2.0 and later but doesn't provide required
# functionality (Popen4) under Windows. This allows us to support Mac
# OS X 10.4 Tiger, which has python 2.3 installed.
if _SUBPROCESS_MODULE_AVAILABLE:
if capture_stderr:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=stderr,
cwd=working_dir, universal_newlines=True, env=env)
      # communicate() returns a (stdout, stderr) tuple; the first element
      # holds the child's output.
self.output = p.communicate()[0]
self._return_code = p.returncode
else:
old_dir = os.getcwd()
def _ReplaceEnvDict(dest, src):
# Changes made by os.environ.clear are not inheritable by child
# processes until Python 2.6. To produce inheritable changes we have
# to delete environment items with the del statement.
for key in dest:
del dest[key]
dest.update(src)
# When 'env' is not None, backup the environment variables and replace
# them with the passed 'env'. When 'env' is None, we simply use the
# current 'os.environ' for compatibility with the subprocess.Popen
# semantics used above.
if env is not None:
old_environ = os.environ.copy()
_ReplaceEnvDict(os.environ, env)
try:
if working_dir is not None:
os.chdir(working_dir)
if capture_stderr:
p = popen2.Popen4(command)
else:
p = popen2.Popen3(command)
p.tochild.close()
self.output = p.fromchild.read()
ret_code = p.wait()
finally:
os.chdir(old_dir)
# Restore the old environment variables
# if they were replaced.
if env is not None:
_ReplaceEnvDict(os.environ, old_environ)
# Converts ret_code to match the semantics of
# subprocess.Popen.returncode.
if os.WIFSIGNALED(ret_code):
self._return_code = -os.WTERMSIG(ret_code)
else: # os.WIFEXITED(ret_code) should return True here.
self._return_code = os.WEXITSTATUS(ret_code)
if self._return_code < 0:
self.terminated_by_signal = True
self.exited = False
self.signal = -self._return_code
else:
self.terminated_by_signal = False
self.exited = True
self.exit_code = self._return_code
def Main():
"""Runs the unit test."""
# We must call _ParseAndStripGTestFlags() before calling
# unittest.main(). Otherwise the latter will be confused by the
# --gtest_* flags.
_ParseAndStripGTestFlags(sys.argv)
# The tested binaries should not be writing XML output files unless the
# script explicitly instructs them to.
# TODO([email protected]): Move this into Subprocess when we implement
# passing environment into it as a parameter.
if GTEST_OUTPUT_VAR_NAME in os.environ:
del os.environ[GTEST_OUTPUT_VAR_NAME]
_test_module.main()
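# Hedged usage sketch (added; not part of the original file): a gtest Python
# test script typically builds on these helpers as below. The binary name
# 'gtest_output_test_' is illustrative only.
#
#   import gtest_test_utils
#
#   class ExitCodeTest(gtest_test_utils.TestCase):
#     def testListTests(self):
#       exe = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')
#       p = gtest_test_utils.Subprocess([exe, '--gtest_list_tests'])
#       self.assertTrue(p.exited)
#       self.assertEqual(0, p.exit_code)
#
#   if __name__ == '__main__':
#     gtest_test_utils.Main()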
| bsd-3-clause |
maitesin/forner.io | post/urls.py | 1 | 1371 | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.IndexPostListView.as_view(), name="Blog"),
url(r'^(?P<year>[0-9]{4})/?$', views.YearPostListView.as_view(), name="Year"),
url(r'^(?P<year>[0-9]{4})/(?P<month>[0-9]{2})/?$', views.MonthPostListView.as_view(), name="Month"),
url(r'^(?P<year>[0-9]{4})/(?P<month>[0-9]{2})/(?P<day>[0-9]+)/?$', views.DayPostListView.as_view(), name="Day"),
url(r'^(?P<year>[0-9]{4})/(?P<month>[0-9]{2})/(?P<day>[0-9]+)/(?P<title>\w+)/?$', views.PostDetailView.as_view(), name="Post"),
url(r'^tag/(?P<name>\w+)/?$', views.TagPostListView.as_view(), name="Tag"),
url(r'^category/(?P<name>\w+)/?$', views.CategoryPostListView.as_view(), name="Category"),
url(r'^all/?$', views.AllPostListView.as_view(), name="All"),
url(r'^projects/?$', views.projects, name="Projects"),
    url(r'^projects/tries/?$', views.tries, name="Tries"),
    url(r'^projects/acc/?$', views.acc, name="ACC"),
    url(r'^projects/website/?$', views.website, name="Website"),
    url(r'^projects/resume/?$', views.resumecli, name="Resume CLI"),
url(r'^resume/?$', views.resume, name="Resume"),
url(r'^rss/?$', views.LatestPostFeed(), name="RSS"),
url(r'^atom/$', views.LatestPostAtom(), name="Atom"),
]
handler404 = 'post.views.handler404'
handler500 = 'post.views.handler500'
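# Hedged illustration (added; not in the original file): with this urlconf,
# names such as 'Post' can be reversed; e.g. (values illustrative)
#   reverse('Post', kwargs={'year': '2017', 'month': '05',
#                           'day': '1', 'title': 'hello'})
# yields a path like '/2017/05/1/hello', modulo the prefix under which this
# module is included in the project urls.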
| gpl-2.0 |
sbidoul/odoo | addons/survey_crm/survey.py | 385 | 2051 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2011 OpenERP S.A (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class survey_mail_compose_message(osv.TransientModel):
_inherit = 'survey.mail.compose.message'
def default_get(self, cr, uid, fields, context=None):
res = super(survey_mail_compose_message, self).default_get(cr, uid, fields, context=context)
if context.get('active_model') == 'crm.lead' and context.get('active_ids'):
partner_ids = []
emails_list = []
for lead in self.pool.get('crm.lead').browse(cr, uid, context.get('active_ids'), context=context):
if lead.partner_id:
partner_ids.append(lead.partner_id.id)
else:
email = lead.contact_name and "%s <%s>" % (lead.contact_name, lead.email_from or "") or lead.email_from or None
if email and email not in emails_list:
emails_list.append(email)
multi_email = "\n".join(emails_list)
res.update({'partner_ids': list(set(partner_ids)), 'multi_email': multi_email})
return res
| agpl-3.0 |
sekikn/incubator-airflow | airflow/providers/google/cloud/operators/kubernetes_engine.py | 7 | 13403 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""This module contains Google Kubernetes Engine operators."""
import os
import tempfile
from typing import Dict, Optional, Sequence, Union
from google.cloud.container_v1.types import Cluster
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.cncf.kubernetes.operators.kubernetes_pod import KubernetesPodOperator
from airflow.providers.google.cloud.hooks.kubernetes_engine import GKEHook
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
from airflow.utils.decorators import apply_defaults
from airflow.utils.process_utils import execute_in_subprocess, patch_environ
class GKEDeleteClusterOperator(BaseOperator):
"""
Deletes the cluster, including the Kubernetes endpoint and all worker nodes.
To delete a certain cluster, you must specify the ``project_id``, the ``name``
of the cluster, the ``location`` that the cluster is in, and the ``task_id``.
**Operator Creation**: ::
        operator = GKEDeleteClusterOperator(
task_id='cluster_delete',
project_id='my-project',
location='cluster-location'
name='cluster-name')
.. seealso::
For more detail about deleting clusters have a look at the reference:
https://google-cloud-python.readthedocs.io/en/latest/container/gapic/v1/api.html#google.cloud.container_v1.ClusterManagerClient.delete_cluster
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GKEDeleteClusterOperator`
:param project_id: The Google Developers Console [project ID or project number]
:type project_id: str
:param name: The name of the resource to delete, in this case cluster name
:type name: str
:param location: The name of the Google Compute Engine zone in which the cluster
resides.
:type location: str
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:type gcp_conn_id: str
:param api_version: The api version to use
:type api_version: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = [
'project_id',
'gcp_conn_id',
'name',
'location',
'api_version',
'impersonation_chain',
]
@apply_defaults
def __init__(
self,
*,
name: str,
location: str,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
api_version: str = 'v2',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.location = location
self.api_version = api_version
self.name = name
self.impersonation_chain = impersonation_chain
self._check_input()
def _check_input(self) -> None:
if not all([self.project_id, self.name, self.location]):
self.log.error('One of (project_id, name, location) is missing or incorrect')
raise AirflowException('Operator has incorrect or missing input.')
def execute(self, context) -> Optional[str]:
hook = GKEHook(
gcp_conn_id=self.gcp_conn_id,
location=self.location,
impersonation_chain=self.impersonation_chain,
)
delete_result = hook.delete_cluster(name=self.name, project_id=self.project_id)
return delete_result
class GKECreateClusterOperator(BaseOperator):
"""
Create a Google Kubernetes Engine Cluster of specified dimensions
The operator will wait until the cluster is created.
The **minimum** required to define a cluster to create is:
``dict()`` ::
cluster_def = {'name': 'my-cluster-name',
'initial_node_count': 1}
or
``Cluster`` proto ::
from google.cloud.container_v1.types import Cluster
cluster_def = Cluster(name='my-cluster-name', initial_node_count=1)
**Operator Creation**: ::
        operator = GKECreateClusterOperator(
task_id='cluster_create',
project_id='my-project',
location='my-location'
body=cluster_def)
.. seealso::
        For more detail about creating clusters, have a look at the reference:
:class:`google.cloud.container_v1.types.Cluster`
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GKECreateClusterOperator`
:param project_id: The Google Developers Console [project ID or project number]
:type project_id: str
:param location: The name of the Google Compute Engine zone in which the cluster
resides.
:type location: str
:param body: The Cluster definition to create, can be protobuf or python dict, if
dict it must match protobuf message Cluster
:type body: dict or google.cloud.container_v1.types.Cluster
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:type gcp_conn_id: str
:param api_version: The api version to use
:type api_version: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = [
'project_id',
'gcp_conn_id',
'location',
'api_version',
'body',
'impersonation_chain',
]
@apply_defaults
def __init__(
self,
*,
location: str,
body: Optional[Union[Dict, Cluster]],
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
api_version: str = 'v2',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.location = location
self.api_version = api_version
self.body = body
self.impersonation_chain = impersonation_chain
self._check_input()
def _check_input(self) -> None:
if not all([self.project_id, self.location, self.body]) or not (
(isinstance(self.body, dict) and "name" in self.body and "initial_node_count" in self.body)
or (getattr(self.body, "name", None) and getattr(self.body, "initial_node_count", None))
):
self.log.error(
"One of (project_id, location, body, body['name'], "
"body['initial_node_count']) is missing or incorrect"
)
raise AirflowException("Operator has incorrect or missing input.")
def execute(self, context) -> str:
hook = GKEHook(
gcp_conn_id=self.gcp_conn_id,
location=self.location,
impersonation_chain=self.impersonation_chain,
)
create_op = hook.create_cluster(cluster=self.body, project_id=self.project_id)
return create_op
KUBE_CONFIG_ENV_VAR = "KUBECONFIG"
class GKEStartPodOperator(KubernetesPodOperator):
"""
Executes a task in a Kubernetes pod in the specified Google Kubernetes
Engine cluster
This Operator assumes that the system has gcloud installed and has configured a
connection id with a service account.
    The **minimum** required to launch a pod are the variables
``task_id``, ``project_id``, ``location``, ``cluster_name``, ``name``,
``namespace``, and ``image``
.. seealso::
For more detail about Kubernetes Engine authentication have a look at the reference:
https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl#internal_ip
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GKEStartPodOperator`
:param location: The name of the Google Kubernetes Engine zone in which the
cluster resides, e.g. 'us-central1-a'
:type location: str
:param cluster_name: The name of the Google Kubernetes Engine cluster the pod
should be spawned in
:type cluster_name: str
:param use_internal_ip: Use the internal IP address as the endpoint.
:param project_id: The Google Developers Console project id
:type project_id: str
:param gcp_conn_id: The google cloud connection id to use. This allows for
users to specify a service account.
:type gcp_conn_id: str
"""
template_fields = {'project_id', 'location', 'cluster_name'} | set(KubernetesPodOperator.template_fields)
@apply_defaults
def __init__(
self,
*,
location: str,
cluster_name: str,
use_internal_ip: bool = False,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.location = location
self.cluster_name = cluster_name
self.gcp_conn_id = gcp_conn_id
self.use_internal_ip = use_internal_ip
if self.gcp_conn_id is None:
raise AirflowException(
"The gcp_conn_id parameter has become required. If you want to use Application Default "
"Credentials (ADC) strategy for authorization, create an empty connection "
"called `google_cloud_default`.",
)
def execute(self, context) -> Optional[str]:
hook = GoogleBaseHook(gcp_conn_id=self.gcp_conn_id)
self.project_id = self.project_id or hook.project_id
if not self.project_id:
raise AirflowException(
"The project id must be passed either as "
"keyword project_id parameter or as project_id extra "
"in Google Cloud connection definition. Both are not set!"
)
# Write config to a temp file and set the environment variable to point to it.
# This is to avoid race conditions of reading/writing a single file
with tempfile.NamedTemporaryFile() as conf_file, patch_environ(
{KUBE_CONFIG_ENV_VAR: conf_file.name}
), hook.provide_authorized_gcloud():
# Attempt to get/update credentials
# We call gcloud directly instead of using google-cloud-python api
# because there is no way to write kubernetes config to a file, which is
# required by KubernetesPodOperator.
# The gcloud command looks at the env variable `KUBECONFIG` for where to save
# the kubernetes config file.
cmd = [
"gcloud",
"container",
"clusters",
"get-credentials",
self.cluster_name,
"--zone",
self.location,
"--project",
self.project_id,
]
if self.use_internal_ip:
cmd.append('--internal-ip')
execute_in_subprocess(cmd)
# Tell `KubernetesPodOperator` where the config file is located
self.config_file = os.environ[KUBE_CONFIG_ENV_VAR]
return super().execute(context)
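# Hedged usage sketch (added; not part of the original module): chaining the
# three operators inside a DAG. Project, zone, cluster and image names are
# illustrative only.
#
#   create = GKECreateClusterOperator(
#       task_id='create', project_id='my-project', location='us-central1-a',
#       body={'name': 'demo-cluster', 'initial_node_count': 1})
#   pod = GKEStartPodOperator(
#       task_id='pod', project_id='my-project', location='us-central1-a',
#       cluster_name='demo-cluster', name='demo-pod', namespace='default',
#       image='perl')
#   delete = GKEDeleteClusterOperator(
#       task_id='delete', project_id='my-project', location='us-central1-a',
#       name='demo-cluster')
#   create >> pod >> delete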
| apache-2.0 |
tdsmith/ponysay | src/colourstack.py | 3 | 5198 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
ponysay - Ponysay, cowsay reimplementation for ponies
Copyright (C) 2012, 2013, 2014 Erkin Batu Altunbaş et al.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
If you intend to redistribute ponysay or a fork of it commercially,
note that it contains aggregated images, some of which may not be
commercially redistributable; you would be required to remove those.
To determine whether or not you may commercially redistribute an
image, make sure that the line ‘FREE: yes’ is included inside the
image between two ‘$$$’ lines, where ‘FREE’ is in upper case and
directly followed by the colon.
'''
from common import *
class ColourStack():
'''
ANSI colour stack
    This is used to make layers with independent colourisations
'''
def __init__(self, autopush, autopop):
'''
Constructor
@param autopush:str String that, when used, will create a new independently colourised layer
        @param autopop:str String that, when used, will end the current layer and continue with the previous layer
'''
self.autopush = autopush
self.autopop = autopop
self.lenpush = len(autopush)
self.lenpop = len(autopop)
self.bufproto = ' ' * (self.lenpush if self.lenpush > self.lenpop else self.lenpop)
self.stack = []
self.push()
self.seq = None
def push(self):
'''
Create a new independently colourised layer
@return :str String that should be inserted into your buffer
'''
self.stack.insert(0, [self.bufproto, None, None, [False] * 9])
if len(self.stack) == 1:
return None
return '\033[0m'
def pop(self):
'''
        End the current layer and continue with the previous layer
@return :str String that should be inserted into your buffer
'''
old = self.stack.pop(0)
rc = '\033[0;'
        if len(self.stack) == 0: # last resort in case something made it pop too much
            self.push()
new = self.stack[0]
if new[1] is not None: rc += new[1] + ';'
if new[2] is not None: rc += new[2] + ';'
for i in range(0, 9):
if new[3][i]:
rc += str(i + 1) + ';'
return rc[:-1] + 'm'
def feed(self, char):
'''
        Feed your buffer through this method one character at a time; it watches for the autopush and autopop
        strings and returns the push or pop sequence that should be inserted after each character
@param :chr One character in your buffer
@return :str The text to insert after the input character
'''
if self.seq is not None:
self.seq += char
if (char == '~') or (('a' <= char) and (char <= 'z')) or (('A' <= char) and (char <= 'Z')):
if (self.seq[0] == '[') and (self.seq[-1] == 'm'):
self.seq = self.seq[1:-1].split(';')
(i, n) = (0, len(self.seq))
while i < n:
part = self.seq[i]
p = 0 if part == '' else int(part)
i += 1
if p == 0: self.stack[0][1:] = [None, None, [False] * 9]
elif 1 <= p <= 9: self.stack[0][3][p - 1] = True
elif 21 <= p <= 29: self.stack[0][3][p - 21] = False
elif p == 39: self.stack[0][1] = None
elif p == 49: self.stack[0][2] = None
elif 30 <= p <= 37: self.stack[0][1] = part
elif 90 <= p <= 97: self.stack[0][1] = part
elif 40 <= p <= 47: self.stack[0][2] = part
elif 100 <= p <= 107: self.stack[0][2] = part
elif p == 38:
self.stack[0][1] = '%s;%s;%s' % (part, self.seq[i], self.seq[i + 1])
i += 2
elif p == 48:
self.stack[0][2] = '%s;%s;%s' % (part, self.seq[i], self.seq[i + 1])
i += 2
self.seq = None
elif char == '\033':
self.seq = ''
buf = self.stack[0][0]
buf = buf[1:] + char
rc = ''
if buf[-self.lenpush:] == self.autopush: rc = self.push()
elif buf[-self.lenpop:] == self.autopop: rc = self.pop()
self.stack[0][0] = buf
return rc
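# Hedged example (added; not part of the original file): a layer opens on '('
# and closes on ')'; colours set inside the layer do not leak past the ')'.
#
#   cs = ColourStack('(', ')')
#   out = ''
#   for ch in 'a\033[31m(b\033[32mc)d':
#       out += ch + cs.feed(ch)
#   # After the ')', 'd' is printed with the outer red colourisation restored.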
| gpl-3.0 |
yitian134/chromium | ppapi/generators/idl_option.py | 178 | 2662 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import getopt
import sys
from idl_log import ErrOut, InfoOut, WarnOut
OptionMap = { }
def GetOption(name):
if name not in OptionMap:
raise RuntimeError('Could not find option "%s".' % name)
return OptionMap[name].Get()
class Option(object):
def __init__(self, name, desc, default = None, callfunc = None,
testfunc = None, cookie = None):
# Verify this option is not a duplicate
if name in OptionMap:
raise RuntimeError('Option "%s" already exists.' % name)
self.name = name
self.desc = desc
self.default = default
self.value = default
self.callfunc = callfunc
self.testfunc = testfunc
self.cookie = cookie
OptionMap[name] = self
def Set(self, value):
if self.testfunc:
if not self.testfunc(self, value): return False
# If this is a boolean option, set it to true
if self.default is None:
self.value = True
else:
self.value = value
if self.callfunc:
self.callfunc(self)
return True
def Get(self):
return self.value
def DumpOption(option):
if len(option.name) > 1:
out = ' --%-15.15s\t%s' % (option.name, option.desc)
else:
out = ' -%-15.15s\t%s' % (option.name, option.desc)
if option.default:
out = '%s\n\t\t\t(Default: %s)\n' % (out, option.default)
InfoOut.Log(out)
def DumpHelp(option=None):
InfoOut.Log('Usage:')
for opt in sorted(OptionMap.keys()):
DumpOption(OptionMap[opt])
sys.exit(0)
#
# Default IDL options
#
# -h : Help, prints options
# --verbose : use verbose output
# --test : test this module
#
Option('h', 'Help', callfunc=DumpHelp)
Option('help', 'Help', callfunc=DumpHelp)
Option('verbose', 'Verbose')
Option('test', 'Test the IDL scripts')
def ParseOptions(args):
short_opts= ""
long_opts = []
# Build short and long option lists
for name in sorted(OptionMap.keys()):
option = OptionMap[name]
if len(name) > 1:
if option.default is None:
long_opts.append('%s' % name)
else:
long_opts.append('%s=' % name)
else:
if option.default is None:
short_opts += name
else:
short_opts += '%s:' % name
try:
opts, filenames = getopt.getopt(args, short_opts, long_opts)
for opt, val in opts:
if len(opt) == 2: opt = opt[1:]
if opt[0:2] == '--': opt = opt[2:]
OptionMap[opt].Set(val)
except getopt.error, e:
ErrOut.Log('Illegal option: %s\n' % str(e))
DumpHelp()
sys.exit(-1)
return filenames
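# Hedged usage sketch (added; not part of the original file): a generator
# script built on this module might register its own options and then parse.
# The 'out' option is illustrative only.
#
#   Option('out', 'Output directory', default='.')
#   filenames = ParseOptions(sys.argv[1:])
#   if GetOption('verbose'):
#     InfoOut.Log('Writing output to %s' % GetOption('out'))
#   for filename in filenames:
#     InfoOut.Log('Processing %s' % filename)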
| bsd-3-clause |
AdrieleD/gr-mac1 | docs/doxygen/doxyxml/text.py | 19 | 1837 | #
# Copyright 2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Utilities for extracting text from generated classes.
"""
def is_string(txt):
if isinstance(txt, str):
return True
try:
if isinstance(txt, unicode):
return True
except NameError:
pass
return False
def description(obj):
if obj is None:
return None
return description_bit(obj).strip()
def description_bit(obj):
if hasattr(obj, 'content'):
contents = [description_bit(item) for item in obj.content]
result = ''.join(contents)
elif hasattr(obj, 'content_'):
contents = [description_bit(item) for item in obj.content_]
result = ''.join(contents)
elif hasattr(obj, 'value'):
result = description_bit(obj.value)
elif is_string(obj):
return obj
else:
raise StandardError('Expecting a string or something with content, content_ or value attribute')
# If this bit is a paragraph then add one some line breaks.
if hasattr(obj, 'name') and obj.name == 'para':
result += "\n\n"
return result
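# Hedged example (added; not part of the original file): description()
# flattens nested generated nodes into plain text. With a hypothetical node
# class exposing a 'content' attribute:
#
#   class Node(object):
#       def __init__(self, content):
#           self.content = content
#   para = Node(['Hello ', Node(['generated ']), 'world'])
#   assert description(para) == 'Hello generated world'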
| gpl-3.0 |
khs26/rotamer_library | rotamer/library/old_rotamer_data.py | 1 | 5430 | import numpy as np
import tables as ts
import playground.group_rotation.amino_acids as amino
import pele.amber.read_amber as ra
import playground.group_rotation.chirality as chir
import networkx as nx
class RotamerGroupTemplate(ts.IsDescription):
"""
A compound data type for interoperating with ROTAMER_GROUP_TEMPLATE in GMIN.
Fortran type definition:
TYPE ROTAMER_GROUP_TEMPLATE
CHAR(LEN=16) :: GROUP_NAME
CHAR(LEN=16) :: RES_NAME
CHAR(LEN=16) :: DIHEDRAL_ATOM_NAMES(4)
CHAR(LEN=16), ALLOCATABLE :: MOVING_ATOM_NAMES(:)
END TYPE ROTAMER_GROUP_TEMPLATE
These are designed to be read from HDF5 files, and so this implementation uses
the appropriate types from PyTables.
As we do not know the length of the moving atoms array a priori, we instead
use a CArray (HDF5 compressible dataset) to store moving atom names for each
group. The individual row corresponding to a group then contains the name of the
CArray with the relevant atom names in it.
"""
group_name = ts.StringCol(itemsize=16)
res_name = ts.StringCol(itemsize=16)
dihedral_atom_names = ts.StringCol(itemsize=16, shape=(4))
moving_atoms_carray = ts.StringCol(itemsize=24)
# Open a file in "w"rite mode
fileh = ts.open_file("amber_rotamer_groups.h5", mode="w")
# Get the HDF5 root group
root = fileh.root
# Create the groups for the templates themselves and one for storing the moving atoms arrays.
for groupname in ("RotamerGroupTemplates", "MovingAtomsArrays"):
group = fileh.create_group(root, groupname)
# Create a filter, telling it to compress with zlib.
filters = ts.Filters(complib='zlib')
for amino_acid in (amino.amino_acids):
table = fileh.create_table("/RotamerGroupTemplates",
amino_acid,
RotamerGroupTemplate,
"Template for {res}".format(res=amino_acid))
# Get the record object associated with the table.
group_template = table.row
# Read in an appropriate topology file and create a molecular graph.
filename = '/scratch/khs26/rotamer_lib_igb2/{res}/{res}/{res}/coords.prmtop'.format(res=amino_acid)
topology = ra.read_topology(filename)
mol_graph = ra.create_atoms_and_residues(topology)
# Get the residue name for the first residue.
res = next(residue for residue in mol_graph.residues.nodes() if residue.index == 1)
# Get a list of dihedrals we are interested in for this residue.
dihedrals = sorted([k[1] for k in amino.def_parameters if k[0] == amino_acid
and not ('C' in k[1] and 'CA' in k[1])])
# For each pair of atoms in a dihedral, find their highest-ranked neighbours for defining the dihedral angle.
dihedral_atoms = {}
dihedral_moving_atoms = {}
for atom_pair in dihedrals:
atom0 = next(n for n in mol_graph.atoms.nodes() if n.name == atom_pair[0] and n.residue == res)
atom1 = next(n for n in mol_graph.atoms.nodes() if n.name == atom_pair[1] and n.residue == res)
atom_1 = next(atom for atom in chir.chiral_order(mol_graph.atoms, atom0, depth=2) if atom != atom1)
atom2 = next(atom for atom in chir.chiral_order(mol_graph.atoms, atom1, depth=2) if atom != atom0)
dihedral_atoms[(atom0.name, atom1.name)] = (atom_1, atom0, atom1, atom2)
# Now find the moving atoms by breaking the dihedral bond and choosing the subgraph containing atom1.
mol_graph.atoms.remove_edge(atom0, atom1)
dihedral_moving_atoms[(atom0.name, atom1.name)] = nx.node_connected_component(mol_graph.atoms, atom1)
mol_graph.atoms.add_edge(atom0, atom1)
# Loop through the possible dihedral atom pairs for the amino acid.
# i is going to form part of the CArray name
for i, dihedral in enumerate(dihedrals):
moving_atom_names = [atom.name for atom in dihedral_moving_atoms[dihedral]]
carray_name = 'carray_{res}_{ind}'.format(res=amino_acid, ind=str(i))
print amino_acid, i, dihedral, moving_atom_names, carray_name
ca = fileh.create_carray(root.MovingAtomsArrays,
name=carray_name,
atom=ts.StringAtom(16),
shape=(len(moving_atom_names),),
filters=filters)
ca[0:] = moving_atom_names
group_template['group_name'] = "{res}_{dih0}_{dih1}".format(res=amino_acid, dih0=dihedral[0], dih1=dihedral[1])
group_template['res_name'] = "{res}".format(res=amino_acid)
group_template['dihedral_atom_names'] = np.array([x.name for x in dihedral_atoms[dihedral]])
group_template['moving_atoms_carray'] = carray_name
# Append this element to the row and move on.
group_template.append()
# Flush the table buffers
table.flush()
# Read the records from table "/RotamerGroupTemplates/ARG" and select some
table = root.RotamerGroupTemplates.ARG
e = [(p['group_name'], p['res_name'], p['dihedral_atom_names'], p['moving_atoms_carray']) for p in table]
for elem in e:
print("Selected values ==>", elem)
print("Carray:", root.MovingAtomsArrays._v_children[elem[-1]][:])
print("Total selected records ==> ", len(e))
# Finally, close the file (this also will flush all the remaining buffers!)
fileh.close() | mit |
krafczyk/spack | var/spack/repos/builtin/packages/gslib/package.py | 2 | 3232 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Gslib(Package):
"""Highly scalable Gather-scatter code with AMG and XXT solvers"""
homepage = "https://github.com/gslib/gslib"
git = "https://github.com/gslib/gslib.git"
version('1.0.2', tag='v1.0.2')
version('1.0.1', tag='v1.0.1')
version('1.0.0', tag='v1.0.0')
variant('mpi', default=True, description='Build with MPI')
variant('mpiio', default=True, description='Build with MPI I/O')
variant('blas', default=False, description='Build with BLAS')
depends_on('mpi', when="+mpi")
depends_on('mpi', when="+mpiio")
depends_on('blas', when="+blas")
conflicts('~mpi', when='+mpiio')
def install(self, spec, prefix):
src_dir = 'src'
lib_dir = 'lib'
libname = 'libgs.a'
if self.version == Version('1.0.1'):
makefile = 'Makefile'
else:
makefile = 'src/Makefile'
cc = self.compiler.cc
if '+mpiio' not in spec:
filter_file(r'MPIIO.*?=.*1', 'MPIIO = 0', makefile)
if '+mpi' in spec:
cc = spec['mpi'].mpicc
else:
filter_file(r'MPI.*?=.*1', 'MPI = 0', makefile)
filter_file(r'MPIIO.*?=.*1', 'MPIIO = 0', makefile)
make_cmd = "CC=" + cc
if '+blas' in spec:
filter_file(r'BLAS.*?=.*0', 'BLAS = 1', makefile)
blas = spec['blas'].libs
ld_flags = blas.ld_flags
filter_file(r'\$\(LDFLAGS\)', ld_flags, makefile)
if self.version == Version('1.0.1'):
make(make_cmd)
make('install')
install_tree(lib_dir, prefix.lib)
elif self.version == Version('1.0.0'):
with working_dir(src_dir):
make(make_cmd)
mkdir(prefix.lib)
install(libname, prefix.lib)
# Should only install the headers (this will be fixed in gslib on
# future releases).
install_tree(src_dir, prefix.include)
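    # Hedged usage note (added): with this package file in place, a build is
    # requested with a spec such as `spack install gslib@1.0.2 +mpi ~blas`;
    # the version and variants shown are illustrative.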
| lgpl-2.1 |
Sabayon/entropy | server/eit/commands/repo.py | 1 | 7196 | # -*- coding: utf-8 -*-
"""
@author: Fabio Erculiani <[email protected]>
@contact: [email protected]
@copyright: Fabio Erculiani
@license: GPL-2
B{Entropy Infrastructure Toolkit}.
"""
import sys
import argparse
from entropy.i18n import _
from entropy.output import blue, darkgreen, purple, teal
from entropy.server.interfaces import RepositoryConfigParser
from _entropy.eit.commands.descriptor import EitCommandDescriptor
from _entropy.eit.commands.command import EitCommand
from _entropy.eit.utils import print_table
class EitRepo(EitCommand):
"""
Main Eit repo command.
"""
NAME = "repo"
ALIASES = []
ALLOW_UNPRIVILEGED = False
def __init__(self, args):
super(EitRepo, self).__init__(args)
self._nsargs = None
def _get_parser(self):
""" Overridden from EitCommand """
descriptor = EitCommandDescriptor.obtain_descriptor(
EitRepo.NAME)
parser = argparse.ArgumentParser(
description=descriptor.get_description(),
formatter_class=argparse.RawDescriptionHelpFormatter,
prog="%s %s" % (sys.argv[0], EitRepo.NAME))
subparsers = parser.add_subparsers(
title="action", description=_("manage repositories"),
help=_("available commands"))
show_parser = subparsers.add_parser(
"show", help=_("show repositories and mirrors status"))
show_parser.set_defaults(func=self._show)
add_parser = subparsers.add_parser(
"add", help=_("add a repository"))
add_parser.add_argument(
"id", metavar="<repository>",
help=_("repository name"))
add_parser.add_argument(
"--desc", metavar="<description>", required=True,
help=_("repository description"))
add_parser.add_argument(
"--repo", nargs='+',
metavar="<repo uri>", required=True,
help=_("synchronization URI for both packages and database"))
add_parser.add_argument(
"--repo-only", nargs='*', default=[],
metavar="<database only uri>",
help=_("synchronization URI for database only"))
add_parser.add_argument(
"--pkg-only", nargs='*', default=[],
metavar="<packages only uri>",
help=_("synchronization URI for packages only"))
add_parser.add_argument(
"--base", action="store_true", default=None,
help=_("set this to make this repository the "
"'base' for all the others"))
add_parser.set_defaults(func=self._add)
remove_parser = subparsers.add_parser("remove",
help=_("remove a repository"))
remove_parser.add_argument(
"id", nargs='+',
metavar="<repository>",
help=_("repository name"))
remove_parser.set_defaults(func=self._remove)
return parser
INTRODUCTION = """\
Manage Entropy Server Repositories.
"""
SEE_ALSO = "eit-status(1)"
def man(self):
"""
Overridden from EitCommand.
"""
return self._man()
def parse(self):
""" Overridden from EitCommand """
parser = self._get_parser()
try:
nsargs = parser.parse_args(self._args)
except IOError as err:
sys.stderr.write("%s\n" % (err,))
return parser.print_help, []
# Python 3.3 bug #16308
if not hasattr(nsargs, "func"):
return parser.print_help, []
self._nsargs = nsargs
return self._call_exclusive, [nsargs.func, None]
def _show(self, entropy_server):
entropy_server._show_interface_status()
entropy_server.Mirrors._show_interface_status(
entropy_server.repository())
return 0
def _add(self, entropy_server):
"""
Eit Repo Add command.
"""
current_repos = entropy_server.repositories()
repository_id = self._nsargs.id
desc = self._nsargs.desc
repos = self._nsargs.repo
pkg_only = self._nsargs.pkg_only
repo_only = self._nsargs.repo_only
base = self._nsargs.base
if repository_id in current_repos:
entropy_server.output(
"[%s] %s" % (
purple(repository_id),
blue(_("repository already configured")),),
level="error", importance=1)
return 1
toc = []
toc.append((
purple(_("Repository id:")),
teal(repository_id)))
toc.append((
darkgreen(_("Description:")),
teal(desc)))
base_str = _("Yes")
if base is None:
base_str = _("Unset")
elif not base:
base_str = _("No")
toc.append((
darkgreen(_("Base repository:")),
teal(base_str)))
for uri in repos:
toc.append((purple(_("Packages + Database URI:")), uri))
for uri in repo_only:
toc.append((purple(_("Database only URI:")), uri))
for uri in pkg_only:
toc.append((purple(_("Packages only URI:")), uri))
toc.append(" ")
print_table(entropy_server, toc)
parser = RepositoryConfigParser()
added = parser.add(repository_id, desc, repos,
repo_only, pkg_only, base)
if added:
entropy_server.output(
"[%s] %s" % (
purple(repository_id),
blue(_("repository added succesfully")),))
else:
entropy_server.output(
"[%s] %s" % (
purple(repository_id),
blue(_("cannot add repository")),),
level="warning", importance=1)
return 0
def _remove(self, entropy_server):
"""
Eit Repo Remove command.
"""
current_repos = entropy_server.repositories()
exit_st = 0
for repository_id in self._nsargs.id:
if repository_id not in current_repos:
entropy_server.output(
"[%s] %s" % (
purple(repository_id),
blue(_("repository not available")),),
level="warning", importance=1)
exit_st = 1
continue
parser = RepositoryConfigParser()
removed = parser.remove(repository_id)
if not removed:
exit_st = 1
entropy_server.output(
"[%s] %s" % (
purple(repository_id),
blue(_("cannot remove repository")),),
level="warning", importance=1)
else:
entropy_server.output(
"[%s] %s" % (
purple(repository_id),
blue(_("repository removed succesfully")),))
return exit_st
EitCommandDescriptor.register(
EitCommandDescriptor(
EitRepo,
EitRepo.NAME,
_("manage repositories"))
)
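# Hedged CLI sketch (added; repository name and URI are made up):
#
#   eit repo add my-extra --desc "Extra packages" \
#       --repo ssh://user@host:~entropy/standard/my-extra
#   eit repo show
#   eit repo remove my-extra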
| gpl-2.0 |
ifduyue/django | tests/forms_tests/field_tests/test_charfield.py | 14 | 6327 | from django.forms import (
CharField, HiddenInput, PasswordInput, Textarea, TextInput,
ValidationError,
)
from django.test import SimpleTestCase
from . import FormFieldAssertionsMixin
class CharFieldTest(FormFieldAssertionsMixin, SimpleTestCase):
def test_charfield_1(self):
f = CharField()
self.assertEqual('1', f.clean(1))
self.assertEqual('hello', f.clean('hello'))
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean('')
self.assertEqual('[1, 2, 3]', f.clean([1, 2, 3]))
self.assertIsNone(f.max_length)
self.assertIsNone(f.min_length)
def test_charfield_2(self):
f = CharField(required=False)
self.assertEqual('1', f.clean(1))
self.assertEqual('hello', f.clean('hello'))
self.assertEqual('', f.clean(None))
self.assertEqual('', f.clean(''))
self.assertEqual('[1, 2, 3]', f.clean([1, 2, 3]))
self.assertIsNone(f.max_length)
self.assertIsNone(f.min_length)
def test_charfield_3(self):
f = CharField(max_length=10, required=False)
self.assertEqual('12345', f.clean('12345'))
self.assertEqual('1234567890', f.clean('1234567890'))
msg = "'Ensure this value has at most 10 characters (it has 11).'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean('1234567890a')
self.assertEqual(f.max_length, 10)
self.assertIsNone(f.min_length)
def test_charfield_4(self):
f = CharField(min_length=10, required=False)
self.assertEqual('', f.clean(''))
msg = "'Ensure this value has at least 10 characters (it has 5).'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean('12345')
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertEqual('1234567890a', f.clean('1234567890a'))
self.assertIsNone(f.max_length)
self.assertEqual(f.min_length, 10)
def test_charfield_5(self):
f = CharField(min_length=10, required=True)
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean('')
msg = "'Ensure this value has at least 10 characters (it has 5).'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean('12345')
self.assertEqual('1234567890', f.clean('1234567890'))
self.assertEqual('1234567890a', f.clean('1234567890a'))
self.assertIsNone(f.max_length)
self.assertEqual(f.min_length, 10)
def test_charfield_length_not_int(self):
"""
Setting min_length or max_length to something that is not a number
raises an exception.
"""
with self.assertRaises(ValueError):
CharField(min_length='a')
with self.assertRaises(ValueError):
CharField(max_length='a')
msg = '__init__() takes 1 positional argument but 2 were given'
with self.assertRaisesMessage(TypeError, msg):
CharField('a')
def test_charfield_widget_attrs(self):
"""
CharField.widget_attrs() always returns a dictionary and includes
minlength/maxlength if min_length/max_length are defined on the field
and the widget is not hidden.
"""
# Return an empty dictionary if max_length and min_length are both None.
f = CharField()
self.assertEqual(f.widget_attrs(TextInput()), {})
self.assertEqual(f.widget_attrs(Textarea()), {})
# Return a maxlength attribute equal to max_length.
f = CharField(max_length=10)
self.assertEqual(f.widget_attrs(TextInput()), {'maxlength': '10'})
self.assertEqual(f.widget_attrs(PasswordInput()), {'maxlength': '10'})
self.assertEqual(f.widget_attrs(Textarea()), {'maxlength': '10'})
# Return a minlength attribute equal to min_length.
f = CharField(min_length=5)
self.assertEqual(f.widget_attrs(TextInput()), {'minlength': '5'})
self.assertEqual(f.widget_attrs(PasswordInput()), {'minlength': '5'})
self.assertEqual(f.widget_attrs(Textarea()), {'minlength': '5'})
# Return both maxlength and minlength when both max_length and
# min_length are set.
f = CharField(max_length=10, min_length=5)
self.assertEqual(f.widget_attrs(TextInput()), {'maxlength': '10', 'minlength': '5'})
self.assertEqual(f.widget_attrs(PasswordInput()), {'maxlength': '10', 'minlength': '5'})
self.assertEqual(f.widget_attrs(Textarea()), {'maxlength': '10', 'minlength': '5'})
self.assertEqual(f.widget_attrs(HiddenInput()), {})
def test_charfield_strip(self):
"""
Values have whitespace stripped but not if strip=False.
"""
f = CharField()
self.assertEqual(f.clean(' 1'), '1')
self.assertEqual(f.clean('1 '), '1')
f = CharField(strip=False)
self.assertEqual(f.clean(' 1'), ' 1')
self.assertEqual(f.clean('1 '), '1 ')
def test_strip_before_checking_empty(self):
"""
A whitespace-only value, ' ', is stripped to an empty string and then
converted to the empty value, None.
"""
f = CharField(required=False, empty_value=None)
self.assertIsNone(f.clean(' '))
def test_clean_non_string(self):
"""CharField.clean() calls str(value) before stripping it."""
class StringWrapper:
def __init__(self, v):
self.v = v
def __str__(self):
return self.v
value = StringWrapper(' ')
f1 = CharField(required=False, empty_value=None)
self.assertIsNone(f1.clean(value))
f2 = CharField(strip=False)
self.assertEqual(f2.clean(value), ' ')
def test_charfield_disabled(self):
f = CharField(disabled=True)
self.assertWidgetRendersTo(f, '<input type="text" name="f" id="id_f" disabled required />')
def test_null_characters_prohibited(self):
f = CharField()
msg = 'Null characters are not allowed.'
with self.assertRaisesMessage(ValidationError, msg):
f.clean('\x00something')
| bsd-3-clause |
code-sauce/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/operator_pd_full_test.py | 33 | 2355 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import operator_pd_full
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class OperatorPDFullTest(test.TestCase):
# The only method needing checked (because it isn't part of the parent class)
# is the check for symmetry.
def setUp(self):
self._rng = np.random.RandomState(42)
def _random_positive_def_array(self, *shape):
matrix = self._rng.rand(*shape)
return math_ops.matmul(matrix, matrix, adjoint_b=True).eval()
def testPositiveDefiniteMatrixDoesntRaise(self):
with self.test_session():
matrix = self._random_positive_def_array(2, 3, 3)
operator = operator_pd_full.OperatorPDFull(matrix, verify_pd=True)
operator.to_dense().eval() # Should not raise
def testNegativeDefiniteMatrixRaises(self):
with self.test_session():
matrix = -1 * self._random_positive_def_array(3, 2, 2)
operator = operator_pd_full.OperatorPDFull(matrix, verify_pd=True)
# Could fail inside Cholesky decomposition, or later when we test the
# diag.
with self.assertRaisesOpError("x > 0|LLT"):
operator.to_dense().eval()
def testNonSymmetricMatrixRaises(self):
with self.test_session():
matrix = self._random_positive_def_array(3, 2, 2)
matrix[0, 0, 1] += 0.001
operator = operator_pd_full.OperatorPDFull(matrix, verify_pd=True)
with self.assertRaisesOpError("x == y"):
operator.to_dense().eval()
if __name__ == "__main__":
test.main()
| apache-2.0 |
garnaat/boto | boto/cloudhsm/__init__.py | 111 | 1654 | # Copyright (c) 2015 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo, get_regions
def regions():
"""
Get all available regions for the AWS CloudHSM service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
from boto.cloudhsm.layer1 import CloudHSMConnection
return get_regions('cloudhsm', connection_cls=CloudHSMConnection)
def connect_to_region(region_name, **kw_params):
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
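# Example usage (illustrative only; the region name is an assumption):
#
#     from boto.cloudhsm import connect_to_region
#     conn = connect_to_region('us-east-1')
#     if conn is not None:
#         pass  # conn is a CloudHSMConnection bound to the requested region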
| mit |
michael-weinstein/GATKbyDirectory | GATKbyDirectory0.1.py | 1 | 10718 | #!/usr/bin/env python3
import os
import re
def checkargs(): #subroutine for validating commandline arguments
import argparse #loads the required library for reading the commandline
parser = argparse.ArgumentParser()
parser.add_argument ("-T", "--analysis_type", help = "Which tool to run?") #tells the parser to look for -f and stuff after it and call that the filename
parser.add_argument ("-e", "--standard_min_confidence_threshold_for_emitting", help = "Minimum confidence for emitting.")
parser.add_argument ("-c", "--standard_min_confidence_threshold_for_calling", help = "Minimum confidence for calling?") #tells the parser to look for -f and stuff after it and call that the filename
parser.add_argument ("-R", "--reference_sequence", help = "Reference Genome?") #tells the parser to look for -f and stuff after it and call that the filename
parser.add_argument ("-D", "--directory", help = "BAM File Directory?") #tells the parser to look for -f and stuff after it and call that the filename
parser.add_argument ("--dontUseSoftClippedBases", help = "An option that is useful for RNAseq based variant calling", action = 'store_true')
parser.add_argument ("-9", "--clobber", help = "Overwrite all the things without asking first.", action = 'store_true')
args = parser.parse_args() #puts the arguments into the args object
if not os.path.isfile(args.reference_sequence):
usage('Reference sequence file specified was not found.')
if not os.path.isdir(args.directory):
usage('Target directory specified was not found.')
if not args.standard_min_confidence_threshold_for_emitting: #if not given use the default value
args.standard_min_confidence_threshold_for_emitting = "10"
else:
try:
int(args.standard_min_confidence_threshold_for_emitting)
except:
usage('standard_min_confidence_threshold_for_emitting value given was not integer type')
    if not args.standard_min_confidence_threshold_for_calling: #if not given, leave empty so GATK's own default is used downstream
        args.standard_min_confidence_threshold_for_calling = ""
    else:
        try:
            int(args.standard_min_confidence_threshold_for_calling)
        except:
            usage('standard_min_confidence_threshold_for_calling value given was not integer type')
return (args)
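# Example invocation (illustrative; all paths and values are hypothetical):
#   python GATKbyDirectory0.1.py -T HaplotypeCaller -R /path/to/reference.fa \
#       -D /path/to/bam_directory -e 10 -c 30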
def usage(sin): #This subroutine prints directions
print ('Error: ' + sin)
print ('This program is designed to automate the generation and submission of GATK scatter/gather jobs on the Hoffman2 cluster.')
quit("Please correct errors and try again.")
class DirectoryItem(object):
def __init__(self, filename,directory):
self.filename = filename
self.isDirectory = os.path.isdir(directory + "/" + filename)
def filelist(directory):
import os
allfiles = os.listdir(directory)
items = []
for file in allfiles:
items.append(DirectoryItem(file,directory))
return items
def yesanswer(question): #asks the question passed in and returns True if the answer is yes, False if the answer is no, and keeps the user in a loop until one of those is given. Also useful for walking students through basic logical python functions
answer = False #initializes the answer variable to false. Not absolutely necessary, since it should be undefined at this point and test to false, but explicit is always better than implicit
while not answer: #enters the loop and stays in it until answer is equal to True
print (question + ' (Y/N)') #Asks the question contained in the argument passed into this subroutine
answer = input('>>') #sets answer equal to some value input by the user
if str(answer) == 'y' or str(answer) == 'Y': #checks if the answer is a valid yes answer
return True #sends back a value of True because of the yes answer
elif str(answer) == 'n' or str(answer) == 'N': #checks to see if the answer is a valid form of no
return False #sends back a value of False because it was not a yes answer
else: #if the answer is not a value indicating a yes or no
print ('Invalid response.')
            answer = False #set answer to false so the loop will continue until a satisfactory answer is given
class Job(object):
def __init__(self, item, args):
self.shortname = item.filename
self.fullname = (args.directory + "/" + item.filename)
extension = self.shortname[len(self.shortname)-4:len(self.shortname)]
if extension != ".bam":
self.isBamFile = False
else:
self.isBamFile = True
if item.isDirectory:
subdirectorylist = os.listdir(self.fullname)
subdirectorybams = []
for subdirectoryitem in subdirectorylist:
if re.match('.*\.bam$', subdirectoryitem):
self.isBamFile = True
subdirectorybams.append(self.fullname + "/" + subdirectoryitem)
if self.isBamFile:
writelist = True
if os.path.isfile(self.shortname + ".list") and not args.clobber:
writelist = yesanswer("List file for " + item.filename + " directory already exists. Overwrite?")
if writelist or args.clobber:
listfile = open(self.shortname + ".list",'w')
for subdirectoryitem in subdirectorybams:
if not os.path.isfile(subdirectoryitem + ".bai"):
if not yesanswer(self.shortname + ".bai (BAM Index File) is missing. Use anyway?"):
if not yesanswer("Do you want to continue this run? No jobs have been sent to qsub yet."):
quit("Goodbye!")
else:
continue
listfile.write(os.getcwd() + "/" + subdirectoryitem + "\n")
listfile.close()
self.fullname = self.shortname + ".list"
else:
self.isBamFile = yesanswer("Include existing list file for " + item.filename + " in this run?")
if self.isBamFile:
if os.path.isfile(self.fullname + ".vcf") and not args.clobber:
self.isBamFile = yesanswer(self.shortname + ".vcf already exists. Overwrite?")
if not self.isBamFile:
if not yesanswer("Do you want to continue this run? No jobs have been sent to qsub yet."):
quit("Goodbye!")
if not os.path.isfile(self.fullname + ".bai") and not self.fullname[len(self.fullname)-5:len(self.fullname)] == ".list":
self.isBamFile = yesanswer(self.shortname + ".bai (BAM Index File) is missing. Submit job anyway?")
if not self.isBamFile:
if not yesanswer("Do you want to continue this run? No jobs have been sent to qsub yet."):
quit("Goodbye!")
    def create (self, args): #create the scatter/gather scala object
# self.cleanfilename = re.sub('\W','',self.shortname)
# self.cleanfilename = re.sub('_','',self.cleanfilename)
if os.path.isfile(self.shortname + ".scatter.scala") and not args.clobber:
if not yesanswer(self.shortname + ".scatter.scala already exists. Overwrite?"):
return False
scala = open(self.shortname + ".scatter.scala",'w')
scala.write("import org.broadinstitute.gatk.queue.QScript" + "\n")
scala.write("import org.broadinstitute.gatk.queue.extensions.gatk._" + "\n")
scala.write("class callVariants extends QScript {" + "\n")
scala.write("\tdef script() {" + "\n")
if (args.analysis_type).lower() == 'haplotypecaller':
analysistype = "HaplotypeCaller"
objectname = "hc"
elif (args.analysis_type).lower() == 'unifiedgenotyper':
analysistype = "UnifiedGenotyper"
objectname = "genotyper"
else:
quit("Invalid Analysis type. Must be either UnifiedGenotyper or HaplotypeCaller." + "\n")
scala.write("\t\tval " + objectname + " = new " + analysistype + "\n")
scala.write("\t\t" + objectname + ".reference_sequence = new File (\"" + args.reference_sequence + "\")" + "\n")
if args.standard_min_confidence_threshold_for_emitting != "":
scala.write("\t\t" + objectname + ".standard_min_confidence_threshold_for_emitting = " + args.standard_min_confidence_threshold_for_emitting + "\n")
if args.standard_min_confidence_threshold_for_calling != "":
scala.write("\t\t" + objectname + ".standard_min_confidence_threshold_for_calling = " + args.standard_min_confidence_threshold_for_calling + "\n")
scala.write("\t\t" + objectname + ".input_file :+= new File (\"" + self.fullname + "\")" + "\n")
scala.write("\t\t" + objectname + ".out = new File (\"" + self.fullname + ".vcf\")" + "\n")
if args.dontUseSoftClippedBases:
scala.write("\t\t" + objectname + ".dontUseSoftClippedBases = true" + "\n")
scala.write("\t\t" + objectname + ".scatterCount = 20" + "\n")
scala.write("\t\t" + objectname + ".memoryLimit = 2" + "\n")
scala.write("\t\t" + "add(" + objectname + ")" + "\n")
scala.write("\t" + "}" + "\n")
scala.write("}" + "\n")
scala.close()
return True
def execute (self, args): #sends the scatter/gather job to qsub
os.system('java -Xmx1g -Djava.io.tmpdir=tmp -jar /u/local/apps/gatk-queue/3.2.2/Queue.jar -S ' + self.shortname +'.scatter.scala -startFromScratch -qsub -jobResReq "h_data=4g,h_rt=24:00:00" -run')
return True
def main():
print ("Checking command line arguments...", end = "")
args = checkargs()
# if args.clobber:
# if not yesanswer('Clobber set, files may be overwritten without asking. Continue?'):
# quit("Goodbye!")
print ("OK\nGetting target directory contents...", end = "")
directorycontents = filelist(args.directory) #returns an array of objects with filename and if it is a directory
print ("OK\nCreating a list of GATK jobs...", end = "")
jobs = []
for item in directorycontents:
jobs.append(Job(item, args))
print ("OK\nCreating scatter/gather scala objects...")
for job in jobs:
if job.isBamFile:
print("for " + job.shortname)
job.create(args)
print ("OK\nSubmitting jobs to queue...")
for job in jobs:
if job.isBamFile:
print("for " + job.shortname)
job.execute(args)
print ("OK")
quit("Done!")
main() | gpl-3.0 |
flyfei/python-for-android | python3-alpha/python3-src/Lib/idlelib/CodeContext.py | 128 | 8353 | """CodeContext - Extension to display the block context above the edit window
Once code has scrolled off the top of a window, it can be difficult to
determine which block you are in. This extension implements a pane at the top
of each IDLE edit window which provides block structure hints. These hints are
the lines which contain the block opening keywords, e.g. 'if', for the
enclosing block. The number of hint lines is determined by the numlines
variable in the CodeContext section of config-extensions.def. Lines which do
not open blocks are not shown in the context hints pane.
"""
import tkinter
from tkinter.constants import TOP, LEFT, X, W, SUNKEN
import re
from sys import maxsize as INFINITY
from idlelib.configHandler import idleConf
BLOCKOPENERS = set(["class", "def", "elif", "else", "except", "finally", "for",
"if", "try", "while", "with"])
UPDATEINTERVAL = 100 # millisec
FONTUPDATEINTERVAL = 1000 # millisec
getspacesfirstword =\
lambda s, c=re.compile(r"^(\s*)(\w*)"): c.match(s).groups()
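# Illustrative behavior of the helper above (comment added for clarity):
#   getspacesfirstword("    if x:")  ->  ("    ", "if")
#   getspacesfirstword("pass")       ->  ("", "pass")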
class CodeContext:
menudefs = [('options', [('!Code Conte_xt', '<<toggle-code-context>>')])]
context_depth = idleConf.GetOption("extensions", "CodeContext",
"numlines", type="int", default=3)
bgcolor = idleConf.GetOption("extensions", "CodeContext",
"bgcolor", type="str", default="LightGray")
fgcolor = idleConf.GetOption("extensions", "CodeContext",
"fgcolor", type="str", default="Black")
def __init__(self, editwin):
self.editwin = editwin
self.text = editwin.text
self.textfont = self.text["font"]
self.label = None
# self.info is a list of (line number, indent level, line text, block
# keyword) tuples providing the block structure associated with
# self.topvisible (the linenumber of the line displayed at the top of
# the edit window). self.info[0] is initialized as a 'dummy' line which
# starts the toplevel 'block' of the module.
self.info = [(0, -1, "", False)]
self.topvisible = 1
visible = idleConf.GetOption("extensions", "CodeContext",
"visible", type="bool", default=False)
if visible:
self.toggle_code_context_event()
self.editwin.setvar('<<toggle-code-context>>', True)
# Start two update cycles, one for context lines, one for font changes.
self.text.after(UPDATEINTERVAL, self.timer_event)
self.text.after(FONTUPDATEINTERVAL, self.font_timer_event)
def toggle_code_context_event(self, event=None):
if not self.label:
# Calculate the border width and horizontal padding required to
# align the context with the text in the main Text widget.
#
# All values are passed through int(str(<value>)), since some
# values may be pixel objects, which can't simply be added to ints.
widgets = self.editwin.text, self.editwin.text_frame
            # Calculate the required horizontal padding
padx = 0
for widget in widgets:
padx += int(str( widget.pack_info()['padx'] ))
padx += int(str( widget.cget('padx') ))
# Calculate the required border width
border = 0
for widget in widgets:
border += int(str( widget.cget('border') ))
self.label = tkinter.Label(self.editwin.top,
text="\n" * (self.context_depth - 1),
anchor=W, justify=LEFT,
font=self.textfont,
bg=self.bgcolor, fg=self.fgcolor,
width=1, #don't request more than we get
padx=padx, border=border,
relief=SUNKEN)
# Pack the label widget before and above the text_frame widget,
# thus ensuring that it will appear directly above text_frame
self.label.pack(side=TOP, fill=X, expand=False,
before=self.editwin.text_frame)
else:
self.label.destroy()
self.label = None
idleConf.SetOption("extensions", "CodeContext", "visible",
str(self.label is not None))
idleConf.SaveUserCfgFiles()
def get_line_info(self, linenum):
"""Get the line indent value, text, and any block start keyword
If the line does not start a block, the keyword value is False.
The indentation of empty lines (or comment lines) is INFINITY.
"""
text = self.text.get("%d.0" % linenum, "%d.end" % linenum)
spaces, firstword = getspacesfirstword(text)
opener = firstword in BLOCKOPENERS and firstword
if len(text) == len(spaces) or text[len(spaces)] == '#':
indent = INFINITY
else:
indent = len(spaces)
return indent, text, opener
def get_context(self, new_topvisible, stopline=1, stopindent=0):
"""Get context lines, starting at new_topvisible and working backwards.
Stop when stopline or stopindent is reached. Return a tuple of context
data and the indent level at the top of the region inspected.
"""
assert stopline > 0
lines = []
# The indentation level we are currently in:
lastindent = INFINITY
# For a line to be interesting, it must begin with a block opening
# keyword, and have less indentation than lastindent.
for linenum in range(new_topvisible, stopline-1, -1):
indent, text, opener = self.get_line_info(linenum)
if indent < lastindent:
lastindent = indent
if opener in ("else", "elif"):
# We also show the if statement
lastindent += 1
if opener and linenum < new_topvisible and indent >= stopindent:
lines.append((linenum, indent, text, opener))
if lastindent <= stopindent:
break
lines.reverse()
return lines, lastindent
def update_code_context(self):
"""Update context information and lines visible in the context pane.
"""
new_topvisible = int(self.text.index("@0,0").split('.')[0])
if self.topvisible == new_topvisible: # haven't scrolled
return
if self.topvisible < new_topvisible: # scroll down
lines, lastindent = self.get_context(new_topvisible,
self.topvisible)
# retain only context info applicable to the region
# between topvisible and new_topvisible:
while self.info[-1][1] >= lastindent:
del self.info[-1]
elif self.topvisible > new_topvisible: # scroll up
stopindent = self.info[-1][1] + 1
# retain only context info associated
# with lines above new_topvisible:
while self.info[-1][0] >= new_topvisible:
stopindent = self.info[-1][1]
del self.info[-1]
lines, lastindent = self.get_context(new_topvisible,
self.info[-1][0]+1,
stopindent)
self.info.extend(lines)
self.topvisible = new_topvisible
# empty lines in context pane:
context_strings = [""] * max(0, self.context_depth - len(self.info))
# followed by the context hint lines:
context_strings += [x[2] for x in self.info[-self.context_depth:]]
self.label["text"] = '\n'.join(context_strings)
def timer_event(self):
if self.label:
self.update_code_context()
self.text.after(UPDATEINTERVAL, self.timer_event)
def font_timer_event(self):
newtextfont = self.text["font"]
if self.label and newtextfont != self.textfont:
self.textfont = newtextfont
self.label["font"] = self.textfont
self.text.after(FONTUPDATEINTERVAL, self.font_timer_event)
| apache-2.0 |
sanbinabu/Wox | PythonHome/Lib/site-packages/chardet/sjisprober.py | 1777 | 3764 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(SJISSMModel)
self._mDistributionAnalyzer = SJISDistributionAnalysis()
self._mContextAnalyzer = SJISContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return self._mContextAnalyzer.get_charset_name()
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
- charLen], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
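# Example usage (illustrative; the sample bytes are an assumption -- they
# encode a short Japanese greeting in Shift_JIS):
#
#     prober = SJISProber()
#     prober.feed(b'\x82\xb1\x82\xf1\x82\xc9\x82\xbf\x82\xcd')
#     print(prober.get_charset_name(), prober.get_confidence())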
| mit |
wzyuliyang/scrapy | scrapy/utils/benchserver.py | 130 | 1312 | import random
from six.moves.urllib.parse import urlencode
from twisted.web.server import Site
from twisted.web.resource import Resource
from twisted.internet import reactor
class Root(Resource):
isLeaf = True
def getChild(self, name, request):
return self
def render(self, request):
total = _getarg(request, 'total', 100, int)
show = _getarg(request, 'show', 10, int)
nlist = [random.randint(1, total) for _ in range(show)]
request.write("<html><head></head><body>")
args = request.args.copy()
for nl in nlist:
args['n'] = nl
argstr = urlencode(args, doseq=True)
request.write("<a href='/follow?{0}'>follow {1}</a><br>"
.format(argstr, nl))
request.write("</body></html>")
return ''
def _getarg(request, name, default=None, type=str):
return type(request.args[name][0]) \
if name in request.args else default
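# Example request against a running bench server (URL is illustrative):
#   http://localhost:8998/follow?total=100&show=10
# renders a page of 10 links, each pointing back at /follow with a random n.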
if __name__ == '__main__':
root = Root()
factory = Site(root)
httpPort = reactor.listenTCP(8998, Site(root))
def _print_listening():
httpHost = httpPort.getHost()
print("Bench server at http://{}:{}".format(httpHost.host, httpHost.port))
reactor.callWhenRunning(_print_listening)
reactor.run()
| bsd-3-clause |
cheral/orange3 | Orange/widgets/utils/overlay.py | 2 | 20113 | """
Overlay Message Widget
----------------------
A Widget to display a temporary dismissable message over another widget.
"""
import sys
import enum
import functools
import operator
from collections import namedtuple
from AnyQt.QtWidgets import (
QHBoxLayout, QPushButton, QLabel, QSizePolicy, QStyle, QAbstractButton,
QStyleOptionButton, QStylePainter, QFocusFrame, QWidget, QStyleOption
)
from AnyQt.QtGui import QIcon, QPixmap, QPainter
from AnyQt.QtCore import Qt, QSize, QRect, QPoint, QEvent, QTimer
from AnyQt.QtCore import pyqtSignal as Signal, pyqtSlot as Slot
class OverlayWidget(QWidget):
"""
A widget positioned on top of another widget.
"""
def __init__(self, parent=None, alignment=Qt.AlignCenter, **kwargs):
super().__init__(parent, **kwargs)
self.setContentsMargins(0, 0, 0, 0)
self.__alignment = alignment
self.__widget = None
def setWidget(self, widget):
"""
Set the widget over which this overlay should be displayed (anchored).
:type widget: QWidget
"""
if self.__widget is not None:
self.__widget.removeEventFilter(self)
self.__widget.destroyed.disconnect(self.__on_destroyed)
self.__widget = widget
if self.__widget is not None:
self.__widget.installEventFilter(self)
self.__widget.destroyed.connect(self.__on_destroyed)
if self.__widget is None:
self.hide()
else:
self.__layout()
def widget(self):
"""
Return the overlaid widget.
:rtype: QWidget | None
"""
return self.__widget
def setAlignment(self, alignment):
"""
Set overlay alignment.
:type alignment: Qt.Alignment
"""
if self.__alignment != alignment:
self.__alignment = alignment
if self.__widget is not None:
self.__layout()
def alignment(self):
"""
Return the overlay alignment.
:rtype: Qt.Alignment
"""
return self.__alignment
def eventFilter(self, recv, event):
# reimplemented
if recv is self.__widget:
if event.type() == QEvent.Resize or event.type() == QEvent.Move:
self.__layout()
elif event.type() == QEvent.Show:
self.show()
elif event.type() == QEvent.Hide:
self.hide()
return super().eventFilter(recv, event)
def event(self, event):
# reimplemented
if event.type() == QEvent.LayoutRequest:
self.__layout()
return True
else:
return super().event(event)
def paintEvent(self, event):
opt = QStyleOption()
opt.initFrom(self)
painter = QPainter(self)
self.style().drawPrimitive(QStyle.PE_Widget, opt, painter, self)
def __layout(self):
# position itself over `widget`
widget = self.__widget
if widget is None:
return
alignment = self.__alignment
policy = self.sizePolicy()
if widget.isWindow():
bounds = widget.geometry()
else:
bounds = QRect(widget.mapToGlobal(QPoint(0, 0)),
widget.size())
if self.isWindow():
bounds = bounds
else:
bounds = QRect(self.parent().mapFromGlobal(bounds.topLeft()),
bounds.size())
sh = self.sizeHint()
minsh = self.minimumSizeHint()
minsize = self.minimumSize()
if minsize.isNull():
minsize = minsh
maxsize = bounds.size().boundedTo(self.maximumSize())
minsize = minsize.boundedTo(maxsize)
effectivesh = sh.expandedTo(minsize).boundedTo(maxsize)
hpolicy = policy.horizontalPolicy()
vpolicy = policy.verticalPolicy()
def getsize(hint, minimum, maximum, policy):
if policy == QSizePolicy.Ignored:
return maximum
elif policy & QSizePolicy.ExpandFlag:
return maximum
else:
return max(hint, minimum)
width = getsize(effectivesh.width(), minsize.width(),
maxsize.width(), hpolicy)
heightforw = self.heightForWidth(width)
if heightforw > 0:
height = getsize(heightforw, minsize.height(),
maxsize.height(), vpolicy)
else:
height = getsize(effectivesh.height(), minsize.height(),
maxsize.height(), vpolicy)
size = QSize(width, height)
if alignment & Qt.AlignLeft:
x = bounds.x()
elif alignment & Qt.AlignRight:
x = bounds.right() - size.width()
else:
x = bounds.x() + max(0, bounds.width() - size.width()) // 2
if alignment & Qt.AlignTop:
y = bounds.y()
elif alignment & Qt.AlignBottom:
y = bounds.bottom() - size.height()
else:
y = bounds.y() + max(0, bounds.height() - size.height()) // 2
geom = QRect(QPoint(x, y), size)
self.setGeometry(geom)
@Slot()
def __on_destroyed(self):
self.__widget = None
if self.isVisible():
self.hide()
class SimpleButton(QAbstractButton):
"""
A simple icon button widget.
"""
def __init__(self, parent=None, **kwargs):
super().__init__(parent, **kwargs)
self.__focusframe = None
def focusInEvent(self, event):
# reimplemented
event.accept()
self.__focusframe = QFocusFrame(self)
self.__focusframe.setWidget(self)
def focusOutEvent(self, event):
# reimplemented
event.accept()
self.__focusframe.deleteLater()
self.__focusframe = None
def sizeHint(self):
# reimplemented
self.ensurePolished()
iconsize = self.iconSize()
icon = self.icon()
if not icon.isNull():
iconsize = icon.actualSize(iconsize)
return iconsize
def minimumSizeHint(self):
# reimplemented
return self.sizeHint()
def paintEvent(self, event):
# reimplemented
painter = QStylePainter(self)
option = QStyleOptionButton()
option.initFrom(self)
option.icon = self.icon()
option.iconSize = self.iconSize()
icon = self.icon()
if not icon.isNull():
if option.state & QStyle.State_Active:
mode = (QIcon.Normal if option.state & QStyle.State_MouseOver
else QIcon.Active)
else:
mode = QIcon.Disabled
pixmap = icon.pixmap(option.iconSize, mode, )
painter.drawItemPixmap(option.rect, Qt.AlignCenter, pixmap)
class MessageWidget(QWidget):
"""
A widget displaying a simple message to the user.
This is an alternative to a full QMessageBox intended for inline
modeless messages.
[[icon] {Message text} (Ok) (Cancel)]
"""
#: Emitted when a button with the AcceptRole is clicked
accepted = Signal()
#: Emitted when a button with the RejectRole is clicked
rejected = Signal()
#: Emitted when a button with the HelpRole is clicked
helpRequested = Signal()
#: Emitted when a button is clicked
clicked = Signal(QAbstractButton)
class StandardButton(enum.IntEnum):
NoButton, Ok, Close, Help = 0x0, 0x1, 0x2, 0x4
NoButton, Ok, Close, Help = list(StandardButton)
class ButtonRole(enum.IntEnum):
InvalidRole, AcceptRole, RejectRole, HelpRole = 0, 1, 2, 3
InvalidRole, AcceptRole, RejectRole, HelpRole = list(ButtonRole)
_Button = namedtuple("_Button", ["button", "role", "stdbutton"])
def __init__(self, parent=None, icon=QIcon(), text="", wordWrap=False,
textFormat=Qt.AutoText, standardButtons=NoButton, **kwargs):
super().__init__(parent, **kwargs)
self.__text = text
self.__icon = QIcon()
self.__wordWrap = wordWrap
self.__standardButtons = MessageWidget.NoButton
self.__buttons = []
layout = QHBoxLayout()
layout.setContentsMargins(8, 0, 8, 0)
self.__iconlabel = QLabel(objectName="icon-label")
self.__iconlabel.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.__textlabel = QLabel(objectName="text-label", text=text,
wordWrap=wordWrap, textFormat=textFormat)
if sys.platform == "darwin":
self.__textlabel.setAttribute(Qt.WA_MacSmallSize)
layout.addWidget(self.__iconlabel)
layout.addWidget(self.__textlabel)
self.setLayout(layout)
self.setIcon(icon)
self.setStandardButtons(standardButtons)
def setText(self, text):
"""
Set the current message text.
:type message: str
"""
if self.__text != text:
self.__text = text
self.__textlabel.setText(text)
def text(self):
"""
Return the current message text.
:rtype: str
"""
return self.__text
def setIcon(self, icon):
"""
Set the message icon.
:type icon: QIcon | QPixmap | QString | QStyle.StandardPixmap
"""
if isinstance(icon, QStyle.StandardPixmap):
icon = self.style().standardIcon(icon)
else:
icon = QIcon(icon)
if self.__icon != icon:
self.__icon = QIcon(icon)
if not self.__icon.isNull():
size = self.style().pixelMetric(
QStyle.PM_SmallIconSize, None, self)
pm = self.__icon.pixmap(QSize(size, size))
else:
pm = QPixmap()
self.__iconlabel.setPixmap(pm)
self.__iconlabel.setVisible(not pm.isNull())
def icon(self):
"""
Return the current icon.
:rtype: QIcon
"""
return QIcon(self.__icon)
def setWordWrap(self, wordWrap):
"""
Set the message text wrap property
:type wordWrap: bool
"""
if self.__wordWrap != wordWrap:
self.__wordWrap = wordWrap
self.__textlabel.setWordWrap(wordWrap)
def wordWrap(self):
"""
Return the message text wrap property.
:rtype: bool
"""
return self.__wordWrap
def setTextFormat(self, textFormat):
"""
Set message text format
:type textFormat: Qt.TextFormat
"""
self.__textlabel.setTextFormat(textFormat)
def textFormat(self):
"""
Return the message text format.
:rtype: Qt.TextFormat
"""
return self.__textlabel.textFormat()
def changeEvent(self, event):
# reimplemented
if event.type() == 177: # QEvent.MacSizeChange:
...
super().changeEvent(event)
def setStandardButtons(self, buttons):
for button in MessageWidget.StandardButton:
existing = self.button(button)
if button & buttons and existing is None:
self.addButton(button)
elif existing is not None:
self.removeButton(existing)
def standardButtons(self):
return functools.reduce(
operator.ior,
(slot.stdbutton for slot in self.__buttons
if slot.stdbutton is not None),
MessageWidget.NoButton)
def addButton(self, button, *rolearg):
"""
addButton(QAbstractButton, ButtonRole)
addButton(str, ButtonRole)
addButton(StandardButton)
Add and return a button
"""
stdbutton = None
if isinstance(button, QAbstractButton):
if len(rolearg) != 1:
raise TypeError("Wrong number of arguments for "
"addButton(QAbstractButton, role)")
role = rolearg[0]
elif isinstance(button, MessageWidget.StandardButton):
if len(rolearg) != 0:
raise TypeError("Wrong number of arguments for "
"addButton(StandardButton)")
stdbutton = button
if button == MessageWidget.Ok:
role = MessageWidget.AcceptRole
button = QPushButton("Ok", default=False, autoDefault=False)
elif button == MessageWidget.Close:
role = MessageWidget.RejectRole
# button = QPushButton(
# default=False, autoDefault=False, flat=True,
# icon=QIcon(self.style().standardIcon(
# QStyle.SP_TitleBarCloseButton)))
button = SimpleButton(
icon=QIcon(self.style().standardIcon(
QStyle.SP_TitleBarCloseButton)))
elif button == MessageWidget.Help:
role = MessageWidget.HelpRole
button = QPushButton("Help", default=False, autoDefault=False)
elif isinstance(button, str):
if len(rolearg) != 1:
raise TypeError("Wrong number of arguments for "
"addButton(str, ButtonRole)")
role = rolearg[0]
button = QPushButton(button, default=False, autoDefault=False)
if sys.platform == "darwin":
button.setAttribute(Qt.WA_MacSmallSize)
self.__buttons.append(MessageWidget._Button(button, role, stdbutton))
button.clicked.connect(self.__button_clicked)
self.__relayout()
return button
def removeButton(self, button):
"""
Remove a `button`.
:type button: QAbstractButton
"""
slot = [s for s in self.__buttons if s.button is button]
if slot:
slot = slot[0]
self.__buttons.remove(slot)
self.layout().removeWidget(slot.button)
slot.button.setParent(None)
def buttonRole(self, button):
"""
Return the ButtonRole for button
        :type button: QAbstractButton
"""
for slot in self.__buttons:
if slot.button is button:
return slot.role
else:
return MessageWidget.InvalidRole
def button(self, standardButton):
"""
Return the button for the StandardButton.
:type standardButton: StandardButton
"""
for slot in self.__buttons:
if slot.stdbutton == standardButton:
return slot.button
else:
return None
def __button_clicked(self):
button = self.sender()
role = self.buttonRole(button)
self.clicked.emit(button)
if role == MessageWidget.AcceptRole:
self.accepted.emit()
self.close()
elif role == MessageWidget.RejectRole:
self.rejected.emit()
self.close()
elif role == MessageWidget.HelpRole:
self.helpRequested.emit()
def __relayout(self):
for slot in self.__buttons:
self.layout().removeWidget(slot.button)
order = {
MessageOverlayWidget.HelpRole: 0,
MessageOverlayWidget.AcceptRole: 2,
MessageOverlayWidget.RejectRole: 3,
}
orderd = sorted(self.__buttons,
key=lambda slot: order.get(slot.role, -1))
prev = self.__textlabel
for slot in orderd:
self.layout().addWidget(slot.button)
QWidget.setTabOrder(prev, slot.button)
def proxydoc(func):
return functools.wraps(func, assigned=["__doc__"], updated=[])
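# Illustrative note: proxydoc copies only the wrapped function's __doc__, so
#
#     @proxydoc(MessageWidget.setText)
#     def setText(self, text): ...
#
# keeps the overlay's docstring in sync with MessageWidget.setText without
# copying its name or module attributes.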
class MessageOverlayWidget(OverlayWidget):
#: Emitted when a button with an Accept role is clicked
accepted = Signal()
#: Emitted when a button with a RejectRole is clicked
rejected = Signal()
#: Emitted when a button is clicked
clicked = Signal(QAbstractButton)
#: Emitted when a button with HelpRole is clicked
helpRequested = Signal()
NoButton, Ok, Close, Help = list(MessageWidget.StandardButton)
InvalidRole, AcceptRole, RejectRole, HelpRole = \
list(MessageWidget.ButtonRole)
def __init__(self, parent=None, text="", icon=QIcon(),
alignment=Qt.AlignTop, wordWrap=False,
standardButtons=NoButton, **kwargs):
super().__init__(parent, alignment=alignment, **kwargs)
layout = QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
self.__msgwidget = MessageWidget(
parent=self, text=text, icon=icon, wordWrap=wordWrap,
standardButtons=standardButtons
)
self.__msgwidget.accepted.connect(self.accepted)
self.__msgwidget.rejected.connect(self.rejected)
self.__msgwidget.clicked.connect(self.clicked)
self.__msgwidget.helpRequested.connect(self.helpRequested)
self.__msgwidget.accepted.connect(self.hide)
self.__msgwidget.rejected.connect(self.hide)
layout.addWidget(self.__msgwidget)
self.setLayout(layout)
@proxydoc(MessageWidget.setText)
def setText(self, text):
self.__msgwidget.setText(text)
@proxydoc(MessageWidget.text)
def text(self):
return self.__msgwidget.text()
@proxydoc(MessageWidget.setIcon)
def setIcon(self, icon):
self.__msgwidget.setIcon(icon)
@proxydoc(MessageWidget.icon)
def icon(self):
return self.__msgwidget.icon()
@proxydoc(MessageWidget.textFormat)
    def textFormat(self):
return self.__msgwidget.textFormat()
@proxydoc(MessageWidget.setTextFormat)
def setTextFormat(self, textFormat):
self.__msgwidget.setTextFormat(textFormat)
@proxydoc(MessageWidget.setStandardButtons)
def setStandardButtons(self, buttons):
self.__msgwidget.setStandardButtons(buttons)
@proxydoc(MessageWidget.addButton)
def addButton(self, *args):
return self.__msgwidget.addButton(*args)
@proxydoc(MessageWidget.removeButton)
def removeButton(self, button):
self.__msgwidget.removeButton(button)
@proxydoc(MessageWidget.buttonRole)
def buttonRole(self, button):
return self.__msgwidget.buttonRole(button)
@proxydoc(MessageWidget.button)
def button(self, standardButton):
return self.__msgwidget.button(standardButton)
import unittest
class TestOverlay(unittest.TestCase):
def setUp(self):
from AnyQt.QtWidgets import QApplication
app = QApplication.instance()
if app is None:
app = QApplication([])
self.app = app
def _exec(self, timeout):
QTimer.singleShot(timeout, self.app.quit)
return self.app.exec_()
def tearDown(self):
del self.app
def test_overlay(self):
container = QWidget()
overlay = MessageOverlayWidget(parent=container)
overlay.setWidget(container)
overlay.setIcon(QStyle.SP_MessageBoxInformation)
container.show()
container.raise_()
self._exec(500)
self.assertTrue(overlay.isVisible())
overlay.setText("Hello world! It's so nice here")
self._exec(500)
button_ok = overlay.addButton(MessageOverlayWidget.Ok)
button_close = overlay.addButton(MessageOverlayWidget.Close)
button_help = overlay.addButton(MessageOverlayWidget.Help)
self.assertTrue(all([button_ok, button_close, button_help]))
self.assertIs(overlay.button(MessageOverlayWidget.Ok), button_ok)
self.assertIs(overlay.button(MessageOverlayWidget.Close), button_close)
self.assertIs(overlay.button(MessageOverlayWidget.Help), button_help)
button = overlay.addButton("Click Me!",
MessageOverlayWidget.AcceptRole)
self.assertIsNot(button, None)
self.assertTrue(overlay.buttonRole(button),
MessageOverlayWidget.AcceptRole)
self._exec(10000)
| bsd-2-clause |
IPVL/swift-kilo | swift/common/middleware/proxy_logging.py | 11 | 13396 | # Copyright (c) 2010-2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Logging middleware for the Swift proxy.
This serves as both the default logging implementation and an example of how
to plug in your own logging format/method.
The logging format implemented below is as follows:
client_ip remote_addr datetime request_method request_path protocol
status_int referer user_agent auth_token bytes_recvd bytes_sent
client_etag transaction_id headers request_time source log_info
request_start_time request_end_time
These values are space-separated, and each is url-encoded, so that they can
be separated with a simple .split()
* remote_addr is the contents of the REMOTE_ADDR environment variable, while
client_ip is swift's best guess at the end-user IP, extracted variously
from the X-Forwarded-For header, X-Cluster-Ip header, or the REMOTE_ADDR
environment variable.
* source (swift.source in the WSGI environment) indicates the code
that generated the request, such as most middleware. (See below for
more detail.)
* log_info (swift.log_info in the WSGI environment) is for additional
information that could prove quite useful, such as any x-delete-at
value or other "behind the scenes" activity that might not
otherwise be detectable from the plain log information. Code that
wishes to add additional log information should use code like
``env.setdefault('swift.log_info', []).append(your_info)`` so as to
not disturb others' log information.
* Values that are missing (e.g. due to a header not being present) or zero
are generally represented by a single hyphen ('-').
The proxy-logging can be used twice in the proxy server's pipeline when there
is middleware installed that can return custom responses that don't follow the
standard pipeline to the proxy server.
For example, with staticweb, the middleware might intercept a request to
/v1/AUTH_acc/cont/, make a subrequest to the proxy to retrieve
/v1/AUTH_acc/cont/index.html and, in effect, respond to the client's original
request using the 2nd request's body. In this instance the subrequest will be
logged by the rightmost middleware (with a swift.source set) and the outgoing
request (with body overridden) will be logged by leftmost middleware.
Requests that follow the normal pipeline (use the same wsgi environment
throughout) will not be double logged because an environment variable
(swift.proxy_access_log_made) is checked/set when a log is made.
All middleware making subrequests should take care to set swift.source when
needed. With the doubled proxy logs, any consumer/processor of swift's proxy
logs should look at the swift.source field, the rightmost log value, to decide
if this is a middleware subrequest or not. A log processor calculating
bandwidth usage will want to only sum up logs with no swift.source.
"""
import time
from urllib import quote, unquote
from swift.common.swob import Request
from swift.common.utils import (get_logger, get_remote_client,
get_valid_utf8_str, config_true_value,
InputProxy, list_from_csv, get_policy_index)
QUOTE_SAFE = '/:'
class ProxyLoggingMiddleware(object):
"""
Middleware that logs Swift proxy requests in the swift log format.
"""
def __init__(self, app, conf, logger=None):
self.app = app
self.log_hdrs = config_true_value(conf.get(
'access_log_headers',
conf.get('log_headers', 'no')))
log_hdrs_only = list_from_csv(conf.get(
'access_log_headers_only', ''))
self.log_hdrs_only = [x.title() for x in log_hdrs_only]
# The leading access_* check is in case someone assumes that
# log_statsd_valid_http_methods behaves like the other log_statsd_*
# settings.
self.valid_methods = conf.get(
'access_log_statsd_valid_http_methods',
conf.get('log_statsd_valid_http_methods',
'GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS'))
self.valid_methods = [m.strip().upper() for m in
self.valid_methods.split(',') if m.strip()]
access_log_conf = {}
for key in ('log_facility', 'log_name', 'log_level', 'log_udp_host',
'log_udp_port', 'log_statsd_host', 'log_statsd_port',
'log_statsd_default_sample_rate',
'log_statsd_sample_rate_factor',
'log_statsd_metric_prefix'):
value = conf.get('access_' + key, conf.get(key, None))
if value:
access_log_conf[key] = value
self.access_logger = logger or get_logger(access_log_conf,
log_route='proxy-access')
self.access_logger.set_statsd_prefix('proxy-server')
self.reveal_sensitive_prefix = int(
conf.get('reveal_sensitive_prefix', 16))
def method_from_req(self, req):
return req.environ.get('swift.orig_req_method', req.method)
def req_already_logged(self, env):
return env.get('swift.proxy_access_log_made')
def mark_req_logged(self, env):
env['swift.proxy_access_log_made'] = True
def obscure_sensitive(self, value):
if value and len(value) > self.reveal_sensitive_prefix:
return value[:self.reveal_sensitive_prefix] + '...'
return value
def log_request(self, req, status_int, bytes_received, bytes_sent,
start_time, end_time, resp_headers=None):
"""
Log a request.
:param req: swob.Request object for the request
:param status_int: integer code for the response status
:param bytes_received: bytes successfully read from the request body
:param bytes_sent: bytes yielded to the WSGI server
:param start_time: timestamp request started
:param end_time: timestamp request completed
:param resp_headers: dict of the response headers
"""
resp_headers = resp_headers or {}
req_path = get_valid_utf8_str(req.path)
the_request = quote(unquote(req_path), QUOTE_SAFE)
if req.query_string:
the_request = the_request + '?' + req.query_string
logged_headers = None
if self.log_hdrs:
if self.log_hdrs_only:
logged_headers = '\n'.join('%s: %s' % (k, v)
for k, v in req.headers.items()
if k in self.log_hdrs_only)
else:
logged_headers = '\n'.join('%s: %s' % (k, v)
for k, v in req.headers.items())
method = self.method_from_req(req)
end_gmtime_str = time.strftime('%d/%b/%Y/%H/%M/%S',
time.gmtime(end_time))
duration_time_str = "%.4f" % (end_time - start_time)
start_time_str = "%.9f" % start_time
end_time_str = "%.9f" % end_time
policy_index = get_policy_index(req.headers, resp_headers)
self.access_logger.info(' '.join(
quote(str(x) if x else '-', QUOTE_SAFE)
for x in (
get_remote_client(req),
req.remote_addr,
end_gmtime_str,
method,
the_request,
req.environ.get('SERVER_PROTOCOL'),
status_int,
req.referer,
req.user_agent,
self.obscure_sensitive(req.headers.get('x-auth-token')),
bytes_received,
bytes_sent,
req.headers.get('etag', None),
req.environ.get('swift.trans_id'),
logged_headers,
duration_time_str,
req.environ.get('swift.source'),
','.join(req.environ.get('swift.log_info') or ''),
start_time_str,
end_time_str,
policy_index
)))
# Log timing and bytes-transferred data to StatsD
metric_name = self.statsd_metric_name(req, status_int, method)
# Only log data for valid controllers (or SOS) to keep the metric count
# down (egregious errors will get logged by the proxy server itself).
if metric_name:
self.access_logger.timing(metric_name + '.timing',
(end_time - start_time) * 1000)
self.access_logger.update_stats(metric_name + '.xfer',
bytes_received + bytes_sent)
def statsd_metric_name(self, req, status_int, method):
if req.path.startswith('/v1/'):
try:
stat_type = [None, 'account', 'container',
'object'][req.path.strip('/').count('/')]
except IndexError:
stat_type = 'object'
else:
stat_type = req.environ.get('swift.source')
if stat_type is None:
return None
stat_method = method if method in self.valid_methods \
else 'BAD_METHOD'
return '.'.join((stat_type, stat_method, str(status_int)))
def __call__(self, env, start_response):
if self.req_already_logged(env):
return self.app(env, start_response)
self.mark_req_logged(env)
start_response_args = [None]
input_proxy = InputProxy(env['wsgi.input'])
env['wsgi.input'] = input_proxy
start_time = time.time()
def my_start_response(status, headers, exc_info=None):
start_response_args[0] = (status, list(headers), exc_info)
def status_int_for_logging(client_disconnect=False, start_status=None):
# log disconnected clients as '499' status code
if client_disconnect or input_proxy.client_disconnect:
ret_status_int = 499
elif start_status is None:
ret_status_int = int(
start_response_args[0][0].split(' ', 1)[0])
else:
ret_status_int = start_status
return ret_status_int
def iter_response(iterable):
iterator = iter(iterable)
try:
chunk = iterator.next()
while not chunk:
chunk = iterator.next()
except StopIteration:
chunk = ''
for h, v in start_response_args[0][1]:
if h.lower() in ('content-length', 'transfer-encoding'):
break
else:
if not chunk:
start_response_args[0][1].append(('Content-Length', '0'))
elif isinstance(iterable, list):
start_response_args[0][1].append(
('Content-Length', str(sum(len(i) for i in iterable))))
resp_headers = dict(start_response_args[0][1])
start_response(*start_response_args[0])
req = Request(env)
# Log timing information for time-to-first-byte (GET requests only)
method = self.method_from_req(req)
if method == 'GET':
status_int = status_int_for_logging()
metric_name = self.statsd_metric_name(req, status_int, method)
if metric_name:
self.access_logger.timing_since(
metric_name + '.first-byte.timing', start_time)
bytes_sent = 0
client_disconnect = False
try:
while chunk:
bytes_sent += len(chunk)
yield chunk
chunk = iterator.next()
except GeneratorExit: # generator was closed before we finished
client_disconnect = True
raise
finally:
status_int = status_int_for_logging(client_disconnect)
self.log_request(
req, status_int, input_proxy.bytes_received, bytes_sent,
start_time, time.time(), resp_headers=resp_headers)
close_method = getattr(iterable, 'close', None)
if callable(close_method):
close_method()
try:
iterable = self.app(env, my_start_response)
except Exception:
req = Request(env)
status_int = status_int_for_logging(start_status=500)
self.log_request(
req, status_int, input_proxy.bytes_received, 0, start_time,
time.time())
raise
else:
return iter_response(iterable)
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def proxy_logger(app):
return ProxyLoggingMiddleware(app, conf)
return proxy_logger
| apache-2.0 |
styxit/CouchPotatoServer | libs/requests/__init__.py | 68 | 1856 | # -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('http://python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post("http://httpbin.org/post", data=payload)
>>> print(r.text)
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2013 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
__title__ = 'requests'
__version__ = '2.1.0'
__build__ = 0x020100
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2013 Kenneth Reitz'
# Attempt to enable urllib3's SNI support, if possible
try:
from .packages.urllib3.contrib import pyopenssl
pyopenssl.inject_into_urllib3()
except ImportError:
pass
from . import utils
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
| gpl-3.0 |
qiankunshe/sky_engine | testing/legion/rpc_methods.py | 15 | 2301 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Defines the task RPC methods."""
import logging
import os
import sys
import threading
#pylint: disable=relative-import
import common_lib
import process
class RPCMethods(object):
"""Class exposing RPC methods."""
_dotted_whitelist = ['subprocess']
def __init__(self, server):
self._server = server
self.subprocess = process.Process
def _dispatch(self, method, params):
obj = self
if '.' in method:
# Allow only white listed dotted names
name, method = method.split('.')
assert name in self._dotted_whitelist
obj = getattr(self, name)
return getattr(obj, method)(*params)
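  # Illustrative dispatch examples (method names other than Echo are
  # assumptions): a call to "Echo" resolves directly on RPCMethods, e.g.
  # Echo('hi') -> "echo hi", while a dotted name such as "subprocess.<method>"
  # is routed to process.Process.<method> via the whitelist above.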
def Echo(self, message):
"""Simple RPC method to print and return a message."""
logging.info('Echoing %s', message)
return 'echo %s' % str(message)
def AbsPath(self, path):
"""Returns the absolute path."""
return os.path.abspath(path)
def Quit(self):
"""Call _server.shutdown in another thread.
This is needed because server.shutdown waits for the server to actually
quit. However the server cannot shutdown until it completes handling this
call. Calling this in the same thread results in a deadlock.
"""
t = threading.Thread(target=self._server.shutdown)
t.start()
def GetOutputDir(self):
"""Returns the isolated output directory on the task machine."""
return common_lib.GetOutputDir()
def WriteFile(self, path, text, mode='wb+'):
"""Writes a file on the task machine."""
with open(path, mode) as fh:
fh.write(text)
def ReadFile(self, path, mode='rb'):
"""Reads a file from the local task machine."""
with open(path, mode) as fh:
return fh.read()
def PathJoin(self, *parts):
"""Performs an os.path.join on the task machine.
This is needed due to the fact that there is no guarantee that os.sep will
be the same across all machines in a particular test. This method will
join the path parts locally to ensure the correct separator is used.
"""
return os.path.join(*parts)
def ListDir(self, path):
"""Returns the results of os.listdir."""
return os.listdir(path)
| bsd-3-clause |
gerrit-review/gerrit | tools/version.py | 1 | 1745 | #!/usr/bin/env python
# Copyright (C) 2014 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from optparse import OptionParser
import os.path
import re
import sys
parser = OptionParser()
opts, args = parser.parse_args()
if not len(args):
parser.error('not enough arguments')
elif len(args) > 1:
parser.error('too many arguments')
DEST_PATTERN = r'\g<1>%s\g<3>' % args[0]
def replace_in_file(filename, src_pattern):
try:
f = open(filename, "r")
s = f.read()
f.close()
s = re.sub(src_pattern, DEST_PATTERN, s)
f = open(filename, "w")
f.write(s)
f.close()
except IOError as err:
print('error updating %s: %s' % (filename, err), file=sys.stderr)
src_pattern = re.compile(r'^(\s*<version>)([-.\w]+)(</version>\s*)$',
re.MULTILINE)
for project in ['gerrit-acceptance-framework', 'gerrit-extension-api',
'gerrit-plugin-api', 'gerrit-plugin-gwtui',
'gerrit-war']:
pom = os.path.join('tools', 'maven', '%s_pom.xml' % project)
replace_in_file(pom, src_pattern)
src_pattern = re.compile(r'^(GERRIT_VERSION = ")([-.\w]+)(")$', re.MULTILINE)
replace_in_file('version.bzl', src_pattern)
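# Example invocation (version string is illustrative):
#   python tools/version.py 2.14.1
# rewrites <version>...</version> in the listed *_pom.xml files and
# GERRIT_VERSION in version.bzl.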
| apache-2.0 |
romain-li/edx-platform | common/lib/xmodule/xmodule/tests/test_textannotation.py | 83 | 3037 | # -*- coding: utf-8 -*-
"Test for Annotation Xmodule functional logic."
import unittest
from mock import Mock
from lxml import etree
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.textannotation_module import TextAnnotationModule
from . import get_test_system
class TextAnnotationModuleTestCase(unittest.TestCase):
''' text Annotation Module Test Case '''
sample_xml = '''
<annotatable>
<instructions><p>Test Instructions.</p></instructions>
<p>
One Fish. Two Fish.
Red Fish. Blue Fish.
Oh the places you'll go!
</p>
</annotatable>
'''
def setUp(self):
"""
Makes sure that the Module is declared and mocked with the sample xml above.
"""
super(TextAnnotationModuleTestCase, self).setUp()
# return anything except None to test LMS
def test_real_user(useless):
useless_user = Mock(email='[email protected]', id=useless)
return useless_user
# test to make sure that role is checked in LMS
def test_user_role():
return 'staff'
self.system = get_test_system()
self.system.get_real_user = test_real_user
self.system.get_user_role = test_user_role
self.system.anonymous_student_id = None
self.mod = TextAnnotationModule(
Mock(),
self.system,
DictFieldData({'data': self.sample_xml}),
ScopeIds(None, None, None, None)
)
def test_extract_instructions(self):
"""
Tests to make sure that the instructions are correctly pulled from the sample xml above.
It also makes sure that if no instructions exist, that it does in fact return nothing.
"""
xmltree = etree.fromstring(self.sample_xml)
expected_xml = u"<div><p>Test Instructions.</p></div>"
actual_xml = self.mod._extract_instructions(xmltree) # pylint: disable=protected-access
self.assertIsNotNone(actual_xml)
self.assertEqual(expected_xml.strip(), actual_xml.strip())
xmltree = etree.fromstring('<annotatable>foo</annotatable>')
actual = self.mod._extract_instructions(xmltree) # pylint: disable=protected-access
self.assertIsNone(actual)
def test_student_view(self):
"""
Tests the function that passes in all the information in the context
that will be used in templates/textannotation.html
"""
context = self.mod.student_view({}).content
for key in ['display_name',
'tag',
'source',
'instructions_html',
'content_html',
'annotation_storage',
'token',
'diacritic_marks',
'default_tab',
'annotation_mode',
'is_course_staff']:
self.assertIn(key, context)
| agpl-3.0 |
0k/OpenUpgrade | openerp/tools/cache.py | 100 | 5907 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# decorator makes wrappers that have the same API as their wrapped function;
# this is important for the openerp.api.guess() that relies on signatures
from decorator import decorator
from inspect import getargspec
import lru
import logging
logger = logging.getLogger(__name__)
class ormcache(object):
""" LRU cache decorator for orm methods. """
def __init__(self, skiparg=2, size=8192, multi=None, timeout=None):
self.skiparg = skiparg
self.size = size
self.stat_miss = 0
self.stat_hit = 0
self.stat_err = 0
def __call__(self, method):
self.method = method
lookup = decorator(self.lookup, method)
lookup.clear_cache = self.clear
return lookup
def stat(self):
return "lookup-stats hit=%s miss=%s err=%s ratio=%.1f" % \
(self.stat_hit, self.stat_miss, self.stat_err,
(100*float(self.stat_hit))/(self.stat_miss+self.stat_hit))
def lru(self, model):
ormcache = model._ormcache
try:
d = ormcache[self.method]
except KeyError:
d = ormcache[self.method] = lru.LRU(self.size)
return d
def lookup(self, method, *args, **kwargs):
d = self.lru(args[0])
key = args[self.skiparg:]
try:
r = d[key]
self.stat_hit += 1
return r
except KeyError:
self.stat_miss += 1
value = d[key] = self.method(*args, **kwargs)
return value
except TypeError:
self.stat_err += 1
return self.method(*args, **kwargs)
def clear(self, model, *args):
""" Remove *args entry from the cache or all keys if *args is undefined """
d = self.lru(model)
if args:
logger.warn("ormcache.clear arguments are deprecated and ignored "
"(while clearing caches on (%s).%s)",
model._name, self.method.__name__)
d.clear()
model.pool._any_cache_cleared = True
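# A minimal usage sketch (illustrative only; the model and method below are
# hypothetical). With skiparg=2 the first two positional arguments -- the
# model instance and the cursor -- are skipped when building the cache key:
#
#     class res_partner(osv.osv):
#         @ormcache(skiparg=2)
#         def _get_lang_id(self, cr, uid):
#             return self.pool['res.lang'].search(cr, uid, [])[0]
#
# The decorated method gains a clear_cache attribute, so its LRU can be
# emptied with res_partner._get_lang_id.clear_cache(model).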
class ormcache_context(ormcache):
def __init__(self, skiparg=2, size=8192, accepted_keys=()):
super(ormcache_context,self).__init__(skiparg,size)
self.accepted_keys = accepted_keys
def __call__(self, method):
# remember which argument is context
args = getargspec(method)[0]
self.context_pos = args.index('context')
return super(ormcache_context, self).__call__(method)
def lookup(self, method, *args, **kwargs):
d = self.lru(args[0])
# Note. The decorator() wrapper (used in __call__ above) will resolve
# arguments, and pass them positionally to lookup(). This is why context
# is not passed through kwargs!
if self.context_pos < len(args):
context = args[self.context_pos]
else:
context = kwargs.get('context') or {}
ckey = [(k, context[k]) for k in self.accepted_keys if k in context]
# Beware: do not take the context from args!
key = args[self.skiparg:self.context_pos] + tuple(ckey)
try:
r = d[key]
self.stat_hit += 1
return r
except KeyError:
self.stat_miss += 1
value = d[key] = self.method(*args, **kwargs)
return value
except TypeError:
self.stat_err += 1
return self.method(*args, **kwargs)
class ormcache_multi(ormcache):
def __init__(self, skiparg=2, size=8192, multi=3):
assert skiparg <= multi
super(ormcache_multi, self).__init__(skiparg, size)
self.multi = multi
def lookup(self, method, *args, **kwargs):
d = self.lru(args[0])
base_key = args[self.skiparg:self.multi] + args[self.multi+1:]
ids = args[self.multi]
result = {}
missed = []
# first take what is available in the cache
for i in ids:
key = base_key + (i,)
try:
result[i] = d[key]
self.stat_hit += 1
except Exception:
self.stat_miss += 1
missed.append(i)
if missed:
# call the method for the ids that were not in the cache
args = list(args)
args[self.multi] = missed
result.update(method(*args, **kwargs))
# store those new results back in the cache
for i in missed:
key = base_key + (i,)
d[key] = result[i]
return result
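# Illustrative sketch (the method below is hypothetical): with multi=3 the
# ids list is the fourth positional argument, and the decorated method must
# return a dict keyed by id so that cached entries and freshly computed ones
# can be merged per id:
#
#     @ormcache_multi(skiparg=2, multi=3)
#     def _read_names(self, cr, uid, ids):
#         return dict((i, self._fetch_one(cr, uid, i)) for i in ids)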
class dummy_cache(object):
""" Cache decorator replacement to actually do no caching. """
def __init__(self, *l, **kw):
pass
def __call__(self, fn):
fn.clear_cache = self.clear
return fn
def clear(self, *l, **kw):
pass
# For backward compatibility
cache = ormcache
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
nate-rcl/support-tools | wiki_to_md/wiki2gfm_test.py | 136 | 31840 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for wiki2gfm."""
import codecs
import StringIO
import unittest
from impl import converter
from impl import formatting_handler
from impl import pragma_handler
class BaseTest(unittest.TestCase):
"""Base test for wiki2gfm tests."""
def setUp(self):
"""Create a base test."""
self.warnings = []
self.output = StringIO.StringIO()
self.pragma_handler = pragma_handler.PragmaHandler(self._TrackWarning)
self.formatting_handler = formatting_handler.FormattingHandler(
self._TrackWarning,
project="test",
issue_map={123: "https://github.com/abcxyz/test/issues/789"},
symmetric_headers=False)
self.converter = converter.Converter(
self.pragma_handler,
self.formatting_handler,
self._TrackWarning,
project="test",
wikipages=["TestPage"])
def assertOutput(self, expected_output):
"""Assert that specific output was written.
Args:
expected_output: The expected value of the output.
"""
self.assertEquals(expected_output, self.output.getvalue())
  def assertNoOutput(self, unexpected_output):
    """Assert that the given output was not written.
    Args:
      unexpected_output: Output value that must not have been written.
    """
    self.assertNotEqual(unexpected_output, self.output.getvalue())
def assertWarning(self, warning_contents, occurrences=1):
"""Assert that a warning was issued containing the given contents.
This searches all tracked warnings for the contents.
Args:
warning_contents: Text that the warning was expected to contain.
occurrences: The number of occurrences of the warning contents.
"""
occurrences_found = 0
for warning in self.warnings:
if warning_contents in warning[1]:
occurrences_found += 1
if occurrences_found != occurrences:
self.fail("Failed to find '{0}' in {1} warnings (found it in {2})."
.format(warning_contents, occurrences, occurrences_found))
def assertNoWarnings(self):
"""Assert that no warnings were issued."""
self.assertListEqual([], self.warnings)
def _TrackWarning(self, input_line, message):
"""Track a warning by storing it in memory.
Args:
input_line: Line the warning was issued on.
message: The warning message.
"""
self.warnings.append((input_line, message))
class TestPragmaHandler(BaseTest):
"""Tests the pragma handler."""
def testSummaryPragmaGivesWarning(self):
self.pragma_handler.HandlePragma(1, self.output, "summary", "abc")
self.assertWarning("summary")
def testSidebarPragmaGivesWarning(self):
self.pragma_handler.HandlePragma(1, self.output, "sidebar", "abc")
self.assertWarning("sidebar")
def testUnknownPragmaGivesWarning(self):
self.pragma_handler.HandlePragma(1, self.output, "fail!", "abc")
self.assertWarning("fail!")
class TestFormattingHandler(BaseTest):
"""Tests the formatting handler."""
def testHandleHeaderOpen(self):
self.formatting_handler.HandleHeaderOpen(1, self.output, 3)
self.assertOutput("### ")
self.assertNoWarnings()
def testHandleHeaderOpenInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleHeaderOpen(1, self.output, 3)
self.assertOutput("<h3>")
self.assertNoWarnings()
def testHandleHeaderClose(self):
self.formatting_handler.HandleHeaderClose(1, self.output, 3)
self.assertOutput("") # No header closing markup by default.
self.assertNoWarnings()
def testHandleHeaderCloseInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleHeaderClose(1, self.output, 3)
self.assertOutput("</h3>")
self.assertNoWarnings()
def testHandleHeaderCloseSymmetric(self):
self.formatting_handler._symmetric_headers = True
self.formatting_handler.HandleHeaderClose(1, self.output, 3)
self.assertOutput(" ###")
self.assertNoWarnings()
def testHandleHeaderCloseSymmetricInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler._symmetric_headers = True
self.formatting_handler.HandleHeaderClose(1, self.output, 3)
self.assertOutput("</h3>")
self.assertNoWarnings()
def testHandleHRule(self):
self.formatting_handler.HandleHRule(1, self.output)
self.assertOutput("\n---\n")
self.assertNoWarnings()
def testHandleHRuleInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleHRule(1, self.output)
self.assertOutput("<hr />")
self.assertNoWarnings()
def testHandleCodeBlockOpen(self):
self.formatting_handler.HandleCodeBlockOpen(1, self.output, None)
self.assertOutput("```\n")
self.assertNoWarnings()
def testHandleCodeBlockOpenInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleCodeBlockOpen(1, self.output, None)
self.assertOutput("<pre><code>")
self.assertWarning("Code markup was used")
def testHandleCodeBlockOpenWithLanguage(self):
self.formatting_handler.HandleCodeBlockOpen(1, self.output, "idris")
self.assertOutput("```idris\n")
self.assertNoWarnings()
def testHandleCodeBlockOpenWithLanguageInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleCodeBlockOpen(1, self.output, "idris")
self.assertOutput("<pre><code>")
self.assertWarning("Code markup was used")
def testHandleCodeBlockClose(self):
self.formatting_handler.HandleCodeBlockClose(1, self.output)
self.assertOutput("```")
self.assertNoWarnings()
def testHandleCodeBlockCloseInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleCodeBlockClose(1, self.output)
self.assertOutput("</code></pre>")
self.assertNoWarnings()
def testHandleNumericList(self):
self.formatting_handler.HandleNumericListOpen(1, self.output, 1)
self.formatting_handler.HandleText(1, self.output, "a\n")
self.formatting_handler.HandleNumericListOpen(2, self.output, 1)
self.formatting_handler.HandleText(2, self.output, "b\n")
self.formatting_handler.HandleNumericListOpen(3, self.output, 2)
self.formatting_handler.HandleText(3, self.output, "c\n")
self.formatting_handler.HandleListClose(4, self.output) # Closing 2.
self.formatting_handler.HandleNumericListOpen(4, self.output, 1)
self.formatting_handler.HandleText(4, self.output, "d\n")
self.formatting_handler.HandleListClose(5, self.output) # Closing 1.
self.assertOutput(" 1. a\n 1. b\n 1. c\n 1. d\n")
self.assertNoWarnings()
def testHandleNumericListInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleNumericListOpen(1, self.output, 1)
self.formatting_handler.HandleText(1, self.output, "a\n")
self.formatting_handler.HandleNumericListOpen(2, self.output, 1)
self.formatting_handler.HandleText(2, self.output, "b\n")
self.formatting_handler.HandleNumericListOpen(3, self.output, 2)
self.formatting_handler.HandleText(3, self.output, "c\n")
self.formatting_handler.HandleListClose(4, self.output) # Closing 2.
self.formatting_handler.HandleNumericListOpen(4, self.output, 1)
self.formatting_handler.HandleText(4, self.output, "d\n")
self.formatting_handler.HandleListClose(5, self.output) # Closing 1.
self.assertOutput("<ol><li>a\n</li><li>b\n<ol><li>c\n</li></ol></li>"
"<li>d\n</li></ol>")
self.assertWarning("Numeric list markup was used", occurrences=2)
def testHandleBulletList(self):
self.formatting_handler.HandleBulletListOpen(1, self.output, 1)
self.formatting_handler.HandleText(1, self.output, "a\n")
self.formatting_handler.HandleBulletListOpen(2, self.output, 1)
self.formatting_handler.HandleText(2, self.output, "b\n")
self.formatting_handler.HandleBulletListOpen(3, self.output, 2)
self.formatting_handler.HandleText(3, self.output, "c\n")
self.formatting_handler.HandleListClose(4, self.output) # Closing 2.
self.formatting_handler.HandleBulletListOpen(4, self.output, 1)
self.formatting_handler.HandleText(4, self.output, "d\n")
self.formatting_handler.HandleListClose(5, self.output) # Closing 1.
self.assertOutput(" * a\n * b\n * c\n * d\n")
self.assertNoWarnings()
def testHandleBulletListInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleBulletListOpen(1, self.output, 1)
self.formatting_handler.HandleText(1, self.output, "a\n")
self.formatting_handler.HandleBulletListOpen(2, self.output, 1)
self.formatting_handler.HandleText(2, self.output, "b\n")
self.formatting_handler.HandleBulletListOpen(3, self.output, 2)
self.formatting_handler.HandleText(3, self.output, "c\n")
self.formatting_handler.HandleListClose(4, self.output) # Closing 2.
self.formatting_handler.HandleBulletListOpen(4, self.output, 1)
self.formatting_handler.HandleText(4, self.output, "d\n")
self.formatting_handler.HandleListClose(5, self.output) # Closing 1.
self.assertOutput("<ul><li>a\n</li><li>b\n<ul><li>c\n</li></ul></li>"
"<li>d\n</li></ul>")
self.assertWarning("Bulleted list markup was used", occurrences=2)
def testHandleBlockQuote(self):
self.formatting_handler.HandleBlockQuoteOpen(1, self.output, 1)
self.formatting_handler.HandleText(1, self.output, "a\n")
self.formatting_handler.HandleBlockQuoteOpen(2, self.output, 1)
self.formatting_handler.HandleText(2, self.output, "b\n")
self.formatting_handler.HandleBlockQuoteOpen(3, self.output, 2)
self.formatting_handler.HandleText(3, self.output, "c\n")
self.formatting_handler.HandleListClose(4, self.output) # Closing 2.
self.formatting_handler.HandleBlockQuoteOpen(4, self.output, 1)
self.formatting_handler.HandleText(4, self.output, "d\n")
self.formatting_handler.HandleListClose(5, self.output) # Closing 1.
self.assertOutput("> a\n> b\n> > c\n\n> d\n")
self.assertNoWarnings()
def testHandleBlockQuoteInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleBlockQuoteOpen(1, self.output, 1)
self.formatting_handler.HandleText(1, self.output, "a\n")
self.formatting_handler.HandleBlockQuoteOpen(2, self.output, 1)
self.formatting_handler.HandleText(2, self.output, "b\n")
self.formatting_handler.HandleBlockQuoteOpen(3, self.output, 2)
self.formatting_handler.HandleText(3, self.output, "c\n")
self.formatting_handler.HandleListClose(4, self.output) # Closing 2.
self.formatting_handler.HandleBlockQuoteOpen(4, self.output, 1)
self.formatting_handler.HandleText(4, self.output, "d\n")
self.formatting_handler.HandleListClose(5, self.output) # Closing 1.
self.assertOutput("<blockquote>a\nb<br>\n<blockquote>c\n</blockquote>"
"d\n</blockquote>")
self.assertWarning("Blockquote markup was used", occurrences=2)
def testHandleParagraphBreak(self):
self.formatting_handler.HandleText(1, self.output, "a\n")
self.formatting_handler.HandleParagraphBreak(2, self.output)
self.formatting_handler.HandleText(3, self.output, "b\n")
self.assertOutput("a\n\nb\n")
self.assertNoWarnings()
def testHandleParagraphBreakInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleText(1, self.output, "a\n")
self.formatting_handler.HandleParagraphBreak(2, self.output)
self.formatting_handler.HandleText(3, self.output, "b\n")
self.assertOutput("a\n<br>\nb<br>\n")
self.assertNoWarnings()
def testHandleBold(self):
self.formatting_handler.HandleBoldOpen(1, self.output)
self.formatting_handler.HandleText(2, self.output, "xyz")
self.formatting_handler.HandleBoldClose(3, self.output)
self.assertOutput("**xyz**")
self.assertNoWarnings()
def testHandleBoldInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleBoldOpen(1, self.output)
self.formatting_handler.HandleText(2, self.output, "xyz")
self.formatting_handler.HandleBoldClose(3, self.output)
self.assertOutput("<b>xyz</b>")
self.assertWarning("Bold markup was used")
def testHandleItalic(self):
self.formatting_handler.HandleItalicOpen(1, self.output)
self.formatting_handler.HandleText(2, self.output, "xyz")
self.formatting_handler.HandleItalicClose(3, self.output)
self.assertOutput("_xyz_")
self.assertNoWarnings()
def testHandleItalicInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleItalicOpen(1, self.output)
self.formatting_handler.HandleText(2, self.output, "xyz")
self.formatting_handler.HandleItalicClose(3, self.output)
self.assertOutput("<i>xyz</i>")
self.assertWarning("Italic markup was used")
def testHandleStrikethrough(self):
self.formatting_handler.HandleStrikethroughOpen(1, self.output)
self.formatting_handler.HandleText(2, self.output, "xyz")
self.formatting_handler.HandleStrikethroughClose(3, self.output)
self.assertOutput("~~xyz~~")
self.assertNoWarnings()
def testHandleStrikethroughInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleStrikethroughOpen(1, self.output)
self.formatting_handler.HandleText(2, self.output, "xyz")
self.formatting_handler.HandleStrikethroughClose(3, self.output)
self.assertOutput("<del>xyz</del>")
self.assertWarning("Strikethrough markup was used")
def testHandleSuperscript(self):
self.formatting_handler.HandleSuperscript(1, self.output, "xyz")
self.assertOutput("<sup>xyz</sup>")
self.assertNoWarnings()
def testHandleSuperscriptInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleSuperscript(1, self.output, "xyz")
self.assertOutput("<sup>xyz</sup>")
self.assertNoWarnings()
def testHandleSubscript(self):
self.formatting_handler.HandleSubscript(1, self.output, "xyz")
self.assertOutput("<sub>xyz</sub>")
self.assertNoWarnings()
def testHandleSubscriptInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleSubscript(1, self.output, "xyz")
self.assertOutput("<sub>xyz</sub>")
self.assertNoWarnings()
def testHandleInlineCode(self):
self.formatting_handler.HandleInlineCode(1, self.output, "xyz")
self.assertOutput("`xyz`")
self.assertNoWarnings()
def testHandleInlineCodeInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleInlineCode(1, self.output, "xyz")
self.assertOutput("<code>xyz</code>")
self.assertNoWarnings()
  # Table handling is tested in the Converter tests,
  # since the interactions are more involved and are exercised there.
def testHandleLink(self):
self.formatting_handler.HandleLink(
1, self.output, "http://example.com", None)
self.assertOutput("http://example.com")
self.assertNoWarnings()
def testHandleLinkInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleLink(
1, self.output, "http://example.com", None)
self.assertOutput("<a href='http://example.com'>http://example.com</a>")
self.assertWarning("Link markup was used")
def testHandleLinkWithDescription(self):
self.formatting_handler.HandleLink(
1, self.output, "http://example.com", "Description")
self.assertOutput("[Description](http://example.com)")
self.assertNoWarnings()
def testHandleLinkWithDescriptionInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleLink(
1, self.output, "http://example.com", "Description")
self.assertOutput("<a href='http://example.com'>Description</a>")
self.assertWarning("Link markup was used")
def testHandleLinkWithImageDescription(self):
self.formatting_handler.HandleLink(
1, self.output, "http://example.com", "http://example.com/a.png")
self.assertOutput("[](http://example.com)")
self.assertNoWarnings()
def testHandleLinkWithImageDescriptionInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleLink(
1, self.output, "http://example.com", "http://example.com/a.png")
self.assertOutput("<a href='http://example.com'>"
"<img src='http://example.com/a.png' /></a>")
self.assertWarning("Link markup was used")
def testHandleImageLink(self):
self.formatting_handler.HandleLink(
1, self.output, "http://example.com/a.png", None)
self.assertOutput("")
self.assertNoWarnings()
def testHandleImageLinkInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleLink(
1, self.output, "http://example.com/a.png", None)
self.assertOutput("<img src='http://example.com/a.png' />")
self.assertWarning("Link markup was used")
def testHandleImageLinkWithDescription(self):
self.formatting_handler.HandleLink(
1, self.output, "http://example.com/a.png", "Description")
self.assertOutput("[Description](http://example.com/a.png)")
self.assertNoWarnings()
def testHandleImageLinkWithDescriptionInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleLink(
1, self.output, "http://example.com/a.png", "Description")
self.assertOutput("<a href='http://example.com/a.png'>Description</a>")
self.assertWarning("Link markup was used")
def testHandleImageLinkWithImageDescription(self):
self.formatting_handler.HandleLink(
1, self.output, "http://example.com/a.png", "http://example.com/b.png")
self.assertOutput("]"
"(http://example.com/a.png)")
self.assertNoWarnings()
def testHandleImageLinkWithImageDescriptionInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleLink(
1, self.output, "http://example.com/a.png", "http://example.com/b.png")
self.assertOutput("<a href='http://example.com/a.png'>"
"<img src='http://example.com/b.png' /></a>")
self.assertWarning("Link markup was used")
def testHandleWiki(self):
self.formatting_handler.HandleWiki(1, self.output, "TestPage", "Test Page")
self.assertOutput("[Test Page](TestPage.md)")
self.assertNoWarnings()
def testHandleWikiInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleWiki(1, self.output, "TestPage", "Test Page")
self.assertOutput("<a href='TestPage.md'>Test Page</a>")
self.assertWarning("Link markup was used")
def testHandleIssue(self):
self.formatting_handler.HandleIssue(1, self.output, "issue ", 123)
self.assertOutput("[issue 789](https://github.com/abcxyz/test/issues/789)")
self.assertWarning("Issue 123 was auto-linked")
self.assertWarning("In the output, it has been linked to the "
"migrated issue on GitHub: 789.")
def testHandleIssueInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleIssue(1, self.output, "issue ", 123)
self.assertOutput("<a href='https://github.com/abcxyz/test/issues/789'>"
"issue 789</a>")
self.assertWarning("Link markup was used")
self.assertWarning("Issue 123 was auto-linked")
self.assertWarning("In the output, it has been linked to the "
"migrated issue on GitHub: 789.")
def testHandleIssueNotInMap(self):
self.formatting_handler.HandleIssue(1, self.output, "issue ", 456)
self.assertOutput("[issue 456](https://code.google.com/p/"
"test/issues/detail?id=456)")
self.assertWarning("Issue 456 was auto-linked")
self.assertWarning("However, it was not found in the issue migration map")
self.assertWarning("As a placeholder, the text has been modified to "
"link to the original Google Code issue page")
def testHandleIssueNotInMapInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleIssue(1, self.output, "issue ", 456)
self.assertOutput("<a href='https://code.google.com/p/"
"test/issues/detail?id=456'>issue 456</a>")
self.assertWarning("Link markup was used")
self.assertWarning("Issue 456 was auto-linked")
self.assertWarning("However, it was not found in the issue migration map")
self.assertWarning("As a placeholder, the text has been modified to "
"link to the original Google Code issue page")
def testHandleIssueNoMap(self):
self.formatting_handler._issue_map = None
self.formatting_handler.HandleIssue(1, self.output, "issue ", 456)
self.assertOutput("[issue 456](https://code.google.com/p/"
"test/issues/detail?id=456)")
self.assertWarning("Issue 456 was auto-linked")
self.assertWarning("However, no issue migration map was specified")
self.assertWarning("As a placeholder, the text has been modified to "
"link to the original Google Code issue page")
def testHandleIssueNoMapInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler._issue_map = None
self.formatting_handler.HandleIssue(1, self.output, "issue ", 456)
self.assertOutput("<a href='https://code.google.com/p/"
"test/issues/detail?id=456'>issue 456</a>")
self.assertWarning("Link markup was used")
self.assertWarning("Issue 456 was auto-linked")
self.assertWarning("However, no issue migration map was specified")
self.assertWarning("As a placeholder, the text has been modified to "
"link to the original Google Code issue page")
def testHandleIssueNotInMapNoProject(self):
self.formatting_handler._project = None
self.formatting_handler.HandleIssue(1, self.output, "issue ", 456)
self.assertOutput("issue 456 (on Google Code)")
self.assertWarning("Issue 456 was auto-linked")
self.assertWarning("However, it was not found in the issue migration map")
self.assertWarning("Additionally, because no project name was specified "
"the issue could not be linked to the original Google "
"Code issue page.")
self.assertWarning("The auto-link has been removed")
def testHandleIssueNotInMapNoProjectInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler._project = None
self.formatting_handler.HandleIssue(1, self.output, "issue ", 456)
self.assertOutput("issue 456 (on Google Code)")
self.assertWarning("Issue 456 was auto-linked")
self.assertWarning("However, it was not found in the issue migration map")
self.assertWarning("Additionally, because no project name was specified "
"the issue could not be linked to the original Google "
"Code issue page.")
self.assertWarning("The auto-link has been removed")
def testHandleIssueNoMapNoProject(self):
self.formatting_handler._issue_map = None
self.formatting_handler._project = None
self.formatting_handler.HandleIssue(1, self.output, "issue ", 456)
self.assertOutput("issue 456 (on Google Code)")
self.assertWarning("Issue 456 was auto-linked")
self.assertWarning("However, no issue migration map was specified")
self.assertWarning("Additionally, because no project name was specified "
"the issue could not be linked to the original Google "
"Code issue page.")
self.assertWarning("The auto-link has been removed")
def testHandleIssueNoMapNoProjectInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler._issue_map = None
self.formatting_handler._project = None
self.formatting_handler.HandleIssue(1, self.output, "issue ", 456)
self.assertOutput("issue 456 (on Google Code)")
self.assertWarning("Issue 456 was auto-linked")
self.assertWarning("However, no issue migration map was specified")
self.assertWarning("Additionally, because no project name was specified "
"the issue could not be linked to the original Google "
"Code issue page.")
self.assertWarning("The auto-link has been removed")
def testHandleRevision(self):
self.formatting_handler.HandleRevision(1, self.output, "revision ", 7)
self.assertOutput("[revision 7](https://code.google.com/p/"
"test/source/detail?r=7)")
self.assertWarning("Revision 7 was auto-linked")
self.assertWarning("As a placeholder, the text has been modified to "
"link to the original Google Code source page")
def testHandleRevisionInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleRevision(1, self.output, "revision ", 7)
self.assertOutput("<a href='https://code.google.com/p/"
"test/source/detail?r=7'>revision 7</a>")
self.assertWarning("Link markup was used")
self.assertWarning("Revision 7 was auto-linked")
self.assertWarning("As a placeholder, the text has been modified to "
"link to the original Google Code source page")
def testHandleRevisionNoProject(self):
self.formatting_handler._project = None
self.formatting_handler.HandleRevision(1, self.output, "revision ", 7)
self.assertOutput("revision 7 (on Google Code)")
self.assertWarning("Revision 7 was auto-linked")
self.assertWarning("Additionally, because no project name was specified "
"the revision could not be linked to the original "
"Google Code source page.")
self.assertWarning("The auto-link has been removed")
def testHandleRevisionNoProjectInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler._project = None
self.formatting_handler.HandleRevision(1, self.output, "revision ", 7)
self.assertOutput("revision 7 (on Google Code)")
self.assertWarning("Revision 7 was auto-linked")
self.assertWarning("Additionally, because no project name was specified "
"the revision could not be linked to the original "
"Google Code source page.")
self.assertWarning("The auto-link has been removed")
  def testHandleHtml(self):
self.formatting_handler.HandleHtmlOpen(
1, self.output, "tag", {"a": "1", "b": "2"}, False)
self.formatting_handler.HandleText(2, self.output, "xyz")
self.formatting_handler.HandleHtmlClose(3, self.output, "tag")
self.assertOutput("<tag a='1' b='2'>xyz</tag>")
self.assertNoWarnings()
def testHandleHtmlInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleHtmlOpen(
1, self.output, "tag", {"a": "1", "b": "2"}, False)
self.formatting_handler.HandleText(2, self.output, "xyz")
self.formatting_handler.HandleHtmlClose(3, self.output, "tag")
self.assertOutput("<tag a='1' b='2'>xyz</tag>")
self.assertNoWarnings()
  def testHandleHtmlSelfClose(self):
self.formatting_handler.HandleHtmlOpen(
1, self.output, "tag", {"a": "1", "b": "2"}, True)
self.assertOutput("<tag a='1' b='2' />")
self.assertNoWarnings()
def testHandleHtmlSelfCloseInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleHtmlOpen(
1, self.output, "tag", {"a": "1", "b": "2"}, True)
self.assertOutput("<tag a='1' b='2' />")
self.assertNoWarnings()
def testHandleGPlus(self):
self.formatting_handler.HandleGPlusOpen(1, self.output, None)
self.formatting_handler.HandleGPlusClose(1, self.output)
self.assertNoOutput("(TODO: Link to Google+ page.)")
self.assertWarning("A Google+ +1 button was embedded on this page")
def testHandleGPlusInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleGPlusOpen(1, self.output, None)
self.formatting_handler.HandleGPlusClose(1, self.output)
self.assertNoOutput("(TODO: Link to Google+ page.)")
self.assertWarning("A Google+ +1 button was embedded on this page")
def testHandleComment(self):
self.formatting_handler.HandleCommentOpen(1, self.output)
self.formatting_handler.HandleText(2, self.output, "xyz")
self.formatting_handler.HandleCommentClose(3, self.output)
self.assertOutput("<a href='Hidden comment: xyz'></a>")
self.assertWarning("A comment was used in the wiki file")
def testHandleCommentInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleCommentOpen(1, self.output)
self.formatting_handler.HandleText(2, self.output, "xyz")
self.formatting_handler.HandleCommentClose(3, self.output)
self.assertOutput("<a href='Hidden comment: xyz'></a>")
self.assertWarning("A comment was used in the wiki file")
def testHandleVideo(self):
self.formatting_handler.HandleVideoOpen(
1, self.output, "FiARsQSlzDc", 320, 240)
self.formatting_handler.HandleVideoClose(1, self.output)
self.assertOutput("<a href='http://www.youtube.com/watch?"
"feature=player_embedded&v=FiARsQSlzDc' target='_blank'>"
"<img src='http://img.youtube.com/vi/FiARsQSlzDc/0.jpg' "
"width='320' height=240 /></a>")
self.assertWarning("GFM does not support embedding the YouTube player")
def testHandleVideoInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleVideoOpen(
1, self.output, "FiARsQSlzDc", 320, 240)
self.formatting_handler.HandleVideoClose(1, self.output)
self.assertOutput("<a href='http://www.youtube.com/watch?"
"feature=player_embedded&v=FiARsQSlzDc' target='_blank'>"
"<img src='http://img.youtube.com/vi/FiARsQSlzDc/0.jpg' "
"width='320' height=240 /></a>")
self.assertWarning("GFM does not support embedding the YouTube player")
def testHandleText(self):
self.formatting_handler.HandleText(1, self.output, "xyz")
self.assertOutput("xyz")
self.assertNoWarnings()
def testHandleTextInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleText(1, self.output, "xyz")
self.assertOutput("xyz")
self.assertNoWarnings()
def testHandleEscapedText(self):
self.formatting_handler.HandleEscapedText(1, self.output, "**_xyz_** <a>")
self.assertOutput("\\*\\*\\_xyz\\_\\*\\* <a>")
self.assertNoWarnings()
def testHandleEscapedTextInHtml(self):
self.formatting_handler._in_html = 1
self.formatting_handler.HandleEscapedText(1, self.output, "**_xyz_** <a>")
self.assertOutput("**_xyz_** <a>")
self.assertNoWarnings()
class TestConverter(BaseTest):
"""Tests the converter."""
def testExamplePage(self):
with codecs.open("example.wiki", "rU", "utf-8") as example_input:
with codecs.open("example.md", "rU", "utf-8") as example_output:
self.converter.Convert(example_input, self.output)
self.assertOutput(example_output.read())
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
pavelchristof/gomoku-ai | tensorflow/contrib/distributions/python/kernel_tests/bijectors/power_transform_test.py | 72 | 2610 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.power_transform import PowerTransform
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class PowerTransformBijectorTest(test.TestCase):
"""Tests correctness of the power transformation."""
def testBijector(self):
with self.test_session():
c = 0.2
bijector = PowerTransform(
power=c, event_ndims=1, validate_args=True)
self.assertEqual("power_transform", bijector.name)
x = np.array([[[-1.], [2.], [-5. + 1e-4]]])
y = (1. + x * c)**(1. / c)
self.assertAllClose(y, bijector.forward(x).eval())
self.assertAllClose(x, bijector.inverse(y).eval())
self.assertAllClose(
(c - 1.) * np.sum(np.log(y), axis=-1),
bijector.inverse_log_det_jacobian(y).eval())
self.assertAllClose(
-bijector.inverse_log_det_jacobian(y).eval(),
bijector.forward_log_det_jacobian(x).eval(),
rtol=1e-4,
atol=0.)
def testScalarCongruency(self):
with self.test_session():
bijector = PowerTransform(
power=0.2, validate_args=True)
assert_scalar_congruency(
bijector, lower_x=-2., upper_x=1.5, rtol=0.05)
def testBijectiveAndFinite(self):
with self.test_session():
bijector = PowerTransform(
power=0.2, event_ndims=0, validate_args=True)
x = np.linspace(-4.999, 10, num=10).astype(np.float32)
y = np.logspace(0.001, 10, num=10).astype(np.float32)
assert_bijective_and_finite(bijector, x, y, rtol=1e-3)
if __name__ == "__main__":
test.main()
| apache-2.0 |
andreivasiliu2211/mraa | tests/mock/i2c_checks_read_byte_data.py | 21 | 2029 | #!/usr/bin/env python
# Author: Alex Tereschenko <[email protected]>
# Copyright (c) 2016 Alex Tereschenko.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import mraa as m
import unittest as u
from i2c_checks_shared import *
class I2cChecksReadByteData(u.TestCase):
def setUp(self):
self.i2c = m.I2c(MRAA_I2C_BUS_NUM)
def tearDown(self):
del self.i2c
def test_i2c_read_byte_data(self):
self.i2c.address(MRAA_MOCK_I2C_ADDR)
expected_res = MRAA_MOCK_I2C_DATA_INIT_BYTE
res = self.i2c.readReg(MRAA_MOCK_I2C_DATA_LEN - 1)
self.assertEqual(res, expected_res, "I2C readReg() returned unexpected data")
def test_i2c_read_byte_data_invalid_addr(self):
self.i2c.address(MRAA_MOCK_I2C_ADDR - 1)
self.assertRaises(ValueError, self.i2c.readReg, MRAA_MOCK_I2C_DATA_LEN - 1)
def test_i2c_read_byte_data_invalid_reg(self):
self.i2c.address(MRAA_MOCK_I2C_ADDR)
self.assertRaises(ValueError, self.i2c.readReg, MRAA_MOCK_I2C_DATA_LEN)
if __name__ == "__main__":
u.main()
| mit |
itamarst/crochet | examples/downloader.py | 3 | 1415 | #!/usr/bin/python
"""
A flask web application that downloads a page in the background.
"""
import logging
from flask import Flask, session, escape
from crochet import setup, run_in_reactor, retrieve_result, TimeoutError
# Can be called multiple times with no ill-effect:
setup()
app = Flask(__name__)
@run_in_reactor
def download_page(url):
"""
Download a page.
"""
from twisted.web.client import getPage
return getPage(url)
@app.route('/')
def index():
if 'download' not in session:
# Calling an @run_in_reactor function returns an EventualResult:
result = download_page('http://www.google.com')
session['download'] = result.stash()
return "Starting download, refresh to track progress."
# Retrieval is a one-time operation, so the uid in the session cannot be
# reused:
result = retrieve_result(session.pop('download'))
try:
download = result.wait(timeout=0.1)
return "Downloaded: " + escape(download)
except TimeoutError:
session['download'] = result.stash()
return "Download in progress..."
except:
# The original traceback of the exception:
return "Download failed:\n" + result.original_failure().getTraceback()
if __name__ == '__main__':
import os, sys
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
app.secret_key = os.urandom(24)
app.run()
| mit |
davidharrigan/django | django/contrib/staticfiles/management/commands/findstatic.py | 463 | 1745 | from __future__ import unicode_literals
import os
from django.contrib.staticfiles import finders
from django.core.management.base import LabelCommand
from django.utils.encoding import force_text
class Command(LabelCommand):
help = "Finds the absolute paths for the given static file(s)."
label = 'static file'
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument('--first', action='store_false', dest='all',
default=True,
help="Only return the first match for each static file.")
def handle_label(self, path, **options):
verbosity = options['verbosity']
result = finders.find(path, all=options['all'])
path = force_text(path)
if verbosity >= 2:
searched_locations = ("Looking in the following locations:\n %s" %
"\n ".join(force_text(location)
for location in finders.searched_locations))
else:
searched_locations = ''
if result:
if not isinstance(result, (list, tuple)):
result = [result]
result = (force_text(os.path.realpath(path)) for path in result)
if verbosity >= 1:
file_list = '\n '.join(result)
return ("Found '%s' here:\n %s\n%s" %
(path, file_list, searched_locations))
else:
return '\n'.join(result)
else:
message = ["No matching file found for '%s'." % path]
if verbosity >= 2:
message.append(searched_locations)
if verbosity >= 1:
self.stderr.write('\n'.join(message))
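# Example invocation (the project path shown is illustrative):
#
#     $ python manage.py findstatic css/base.css --first
#     Found 'css/base.css' here:
#       /home/user/project/static/css/base.css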
| bsd-3-clause |
numb3r3/kombu | examples/complete_send.py | 31 | 1152 | """
Example producer that sends a single message and exits.
You can use `complete_receive.py` to receive the message sent.
"""
from kombu import Connection, Producer, Exchange, Queue
#: By default messages sent to exchanges are persistent (delivery_mode=2),
#: and queues and exchanges are durable.
exchange = Exchange('kombu_demo', type='direct')
queue = Queue('kombu_demo', exchange, routing_key='kombu_demo')
with Connection('amqp://guest:guest@localhost:5672//') as connection:
#: Producers are used to publish messages.
#: a default exchange and routing key can also be specifed
#: as arguments the Producer, but we rather specify this explicitly
#: at the publish call.
producer = Producer(connection)
#: Publish the message using the json serializer (which is the default),
#: and zlib compression. The kombu consumer will automatically detect
#: encoding, serialization and compression used and decode accordingly.
producer.publish({'hello': 'world'},
exchange=exchange,
routing_key='kombu_demo',
serializer='json', compression='zlib')
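#: A matching consumer (see complete_receive.py) would look roughly like
#: this sketch:
#:
#:     def handle_message(body, message):
#:         print(body)
#:         message.ack()
#:
#:     with Connection('amqp://guest:guest@localhost:5672//') as connection:
#:         with connection.Consumer(queue, callbacks=[handle_message]):
#:             connection.drain_events()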
| bsd-3-clause |
CT-Data-Collaborative/ctdata-wagtail-cms | ctdata_wagtail/users/migrations/0001_initial.py | 265 | 3054 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import django.contrib.auth.models
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('auth', '0006_require_contenttypes_0002'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),
('is_superuser', models.BooleanField(help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status', default=False)),
('username', models.CharField(max_length=30, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], verbose_name='username', error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True)),
('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
('email', models.EmailField(max_length=254, verbose_name='email address', blank=True)),
('is_staff', models.BooleanField(help_text='Designates whether the user can log into this admin site.', verbose_name='staff status', default=False)),
('is_active', models.BooleanField(help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active', default=True)),
('date_joined', models.DateTimeField(verbose_name='date joined', default=django.utils.timezone.now)),
('groups', models.ManyToManyField(related_name='user_set', blank=True, verbose_name='groups', to='auth.Group', help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_query_name='user')),
('user_permissions', models.ManyToManyField(related_name='user_set', blank=True, verbose_name='user permissions', to='auth.Permission', help_text='Specific permissions for this user.', related_query_name='user')),
('name', models.CharField(max_length=255, verbose_name='Name of User', blank=True)),
],
options={
'verbose_name': 'user',
'abstract': False,
'verbose_name_plural': 'users',
},
managers=[
(b'objects', django.contrib.auth.models.UserManager()),
],
),
]
| mit |
prezi/gunicorn | gunicorn/app/base.py | 37 | 5568 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
from __future__ import print_function
import os
import sys
import traceback
from gunicorn._compat import execfile_
from gunicorn import util
from gunicorn.arbiter import Arbiter
from gunicorn.config import Config, get_default_config_file
from gunicorn import debug
class BaseApplication(object):
"""
An application interface for configuring and loading
the various necessities for any given web framework.
"""
def __init__(self, usage=None, prog=None):
self.usage = usage
self.cfg = None
self.callable = None
self.prog = prog
self.logger = None
self.do_load_config()
def do_load_config(self):
"""
Loads the configuration
"""
try:
self.load_default_config()
self.load_config()
except Exception as e:
print("\nError: %s" % str(e), file=sys.stderr)
sys.stderr.flush()
sys.exit(1)
def load_default_config(self):
# init configuration
self.cfg = Config(self.usage, prog=self.prog)
def init(self, parser, opts, args):
raise NotImplementedError
def load(self):
raise NotImplementedError
def load_config(self):
"""
        This method is used to load the configuration from one or several
        inputs: custom command line arguments, a configuration file, etc.
You have to override this method in your class.
"""
raise NotImplementedError
def reload(self):
self.do_load_config()
if self.cfg.spew:
debug.spew()
def wsgi(self):
if self.callable is None:
self.callable = self.load()
return self.callable
def run(self):
try:
Arbiter(self).run()
except RuntimeError as e:
print("\nError: %s\n" % e, file=sys.stderr)
sys.stderr.flush()
sys.exit(1)
class Application(BaseApplication):
def get_config_from_filename(self, filename):
if not os.path.exists(filename):
raise RuntimeError("%r doesn't exist" % filename)
cfg = {
"__builtins__": __builtins__,
"__name__": "__config__",
"__file__": filename,
"__doc__": None,
"__package__": None
}
try:
execfile_(filename, cfg, cfg)
except Exception:
print("Failed to read config file: %s" % filename, file=sys.stderr)
traceback.print_exc()
sys.stderr.flush()
sys.exit(1)
return cfg
def get_config_from_module_name(self, module_name):
return util.import_module(module_name).__dict__
def load_config_from_module_name_or_filename(self, location):
"""
        Loads the configuration from a module name or a filename: the target
        must be a Python module or file. Raises a RuntimeError if the file
        does not exist, and exits the process if it contains a syntax error.
"""
try:
cfg = self.get_config_from_module_name(module_name=location)
except ImportError:
cfg = self.get_config_from_filename(filename=location)
for k, v in cfg.items():
# Ignore unknown names
if k not in self.cfg.settings:
continue
try:
self.cfg.set(k.lower(), v)
except:
print("Invalid value for %s: %s\n" % (k, v), file=sys.stderr)
sys.stderr.flush()
raise
return cfg
def load_config_from_file(self, filename):
return self.load_config_from_module_name_or_filename(
location=filename
)
def load_config(self):
# parse console args
parser = self.cfg.parser()
args = parser.parse_args()
# optional settings from apps
cfg = self.init(parser, args, args.args)
        # Load up any app-specific configuration
        if cfg:
for k, v in cfg.items():
self.cfg.set(k.lower(), v)
if args.config:
self.load_config_from_file(args.config)
else:
default_config = get_default_config_file()
if default_config is not None:
self.load_config_from_file(default_config)
# Lastly, update the configuration with any command line
# settings.
for k, v in args.__dict__.items():
if v is None:
continue
if k == "args":
continue
self.cfg.set(k.lower(), v)
def run(self):
if self.cfg.check_config:
try:
self.load()
except:
msg = "\nError while loading the application:\n"
print(msg, file=sys.stderr)
traceback.print_exc()
sys.stderr.flush()
sys.exit(1)
sys.exit(0)
if self.cfg.spew:
debug.spew()
if self.cfg.daemon:
util.daemonize(self.cfg.enable_stdio_inheritance)
# set python paths
        if self.cfg.pythonpath:
paths = self.cfg.pythonpath.split(",")
for path in paths:
pythonpath = os.path.abspath(path)
if pythonpath not in sys.path:
sys.path.insert(0, pythonpath)
super(Application, self).run()
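# A minimal sketch of running gunicorn programmatically by subclassing
# BaseApplication (names and options below are illustrative):
#
#     class StandaloneApplication(BaseApplication):
#         def __init__(self, app, options=None):
#             self.options = options or {}
#             self.application = app
#             super(StandaloneApplication, self).__init__()
#
#         def load_config(self):
#             for key, value in self.options.items():
#                 if key in self.cfg.settings and value is not None:
#                     self.cfg.set(key.lower(), value)
#
#         def load(self):
#             return self.application
#
#     StandaloneApplication(wsgi_app, {'bind': '127.0.0.1:8000'}).run()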
| mit |
kenshay/ImageScript | Script_Runner/PYTHON/Lib/site-packages/setuptools/command/build_ext.py | 27 | 13173 | import os
import sys
import itertools
import imp
from distutils.command.build_ext import build_ext as _du_build_ext
from distutils.file_util import copy_file
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler, get_config_var
from distutils.errors import DistutilsError
from distutils import log
from setuptools.extension import Library
from setuptools.extern import six
try:
# Attempt to use Cython for building extensions, if available
from Cython.Distutils.build_ext import build_ext as _build_ext
# Additionally, assert that the compiler module will load
# also. Ref #1229.
__import__('Cython.Compiler.Main')
except ImportError:
_build_ext = _du_build_ext
# make sure _config_vars is initialized
get_config_var("LDSHARED")
from distutils.sysconfig import _config_vars as _CONFIG_VARS
def _customize_compiler_for_shlib(compiler):
if sys.platform == "darwin":
# building .dylib requires additional compiler flags on OSX; here we
# temporarily substitute the pyconfig.h variables so that distutils'
# 'customize_compiler' uses them before we build the shared libraries.
tmp = _CONFIG_VARS.copy()
try:
# XXX Help! I don't have any idea whether these are right...
_CONFIG_VARS['LDSHARED'] = (
"gcc -Wl,-x -dynamiclib -undefined dynamic_lookup")
_CONFIG_VARS['CCSHARED'] = " -dynamiclib"
_CONFIG_VARS['SO'] = ".dylib"
customize_compiler(compiler)
finally:
_CONFIG_VARS.clear()
_CONFIG_VARS.update(tmp)
else:
customize_compiler(compiler)
have_rtld = False
use_stubs = False
libtype = 'shared'
if sys.platform == "darwin":
use_stubs = True
elif os.name != 'nt':
try:
import dl
use_stubs = have_rtld = hasattr(dl, 'RTLD_NOW')
except ImportError:
pass
if_dl = lambda s: s if have_rtld else ''
def get_abi3_suffix():
"""Return the file extension for an abi3-compliant Extension()"""
for suffix, _, _ in (s for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION):
if '.abi3' in suffix: # Unix
return suffix
elif suffix == '.pyd': # Windows
return suffix
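# Illustrative example: on a Unix CPython 3 build this returns '.abi3.so', so
# an Extension declared with py_limited_api=True is built as 'mod.abi3.so'
# instead of e.g. 'mod.cpython-36m-x86_64-linux-gnu.so' (see get_ext_filename
# below); on Windows the plain '.pyd' suffix is kept.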
class build_ext(_build_ext):
def run(self):
"""Build extensions in build directory, then copy if --inplace"""
old_inplace, self.inplace = self.inplace, 0
_build_ext.run(self)
self.inplace = old_inplace
if old_inplace:
self.copy_extensions_to_source()
def copy_extensions_to_source(self):
build_py = self.get_finalized_command('build_py')
for ext in self.extensions:
fullname = self.get_ext_fullname(ext.name)
filename = self.get_ext_filename(fullname)
modpath = fullname.split('.')
package = '.'.join(modpath[:-1])
package_dir = build_py.get_package_dir(package)
dest_filename = os.path.join(package_dir,
os.path.basename(filename))
src_filename = os.path.join(self.build_lib, filename)
# Always copy, even if source is older than destination, to ensure
# that the right extensions for the current Python/platform are
# used.
copy_file(
src_filename, dest_filename, verbose=self.verbose,
dry_run=self.dry_run
)
if ext._needs_stub:
self.write_stub(package_dir or os.curdir, ext, True)
def get_ext_filename(self, fullname):
filename = _build_ext.get_ext_filename(self, fullname)
if fullname in self.ext_map:
ext = self.ext_map[fullname]
use_abi3 = (
six.PY3
and getattr(ext, 'py_limited_api')
and get_abi3_suffix()
)
if use_abi3:
so_ext = _get_config_var_837('EXT_SUFFIX')
filename = filename[:-len(so_ext)]
filename = filename + get_abi3_suffix()
if isinstance(ext, Library):
fn, ext = os.path.splitext(filename)
return self.shlib_compiler.library_filename(fn, libtype)
elif use_stubs and ext._links_to_dynamic:
d, fn = os.path.split(filename)
return os.path.join(d, 'dl-' + fn)
return filename
def initialize_options(self):
_build_ext.initialize_options(self)
self.shlib_compiler = None
self.shlibs = []
self.ext_map = {}
def finalize_options(self):
_build_ext.finalize_options(self)
self.extensions = self.extensions or []
self.check_extensions_list(self.extensions)
self.shlibs = [ext for ext in self.extensions
if isinstance(ext, Library)]
if self.shlibs:
self.setup_shlib_compiler()
for ext in self.extensions:
ext._full_name = self.get_ext_fullname(ext.name)
for ext in self.extensions:
fullname = ext._full_name
self.ext_map[fullname] = ext
# distutils 3.1 will also ask for module names
# XXX what to do with conflicts?
self.ext_map[fullname.split('.')[-1]] = ext
ltd = self.shlibs and self.links_to_dynamic(ext) or False
ns = ltd and use_stubs and not isinstance(ext, Library)
ext._links_to_dynamic = ltd
ext._needs_stub = ns
filename = ext._file_name = self.get_ext_filename(fullname)
libdir = os.path.dirname(os.path.join(self.build_lib, filename))
if ltd and libdir not in ext.library_dirs:
ext.library_dirs.append(libdir)
if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs:
ext.runtime_library_dirs.append(os.curdir)
def setup_shlib_compiler(self):
compiler = self.shlib_compiler = new_compiler(
compiler=self.compiler, dry_run=self.dry_run, force=self.force
)
_customize_compiler_for_shlib(compiler)
if self.include_dirs is not None:
compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name, value) in self.define:
compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
compiler.undefine_macro(macro)
if self.libraries is not None:
compiler.set_libraries(self.libraries)
if self.library_dirs is not None:
compiler.set_library_dirs(self.library_dirs)
if self.rpath is not None:
compiler.set_runtime_library_dirs(self.rpath)
if self.link_objects is not None:
compiler.set_link_objects(self.link_objects)
# hack so distutils' build_extension() builds a library instead
compiler.link_shared_object = link_shared_object.__get__(compiler)
def get_export_symbols(self, ext):
if isinstance(ext, Library):
return ext.export_symbols
return _build_ext.get_export_symbols(self, ext)
def build_extension(self, ext):
ext._convert_pyx_sources_to_lang()
_compiler = self.compiler
try:
if isinstance(ext, Library):
self.compiler = self.shlib_compiler
_build_ext.build_extension(self, ext)
if ext._needs_stub:
cmd = self.get_finalized_command('build_py').build_lib
self.write_stub(cmd, ext)
finally:
self.compiler = _compiler
def links_to_dynamic(self, ext):
"""Return true if 'ext' links to a dynamic lib in the same package"""
# XXX this should check to ensure the lib is actually being built
# XXX as dynamic, and not just using a locally-found version or a
# XXX static-compiled version
libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
pkg = '.'.join(ext._full_name.split('.')[:-1] + [''])
return any(pkg + libname in libnames for libname in ext.libraries)
def get_outputs(self):
return _build_ext.get_outputs(self) + self.__get_stubs_outputs()
def __get_stubs_outputs(self):
# assemble the base name for each extension that needs a stub
ns_ext_bases = (
os.path.join(self.build_lib, *ext._full_name.split('.'))
for ext in self.extensions
if ext._needs_stub
)
# pair each base with the extension
pairs = itertools.product(ns_ext_bases, self.__get_output_extensions())
return list(base + fnext for base, fnext in pairs)
def __get_output_extensions(self):
yield '.py'
yield '.pyc'
if self.get_finalized_command('build_py').optimize:
yield '.pyo'
def write_stub(self, output_dir, ext, compile=False):
log.info("writing stub loader for %s to %s", ext._full_name,
output_dir)
stub_file = (os.path.join(output_dir, *ext._full_name.split('.')) +
'.py')
if compile and os.path.exists(stub_file):
raise DistutilsError(stub_file + " already exists! Please delete.")
if not self.dry_run:
f = open(stub_file, 'w')
f.write(
'\n'.join([
"def __bootstrap__():",
" global __bootstrap__, __file__, __loader__",
" import sys, os, pkg_resources, imp" + if_dl(", dl"),
" __file__ = pkg_resources.resource_filename"
"(__name__,%r)"
% os.path.basename(ext._file_name),
" del __bootstrap__",
" if '__loader__' in globals():",
" del __loader__",
if_dl(" old_flags = sys.getdlopenflags()"),
" old_dir = os.getcwd()",
" try:",
" os.chdir(os.path.dirname(__file__))",
if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"),
" imp.load_dynamic(__name__,__file__)",
" finally:",
if_dl(" sys.setdlopenflags(old_flags)"),
" os.chdir(old_dir)",
"__bootstrap__()",
"" # terminal \n
])
)
f.close()
if compile:
from distutils.util import byte_compile
byte_compile([stub_file], optimize=0,
force=True, dry_run=self.dry_run)
optimize = self.get_finalized_command('install_lib').optimize
if optimize > 0:
byte_compile([stub_file], optimize=optimize,
force=True, dry_run=self.dry_run)
if os.path.exists(stub_file) and not self.dry_run:
os.unlink(stub_file)
if use_stubs or os.name == 'nt':
# Build shared libraries
#
def link_shared_object(
self, objects, output_libname, output_dir=None, libraries=None,
library_dirs=None, runtime_library_dirs=None, export_symbols=None,
debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
target_lang=None):
self.link(
self.SHARED_LIBRARY, objects, output_libname,
output_dir, libraries, library_dirs, runtime_library_dirs,
export_symbols, debug, extra_preargs, extra_postargs,
build_temp, target_lang
)
else:
# Build static libraries everywhere else
libtype = 'static'
def link_shared_object(
self, objects, output_libname, output_dir=None, libraries=None,
library_dirs=None, runtime_library_dirs=None, export_symbols=None,
debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
target_lang=None):
# XXX we need to either disallow these attrs on Library instances,
# or warn/abort here if set, or something...
# libraries=None, library_dirs=None, runtime_library_dirs=None,
# export_symbols=None, extra_preargs=None, extra_postargs=None,
# build_temp=None
assert output_dir is None # distutils build_ext doesn't pass this
output_dir, filename = os.path.split(output_libname)
basename, ext = os.path.splitext(filename)
if self.library_filename("x").startswith('lib'):
# strip 'lib' prefix; this is kludgy if some platform uses
# a different prefix
basename = basename[3:]
self.create_static_lib(
objects, basename, output_dir, debug, target_lang
)
def _get_config_var_837(name):
"""
In https://github.com/pypa/setuptools/pull/837, we discovered
Python 3.3.0 exposes the extension suffix under the name 'SO'.
"""
if sys.version_info < (3, 3, 1):
name = 'SO'
return get_config_var(name)
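# Sketch: _get_config_var_837('EXT_SUFFIX') resolves to get_config_var('SO')
# on Python < 3.3.1 and to get_config_var('EXT_SUFFIX') on later versions.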
| gpl-3.0 |
robbarnsley/macro_image_analysis | camera.py | 1 | 2666 | import os
import gphoto2 as gp
class CameraInterface():
def __init__(self, logger):
self.logger = logger
        self.camera = None
        self.context = None
def _captureImage(self):
'''
Takes an image and returns the corresponding path instance on the camera
SD card.
'''
self.logger.info('Capturing image.')
file_path = gp.check_result(gp.gp_camera_capture(self.camera,
gp.GP_CAPTURE_IMAGE,
self.context))
self.logger.info('Image captured (' + file_path.folder + '/' + file_path.name + ')')
return file_path
def _connectToCamera(self):
self.context = gp.gp_context_new()
self.camera = gp.check_result(gp.gp_camera_new())
gp.check_result(gp.gp_camera_init(self.camera, self.context))
self.logger.info('Connected to camera.')
def _deleteFileFromCamera(self, file_path):
try:
gp.check_result(gp.gp_camera_file_delete(self.camera,
file_path.folder,
file_path.name, self.context))
except gp.GPhoto2Error:
pass
self.logger.info('Image deleted (' + file_path.folder + '/' + file_path.name + ')')
def _disconnectFromCamera(self):
gp.check_result(gp.gp_camera_exit(self.camera, self.context))
self.camera = None
self.context = None
self.logger.info('Disconnected from camera.')
def _transferImageFromCamera(self, camera_file_path, out_filename=None):
'''
Takes the result of gp_camera_capture and copies the corresponding
file to a local disk.
'''
if out_filename is None:
            # the parameter is camera_file_path; `camera_file` was undefined
            out_filename = os.path.join('/tmp/', camera_file_path.name)
fi = gp.check_result(
gp.gp_camera_file_get(self.camera,
camera_file_path.folder,
camera_file_path.name,
gp.GP_FILE_TYPE_NORMAL, self.context)
)
gp.check_result(gp.gp_file_save(fi, out_filename))
self.logger.info('File saved to ' + out_filename)
def captureImageAndSave(self, out_filename=None):
self._connectToCamera()
camera_file_path = self._captureImage()
self._transferImageFromCamera(camera_file_path, out_filename)
self._deleteFileFromCamera(camera_file_path)
self._disconnectFromCamera()
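# Usage sketch (assumes a configured logging.Logger named `log` and a
# gphoto2-compatible camera attached over USB):
#   CameraInterface(log).captureImageAndSave('/tmp/shot.jpg')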
| mit |
zvezdan/pip | src/pip/_vendor/distlib/scripts.py | 22 | 16585 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from io import BytesIO
import logging
import os
import re
import struct
import sys
from .compat import sysconfig, detect_encoding, ZipFile
from .resources import finder
from .util import (FileOperator, get_export_entry, convert_path,
get_executable, in_venv)
logger = logging.getLogger(__name__)
_DEFAULT_MANIFEST = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity version="1.0.0.0"
processorArchitecture="X86"
name="%s"
type="win32"/>
<!-- Identify the application security requirements. -->
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false"/>
</requestedPrivileges>
</security>
</trustInfo>
</assembly>'''.strip()
# check if Python is called on the first line with this expression
FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$')
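# Sketch: FIRST_LINE_RE matches b'#!/usr/bin/python2.7 -u' (group(1) is
# b' -u') and b'#!python', but not b'#!/bin/sh'.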
SCRIPT_TEMPLATE = r'''# -*- coding: utf-8 -*-
if __name__ == '__main__':
import sys, re
def _resolve(module, func):
__import__(module)
mod = sys.modules[module]
parts = func.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
try:
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
func = _resolve('%(module)s', '%(func)s')
rc = func() # None interpreted as 0
except Exception as e: # only supporting Python >= 2.6
sys.stderr.write('%%s\n' %% e)
rc = 1
sys.exit(rc)
'''
def _enquote_executable(executable):
if ' ' in executable:
# make sure we quote only the executable in case of env
# for example /usr/bin/env "/dir with spaces/bin/jython"
# instead of "/usr/bin/env /dir with spaces/bin/jython"
        # otherwise the whole command line would end up quoted
if executable.startswith('/usr/bin/env '):
env, _executable = executable.split(' ', 1)
if ' ' in _executable and not _executable.startswith('"'):
executable = '%s "%s"' % (env, _executable)
else:
if not executable.startswith('"'):
executable = '"%s"' % executable
return executable
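# Sketch: _enquote_executable('/usr/bin/env /opt/my python/bin/python')
# returns '/usr/bin/env "/opt/my python/bin/python"', while a bare path with
# spaces is wrapped whole, e.g. '"C:\\Program Files\\Python\\python.exe"'.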
class ScriptMaker(object):
"""
A class to copy or create scripts from source scripts or callable
specifications.
"""
script_template = SCRIPT_TEMPLATE
executable = None # for shebangs
def __init__(self, source_dir, target_dir, add_launchers=True,
dry_run=False, fileop=None):
self.source_dir = source_dir
self.target_dir = target_dir
self.add_launchers = add_launchers
self.force = False
self.clobber = False
# It only makes sense to set mode bits on POSIX.
self.set_mode = (os.name == 'posix') or (os.name == 'java' and
os._name == 'posix')
self.variants = set(('', 'X.Y'))
self._fileop = fileop or FileOperator(dry_run)
self._is_nt = os.name == 'nt' or (
os.name == 'java' and os._name == 'nt')
def _get_alternate_executable(self, executable, options):
if options.get('gui', False) and self._is_nt: # pragma: no cover
dn, fn = os.path.split(executable)
fn = fn.replace('python', 'pythonw')
executable = os.path.join(dn, fn)
return executable
if sys.platform.startswith('java'): # pragma: no cover
def _is_shell(self, executable):
"""
Determine if the specified executable is a script
(contains a #! line)
"""
try:
with open(executable) as fp:
return fp.read(2) == '#!'
except (OSError, IOError):
logger.warning('Failed to open %s', executable)
return False
def _fix_jython_executable(self, executable):
if self._is_shell(executable):
            # The Jython workaround below is not needed on Linux systems.
import java
if java.lang.System.getProperty('os.name') == 'Linux':
return executable
elif executable.lower().endswith('jython.exe'):
# Use wrapper exe for Jython on Windows
return executable
return '/usr/bin/env %s' % executable
def _build_shebang(self, executable, post_interp):
"""
        Build a shebang line. In the simple case (on Windows, or a shebang line
        which is neither too long nor contains spaces), use a simple formulation for
the shebang. Otherwise, use /bin/sh as the executable, with a contrived
shebang which allows the script to run either under Python or sh, using
suitable quoting. Thanks to Harald Nordgren for his input.
See also: http://www.in-ulm.de/~mascheck/various/shebang/#length
https://hg.mozilla.org/mozilla-central/file/tip/mach
"""
if os.name != 'posix':
simple_shebang = True
else:
# Add 3 for '#!' prefix and newline suffix.
shebang_length = len(executable) + len(post_interp) + 3
if sys.platform == 'darwin':
max_shebang_length = 512
else:
max_shebang_length = 127
simple_shebang = ((b' ' not in executable) and
(shebang_length <= max_shebang_length))
if simple_shebang:
result = b'#!' + executable + post_interp + b'\n'
else:
result = b'#!/bin/sh\n'
result += b"'''exec' " + executable + post_interp + b' "$0" "$@"\n'
result += b"' '''"
return result
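    # Contrived-shebang sketch for an overlong interpreter path:
    #   #!/bin/sh
    #   '''exec' /very/long/path/to/python "$0" "$@"
    #   ' '''
    # /bin/sh reads '''exec' as the word exec and re-runs the script under the
    # interpreter, while Python parses the same two lines as a harmless
    # triple-quoted string.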
def _get_shebang(self, encoding, post_interp=b'', options=None):
enquote = True
if self.executable:
executable = self.executable
enquote = False # assume this will be taken care of
elif not sysconfig.is_python_build():
executable = get_executable()
elif in_venv(): # pragma: no cover
executable = os.path.join(sysconfig.get_path('scripts'),
'python%s' % sysconfig.get_config_var('EXE'))
else: # pragma: no cover
executable = os.path.join(
sysconfig.get_config_var('BINDIR'),
'python%s%s' % (sysconfig.get_config_var('VERSION'),
sysconfig.get_config_var('EXE')))
if options:
executable = self._get_alternate_executable(executable, options)
if sys.platform.startswith('java'): # pragma: no cover
executable = self._fix_jython_executable(executable)
# Normalise case for Windows
executable = os.path.normcase(executable)
# If the user didn't specify an executable, it may be necessary to
# cater for executable paths with spaces (not uncommon on Windows)
if enquote:
executable = _enquote_executable(executable)
# Issue #51: don't use fsencode, since we later try to
# check that the shebang is decodable using utf-8.
executable = executable.encode('utf-8')
# in case of IronPython, play safe and enable frames support
if (sys.platform == 'cli' and '-X:Frames' not in post_interp
and '-X:FullFrames' not in post_interp): # pragma: no cover
post_interp += b' -X:Frames'
shebang = self._build_shebang(executable, post_interp)
# Python parser starts to read a script using UTF-8 until
# it gets a #coding:xxx cookie. The shebang has to be the
# first line of a file, the #coding:xxx cookie cannot be
# written before. So the shebang has to be decodable from
# UTF-8.
try:
shebang.decode('utf-8')
except UnicodeDecodeError: # pragma: no cover
raise ValueError(
'The shebang (%r) is not decodable from utf-8' % shebang)
# If the script is encoded to a custom encoding (use a
# #coding:xxx cookie), the shebang has to be decodable from
# the script encoding too.
if encoding != 'utf-8':
try:
shebang.decode(encoding)
except UnicodeDecodeError: # pragma: no cover
raise ValueError(
'The shebang (%r) is not decodable '
'from the script encoding (%r)' % (shebang, encoding))
return shebang
def _get_script_text(self, entry):
return self.script_template % dict(module=entry.prefix,
func=entry.suffix)
manifest = _DEFAULT_MANIFEST
def get_manifest(self, exename):
base = os.path.basename(exename)
return self.manifest % base
def _write_script(self, names, shebang, script_bytes, filenames, ext):
use_launcher = self.add_launchers and self._is_nt
linesep = os.linesep.encode('utf-8')
if not use_launcher:
script_bytes = shebang + linesep + script_bytes
else: # pragma: no cover
if ext == 'py':
launcher = self._get_launcher('t')
else:
launcher = self._get_launcher('w')
stream = BytesIO()
with ZipFile(stream, 'w') as zf:
zf.writestr('__main__.py', script_bytes)
zip_data = stream.getvalue()
script_bytes = launcher + shebang + linesep + zip_data
for name in names:
outname = os.path.join(self.target_dir, name)
if use_launcher: # pragma: no cover
n, e = os.path.splitext(outname)
if e.startswith('.py'):
outname = n
outname = '%s.exe' % outname
try:
self._fileop.write_binary_file(outname, script_bytes)
except Exception:
# Failed writing an executable - it might be in use.
logger.warning('Failed to write executable - trying to '
'use .deleteme logic')
dfname = '%s.deleteme' % outname
if os.path.exists(dfname):
os.remove(dfname) # Not allowed to fail here
os.rename(outname, dfname) # nor here
self._fileop.write_binary_file(outname, script_bytes)
logger.debug('Able to replace executable using '
'.deleteme logic')
try:
os.remove(dfname)
except Exception:
pass # still in use - ignore error
else:
if self._is_nt and not outname.endswith('.' + ext): # pragma: no cover
outname = '%s.%s' % (outname, ext)
if os.path.exists(outname) and not self.clobber:
logger.warning('Skipping existing file %s', outname)
continue
self._fileop.write_binary_file(outname, script_bytes)
if self.set_mode:
self._fileop.set_executable_mode([outname])
filenames.append(outname)
def _make_script(self, entry, filenames, options=None):
post_interp = b''
if options:
args = options.get('interpreter_args', [])
if args:
args = ' %s' % ' '.join(args)
post_interp = args.encode('utf-8')
shebang = self._get_shebang('utf-8', post_interp, options=options)
script = self._get_script_text(entry).encode('utf-8')
name = entry.name
scriptnames = set()
if '' in self.variants:
scriptnames.add(name)
if 'X' in self.variants:
scriptnames.add('%s%s' % (name, sys.version[0]))
if 'X.Y' in self.variants:
scriptnames.add('%s-%s' % (name, sys.version[:3]))
if options and options.get('gui', False):
ext = 'pyw'
else:
ext = 'py'
self._write_script(scriptnames, shebang, script, filenames, ext)
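    # Variant-naming sketch: with self.variants == {'', 'X', 'X.Y'} and
    # entry.name == 'foo' on Python 2.7, scriptnames above becomes
    # {'foo', 'foo2', 'foo-2.7'}.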
def _copy_script(self, script, filenames):
adjust = False
script = os.path.join(self.source_dir, convert_path(script))
outname = os.path.join(self.target_dir, os.path.basename(script))
if not self.force and not self._fileop.newer(script, outname):
logger.debug('not copying %s (up-to-date)', script)
return
# Always open the file, but ignore failures in dry-run mode --
# that way, we'll get accurate feedback if we can read the
# script.
try:
f = open(script, 'rb')
except IOError: # pragma: no cover
if not self.dry_run:
raise
f = None
else:
first_line = f.readline()
if not first_line: # pragma: no cover
logger.warning('%s: %s is an empty file (skipping)',
self.get_command_name(), script)
return
match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
if match:
adjust = True
post_interp = match.group(1) or b''
if not adjust:
if f:
f.close()
self._fileop.copy_file(script, outname)
if self.set_mode:
self._fileop.set_executable_mode([outname])
filenames.append(outname)
else:
logger.info('copying and adjusting %s -> %s', script,
self.target_dir)
if not self._fileop.dry_run:
encoding, lines = detect_encoding(f.readline)
f.seek(0)
shebang = self._get_shebang(encoding, post_interp)
if b'pythonw' in first_line: # pragma: no cover
ext = 'pyw'
else:
ext = 'py'
n = os.path.basename(outname)
self._write_script([n], shebang, f.read(), filenames, ext)
if f:
f.close()
@property
def dry_run(self):
return self._fileop.dry_run
@dry_run.setter
def dry_run(self, value):
self._fileop.dry_run = value
if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'): # pragma: no cover
# Executable launcher support.
# Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/
def _get_launcher(self, kind):
if struct.calcsize('P') == 8: # 64-bit
bits = '64'
else:
bits = '32'
name = '%s%s.exe' % (kind, bits)
# Issue 31: don't hardcode an absolute package name, but
# determine it relative to the current package
distlib_package = __name__.rsplit('.', 1)[0]
result = finder(distlib_package).find(name).bytes
return result
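        # Sketch: on a 64-bit Python, _get_launcher('t') returns the bytes of
        # 't64.exe' shipped as package data alongside this module.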
# Public API follows
def make(self, specification, options=None):
"""
Make a script.
:param specification: The specification, which is either a valid export
entry specification (to make a script from a
callable) or a filename (to make a script by
copying from a source location).
:param options: A dictionary of options controlling script generation.
:return: A list of all absolute pathnames written to.
"""
filenames = []
entry = get_export_entry(specification)
if entry is None:
self._copy_script(specification, filenames)
else:
self._make_script(entry, filenames, options=options)
return filenames
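    # Usage sketch (hypothetical names):
    #   maker = ScriptMaker('scripts', 'bin')
    #   maker.make('foo = mypkg.cli:main')  # generated from an export entry
    #   maker.make('legacy-tool.py')        # copied, with its shebang adjusted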
def make_multiple(self, specifications, options=None):
"""
        Take a list of specifications and make scripts from them.
        :param specifications: A list of specifications.
        :param options: A dictionary of options, as for the make() method.
        :return: A list of all absolute pathnames written to.
"""
filenames = []
for specification in specifications:
filenames.extend(self.make(specification, options))
return filenames
| mit |
tfeagle/mitmproxy | libmproxy/script.py | 9 | 5930 | from __future__ import absolute_import
import os
import traceback
import threading
import shlex
import sys
class ScriptError(Exception):
pass
class ScriptContext:
"""
The script context should be used to interact with the global mitmproxy state from within a
script.
"""
def __init__(self, master):
self._master = master
def log(self, message, level="info"):
"""
Logs an event.
By default, only events with level "error" get displayed. This can be controlled with the "-v" switch.
How log messages are handled depends on the front-end. mitmdump will print them to stdout,
mitmproxy sends output to the eventlog for display ("e" keyboard shortcut).
"""
self._master.add_event(message, level)
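    # Usage sketch from an inline script hook (hypothetical message):
    #   ctx.log("handled a flow", level="info")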
def kill_flow(self, f):
"""
Kills a flow immediately. No further data will be sent to the client or the server.
"""
f.kill(self._master)
def duplicate_flow(self, f):
"""
Returns a duplicate of the specified flow. The flow is also
injected into the current state, and is ready for editing, replay,
etc.
"""
self._master.pause_scripts = True
f = self._master.duplicate_flow(f)
self._master.pause_scripts = False
return f
def replay_request(self, f):
"""
Replay the request on the current flow. The response will be added
to the flow object.
"""
return self._master.replay_request(f, block=True, run_scripthooks=False)
@property
def app_registry(self):
return self._master.apps
class Script:
"""
Script object representing an inline script.
"""
def __init__(self, command, master):
self.args = self.parse_command(command)
self.ctx = ScriptContext(master)
self.ns = None
self.load()
@classmethod
def parse_command(cls, command):
if not command or not command.strip():
raise ScriptError("Empty script command.")
if os.name == "nt": # Windows: escape all backslashes in the path.
backslashes = shlex.split(command, posix=False)[0].count("\\")
command = command.replace("\\", "\\\\", backslashes)
args = shlex.split(command)
args[0] = os.path.expanduser(args[0])
if not os.path.exists(args[0]):
raise ScriptError(
("Script file not found: %s.\r\n"
"If your script path contains spaces, "
"make sure to wrap it in additional quotes, e.g. -s \"'./foo bar/baz.py' --args\".") %
args[0])
elif not os.path.isfile(args[0]):
raise ScriptError("Not a file: %s" % args[0])
return args
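    # Windows sketch: for r'C:\scripts\a.py --args', the backslashes in the
    # path token are doubled before POSIX-mode shlex.split runs, so
    # parse_command returns ['C:\\scripts\\a.py', '--args'] with the path intact.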
def load(self):
"""
Loads an inline script.
Returns:
The return value of self.run("start", ...)
Raises:
ScriptError on failure
"""
if self.ns is not None:
self.unload()
script_dir = os.path.dirname(os.path.abspath(self.args[0]))
ns = {'__file__': os.path.abspath(self.args[0])}
sys.path.append(script_dir)
try:
execfile(self.args[0], ns, ns)
except Exception as e:
# Python 3: use exception chaining, https://www.python.org/dev/peps/pep-3134/
raise ScriptError(traceback.format_exc(e))
sys.path.pop()
self.ns = ns
return self.run("start", self.args)
def unload(self):
ret = self.run("done")
self.ns = None
return ret
def run(self, name, *args, **kwargs):
"""
Runs an inline script hook.
Returns:
The return value of the method.
None, if the script does not provide the method.
Raises:
ScriptError if there was an exception.
"""
f = self.ns.get(name)
if f:
try:
return f(self.ctx, *args, **kwargs)
except Exception as e:
raise ScriptError(traceback.format_exc(e))
else:
return None
class ReplyProxy(object):
def __init__(self, original_reply, script_thread):
self.original_reply = original_reply
self.script_thread = script_thread
self._ignore_call = True
self.lock = threading.Lock()
def __call__(self, *args, **kwargs):
with self.lock:
if self._ignore_call:
self.script_thread.start()
self._ignore_call = False
return
self.original_reply(*args, **kwargs)
def __getattr__(self, k):
return getattr(self.original_reply, k)
def _handle_concurrent_reply(fn, o, *args, **kwargs):
    # Make the first call to o.reply a no-op and start the script thread.
    # We must not start the script thread any earlier, as this may lead to a nasty
    # race condition where the script thread replies with a different response
    # before the normal reply, which then gets swallowed.
def run():
fn(*args, **kwargs)
# If the script did not call .reply(), we have to do it now.
reply_proxy()
script_thread = ScriptThread(target=run)
reply_proxy = ReplyProxy(o.reply, script_thread)
o.reply = reply_proxy
class ScriptThread(threading.Thread):
name = "ScriptThread"
def concurrent(fn):
if fn.func_name in (
"request",
"response",
"error",
"clientconnect",
"serverconnect",
"clientdisconnect",
"next_layer"):
def _concurrent(ctx, obj):
_handle_concurrent_reply(fn, obj, ctx, obj)
return _concurrent
raise NotImplementedError(
"Concurrent decorator not supported for '%s' method." % fn.func_name)
| mit |
sneaker-rohit/PI2-ns-3 | src/visualizer/visualizer/plugins/wifi_intrastructure_link.py | 182 | 4933 | import math
import ns.wifi
import ns.network
import goocanvas
from visualizer.base import Link, transform_distance_canvas_to_simulation
class WifiLink(Link):
def __init__(self, parent_canvas_item, sta, dev):
self.node1 = sta
self.dev = dev
self.node2 = None # ap
self.canvas_item = goocanvas.Group(parent=parent_canvas_item)
self.invisible_line = goocanvas.Polyline(parent=self.canvas_item,
line_width=25.0,
visibility=goocanvas.ITEM_HIDDEN)
self.visible_line = goocanvas.Polyline(parent=self.canvas_item,
line_width=1.0,
stroke_color_rgba=0xC00000FF,
line_dash=goocanvas.LineDash([2.0, 2.0 ]))
self.invisible_line.props.pointer_events = (goocanvas.EVENTS_STROKE_MASK
|goocanvas.EVENTS_FILL_MASK
|goocanvas.EVENTS_PAINTED_MASK)
self.canvas_item.set_data("pyviz-object", self)
self.canvas_item.lower(None)
self.set_ap(None)
def set_ap(self, ap):
if ap is self.node2:
return
if self.node2 is not None:
self.node2.remove_link(self)
self.node2 = ap
if self.node2 is None:
self.canvas_item.set_property("visibility", goocanvas.ITEM_HIDDEN)
else:
self.node2.add_link(self)
self.canvas_item.set_property("visibility", goocanvas.ITEM_VISIBLE)
self.update_points()
def update_points(self):
if self.node2 is None:
return
pos1_x, pos1_y = self.node1.get_position()
pos2_x, pos2_y = self.node2.get_position()
points = goocanvas.Points([(pos1_x, pos1_y), (pos2_x, pos2_y)])
self.visible_line.set_property("points", points)
self.invisible_line.set_property("points", points)
def destroy(self):
self.canvas_item.destroy()
self.node1 = None
self.node2 = None
def tooltip_query(self, tooltip):
pos1_x, pos1_y = self.node1.get_position()
pos2_x, pos2_y = self.node2.get_position()
dx = pos2_x - pos1_x
dy = pos2_y - pos1_y
d = transform_distance_canvas_to_simulation(math.sqrt(dx*dx + dy*dy))
mac = self.dev.GetMac()
tooltip.set_text(("WiFi link between STA Node %i and AP Node %i; distance=%.2f m.\n"
"SSID: %s\n"
"BSSID: %s")
% (self.node1.node_index, self.node2.node_index, d,
mac.GetSsid(), mac.GetBssid()))
class WifiLinkMonitor(object):
def __init__(self, dummy_viz):
self.access_points = {} # bssid -> node
self.stations = [] # list of (sta_netdevice, viz_node, wifi_link)
def scan_nodes(self, viz):
for (sta_netdevice, viz_node, wifi_link) in self.stations:
wifi_link.destroy()
self.access_points = {}
self.stations = []
for node in viz.nodes.itervalues():
ns3_node = ns.network.NodeList.GetNode(node.node_index)
for devI in range(ns3_node.GetNDevices()):
dev = ns3_node.GetDevice(devI)
if not isinstance(dev, ns.wifi.WifiNetDevice):
continue
wifi_mac = dev.GetMac()
if isinstance(wifi_mac, ns.wifi.StaWifiMac):
wifi_link = WifiLink(viz.links_group, node, dev)
self.stations.append((dev, node, wifi_link))
elif isinstance(wifi_mac, ns.wifi.ApWifiMac):
bssid = ns.network.Mac48Address.ConvertFrom(dev.GetAddress())
self.access_points[str(bssid)] = node
#print "APs: ", self.access_points
#print "STAs: ", self.stations
def simulation_periodic_update(self, viz):
for (sta_netdevice, viz_node, wifi_link) in self.stations:
if not sta_netdevice.IsLinkUp():
wifi_link.set_ap(None)
continue
bssid = str(sta_netdevice.GetMac().GetBssid())
if bssid == '00:00:00:00:00:00':
wifi_link.set_ap(None)
continue
ap = self.access_points[bssid]
wifi_link.set_ap(ap)
def update_view(self, viz):
for (dummy_sta_netdevice, dummy_viz_node, wifi_link) in self.stations:
if wifi_link is not None:
wifi_link.update_points()
def register(viz):
link_monitor = WifiLinkMonitor(viz)
viz.connect("simulation-periodic-update", link_monitor.simulation_periodic_update)
viz.connect("update-view", link_monitor.update_view)
viz.connect("topology-scanned", link_monitor.scan_nodes)
| gpl-2.0 |
sstoma/CellProfiler | cellprofiler/modules/tests/test_filterobjects.py | 2 | 91690 | '''test_filterbyobjectmeasurements.py: Test FilterByObjectMeasurements module
CellProfiler is distributed under the GNU General Public License.
See the accompanying file LICENSE for details.
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2015 Broad Institute
All rights reserved.
Please see the AUTHORS file for credits.
Website: http://www.cellprofiler.org
'''
import base64
import numpy as np
import os
import StringIO
import tempfile
import zlib
import unittest
from cellprofiler.preferences import set_headless
set_headless()
import cellprofiler.workspace as cpw
import cellprofiler.pipeline as cpp
import cellprofiler.objects as cpo
import cellprofiler.cpimage as cpi
import cellprofiler.preferences as cpprefs
import cellprofiler.measurements as cpm
import cellprofiler.modules.filterobjects as F
class TestFilterObjects(unittest.TestCase):
def make_workspace(self, object_dict= {}, image_dict = {}):
'''Make a workspace for testing FilterByObjectMeasurement'''
module = F.FilterByObjectMeasurement()
pipeline = cpp.Pipeline()
object_set = cpo.ObjectSet()
image_set_list = cpi.ImageSetList()
image_set = image_set_list.get_image_set(0)
workspace = cpw.Workspace(pipeline,
module,
image_set,
object_set,
cpm.Measurements(),
image_set_list)
for key in image_dict.keys():
image_set.add(key, cpi.Image(image_dict[key]))
for key in object_dict.keys():
o = cpo.Objects()
o.segmented = object_dict[key]
object_set.add_objects(o, key)
return workspace, module
def test_00_01_zeros_single(self):
'''Test keep single object on an empty labels matrix'''
workspace, module = self.make_workspace({ "my_objects": np.zeros((10,10),int) })
module.object_name.value = "my_objects"
module.target_name.value = "my_result"
module.measurements[0].measurement.value = "my_measurement"
module.filter_choice.value = F.FI_MAXIMAL
m = workspace.measurements
m.add_measurement("my_objects","my_measurement",np.zeros((0,)))
module.run(workspace)
labels = workspace.object_set.get_objects("my_result")
self.assertTrue(np.all(labels.segmented==0))
def test_00_02_zeros_per_object(self):
'''Test keep per object filtering on an empty labels matrix'''
workspace, module = self.make_workspace(
{"my_objects": np.zeros((10,10),int),
"my_enclosing_objects": np.zeros((10,10),int)})
module.object_name.value = "my_objects"
module.target_name.value = "my_result"
module.enclosing_object_name.value = "my_enclosing_objects"
module.measurements[0].measurement.value = "my_measurement"
module.filter_choice.value = F.FI_MAXIMAL_PER_OBJECT
m = workspace.measurements
m.add_measurement("my_objects","my_measurement",np.zeros((0,)))
module.run(workspace)
labels = workspace.object_set.get_objects("my_result")
self.assertTrue(np.all(labels.segmented==0))
def test_00_03_zeros_filter(self):
'''Test object filtering on an empty labels matrix'''
workspace, module = self.make_workspace({ "my_objects": np.zeros((10,10),int) })
module.object_name.value = "my_objects"
module.target_name.value = "my_result"
module.measurements[0].measurement.value = "my_measurement"
module.filter_choice.value = F.FI_LIMITS
module.measurements[0].min_limit.value = 0
module.measurements[0].max_limit.value = 1000
m = workspace.measurements
m.add_measurement("my_objects","my_measurement",np.zeros((0,)))
module.run(workspace)
labels = workspace.object_set.get_objects("my_result")
self.assertTrue(np.all(labels.segmented==0))
def test_01_01_keep_single_min(self):
'''Keep a single object (min) from among two'''
labels = np.zeros((10,10), int)
labels[2:4,3:5] = 1
labels[6:9,5:8] = 2
expected = labels.copy()
expected[labels == 1] = 0
expected[labels == 2] = 1
workspace, module = self.make_workspace({ "my_objects": labels })
module.object_name.value = "my_objects"
module.target_name.value = "my_result"
module.measurements[0].measurement.value = "my_measurement"
module.filter_choice.value = F.FI_MINIMAL
m = workspace.measurements
m.add_measurement("my_objects","my_measurement",np.array([2,1]))
module.run(workspace)
labels = workspace.object_set.get_objects("my_result")
self.assertTrue(np.all(labels.segmented==expected))
parents = m.get_current_measurement("my_result","Parent_my_objects")
self.assertEqual(len(parents),1)
self.assertEqual(parents[0],2)
self.assertEqual(m.get_current_image_measurement("Count_my_result"),1)
feature = F.FF_CHILDREN_COUNT % "my_result"
child_count = m.get_current_measurement("my_objects", feature)
self.assertEqual(len(child_count), 2)
self.assertEqual(child_count[0], 0)
self.assertEqual(child_count[1], 1)
def test_01_02_keep_single_max(self):
'''Keep a single object (max) from among two'''
labels = np.zeros((10,10), int)
labels[2:4,3:5] = 1
labels[6:9,5:8] = 2
expected = labels.copy()
expected[labels == 1] = 0
expected[labels == 2] = 1
workspace, module = self.make_workspace({ "my_objects": labels })
module.object_name.value = "my_objects"
module.target_name.value = "my_result"
module.measurements[0].measurement.value = "my_measurement"
module.filter_choice.value = F.FI_MAXIMAL
m = workspace.measurements
m.add_measurement("my_objects","my_measurement",np.array([1,2]))
module.run(workspace)
labels = workspace.object_set.get_objects("my_result")
self.assertTrue(np.all(labels.segmented==expected))
def test_02_01_keep_one_min(self):
'''Keep two sub-objects (min) from among four enclosed by two'''
sub_labels = np.zeros((20,20), int)
expected = np.zeros((20,20), int)
for i,j,k,e in ((0,0,1,0),(10,0,2,1),(0,10,3,2),(10,10,4,0)):
sub_labels[i+2:i+5,j+3:j+7] = k
expected[i+2:i+5,j+3:j+7] = e
labels = np.zeros((20,20), int)
labels[:,:10] = 1
labels[:,10:] = 2
workspace, module = self.make_workspace({ "my_objects": sub_labels,
"my_enclosing_objects": labels })
module.object_name.value = "my_objects"
module.target_name.value = "my_result"
module.enclosing_object_name.value = 'my_enclosing_objects'
module.measurements[0].measurement.value = "my_measurement"
module.filter_choice.value = F.FI_MINIMAL_PER_OBJECT
m = workspace.measurements
m.add_measurement("my_objects","my_measurement",np.array([2,1,3,4]))
module.run(workspace)
labels = workspace.object_set.get_objects("my_result")
self.assertTrue(np.all(labels.segmented==expected))
def test_02_02_keep_one_max(self):
'''Keep two sub-objects (max) from among four enclosed by two'''
sub_labels = np.zeros((20,20), int)
expected = np.zeros((20,20), int)
for i,j,k,e in ((0,0,1,0),(10,0,2,1),(0,10,3,2),(10,10,4,0)):
sub_labels[i+2:i+5,j+3:j+7] = k
expected[i+2:i+5,j+3:j+7] = e
labels = np.zeros((20,20), int)
labels[:,:10] = 1
labels[:,10:] = 2
workspace, module = self.make_workspace({ "my_objects": sub_labels,
"my_enclosing_objects": labels })
module.object_name.value = "my_objects"
module.target_name.value = "my_result"
module.enclosing_object_name.value = 'my_enclosing_objects'
module.measurements[0].measurement.value = "my_measurement"
module.filter_choice.value = F.FI_MAXIMAL_PER_OBJECT
m = workspace.measurements
m.add_measurement("my_objects","my_measurement",np.array([1,2,4,3]))
module.run(workspace)
labels = workspace.object_set.get_objects("my_result")
self.assertTrue(np.all(labels.segmented==expected))
def test_02_03_keep_maximal_most_overlap(self):
labels = np.zeros((10, 20), int)
labels[:, :10] = 1
labels[:, 10:] = 2
sub_labels = np.zeros((10, 20), int)
sub_labels[2, 4] = 1
sub_labels[4:6, 8:15] = 2
sub_labels[8, 15] = 3
expected = sub_labels * (sub_labels != 3)
workspace, module = self.make_workspace({ "my_objects": sub_labels,
"my_enclosing_objects": labels })
module.object_name.value = "my_objects"
module.target_name.value = "my_result"
module.enclosing_object_name.value = 'my_enclosing_objects'
module.measurements[0].measurement.value = "my_measurement"
module.filter_choice.value = F.FI_MAXIMAL_PER_OBJECT
module.per_object_assignment.value = F.PO_PARENT_WITH_MOST_OVERLAP
m = workspace.measurements
m.add_measurement("my_objects","my_measurement",np.array([1,4,2]))
module.run(workspace)
labels = workspace.object_set.get_objects("my_result")
self.assertTrue(np.all(labels.segmented==expected))
def test_02_04_keep_minimal_most_overlap(self):
labels = np.zeros((10, 20), int)
labels[:, :10] = 1
labels[:, 10:] = 2
sub_labels = np.zeros((10, 20), int)
sub_labels[2, 4] = 1
sub_labels[4:6, 8:15] = 2
sub_labels[8, 15] = 3
expected = sub_labels * (sub_labels != 3)
workspace, module = self.make_workspace({ "my_objects": sub_labels,
"my_enclosing_objects": labels })
module.object_name.value = "my_objects"
module.target_name.value = "my_result"
module.enclosing_object_name.value = 'my_enclosing_objects'
module.measurements[0].measurement.value = "my_measurement"
module.filter_choice.value = F.FI_MINIMAL_PER_OBJECT
module.per_object_assignment.value = F.PO_PARENT_WITH_MOST_OVERLAP
m = workspace.measurements
m.add_measurement("my_objects","my_measurement",np.array([4,2,3]))
module.run(workspace)
labels = workspace.object_set.get_objects("my_result")
self.assertTrue(np.all(labels.segmented==expected))
def test_03_01_filter(self):
'''Filter objects by limits'''
n = 40
labels = np.zeros((10,n*10),int)
for i in range(40):
labels[2:5,i*10+3:i*10+7] = i+1
np.random.seed(0)
values = np.random.uniform(size=n)
idx = 1
my_min = .3
my_max = .7
expected = np.zeros(labels.shape, int)
for i, value in zip(range(n), values):
if value >= my_min and value <= my_max:
expected[labels == i+1] = idx
idx += 1
workspace, module = self.make_workspace({ "my_objects": labels })
module.object_name.value = "my_objects"
module.target_name.value = "my_result"
module.measurements[0].measurement.value = "my_measurement"
module.filter_choice.value = F.FI_LIMITS
module.measurements[0].wants_minimum.value = True
module.measurements[0].min_limit.value = my_min
module.measurements[0].wants_maximum.value = True
module.measurements[0].max_limit.value = my_max
m = workspace.measurements
m.add_measurement("my_objects","my_measurement",values)
module.run(workspace)
labels = workspace.object_set.get_objects("my_result")
self.assertTrue(np.all(labels.segmented==expected))
def test_03_02_filter(self):
'''Filter objects by min limits'''
n = 40
labels = np.zeros((10,n*10),int)
for i in range(40):
labels[2:5,i*10+3:i*10+7] = i+1
np.random.seed(0)
values = np.random.uniform(size=n)
idx = 1
my_min = .3
expected = np.zeros(labels.shape, int)
for i, value in zip(range(n), values):
if value >= my_min:
expected[labels == i+1] = idx
idx += 1
workspace, module = self.make_workspace({ "my_objects": labels })
module.object_name.value = "my_objects"
module.target_name.value = "my_result"
module.measurements[0].measurement.value = "my_measurement"
module.filter_choice.value = F.FI_LIMITS
module.measurements[0].min_limit.value = my_min
module.measurements[0].max_limit.value = .7
module.measurements[0].wants_maximum.value = False
m = workspace.measurements
m.add_measurement("my_objects","my_measurement",values)
module.run(workspace)
labels = workspace.object_set.get_objects("my_result")
self.assertTrue(np.all(labels.segmented==expected))
def test_03_03_filter(self):
'''Filter objects by maximum limits'''
n = 40
labels = np.zeros((10,n*10),int)
for i in range(40):
labels[2:5,i*10+3:i*10+7] = i+1
np.random.seed(0)
values = np.random.uniform(size=n)
idx = 1
my_max = .7
expected = np.zeros(labels.shape, int)
for i, value in zip(range(n), values):
if value <= my_max:
expected[labels == i+1] = idx
idx += 1
workspace, module = self.make_workspace({ "my_objects": labels })
module.object_name.value = "my_objects"
module.target_name.value = "my_result"
module.measurements[0].measurement.value = "my_measurement"
module.filter_choice.value = F.FI_LIMITS
module.measurements[0].min_limit.value = .3
module.measurements[0].wants_minimum.value = False
module.measurements[0].max_limit.value = my_max
m = workspace.measurements
m.add_measurement("my_objects","my_measurement",values)
module.run(workspace)
labels = workspace.object_set.get_objects("my_result")
self.assertTrue(np.all(labels.segmented==expected))
def test_03_04_filter_two(self):
'''Filter objects by two measurements'''
n = 40
labels = np.zeros((10,n*10),int)
for i in range(40):
labels[2:5,i*10+3:i*10+7] = i+1
np.random.seed(0)
values = np.zeros((n,2))
values = np.random.uniform(size=(n,2))
idx = 1
my_max = np.array([.7,.5])
expected = np.zeros(labels.shape, int)
for i, v1,v2 in zip(range(n), values[:,0],values[:,1]):
if v1 <= my_max[0] and v2 <= my_max[1]:
expected[labels == i+1] = idx
idx += 1
workspace, module = self.make_workspace({ "my_objects": labels })
module.object_name.value = "my_objects"
module.target_name.value = "my_result"
module.add_measurement()
m = workspace.measurements
for i in range(2):
measurement_name = "measurement%d" % (i+1)
module.measurements[i].measurement.value = measurement_name
module.filter_choice.value = F.FI_LIMITS
module.measurements[i].min_limit.value = .3
module.measurements[i].wants_minimum.value = False
module.measurements[i].max_limit.value = my_max[i]
m.add_measurement("my_objects",measurement_name,values[:,i])
module.run(workspace)
labels = workspace.object_set.get_objects("my_result")
self.assertTrue(np.all(labels.segmented==expected))
def test_04_01_renumber_other(self):
'''Renumber an associated object'''
n = 40
labels = np.zeros((10,n*10),int)
alternates = np.zeros((10,n*10), int)
for i in range(40):
labels[2:5,i*10+3:i*10+7] = i+1
alternates[3:7,i*10+2:i*10+5] = i+1
np.random.seed(0)
values = np.random.uniform(size=n)
idx = 1
my_min = .3
my_max = .7
expected = np.zeros(labels.shape, int)
expected_alternates = np.zeros(alternates.shape, int)
for i, value in zip(range(n), values):
if value >= my_min and value <= my_max:
expected[labels == i+1] = idx
expected_alternates[alternates == i+1] = idx
idx += 1
workspace, module = self.make_workspace({ "my_objects": labels,
"my_alternates": alternates })
module.object_name.value = "my_objects"
module.target_name.value = "my_result"
module.measurements[0].measurement.value = "my_measurement"
module.filter_choice.value = F.FI_LIMITS
module.measurements[0].min_limit.value = my_min
module.measurements[0].max_limit.value = my_max
module.add_additional_object()
module.additional_objects[0].object_name.value="my_alternates"
module.additional_objects[0].target_name.value = "my_additional_result"
m = workspace.measurements
m.add_measurement("my_objects","my_measurement",values)
module.run(workspace)
labels = workspace.object_set.get_objects("my_result")
alternates = workspace.object_set.get_objects("my_additional_result")
self.assertTrue(np.all(labels.segmented==expected))
self.assertTrue(np.all(alternates.segmented==expected_alternates))
def test_05_00_load_matlab_v5(self):
data = """CellProfiler Pipeline: http://www.cellprofiler.org
Version:1
SVNRevision:1234
FromMatlab:True
FilterByObjectMeasurement:[module_num:1|svn_version:\'8913\'|variable_revision_number:5|show_window:False|notes:\x5B\x5D]
Which object would you like to filter by, or if using a Ratio, what is the numerator object?:MyObjects
What do you want to call the filtered objects?:MyFilteredObjects
Which category of measurements would you want to filter by?:Texture
Which feature do you want to use? (Enter the feature number or name - see help for details):Granulectomy
For INTENSITY, AREAOCCUPIED or TEXTURE features, which image's measurements do you want to use (for other measurements, this will only affect the display)?:MyImage
For TEXTURE, RADIAL DISTRIBUTION, OR NEIGHBORS features, what previously measured size scale (TEXTURE OR NEIGHBORS) or previously used number of bins (RADIALDISTRIBUTION) do you want to use?:15
Minimum value required:No minimum
Maximum value allowed:0.85
What do you want to call the outlines of the identified objects (optional)?:MyOutlines
"""
pipeline = cpp.Pipeline()
def callback(caller, event):
self.assertFalse(isinstance(event, cpp.LoadExceptionEvent))
pipeline.add_listener(callback)
pipeline.load(StringIO.StringIO(data))
self.assertEqual(len(pipeline.modules()), 1)
module = pipeline.modules()[0]
self.assertTrue(isinstance(module, F.FilterByObjectMeasurement))
self.assertEqual(module.object_name, "MyObjects")
self.assertEqual(module.target_name, "MyFilteredObjects")
self.assertEqual(module.measurements[0].measurement, "Texture_Granulectomy")
self.assertEqual(module.filter_choice, F.FI_LIMITS)
self.assertAlmostEqual(module.measurements[0].max_limit.value, 0.85)
self.assertEqual(module.outlines_name, "MyOutlines")
def test_05_01_load_matlab(self):
'''Test loading a Matlab pipeline
Saved Pipeline, in file fbom_pipe.txt, Saved on 22-Apr-2009
SVN version number: 7297
Pixel Size: 1
Pipeline:
KeepLargestObject
FilterByObjectMeasurement
FilterByObjectMeasurement
Module #1: KeepLargestObject revision - 1
What did you call the primary objects? FilteredNuclei
What did you call the secondary objects? TargetObjects
What do you want to call the largest primary objects? TargetObjects
Module #2: FilterByObjectMeasurement revision - 6
What do you want to call the filtered objects? FilteredNuclei
Which object would you like to filter by, or if using a Ratio, what is the numerator object? LargestObjects
Which category of measurements would you want to filter by? AreaShape
Which feature do you want to use? (Enter the feature number or name - see help for details) Perimeter
For INTENSITY, AREAOCCUPIED or TEXTURE features, which image's measurements do you want to use (for other measurements, this will only affect the display)?
For TEXTURE, RADIAL DISTRIBUTION, OR NEIGHBORS features, what previously measured size scale (TEXTURE OR NEIGHBORS) or previously used number of bins (RADIALDISTRIBUTION) do you want to use? 1
Minimum value required: 200
Maximum value allowed: No maximum
What do you want to call the outlines of the identified objects? Type "Do not use" to ignore. Do not use
Module #3: FilterByObjectMeasurement revision - 6
What do you want to call the filtered objects? FilteredNuclei
Which object would you like to filter by, or if using a Ratio, what is the numerator object? TargetObjects
Which category of measurements would you want to filter by? Intensity
Which feature do you want to use? (Enter the feature number or name - see help for details) MeanIntensity
For INTENSITY, AREAOCCUPIED or TEXTURE features, which image's measurements do you want to use (for other measurements, this will only affect the display)?
For TEXTURE, RADIAL DISTRIBUTION, OR NEIGHBORS features, what previously measured size scale (TEXTURE OR NEIGHBORS) or previously used number of bins (RADIALDISTRIBUTION) do you want to use? 1
Minimum value required: No minimum
Maximum value allowed: .25
What do you want to call the outlines of the identified objects? Type "Do not use" to ignore. OutlineObjects
'''
data = ('TUFUTEFCIDUuMCBNQVQtZmlsZSwgUGxhdGZvcm06IFBDV0lOLCBD' +
'cmVhdGVkIG9uOiBXZWQgQXByIDIyIDEyOjM2OjQ3IDIwMDkgICAg' +
'ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg' +
'ICAgICAgIAABSU0PAAAACQIAAHic1ZZPT9swFMAd0lYURAU7gbRD' +
'j5xQQZq045gmBGK0FUW9u/RRPCV2FNuo5VPtuOM+Csd9jNkkaR1T' +
'SNKEanuRFT3nvd/7I+clLYTQzx2EGuq+qdYGiqQe646xtD4AIQid' +
'8Dqqof14/7daQxwSPPJgiD0JHM0l2b+gd+xmFswfXbGx9KCLfdNY' +
'SVf6Iwh57y5xjB/3yRS8AXkElJbE7BoeCCeMxv4x396dx2XCittS' +
'66m+6INj9cFVq2nsa/svaGFfW9K3lmG/G+tnxBMQwrgrbz0g/ydn' +
'x+Jo/QaHExC90Q+4FbxEf75rDl+A1plPVZymxdH6aQh4cI+D5Pyu' +
'yrmgAignYmZwyvD6EBIf1BEowFnWpyvA1MzNzKvzBi8Rk1fWL4+/' +
'Odcifwcdl/TLG9dN+bvopNPJ1fctq16td1nbJ5T40n9Z/6o8PE3z' +
'itZzdPKpdB7fWJsy0ZYc8nOWzZOeFB6hkJonf9xq5/y2FVfr7Dng' +
'JGQymHPOMzh7FmcvzWkTOobAyKtqXlV1/mucqvtU9J5Vx7LzT3w8' +
'gUUZlXKqvmf194OVl9ZZ9F6+aPB78A6d1993e36tel4uAYLUv4vB' +
'62fwDiye1qP/sq+zCKa+rlyG4ANdB9ec942Mfm0ozW02K/ve5onn' +
'NBrPfp8L+NVim4OPQ3VFcX+hYufi8A37RNZl/xf1ffWE')
pipeline = cpp.Pipeline()
def handle_error(caller, event):
if isinstance(event, cpp.LoadExceptionEvent):
self.fail(event.error.message)
pipeline.add_listener(handle_error)
fd = StringIO.StringIO(base64.b64decode(data))
pipeline.load(fd)
self.assertEqual(len(pipeline.modules()), 3)
klo, fbom1, fbom2 = pipeline.modules()
self.assertEqual(klo.object_name.value, 'TargetObjects')
self.assertEqual(klo.enclosing_object_name.value, 'FilteredNuclei')
self.assertEqual(klo.target_name.value, 'TargetObjects')
self.assertEqual(klo.filter_choice.value, F.FI_MAXIMAL_PER_OBJECT)
self.assertEqual(klo.measurements[0].measurement.value, 'AreaShape_Area')
self.assertFalse(klo.wants_outlines.value)
self.assertEqual(fbom1.object_name.value,'LargestObjects')
self.assertEqual(fbom1.target_name.value,'FilteredNuclei')
self.assertEqual(fbom1.filter_choice.value, F.FI_LIMITS)
self.assertEqual(fbom1.measurements[0].measurement.value, 'AreaShape_Perimeter')
self.assertTrue(fbom1.measurements[0].wants_minimum.value)
self.assertEqual(fbom1.measurements[0].min_limit.value, 200)
self.assertFalse(fbom1.measurements[0].wants_maximum.value)
self.assertFalse(fbom1.wants_outlines.value)
self.assertEqual(fbom2.object_name.value,'TargetObjects')
self.assertEqual(fbom2.target_name.value,'FilteredNuclei')
self.assertEqual(fbom2.filter_choice.value, F.FI_LIMITS)
self.assertEqual(fbom2.measurements[0].measurement.value, 'Intensity_MeanIntensity')
self.assertFalse(fbom2.measurements[0].wants_minimum.value)
self.assertTrue(fbom2.measurements[0].wants_maximum.value)
self.assertEqual(fbom2.measurements[0].max_limit.value, .25)
self.assertTrue(fbom2.wants_outlines.value)
self.assertEqual(fbom2.outlines_name.value, 'OutlineObjects')
def test_05_02_load(self):
'''Load a pipeline saved by pyCP'''
data = ('TUFUTEFCIDUuMCBNQVQtZmlsZSBQbGF0Zm9ybTogbnQsIENyZWF0ZW' +
'Qgb246IFdlZCBBcHIgMjIgMTM6MzA6MTQgMjAwOQAAAAAAAAAAAAAA' +
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' +
'AAAAABSU0OAAAAmDoAAAYAAAAIAAAAAgAAAAAAAAAFAAAACAAAAAEA' +
'AAABAAAAAQAAAAgAAABTZXR0aW5ncwUABAAYAAAAAQAAAMAAAABWYX' +
'JpYWJsZVZhbHVlcwAAAAAAAAAAAABWYXJpYWJsZUluZm9UeXBlcwAA' +
'AAAAAABNb2R1bGVOYW1lcwAAAAAAAAAAAAAAAABOdW1iZXJzT2ZWYX' +
'JpYWJsZXMAAAAAAABQaXhlbFNpemUAAAAAAAAAAAAAAAAAAABWYXJp' +
'YWJsZVJldmlzaW9uTnVtYmVycwBNb2R1bGVSZXZpc2lvbk51bWJlcn' +
'MAAABNb2R1bGVOb3RlcwAAAAAAAAAAAAAAAAAOAAAAQBsAAAYAAAAI' +
'AAAAAQAAAAAAAAAFAAAACAAAAAUAAAAWAAAAAQAAAAAAAAAOAAAASA' +
'AAAAYAAAAIAAAABAAAAAAAAAAFAAAACAAAAAEAAAARAAAAAQAAAAAA' +
'AAAQAAAAEQAAAGluZGl2aWR1YWwgaW1hZ2VzAAAAAAAAAA4AAAAwAA' +
'AABgAAAAgAAAAEAAAAAAAAAAUAAAAIAAAAAQAAAAMAAAABAAAAAAAA' +
'ABAAAwBETkEADgAAADgAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAA' +
'ABAAAABgAAAAEAAAAAAAAAEAAAAAYAAABOdWNsZWkAAA4AAAAwAAAA' +
'BgAAAAgAAAAEAAAAAAAAAAUAAAAIAAAAAQAAAAMAAAABAAAAAAAAAB' +
'AAAwBETkEADgAAAEAAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAAB' +
'AAAADgAAAAEAAAAAAAAAEAAAAA4AAABGaWx0ZXJlZE51Y2xlaQAADg' +
'AAAEAAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAABAAAAEAAAAAEA' +
'AAAAAAAAEAAAABAAAABUZXh0LUV4YWN0IG1hdGNoDgAAADgAAAAGAA' +
'AACAAAAAQAAAAAAAAABQAAAAgAAAABAAAABgAAAAEAAAAAAAAAEAAA' +
'AAYAAABOdWNsZWkAAA4AAAA4AAAABgAAAAgAAAAEAAAAAAAAAAUAAA' +
'AIAAAAAQAAAAUAAAABAAAAAAAAABAAAAAFAAAAQ2VsbHMAAAAOAAAA' +
'QAAAAAYAAAAIAAAABAAAAAAAAAAFAAAACAAAAAEAAAAKAAAAAQAAAA' +
'AAAAAQAAAACgAAAERvIG5vdCB1c2UAAAAAAAAOAAAAOAAAAAYAAAAI' +
'AAAABAAAAAAAAAAFAAAACAAAAAEAAAAGAAAAAQAAAAAAAAAQAAAABg' +
'AAAE51Y2xlaQAADgAAADAAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgA' +
'AAABAAAAAQAAAAEAAAAAAAAAEAABADMAAAAOAAAAOAAAAAYAAAAIAA' +
'AABAAAAAAAAAAFAAAACAAAAAEAAAAFAAAAAQAAAAAAAAAQAAAABQAA' +
'ADEwLDQwAAAADgAAAEAAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAA' +
'ABAAAACwAAAAEAAAAAAAAAEAAAAAsAAABQcm9wYWdhdGlvbgAAAAAA' +
'DgAAADgAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAABAAAABgAAAA' +
'EAAAAAAAAAEAAAAAYAAABOdWNsZWkAAA4AAABYAAAABgAAAAgAAAAE' +
'AAAAAAAAAAUAAAAIAAAAAQAAACEAAAABAAAAAAAAABAAAAAhAAAASW' +
'50ZW5zaXR5X0ludGVncmF0ZWRJbnRlbnNpdHlfRE5BAAAAAAAAAA4A' +
'AABAAAAABgAAAAgAAAAEAAAAAAAAAAUAAAAIAAAAAQAAAAoAAAABAA' +
'AAAAAAABAAAAAKAAAARG8gbm90IHVzZQAAAAAAAA4AAAAwAAAABgAA' +
'AAgAAAAEAAAAAAAAAAUAAAAIAAAAAQAAAAMAAAABAAAAAAAAABAAAw' +
'BZZXMADgAAAEAAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAABAAAA' +
'CQAAAAEAAAAAAAAAEAAAAAkAAABDeXRvcGxhc20AAAAAAAAADgAAAD' +
'AAAAAGAAAACAAAAAYAAAAAAAAABQAAAAgAAAAAAAAAAAAAAAEAAAAA' +
'AAAACQAAAAAAAAAOAAAAOAAAAAYAAAAIAAAABAAAAAAAAAAFAAAACA' +
'AAAAEAAAAGAAAAAQAAAAAAAAAQAAAABgAAAExpbWl0cwAADgAAADAA' +
'AAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAABAAAAAgAAAAEAAAAAAA' +
'AAEAACAE5vAAAOAAAAMAAAAAYAAAAIAAAABAAAAAAAAAAFAAAACAAA' +
'AAEAAAACAAAAAQAAAAAAAAAQAAIATm8AAA4AAABAAAAABgAAAAgAAA' +
'AEAAAAAAAAAAUAAAAIAAAAAQAAAAsAAAABAAAAAAAAABAAAAALAAAA' +
'T3RzdSBHbG9iYWwAAAAAAA4AAAAwAAAABgAAAAgAAAAGAAAAAAAAAA' +
'UAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAkAAAAAAAAADgAAADAAAAAG' +
'AAAACAAAAAQAAAAAAAAABQAAAAgAAAABAAAABAAAAAEAAAAAAAAAEA' +
'AEAE5vbmUOAAAASAAAAAYAAAAIAAAABAAAAAAAAAAFAAAACAAAAAEA' +
'AAAXAAAAAQAAAAAAAAAQAAAAFwAAAERlZmF1bHQgSW1hZ2UgRGlyZW' +
'N0b3J5AA4AAAAwAAAABgAAAAgAAAAEAAAAAAAAAAUAAAAIAAAAAQAA' +
'AAMAAAABAAAAAAAAABAAAwBZZXMADgAAADAAAAAGAAAACAAAAAQAAA' +
'AAAAAABQAAAAgAAAABAAAAAwAAAAEAAAAAAAAAEAADADEuMAAOAAAA' +
'MAAAAAYAAAAIAAAABgAAAAAAAAAFAAAACAAAAAAAAAAAAAAAAQAAAA' +
'AAAAAJAAAAAAAAAA4AAAAwAAAABgAAAAgAAAAEAAAAAAAAAAUAAAAI' +
'AAAAAQAAAAMAAAABAAAAAAAAABAAAwBZZXMADgAAADAAAAAGAAAACA' +
'AAAAYAAAAAAAAABQAAAAgAAAAAAAAAAAAAAAEAAAAAAAAACQAAAAAA' +
'AAAOAAAAQAAAAAYAAAAIAAAABAAAAAAAAAAFAAAACAAAAAEAAAALAA' +
'AAAQAAAAAAAAAQAAAACwAAAE90c3UgR2xvYmFsAAAAAAAOAAAASAAA' +
'AAYAAAAIAAAABAAAAAAAAAAFAAAACAAAAAEAAAARAAAAAQAAAAAAAA' +
'AQAAAAEQAAADAuMDAwMDAwLDEuMDAwMDAwAAAAAAAAAA4AAAAwAAAA' +
'BgAAAAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAAABAAAAAAAAAA' +
'kAAAAAAAAADgAAADgAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAAB' +
'AAAABQAAAAEAAAAAAAAAEAAAAAUAAAAzMDAuMAAAAA4AAAAwAAAABg' +
'AAAAgAAAAEAAAAAAAAAAUAAAAIAAAAAQAAAAMAAAABAAAAAAAAABAA' +
'AwBZZXMADgAAADAAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAABAA' +
'AAAwAAAAEAAAAAAAAAEAADADEuMAAOAAAAMAAAAAYAAAAIAAAABAAA' +
'AAAAAAAFAAAACAAAAAEAAAAEAAAAAQAAAAAAAAAQAAQAMC4wMQ4AAA' +
'AwAAAABgAAAAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAAABAAAA' +
'AAAAAAkAAAAAAAAADgAAADAAAAAGAAAACAAAAAQAAAAAAAAABQAAAA' +
'gAAAABAAAAAwAAAAEAAAAAAAAAEAADAFllcwAOAAAAMAAAAAYAAAAI' +
'AAAABAAAAAAAAAAFAAAACAAAAAEAAAADAAAAAQAAAAAAAAAQAAMAWW' +
'VzAA4AAABIAAAABgAAAAgAAAAEAAAAAAAAAAUAAAAIAAAAAQAAABEA' +
'AAABAAAAAAAAABAAAAARAAAAMC4wMDAwMDAsMS4wMDAwMDAAAAAAAA' +
'AADgAAADAAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAABAAAAAgAA' +
'AAEAAAAAAAAAEAACADEwAAAOAAAAMAAAAAYAAAAIAAAABgAAAAAAAA' +
'AFAAAACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAAA4AAAA' +
'BgAAAAgAAAAEAAAAAAAAAAUAAAAIAAAAAQAAAAUAAAABAAAAAAAAAB' +
'AAAAAFAAAANTAwLjAAAAAOAAAAOAAAAAYAAAAIAAAABAAAAAAAAAAF' +
'AAAACAAAAAEAAAAIAAAAAQAAAAAAAAAQAAAACAAAAENoYW5uZWwyDg' +
'AAADAAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAABAAAABAAAAAEA' +
'AAAAAAAAEAAEADAuMDEOAAAAMAAAAAYAAAAIAAAABAAAAAAAAAAFAA' +
'AACAAAAAEAAAAEAAAAAQAAAAAAAAAQAAQAMC4wNQ4AAAAwAAAABgAA' +
'AAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAkAAA' +
'AAAAAADgAAADAAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAABAAAA' +
'AgAAAAEAAAAAAAAAEAACAE5vAAAOAAAAMAAAAAYAAAAIAAAABAAAAA' +
'AAAAAFAAAACAAAAAEAAAADAAAAAQAAAAAAAAAQAAMARE5BAA4AAABA' +
'AAAABgAAAAgAAAAEAAAAAAAAAAUAAAAIAAAAAQAAAAkAAAABAAAAAA' +
'AAABAAAAAJAAAASW50ZW5zaXR5AAAAAAAAAA4AAABIAAAABgAAAAgA' +
'AAAEAAAAAAAAAAUAAAAIAAAAAQAAABEAAAABAAAAAAAAABAAAAARAA' +
'AAU2Vjb25kYXJ5T3V0bGluZXMAAAAAAAAADgAAADAAAAAGAAAACAAA' +
'AAYAAAAAAAAABQAAAAgAAAAAAAAAAAAAAAEAAAAAAAAACQAAAAAAAA' +
'AOAAAAQAAAAAYAAAAIAAAABAAAAAAAAAAFAAAACAAAAAEAAAAMAAAA' +
'AQAAAAAAAAAQAAAADAAAAEZpbHRlcmVkQmx1ZQAAAAAOAAAAMAAAAA' +
'YAAAAIAAAABAAAAAAAAAAFAAAACAAAAAEAAAABAAAAAQAAAAAAAAAQ' +
'AAEAMQAAAA4AAABAAAAABgAAAAgAAAAEAAAAAAAAAAUAAAAIAAAAAQ' +
'AAAAkAAAABAAAAAAAAABAAAAAJAAAASW50ZW5zaXR5AAAAAAAAAA4A' +
'AAAwAAAABgAAAAgAAAAEAAAAAAAAAAUAAAAIAAAAAQAAAAIAAAABAA' +
'AAAAAAABAAAgBObwAADgAAADAAAAAGAAAACAAAAAYAAAAAAAAABQAA' +
'AAgAAAAAAAAAAAAAAAEAAAAAAAAACQAAAAAAAAAOAAAAOAAAAAYAAA' +
'AIAAAABAAAAAAAAAAFAAAACAAAAAEAAAAFAAAAAQAAAAAAAAAQAAAA' +
'BQAAAENlbGxzAAAADgAAADAAAAAGAAAACAAAAAQAAAAAAAAABQAAAA' +
'gAAAABAAAABAAAAAEAAAAAAAAAEAAEAE5vbmUOAAAAMAAAAAYAAAAI' +
'AAAABAAAAAAAAAAFAAAACAAAAAEAAAACAAAAAQAAAAAAAAAQAAIAMT' +
'AAAA4AAAAwAAAABgAAAAgAAAAEAAAAAAAAAAUAAAAIAAAAAQAAAAMA' +
'AAABAAAAAAAAABAAAwAwLjAADgAAADAAAAAGAAAACAAAAAYAAAAAAA' +
'AABQAAAAgAAAAAAAAAAAAAAAEAAAAAAAAACQAAAAAAAAAOAAAAQAAA' +
'AAYAAAAIAAAABAAAAAAAAAAFAAAACAAAAAEAAAANAAAAAQAAAAAAAA' +
'AQAAAADQAAAEZpbHRlcmVkQ2VsbHMAAAAOAAAAgAAAAAYAAAAIAAAA' +
'BAAAAAAAAAAFAAAACAAAAAEAAABLAAAAAQAAAAAAAAAQAAAASwAAAF' +
'4oP1A8UGxhdGU+LispXyg/UDxXZWxsUm93PltBLVBdKSg/UDxXZWxs' +
'Q29sdW1uPlswLTldezEsMn0pXyg/UDxTaXRlPlswLTldKQAAAAAADg' +
'AAADAAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAABAAAAAQAAAAEA' +
'AAAAAAAAEAABADcAAAAOAAAAMAAAAAYAAAAIAAAABAAAAAAAAAAFAA' +
'AACAAAAAEAAAAEAAAAAQAAAAAAAAAQAAQATm9uZQ4AAAAwAAAABgAA' +
'AAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAkAAA' +
'AAAAAADgAAADAAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAABAAAA' +
'AgAAAAEAAAAAAAAAEAACAE5vAAAOAAAAaAAAAAYAAAAIAAAABAAAAA' +
'AAAAAFAAAACAAAAAEAAAA4AAAAAQAAAAAAAAAQAAAAOAAAACg/UDxZ' +
'ZWFyPlswLTldezR9KS0oP1A8TW9udGg+WzAtOV17Mn0pLSg/UDxEYX' +
'k+WzAtOV17Mn0pDgAAADAAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgA' +
'AAABAAAAAwAAAAEAAAAAAAAAEAADAFllcwAOAAAAMAAAAAYAAAAIAA' +
'AABAAAAAAAAAAFAAAACAAAAAEAAAACAAAAAQAAAAAAAAAQAAIATm8A' +
'AA4AAAAwAAAABgAAAAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAA' +
'ABAAAAAAAAAAkAAAAAAAAADgAAAEgAAAAGAAAACAAAAAQAAAAAAAAA' +
'BQAAAAgAAAABAAAAFQAAAAEAAAAAAAAAEAAAABUAAABPdXRsaW5lc0' +
'ZpbHRlcmVkR3JlZW4AAAAOAAAAOAAAAAYAAAAIAAAABAAAAAAAAAAF' +
'AAAACAAAAAEAAAAIAAAAAQAAAAAAAAAQAAAACAAAAENoYW5uZWwxDg' +
'AAAEAAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAABAAAACgAAAAEA' +
'AAAAAAAAEAAAAAoAAABEbyBub3QgdXNlAAAAAAAADgAAADAAAAAGAA' +
'AACAAAAAYAAAAAAAAABQAAAAgAAAAAAAAAAAAAAAEAAAAAAAAACQAA' +
'AAAAAAAOAAAAMAAAAAYAAAAIAAAABgAAAAAAAAAFAAAACAAAAAAAAA' +
'AAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAAAwAAAABgAAAAgAAAAGAAAA' +
'AAAAAAUAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAkAAAAAAAAADgAAAE' +
'AAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAABAAAACQAAAAEAAAAA' +
'AAAAEAAAAAkAAABDeXRvcGxhc20AAAAAAAAADgAAADAAAAAGAAAACA' +
'AAAAQAAAAAAAAABQAAAAgAAAABAAAAAwAAAAEAAAAAAAAAEAADAFll' +
'cwAOAAAAMAAAAAYAAAAIAAAABgAAAAAAAAAFAAAACAAAAAAAAAAAAA' +
'AAAQAAAAAAAAAJAAAAAAAAAA4AAAAwAAAABgAAAAgAAAAGAAAAAAAA' +
'AAUAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAkAAAAAAAAADgAAADAAAA' +
'AGAAAACAAAAAYAAAAAAAAABQAAAAgAAAAAAAAAAAAAAAEAAAAAAAAA' +
'CQAAAAAAAAAOAAAAMAAAAAYAAAAIAAAABAAAAAAAAAAFAAAACAAAAA' +
'EAAAABAAAAAQAAAAAAAAAQAAEAMgAAAA4AAAAwAAAABgAAAAgAAAAE' +
'AAAAAAAAAAUAAAAIAAAAAQAAAAMAAAABAAAAAAAAABAAAwBZZXMADg' +
'AAADAAAAAGAAAACAAAAAYAAAAAAAAABQAAAAgAAAAAAAAAAAAAAAEA' +
'AAAAAAAACQAAAAAAAAAOAAAAMAAAAAYAAAAIAAAABgAAAAAAAAAFAA' +
'AACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAAAwAAAABgAA' +
'AAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAkAAA' +
'AAAAAADgAAADAAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAABAAAA' +
'BAAAAAEAAAAAAAAAEAAEAE5vbmUOAAAAMAAAAAYAAAAIAAAABAAAAA' +
'AAAAAFAAAACAAAAAEAAAADAAAAAQAAAAAAAAAQAAMAWWVzAA4AAAAw' +
'AAAABgAAAAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAAABAAAAAA' +
'AAAAkAAAAAAAAADgAAADAAAAAGAAAACAAAAAYAAAAAAAAABQAAAAgA' +
'AAAAAAAAAAAAAAEAAAAAAAAACQAAAAAAAAAOAAAAMAAAAAYAAAAIAA' +
'AABgAAAAAAAAAFAAAACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAA' +
'AA4AAACAAAAABgAAAAgAAAAEAAAAAAAAAAUAAAAIAAAAAQAAAEsAAA' +
'ABAAAAAAAAABAAAABLAAAAXig/UDxQbGF0ZT4uKylfKD9QPFdlbGxS' +
'b3c+W0EtUF0pKD9QPFdlbGxDb2x1bW4+WzAtOV17MSwyfSlfKD9QPF' +
'NpdGU+WzAtOV0pAAAAAAAOAAAAMAAAAAYAAAAIAAAABAAAAAAAAAAF' +
'AAAACAAAAAEAAAADAAAAAQAAAAAAAAAQAAMAMC4wAA4AAAAwAAAABg' +
'AAAAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAkA' +
'AAAAAAAADgAAADAAAAAGAAAACAAAAAYAAAAAAAAABQAAAAgAAAAAAA' +
'AAAAAAAAEAAAAAAAAACQAAAAAAAAAOAAAAMAAAAAYAAAAIAAAABgAA' +
'AAAAAAAFAAAACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAA' +
'BoAAAABgAAAAgAAAAEAAAAAAAAAAUAAAAIAAAAAQAAADgAAAABAAAA' +
'AAAAABAAAAA4AAAAKD9QPFllYXI+WzAtOV17NH0pLSg/UDxNb250aD' +
'5bMC05XXsyfSktKD9QPERheT5bMC05XXsyfSkOAAAAMAAAAAYAAAAI' +
'AAAABAAAAAAAAAAFAAAACAAAAAEAAAAEAAAAAQAAAAAAAAAQAAQATm' +
'9uZQ4AAAAwAAAABgAAAAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAA' +
'AAABAAAAAAAAAAkAAAAAAAAADgAAADAAAAAGAAAACAAAAAYAAAAAAA' +
'AABQAAAAgAAAAAAAAAAAAAAAEAAAAAAAAACQAAAAAAAAAOAAAAMAAA' +
'AAYAAAAIAAAABgAAAAAAAAAFAAAACAAAAAAAAAAAAAAAAQAAAAAAAA' +
'AJAAAAAAAAAA4AAAAwAAAABgAAAAgAAAAGAAAAAAAAAAUAAAAIAAAA' +
'AAAAAAAAAAABAAAAAAAAAAkAAAAAAAAADgAAADAAAAAGAAAACAAAAA' +
'QAAAAAAAAABQAAAAgAAAABAAAAAgAAAAEAAAAAAAAAEAACAE5vAAAO' +
'AAAAMAAAAAYAAAAIAAAABgAAAAAAAAAFAAAACAAAAAAAAAAAAAAAAQ' +
'AAAAAAAAAJAAAAAAAAAA4AAAAwAAAABgAAAAgAAAAGAAAAAAAAAAUA' +
'AAAIAAAAAAAAAAAAAAABAAAAAAAAAAkAAAAAAAAADgAAADAAAAAGAA' +
'AACAAAAAYAAAAAAAAABQAAAAgAAAAAAAAAAAAAAAEAAAAAAAAACQAA' +
'AAAAAAAOAAAAqBkAAAYAAAAIAAAAAQAAAAAAAAAFAAAACAAAAAUAAA' +
'AWAAAAAQAAAAAAAAAOAAAAMAAAAAYAAAAIAAAABgAAAAAAAAAFAAAA' +
'CAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAABAAAAABgAAAA' +
'gAAAAEAAAAAAAAAAUAAAAIAAAAAQAAAAoAAAABAAAAAAAAABAAAAAK' +
'AAAAaW1hZ2Vncm91cAAAAAAAAA4AAABAAAAABgAAAAgAAAAEAAAAAA' +
'AAAAUAAAAIAAAAAQAAAAsAAAABAAAAAAAAABAAAAALAAAAb2JqZWN0' +
'Z3JvdXAAAAAAAA4AAABAAAAABgAAAAgAAAAEAAAAAAAAAAUAAAAIAA' +
'AAAQAAAAoAAAABAAAAAAAAABAAAAAKAAAAaW1hZ2Vncm91cAAAAAAA' +
'AA4AAABIAAAABgAAAAgAAAAEAAAAAAAAAAUAAAAIAAAAAQAAABEAAA' +
'ABAAAAAAAAABAAAAARAAAAb2JqZWN0Z3JvdXAgaW5kZXAAAAAAAAAA' +
'DgAAADAAAAAGAAAACAAAAAYAAAAAAAAABQAAAAgAAAAAAAAAAAAAAA' +
'EAAAAAAAAACQAAAAAAAAAOAAAASAAAAAYAAAAIAAAABAAAAAAAAAAF' +
'AAAACAAAAAEAAAARAAAAAQAAAAAAAAAQAAAAEQAAAG9iamVjdGdyb3' +
'VwIGluZGVwAAAAAAAAAA4AAABIAAAABgAAAAgAAAAEAAAAAAAAAAUA' +
'AAAIAAAAAQAAABEAAAABAAAAAAAAABAAAAARAAAAb2JqZWN0Z3JvdX' +
'AgaW5kZXAAAAAAAAAADgAAADAAAAAGAAAACAAAAAYAAAAAAAAABQAA' +
'AAgAAAAAAAAAAAAAAAEAAAAAAAAACQAAAAAAAAAOAAAAQAAAAAYAAA' +
'AIAAAABAAAAAAAAAAFAAAACAAAAAEAAAALAAAAAQAAAAAAAAAQAAAA' +
'CwAAAG9iamVjdGdyb3VwAAAAAAAOAAAAMAAAAAYAAAAIAAAABgAAAA' +
'AAAAAFAAAACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAAAw' +
'AAAABgAAAAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAAABAAAAAA' +
'AAAAkAAAAAAAAADgAAADAAAAAGAAAACAAAAAYAAAAAAAAABQAAAAgA' +
'AAAAAAAAAAAAAAEAAAAAAAAACQAAAAAAAAAOAAAAQAAAAAYAAAAIAA' +
'AABAAAAAAAAAAFAAAACAAAAAEAAAALAAAAAQAAAAAAAAAQAAAACwAA' +
'AG9iamVjdGdyb3VwAAAAAAAOAAAAMAAAAAYAAAAIAAAABgAAAAAAAA' +
'AFAAAACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAAAwAAAA' +
'BgAAAAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAAABAAAAAAAAAA' +
'kAAAAAAAAADgAAADAAAAAGAAAACAAAAAYAAAAAAAAABQAAAAgAAAAA' +
'AAAAAAAAAAEAAAAAAAAACQAAAAAAAAAOAAAAQAAAAAYAAAAIAAAABA' +
'AAAAAAAAAFAAAACAAAAAEAAAAKAAAAAQAAAAAAAAAQAAAACgAAAGlt' +
'YWdlZ3JvdXAAAAAAAAAOAAAAMAAAAAYAAAAIAAAABgAAAAAAAAAFAA' +
'AACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAAAwAAAABgAA' +
'AAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAkAAA' +
'AAAAAADgAAADAAAAAGAAAACAAAAAYAAAAAAAAABQAAAAgAAAAAAAAA' +
'AAAAAAEAAAAAAAAACQAAAAAAAAAOAAAAMAAAAAYAAAAIAAAABgAAAA' +
'AAAAAFAAAACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAAAw' +
'AAAABgAAAAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAAABAAAAAA' +
'AAAAkAAAAAAAAADgAAADAAAAAGAAAACAAAAAYAAAAAAAAABQAAAAgA' +
'AAAAAAAAAAAAAAEAAAAAAAAACQAAAAAAAAAOAAAAQAAAAAYAAAAIAA' +
'AABAAAAAAAAAAFAAAACAAAAAEAAAALAAAAAQAAAAAAAAAQAAAACwAA' +
'AG9iamVjdGdyb3VwAAAAAAAOAAAAMAAAAAYAAAAIAAAABgAAAAAAAA' +
'AFAAAACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAAAwAAAA' +
'BgAAAAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAAABAAAAAAAAAA' +
'kAAAAAAAAADgAAADAAAAAGAAAACAAAAAYAAAAAAAAABQAAAAgAAAAA' +
'AAAAAAAAAAEAAAAAAAAACQAAAAAAAAAOAAAAMAAAAAYAAAAIAAAABg' +
'AAAAAAAAAFAAAACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAA4A' +
'AAAwAAAABgAAAAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAAABAA' +
'AAAAAAAAkAAAAAAAAADgAAADAAAAAGAAAACAAAAAYAAAAAAAAABQAA' +
'AAgAAAAAAAAAAAAAAAEAAAAAAAAACQAAAAAAAAAOAAAAMAAAAAYAAA' +
'AIAAAABgAAAAAAAAAFAAAACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAA' +
'AAAAAA4AAAAwAAAABgAAAAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAA' +
'AAAAABAAAAAAAAAAkAAAAAAAAADgAAADAAAAAGAAAACAAAAAYAAAAA' +
'AAAABQAAAAgAAAAAAAAAAAAAAAEAAAAAAAAACQAAAAAAAAAOAAAAMA' +
'AAAAYAAAAIAAAABgAAAAAAAAAFAAAACAAAAAAAAAAAAAAAAQAAAAAA' +
'AAAJAAAAAAAAAA4AAAAwAAAABgAAAAgAAAAGAAAAAAAAAAUAAAAIAA' +
'AAAAAAAAAAAAABAAAAAAAAAAkAAAAAAAAADgAAADAAAAAGAAAACAAA' +
'AAYAAAAAAAAABQAAAAgAAAAAAAAAAAAAAAEAAAAAAAAACQAAAAAAAA' +
'AOAAAAMAAAAAYAAAAIAAAABgAAAAAAAAAFAAAACAAAAAAAAAAAAAAA' +
'AQAAAAAAAAAJAAAAAAAAAA4AAAAwAAAABgAAAAgAAAAGAAAAAAAAAA' +
'UAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAkAAAAAAAAADgAAADAAAAAG' +
'AAAACAAAAAYAAAAAAAAABQAAAAgAAAAAAAAAAAAAAAEAAAAAAAAACQ' +
'AAAAAAAAAOAAAAMAAAAAYAAAAIAAAABgAAAAAAAAAFAAAACAAAAAAA' +
'AAAAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAAAwAAAABgAAAAgAAAAGAA' +
'AAAAAAAAUAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAkAAAAAAAAADgAA' +
'ADAAAAAGAAAACAAAAAYAAAAAAAAABQAAAAgAAAAAAAAAAAAAAAEAAA' +
'AAAAAACQAAAAAAAAAOAAAAMAAAAAYAAAAIAAAABgAAAAAAAAAFAAAA' +
'CAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAAAwAAAABgAAAA' +
'gAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAkAAAAA' +
'AAAADgAAADAAAAAGAAAACAAAAAYAAAAAAAAABQAAAAgAAAAAAAAAAA' +
'AAAAEAAAAAAAAACQAAAAAAAAAOAAAAMAAAAAYAAAAIAAAABgAAAAAA' +
'AAAFAAAACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAAAwAA' +
'AABgAAAAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAAABAAAAAAAA' +
'AAkAAAAAAAAADgAAADAAAAAGAAAACAAAAAYAAAAAAAAABQAAAAgAAA' +
'AAAAAAAAAAAAEAAAAAAAAACQAAAAAAAAAOAAAAMAAAAAYAAAAIAAAA' +
'BgAAAAAAAAAFAAAACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAA' +
'4AAABAAAAABgAAAAgAAAAEAAAAAAAAAAUAAAAIAAAAAQAAABAAAAAB' +
'AAAAAAAAABAAAAAQAAAAaW1hZ2Vncm91cCBpbmRlcA4AAAAwAAAABg' +
'AAAAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAkA' +
'AAAAAAAADgAAAEgAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAAABAA' +
'AAEgAAAAEAAAAAAAAAEAAAABIAAABvdXRsaW5lZ3JvdXAgaW5kZXAA' +
'AAAAAAAOAAAAMAAAAAYAAAAIAAAABgAAAAAAAAAFAAAACAAAAAAAAA' +
'AAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAABAAAAABgAAAAgAAAAEAAAA' +
'AAAAAAUAAAAIAAAAAQAAABAAAAABAAAAAAAAABAAAAAQAAAAaW1hZ2' +
'Vncm91cCBpbmRlcA4AAAAwAAAABgAAAAgAAAAGAAAAAAAAAAUAAAAI' +
'AAAAAAAAAAAAAAABAAAAAAAAAAkAAAAAAAAADgAAADAAAAAGAAAACA' +
'AAAAYAAAAAAAAABQAAAAgAAAAAAAAAAAAAAAEAAAAAAAAACQAAAAAA' +
'AAAOAAAAMAAAAAYAAAAIAAAABgAAAAAAAAAFAAAACAAAAAAAAAAAAA' +
'AAAQAAAAAAAAAJAAAAAAAAAA4AAAAwAAAABgAAAAgAAAAGAAAAAAAA' +
'AAUAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAkAAAAAAAAADgAAAEAAAA' +
'AGAAAACAAAAAQAAAAAAAAABQAAAAgAAAABAAAACwAAAAEAAAAAAAAA' +
'EAAAAAsAAABvYmplY3Rncm91cAAAAAAADgAAADAAAAAGAAAACAAAAA' +
'YAAAAAAAAABQAAAAgAAAAAAAAAAAAAAAEAAAAAAAAACQAAAAAAAAAO' +
'AAAAMAAAAAYAAAAIAAAABgAAAAAAAAAFAAAACAAAAAAAAAAAAAAAAQ' +
'AAAAAAAAAJAAAAAAAAAA4AAAAwAAAABgAAAAgAAAAGAAAAAAAAAAUA' +
'AAAIAAAAAAAAAAAAAAABAAAAAAAAAAkAAAAAAAAADgAAADAAAAAGAA' +
'AACAAAAAYAAAAAAAAABQAAAAgAAAAAAAAAAAAAAAEAAAAAAAAACQAA' +
'AAAAAAAOAAAASAAAAAYAAAAIAAAABAAAAAAAAAAFAAAACAAAAAEAAA' +
'ARAAAAAQAAAAAAAAAQAAAAEQAAAG9iamVjdGdyb3VwIGluZGVwAAAA' +
'AAAAAA4AAAAwAAAABgAAAAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAA' +
'AAAAABAAAAAAAAAAkAAAAAAAAADgAAADAAAAAGAAAACAAAAAYAAAAA' +
'AAAABQAAAAgAAAAAAAAAAAAAAAEAAAAAAAAACQAAAAAAAAAOAAAAQA' +
'AAAAYAAAAIAAAABAAAAAAAAAAFAAAACAAAAAEAAAAKAAAAAQAAAAAA' +
'AAAQAAAACgAAAGltYWdlZ3JvdXAAAAAAAAAOAAAAMAAAAAYAAAAIAA' +
'AABgAAAAAAAAAFAAAACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAA' +
'AA4AAAAwAAAABgAAAAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAA' +
'ABAAAAAAAAAAkAAAAAAAAADgAAADAAAAAGAAAACAAAAAYAAAAAAAAA' +
'BQAAAAgAAAAAAAAAAAAAAAEAAAAAAAAACQAAAAAAAAAOAAAAMAAAAA' +
'YAAAAIAAAABgAAAAAAAAAFAAAACAAAAAAAAAAAAAAAAQAAAAAAAAAJ' +
'AAAAAAAAAA4AAAAwAAAABgAAAAgAAAAGAAAAAAAAAAUAAAAIAAAAAA' +
'AAAAAAAAABAAAAAAAAAAkAAAAAAAAADgAAADAAAAAGAAAACAAAAAYA' +
'AAAAAAAABQAAAAgAAAAAAAAAAAAAAAEAAAAAAAAACQAAAAAAAAAOAA' +
'AAQAAAAAYAAAAIAAAABAAAAAAAAAAFAAAACAAAAAEAAAAQAAAAAQAA' +
'AAAAAAAQAAAAEAAAAGltYWdlZ3JvdXAgaW5kZXAOAAAAMAAAAAYAAA' +
'AIAAAABgAAAAAAAAAFAAAACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAA' +
'AAAAAA4AAABIAAAABgAAAAgAAAAEAAAAAAAAAAUAAAAIAAAAAQAAAB' +
'IAAAABAAAAAAAAABAAAAASAAAAb3V0bGluZWdyb3VwIGluZGVwAAAA' +
'AAAADgAAADAAAAAGAAAACAAAAAYAAAAAAAAABQAAAAgAAAAAAAAAAA' +
'AAAAEAAAAAAAAACQAAAAAAAAAOAAAAMAAAAAYAAAAIAAAABgAAAAAA' +
'AAAFAAAACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAAAwAA' +
'AABgAAAAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAAABAAAAAAAA' +
'AAkAAAAAAAAADgAAAEAAAAAGAAAACAAAAAQAAAAAAAAABQAAAAgAAA' +
'ABAAAAEAAAAAEAAAAAAAAAEAAAABAAAABpbWFnZWdyb3VwIGluZGVw' +
'DgAAADAAAAAGAAAACAAAAAYAAAAAAAAABQAAAAgAAAAAAAAAAAAAAA' +
'EAAAAAAAAACQAAAAAAAAAOAAAAMAAAAAYAAAAIAAAABgAAAAAAAAAF' +
'AAAACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAAAwAAAABg' +
'AAAAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAkA' +
'AAAAAAAADgAAADAAAAAGAAAACAAAAAYAAAAAAAAABQAAAAgAAAAAAA' +
'AAAAAAAAEAAAAAAAAACQAAAAAAAAAOAAAAMAAAAAYAAAAIAAAABgAA' +
'AAAAAAAFAAAACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAA' +
'AwAAAABgAAAAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAAABAAAA' +
'AAAAAAkAAAAAAAAADgAAADAAAAAGAAAACAAAAAYAAAAAAAAABQAAAA' +
'gAAAAAAAAAAAAAAAEAAAAAAAAACQAAAAAAAAAOAAAAMAAAAAYAAAAI' +
'AAAABgAAAAAAAAAFAAAACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAA' +
'AAAA4AAAAwAAAABgAAAAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAA' +
'AAABAAAAAAAAAAkAAAAAAAAADgAAADAAAAAGAAAACAAAAAYAAAAAAA' +
'AABQAAAAgAAAAAAAAAAAAAAAEAAAAAAAAACQAAAAAAAAAOAAAAMAAA' +
'AAYAAAAIAAAABgAAAAAAAAAFAAAACAAAAAAAAAAAAAAAAQAAAAAAAA' +
'AJAAAAAAAAAA4AAAAwAAAABgAAAAgAAAAGAAAAAAAAAAUAAAAIAAAA' +
'AAAAAAAAAAABAAAAAAAAAAkAAAAAAAAADgAAADAAAAAGAAAACAAAAA' +
'YAAAAAAAAABQAAAAgAAAAAAAAAAAAAAAEAAAAAAAAACQAAAAAAAAAO' +
'AAAAMAAAAAYAAAAIAAAABgAAAAAAAAAFAAAACAAAAAAAAAAAAAAAAQ' +
'AAAAAAAAAJAAAAAAAAAA4AAAAwAAAABgAAAAgAAAAGAAAAAAAAAAUA' +
'AAAIAAAAAAAAAAAAAAABAAAAAAAAAAkAAAAAAAAADgAAADAAAAAGAA' +
'AACAAAAAYAAAAAAAAABQAAAAgAAAAAAAAAAAAAAAEAAAAAAAAACQAA' +
'AAAAAAAOAAAAMAAAAAYAAAAIAAAABgAAAAAAAAAFAAAACAAAAAAAAA' +
'AAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAAAwAAAABgAAAAgAAAAGAAAA' +
'AAAAAAUAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAkAAAAAAAAADgAAAD' +
'AAAAAGAAAACAAAAAYAAAAAAAAABQAAAAgAAAAAAAAAAAAAAAEAAAAA' +
'AAAACQAAAAAAAAAOAAAAMAAAAAYAAAAIAAAABgAAAAAAAAAFAAAACA' +
'AAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAABAAAAABgAAAAgA' +
'AAAEAAAAAAAAAAUAAAAIAAAAAQAAAAoAAAABAAAAAAAAABAAAAAKAA' +
'AAaW1hZ2Vncm91cAAAAAAAAA4AAAAwAAAABgAAAAgAAAAGAAAAAAAA' +
'AAUAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAkAAAAAAAAADgAAADAAAA' +
'AGAAAACAAAAAYAAAAAAAAABQAAAAgAAAAAAAAAAAAAAAEAAAAAAAAA' +
'CQAAAAAAAAAOAAAAMAAAAAYAAAAIAAAABgAAAAAAAAAFAAAACAAAAA' +
'AAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAAAwAAAABgAAAAgAAAAG' +
'AAAAAAAAAAUAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAkAAAAAAAAADg' +
'AAADAAAAAGAAAACAAAAAYAAAAAAAAABQAAAAgAAAAAAAAAAAAAAAEA' +
'AAAAAAAACQAAAAAAAAAOAAAAMAAAAAYAAAAIAAAABgAAAAAAAAAFAA' +
'AACAAAAAAAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAA4AAAAwAAAABgAA' +
'AAgAAAAGAAAAAAAAAAUAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAkAAA' +
'AAAAAADgAAADAAAAAGAAAACAAAAAYAAAAAAAAABQAAAAgAAAAAAAAA' +
'AAAAAAEAAAAAAAAACQAAAAAAAAAOAAAAeAIAAAYAAAAIAAAAAQAAAA' +
'AAAAAFAAAACAAAAAEAAAAFAAAAAQAAAAAAAAAOAAAAYAAAAAYAAAAI' +
'AAAABAAAAAAAAAAFAAAACAAAAAEAAAAqAAAAAQAAAAAAAAAQAAAAKg' +
'AAAGNlbGxwcm9maWxlci5tb2R1bGVzLmxvYWRpbWFnZXMuTG9hZElt' +
'YWdlcwAAAAAAAA4AAABwAAAABgAAAAgAAAAEAAAAAAAAAAUAAAAIAA' +
'AAAQAAAEAAAAABAAAAAAAAABAAAABAAAAAY2VsbHByb2ZpbGVyLm1v' +
'ZHVsZXMuaWRlbnRpZnlwcmltYXV0b21hdGljLklkZW50aWZ5UHJpbU' +
'F1dG9tYXRpYw4AAABoAAAABgAAAAgAAAAEAAAAAAAAAAUAAAAIAAAA' +
'AQAAADgAAAABAAAAAAAAABAAAAA4AAAAY2VsbHByb2ZpbGVyLm1vZH' +
'VsZXMuaWRlbnRpZnlzZWNvbmRhcnkuSWRlbnRpZnlTZWNvbmRhcnkO' +
'AAAAeAAAAAYAAAAIAAAABAAAAAAAAAAFAAAACAAAAAEAAABCAAAAAQ' +
'AAAAAAAAAQAAAAQgAAAGNlbGxwcm9maWxlci5tb2R1bGVzLm1lYXN1' +
'cmVvYmplY3RpbnRlbnNpdHkuTWVhc3VyZU9iamVjdEludGVuc2l0eQ' +
'AAAAAAAA4AAAB4AAAABgAAAAgAAAAEAAAAAAAAAAUAAAAIAAAAAQAA' +
'AEgAAAABAAAAAAAAABAAAABIAAAAY2VsbHByb2ZpbGVyLm1vZHVsZX' +
'MuZmlsdGVyYnlvYmplY3RtZWFzdXJlbWVudC5GaWx0ZXJCeU9iamVj' +
'dE1lYXN1cmVtZW50DgAAADgAAAAGAAAACAAAAAkAAAAAAAAABQAAAA' +
'gAAAABAAAABQAAAAEAAAAAAAAAAgAAAAUAAAAVFg8DDwAAAA4AAAAw' +
'AAAABgAAAAgAAAAGAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAAJAAAACA' +
'AAAAAAAAAAAPA/DgAAADgAAAAGAAAACAAAAAkAAAAAAAAABQAAAAgA' +
'AAABAAAABQAAAAEAAAAAAAAAAgAAAAUAAAACAQECAQAAAA4AAABAAA' +
'AABgAAAAgAAAALAAAAAAAAAAUAAAAIAAAAAQAAAAUAAAABAAAAAAAA' +
'AAQAAAAKAAAAAAAAAAAAAAAAAAAAAAAAAA4AAAAYAQAABgAAAAgAAA' +
'ABAAAAAAAAAAUAAAAIAAAAAQAAAAUAAAABAAAAAAAAAA4AAAAoAAAA' +
'BgAAAAgAAAABAAAAAAAAAAUAAAAIAAAAAAAAAAEAAAABAAAAAAAAAA' +
'4AAAAoAAAABgAAAAgAAAABAAAAAAAAAAUAAAAIAAAAAAAAAAEAAAAB' +
'AAAAAAAAAA4AAAAoAAAABgAAAAgAAAABAAAAAAAAAAUAAAAIAAAAAA' +
'AAAAEAAAABAAAAAAAAAA4AAAAoAAAABgAAAAgAAAABAAAAAAAAAAUA' +
'AAAIAAAAAAAAAAEAAAABAAAAAAAAAA4AAAAoAAAABgAAAAgAAAABAA' +
'AAAAAAAAUAAAAIAAAAAAAAAAEAAAABAAAAAAAAAA==')
pipeline = cpp.Pipeline()
def handle_error(caller, event):
if isinstance(event, cpp.LoadExceptionEvent):
self.fail(event.error.message)
pipeline.add_listener(handle_error)
fd = StringIO.StringIO(base64.b64decode(data))
pipeline.load(fd)
module = pipeline.modules()[4]
self.assertEqual(module.target_name.value, 'FilteredNuclei')
self.assertEqual(module.object_name.value, 'Nuclei')
self.assertEqual(module.measurements[0].measurement.value, 'Intensity_IntegratedIntensity_DNA')
self.assertEqual(module.filter_choice.value, F.FI_LIMITS)
self.assertTrue(module.measurements[0].wants_minimum.value)
self.assertEqual(module.measurements[0].min_limit.value, 300)
self.assertTrue(module.measurements[0].wants_maximum.value)
self.assertEqual(module.measurements[0].max_limit.value, 500)
self.assertEqual(len(module.additional_objects), 1)
self.assertEqual(module.additional_objects[0].object_name.value, 'Cells')
self.assertEqual(module.additional_objects[0].target_name.value, 'FilteredCells')
def test_05_03_test_load_v2(self):
data = ('eJztW91u2zYUll0naNYtS4FdDBsK6KIXTRCrclKjbTa0tuNlMRA7Rhw0G9Is'
'ky065kCJhkSl8Ya94x5jl32EkZZsSZxcyYr/hEqAIB2K3/nOOTw6ov7q5fOT'
'ckUsSrJYL5/nuxABsYkU0sWGdiDqZFc8NIBCgCpi/UC8oNsq6Ih7+2LhxcF+'
'8UAuinuy/FqIt2Rq9U26+ecHQVin24d0zTqH1hw541mZ3AKEQP3GXBNywrdO'
'+0e6vlMMqLQReKcgC5guxai9pnfx+aA/PlTHqoVAQ9G8nenSsLQ2MMzT7gjo'
'HG7CO4Ba8E/AuTDqdgZuoQmx7uAd/XzrmBcTjrfVwx+ODGoOp7+ikE6vRegI'
'+NtZ3I6/d+OW4eKWo+sTT/uwv+D2zwXE+bGn/5YjQ12Ft1C1FCRCTbkZW830'
'ySH6Hvj0PRCqjfIQ9yoEt87ZweSG1UEA2rylEPyXHJ7JRxARYAC1QtNjZH+Y'
'ni1OD1vPwR3J/3SndIiosaGZhT/TxvFXOgqL4M348Blh34lbGO8ax8vkgrz7'
'Qhb+z7vO4UfLCL/hbKOM1ybHy+QyrV6tntIH12wvmp4vOD1MrmJRx0S0TODa'
'E3fc4vofZ7xPoAaJGc3erA+fFRp4drhp/Azjy/n4cpRPB1Hq2zdcfJhcBV3F'
'QkSsseImHmGkAiOqHbMe30XzlUL4HnHxYvIpMS3xZ4TbCgq0e57+ypI8szhF'
'wRUC+BY9ntOcb3Gu7zSmw2W34OxMsH+efgfFeZb1ia8X1OfCPP2bdT0shfBt'
'CP5xZXJNJ0A3IRlMsHuW/LOabwX5cdhTdB2gvfwKxSPO/OfMsm8l7jNvnjbv'
'CvJ8x/0rzk8mH1omwZrYHV1I48wzCzFxLwNw8zyvpef3m68sc34xDa4XYudL'
'wZ8HTP7t2dvmj+xBAngj7WxfM+kCIPTmspxvXl3K+ddXf+39vX1tsgMtSHsN'
'27Yj5d3XHB+Tmwa9PTUGpxZBUF9SnH4JsfspZzeTpZ3L9++fX7EwVJ1gjRvO'
'LJ3JT4PsWuV8Scr8N/Vvuf4FzeeT7F+UOp9k/+Z5n78K/n1+519x6XbO+3nF'
'+QcsdpBims6T6yT5exzib9D9/AWANz32uuaWvZjQO8CjLyl+l0L8Dno+e4QN'
'cGNgS1eT5+/nXpf4+7jikux89Z2Ly3C4oPdpi8zv4cs3luD96HqC6iFu/wE6'
'xFUUt8549IhQV0F/BnFZFTtKIXZEjWsSz88UlzxcSUjzNcWluBSX4hZRT7cE'
'fz1lqzs/s6chSfI3jVOKS3GTcSUhzfMUl+JS3GrUm6jPh5Lib4pLcSkuxaW4'
'FJdk3MOsi+PfHzHZ+30I6/+7hyfoOr/j6b/lyB2AUN/A7D89Q9KGP5OZEsKK'
'av+dJZ3Q3ZrnRy3G0w/hKXE8pUk8UAU6gd1Bn31sZxGsKQR2pJrTyj7BK49a'
'Ge9dCG+F461M4tWAYloGsJ9ZKwaV2B89Ut1uPh02j3/0cf0O4z/m+I8n8XeH'
'HzO3B7YBjjUa9VqyP3OuDGwb6u4RPo82Avi9+ZCl0uMn2c1P5Z8g+PPOzceP'
'b+Pw5XKZ7PC9pwf3KASXE/znAcP/K0yX988+0X/k4yr3nzbOGbrcN04uT25s'
'k61/Nfv/B37E+Yc=')
pipeline = cpp.Pipeline()
def callback(caller,event):
self.assertFalse(isinstance(event, cpp.LoadExceptionEvent))
pipeline.add_listener(callback)
pipeline.load(StringIO.StringIO(zlib.decompress(base64.b64decode(data))))
module = pipeline.modules()[-1]
self.assertTrue(isinstance(module, F.FilterByObjectMeasurement))
self.assertEqual(module.target_name.value, 'FilteredBlue')
self.assertEqual(module.object_name.value, 'Nuclei')
self.assertEqual(module.mode, F.MODE_RULES)
self.assertEqual(module.rules_file_name, 'rules.txt')
self.assertEqual(module.rules_directory.dir_choice,
cpprefs.DEFAULT_INPUT_SUBFOLDER_NAME )
self.assertEqual(module.rules_directory.custom_path, "./")
def test_05_04_load_matlab_v7(self):
data = ('eJzzdQzxcXRSMNUzUPB1DNFNy8xJ1VEIyEksScsvyrVSCHAO9/TTUXAuSk0s'
'SU1RyM+zUggH0l6JeQoGZgqGhlbGllZGhgpGBoYGCiQDBkZPX34GBoafTAwM'
'FXPezridd9lA4vhdvdhGR1OzRgZO0+kxPZO4Wso8Ay1DFmUeXeYZs2vtbUnV'
'wqkWwYdN/Q8FF5x8/IGjoP3usrXZl57dOCwwbc2L4j9///yUlw9mYvhTxXLA'
'rvpegiW/2soHty0Vi3/zBHatbQxm4Kp4MP1/wk+mWxJlPCWMvOKWCdOWPeVd'
'7X9u78M3oZO/xDC/vCCivXnJ7jT2m36r7Uu/xG//yCGZ/oObqTsz6q3yrkN8'
'atPev//89FLk7havt9Mi/sXNej1hcklAhJ3Yqcq/4Qlx8ZwX3gasqH/xvNhk'
'kf/9noTpq9SW3PU+d9HJopp7jm3A9WyF3viIO6qaeXn7j7K5mr16mXfdQcIl'
'Kz7ygcaCuGzzST82ms2N/RK97H1mR7zjpem3Q98+aWVb1Ls6epL99tcuWzlC'
'Y4/PVwi0MFfl2y4t5jqteSX7ouJFx4TDz/vn9OUF9VsLtd/2ZdENnOz7amL8'
'gyeNftt+2y5aJ3891aZl6/zoB08cRHPK6uQ8ZPK2r3i86j3PpUa3FL+wjyyF'
'JYtm3Ti0LHvZ45SftZe+sfdtuyxfuSdb/eqVvyedrj8X/PukNmR1xlWLj+y9'
'5Zc336w8u/KC4NmYdgO/K38NV64Tvf++tuN+rCZfXvLq9vUF51vbunwtZsxN'
'u35x/Yq/3rviWY8GF2smsqfc8RU8b7T3uqi/0LsDbfMut7IJWXh+P7rj15rq'
'+l7p87vOW2d+3TjvoM9+/6jYlHPnH736vXpHn8rV0j03eHIe8ZVPn5xiqzr3'
'/Arv4wpT167NkHBevqDW+8HbmpjZj1j+8s+s3+/5/dI88aZHv8Irbl/S75l8'
'9c+/9f9f/Tg0L/K4fW3b2mh3oem3jyxl23Yhnjt8zr+Y7893le4+o3v//dPf'
'EZcq/zQeiHPekeDy8rpt5+G3bvyRIt/3buv1/agj21oe/6hwd+eCx7dWmM7T'
'O/S09fMtxT3tQv23J/+/vX4v54Xls3XSHO0/ztkmv6h8XqT78aPNJv8NH20L'
'3zFXv2j/ZYm6rvh/m/5s/fP24/tPP2Q4H/WuVtd+5X953bX/zPOlJoQDAOEw'
'rqc=')
pipeline = cpp.Pipeline()
def callback(caller,event):
self.assertFalse(isinstance(event, cpp.LoadExceptionEvent))
pipeline.add_listener(callback)
pipeline.load(StringIO.StringIO(zlib.decompress(base64.b64decode(data))))
self.assertEqual(len(pipeline.modules()), 4)
module = pipeline.modules()[-1]
self.assertTrue(isinstance(module, F.FilterObjects))
self.assertEqual(module.object_name, "Nucs")
self.assertEqual(module.target_name, "FilteredNuclei")
self.assertEqual(module.mode, F.MODE_RULES)
self.assertEqual(module.rules_directory.dir_choice,
cpprefs.DEFAULT_OUTPUT_FOLDER_NAME)
self.assertEqual(module.rules_file_name, "myrules.txt")
self.assertEqual(module.measurements[0].measurement, "Intensity_MeanIntensity_DNA_1")
def test_05_05_load_v3(self):
data = r"""CellProfiler Pipeline: http://www.cellprofiler.org
Version:1
SVNRevision:8973
FilterObjects:[module_num:1|svn_version:\'8955\'|variable_revision_number:3|show_window:True|notes:\x5B\x5D]
Name the output objects:FilteredThings
Select the object to filter:Things
Select the measurement to filter by:Intensity_MeanIntensity_DNA
Select the filtering method:Minimal
What did you call the objects that contain the filtered objects?:Nuclei
Filter using a minimum measurement value?:No
Minimum value:0
Filter using a maximum measurement value?:No
Maximum value:1
Retain the outlines of filtered objects for use later in the pipeline (for example, in SaveImages)?:No
Name the outline image:None
Filter using classifier rules or measurements?:Measurements
Rules file location:Default output folder
Rules folder name:.
Rules file name:myrules.txt
"""
pipeline = cpp.Pipeline()
def callback(caller,event):
self.assertFalse(isinstance(event, cpp.LoadExceptionEvent))
pipeline.add_listener(callback)
pipeline.load(StringIO.StringIO(data))
self.assertEqual(len(pipeline.modules()), 1)
module = pipeline.modules()[-1]
self.assertTrue(isinstance(module, F.FilterObjects))
self.assertEqual(module.object_name, "Things")
self.assertEqual(module.target_name, "FilteredThings")
self.assertEqual(module.mode, F.MODE_MEASUREMENTS)
self.assertEqual(module.rules_directory.dir_choice,
cpprefs.DEFAULT_OUTPUT_FOLDER_NAME)
self.assertEqual(module.rules_file_name, "myrules.txt")
self.assertEqual(module.measurements[0].measurement, "Intensity_MeanIntensity_DNA")
self.assertEqual(module.filter_choice, F.FI_MINIMAL)
def test_05_06_load_v4(self):
data = r"""CellProfiler Pipeline: http://www.cellprofiler.org
Version:1
SVNRevision:9025
LoadImages:[module_num:1|svn_version:\'9020\'|variable_revision_number:4|show_window:True|notes:\x5B\x5D]
What type of files are you loading?:individual images
How do you want to load these files?:Text-Exact match
How many images are there in each group?:3
Type the text that the excluded images have in common:Do not use
Analyze all subfolders within the selected folder?:No
Image location:Default Image Folder
Enter the full path to the images:
Do you want to check image sets for missing or duplicate files?:Yes
Do you want to group image sets by metadata?:No
Do you want to exclude certain files?:No
What metadata fields do you want to group by?:
Type the text that these images have in common (case-sensitive):
What do you want to call this image in CellProfiler?:DNA
What is the position of this image in each group?:1
Do you want to extract metadata from the file name, the subfolder path or both?:None
Type the regular expression that finds metadata in the file name\x3A:^(?P<Plate>.*)_(?P<Well>\x5BA-P\x5D\x5B0-9\x5D{2})_s(?P<Site>\x5B0-9\x5D)
Type the regular expression that finds metadata in the subfolder path\x3A:.*\x5B\\\\/\x5D(?P<Date>.*)\x5B\\\\/\x5D(?P<Run>.*)$
IdentifyPrimaryObjects:[module_num:2|svn_version:\'9010\'|variable_revision_number:5|show_window:True|notes:\x5B\x5D]
Select the input image:DNA
Name the identified primary objects:MyObjects
Typical diameter of objects, in pixel units (Min,Max)\x3A:10,40
Discard objects outside the diameter range?:Yes
Try to merge too small objects with nearby larger objects?:No
Discard objects touching the border of the image?:Yes
Select the thresholding method:Otsu Global
Threshold correction factor:1.0
Lower and upper bounds on threshold\x3A:0.000000,1.000000
Approximate fraction of image covered by objects?:0.01
Method to distinguish clumped objects:Intensity
Method to draw dividing lines between clumped objects:Intensity
Size of smoothing filter\x3A:10
Suppress local maxima within this distance\x3A:7
Speed up by using lower-resolution image to find local maxima?:Yes
Name the outline image:PrimaryOutlines
Fill holes in identified objects?:Yes
Automatically calculate size of smoothing filter?:Yes
Automatically calculate minimum size of local maxima?:Yes
Enter manual threshold\x3A:0.0
Select binary image\x3A:None
Save outlines of the identified objects?:No
Calculate the Laplacian of Gaussian threshold automatically?:Yes
Enter Laplacian of Gaussian threshold\x3A:0.5
Two-class or three-class thresholding?:Two classes
Minimize the weighted variance or the entropy?:Weighted variance
Assign pixels in the middle intensity class to the foreground or the background?:Foreground
Automatically calculate the size of objects for the Laplacian of Gaussian filter?:Yes
Enter LoG filter diameter\x3A :5
How do you want to handle images with large numbers of objects?:No action
Maximum # of objects\x3A:500
IdentifySecondaryObjects:[module_num:3|svn_version:\'9007\'|variable_revision_number:3|show_window:True|notes:\x5B\x5D]
Select the input objects:MyObjects
Name the identified objects:Cells
Select the method to identify the secondary objects:Propagation
Select the input image:DNA
Select the thresholding method:Otsu Global
Threshold correction factor:1.0
Lower and upper bounds on threshold\x3A:0.000000,1.000000
Approximate fraction of image covered by objects?:0.01
Number of pixels by which to expand the primary objects\x3A:10
Regularization factor\x3A:0.05
Name the outline image:SecondaryOutlines
Enter manual threshold\x3A:0.0
Select binary image\x3A:None
Save outlines of the identified objects?:No
Two-class or three-class thresholding?:Two classes
Minimize the weighted variance or the entropy?:Weighted variance
Assign pixels in the middle intensity class to the foreground or the background?:Foreground
Do you want to discard objects that touch the edge of the image?:No
Do you want to discard associated primary objects?:No
New primary objects name\x3A:FilteredNuclei
IdentifyTertiaryObjects:[module_num:4|svn_version:\'8957\'|variable_revision_number:1|show_window:True|notes:\x5B\x5D]
Select the larger identified objects:Cells
Select the smaller identified objects:MyObjects
Name the identified subregion objects:Cytoplasm
Name the outline image:CytoplasmOutlines
Retain the outlines for use later in the pipeline (for example, in SaveImages)?:No
MeasureObjectIntensity:[module_num:5|svn_version:\'9000\'|variable_revision_number:3|show_window:True|notes:\x5B\x5D]
Hidden:1
Select an image to use for intensity measurements:DNA
Select objects to measure:MyObjects
FilterObjects:[module_num:6|svn_version:\'9000\'|variable_revision_number:4|show_window:True|notes:\x5B\x5D]
Name the output objects:MyFilteredObjects
Select the object to filter:MyObjects
Filter using classifier rules or measurements?:Measurements
Select the filtering method:Limits
What did you call the objects that contain the filtered objects?:None
Retain the outlines of filtered objects for use later in the pipeline (for example, in SaveImages)?:No
Name the outline image:FilteredObjects
Rules file location:Default input folder
Rules folder name:./rules
Rules file name:myrules.txt
Hidden:2
Hidden:2
Select the measurement to filter by:Intensity_LowerQuartileIntensity_DNA
Filter using a minimum measurement value?:Yes
Minimum value:0.2
Filter using a maximum measurement value?:No
Maximum value:1.5
Select the measurement to filter by:Intensity_UpperQuartileIntensity_DNA
Filter using a minimum measurement value?:No
Minimum value:0.9
Filter using a maximum measurement value?:Yes
Maximum value:1.8
Select additional object to relabel:Cells
Name the relabeled objects:FilteredCells
Save outlines of relabeled objects?:No
Name the outline image:OutlinesFilteredCells
Select additional object to relabel:Cytoplasm
Name the relabeled objects:FilteredCytoplasm
Save outlines of relabeled objects?:No
Name the outline image:OutlinesFilteredCytoplasm
"""
pipeline = cpp.Pipeline()
def callback(caller,event):
self.assertFalse(isinstance(event, cpp.LoadExceptionEvent))
pipeline.add_listener(callback)
pipeline.load(StringIO.StringIO(data))
self.assertEqual(len(pipeline.modules()), 6)
module = pipeline.modules()[-1]
self.assertTrue(isinstance(module, F.FilterObjects))
self.assertEqual(module.target_name, "MyFilteredObjects")
self.assertEqual(module.object_name, "MyObjects")
self.assertEqual(module.mode, F.MODE_MEASUREMENTS)
self.assertEqual(module.filter_choice, F.FI_LIMITS)
self.assertEqual(module.rules_directory.dir_choice, cpprefs.DEFAULT_INPUT_FOLDER_NAME)
self.assertEqual(module.rules_directory.custom_path, "./rules")
self.assertEqual(module.rules_file_name, "myrules.txt")
self.assertEqual(module.measurement_count.value, 2)
self.assertEqual(module.additional_object_count.value, 2)
self.assertEqual(module.measurements[0].measurement,
"Intensity_LowerQuartileIntensity_DNA")
self.assertTrue(module.measurements[0].wants_minimum)
self.assertFalse(module.measurements[0].wants_maximum)
self.assertAlmostEqual(module.measurements[0].min_limit.value, 0.2)
self.assertAlmostEqual(module.measurements[0].max_limit.value, 1.5)
self.assertEqual(module.measurements[1].measurement,
"Intensity_UpperQuartileIntensity_DNA")
self.assertFalse(module.measurements[1].wants_minimum)
self.assertTrue(module.measurements[1].wants_maximum)
self.assertAlmostEqual(module.measurements[1].min_limit.value, 0.9)
self.assertAlmostEqual(module.measurements[1].max_limit.value, 1.8)
for group, name in zip(module.additional_objects,('Cells','Cytoplasm')):
self.assertEqual(group.object_name, name)
self.assertEqual(group.target_name, "Filtered%s" % name)
self.assertEqual(group.outlines_name, "OutlinesFiltered%s" % name)
self.assertFalse(group.wants_outlines)
def test_05_07_load_v5(self):
data = r"""CellProfiler Pipeline: http://www.cellprofiler.org
Version:1
SVNRevision:9025
FilterObjects:[module_num:6|svn_version:\'9000\'|variable_revision_number:5|show_window:True|notes:\x5B\x5D]
Name the output objects:MyFilteredObjects
Select the object to filter:MyObjects
Filter using classifier rules or measurements?:Measurements
Select the filtering method:Limits
What did you call the objects that contain the filtered objects?:None
Retain the outlines of filtered objects for use later in the pipeline (for example, in SaveImages)?:No
Name the outline image:FilteredObjects
Rules file location:Default input folder\x7C./rules
Rules file name:myrules.txt
Hidden:2
Hidden:2
Select the measurement to filter by:Intensity_LowerQuartileIntensity_DNA
Filter using a minimum measurement value?:Yes
Minimum value:0.2
Filter using a maximum measurement value?:No
Maximum value:1.5
Select the measurement to filter by:Intensity_UpperQuartileIntensity_DNA
Filter using a minimum measurement value?:No
Minimum value:0.9
Filter using a maximum measurement value?:Yes
Maximum value:1.8
Select additional object to relabel:Cells
Name the relabeled objects:FilteredCells
Save outlines of relabeled objects?:No
Name the outline image:OutlinesFilteredCells
Select additional object to relabel:Cytoplasm
Name the relabeled objects:FilteredCytoplasm
Save outlines of relabeled objects?:No
Name the outline image:OutlinesFilteredCytoplasm
"""
pipeline = cpp.Pipeline()
def callback(caller,event):
self.assertFalse(isinstance(event, cpp.LoadExceptionEvent))
pipeline.add_listener(callback)
pipeline.load(StringIO.StringIO(data))
self.assertEqual(len(pipeline.modules()), 1)
module = pipeline.modules()[-1]
self.assertTrue(isinstance(module, F.FilterObjects))
self.assertEqual(module.target_name, "MyFilteredObjects")
self.assertEqual(module.object_name, "MyObjects")
self.assertEqual(module.mode, F.MODE_MEASUREMENTS)
self.assertEqual(module.filter_choice, F.FI_LIMITS)
self.assertEqual(module.rules_directory.dir_choice, cpprefs.DEFAULT_INPUT_FOLDER_NAME)
self.assertEqual(module.rules_directory.custom_path, "./rules")
self.assertEqual(module.rules_file_name, "myrules.txt")
self.assertEqual(module.rules_class, "1")
self.assertEqual(module.measurement_count.value, 2)
self.assertEqual(module.additional_object_count.value, 2)
self.assertEqual(module.measurements[0].measurement,
"Intensity_LowerQuartileIntensity_DNA")
self.assertTrue(module.measurements[0].wants_minimum)
self.assertFalse(module.measurements[0].wants_maximum)
self.assertAlmostEqual(module.measurements[0].min_limit.value, 0.2)
self.assertAlmostEqual(module.measurements[0].max_limit.value, 1.5)
self.assertEqual(module.measurements[1].measurement,
"Intensity_UpperQuartileIntensity_DNA")
self.assertFalse(module.measurements[1].wants_minimum)
self.assertTrue(module.measurements[1].wants_maximum)
self.assertAlmostEqual(module.measurements[1].min_limit.value, 0.9)
self.assertAlmostEqual(module.measurements[1].max_limit.value, 1.8)
for group, name in zip(module.additional_objects,('Cells','Cytoplasm')):
self.assertEqual(group.object_name, name)
self.assertEqual(group.target_name, "Filtered%s" % name)
self.assertEqual(group.outlines_name, "OutlinesFiltered%s" % name)
self.assertFalse(group.wants_outlines)
def test_05_08_load_v6(self):
data = r"""CellProfiler Pipeline: http://www.cellprofiler.org
Version:1
SVNRevision:9025
FilterObjects:[module_num:1|svn_version:\'9000\'|variable_revision_number:6|show_window:True|notes:\x5B\x5D]
Name the output objects:MyFilteredObjects
Select the object to filter:MyObjects
Filter using classifier rules or measurements?:Measurements
Select the filtering method:Limits
What did you call the objects that contain the filtered objects?:None
Retain the outlines of filtered objects for use later in the pipeline (for example, in SaveImages)?:No
Name the outline image:FilteredObjects
Rules file location:Default input folder\x7C./rules
Rules file name:myrules.txt
Rules class:1
Hidden:2
Hidden:2
Select the measurement to filter by:Intensity_LowerQuartileIntensity_DNA
Filter using a minimum measurement value?:Yes
Minimum value:0.2
Filter using a maximum measurement value?:No
Maximum value:1.5
Select the measurement to filter by:Intensity_UpperQuartileIntensity_DNA
Filter using a minimum measurement value?:No
Minimum value:0.9
Filter using a maximum measurement value?:Yes
Maximum value:1.8
Select additional object to relabel:Cells
Name the relabeled objects:FilteredCells
Save outlines of relabeled objects?:No
Name the outline image:OutlinesFilteredCells
Select additional object to relabel:Cytoplasm
Name the relabeled objects:FilteredCytoplasm
Save outlines of relabeled objects?:No
Name the outline image:OutlinesFilteredCytoplasm
"""
pipeline = cpp.Pipeline()
def callback(caller,event):
self.assertFalse(isinstance(event, cpp.LoadExceptionEvent))
pipeline.add_listener(callback)
pipeline.load(StringIO.StringIO(data))
self.assertEqual(len(pipeline.modules()), 1)
module = pipeline.modules()[-1]
self.assertTrue(isinstance(module, F.FilterObjects))
self.assertEqual(module.target_name, "MyFilteredObjects")
self.assertEqual(module.object_name, "MyObjects")
self.assertEqual(module.mode, F.MODE_MEASUREMENTS)
self.assertEqual(module.filter_choice, F.FI_LIMITS)
self.assertEqual(module.per_object_assignment, F.PO_BOTH)
self.assertEqual(module.rules_directory.dir_choice, cpprefs.DEFAULT_INPUT_FOLDER_NAME)
self.assertEqual(module.rules_directory.custom_path, "./rules")
self.assertEqual(module.rules_file_name, "myrules.txt")
self.assertEqual(module.rules_class, "1")
self.assertEqual(module.measurement_count.value, 2)
self.assertEqual(module.additional_object_count.value, 2)
self.assertEqual(module.measurements[0].measurement,
"Intensity_LowerQuartileIntensity_DNA")
self.assertTrue(module.measurements[0].wants_minimum)
self.assertFalse(module.measurements[0].wants_maximum)
self.assertAlmostEqual(module.measurements[0].min_limit.value, 0.2)
self.assertAlmostEqual(module.measurements[0].max_limit.value, 1.5)
self.assertEqual(module.measurements[1].measurement,
"Intensity_UpperQuartileIntensity_DNA")
self.assertFalse(module.measurements[1].wants_minimum)
self.assertTrue(module.measurements[1].wants_maximum)
self.assertAlmostEqual(module.measurements[1].min_limit.value, 0.9)
self.assertAlmostEqual(module.measurements[1].max_limit.value, 1.8)
for group, name in zip(module.additional_objects,('Cells','Cytoplasm')):
self.assertEqual(group.object_name, name)
self.assertEqual(group.target_name, "Filtered%s" % name)
self.assertEqual(group.outlines_name, "OutlinesFiltered%s" % name)
self.assertFalse(group.wants_outlines)
def test_05_09_load_v7(self):
data = r"""CellProfiler Pipeline: http://www.cellprofiler.org
Version:1
SVNRevision:9025
FilterObjects:[module_num:1|svn_version:\'9000\'|variable_revision_number:7|show_window:True|notes:\x5B\x5D]
Name the output objects:MyFilteredObjects
Select the object to filter:MyObjects
Filter using classifier rules or measurements?:Measurements
Select the filtering method:Limits
What did you call the objects that contain the filtered objects?:None
Retain the outlines of filtered objects for use later in the pipeline (for example, in SaveImages)?:No
Name the outline image:FilteredObjects
Rules file location:Default input folder\x7C./rules
Rules file name:myrules.txt
Rules class:1
Hidden:2
Hidden:2
Assign overlapping child to:Parent with most overlap
Select the measurement to filter by:Intensity_LowerQuartileIntensity_DNA
Filter using a minimum measurement value?:Yes
Minimum value:0.2
Filter using a maximum measurement value?:No
Maximum value:1.5
Select the measurement to filter by:Intensity_UpperQuartileIntensity_DNA
Filter using a minimum measurement value?:No
Minimum value:0.9
Filter using a maximum measurement value?:Yes
Maximum value:1.8
Select additional object to relabel:Cells
Name the relabeled objects:FilteredCells
Save outlines of relabeled objects?:No
Name the outline image:OutlinesFilteredCells
Select additional object to relabel:Cytoplasm
Name the relabeled objects:FilteredCytoplasm
Save outlines of relabeled objects?:No
Name the outline image:OutlinesFilteredCytoplasm
"""
pipeline = cpp.Pipeline()
def callback(caller,event):
self.assertFalse(isinstance(event, cpp.LoadExceptionEvent))
pipeline.add_listener(callback)
pipeline.load(StringIO.StringIO(data))
self.assertEqual(len(pipeline.modules()), 1)
module = pipeline.modules()[-1]
self.assertTrue(isinstance(module, F.FilterObjects))
self.assertEqual(module.target_name, "MyFilteredObjects")
self.assertEqual(module.object_name, "MyObjects")
self.assertEqual(module.mode, F.MODE_MEASUREMENTS)
self.assertEqual(module.filter_choice, F.FI_LIMITS)
self.assertEqual(module.per_object_assignment, F.PO_PARENT_WITH_MOST_OVERLAP)
self.assertEqual(module.rules_directory.dir_choice, cpprefs.DEFAULT_INPUT_FOLDER_NAME)
self.assertEqual(module.rules_directory.custom_path, "./rules")
self.assertEqual(module.rules_file_name, "myrules.txt")
self.assertEqual(module.rules_class, "1")
self.assertEqual(module.measurement_count.value, 2)
self.assertEqual(module.additional_object_count.value, 2)
self.assertEqual(module.measurements[0].measurement,
"Intensity_LowerQuartileIntensity_DNA")
self.assertTrue(module.measurements[0].wants_minimum)
self.assertFalse(module.measurements[0].wants_maximum)
self.assertAlmostEqual(module.measurements[0].min_limit.value, 0.2)
self.assertAlmostEqual(module.measurements[0].max_limit.value, 1.5)
self.assertEqual(module.measurements[1].measurement,
"Intensity_UpperQuartileIntensity_DNA")
self.assertFalse(module.measurements[1].wants_minimum)
self.assertTrue(module.measurements[1].wants_maximum)
self.assertAlmostEqual(module.measurements[1].min_limit.value, 0.9)
self.assertAlmostEqual(module.measurements[1].max_limit.value, 1.8)
for group, name in zip(module.additional_objects,('Cells','Cytoplasm')):
self.assertEqual(group.object_name, name)
self.assertEqual(group.target_name, "Filtered%s" % name)
self.assertEqual(group.outlines_name, "OutlinesFiltered%s" % name)
self.assertFalse(group.wants_outlines)
def test_06_01_get_measurement_columns(self):
'''Test the get_measurement_columns function'''
workspace, module = self.make_workspace({ "my_objects": np.zeros((10,10),int) })
module.object_name.value = "my_objects"
module.target_name.value = "my_result"
module.measurements[0].measurement.value = "my_measurement"
module.filter_choice.value = F.FI_MAXIMAL
m = workspace.measurements
m.add_measurement("my_objects","my_measurement",np.zeros((0,)))
module.run(workspace)
image_features = m.get_feature_names(cpm.IMAGE)
result_features = m.get_feature_names("my_result")
object_features = m.get_feature_names("my_objects")
columns = module.get_measurement_columns(workspace.pipeline)
self.assertEqual(len(columns), 6)
for feature in image_features:
self.assertTrue(any([(column[0] == cpm.IMAGE and
column[1] == feature)
for column in columns]))
for feature in result_features:
self.assertTrue(any([(column[0] == "my_result" and
column[1] == feature)
for column in columns]))
for feature in object_features:
if feature != 'my_measurement':
self.assertTrue(any([(column[0] == "my_objects" and
column[1] == feature)
for column in columns]))
for column in columns:
self.assertTrue(column[0] in (cpm.IMAGE, "my_result", "my_objects"))
if column[0] == cpm.IMAGE:
self.assertTrue(column[1] in image_features)
elif column[0] == "my_result":
self.assertTrue(column[1] in result_features)
for feature, coltype in (("Location_Center_X", cpm.COLTYPE_FLOAT),
("Location_Center_Y", cpm.COLTYPE_FLOAT),
("Number_Object_Number", cpm.COLTYPE_INTEGER),
("Parent_my_objects", cpm.COLTYPE_INTEGER),
("Children_my_result_Count", cpm.COLTYPE_INTEGER),
("Count_my_result", cpm.COLTYPE_INTEGER)):
fcolumns = [x for x in columns if x[1] == feature]
self.assertEqual(len(fcolumns),1,"Missing or duplicate column: %s"%feature)
self.assertEqual(fcolumns[0][2], coltype)
for object_name, category in (("Image",dict(Count=["my_result"])),
("my_objects",
dict(Children=["my_result_Count"])),
("my_result",
dict(Location=["Center_X","Center_Y"],
Parent=["my_objects"],
Number=["Object_Number"]))):
categories = module.get_categories(None, object_name)
for c in category.keys():
self.assertTrue(c in categories)
ff = module.get_measurements(None, object_name, c)
for f in ff:
self.assertTrue(f in category[c])
def test_08_01_filter_by_rule(self):
labels = np.zeros((10,20),int)
labels[3:5,4:9] = 1
labels[7:9,6:12] = 2
labels[4:9, 14:18] = 3
workspace, module = self.make_workspace({ "MyObjects": labels })
self.assertTrue(isinstance(module, F.FilterByObjectMeasurement))
m = workspace.measurements
m.add_measurement("MyObjects","MyMeasurement",
np.array([ 1.5, 2.3,1.8]))
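        # CellProfiler Analyst rule format: when the predicate holds, the
        # first weight vector is added to the per-class scores, otherwise the
        # second; here only object 2 (measurement 2.3 > 2.0) votes for class 1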
rules_file_contents = "IF (MyObjects_MyMeasurement > 2.0, [1.0,-1.0], [-1.0,1.0])\n"
rules_path = tempfile.mktemp()
fd = open(rules_path, 'wt')
try:
fd.write(rules_file_contents)
fd.close()
rules_dir, rules_file = os.path.split(rules_path)
module.object_name.value = "MyObjects"
module.mode.value = F.MODE_RULES
module.rules_file_name.value = rules_file
module.rules_directory.dir_choice = cpprefs.ABSOLUTE_FOLDER_NAME
module.rules_directory.custom_path = rules_dir
module.target_name.value = "MyTargetObjects"
module.run(workspace)
target_objects = workspace.object_set.get_objects("MyTargetObjects")
target_labels = target_objects.segmented
self.assertTrue(np.all(target_labels[labels == 2] > 0))
self.assertTrue(np.all(target_labels[labels != 2] == 0))
finally:
os.remove(rules_path)
def test_08_02_filter_by_3_class_rule(self):
rules_file_contents = (
"IF (MyObjects_MyMeasurement > 2.0, [1.0,-1.0,-1.0], [-0.5,0.5,0.5])\n"
"IF (MyObjects_MyMeasurement > 1.6, [0.5,0.5,-0.5], [-1.0,-1.0,1.0])\n")
expected_class = [None, "3", "1", "2"]
rules_path = tempfile.mktemp()
fd = open(rules_path, 'wt')
fd.write(rules_file_contents)
fd.close()
try:
for rules_class in ("1", "2", "3"):
labels = np.zeros((10,20),int)
labels[3:5,4:9] = 1
labels[7:9,6:12] = 2
labels[4:9, 14:18] = 3
workspace, module = self.make_workspace({ "MyObjects": labels })
self.assertTrue(isinstance(module, F.FilterByObjectMeasurement))
m = workspace.measurements
m.add_measurement("MyObjects","MyMeasurement",
np.array([ 1.5, 2.3,1.8]))
rules_dir, rules_file = os.path.split(rules_path)
module.object_name.value = "MyObjects"
module.mode.value = F.MODE_RULES
module.rules_file_name.value = rules_file
module.rules_directory.dir_choice = cpprefs.ABSOLUTE_FOLDER_NAME
module.rules_directory.custom_path = rules_dir
module.rules_class.value = rules_class
module.target_name.value = "MyTargetObjects"
module.run(workspace)
target_objects = workspace.object_set.get_objects("MyTargetObjects")
target_labels = target_objects.segmented
kept = expected_class.index(rules_class)
self.assertTrue(np.all(target_labels[labels == kept] > 0))
self.assertTrue(np.all(target_labels[labels != kept] == 0))
finally:
os.remove(rules_path)
def test_09_01_discard_border_objects(self):
'''Test the mode to discard border objects'''
labels = np.zeros((10,10), int)
labels[1:4, 0:3] = 1
labels[4:8, 1:5] = 2
labels[:,9] = 3
expected = np.zeros((10,10), int)
expected[4:8, 1:5] = 1
workspace, module = self.make_workspace({"input_objects" : labels})
module.object_name.value = "input_objects"
module.target_name.value = "output_objects"
module.mode.value = F.MODE_BORDER
module.run(workspace)
output_objects = workspace.object_set.get_objects("output_objects")
self.assertTrue(np.all(expected == output_objects.segmented))
def test_09_02_discard_mask_objects(self):
'''Test discarding objects that touch the mask of objects parent img'''
mask = np.ones((10, 10), bool)
mask[5, 5] = False
labels = np.zeros((10, 10), int)
labels[1:4, 1:4] = 1
labels[5:8, 5:8] = 2
expected = labels.copy()
expected[expected==2] = 0
workspace, module = self.make_workspace({})
parent_image = cpi.Image(np.zeros((10, 10)), mask=mask)
workspace.image_set.add("input_image", parent_image)
input_objects = cpo.Objects()
input_objects.segmented = labels
input_objects.parent_image = parent_image
workspace.object_set.add_objects(input_objects, "input_objects")
module.object_name.value = "input_objects"
module.target_name.value = "output_objects"
module.mode.value = F.MODE_BORDER
module.run(workspace)
output_objects = workspace.object_set.get_objects("output_objects")
self.assertTrue(np.all(expected == output_objects.segmented))
| gpl-2.0 |
xiaojingyi/tushare | tushare/internet/boxoffice.py | 7 | 7205 | # -*- coding:utf-8 -*-
"""
Movie box office data
Created on 2015/12/24
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
import pandas as pd
from tushare.stock import cons as ct
from tushare.util import dateu as du
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
import time
import json
def realtime_boxoffice(retry_count=3,pause=0.001):
"""
    Fetch real-time movie box office data.
    Data source: EBOT (EntGroup's box-office database)
    Parameters
    ------
        retry_count : int, default 3
                 number of retries when a network or similar error occurs
        pause : float, default 0.001
                seconds to pause between repeated requests, to avoid
                problems caused by issuing requests too close together
    return
    -------
        DataFrame
              BoxOffice     real-time box office (CNY 10k)
              Irank         rank
              MovieName     movie title
              boxPer        share of total box office (%)
              movieDay      days since release
              sumBoxOffice  cumulative box office (CNY 10k)
              time          time the data was fetched
"""
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(ct.MOVIE_BOX%(ct.P_TYPE['http'], ct.DOMAINS['mbox'],
ct.BOX, _random()))
lines = urlopen(request, timeout = 10).read()
if len(lines) < 15: #no data
return None
except Exception as e:
print(e)
else:
js = json.loads(lines.decode('utf-8') if ct.PY3 else lines)
df = pd.DataFrame(js['data2'])
df = df.drop(['MovieImg','mId'], axis=1)
df['time'] = du.get_now()
return df
def day_boxoffice(date=None, retry_count=3, pause=0.001):
"""
    Fetch single-day movie box office data.
    Data source: EBOT (EntGroup's box-office database)
    Parameters
    ------
        date : the day to query; defaults to the previous day
        retry_count : int, default 3
                 number of retries when a network or similar error occurs
        pause : float, default 0.001
                seconds to pause between repeated requests, to avoid
                problems caused by issuing requests too close together
    return
    -------
        DataFrame
              AvgPrice      average ticket price
              AvpPeoPle     average attendance per screening
              BoxOffice     single-day box office (CNY 10k)
              BoxOffice_Up  day-over-day change (%)
              IRank         rank
              MovieDay      days since release
              MovieName     movie title
              SumBoxOffice  cumulative box office (CNY 10k)
              WomIndex      word-of-mouth index
"""
for _ in range(retry_count):
time.sleep(pause)
try:
if date is None:
date = 0
else:
date = int(du.diff_day(du.today(), date)) + 1
request = Request(ct.BOXOFFICE_DAY%(ct.P_TYPE['http'], ct.DOMAINS['mbox'],
ct.BOX, date, _random()))
lines = urlopen(request, timeout = 10).read()
if len(lines) < 15: #no data
return None
except Exception as e:
print(e)
else:
js = json.loads(lines.decode('utf-8') if ct.PY3 else lines)
df = pd.DataFrame(js['data1'])
df = df.drop(['MovieImg', 'BoxOffice1', 'MovieID', 'Director', 'IRank_pro'], axis=1)
return df
def month_boxoffice(date=None, retry_count=3, pause=0.001):
"""
    Fetch single-month movie box office data.
    Data source: EBOT (EntGroup's box-office database)
    Parameters
    ------
        date : the month to query, format YYYY-MM; defaults to the previous month
        retry_count : int, default 3
                 number of retries when a network or similar error occurs
        pause : float, default 0.001
                seconds to pause between repeated requests, to avoid
                problems caused by issuing requests too close together
    return
    -------
        DataFrame
              Irank         rank
              MovieName     movie title
              WomIndex      word-of-mouth index
              avgboxoffice  average ticket price
              avgshowcount  average attendance per screening
              box_pro       share of the month's total box office
              boxoffice     single-month box office (CNY 10k)
              days          days screened within the month
              releaseTime   release date
"""
if date is None:
date = du.day_last_week(-30)[0:7]
elif len(date)>8:
print(ct.BOX_INPUT_ERR_MSG)
return
date += '-01'
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(ct.BOXOFFICE_MONTH%(ct.P_TYPE['http'], ct.DOMAINS['mbox'],
ct.BOX, date))
lines = urlopen(request, timeout = 10).read()
if len(lines) < 15: #no data
return None
except Exception as e:
print(e)
else:
js = json.loads(lines.decode('utf-8') if ct.PY3 else lines)
df = pd.DataFrame(js['data1'])
df = df.drop(['defaultImage', 'EnMovieID'], axis=1)
return df
def day_cinema(date=None, retry_count=3, pause=0.001):
"""
    Fetch the single-day box office ranking of cinemas.
    Data source: EBOT (EntGroup's box-office database)
    Parameters
    ------
        date : the day to query; defaults to the previous day
        retry_count : int, default 3
                 number of retries when a network or similar error occurs
        pause : float, default 0.001
                seconds to pause between repeated requests, to avoid
                problems caused by issuing requests too close together
    return
    -------
        DataFrame
              Attendance          seat-occupancy rate
              AvgPeople           average attendance per screening
              CinemaName          cinema name
              RowNum              rank
              TodayAudienceCount  number of moviegoers that day
              TodayBox            box office that day
              TodayShowCount      number of screenings that day
              price               average ticket price (CNY)
"""
if date is None:
date = du.day_last_week(-1)
data = pd.DataFrame()
ct._write_head()
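    # the cinema-ranking endpoint is paginated; fetch pages 1-10 and then
    # drop duplicates, since rows can evidently repeat across page boundaries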
for x in range(1, 11):
df = _day_cinema(date, x, retry_count,
pause)
if df is not None:
data = pd.concat([data, df])
data = data.drop_duplicates()
return data.reset_index(drop=True)
def _day_cinema(date=None, pNo=1, retry_count=3, pause=0.001):
ct._write_console()
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(ct.BOXOFFICE_CBD%(ct.P_TYPE['http'], ct.DOMAINS['mbox'],
ct.BOX, pNo, date))
lines = urlopen(request, timeout = 10).read()
if len(lines) < 15: #no data
return None
except Exception as e:
print(e)
else:
js = json.loads(lines.decode('utf-8') if ct.PY3 else lines)
df = pd.DataFrame(js['data1'])
df = df.drop(['CinemaID'], axis=1)
return df
def _random(n=13):
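    # n-digit random integer, appended to request URLs as a cache-busting token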
from random import randint
start = 10**(n-1)
end = (10**n)-1
return str(randint(start, end))
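# A minimal usage sketch (hypothetical session; it assumes network access to
# the EBOT endpoints configured in tushare.stock.cons, which may have moved):
if __name__ == '__main__':
    df_now = realtime_boxoffice()   # current real-time chart
    if df_now is not None:
        print(df_now.head())
    df_day = day_boxoffice()        # defaults to the previous day
    if df_day is not None:
        print(df_day.head())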
| bsd-3-clause |
ocefpaf/biggus | biggus/tests/unit/init/test_mean.py | 2 | 4693 | # (C) British Crown Copyright 2014, Met Office
#
# This file is part of Biggus.
#
# Biggus is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Biggus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Biggus. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for `biggus.mean`."""
import numpy as np
import numpy.ma as ma
import unittest
import biggus
from biggus import mean
class TestInvalidAxis(unittest.TestCase):
def setUp(self):
self.array = biggus.NumpyArrayAdapter(np.arange(12))
def test_none(self):
with self.assertRaises(biggus.AxisSupportError):
mean(self.array)
def test_too_large(self):
with self.assertRaises(ValueError):
mean(self.array, axis=1)
def test_too_small(self):
with self.assertRaises(ValueError):
mean(self.array, axis=-2)
def test_multiple(self):
array = biggus.NumpyArrayAdapter(np.arange(12).reshape(3, 4))
with self.assertRaises(biggus.AxisSupportError):
mean(array, axis=(0, 1))
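# biggus.mean follows NumPy's promotion rules for a true-division mean:
# integer and boolean inputs aggregate to float, while floating and complex
# dtypes are preserved. The class below pins that behaviour down per dtype.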
class TestAggregationDtype(unittest.TestCase):
def _check(self, source, target):
array = biggus.NumpyArrayAdapter(np.arange(2, dtype=source))
agg = mean(array, axis=0)
self.assertEqual(agg.dtype, target)
def test_int_to_float(self):
dtypes = [np.int8, np.int16, np.int32, np.int]
for dtype in dtypes:
self._check(dtype, np.float)
def test_bool_to_float(self):
self._check(np.bool, np.float)
def test_floats(self):
dtypes = [np.float16, np.float32, np.float]
for dtype in dtypes:
self._check(dtype, dtype)
def test_complex(self):
self._check(np.complex, np.complex)
class TestNumpyArrayAdapter(unittest.TestCase):
def setUp(self):
self.data = np.arange(12)
def _check(self, data, dtype=None, shape=None):
data = np.asarray(data, dtype=dtype)
if shape is not None:
data = data.reshape(shape)
array = biggus.NumpyArrayAdapter(data)
result = mean(array, axis=0).ndarray()
expected = np.mean(data, axis=0)
if expected.ndim == 0:
expected = np.asarray(expected)
np.testing.assert_array_equal(result, expected)
def test_flat_int(self):
self._check(self.data)
def test_multi_int(self):
self._check(self.data, shape=(3, 4))
def test_flat_float(self):
self._check(self.data, dtype=np.float)
def test_multi_float(self):
self._check(self.data, dtype=np.float, shape=(3, 4))
class TestNumpyArrayAdapterMasked(unittest.TestCase):
def _check(self, data):
array = biggus.NumpyArrayAdapter(data)
result = mean(array, axis=0).masked_array()
expected = ma.mean(data, axis=0)
if expected.ndim == 0:
expected = ma.asarray(expected)
np.testing.assert_array_equal(result.filled(), expected.filled())
np.testing.assert_array_equal(result.mask, expected.mask)
def test_no_mask_flat(self):
for dtype in [np.int, np.float]:
data = ma.arange(12, dtype=dtype)
self._check(data)
def test_no_mask_multi(self):
for dtype in [np.int, np.float]:
data = ma.arange(12, dtype=dtype).reshape(3, 4)
self._check(data)
def test_flat(self):
for dtype in [np.int, np.float]:
data = ma.arange(12, dtype=dtype)
data[::2] = ma.masked
self._check(data)
data.mask = ma.nomask
data[1::2] = ma.masked
self._check(data)
def test_multi(self):
for dtype in [np.int, np.float]:
data = ma.arange(12, dtype=dtype)
data[::2] = ma.masked
self._check(data.reshape(3, 4))
data = ma.arange(12, dtype=dtype)
data[1::2] = ma.masked
self._check(data.reshape(3, 4))
data = ma.arange(12, dtype=dtype).reshape(3, 4)
data[::2] = ma.masked
self._check(data)
data = ma.arange(12, dtype=dtype).reshape(3, 4)
data[1::2] = ma.masked
self._check(data)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
iABC2XYZ/abc | PIC4/ConfigLattice.py | 1 | 5266 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
@author: Peiyong Jiang
Author: Peiyong Jiang (姜培勇)
[email protected]
Description of this file:
"""
from ConfigInput import X,PX,Y,PY,Z,PZ,Q,M,QReal,freq
from Input import *
from Field import FieldAdd,FieldInter_2D,FieldInter_3D,FieldExtern_Drift
from Field import FieldExtern_AllPart
from Windows import WindowDef,WindowInner,WindowLeft,WindowRight
numPart=np.int64(numPart)
i=1
while vars().has_key('ele_' + str(i)): i += 1
numEle=i-1
EleType=[]
EleStart=np.zeros(numEle)
EleEnd=np.zeros(numEle)
for iEle in range(1,numEle+1):
EleType.append(eval('ele_'+str(iEle)).lower())
EleStart[iEle-1]=eval('zStart_'+str(iEle))/1000.
EleEnd[iEle-1]=eval('zEnd_'+str(iEle))/1000.
z0=zBegin/1000.
zOver=zFinish/1000.
spaceChargeStep=nStep
iSpaceChargeStep=0
beamEnergyMain=beamEnergy
while True:
iSpaceChargeStep+=1
startWindow,endWindow=WindowDef(z0,beamEnergy,freq)
idInner=WindowInner(startWindow,endWindow,EleStart,EleEnd)
idLeft=WindowLeft(startWindow,endWindow,EleStart,EleEnd)
idRight=WindowRight(startWindow,endWindow,EleStart,EleEnd)
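    # classify the window against the lattice: idInner lists elements that
    # contain the window, idLeft/idRight the elements it overlaps only at its
    # left/right edge (a leading 0 apparently encodes "no such element")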
    if idInner[0]!=0: # the window is inside a field region
        # compute the fields
Ex,Ey,Ez,Bx,By,Bz=np.zeros([numPart,1]),np.zeros([numPart,1]),np.zeros([numPart,1]), \
np.zeros([numPart,1]),np.zeros([numPart,1]),np.zeros([numPart,1])
        # compute the external fields
for idEle in idInner:
exEx,exEy,exEz,exBx,exBy,exBz=FieldExtern_AllPart(idEle)
Ex,Ey,Ez,Bx,By,Bz=FieldAdd(Ex,Ey,Ez,Bx,By,Bz,exEx,exEy,exEz,exBx,exBy,exBz)
        # compute the internal (space-charge) fields:
xMin,xMax,yMin,yMax=-1e10,1e10,-1e10,1e10
for idEle in idInner:
try : xMinTmp=eval('xMin_'+str(idEle))/1000.
except: xMinTmp=xMinG/1000.
try : xMaxTmp=eval('xMax_'+str(idEle))/1000.
except: xMaxTmp=xMaxG/1000.
try : yMinTmp=eval('yMin_'+str(idEle))/1000.
except: yMinTmp=yMinG/1000.
try : yMaxTmp=eval('yMax_'+str(idEle))/1000.
except: yMaxTmp=yMaxG/1000.
if np.abs(xMinTmp)<np.abs(xMin):
xMin=xMinTmp
if np.abs(xMaxTmp)<np.abs(xMax):
xMax=xMaxTmp
if np.abs(yMinTmp)<np.abs(yMin):
yMin=yMinTmp
if np.abs(yMaxTmp)<np.abs(yMax):
yMax=yMaxTmp
zMin,zMax=startWindow,endWindow
if iSpaceChargeStep==spaceChargeStep:
iSpaceChargeStep-=spaceChargeStep
if spaceCharge.lower()=='3d':
xGrid,yGrid,zGrid=2**xGridLog,2**yGridLog,2**zGridLog
inEx,inEy,inEz,inBx,inBy,inBz=FieldInter_3D(beamEnergyMain,X,Y,Z,QReal,xMin,xMax,yMin,yMax,zMin,zMax,xGrid,yGrid,zGrid)
Ex,Ey,Ez,Bx,By,Bz=FieldAdd(Ex,Ey,Ez,Bx,By,Bz,inEx,inEy,inEz,inBx,inBy,inBz)
if spaceCharge.lower()=='2d':
xGrid,yGrid=2**xGridLog,2**yGridLog
                inEx,inEy,inBx,inBy=FieldInter_2D(beamEnergyMain,X,Y,Z,QReal,xMin,xMax,yMin,yMax,xGrid,yGrid)  # the 2-D solver returns transverse components only
Ex,Ey,Bx,By=FieldAdd(Ex,Ey,Bx,By,inEx,inEy,inBx,inBy)
try : freq=eval('freqMHz_'+str(iEle))*1e6
except: freq=freqMHzG*1e6
try : dT=eval('dT_'+str(iEle))/freq
except: dT=dTG/freq
    if (idLeft[0]!=0) or (idRight[0]!=0): # the window sits on a field-region boundary
print 'B1'
pass
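    # advance the window; the 0.1 m step is hard-coded here and looks like a
    # placeholder for a proper integrator step derived from dT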
z0+=0.1
if z0>zOver:
break
'''
zBegin/=1000. # m
for iEle in range(1,numEle+1):
if eval('ele_'+str(iEle)).lower()=='drift':
ele='drift'
if eval('ele_'+str(iEle))=='EMField':
ele='emfield'
EMMod=eval('EMMod_'+str(iEle))
EMFieldLoc=eval('EMFieldLoc_'+str(iEle))
#----------------------------------------
zStart=eval('zStart_'+str(iEle))/1000.
zEnd=eval('zEnd_'+str(iEle))/1000.
try : freq=eval('freqMHz_'+str(iEle))*1e6
except: freq=freqMHzG*1e6
try : dT=eval('dT_'+str(iEle))/freq
except: dT=dTG/freq
try : xMin=eval('xMin_'+str(iEle))/1000.
except: xMin=xMinG/1000.
try : xMax=eval('xMax_'+str(iEle))/1000.
except: xMax=xMaxG/1000.
try : yMin=eval('yMin_'+str(iEle))/1000.
except: yMin=yMinG/1000.
try : yMax=eval('yMax_'+str(iEle))/1000.
except: yMax=yMaxG/1000.
#-----------------------------------------
if ele=='drift':
exEx,exEy,exEz,exBx,exBy,exBz=FieldExtern_Drift()
if spaceCharge=='2d':
inEx,inEy,inBx,inBy=FieldInter_2D()
inEz,inBz=0.,0.
if spaceCharge=='3d':
inEx,inEy,inEz,inBx,inBy,inBz=FieldInter_3D()
Ex,Ey,Ez,Bx,By,Bz=FieldAdd(exEx,exEy,exEz,exBx,exBy,exBz,inEx,inEy,inEz,inBx,inBy,inBz)
if ele=='emfield':
pass
'''
| gpl-3.0 |
hilgroth/fiware-IoTAgent-Cplusplus | third_party/mosquitto-1.3.5/test/broker/03-pattern-matching.py | 25 | 3088 | #!/usr/bin/env python
import subprocess
import socket
import time
import inspect, os, sys
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
def pattern_test(sub_topic, pub_topic):
rc = 1
keepalive = 60
connect_packet = mosq_test.gen_connect("pattern-sub-test", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)
publish_packet = mosq_test.gen_publish(pub_topic, qos=0, payload="message")
publish_retained_packet = mosq_test.gen_publish(pub_topic, qos=0, retain=True, payload="message")
mid = 312
subscribe_packet = mosq_test.gen_subscribe(mid, sub_topic, 0)
suback_packet = mosq_test.gen_suback(mid, 0)
mid = 234;
unsubscribe_packet = mosq_test.gen_unsubscribe(mid, sub_topic)
unsuback_packet = mosq_test.gen_unsuback(mid)
broker = subprocess.Popen(['../../src/mosquitto', '-p', '1888'], stderr=subprocess.PIPE)
try:
time.sleep(0.5)
sock = mosq_test.do_client_connect(connect_packet, connack_packet, timeout=20)
sock.send(subscribe_packet)
if mosq_test.expect_packet(sock, "suback", suback_packet):
pub = subprocess.Popen(['./03-pattern-matching-helper.py', pub_topic])
pub.wait()
if mosq_test.expect_packet(sock, "publish", publish_packet):
sock.send(unsubscribe_packet)
if mosq_test.expect_packet(sock, "unsuback", unsuback_packet):
sock.send(subscribe_packet)
if mosq_test.expect_packet(sock, "suback", suback_packet):
if mosq_test.expect_packet(sock, "publish retained", publish_retained_packet):
rc = 0
sock.close()
finally:
broker.terminate()
broker.wait()
if rc:
(stdo, stde) = broker.communicate()
print(stde)
raise
return rc
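# The cases below exercise MQTT topic-filter matching: '#' matches any number
# of trailing levels (including zero), '+' matches exactly one level, and an
# empty level (as in 'foo//bar') is a real level that '+' and '#' must still
# match. pattern_test verifies each delivery twice: once live and once from
# the broker's retained copy of the message.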
pattern_test("#", "test/topic")
pattern_test("#", "/test/topic")
pattern_test("foo/#", "foo/bar/baz")
pattern_test("foo/+/baz", "foo/bar/baz")
pattern_test("foo/+/baz/#", "foo/bar/baz")
pattern_test("foo/+/baz/#", "foo/bar/baz/bar")
pattern_test("foo/foo/baz/#", "foo/foo/baz/bar")
pattern_test("foo/#", "foo")
pattern_test("/#", "/foo")
pattern_test("test/topic/", "test/topic/")
pattern_test("test/topic/+", "test/topic/")
pattern_test("+/+/+/+/+/+/+/+/+/+/test", "one/two/three/four/five/six/seven/eight/nine/ten/test")
pattern_test("#", "test////a//topic")
pattern_test("#", "/test////a//topic")
pattern_test("foo/#", "foo//bar///baz")
pattern_test("foo/+/baz", "foo//baz")
pattern_test("foo/+/baz//", "foo//baz//")
pattern_test("foo/+/baz/#", "foo//baz")
pattern_test("foo/+/baz/#", "foo//baz/bar")
pattern_test("foo//baz/#", "foo//baz/bar")
pattern_test("foo/foo/baz/#", "foo/foo/baz/bar")
pattern_test("/#", "////foo///bar")
exit(0)
| agpl-3.0 |
mrknow/filmkodi | plugin.video.fanfilm/resources/lib/libraries/f4mproxy/utils/rsakey.py | 148 | 8833 | # Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""Abstract class for RSA."""
from .cryptomath import *
class RSAKey(object):
"""This is an abstract base class for RSA keys.
Particular implementations of RSA keys, such as
L{openssl_rsakey.OpenSSL_RSAKey},
L{python_rsakey.Python_RSAKey}, and
L{pycrypto_rsakey.PyCrypto_RSAKey},
inherit from this.
To create or parse an RSA key, don't use one of these classes
directly. Instead, use the factory functions in
L{tlslite.utils.keyfactory}.
"""
def __init__(self, n=0, e=0):
"""Create a new RSA key.
If n and e are passed in, the new key will be initialized.
@type n: int
@param n: RSA modulus.
@type e: int
@param e: RSA public exponent.
"""
raise NotImplementedError()
def __len__(self):
"""Return the length of this key in bits.
@rtype: int
"""
return numBits(self.n)
def hasPrivateKey(self):
"""Return whether or not this key has a private component.
@rtype: bool
"""
raise NotImplementedError()
def hashAndSign(self, bytes):
"""Hash and sign the passed-in bytes.
This requires the key to have a private component. It performs
a PKCS1-SHA1 signature on the passed-in data.
@type bytes: str or L{bytearray} of unsigned bytes
@param bytes: The value which will be hashed and signed.
@rtype: L{bytearray} of unsigned bytes.
@return: A PKCS1-SHA1 signature on the passed-in data.
"""
hashBytes = SHA1(bytearray(bytes))
prefixedHashBytes = self._addPKCS1SHA1Prefix(hashBytes)
sigBytes = self.sign(prefixedHashBytes)
return sigBytes
def hashAndVerify(self, sigBytes, bytes):
"""Hash and verify the passed-in bytes with the signature.
This verifies a PKCS1-SHA1 signature on the passed-in data.
@type sigBytes: L{bytearray} of unsigned bytes
@param sigBytes: A PKCS1-SHA1 signature.
@type bytes: str or L{bytearray} of unsigned bytes
@param bytes: The value which will be hashed and verified.
@rtype: bool
@return: Whether the signature matches the passed-in data.
"""
hashBytes = SHA1(bytearray(bytes))
# Try it with/without the embedded NULL
prefixedHashBytes1 = self._addPKCS1SHA1Prefix(hashBytes, False)
prefixedHashBytes2 = self._addPKCS1SHA1Prefix(hashBytes, True)
result1 = self.verify(sigBytes, prefixedHashBytes1)
result2 = self.verify(sigBytes, prefixedHashBytes2)
return (result1 or result2)
def sign(self, bytes):
"""Sign the passed-in bytes.
This requires the key to have a private component. It performs
a PKCS1 signature on the passed-in data.
@type bytes: L{bytearray} of unsigned bytes
@param bytes: The value which will be signed.
@rtype: L{bytearray} of unsigned bytes.
@return: A PKCS1 signature on the passed-in data.
"""
if not self.hasPrivateKey():
raise AssertionError()
paddedBytes = self._addPKCS1Padding(bytes, 1)
m = bytesToNumber(paddedBytes)
if m >= self.n:
raise ValueError()
c = self._rawPrivateKeyOp(m)
sigBytes = numberToByteArray(c, numBytes(self.n))
return sigBytes
def verify(self, sigBytes, bytes):
"""Verify the passed-in bytes with the signature.
This verifies a PKCS1 signature on the passed-in data.
@type sigBytes: L{bytearray} of unsigned bytes
@param sigBytes: A PKCS1 signature.
@type bytes: L{bytearray} of unsigned bytes
@param bytes: The value which will be verified.
@rtype: bool
@return: Whether the signature matches the passed-in data.
"""
if len(sigBytes) != numBytes(self.n):
return False
paddedBytes = self._addPKCS1Padding(bytes, 1)
c = bytesToNumber(sigBytes)
if c >= self.n:
return False
m = self._rawPublicKeyOp(c)
checkBytes = numberToByteArray(m, numBytes(self.n))
return checkBytes == paddedBytes
def encrypt(self, bytes):
"""Encrypt the passed-in bytes.
This performs PKCS1 encryption of the passed-in data.
@type bytes: L{bytearray} of unsigned bytes
@param bytes: The value which will be encrypted.
@rtype: L{bytearray} of unsigned bytes.
@return: A PKCS1 encryption of the passed-in data.
"""
paddedBytes = self._addPKCS1Padding(bytes, 2)
m = bytesToNumber(paddedBytes)
if m >= self.n:
raise ValueError()
c = self._rawPublicKeyOp(m)
encBytes = numberToByteArray(c, numBytes(self.n))
return encBytes
def decrypt(self, encBytes):
"""Decrypt the passed-in bytes.
This requires the key to have a private component. It performs
PKCS1 decryption of the passed-in data.
@type encBytes: L{bytearray} of unsigned bytes
@param encBytes: The value which will be decrypted.
@rtype: L{bytearray} of unsigned bytes or None.
@return: A PKCS1 decryption of the passed-in data or None if
the data is not properly formatted.
"""
if not self.hasPrivateKey():
raise AssertionError()
if len(encBytes) != numBytes(self.n):
return None
c = bytesToNumber(encBytes)
if c >= self.n:
return None
m = self._rawPrivateKeyOp(c)
decBytes = numberToByteArray(m, numBytes(self.n))
#Check first two bytes
if decBytes[0] != 0 or decBytes[1] != 2:
return None
#Scan through for zero separator
for x in range(1, len(decBytes)-1):
if decBytes[x]== 0:
break
else:
return None
return decBytes[x+1:] #Return everything after the separator
def _rawPrivateKeyOp(self, m):
raise NotImplementedError()
def _rawPublicKeyOp(self, c):
raise NotImplementedError()
def acceptsPassword(self):
"""Return True if the write() method accepts a password for use
in encrypting the private key.
@rtype: bool
"""
raise NotImplementedError()
def write(self, password=None):
"""Return a string containing the key.
@rtype: str
@return: A string describing the key, in whichever format (PEM)
is native to the implementation.
"""
raise NotImplementedError()
def generate(bits):
"""Generate a new key with the specified bit length.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
"""
raise NotImplementedError()
generate = staticmethod(generate)
# **************************************************************************
# Helper Functions for RSA Keys
# **************************************************************************
def _addPKCS1SHA1Prefix(self, bytes, withNULL=True):
# There is a long history of confusion over whether the SHA1
# algorithmIdentifier should be encoded with a NULL parameter or
# with the parameter omitted. While the original intention was
# apparently to omit it, many toolkits went the other way. TLS 1.2
# specifies the NULL should be included, and this behavior is also
# mandated in recent versions of PKCS #1, and is what tlslite has
# always implemented. Anyways, verification code should probably
# accept both. However, nothing uses this code yet, so this is
# all fairly moot.
if not withNULL:
prefixBytes = bytearray(\
[0x30,0x1f,0x30,0x07,0x06,0x05,0x2b,0x0e,0x03,0x02,0x1a,0x04,0x14])
else:
prefixBytes = bytearray(\
[0x30,0x21,0x30,0x09,0x06,0x05,0x2b,0x0e,0x03,0x02,0x1a,0x05,0x00,0x04,0x14])
prefixedBytes = prefixBytes + bytes
return prefixedBytes
def _addPKCS1Padding(self, bytes, blockType):
padLength = (numBytes(self.n) - (len(bytes)+3))
if blockType == 1: #Signature padding
pad = [0xFF] * padLength
elif blockType == 2: #Encryption padding
pad = bytearray(0)
while len(pad) < padLength:
padBytes = getRandomBytes(padLength * 2)
pad = [b for b in padBytes if b != 0]
pad = pad[:padLength]
else:
raise AssertionError()
padding = bytearray([0,blockType] + pad + [0])
paddedBytes = padding + bytes
return paddedBytes
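# --- Hedged sketch (editor's addition, not part of tlslite) ---
# Pure-arithmetic illustration of the EMSA-PKCS1-v1_5 block layout that
# _addPKCS1Padding() builds for blockType 1: 00 01 FF..FF 00 || payload.
# The sizes (2048-bit modulus, 35-byte SHA-1 DigestInfo payload) are assumed
# for the example only.
def _demo_pkcs1_type1_layout(modulus_len=256, payload_len=35):
    pad_length = modulus_len - (payload_len + 3)
    block = bytearray([0, 1] + [0xFF] * pad_length + [0]) + bytearray(payload_len)
    assert len(block) == modulus_len
    return block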
| apache-2.0 |
eonpatapon/contrail-controller | src/container/cni/cni/common/interface.py | 3 | 6506 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2016 Juniper Networks, Inc. All rights reserved.
#
"""
Module to manage interface inside a container
class Interface is base class. It is further derived to provide implementation
for veth-pair and macvlan interfaces
"""
import ctypes
import errno
import json
import os
import sys
import logging
from pyroute2 import NetlinkError, IPRoute
from cni import Error as Error
CNI_ERROR_NS_ENTER = 201
CNI_ERROR_NS_LEAVE = 202
CNI_ERROR_DEL_NS_INTF = 203
CNI_ERROR_CONFIG_NS_INTF = 204
# logger for the file
logger = None
class CniNamespace(object):
'''
Helper class to run processing inside a network-namespace
The class must be used using 'with' statement as follows,
with CniNamespace('/proc/<pid>/ns/net'):
do_something()
The process changes network-namespace to one given in ns_path. The old
network-namespace is restored at the end
'''
def __init__(self, ns_path):
self.libc = ctypes.CDLL('libc.so.6', use_errno=True)
# get current namespace and open fd in current network-namespace
self.my_path = '/proc/self/ns/net'
self.my_fd = os.open(self.my_path, os.O_RDONLY)
# Open fd in network-namespace ns_path
self.ns_path = ns_path
self.ns_fd = os.open(self.ns_path, os.O_RDONLY)
return
def close_files(self):
if self.ns_fd is not None:
os.close(self.ns_fd)
self.ns_fd = None
if self.my_fd is not None:
os.close(self.my_fd)
self.my_fd = None
return
def __enter__(self):
logger.debug('Entering namespace <' + self.ns_path + '>')
# Enter the namespace
if self.libc.setns(self.ns_fd, 0) == -1:
e = ctypes.get_errno()
self.close_files()
raise Error(CNI_ERROR_NS_ENTER,
'Error entering namespace ' + self.ns_path +
'. Error ' + str(e) + ' : ' + errno.errorcode[e])
return
def __exit__(self, type, value, tb):
logger.debug('Leaving namespace <' + self.ns_path + '>')
if self.libc.setns(self.my_fd, 0) == -1:
e = ctypes.get_errno()
self.close_files()
raise Error(CNI_ERROR_NS_LEAVE,
'Error leaving namespace ' + self.ns_path +
'. Error ' + str(e) + ' : ' + errno.errorcode[e])
self.close_files()
return
class Interface():
'''
Class for create/delete/configure of interface inside container
Class is derived further to manage veth-pair and mac-vlan interfaces
'''
def __init__(self, cni):
# configure logger
global logger
logger = logging.getLogger('cni-interface')
self.cni = cni
return
def get_link(self):
'''
Get link information for the interface inside the container
'''
link = None
with CniNamespace(self.cni.container_netns):
iproute = IPRoute()
iface = iproute.link_lookup(ifname=self.cni.container_ifname)
if len(iface) != 0:
idx = iface[0]
link = iproute.link("get", index=idx)
return link
def delete_link(self):
'''
Delete interface inside the container
'''
with CniNamespace(self.cni.container_netns):
iproute = IPRoute()
iface = iproute.link_lookup(ifname=self.cni.container_ifname)
if len(iface) == 0:
return
try:
iproute.link('del', index=iface[0])
except NetlinkError as e:
raise Error(CNI_ERROR_DEL_NS_INTF,
'Error deleting interface inside container ' +
self.cni.container_ifname + ' code ' +
str(e.code) + ' message ' + e.message)
return
def configure_link(self, ip4_address, plen, gateway):
'''
Configure following attributes for interface inside the container
- Link-up
- IP Address
- Default gateway
'''
        # Note: @staticmethod is not meaningful on a nested function (the
        # resulting descriptor object is not directly callable), so the
        # helper is defined as a plain local closure instead.
        def _intf_error(e, ifname, message):
            raise Error(CNI_ERROR_CONFIG_NS_INTF, message + ifname +
                        ' code ' + str(e.code) + ' message ' + e.message)
with CniNamespace(self.cni.container_netns):
iproute = IPRoute()
intf = iproute.link_lookup(ifname=self.cni.container_ifname)
if len(intf) == 0:
raise Error(CNI_ERROR_CONFIG_NS_INTF,
'Error finding interface ' +
self.cni.container_ifname + ' inside container')
idx_ns = intf[0]
try:
iproute.link('set', index=idx_ns, state='up')
except NetlinkError as e:
_intf_error(e, self.cni.container_ifname,
'Error setting link state for interface ' +
'inside container')
try:
iproute.addr('add', index=idx_ns, address=ip4_address,
prefixlen=plen)
except NetlinkError as e:
if e.code != errno.EEXIST:
_intf_error(e, self.cni.container_ifname,
'Error setting ip-address for interface ' +
'inside container')
try:
iproute.route('add', dst='0.0.0.0/0', gateway=gateway)
except NetlinkError as e:
if e.code != errno.EEXIST:
_intf_error(e, self.cni.container_ifname,
'Error adding default route inside container')
return
def configure_interface(self, ip4, plen, gw):
'''
Configure the interface inside container with,
- IP Address
- Default gateway
- Link-up
'''
# Configure interface inside the container
self.configure_link(ip4, plen, gw)
return
def add(self, ip4_address, plen, gateway):
# Create the interface
self.create_interface()
# Configure the interface based on config given in arguments
self.configure_interface(ip4_address, plen, gateway)
return
def delete(self):
# Delete the interface
self.delete_interface()
return
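# --- Hedged sketch (editor's addition, not part of the contrail module) ---
# Interface is abstract over create_interface()/delete_interface(); a
# concrete subclass (veth-pair or macvlan) is expected to supply them. The
# skeleton below is hypothetical and only illustrates the expected shape.
class _ExampleInterface(Interface):
    def create_interface(self):
        # a real implementation would create a veth pair or macvlan here
        pass
    def delete_interface(self):
        # reuse the base-class helper that deletes the link in the netns
        self.delete_link()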
| apache-2.0 |
MTASZTAKI/ApertusVR | plugins/assetImporter/assimpAssetLoader/3rdParty/assimp/port/PyAssimp/pyassimp/postprocess.py | 3 | 23548 | # <hr>Calculates the tangents and bitangents for the imported meshes.
#
# Does nothing if a mesh does not have normals. You might want this post
# processing step to be executed if you plan to use tangent space calculations
# such as normal mapping applied to the meshes. There's a config setting,
# <tt>#AI_CONFIG_PP_CT_MAX_SMOOTHING_ANGLE<tt>, which allows you to specify
# a maximum smoothing angle for the algorithm. However, usually you'll
# want to leave it at the default value.
#
aiProcess_CalcTangentSpace = 0x1
## <hr>Identifies and joins identical vertex data sets within all
# imported meshes.
#
# After this step is run, each mesh contains unique vertices,
# so a vertex may be used by multiple faces. You usually want
# to use this post processing step. If your application deals with
# indexed geometry, this step is compulsory or you'll just waste rendering
# time. <b>If this flag is not specified<b>, no vertices are referenced by
# more than one face and <b>no index buffer is required<b> for rendering.
#
aiProcess_JoinIdenticalVertices = 0x2
## <hr>Converts all the imported data to a left-handed coordinate space.
#
# By default the data is returned in a right-handed coordinate space (which
# OpenGL prefers). In this space, +X points to the right,
# +Z points towards the viewer, and +Y points upwards. In the DirectX
# coordinate space +X points to the right, +Y points upwards, and +Z points
# away from the viewer.
#
# You'll probably want to consider this flag if you use Direct3D for
# rendering. The #aiProcess_ConvertToLeftHanded flag supersedes this
# setting and bundles all conversions typically required for D3D-based
# applications.
#
aiProcess_MakeLeftHanded = 0x4
## <hr>Triangulates all faces of all meshes.
#
# By default the imported mesh data might contain faces with more than 3
# indices. For rendering you'll usually want all faces to be triangles.
# This post processing step splits up faces with more than 3 indices into
# triangles. Line and point primitives are #not# modified! If you want
# 'triangles only' with no other kinds of primitives, try the following
# solution:
# <ul>
# <li>Specify both #aiProcess_Triangulate and #aiProcess_SortByPType <li>
# <li>Ignore all point and line meshes when you process assimp's output<li>
# <ul>
#
aiProcess_Triangulate = 0x8
## <hr>Removes some parts of the data structure (animations, materials,
# light sources, cameras, textures, vertex components).
#
# The components to be removed are specified in a separate
# configuration option, <tt>#AI_CONFIG_PP_RVC_FLAGS<tt>. This is quite useful
# if you don't need all parts of the output structure. Vertex colors
# are rarely used today for example... Calling this step to remove unneeded
# data from the pipeline as early as possible results in increased
# performance and a more optimized output data structure.
# This step is also useful if you want to force Assimp to recompute
# normals or tangents. The corresponding steps don't recompute them if
# they're already there (loaded from the source asset). By using this
# step you can make sure they are NOT there.
#
# This flag is a poor one, mainly because its purpose is usually
# misunderstood. Consider the following case: a 3D model has been exported
# from a CAD app, and it has per-face vertex colors. Vertex positions can't be
# shared, thus the #aiProcess_JoinIdenticalVertices step fails to
# optimize the data because of these nasty little vertex colors.
# Most apps don't even process them, so it's all for nothing. By using
# this step, unneeded components are excluded as early as possible
# thus opening more room for internal optimizations.
#
aiProcess_RemoveComponent = 0x10
## <hr>Generates normals for all faces of all meshes.
#
# This is ignored if normals are already there at the time this flag
# is evaluated. Model importers try to load them from the source file, so
# they're usually already there. Face normals are shared between all points
# of a single face, so a single point can have multiple normals, which
# forces the library to duplicate vertices in some cases.
# #aiProcess_JoinIdenticalVertices is #senseless# then.
#
# This flag may not be specified together with #aiProcess_GenSmoothNormals.
#
aiProcess_GenNormals = 0x20
## <hr>Generates smooth normals for all vertices in the mesh.
#
# This is ignored if normals are already there at the time this flag
# is evaluated. Model importers try to load them from the source file, so
# they're usually already there.
#
# This flag may not be specified together with
# #aiProcess_GenNormals. There's a configuration option,
# <tt>#AI_CONFIG_PP_GSN_MAX_SMOOTHING_ANGLE<tt> which allows you to specify
# an angle maximum for the normal smoothing algorithm. Normals exceeding
# this limit are not smoothed, resulting in a 'hard' seam between two faces.
# Using a decent angle here (e.g. 80 degrees) results in very good visual
# appearance.
#
aiProcess_GenSmoothNormals = 0x40
## <hr>Splits large meshes into smaller sub-meshes.
#
# This is quite useful for real-time rendering, where the number of triangles
# which can be maximally processed in a single draw-call is limited
# by the video driver/hardware. The maximum vertex buffer is usually limited
# too. Both requirements can be met with this step: you may specify both a
# triangle and vertex limit for a single mesh.
#
# The split limits can (and should!) be set through the
# <tt>#AI_CONFIG_PP_SLM_VERTEX_LIMIT<tt> and <tt>#AI_CONFIG_PP_SLM_TRIANGLE_LIMIT<tt>
# settings. The default values are <tt>#AI_SLM_DEFAULT_MAX_VERTICES<tt> and
# <tt>#AI_SLM_DEFAULT_MAX_TRIANGLES<tt>.
#
# Note that splitting is generally a time-consuming task, but only if there's
# something to split. The use of this step is recommended for most users.
#
aiProcess_SplitLargeMeshes = 0x80
## <hr>Removes the node graph and pre-transforms all vertices with
# the local transformation matrices of their nodes.
#
# The output scene still contains nodes, however there is only a
# root node with children, each one referencing only one mesh,
# and each mesh referencing one material. For rendering, you can
# simply render all meshes in order - you don't need to pay
# attention to local transformations and the node hierarchy.
# Animations are removed during this step.
# This step is intended for applications without a scenegraph.
# The step CAN cause some problems: if e.g. a mesh of the asset
# contains normals and another, using the same material index, does not,
# they will be brought together, but the first meshes's part of
# the normal list is zeroed. However, these artifacts are rare.
# @note The <tt>#AI_CONFIG_PP_PTV_NORMALIZE<tt> configuration property
# can be set to normalize the scene's spatial dimension to the -1...1
# range.
#
aiProcess_PreTransformVertices = 0x100
## <hr>Limits the number of bones simultaneously affecting a single vertex
# to a maximum value.
#
# If any vertex is affected by more than the maximum number of bones, the least
# important vertex weights are removed and the remaining vertex weights are
# renormalized so that the weights still sum up to 1.
# The default bone weight limit is 4 (defined as <tt>#AI_LMW_MAX_WEIGHTS<tt> in
# config.h), but you can use the <tt>#AI_CONFIG_PP_LBW_MAX_WEIGHTS<tt> setting to
# supply your own limit to the post processing step.
#
# If you intend to perform the skinning in hardware, this post processing
# step might be of interest to you.
#
aiProcess_LimitBoneWeights = 0x200
## <hr>Validates the imported scene data structure.
# This makes sure that all indices are valid, all animations and
# bones are linked correctly, all material references are correct .. etc.
#
# It is recommended that you capture Assimp's log output if you use this flag,
# so you can easily find out what's wrong if a file fails the
# validation. The validator is quite strict and will find #all#
# inconsistencies in the data structure... It is recommended that plugin
# developers use it to debug their loaders. There are two types of
# validation failures:
# <ul>
# <li>Error: There's something wrong with the imported data. Further
# postprocessing is not possible and the data is not usable at all.
# The import fails. #Importer::GetErrorString() or #aiGetErrorString()
# carry the error message around.<li>
# <li>Warning: There are some minor issues (e.g. 1000000 animation
# keyframes with the same time), but further postprocessing and use
# of the data structure is still safe. Warning details are written
# to the log file, <tt>#AI_SCENE_FLAGS_VALIDATION_WARNING<tt> is set
# in #aiScene::mFlags<li>
# <ul>
#
# This post-processing step is not time-consuming. Its use is not
# compulsory, but recommended.
#
aiProcess_ValidateDataStructure = 0x400
## <hr>Reorders triangles for better vertex cache locality.
#
# The step tries to improve the ACMR (average post-transform vertex cache
# miss ratio) for all meshes. The implementation runs in O(n) and is
# roughly based on the 'tipsify' algorithm (see <a href="
# http://www.cs.princeton.edu/gfx/pubs/Sander_2007_%3ETR/tipsy.pdf">this
# paper</a>).
#
# If you intend to render huge models in hardware, this step might
# be of interest to you. The <tt>#AI_CONFIG_PP_ICL_PTCACHE_SIZE<tt> config
# setting can be used to fine-tune the cache optimization.
#
aiProcess_ImproveCacheLocality = 0x800
## <hr>Searches for redundant/unreferenced materials and removes them.
#
# This is especially useful in combination with the
# #aiProcess_PretransformVertices and #aiProcess_OptimizeMeshes flags.
# Both join small meshes with equal characteristics, but they can't do
# their work if two meshes have different materials. Because several
# material settings are lost during Assimp's import filters,
# (and because many exporters don't check for redundant materials), huge
# models often have materials which are are defined several times with
# exactly the same settings.
#
# Several material settings not contributing to the final appearance of
# a surface are ignored in all comparisons (e.g. the material name).
# So, if you're passing additional information through the
# content pipeline (probably using #magic# material names), don't
# specify this flag. Alternatively take a look at the
# <tt>#AI_CONFIG_PP_RRM_EXCLUDE_LIST<tt> setting.
#
aiProcess_RemoveRedundantMaterials = 0x1000
## <hr>This step tries to determine which meshes have normal vectors
# that are facing inwards and inverts them.
#
# The algorithm is simple but effective:
# the bounding box of all vertices + their normals is compared against
# the volume of the bounding box of all vertices without their normals.
# This works well for most objects, problems might occur with planar
# surfaces. However, the step tries to filter such cases.
# The step inverts all in-facing normals. Generally it is recommended
# to enable this step, although the result is not always correct.
#
aiProcess_FixInfacingNormals = 0x2000
## <hr>This step splits meshes with more than one primitive type in
# homogeneous sub-meshes.
#
# The step is executed after the triangulation step. After the step
# returns, just one bit is set in aiMesh::mPrimitiveTypes. This is
# especially useful for real-time rendering where point and line
# primitives are often ignored or rendered separately.
# You can use the <tt>#AI_CONFIG_PP_SBP_REMOVE<tt> option to specify which
# primitive types you need. This can be used to easily exclude
# lines and points, which are rarely used, from the import.
#
aiProcess_SortByPType = 0x8000
## <hr>This step searches all meshes for degenerate primitives and
# converts them to proper lines or points.
#
# A face is 'degenerate' if one or more of its points are identical.
# To have the degenerate stuff not only detected and collapsed but
# removed, try one of the following procedures:
# <br><b>1.<b> (if you support lines and points for rendering but don't
# want the degenerates)<br>
# <ul>
# <li>Specify the #aiProcess_FindDegenerates flag.
# <li>
# <li>Set the <tt>AI_CONFIG_PP_FD_REMOVE<tt> option to 1. This will
# cause the step to remove degenerate triangles from the import
# as soon as they're detected. They won't pass any further
# pipeline steps.
# <li>
# <ul>
# <br><b>2.<b>(if you don't support lines and points at all)<br>
# <ul>
# <li>Specify the #aiProcess_FindDegenerates flag.
# <li>
# <li>Specify the #aiProcess_SortByPType flag. This moves line and
# point primitives to separate meshes.
# <li>
# <li>Set the <tt>AI_CONFIG_PP_SBP_REMOVE<tt> option to
# @code aiPrimitiveType_POINTS | aiPrimitiveType_LINES
# @endcode to cause SortByPType to reject point
# and line meshes from the scene.
# <li>
# <ul>
# @note Degenerate polygons are not necessarily evil and that's why
# they're not removed by default. There are several file formats which
# don't support lines or points, and some exporters bypass the
# format specification and write them as degenerate triangles instead.
#
aiProcess_FindDegenerates = 0x10000
## <hr>This step searches all meshes for invalid data, such as zeroed
# normal vectors or invalid UV coords and removes/fixes them. This is
# intended to get rid of some common exporter errors.
#
# This is especially useful for normals. If they are invalid, and
# the step recognizes this, they will be removed and can later
# be recomputed, i.e. by the #aiProcess_GenSmoothNormals flag.<br>
# The step will also remove meshes that are infinitely small and reduce
# animation tracks consisting of hundreds if redundant keys to a single
# key. The <tt>AI_CONFIG_PP_FID_ANIM_ACCURACY<tt> config property decides
# the accuracy of the check for duplicate animation tracks.
#
aiProcess_FindInvalidData = 0x20000
## <hr>This step converts non-UV mappings (such as spherical or
# cylindrical mapping) to proper texture coordinate channels.
#
# Most applications will support UV mapping only, so you will
# probably want to specify this step in every case. Note that Assimp is not
# always able to match the original mapping implementation of the
# 3D app which produced a model perfectly. It's always better to let the
# modelling app compute the UV channels - 3ds max, Maya, Blender,
# LightWave, and Modo do this for example.
#
# @note If this step is not requested, you'll need to process the
# <tt>#AI_MATKEY_MAPPING<tt> material property in order to display all assets
# properly.
#
aiProcess_GenUVCoords = 0x40000
## <hr>This step applies per-texture UV transformations and bakes
# them into stand-alone texture coordinate channels.
#
# UV transformations are specified per-texture - see the
# <tt>#AI_MATKEY_UVTRANSFORM<tt> material key for more information.
# This step processes all textures with
# transformed input UV coordinates and generates a new (pre-transformed) UV channel
# which replaces the old channel. Most applications won't support UV
# transformations, so you will probably want to specify this step.
#
# @note UV transformations are usually implemented in real-time apps by
# transforming texture coordinates at vertex shader stage with a 3x3
# (homogeneous) transformation matrix.
#
aiProcess_TransformUVCoords = 0x80000
## <hr>This step searches for duplicate meshes and replaces them
# with references to the first mesh.
#
# This step takes a while, so don't use it if speed is a concern.
# Its main purpose is to workaround the fact that many export
# file formats don't support instanced meshes, so exporters need to
# duplicate meshes. This step removes the duplicates again. Please
# note that Assimp does not currently support per-node material
# assignment to meshes, which means that identical meshes with
# different materials are currently #not# joined, although this is
# planned for future versions.
#
aiProcess_FindInstances = 0x100000
## <hr>A postprocessing step to reduce the number of meshes.
#
# This will, in fact, reduce the number of draw calls.
#
# This is a very effective optimization and is recommended to be used
# together with #aiProcess_OptimizeGraph, if possible. The flag is fully
# compatible with both #aiProcess_SplitLargeMeshes and #aiProcess_SortByPType.
#
aiProcess_OptimizeMeshes = 0x200000
## <hr>A postprocessing step to optimize the scene hierarchy.
#
# Nodes without animations, bones, lights or cameras assigned are
# collapsed and joined.
#
# Node names can be lost during this step. If you use special 'tag nodes'
# to pass additional information through your content pipeline, use the
# <tt>#AI_CONFIG_PP_OG_EXCLUDE_LIST<tt> setting to specify a list of node
# names you want to be kept. Nodes matching one of the names in this list won't
# be touched or modified.
#
# Use this flag with caution. Most simple files will be collapsed to a
# single node, so complex hierarchies are usually completely lost. This is not
# useful for editor environments, but probably a very effective
# optimization if you just want to get the model data, convert it to your
# own format, and render it as fast as possible.
#
# This flag is designed to be used with #aiProcess_OptimizeMeshes for best
# results.
#
# @note 'Crappy' scenes with thousands of extremely small meshes packed
# in deeply nested nodes exist for almost all file formats.
# #aiProcess_OptimizeMeshes in combination with #aiProcess_OptimizeGraph
# usually fixes them all and makes them renderable.
#
aiProcess_OptimizeGraph = 0x400000
## <hr>This step flips all UV coordinates along the y-axis and adjusts
# material settings and bitangents accordingly.
#
# <b>Output UV coordinate system:<b>
# @code
# 0y|0y ---------- 1x|0y
# | |
# | |
# | |
# 0x|1y ---------- 1x|1y
# @endcode
#
# You'll probably want to consider this flag if you use Direct3D for
# rendering. The #aiProcess_ConvertToLeftHanded flag supersedes this
# setting and bundles all conversions typically required for D3D-based
# applications.
#
aiProcess_FlipUVs = 0x800000
## <hr>This step adjusts the output face winding order to be CW.
#
# The default face winding order is counter clockwise (CCW).
#
# <b>Output face order:<b>
# @code
# x2
#
# x0
# x1
# @endcode
#
aiProcess_FlipWindingOrder = 0x1000000
## <hr>This step splits meshes with many bones into sub-meshes so that each
# sub-mesh has fewer or as many bones as a given limit.
#
aiProcess_SplitByBoneCount = 0x2000000
## <hr>This step removes bones losslessly or according to some threshold.
#
# In some cases (i.e. formats that require it) exporters are forced to
# assign dummy bone weights to otherwise static meshes assigned to
# animated meshes. Full, weight-based skinning is expensive while
# animating nodes is extremely cheap, so this step is offered to clean up
# the data in that regard.
#
# Use <tt>#AI_CONFIG_PP_DB_THRESHOLD<tt> to control this.
# Use <tt>#AI_CONFIG_PP_DB_ALL_OR_NONE<tt> if you want bones removed if and
# only if all bones within the scene qualify for removal.
#
aiProcess_Debone = 0x4000000
aiProcess_GenEntityMeshes = 0x100000
aiProcess_OptimizeAnimations = 0x200000
aiProcess_FixTexturePaths = 0x200000
aiProcess_EmbedTextures = 0x10000000  # (stray trailing comma removed; it silently made this a 1-tuple)
## @def aiProcess_ConvertToLeftHanded
# @brief Shortcut flag for Direct3D-based applications.
#
# Supersedes the #aiProcess_MakeLeftHanded and #aiProcess_FlipUVs and
# #aiProcess_FlipWindingOrder flags.
# The output data matches Direct3D's conventions: left-handed geometry, upper-left
# origin for UV coordinates and finally clockwise face order, suitable for CCW culling.
#
# @deprecated
#
aiProcess_ConvertToLeftHanded = ( \
aiProcess_MakeLeftHanded | \
aiProcess_FlipUVs | \
aiProcess_FlipWindingOrder | \
0 )
## @def aiProcessPreset_TargetRealtimeUse_Fast
# @brief Default postprocess configuration optimizing the data for real-time rendering.
#
# Applications would want to use this preset to load models on end-user PCs,
# maybe for direct use in game.
#
# If you're using DirectX, don't forget to combine this value with
# the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations
# in your application apply the #aiProcess_TransformUVCoords step, too.
# @note Please take the time to read the docs for the steps enabled by this preset.
# Some of them offer further configurable properties, while some of them might not be of
# use for you so it might be better to not specify them.
#
aiProcessPreset_TargetRealtime_Fast = ( \
aiProcess_CalcTangentSpace | \
aiProcess_GenNormals | \
aiProcess_JoinIdenticalVertices | \
aiProcess_Triangulate | \
aiProcess_GenUVCoords | \
aiProcess_SortByPType | \
0 )
## @def aiProcessPreset_TargetRealtime_Quality
# @brief Default postprocess configuration optimizing the data for real-time rendering.
#
# Unlike #aiProcessPreset_TargetRealtime_Fast, this configuration
# performs some extra optimizations to improve rendering speed and
# to minimize memory usage. It could be a good choice for a level editor
# environment where import speed is not so important.
#
# If you're using DirectX, don't forget to combine this value with
# the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations
# in your application apply the #aiProcess_TransformUVCoords step, too.
# @note Please take the time to read the docs for the steps enabled by this preset.
# Some of them offer further configurable properties, while some of them might not be
# of use for you so it might be better to not specify them.
#
aiProcessPreset_TargetRealtime_Quality = ( \
aiProcess_CalcTangentSpace | \
aiProcess_GenSmoothNormals | \
aiProcess_JoinIdenticalVertices | \
aiProcess_ImproveCacheLocality | \
aiProcess_LimitBoneWeights | \
aiProcess_RemoveRedundantMaterials | \
aiProcess_SplitLargeMeshes | \
aiProcess_Triangulate | \
aiProcess_GenUVCoords | \
aiProcess_SortByPType | \
aiProcess_FindDegenerates | \
aiProcess_FindInvalidData | \
0 )
## @def aiProcessPreset_TargetRealtime_MaxQuality
# @brief Default postprocess configuration optimizing the data for real-time rendering.
#
# This preset enables almost every optimization step to achieve perfectly
# optimized data. It's your choice for level editor environments where import speed
# is not important.
#
# If you're using DirectX, don't forget to combine this value with
# the #aiProcess_ConvertToLeftHanded step. If you don't support UV transformations
# in your application, apply the #aiProcess_TransformUVCoords step, too.
# @note Please take the time to read the docs for the steps enabled by this preset.
# Some of them offer further configurable properties, while some of them might not be
# of use for you so it might be better to not specify them.
#
aiProcessPreset_TargetRealtime_MaxQuality = ( \
aiProcessPreset_TargetRealtime_Quality | \
aiProcess_FindInstances | \
aiProcess_ValidateDataStructure | \
aiProcess_OptimizeMeshes | \
0 )
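# --- Hedged usage sketch (editor's addition, not part of pyassimp) ---
# The flags above are plain integers meant to be OR-ed together into a mask
# that is handed to the importer (e.g. pyassimp.load's `processing`
# argument). Pure-arithmetic demo, runnable on its own:
if __name__ == "__main__":
    flags = aiProcessPreset_TargetRealtime_Quality | aiProcess_FlipUVs
    assert flags & aiProcess_Triangulate  # the preset already triangulates
    print(hex(flags))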
| mit |
chengdh/openerp-ktv | openerp/pychart/line_style.py | 15 | 5786 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2000-2005 by Yasushi Saito ([email protected])
#
# Jockey is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any
# later version.
#
# Jockey is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
import color
import pychart_util
import chart_object
import object_set
import theme
import line_style_doc
from pychart_types import *
from types import *
_keys = {
"width" : (UnitType, theme.default_line_width, "Width of the line, in points."),
"color": (color.T, color.default, "The color of the line."),
"dash" : (TupleType, None,
"""The value
of None will draw a solid line. Otherwise, this
attribute specifies the style of dashed lines.
The 2N'th value specifies the length of the line (in points),
and 2N+1'th value specifies the length of the blank.
For example, the dash style of (3,2,4,1) draws a dashed line that
looks like @samp{---__----_---__----_...}.
"""),
"cap_style": (IntType, 0,
"""Defines the style of the tip of the line segment.
0: butt cap (square cutoff, with no projection beyond),
1: round cap (arc), 2: projecting square cap
(square cutoff, but the line extends half the line width).
See also Postscript/PDF reference manual."""),
"join_style": (IntType, 0,
"""Join style. 0: Miter join (sharp, pointed corners),
1: round join (rounded corners),
2: bevel join (flattened corners).
See also Postscript/PDF reference manual.""")
}
class T(chart_object.T):
__doc__ = line_style_doc.doc
keys = _keys
##AUTOMATICALLY GENERATED
##END AUTOMATICALLY GENERATED
def __str__(self):
s = name_table().lookup(self)
if s:
return s
return "<linestyle: width=%s, color=%s, dash=%s, cap=%d, join=%d>" \
% (self.width, self.color, self.dash, self.cap_style, self.join_style)
default = T(color=color.default)
dash1 = 1.5,1.5 # - - - -
dash2 = 5,2,5,2 # -- -- -- --
dash3 = 1,1
black = T(color=color.black)
black_dash1 = T(color=color.black, dash=dash1)
black_dash2 = T(color=color.black, dash=dash2)
black_dash3 = T(color=color.black, dash=dash3)
gray70 = T(color=color.gray70)
gray70_dash1 = T(color=color.gray70, dash=dash1)
gray70_dash2 = T(color=color.gray70, dash=dash2)
gray70_dash3 = T(color=color.gray70, dash=dash3)
gray10 = T(color=color.gray10)
gray10_dash1 = T(color=color.gray10, dash=dash1)
gray10_dash2 = T(color=color.gray10, dash=dash2)
gray10_dash3 = T(color=color.gray10, dash=dash3)
gray50 = T(color=color.gray50)
gray50_dash1 = T(color=color.gray50, dash=dash1)
gray50_dash2 = T(color=color.gray50, dash=dash2)
gray50_dash3 = T(color=color.gray50, dash=dash3)
gray60 = T(color=color.gray60)
gray60_dash1 = T(color=color.gray60, dash=dash1)
gray60_dash2 = T(color=color.gray60, dash=dash2)
gray60_dash3 = T(color=color.gray60, dash=dash3)
gray90 = T(color=color.gray90)
gray90_dash1 = T(color=color.gray90, dash=dash1)
gray90_dash2 = T(color=color.gray90, dash=dash2)
gray90_dash3 = T(color=color.gray90, dash=dash3)
gray30 = T(color=color.gray30)
gray30_dash1 = T(color=color.gray30, dash=dash1)
gray30_dash2 = T(color=color.gray30, dash=dash2)
gray30_dash3 = T(color=color.gray30, dash=dash3)
white = T(color=color.white)
default = black
red = T(color=color.red)
darkblue = T(color=color.darkblue)
darkseagreen = T(color=color.darkseagreen)
darkkhaki = T(color = color.darkkhaki)
blue = T(color=color.blue)
green = T(color=color.green)
red_dash1 = T(color=color.red, dash=dash1)
darkblue_dash1 = T(color=color.darkblue, dash=dash1)
darkseagreen_dash1 = T(color=color.darkseagreen, dash=dash1)
darkkhaki_dash1 = T(color=color.darkkhaki, dash=dash1)
red_dash2 = T(color=color.red, dash=dash2)
darkblue_dash2 = T(color=color.darkblue, dash=dash2)
darkseagreen_dash2 = T(color=color.darkseagreen, dash=dash2)
darkkhaki_dash2 = T(color=color.darkkhaki, dash=dash2)
standards = None
_name_table = None
def init():
global standards, _name_table
standards = object_set.T()
if theme.use_color:
standards.add(black, red, darkblue, gray70, darkseagreen,
darkkhaki, gray30,
black_dash1, red_dash1, darkblue_dash1, gray70_dash1,
darkseagreen_dash1, darkkhaki_dash1, gray30_dash1,
black_dash2, red_dash2, darkblue_dash2, gray70_dash2,
darkseagreen_dash2, darkkhaki_dash2, gray30_dash2)
else:
standards.add(black, black_dash1, black_dash2,
gray70, gray70_dash1, gray70_dash2,
gray10, gray10_dash1, gray10_dash2,
gray50, gray50_dash1, gray50_dash2,
gray90, gray90_dash1, gray90_dash2,
gray30, gray30_dash1, gray30_dash2,
black_dash3,
gray70_dash3, gray10_dash3, gray50_dash3, gray90_dash3)
for style in standards.list():
style.width = theme.default_line_width
_name_table = None
def name_table():
global _name_table
if not _name_table:
_name_table = pychart_util.symbol_lookup_table(globals(), standards)
return _name_table
init()
theme.add_reinitialization_hook(init)
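# --- Hedged usage sketch (editor's addition, not part of pychart) ---
# Constructing a one-off style: 3pt dashes with 2pt gaps, rounded caps and
# joins. The numeric values are illustrative only.
if __name__ == "__main__":
    custom = T(color=color.black, width=1.2, dash=(3, 2),
               cap_style=1, join_style=1)
    print(custom)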
| agpl-3.0 |
py4a/peewee | playhouse/read_slave.py | 25 | 1449 | """
Support for using a dedicated read-slave. The read database is specified as a
Model.Meta option, and will be used for SELECT statements:
master = PostgresqlDatabase('master')
read_slave = PostgresqlDatabase('read_slave')
class BaseModel(ReadSlaveModel):
class Meta:
database = master
read_slaves = [read_slave] # This database will be used for SELECTs.
# Now define your models as you would normally.
class User(BaseModel):
username = CharField()
# To force a SELECT on the master database, you can instantiate the SelectQuery
# by hand:
master_select = SelectQuery(User).where(...)
"""
from peewee import *
class ReadSlaveModel(Model):
@classmethod
def _get_read_database(cls):
if not getattr(cls._meta, 'read_slaves', None):
return cls._meta.database
current_idx = getattr(cls, '_read_slave_idx', -1)
cls._read_slave_idx = (current_idx + 1) % len(cls._meta.read_slaves)
return cls._meta.read_slaves[cls._read_slave_idx]
@classmethod
def select(cls, *args, **kwargs):
query = super(ReadSlaveModel, cls).select(*args, **kwargs)
query.database = cls._get_read_database()
return query
@classmethod
def raw(cls, *args, **kwargs):
query = super(ReadSlaveModel, cls).raw(*args, **kwargs)
if query._sql.lower().startswith('select'):
query.database = cls._get_read_database()
return query
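# --- Hedged sketch (editor's addition, not part of playhouse) ---
# Standalone illustration of the round-robin index arithmetic used by
# _get_read_database(); the slave labels are hypothetical stand-ins for
# real database objects.
if __name__ == "__main__":
    read_slaves = ['slave-a', 'slave-b', 'slave-c']
    idx = -1
    for _ in range(5):
        idx = (idx + 1) % len(read_slaves)
        print(read_slaves[idx])  # cycles slave-a, slave-b, slave-c, slave-a, ...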
| mit |
georgeke/caravel | caravel/data/countries.py | 20 | 51525 | """This module contains data related to countries and is used for geo mapping"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
countries = [
{
"name": "Angola",
"area": 1246700,
"cioc": "ANG",
"cca2": "AO",
"capital": "Luanda",
"lat": -12.5,
"lng": 18.5,
"cca3": "AGO"
},
{
"name": "Algeria",
"area": 2381741,
"cioc": "ALG",
"cca2": "DZ",
"capital": "Algiers",
"lat": 28,
"lng": 3,
"cca3": "DZA"
},
{
"name": "Egypt",
"area": 1002450,
"cioc": "EGY",
"cca2": "EG",
"capital": "Cairo",
"lat": 27,
"lng": 30,
"cca3": "EGY"
},
{
"name": "Bangladesh",
"area": 147570,
"cioc": "BAN",
"cca2": "BD",
"capital": "Dhaka",
"lat": 24,
"lng": 90,
"cca3": "BGD"
},
{
"name": "Niger",
"area": 1267000,
"cioc": "NIG",
"cca2": "NE",
"capital": "Niamey",
"lat": 16,
"lng": 8,
"cca3": "NER"
},
{
"name": "Liechtenstein",
"area": 160,
"cioc": "LIE",
"cca2": "LI",
"capital": "Vaduz",
"lat": 47.26666666,
"lng": 9.53333333,
"cca3": "LIE"
},
{
"name": "Namibia",
"area": 825615,
"cioc": "NAM",
"cca2": "NA",
"capital": "Windhoek",
"lat": -22,
"lng": 17,
"cca3": "NAM"
},
{
"name": "Bulgaria",
"area": 110879,
"cioc": "BUL",
"cca2": "BG",
"capital": "Sofia",
"lat": 43,
"lng": 25,
"cca3": "BGR"
},
{
"name": "Bolivia",
"area": 1098581,
"cioc": "BOL",
"cca2": "BO",
"capital": "Sucre",
"lat": -17,
"lng": -65,
"cca3": "BOL"
},
{
"name": "Ghana",
"area": 238533,
"cioc": "GHA",
"cca2": "GH",
"capital": "Accra",
"lat": 8,
"lng": -2,
"cca3": "GHA"
},
{
"name": "Cocos (Keeling) Islands",
"area": 14,
"cioc": "",
"cca2": "CC",
"capital": "West Island",
"lat": -12.5,
"lng": 96.83333333,
"cca3": "CCK"
},
{
"name": "Pakistan",
"area": 881912,
"cioc": "PAK",
"cca2": "PK",
"capital": "Islamabad",
"lat": 30,
"lng": 70,
"cca3": "PAK"
},
{
"name": "Cape Verde",
"area": 4033,
"cioc": "CPV",
"cca2": "CV",
"capital": "Praia",
"lat": 16,
"lng": -24,
"cca3": "CPV"
},
{
"name": "Jordan",
"area": 89342,
"cioc": "JOR",
"cca2": "JO",
"capital": "Amman",
"lat": 31,
"lng": 36,
"cca3": "JOR"
},
{
"name": "Liberia",
"area": 111369,
"cioc": "LBR",
"cca2": "LR",
"capital": "Monrovia",
"lat": 6.5,
"lng": -9.5,
"cca3": "LBR"
},
{
"name": "Libya",
"area": 1759540,
"cioc": "LBA",
"cca2": "LY",
"capital": "Tripoli",
"lat": 25,
"lng": 17,
"cca3": "LBY"
},
{
"name": "Malaysia",
"area": 330803,
"cioc": "MAS",
"cca2": "MY",
"capital": "Kuala Lumpur",
"lat": 2.5,
"lng": 112.5,
"cca3": "MYS"
},
{
"name": "Dominican Republic",
"area": 48671,
"cioc": "DOM",
"cca2": "DO",
"capital": "Santo Domingo",
"lat": 19,
"lng": -70.66666666,
"cca3": "DOM"
},
{
"name": "Puerto Rico",
"area": 8870,
"cioc": "PUR",
"cca2": "PR",
"capital": "San Juan",
"lat": 18.25,
"lng": -66.5,
"cca3": "PRI"
},
{
"name": "Mayotte",
"area": 374,
"cioc": "",
"cca2": "YT",
"capital": "Mamoudzou",
"lat": -12.83333333,
"lng": 45.16666666,
"cca3": "MYT"
},
{
"name": "North Korea",
"area": 120538,
"cioc": "PRK",
"cca2": "KP",
"capital": "Pyongyang",
"lat": 40,
"lng": 127,
"cca3": "PRK"
},
{
"name": "Palestine",
"area": 6220,
"cioc": "PLE",
"cca2": "PS",
"capital": "Ramallah",
"lat": 31.9,
"lng": 35.2,
"cca3": "PSE"
},
{
"name": "Tanzania",
"area": 945087,
"cioc": "TAN",
"cca2": "TZ",
"capital": "Dodoma",
"lat": -6,
"lng": 35,
"cca3": "TZA"
},
{
"name": "Botswana",
"area": 582000,
"cioc": "BOT",
"cca2": "BW",
"capital": "Gaborone",
"lat": -22,
"lng": 24,
"cca3": "BWA"
},
{
"name": "Cambodia",
"area": 181035,
"cioc": "CAM",
"cca2": "KH",
"capital": "Phnom Penh",
"lat": 13,
"lng": 105,
"cca3": "KHM"
},
{
"name": "Nicaragua",
"area": 130373,
"cioc": "NCA",
"cca2": "NI",
"capital": "Managua",
"lat": 13,
"lng": -85,
"cca3": "NIC"
},
{
"name": "Trinidad and Tobago",
"area": 5130,
"cioc": "TTO",
"cca2": "TT",
"capital": "Port of Spain",
"lat": 11,
"lng": -61,
"cca3": "TTO"
},
{
"name": "Ethiopia",
"area": 1104300,
"cioc": "ETH",
"cca2": "ET",
"capital": "Addis Ababa",
"lat": 8,
"lng": 38,
"cca3": "ETH"
},
{
"name": "Paraguay",
"area": 406752,
"cioc": "PAR",
"cca2": "PY",
"capital": "Asuncion",
"lat": -23,
"lng": -58,
"cca3": "PRY"
},
{
"name": "Hong Kong",
"area": 1104,
"cioc": "HKG",
"cca2": "HK",
"capital": "City of Victoria",
"lat": 22.267,
"lng": 114.188,
"cca3": "HKG"
},
{
"name": "Saudi Arabia",
"area": 2149690,
"cioc": "KSA",
"cca2": "SA",
"capital": "Riyadh",
"lat": 25,
"lng": 45,
"cca3": "SAU"
},
{
"name": "Lebanon",
"area": 10452,
"cioc": "LIB",
"cca2": "LB",
"capital": "Beirut",
"lat": 33.83333333,
"lng": 35.83333333,
"cca3": "LBN"
},
{
"name": "Slovenia",
"area": 20273,
"cioc": "SLO",
"cca2": "SI",
"capital": "Ljubljana",
"lat": 46.11666666,
"lng": 14.81666666,
"cca3": "SVN"
},
{
"name": "Burkina Faso",
"area": 272967,
"cioc": "BUR",
"cca2": "BF",
"capital": "Ouagadougou",
"lat": 13,
"lng": -2,
"cca3": "BFA"
},
{
"name": "Switzerland",
"area": 41284,
"cioc": "SUI",
"cca2": "CH",
"capital": "Bern",
"lat": 47,
"lng": 8,
"cca3": "CHE"
},
{
"name": "Mauritania",
"area": 1030700,
"cioc": "MTN",
"cca2": "MR",
"capital": "Nouakchott",
"lat": 20,
"lng": -12,
"cca3": "MRT"
},
{
"name": "Croatia",
"area": 56594,
"cioc": "CRO",
"cca2": "HR",
"capital": "Zagreb",
"lat": 45.16666666,
"lng": 15.5,
"cca3": "HRV"
},
{
"name": "Chile",
"area": 756102,
"cioc": "CHI",
"cca2": "CL",
"capital": "Santiago",
"lat": -30,
"lng": -71,
"cca3": "CHL"
},
{
"name": "China",
"area": 9706961,
"cioc": "CHN",
"cca2": "CN",
"capital": "Beijing",
"lat": 35,
"lng": 105,
"cca3": "CHN"
},
{
"name": "Saint Kitts and Nevis",
"area": 261,
"cioc": "SKN",
"cca2": "KN",
"capital": "Basseterre",
"lat": 17.33333333,
"lng": -62.75,
"cca3": "KNA"
},
{
"name": "Sierra Leone",
"area": 71740,
"cioc": "SLE",
"cca2": "SL",
"capital": "Freetown",
"lat": 8.5,
"lng": -11.5,
"cca3": "SLE"
},
{
"name": "Jamaica",
"area": 10991,
"cioc": "JAM",
"cca2": "JM",
"capital": "Kingston",
"lat": 18.25,
"lng": -77.5,
"cca3": "JAM"
},
{
"name": "San Marino",
"area": 61,
"cioc": "SMR",
"cca2": "SM",
"capital": "City of San Marino",
"lat": 43.76666666,
"lng": 12.41666666,
"cca3": "SMR"
},
{
"name": "Gibraltar",
"area": 6,
"cioc": "",
"cca2": "GI",
"capital": "Gibraltar",
"lat": 36.13333333,
"lng": -5.35,
"cca3": "GIB"
},
{
"name": "Djibouti",
"area": 23200,
"cioc": "DJI",
"cca2": "DJ",
"capital": "Djibouti",
"lat": 11.5,
"lng": 43,
"cca3": "DJI"
},
{
"name": "Guinea",
"area": 245857,
"cioc": "GUI",
"cca2": "GN",
"capital": "Conakry",
"lat": 11,
"lng": -10,
"cca3": "GIN"
},
{
"name": "Finland",
"area": 338424,
"cioc": "FIN",
"cca2": "FI",
"capital": "Helsinki",
"lat": 64,
"lng": 26,
"cca3": "FIN"
},
{
"name": "Uruguay",
"area": 181034,
"cioc": "URU",
"cca2": "UY",
"capital": "Montevideo",
"lat": -33,
"lng": -56,
"cca3": "URY"
},
{
"name": "Thailand",
"area": 513120,
"cioc": "THA",
"cca2": "TH",
"capital": "Bangkok",
"lat": 15,
"lng": 100,
"cca3": "THA"
},
{
"name": "Sao Tome and Principe",
"area": 964,
"cioc": "STP",
"cca2": "ST",
"capital": "Sao Tome",
"lat": 1,
"lng": 7,
"cca3": "STP"
},
{
"name": "Seychelles",
"area": 452,
"cioc": "SEY",
"cca2": "SC",
"capital": "Victoria",
"lat": -4.58333333,
"lng": 55.66666666,
"cca3": "SYC"
},
{
"name": "Nepal",
"area": 147181,
"cioc": "NEP",
"cca2": "NP",
"capital": "Kathmandu",
"lat": 28,
"lng": 84,
"cca3": "NPL"
},
{
"name": "Christmas Island",
"area": 135,
"cioc": "",
"cca2": "CX",
"capital": "Flying Fish Cove",
"lat": -10.5,
"lng": 105.66666666,
"cca3": "CXR"
},
{
"name": "Laos",
"area": 236800,
"cioc": "LAO",
"cca2": "LA",
"capital": "Vientiane",
"lat": 18,
"lng": 105,
"cca3": "LAO"
},
{
"name": "Yemen",
"area": 527968,
"cioc": "YEM",
"cca2": "YE",
"capital": "Sana'a",
"lat": 15,
"lng": 48,
"cca3": "YEM"
},
{
"name": "Bouvet Island",
"area": 49,
"cioc": "",
"cca2": "BV",
"capital": "",
"lat": -54.43333333,
"lng": 3.4,
"cca3": "BVT"
},
{
"name": "South Africa",
"area": 1221037,
"cioc": "RSA",
"cca2": "ZA",
"capital": "Pretoria",
"lat": -29,
"lng": 24,
"cca3": "ZAF"
},
{
"name": "Kiribati",
"area": 811,
"cioc": "KIR",
"cca2": "KI",
"capital": "South Tarawa",
"lat": 1.41666666,
"lng": 173,
"cca3": "KIR"
},
{
"name": "Philippines",
"area": 342353,
"cioc": "PHI",
"cca2": "PH",
"capital": "Manila",
"lat": 13,
"lng": 122,
"cca3": "PHL"
},
{
"name": "Sint Maarten",
"area": 34,
"cioc": "",
"cca2": "SX",
"capital": "Philipsburg",
"lat": 18.033333,
"lng": -63.05,
"cca3": "SXM"
},
{
"name": "Romania",
"area": 238391,
"cioc": "ROU",
"cca2": "RO",
"capital": "Bucharest",
"lat": 46,
"lng": 25,
"cca3": "ROU"
},
{
"name": "United States Virgin Islands",
"area": 347,
"cioc": "ISV",
"cca2": "VI",
"capital": "Charlotte Amalie",
"lat": 18.35,
"lng": -64.933333,
"cca3": "VIR"
},
{
"name": "Syria",
"area": 185180,
"cioc": "SYR",
"cca2": "SY",
"capital": "Damascus",
"lat": 35,
"lng": 38,
"cca3": "SYR"
},
{
"name": "Macau",
"area": 30,
"cioc": "",
"cca2": "MO",
"capital": "",
"lat": 22.16666666,
"lng": 113.55,
"cca3": "MAC"
},
{
"name": "Saint Martin",
"area": 53,
"cioc": "",
"cca2": "MF",
"capital": "Marigot",
"lat": 18.08333333,
"lng": -63.95,
"cca3": "MAF"
},
{
"name": "Malta",
"area": 316,
"cioc": "MLT",
"cca2": "MT",
"capital": "Valletta",
"lat": 35.83333333,
"lng": 14.58333333,
"cca3": "MLT"
},
{
"name": "Kazakhstan",
"area": 2724900,
"cioc": "KAZ",
"cca2": "KZ",
"capital": "Astana",
"lat": 48,
"lng": 68,
"cca3": "KAZ"
},
{
"name": "Turks and Caicos Islands",
"area": 948,
"cioc": "",
"cca2": "TC",
"capital": "Cockburn Town",
"lat": 21.75,
"lng": -71.58333333,
"cca3": "TCA"
},
{
"name": "French Polynesia",
"area": 4167,
"cioc": "",
"cca2": "PF",
"capital": "Papeete",
"lat": -15,
"lng": -140,
"cca3": "PYF"
},
{
"name": "Niue",
"area": 260,
"cioc": "",
"cca2": "NU",
"capital": "Alofi",
"lat": -19.03333333,
"lng": -169.86666666,
"cca3": "NIU"
},
{
"name": "Dominica",
"area": 751,
"cioc": "DMA",
"cca2": "DM",
"capital": "Roseau",
"lat": 15.41666666,
"lng": -61.33333333,
"cca3": "DMA"
},
{
"name": "Benin",
"area": 112622,
"cioc": "BEN",
"cca2": "BJ",
"capital": "Porto-Novo",
"lat": 9.5,
"lng": 2.25,
"cca3": "BEN"
},
{
"name": "French Guiana",
"area": 83534,
"cioc": "",
"cca2": "GF",
"capital": "Cayenne",
"lat": 4,
"lng": -53,
"cca3": "GUF"
},
{
"name": "Belgium",
"area": 30528,
"cioc": "BEL",
"cca2": "BE",
"capital": "Brussels",
"lat": 50.83333333,
"lng": 4,
"cca3": "BEL"
},
{
"name": "Montserrat",
"area": 102,
"cioc": "",
"cca2": "MS",
"capital": "Plymouth",
"lat": 16.75,
"lng": -62.2,
"cca3": "MSR"
},
{
"name": "Togo",
"area": 56785,
"cioc": "TOG",
"cca2": "TG",
"capital": "Lome",
"lat": 8,
"lng": 1.16666666,
"cca3": "TGO"
},
{
"name": "Germany",
"area": 357114,
"cioc": "GER",
"cca2": "DE",
"capital": "Berlin",
"lat": 51,
"lng": 9,
"cca3": "DEU"
},
{
"name": "Guam",
"area": 549,
"cioc": "GUM",
"cca2": "GU",
"capital": "Hagatna",
"lat": 13.46666666,
"lng": 144.78333333,
"cca3": "GUM"
},
{
"name": "Sri Lanka",
"area": 65610,
"cioc": "SRI",
"cca2": "LK",
"capital": "Colombo",
"lat": 7,
"lng": 81,
"cca3": "LKA"
},
{
"name": "South Sudan",
"area": 619745,
"cioc": "",
"cca2": "SS",
"capital": "Juba",
"lat": 7,
"lng": 30,
"cca3": "SSD"
},
{
"name": "Falkland Islands",
"area": 12173,
"cioc": "",
"cca2": "FK",
"capital": "Stanley",
"lat": -51.75,
"lng": -59,
"cca3": "FLK"
},
{
"name": "United Kingdom",
"area": 242900,
"cioc": "GBR",
"cca2": "GB",
"capital": "London",
"lat": 54,
"lng": -2,
"cca3": "GBR"
},
{
"name": "Guyana",
"area": 214969,
"cioc": "GUY",
"cca2": "GY",
"capital": "Georgetown",
"lat": 5,
"lng": -59,
"cca3": "GUY"
},
{
"name": "Costa Rica",
"area": 51100,
"cioc": "CRC",
"cca2": "CR",
"capital": "San Jose",
"lat": 10,
"lng": -84,
"cca3": "CRI"
},
{
"name": "Cameroon",
"area": 475442,
"cioc": "CMR",
"cca2": "CM",
"capital": "Yaounde",
"lat": 6,
"lng": 12,
"cca3": "CMR"
},
{
"name": "Morocco",
"area": 446550,
"cioc": "MAR",
"cca2": "MA",
"capital": "Rabat",
"lat": 32,
"lng": -5,
"cca3": "MAR"
},
{
"name": "Northern Mariana Islands",
"area": 464,
"cioc": "",
"cca2": "MP",
"capital": "Saipan",
"lat": 15.2,
"lng": 145.75,
"cca3": "MNP"
},
{
"name": "Lesotho",
"area": 30355,
"cioc": "LES",
"cca2": "LS",
"capital": "Maseru",
"lat": -29.5,
"lng": 28.5,
"cca3": "LSO"
},
{
"name": "Hungary",
"area": 93028,
"cioc": "HUN",
"cca2": "HU",
"capital": "Budapest",
"lat": 47,
"lng": 20,
"cca3": "HUN"
},
{
"name": "Turkmenistan",
"area": 488100,
"cioc": "TKM",
"cca2": "TM",
"capital": "Ashgabat",
"lat": 40,
"lng": 60,
"cca3": "TKM"
},
{
"name": "Suriname",
"area": 163820,
"cioc": "SUR",
"cca2": "SR",
"capital": "Paramaribo",
"lat": 4,
"lng": -56,
"cca3": "SUR"
},
{
"name": "Netherlands",
"area": 41850,
"cioc": "NED",
"cca2": "NL",
"capital": "Amsterdam",
"lat": 52.5,
"lng": 5.75,
"cca3": "NLD"
},
{
"name": "Bermuda",
"area": 54,
"cioc": "BER",
"cca2": "BM",
"capital": "Hamilton",
"lat": 32.33333333,
"lng": -64.75,
"cca3": "BMU"
},
{
"name": "Heard Island and McDonald Islands",
"area": 412,
"cioc": "",
"cca2": "HM",
"capital": "",
"lat": -53.1,
"lng": 72.51666666,
"cca3": "HMD"
},
{
"name": "Chad",
"area": 1284000,
"cioc": "CHA",
"cca2": "TD",
"capital": "N'Djamena",
"lat": 15,
"lng": 19,
"cca3": "TCD"
},
{
"name": "Georgia",
"area": 69700,
"cioc": "GEO",
"cca2": "GE",
"capital": "Tbilisi",
"lat": 42,
"lng": 43.5,
"cca3": "GEO"
},
{
"name": "Montenegro",
"area": 13812,
"cioc": "MNE",
"cca2": "ME",
"capital": "Podgorica",
"lat": 42.5,
"lng": 19.3,
"cca3": "MNE"
},
{
"name": "Mongolia",
"area": 1564110,
"cioc": "MGL",
"cca2": "MN",
"capital": "Ulan Bator",
"lat": 46,
"lng": 105,
"cca3": "MNG"
},
{
"name": "Marshall Islands",
"area": 181,
"cioc": "MHL",
"cca2": "MH",
"capital": "Majuro",
"lat": 9,
"lng": 168,
"cca3": "MHL"
},
{
"name": "Martinique",
"area": 1128,
"cioc": "",
"cca2": "MQ",
"capital": "Fort-de-France",
"lat": 14.666667,
"lng": -61,
"cca3": "MTQ"
},
{
"name": "Belize",
"area": 22966,
"cioc": "BIZ",
"cca2": "BZ",
"capital": "Belmopan",
"lat": 17.25,
"lng": -88.75,
"cca3": "BLZ"
},
{
"name": "Norfolk Island",
"area": 36,
"cioc": "",
"cca2": "NF",
"capital": "Kingston",
"lat": -29.03333333,
"lng": 167.95,
"cca3": "NFK"
},
{
"name": "Myanmar",
"area": 676578,
"cioc": "MYA",
"cca2": "MM",
"capital": "Naypyidaw",
"lat": 22,
"lng": 98,
"cca3": "MMR"
},
{
"name": "Afghanistan",
"area": 652230,
"cioc": "AFG",
"cca2": "AF",
"capital": "Kabul",
"lat": 33,
"lng": 65,
"cca3": "AFG"
},
{
"name": "Burundi",
"area": 27834,
"cioc": "BDI",
"cca2": "BI",
"capital": "Bujumbura",
"lat": -3.5,
"lng": 30,
"cca3": "BDI"
},
{
"name": "British Virgin Islands",
"area": 151,
"cioc": "IVB",
"cca2": "VG",
"capital": "Road Town",
"lat": 18.431383,
"lng": -64.62305,
"cca3": "VGB"
},
{
"name": "Belarus",
"area": 207600,
"cioc": "BLR",
"cca2": "BY",
"capital": "Minsk",
"lat": 53,
"lng": 28,
"cca3": "BLR"
},
{
"name": "Saint Barthelemy",
"area": 21,
"cioc": "",
"cca2": "BL",
"capital": "Gustavia",
"lat": 18.5,
"lng": -63.41666666,
"cca3": "BLM"
},
{
"name": "Grenada",
"area": 344,
"cioc": "GRN",
"cca2": "GD",
"capital": "St. George's",
"lat": 12.11666666,
"lng": -61.66666666,
"cca3": "GRD"
},
{
"name": "Tokelau",
"area": 12,
"cioc": "",
"cca2": "TK",
"capital": "Fakaofo",
"lat": -9,
"lng": -172,
"cca3": "TKL"
},
{
"name": "Greece",
"area": 131990,
"cioc": "GRE",
"cca2": "GR",
"capital": "Athens",
"lat": 39,
"lng": 22,
"cca3": "GRC"
},
{
"name": "Russia",
"area": 17098242,
"cioc": "RUS",
"cca2": "RU",
"capital": "Moscow",
"lat": 60,
"lng": 100,
"cca3": "RUS"
},
{
"name": "Greenland",
"area": 2166086,
"cioc": "",
"cca2": "GL",
"capital": "Nuuk",
"lat": 72,
"lng": -40,
"cca3": "GRL"
},
{
"name": "Andorra",
"area": 468,
"cioc": "AND",
"cca2": "AD",
"capital": "Andorra la Vella",
"lat": 42.5,
"lng": 1.5,
"cca3": "AND"
},
{
"name": "Mozambique",
"area": 801590,
"cioc": "MOZ",
"cca2": "MZ",
"capital": "Maputo",
"lat": -18.25,
"lng": 35,
"cca3": "MOZ"
},
{
"name": "Tajikistan",
"area": 143100,
"cioc": "TJK",
"cca2": "TJ",
"capital": "Dushanbe",
"lat": 39,
"lng": 71,
"cca3": "TJK"
},
{
"name": "Haiti",
"area": 27750,
"cioc": "HAI",
"cca2": "HT",
"capital": "Port-au-Prince",
"lat": 19,
"lng": -72.41666666,
"cca3": "HTI"
},
{
"name": "Mexico",
"area": 1964375,
"cioc": "MEX",
"cca2": "MX",
"capital": "Mexico City",
"lat": 23,
"lng": -102,
"cca3": "MEX"
},
{
"name": "Zimbabwe",
"area": 390757,
"cioc": "ZIM",
"cca2": "ZW",
"capital": "Harare",
"lat": -20,
"lng": 30,
"cca3": "ZWE"
},
{
"name": "Saint Lucia",
"area": 616,
"cioc": "LCA",
"cca2": "LC",
"capital": "Castries",
"lat": 13.88333333,
"lng": -60.96666666,
"cca3": "LCA"
},
{
"name": "India",
"area": 3287590,
"cioc": "IND",
"cca2": "IN",
"capital": "New Delhi",
"lat": 20,
"lng": 77,
"cca3": "IND"
},
{
"name": "Latvia",
"area": 64559,
"cioc": "LAT",
"cca2": "LV",
"capital": "Riga",
"lat": 57,
"lng": 25,
"cca3": "LVA"
},
{
"name": "Bhutan",
"area": 38394,
"cioc": "BHU",
"cca2": "BT",
"capital": "Thimphu",
"lat": 27.5,
"lng": 90.5,
"cca3": "BTN"
},
{
"name": "Saint Vincent and the Grenadines",
"area": 389,
"cioc": "VIN",
"cca2": "VC",
"capital": "Kingstown",
"lat": 13.25,
"lng": -61.2,
"cca3": "VCT"
},
{
"name": "Vietnam",
"area": 331212,
"cioc": "VIE",
"cca2": "VN",
"capital": "Hanoi",
"lat": 16.16666666,
"lng": 107.83333333,
"cca3": "VNM"
},
{
"name": "Norway",
"area": 323802,
"cioc": "NOR",
"cca2": "NO",
"capital": "Oslo",
"lat": 62,
"lng": 10,
"cca3": "NOR"
},
{
"name": "Czech Republic",
"area": 78865,
"cioc": "CZE",
"cca2": "CZ",
"capital": "Prague",
"lat": 49.75,
"lng": 15.5,
"cca3": "CZE"
},
{
"name": "French Southern and Antarctic Lands",
"area": 7747,
"cioc": "",
"cca2": "TF",
"capital": "Port-aux-Francais",
"lat": -49.25,
"lng": 69.167,
"cca3": "ATF"
},
{
"name": "Antigua and Barbuda",
"area": 442,
"cioc": "ANT",
"cca2": "AG",
"capital": "Saint John's",
"lat": 17.05,
"lng": -61.8,
"cca3": "ATG"
},
{
"name": "Fiji",
"area": 18272,
"cioc": "FIJ",
"cca2": "FJ",
"capital": "Suva",
"lat": -18,
"lng": 175,
"cca3": "FJI"
},
{
"name": "British Indian Ocean Territory",
"area": 60,
"cioc": "",
"cca2": "IO",
"capital": "Diego Garcia",
"lat": -6,
"lng": 71.5,
"cca3": "IOT"
},
{
"name": "Honduras",
"area": 112492,
"cioc": "HON",
"cca2": "HN",
"capital": "Tegucigalpa",
"lat": 15,
"lng": -86.5,
"cca3": "HND"
},
{
"name": "Mauritius",
"area": 2040,
"cioc": "MRI",
"cca2": "MU",
"capital": "Port Louis",
"lat": -20.28333333,
"lng": 57.55,
"cca3": "MUS"
},
{
"name": "Antarctica",
"area": 14000000,
"cioc": "",
"cca2": "AQ",
"capital": "",
"lat": -90,
"lng": 0,
"cca3": "ATA"
},
{
"name": "Luxembourg",
"area": 2586,
"cioc": "LUX",
"cca2": "LU",
"capital": "Luxembourg",
"lat": 49.75,
"lng": 6.16666666,
"cca3": "LUX"
},
{
"name": "Israel",
"area": 20770,
"cioc": "ISR",
"cca2": "IL",
"capital": "Jerusalem",
"lat": 31.47,
"lng": 35.13,
"cca3": "ISR"
},
{
"name": "Micronesia",
"area": 702,
"cioc": "FSM",
"cca2": "FM",
"capital": "Palikir",
"lat": 6.91666666,
"lng": 158.25,
"cca3": "FSM"
},
{
"name": "Peru",
"area": 1285216,
"cioc": "PER",
"cca2": "PE",
"capital": "Lima",
"lat": -10,
"lng": -76,
"cca3": "PER"
},
{
"name": "Reunion",
"area": 2511,
"cioc": "",
"cca2": "RE",
"capital": "Saint-Denis",
"lat": -21.15,
"lng": 55.5,
"cca3": "REU"
},
{
"name": "Indonesia",
"area": 1904569,
"cioc": "INA",
"cca2": "ID",
"capital": "Jakarta",
"lat": -5,
"lng": 120,
"cca3": "IDN"
},
{
"name": "Vanuatu",
"area": 12189,
"cioc": "VAN",
"cca2": "VU",
"capital": "Port Vila",
"lat": -16,
"lng": 167,
"cca3": "VUT"
},
{
"name": "Macedonia",
"area": 25713,
"cioc": "MKD",
"cca2": "MK",
"capital": "Skopje",
"lat": 41.83333333,
"lng": 22,
"cca3": "MKD"
},
{
"name": "DR Congo",
"area": 2344858,
"cioc": "COD",
"cca2": "CD",
"capital": "Kinshasa",
"lat": 0,
"lng": 25,
"cca3": "COD"
},
{
"name": "Republic of the Congo",
"area": 342000,
"cioc": "CGO",
"cca2": "CG",
"capital": "Brazzaville",
"lat": -1,
"lng": 15,
"cca3": "COG"
},
{
"name": "Iceland",
"area": 103000,
"cioc": "ISL",
"cca2": "IS",
"capital": "Reykjavik",
"lat": 65,
"lng": -18,
"cca3": "ISL"
},
{
"name": "Guadeloupe",
"area": 1628,
"cioc": "",
"cca2": "GP",
"capital": "Basse-Terre",
"lat": 16.25,
"lng": -61.583333,
"cca3": "GLP"
},
{
"name": "Cook Islands",
"area": 236,
"cioc": "COK",
"cca2": "CK",
"capital": "Avarua",
"lat": -21.23333333,
"lng": -159.76666666,
"cca3": "COK"
},
{
"name": "Comoros",
"area": 1862,
"cioc": "COM",
"cca2": "KM",
"capital": "Moroni",
"lat": -12.16666666,
"lng": 44.25,
"cca3": "COM"
},
{
"name": "Colombia",
"area": 1141748,
"cioc": "COL",
"cca2": "CO",
"capital": "Bogota",
"lat": 4,
"lng": -72,
"cca3": "COL"
},
{
"name": "Nigeria",
"area": 923768,
"cioc": "NGR",
"cca2": "NG",
"capital": "Abuja",
"lat": 10,
"lng": 8,
"cca3": "NGA"
},
{
"name": "Timor-Leste",
"area": 14874,
"cioc": "TLS",
"cca2": "TL",
"capital": "Dili",
"lat": -8.83333333,
"lng": 125.91666666,
"cca3": "TLS"
},
{
"name": "Taiwan",
"area": 36193,
"cioc": "TPE",
"cca2": "TW",
"capital": "Taipei",
"lat": 23.5,
"lng": 121,
"cca3": "TWN"
},
{
"name": "Portugal",
"area": 92090,
"cioc": "POR",
"cca2": "PT",
"capital": "Lisbon",
"lat": 39.5,
"lng": -8,
"cca3": "PRT"
},
{
"name": "Moldova",
"area": 33846,
"cioc": "MDA",
"cca2": "MD",
"capital": "Chisinau",
"lat": 47,
"lng": 29,
"cca3": "MDA"
},
{
"name": "Guernsey",
"area": 78,
"cioc": "",
"cca2": "GG",
"capital": "St. Peter Port",
"lat": 49.46666666,
"lng": -2.58333333,
"cca3": "GGY"
},
{
"name": "Madagascar",
"area": 587041,
"cioc": "MAD",
"cca2": "MG",
"capital": "Antananarivo",
"lat": -20,
"lng": 47,
"cca3": "MDG"
},
{
"name": "Ecuador",
"area": 276841,
"cioc": "ECU",
"cca2": "EC",
"capital": "Quito",
"lat": -2,
"lng": -77.5,
"cca3": "ECU"
},
{
"name": "Senegal",
"area": 196722,
"cioc": "SEN",
"cca2": "SN",
"capital": "Dakar",
"lat": 14,
"lng": -14,
"cca3": "SEN"
},
{
"name": "New Zealand",
"area": 270467,
"cioc": "NZL",
"cca2": "NZ",
"capital": "Wellington",
"lat": -41,
"lng": 174,
"cca3": "NZL"
},
{
"name": "Maldives",
"area": 300,
"cioc": "MDV",
"cca2": "MV",
"capital": "Male",
"lat": 3.25,
"lng": 73,
"cca3": "MDV"
},
{
"name": "American Samoa",
"area": 199,
"cioc": "ASA",
"cca2": "AS",
"capital": "Pago Pago",
"lat": -14.33333333,
"lng": -170,
"cca3": "ASM"
},
{
"name": "Saint Pierre and Miquelon",
"area": 242,
"cioc": "",
"cca2": "PM",
"capital": "Saint-Pierre",
"lat": 46.83333333,
"lng": -56.33333333,
"cca3": "SPM"
},
{
"name": "Curacao",
"area": 444,
"cioc": "",
"cca2": "CW",
"capital": "Willemstad",
"lat": 12.116667,
"lng": -68.933333,
"cca3": "CUW"
},
{
"name": "France",
"area": 551695,
"cioc": "FRA",
"cca2": "FR",
"capital": "Paris",
"lat": 46,
"lng": 2,
"cca3": "FRA"
},
{
"name": "Lithuania",
"area": 65300,
"cioc": "LTU",
"cca2": "LT",
"capital": "Vilnius",
"lat": 56,
"lng": 24,
"cca3": "LTU"
},
{
"name": "Rwanda",
"area": 26338,
"cioc": "RWA",
"cca2": "RW",
"capital": "Kigali",
"lat": -2,
"lng": 30,
"cca3": "RWA"
},
{
"name": "Zambia",
"area": 752612,
"cioc": "ZAM",
"cca2": "ZM",
"capital": "Lusaka",
"lat": -15,
"lng": 30,
"cca3": "ZMB"
},
{
"name": "Gambia",
"area": 10689,
"cioc": "GAM",
"cca2": "GM",
"capital": "Banjul",
"lat": 13.46666666,
"lng": -16.56666666,
"cca3": "GMB"
},
{
"name": "Wallis and Futuna",
"area": 142,
"cioc": "",
"cca2": "WF",
"capital": "Mata-Utu",
"lat": -13.3,
"lng": -176.2,
"cca3": "WLF"
},
{
"name": "Jersey",
"area": 116,
"cioc": "",
"cca2": "JE",
"capital": "Saint Helier",
"lat": 49.25,
"lng": -2.16666666,
"cca3": "JEY"
},
{
"name": "Faroe Islands",
"area": 1393,
"cioc": "",
"cca2": "FO",
"capital": "Torshavn",
"lat": 62,
"lng": -7,
"cca3": "FRO"
},
{
"name": "Guatemala",
"area": 108889,
"cioc": "GUA",
"cca2": "GT",
"capital": "Guatemala City",
"lat": 15.5,
"lng": -90.25,
"cca3": "GTM"
},
{
"name": "Denmark",
"area": 43094,
"cioc": "DEN",
"cca2": "DK",
"capital": "Copenhagen",
"lat": 56,
"lng": 10,
"cca3": "DNK"
},
{
"name": "Isle of Man",
"area": 572,
"cioc": "",
"cca2": "IM",
"capital": "Douglas",
"lat": 54.25,
"lng": -4.5,
"cca3": "IMN"
},
{
"name": "Australia",
"area": 7692024,
"cioc": "AUS",
"cca2": "AU",
"capital": "Canberra",
"lat": -27,
"lng": 133,
"cca3": "AUS"
},
{
"name": "Austria",
"area": 83871,
"cioc": "AUT",
"cca2": "AT",
"capital": "Vienna",
"lat": 47.33333333,
"lng": 13.33333333,
"cca3": "AUT"
},
{
"name": "Svalbard and Jan Mayen",
"area": -1,
"cioc": "",
"cca2": "SJ",
"capital": "Longyearbyen",
"lat": 78,
"lng": 20,
"cca3": "SJM"
},
{
"name": "Venezuela",
"area": 916445,
"cioc": "VEN",
"cca2": "VE",
"capital": "Caracas",
"lat": 8,
"lng": -66,
"cca3": "VEN"
},
{
"name": "Kosovo",
"area": 10908,
"cioc": "KOS",
"cca2": "XK",
"capital": "Pristina",
"lat": 42.666667,
"lng": 21.166667,
"cca3": "UNK"
},
{
"name": "Palau",
"area": 459,
"cioc": "PLW",
"cca2": "PW",
"capital": "Ngerulmud",
"lat": 7.5,
"lng": 134.5,
"cca3": "PLW"
},
{
"name": "Kenya",
"area": 580367,
"cioc": "KEN",
"cca2": "KE",
"capital": "Nairobi",
"lat": 1,
"lng": 38,
"cca3": "KEN"
},
{
"name": "Samoa",
"area": 2842,
"cioc": "SAM",
"cca2": "WS",
"capital": "Apia",
"lat": -13.58333333,
"lng": -172.33333333,
"cca3": "WSM"
},
{
"name": "Turkey",
"area": 783562,
"cioc": "TUR",
"cca2": "TR",
"capital": "Ankara",
"lat": 39,
"lng": 35,
"cca3": "TUR"
},
{
"name": "Albania",
"area": 28748,
"cioc": "ALB",
"cca2": "AL",
"capital": "Tirana",
"lat": 41,
"lng": 20,
"cca3": "ALB"
},
{
"name": "Oman",
"area": 309500,
"cioc": "OMA",
"cca2": "OM",
"capital": "Muscat",
"lat": 21,
"lng": 57,
"cca3": "OMN"
},
{
"name": "Tuvalu",
"area": 26,
"cioc": "TUV",
"cca2": "TV",
"capital": "Funafuti",
"lat": -8,
"lng": 178,
"cca3": "TUV"
},
{
"name": "Aland Islands",
"area": 1580,
"cioc": "",
"cca2": "AX",
"capital": "Mariehamn",
"lat": 60.116667,
"lng": 19.9,
"cca3": "ALA"
},
{
"name": "Brunei",
"area": 5765,
"cioc": "BRU",
"cca2": "BN",
"capital": "Bandar Seri Begawan",
"lat": 4.5,
"lng": 114.66666666,
"cca3": "BRN"
},
{
"name": "Tunisia",
"area": 163610,
"cioc": "TUN",
"cca2": "TN",
"capital": "Tunis",
"lat": 34,
"lng": 9,
"cca3": "TUN"
},
{
"name": "Pitcairn Islands",
"area": 47,
"cioc": "",
"cca2": "PN",
"capital": "Adamstown",
"lat": -25.06666666,
"lng": -130.1,
"cca3": "PCN"
},
{
"name": "Barbados",
"area": 430,
"cioc": "BAR",
"cca2": "BB",
"capital": "Bridgetown",
"lat": 13.16666666,
"lng": -59.53333333,
"cca3": "BRB"
},
{
"name": "Brazil",
"area": 8515767,
"cioc": "BRA",
"cca2": "BR",
"capital": "Brasilia",
"lat": -10,
"lng": -55,
"cca3": "BRA"
},
{
"name": "Ivory Coast",
"area": 322463,
"cioc": "CIV",
"cca2": "CI",
"capital": "Yamoussoukro",
"lat": 8,
"lng": -5,
"cca3": "CIV"
},
{
"name": "Serbia",
"area": 88361,
"cioc": "SRB",
"cca2": "RS",
"capital": "Belgrade",
"lat": 44,
"lng": 21,
"cca3": "SRB"
},
{
"name": "Equatorial Guinea",
"area": 28051,
"cioc": "GEQ",
"cca2": "GQ",
"capital": "Malabo",
"lat": 2,
"lng": 10,
"cca3": "GNQ"
},
{
"name": "United States",
"area": 9372610,
"cioc": "USA",
"cca2": "US",
"capital": "Washington D.C.",
"lat": 38,
"lng": -97,
"cca3": "USA"
},
{
"name": "Qatar",
"area": 11586,
"cioc": "QAT",
"cca2": "QA",
"capital": "Doha",
"lat": 25.5,
"lng": 51.25,
"cca3": "QAT"
},
{
"name": "Sweden",
"area": 450295,
"cioc": "SWE",
"cca2": "SE",
"capital": "Stockholm",
"lat": 62,
"lng": 15,
"cca3": "SWE"
},
{
"name": "Azerbaijan",
"area": 86600,
"cioc": "AZE",
"cca2": "AZ",
"capital": "Baku",
"lat": 40.5,
"lng": 47.5,
"cca3": "AZE"
},
{
"name": "Guinea-Bissau",
"area": 36125,
"cioc": "GBS",
"cca2": "GW",
"capital": "Bissau",
"lat": 12,
"lng": -15,
"cca3": "GNB"
},
{
"name": "Swaziland",
"area": 17364,
"cioc": "SWZ",
"cca2": "SZ",
"capital": "Lobamba",
"lat": -26.5,
"lng": 31.5,
"cca3": "SWZ"
},
{
"name": "Tonga",
"area": 747,
"cioc": "TGA",
"cca2": "TO",
"capital": "Nuku'alofa",
"lat": -20,
"lng": -175,
"cca3": "TON"
},
{
"name": "Canada",
"area": 9984670,
"cioc": "CAN",
"cca2": "CA",
"capital": "Ottawa",
"lat": 60,
"lng": -95,
"cca3": "CAN"
},
{
"name": "Ukraine",
"area": 603500,
"cioc": "UKR",
"cca2": "UA",
"capital": "Kiev",
"lat": 49,
"lng": 32,
"cca3": "UKR"
},
{
"name": "South Korea",
"area": 100210,
"cioc": "KOR",
"cca2": "KR",
"capital": "Seoul",
"lat": 37,
"lng": 127.5,
"cca3": "KOR"
},
{
"name": "Anguilla",
"area": 91,
"cioc": "",
"cca2": "AI",
"capital": "The Valley",
"lat": 18.25,
"lng": -63.16666666,
"cca3": "AIA"
},
{
"name": "Central African Republic",
"area": 622984,
"cioc": "CAF",
"cca2": "CF",
"capital": "Bangui",
"lat": 7,
"lng": 21,
"cca3": "CAF"
},
{
"name": "Slovakia",
"area": 49037,
"cioc": "SVK",
"cca2": "SK",
"capital": "Bratislava",
"lat": 48.66666666,
"lng": 19.5,
"cca3": "SVK"
},
{
"name": "Cyprus",
"area": 9251,
"cioc": "CYP",
"cca2": "CY",
"capital": "Nicosia",
"lat": 35,
"lng": 33,
"cca3": "CYP"
},
{
"name": "Bosnia and Herzegovina",
"area": 51209,
"cioc": "BIH",
"cca2": "BA",
"capital": "Sarajevo",
"lat": 44,
"lng": 18,
"cca3": "BIH"
},
{
"name": "Singapore",
"area": 710,
"cioc": "SIN",
"cca2": "SG",
"capital": "Singapore",
"lat": 1.36666666,
"lng": 103.8,
"cca3": "SGP"
},
{
"name": "South Georgia",
"area": 3903,
"cioc": "",
"cca2": "GS",
"capital": "King Edward Point",
"lat": -54.5,
"lng": -37,
"cca3": "SGS"
},
{
"name": "Somalia",
"area": 637657,
"cioc": "SOM",
"cca2": "SO",
"capital": "Mogadishu",
"lat": 10,
"lng": 49,
"cca3": "SOM"
},
{
"name": "Uzbekistan",
"area": 447400,
"cioc": "UZB",
"cca2": "UZ",
"capital": "Tashkent",
"lat": 41,
"lng": 64,
"cca3": "UZB"
},
{
"name": "Eritrea",
"area": 117600,
"cioc": "ERI",
"cca2": "ER",
"capital": "Asmara",
"lat": 15,
"lng": 39,
"cca3": "ERI"
},
{
"name": "Poland",
"area": 312679,
"cioc": "POL",
"cca2": "PL",
"capital": "Warsaw",
"lat": 52,
"lng": 20,
"cca3": "POL"
},
{
"name": "Kuwait",
"area": 17818,
"cioc": "KUW",
"cca2": "KW",
"capital": "Kuwait City",
"lat": 29.5,
"lng": 45.75,
"cca3": "KWT"
},
{
"name": "Gabon",
"area": 267668,
"cioc": "GAB",
"cca2": "GA",
"capital": "Libreville",
"lat": -1,
"lng": 11.75,
"cca3": "GAB"
},
{
"name": "Cayman Islands",
"area": 264,
"cioc": "CAY",
"cca2": "KY",
"capital": "George Town",
"lat": 19.5,
"lng": -80.5,
"cca3": "CYM"
},
{
"name": "Vatican City",
"area": 0.44,
"cioc": "",
"cca2": "VA",
"capital": "Vatican City",
"lat": 41.9,
"lng": 12.45,
"cca3": "VAT"
},
{
"name": "Estonia",
"area": 45227,
"cioc": "EST",
"cca2": "EE",
"capital": "Tallinn",
"lat": 59,
"lng": 26,
"cca3": "EST"
},
{
"name": "Malawi",
"area": 118484,
"cioc": "MAW",
"cca2": "MW",
"capital": "Lilongwe",
"lat": -13.5,
"lng": 34,
"cca3": "MWI"
},
{
"name": "Spain",
"area": 505992,
"cioc": "ESP",
"cca2": "ES",
"capital": "Madrid",
"lat": 40,
"lng": -4,
"cca3": "ESP"
},
{
"name": "Iraq",
"area": 438317,
"cioc": "IRQ",
"cca2": "IQ",
"capital": "Baghdad",
"lat": 33,
"lng": 44,
"cca3": "IRQ"
},
{
"name": "El Salvador",
"area": 21041,
"cioc": "ESA",
"cca2": "SV",
"capital": "San Salvador",
"lat": 13.83333333,
"lng": -88.91666666,
"cca3": "SLV"
},
{
"name": "Mali",
"area": 1240192,
"cioc": "MLI",
"cca2": "ML",
"capital": "Bamako",
"lat": 17,
"lng": -4,
"cca3": "MLI"
},
{
"name": "Ireland",
"area": 70273,
"cioc": "IRL",
"cca2": "IE",
"capital": "Dublin",
"lat": 53,
"lng": -8,
"cca3": "IRL"
},
{
"name": "Iran",
"area": 1648195,
"cioc": "IRI",
"cca2": "IR",
"capital": "Tehran",
"lat": 32,
"lng": 53,
"cca3": "IRN"
},
{
"name": "Aruba",
"area": 180,
"cioc": "ARU",
"cca2": "AW",
"capital": "Oranjestad",
"lat": 12.5,
"lng": -69.96666666,
"cca3": "ABW"
},
{
"name": "Papua New Guinea",
"area": 462840,
"cioc": "PNG",
"cca2": "PG",
"capital": "Port Moresby",
"lat": -6,
"lng": 147,
"cca3": "PNG"
},
{
"name": "Panama",
"area": 75417,
"cioc": "PAN",
"cca2": "PA",
"capital": "Panama City",
"lat": 9,
"lng": -80,
"cca3": "PAN"
},
{
"name": "Sudan",
"area": 1886068,
"cioc": "SUD",
"cca2": "SD",
"capital": "Khartoum",
"lat": 15,
"lng": 30,
"cca3": "SDN"
},
{
"name": "Solomon Islands",
"area": 28896,
"cioc": "SOL",
"cca2": "SB",
"capital": "Honiara",
"lat": -8,
"lng": 159,
"cca3": "SLB"
},
{
"name": "Western Sahara",
"area": 266000,
"cioc": "",
"cca2": "EH",
"capital": "El Aaiun",
"lat": 24.5,
"lng": -13,
"cca3": "ESH"
},
{
"name": "Monaco",
"area": 2.02,
"cioc": "MON",
"cca2": "MC",
"capital": "Monaco",
"lat": 43.73333333,
"lng": 7.4,
"cca3": "MCO"
},
{
"name": "Italy",
"area": 301336,
"cioc": "ITA",
"cca2": "IT",
"capital": "Rome",
"lat": 42.83333333,
"lng": 12.83333333,
"cca3": "ITA"
},
{
"name": "Japan",
"area": 377930,
"cioc": "JPN",
"cca2": "JP",
"capital": "Tokyo",
"lat": 36,
"lng": 138,
"cca3": "JPN"
},
{
"name": "Kyrgyzstan",
"area": 199951,
"cioc": "KGZ",
"cca2": "KG",
"capital": "Bishkek",
"lat": 41,
"lng": 75,
"cca3": "KGZ"
},
{
"name": "Uganda",
"area": 241550,
"cioc": "UGA",
"cca2": "UG",
"capital": "Kampala",
"lat": 1,
"lng": 32,
"cca3": "UGA"
},
{
"name": "New Caledonia",
"area": 18575,
"cioc": "",
"cca2": "NC",
"capital": "Noumea",
"lat": -21.5,
"lng": 165.5,
"cca3": "NCL"
},
{
"name": "United Arab Emirates",
"area": 83600,
"cioc": "UAE",
"cca2": "AE",
"capital": "Abu Dhabi",
"lat": 24,
"lng": 54,
"cca3": "ARE"
},
{
"name": "Argentina",
"area": 2780400,
"cioc": "ARG",
"cca2": "AR",
"capital": "Buenos Aires",
"lat": -34,
"lng": -64,
"cca3": "ARG"
},
{
"name": "Bahamas",
"area": 13943,
"cioc": "BAH",
"cca2": "BS",
"capital": "Nassau",
"lat": 24.25,
"lng": -76,
"cca3": "BHS"
},
{
"name": "Bahrain",
"area": 765,
"cioc": "BRN",
"cca2": "BH",
"capital": "Manama",
"lat": 26,
"lng": 50.55,
"cca3": "BHR"
},
{
"name": "Armenia",
"area": 29743,
"cioc": "ARM",
"cca2": "AM",
"capital": "Yerevan",
"lat": 40,
"lng": 45,
"cca3": "ARM"
},
{
"name": "Nauru",
"area": 21,
"cioc": "NRU",
"cca2": "NR",
"capital": "Yaren",
"lat": -0.53333333,
"lng": 166.91666666,
"cca3": "NRU"
},
{
"name": "Cuba",
"area": 109884,
"cioc": "CUB",
"cca2": "CU",
"capital": "Havana",
"lat": 21.5,
"lng": -80,
"cca3": "CUB"
}
]
all_lookups = {}
lookups = ['cioc', 'cca2', 'cca3', 'name']
for lookup in lookups:
all_lookups[lookup] = {}
for country in countries:
all_lookups[lookup][country[lookup].lower()] = country
def get(field, symbol):
"""
Get country data based on a standard code and a symbol
>>> get('cioc', 'CUB')['name']
'Cuba'
>>> get('cca2', 'CA')['name']
'Canada'
"""
return all_lookups[field].get(symbol.lower())
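# Illustrative sketch (not part of the original module): how the lookup
# tables built above behave. The _demo helper and its two-entry sample
# list are hypothetical; the real module indexes the full `countries`
# list, lower-casing each code so lookups are case-insensitive.
def _demo():
    sample = [
        {"name": "Cuba", "cioc": "CUB", "cca2": "CU", "cca3": "CUB"},
        {"name": "Canada", "cioc": "CAN", "cca2": "CA", "cca3": "CAN"},
    ]
    tables = {}
    for key in ('cioc', 'cca2', 'cca3', 'name'):
        tables[key] = dict((c[key].lower(), c) for c in sample)
    # Both the table keys and the query symbol are lowered, so case never matters.
    assert tables['cca2']['ca']['name'] == 'Canada'
    assert tables['cioc']['cub']['name'] == 'Cuba'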
| apache-2.0 |
BillGuard/cabot | cabot/cabotapp/views.py | 1 | 23464 | from django.template import RequestContext, loader
from datetime import datetime, timedelta, date
from dateutil.relativedelta import relativedelta
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden
from django.core.urlresolvers import reverse_lazy
from django.conf import settings
from models import (
StatusCheck, GraphiteStatusCheck, JenkinsStatusCheck, HttpStatusCheck, ICMPStatusCheck,
StatusCheckResult, UserProfile, Service, Instance, Shift, get_duty_officers)
from tasks import run_status_check as _run_status_check
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views.generic import (
DetailView, CreateView, UpdateView, ListView, DeleteView, TemplateView, View)
from django import forms
from .graphite import get_data, get_matching_metrics
from .alert import telephone_alert_twiml_callback
from django.contrib.auth.models import User
from django.utils import timezone
from django.utils.timezone import utc
from django.core.urlresolvers import reverse
from django.core.exceptions import ValidationError
from django.core import serializers
from django.core.serializers import json as django_json
from django.db import models
from itertools import groupby, dropwhile, izip_longest
import requests
import json
import re
class LoginRequiredMixin(object):
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
@login_required
def subscriptions(request):
""" Simple list of all checks """
t = loader.get_template('cabotapp/subscriptions.html')
services = Service.objects.all()
users = User.objects.filter(is_active=True)
c = RequestContext(request, {
'services': services,
'users': users,
'duty_officers': get_duty_officers(),
})
return HttpResponse(t.render(c))
@login_required
def run_status_check(request, pk):
"""Runs a specific check"""
_run_status_check(check_or_id=pk)
return HttpResponseRedirect(reverse('check', kwargs={'pk': pk}))
def duplicate_icmp_check(request, pk):
pc = StatusCheck.objects.get(pk=pk)
npk = pc.duplicate()
return HttpResponseRedirect(reverse('update-icmp-check', kwargs={'pk': npk}))
def duplicate_instance(request, pk):
instance = Instance.objects.get(pk=pk)
new_instance = instance.duplicate()
return HttpResponseRedirect(reverse('update-instance', kwargs={'pk': new_instance}))
def duplicate_http_check(request, pk):
pc = StatusCheck.objects.get(pk=pk)
npk = pc.duplicate()
return HttpResponseRedirect(reverse('update-http-check', kwargs={'pk': npk}))
def duplicate_graphite_check(request, pk):
pc = StatusCheck.objects.get(pk=pk)
npk = pc.duplicate()
return HttpResponseRedirect(reverse('update-graphite-check', kwargs={'pk': npk}))
def duplicate_jenkins_check(request, pk):
pc = StatusCheck.objects.get(pk=pk)
npk = pc.duplicate()
return HttpResponseRedirect(reverse('update-jenkins-check', kwargs={'pk': npk}))
class StatusCheckResultDetailView(LoginRequiredMixin, DetailView):
model = StatusCheckResult
context_object_name = 'result'
class SymmetricalForm(forms.ModelForm):
symmetrical_fields = () # Iterable of reverse m2m field names (e.g. 'service_set')
def __init__(self, *args, **kwargs):
super(SymmetricalForm, self).__init__(*args, **kwargs)
if self.instance and self.instance.pk:
for field in self.symmetrical_fields:
self.fields[field].initial = getattr(
self.instance, field).all()
def save(self, commit=True):
instance = super(SymmetricalForm, self).save(commit=False)
if commit:
instance.save()
if instance.pk:
for field in self.symmetrical_fields:
setattr(instance, field, self.cleaned_data[field])
self.save_m2m()
return instance
base_widgets = {
'name': forms.TextInput(attrs={
'style': 'width:30%',
}),
'importance': forms.RadioSelect(),
}
class StatusCheckForm(SymmetricalForm):
symmetrical_fields = ('service_set', 'instance_set')
service_set = forms.ModelMultipleChoiceField(
queryset=Service.objects.all(),
required=False,
help_text='Link to service(s).',
widget=forms.SelectMultiple(
attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
},
)
)
instance_set = forms.ModelMultipleChoiceField(
queryset=Instance.objects.all(),
required=False,
help_text='Link to instance(s).',
widget=forms.SelectMultiple(
attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
},
)
)
class GraphiteStatusCheckForm(StatusCheckForm):
class Meta:
model = GraphiteStatusCheck
fields = (
'name',
'metric',
'check_type',
'value',
'frequency',
'active',
'importance',
'expected_num_hosts',
'debounce',
)
widgets = dict(**base_widgets)
widgets.update({
'value': forms.TextInput(attrs={
'style': 'width: 100px',
'placeholder': 'threshold value',
}),
'metric': forms.TextInput(attrs={
'style': 'width: 100%',
'placeholder': 'graphite metric key'
}),
'check_type': forms.Select(attrs={
'data-rel': 'chosen',
})
})
class ICMPStatusCheckForm(StatusCheckForm):
class Meta:
model = ICMPStatusCheck
fields = (
'name',
'frequency',
'importance',
'active',
'debounce',
)
widgets = dict(**base_widgets)
class HttpStatusCheckForm(StatusCheckForm):
class Meta:
model = HttpStatusCheck
fields = (
'name',
'endpoint',
'username',
'password',
'text_match',
'status_code',
'timeout',
'verify_ssl_certificate',
'frequency',
'importance',
'active',
'debounce',
)
widgets = dict(**base_widgets)
widgets.update({
'endpoint': forms.TextInput(attrs={
'style': 'width: 100%',
'placeholder': 'https://www.arachnys.com',
}),
'username': forms.TextInput(attrs={
'style': 'width: 30%',
}),
'password': forms.TextInput(attrs={
'style': 'width: 30%',
}),
'text_match': forms.TextInput(attrs={
'style': 'width: 100%',
'placeholder': '[Aa]rachnys\s+[Rr]ules',
}),
'status_code': forms.TextInput(attrs={
'style': 'width: 20%',
'placeholder': '200',
}),
})
class JenkinsStatusCheckForm(StatusCheckForm):
class Meta:
model = JenkinsStatusCheck
fields = (
'name',
'importance',
'debounce',
'max_queued_build_time',
)
widgets = dict(**base_widgets)
class UserProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
exclude = ('user',)
class InstanceForm(SymmetricalForm):
symmetrical_fields = ('service_set',)
service_set = forms.ModelMultipleChoiceField(
queryset=Service.objects.all(),
required=False,
help_text='Link to service(s).',
widget=forms.SelectMultiple(
attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
},
)
)
class Meta:
model = Instance
template_name = 'instance_form.html'
fields = (
'name',
'address',
'users_to_notify',
'status_checks',
'service_set',
)
widgets = {
'name': forms.TextInput(attrs={'style': 'width: 30%;'}),
'address': forms.TextInput(attrs={'style': 'width: 70%;'}),
'status_checks': forms.SelectMultiple(attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
}),
'service_set': forms.SelectMultiple(attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
}),
'users_to_notify': forms.CheckboxSelectMultiple(),
}
def __init__(self, *args, **kwargs):
ret = super(InstanceForm, self).__init__(*args, **kwargs)
self.fields['users_to_notify'].queryset = User.objects.filter(
is_active=True)
return ret
class ServiceForm(forms.ModelForm):
class Meta:
model = Service
template_name = 'service_form.html'
fields = (
'name',
'url',
'users_to_notify',
'status_checks',
'instances',
'email_alert',
'hipchat_alert',
'sms_alert',
'telephone_alert',
'alerts_enabled',
'hackpad_id',
)
widgets = {
'name': forms.TextInput(attrs={'style': 'width: 30%;'}),
'url': forms.TextInput(attrs={'style': 'width: 70%;'}),
'status_checks': forms.SelectMultiple(attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
}),
'instances': forms.SelectMultiple(attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
}),
'users_to_notify': forms.CheckboxSelectMultiple(),
'hackpad_id': forms.TextInput(attrs={'style': 'width:30%;'}),
}
def __init__(self, *args, **kwargs):
ret = super(ServiceForm, self).__init__(*args, **kwargs)
self.fields['users_to_notify'].queryset = User.objects.filter(
is_active=True)
return ret
def clean_hackpad_id(self):
value = self.cleaned_data['hackpad_id']
if not value:
return ''
for pattern in settings.RECOVERY_SNIPPETS_WHITELIST:
if re.match(pattern, value):
return value
raise ValidationError('Please specify a valid JS snippet link')
class StatusCheckReportForm(forms.Form):
service = forms.ModelChoiceField(
queryset=Service.objects.all(),
widget=forms.HiddenInput
)
checks = forms.ModelMultipleChoiceField(
queryset=StatusCheck.objects.all(),
widget=forms.SelectMultiple(
attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
},
)
)
date_from = forms.DateField(label='From', widget=forms.DateInput(attrs={'class': 'datepicker'}))
date_to = forms.DateField(label='To', widget=forms.DateInput(attrs={'class': 'datepicker'}))
def get_report(self):
checks = self.cleaned_data['checks']
now = timezone.now()
for check in checks:
# Group results of the check by status (failed alternating with succeeded),
# take time of the first one in each group (starting from a failed group),
# split them into pairs and form the list of problems.
results = check.statuscheckresult_set.filter(
time__gte=self.cleaned_data['date_from'],
time__lt=self.cleaned_data['date_to'] + timedelta(days=1)
).order_by('time')
groups = dropwhile(lambda item: item[0], groupby(results, key=lambda r: r.succeeded))
times = [next(group).time for succeeded, group in groups]
pairs = izip_longest(*([iter(times)] * 2))
check.problems = [(start, end, (end or now) - start) for start, end in pairs]
if results:
check.success_rate = results.filter(succeeded=True).count() / float(len(results)) * 100
return checks
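# Illustrative sketch (hypothetical helper, not referenced elsewhere): the
# pairing trick used in StatusCheckReportForm.get_report() above, shown on
# plain (succeeded, time) tuples instead of StatusCheckResult rows. It reuses
# the groupby/dropwhile/izip_longest imports from the top of this module.
def _problem_intervals_demo(results):
    """[(True, 1), (False, 2), (False, 3), (True, 4), (False, 5)]
    -> [(2, 4), (5, None)] -- one resolved problem, one still open."""
    # Drop any leading run of successes, then keep the first timestamp of
    # each alternating failed/succeeded run and pair them into intervals.
    groups = dropwhile(lambda item: item[0],
                       groupby(results, key=lambda r: r[0]))
    times = [next(group)[1] for succeeded, group in groups]
    return list(izip_longest(*([iter(times)] * 2)))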
class CheckCreateView(LoginRequiredMixin, CreateView):
template_name = 'cabotapp/statuscheck_form.html'
def form_valid(self, form):
form.instance.created_by = self.request.user
return super(CheckCreateView, self).form_valid(form)
def get_initial(self):
if self.initial:
initial = self.initial
else:
initial = {}
metric = self.request.GET.get('metric')
if metric:
initial['metric'] = metric
service_id = self.request.GET.get('service')
instance_id = self.request.GET.get('instance')
if service_id:
try:
service = Service.objects.get(id=service_id)
initial['service_set'] = [service]
except Service.DoesNotExist:
pass
if instance_id:
try:
instance = Instance.objects.get(id=instance_id)
initial['instance_set'] = [instance]
except Instance.DoesNotExist:
pass
return initial
def get_success_url(self):
if self.request.GET.get('service'):
return reverse('service', kwargs={'pk': self.request.GET.get('service')})
if self.request.GET.get('instance'):
return reverse('instance', kwargs={'pk': self.request.GET.get('instance')})
return reverse('checks')
class CheckUpdateView(LoginRequiredMixin, UpdateView):
template_name = 'cabotapp/statuscheck_form.html'
def get_success_url(self):
return reverse('check', kwargs={'pk': self.object.id})
class ICMPCheckCreateView(CheckCreateView):
model = ICMPStatusCheck
form_class = ICMPStatusCheckForm
class ICMPCheckUpdateView(CheckUpdateView):
model = ICMPStatusCheck
form_class = ICMPStatusCheckForm
class GraphiteCheckUpdateView(CheckUpdateView):
model = GraphiteStatusCheck
form_class = GraphiteStatusCheckForm
class GraphiteCheckCreateView(CheckCreateView):
model = GraphiteStatusCheck
form_class = GraphiteStatusCheckForm
class HttpCheckCreateView(CheckCreateView):
model = HttpStatusCheck
form_class = HttpStatusCheckForm
class HttpCheckUpdateView(CheckUpdateView):
model = HttpStatusCheck
form_class = HttpStatusCheckForm
class JenkinsCheckCreateView(CheckCreateView):
model = JenkinsStatusCheck
form_class = JenkinsStatusCheckForm
def form_valid(self, form):
form.instance.frequency = 1
return super(JenkinsCheckCreateView, self).form_valid(form)
class JenkinsCheckUpdateView(CheckUpdateView):
model = JenkinsStatusCheck
form_class = JenkinsStatusCheckForm
def form_valid(self, form):
form.instance.frequency = 1
return super(JenkinsCheckUpdateView, self).form_valid(form)
class StatusCheckListView(LoginRequiredMixin, ListView):
model = StatusCheck
context_object_name = 'checks'
def get_queryset(self):
return StatusCheck.objects.all().order_by('name').prefetch_related('service_set', 'instance_set')
class StatusCheckDeleteView(LoginRequiredMixin, DeleteView):
model = StatusCheck
success_url = reverse_lazy('checks')
context_object_name = 'check'
template_name = 'cabotapp/statuscheck_confirm_delete.html'
class StatusCheckDetailView(LoginRequiredMixin, DetailView):
model = StatusCheck
context_object_name = 'check'
template_name = 'cabotapp/statuscheck_detail.html'
def render_to_response(self, context, *args, **kwargs):
if context is None:
context = {}
context['checkresults'] = self.object.statuscheckresult_set.order_by(
'-time_complete')[:100]
return super(StatusCheckDetailView, self).render_to_response(context, *args, **kwargs)
class UserProfileUpdateView(LoginRequiredMixin, UpdateView):
model = UserProfile
success_url = reverse_lazy('subscriptions')
form_class = UserProfileForm
def get_object(self, *args, **kwargs):
try:
return self.model.objects.get(user=self.kwargs['pk'])
except self.model.DoesNotExist:
user = User.objects.get(id=self.kwargs['pk'])
profile = UserProfile(user=user)
profile.save()
return profile
class InstanceListView(LoginRequiredMixin, ListView):
model = Instance
context_object_name = 'instances'
def get_queryset(self):
return Instance.objects.all().order_by('name').prefetch_related('status_checks')
class ServiceListView(LoginRequiredMixin, ListView):
model = Service
context_object_name = 'services'
def get_queryset(self):
return Service.objects.all().order_by('name').prefetch_related('status_checks')
class InstanceDetailView(LoginRequiredMixin, DetailView):
model = Instance
context_object_name = 'instance'
def get_context_data(self, **kwargs):
context = super(InstanceDetailView, self).get_context_data(**kwargs)
date_from = date.today() - relativedelta(day=1)
context['report_form'] = StatusCheckReportForm(initial={
'checks': self.object.status_checks.all(),
'service': self.object,
'date_from': date_from,
'date_to': date_from + relativedelta(months=1) - relativedelta(days=1)
})
return context
class ServiceDetailView(LoginRequiredMixin, DetailView):
model = Service
context_object_name = 'service'
def get_context_data(self, **kwargs):
context = super(ServiceDetailView, self).get_context_data(**kwargs)
date_from = date.today() - relativedelta(day=1)
context['report_form'] = StatusCheckReportForm(initial={
'checks': self.object.status_checks.all(),
'service': self.object,
'date_from': date_from,
'date_to': date_from + relativedelta(months=1) - relativedelta(days=1)
})
return context
class InstanceCreateView(LoginRequiredMixin, CreateView):
model = Instance
form_class = InstanceForm
def form_valid(self, form):
ret = super(InstanceCreateView, self).form_valid(form)
if self.object.status_checks.filter(polymorphic_ctype__model='icmpstatuscheck').count() == 0:
self.generate_default_ping_check(self.object)
return ret
def generate_default_ping_check(self, obj):
pc = ICMPStatusCheck(
name="Default Ping Check for %s" % obj.name,
frequency=5,
importance=Service.ERROR_STATUS,
debounce=0,
created_by=None,
)
pc.save()
obj.status_checks.add(pc)
def get_success_url(self):
return reverse('instance', kwargs={'pk': self.object.id})
def get_initial(self):
if self.initial:
initial = self.initial
else:
initial = {}
service_id = self.request.GET.get('service')
if service_id:
try:
service = Service.objects.get(id=service_id)
initial['service_set'] = [service]
except Service.DoesNotExist:
pass
return initial
class ServiceCreateView(LoginRequiredMixin, CreateView):
model = Service
form_class = ServiceForm
def get_success_url(self):
return reverse('service', kwargs={'pk': self.object.id})
class InstanceUpdateView(LoginRequiredMixin, UpdateView):
model = Instance
form_class = InstanceForm
def get_success_url(self):
return reverse('instance', kwargs={'pk': self.object.id})
class ServiceUpdateView(LoginRequiredMixin, UpdateView):
model = Service
form_class = ServiceForm
def get_success_url(self):
return reverse('service', kwargs={'pk': self.object.id})
class ServiceDeleteView(LoginRequiredMixin, DeleteView):
model = Service
success_url = reverse_lazy('services')
context_object_name = 'service'
template_name = 'cabotapp/service_confirm_delete.html'
class InstanceDeleteView(LoginRequiredMixin, DeleteView):
model = Instance
success_url = reverse_lazy('instances')
context_object_name = 'instance'
template_name = 'cabotapp/instance_confirm_delete.html'
class ShiftListView(LoginRequiredMixin, ListView):
model = Shift
context_object_name = 'shifts'
def get_queryset(self):
return Shift.objects.filter(
end__gt=datetime.utcnow().replace(tzinfo=utc),
deleted=False).order_by('start')
class StatusCheckReportView(LoginRequiredMixin, TemplateView):
template_name = 'cabotapp/statuscheck_report.html'
def get_context_data(self, **kwargs):
form = StatusCheckReportForm(self.request.GET)
if form.is_valid():
return {'checks': form.get_report(), 'service': form.cleaned_data['service']}
# An invalid form previously fell through and returned None, which breaks
# template rendering; return an empty context instead.
return {}
class StatusCheckReportJsonView(View):
def get(self, request, *args, **kwargs):
if request.GET.get('secret') != settings.HACKY_API_KEY:
return HttpResponseForbidden()
form = StatusCheckReportForm(self.request.GET)
if form.is_valid():
checks = form.get_report()
service = form.cleaned_data['service']
content = {'checks': []}
for check in checks:
check_raw = {
'name': check.name,
'success_rate': check.success_rate,
'problems': [{
'start_time': start_time,
'end_time': end_time,
'duration': duration.total_seconds()
} for start_time, end_time, duration in check.problems]
}
content['checks'].append(check_raw)
j = json.dumps(content, cls=django_json.DjangoJSONEncoder)
return HttpResponse(j, content_type='application/json')
# An invalid form previously fell through and returned None; make the
# client error explicit.
return HttpResponse(status=400)
# Misc JSON api and other stuff
def twiml_callback(request, service_id):
service = Service.objects.get(id=service_id)
twiml = telephone_alert_twiml_callback(service)
return HttpResponse(twiml, content_type='application/xml')
def checks_run_recently(request):
"""
Check whether the check-running backend is alive by looking for status check results completed in the last 10 minutes
"""
ten_mins = datetime.utcnow().replace(tzinfo=utc) - timedelta(minutes=10)
most_recent = StatusCheckResult.objects.filter(time_complete__gte=ten_mins)
if most_recent.exists():
return HttpResponse('Checks running')
return HttpResponse('Checks not running')
def jsonify(d):
return HttpResponse(json.dumps(d), content_type='application/json')
@login_required
def graphite_api_data(request):
metric = request.GET.get('metric')
data = None
matching_metrics = None
try:
data = get_data(metric)
except requests.exceptions.RequestException, e:
pass
if not data:
try:
matching_metrics = get_matching_metrics(metric)
except requests.exceptions.RequestException, e:
return jsonify({'status': 'error', 'message': str(e)})
matching_metrics = {'metrics': matching_metrics}
return jsonify({'status': 'ok', 'data': data, 'matchingMetrics': matching_metrics})
| mit |
GovReady/govready-q | controls/migrations/0015_elementcommoncontrol.py | 1 | 1613 | # Generated by Django 3.0.7 on 2020-06-07 19:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('controls', '0014_auto_20200504_0003'),
]
operations = [
migrations.CreateModel(
name='ElementCommonControl',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('oscal_ctl_id', models.CharField(blank=True, help_text='OSCAL formatted Control ID (e.g., au-2.3)', max_length=20, null=True)),
('oscal_catalog_key', models.CharField(blank=True, help_text="Catalog key from which catalog file can be derived (e.g., 'NIST_SP-800-53_rev4')", max_length=100, null=True)),
('created', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated', models.DateTimeField(auto_now=True, db_index=True)),
('common_control', models.ForeignKey(help_text='The Common Control for this association.', on_delete=django.db.models.deletion.CASCADE, related_name='element_common_control', to='controls.CommonControl')),
('element', models.ForeignKey(help_text='The Element (e.g., System, Component, Host) to which common controls are associated.', on_delete=django.db.models.deletion.CASCADE, related_name='common_controls', to='controls.Element')),
],
options={
'unique_together': {('element', 'common_control', 'oscal_ctl_id', 'oscal_catalog_key')},
},
),
]
| gpl-3.0 |
mcclung/pacemaker | shell/modules/xmlutil.py | 1 | 24547 | # Copyright (C) 2008 Dejan Muhamedagic <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import os
import subprocess
import xml.dom.minidom
from userprefs import Options, UserPrefs
from vars import Vars
from msg import *
from utils import *
def xmlparse(f):
try:
doc = xml.dom.minidom.parse(f)
except xml.parsers.expat.ExpatError,msg:
common_err("cannot parse xml: %s" % msg)
return None
return doc
def file2doc(s):
try: f = open(s,'r')
except IOError, msg:
common_err(msg)
return None
doc = xmlparse(f)
f.close()
return doc
cib_dump = "cibadmin -Ql"
def cibdump2doc(section = None):
doc = None
if section:
cmd = "%s -o %s" % (cib_dump,section)
else:
cmd = cib_dump
cmd = add_sudo(cmd)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
try:
doc = xmlparse(p.stdout)
p.wait()
except IOError, msg:
common_err(msg)
return None
return doc
cib_piped = "cibadmin -p"
def commit_rsc(node):
"Replace a resource definition using cibadmin -R"
rc = pipe_string("%s -R -o %s" % \
(cib_piped, "resources"), node.toxml())
return rc == 0
def get_conf_elem(doc, tag):
try:
return doc.getElementsByTagName(tag)[0]
except:
return None
def read_cib(fun, params = None):
doc = fun(params)
if not doc:
return doc,None
cib = doc.childNodes[0]
if not is_element(cib) or cib.tagName != "cib":
cib_no_elem_err("cib")
return doc,None
return doc,cib
def sanity_check_nvpairs(id,node,attr_list):
rc = 0
for nvpair in node.childNodes:
if not is_element(nvpair) or nvpair.tagName != "nvpair":
continue
n = nvpair.getAttribute("name")
if n and not n in attr_list:
common_err("%s: attribute %s does not exist" % (id,n))
rc |= user_prefs.get_check_rc()
return rc
def sanity_check_meta(id,node,attr_list):
rc = 0
if not node or not attr_list:
return rc
for c in node.childNodes:
if not is_element(c):
continue
if c.tagName == "meta_attributes":
rc |= sanity_check_nvpairs(id,c,attr_list)
return rc
def get_interesting_nodes(node,nodes):
for c in node.childNodes:
if is_element(c) and c.tagName in vars.cib_cli_map:
nodes.append(c)
get_interesting_nodes(c,nodes)
return nodes
def resources_xml():
return cibdump2doc("resources")
def rsc2node(id):
doc = resources_xml()
if not doc:
return None
nodes = get_interesting_nodes(doc,[])
for n in nodes:
if is_resource(n) and n.getAttribute("id") == id:
return n
def get_meta_param(id,param):
rsc_meta_show = "crm_resource --meta -r '%s' -g '%s'"
return get_stdout(rsc_meta_show % (id,param), stderr_on = False)
def is_live_cib():
'''Are we working with the live cluster?'''
return not vars.cib_in_use and not os.getenv("CIB_file")
def cib_shadow_dir():
if os.getenv("CIB_shadow_dir"):
return os.getenv("CIB_shadow_dir")
if getuser() in ("root",vars.crm_daemon_user):
return vars.crm_conf_dir
home = gethomedir()
if home and home.startswith(os.path.sep):
return os.path.join(home,".cib")
return os.getenv("TMPDIR") or "/tmp"
def listshadows():
dir = cib_shadow_dir()
if os.path.isdir(dir):
return stdout2list("ls %s | fgrep shadow. | sed 's/^shadow\.//'" % dir)
else:
return []
def shadowfile(name):
return "%s/shadow.%s" % (cib_shadow_dir(), name)
def shadow2doc(name):
return file2doc(shadowfile(name))
def is_rsc_running(id):
if not is_live_cib():
return False
rsc_node = rsc2node(id)
if not rsc_node:
return False
if not is_resource(rsc_node):
return False
rsc_status = "crm_resource -W -r '%s'"
test_id = rsc_clone(id) or id
outp = get_stdout(rsc_status % test_id, stderr_on = False)
return outp.find("running") > 0 and outp.find("NOT") == -1
def is_rsc_clone(rsc_id):
rsc_node = rsc2node(rsc_id)
return is_clone(rsc_node)
def is_rsc_ms(rsc_id):
rsc_node = rsc2node(rsc_id)
return is_ms(rsc_node)
def rsc_clone(rsc_id):
'''Get a clone of a resource.'''
rsc_node = rsc2node(rsc_id)
if not rsc_node or not rsc_node.parentNode:
return None
pnode = rsc_node.parentNode
if is_group(pnode):
pnode = pnode.parentNode
if is_clonems(pnode):
return pnode.getAttribute("id")
def get_topmost_rsc(node):
'''
Return a topmost node which is a resource and contains this resource
'''
if is_container(node.parentNode):
return get_topmost_rsc(node.parentNode)
return node
def get_cloned_rsc(rsc_id):
rsc_node = rsc2node(rsc_id)
if not rsc_node:
return ""
for c in rsc_node.childNodes:
if is_child_rsc(c):
return c.getAttribute("id")
return ""
attr_defaults_missing = {
}
def add_missing_attr(node):
try:
for defaults in attr_defaults_missing[node.tagName]:
if not node.hasAttribute(defaults[0]):
node.setAttribute(defaults[0],defaults[1])
except: pass
attr_defaults = {
"rule": (("boolean-op","and"),),
"expression": (("type","string"),),
}
def drop_attr_defaults(node, ts = 0):
try:
for defaults in attr_defaults[node.tagName]:
if node.getAttribute(defaults[0]) == defaults[1]:
node.removeAttribute(defaults[0])
except: pass
def is_element(xmlnode):
return xmlnode and xmlnode.nodeType == xmlnode.ELEMENT_NODE
def nameandid(xmlnode,level):
if xmlnode.nodeType == xmlnode.ELEMENT_NODE:
print level*' ',xmlnode.tagName,xmlnode.getAttribute("id"),xmlnode.getAttribute("name")
def xmltraverse(xmlnode,fun,ts=0):
for c in xmlnode.childNodes:
if is_element(c):
fun(c,ts)
xmltraverse(c,fun,ts+1)
def xmltraverse_thin(xmlnode,fun,ts=0):
'''
Skip elements which may be resources themselves.
NB: Call this only on resource (or constraint) nodes, but
never on cib or configuration!
'''
for c in xmlnode.childNodes:
if is_element(c) and not c.tagName in ('primitive','group'):
xmltraverse_thin(c,fun,ts+1)
fun(xmlnode,ts)
def xml_processnodes(xmlnode,node_filter,proc):
'''
Process with proc all nodes that match filter.
'''
node_list = []
for child in xmlnode.childNodes:
if node_filter(child):
node_list.append(child)
if child.hasChildNodes():
xml_processnodes(child,node_filter,proc)
if node_list:
proc(node_list)
# filter the cib
def is_whitespace(node):
return node.nodeType == node.TEXT_NODE and not node.data.strip()
def is_comment(node):
return node.nodeType == node.COMMENT_NODE
def is_status_node(node):
return is_element(node) and node.tagName == "status"
def is_emptynvpairs(node):
if is_element(node) and node.tagName in vars.nvpairs_tags:
for a in vars.precious_attrs:
if node.getAttribute(a):
return False
for n in node.childNodes:
if is_element(n):
return False
return True
else:
return False
def is_group(node):
return is_element(node) \
and node.tagName == "group"
def is_ms(node):
return is_element(node) \
and node.tagName in ("master","ms")
def is_clone(node):
return is_element(node) \
and node.tagName == "clone"
def is_clonems(node):
return is_element(node) \
and node.tagName in vars.clonems_tags
def is_container(node):
return is_element(node) \
and node.tagName in vars.container_tags
def is_primitive(node):
return is_element(node) \
and node.tagName == "primitive"
def is_resource(node):
return is_element(node) \
and node.tagName in vars.resource_tags
def is_child_rsc(node):
return is_element(node) \
and node.tagName in vars.children_tags
def is_constraint(node):
return is_element(node) \
and node.tagName in vars.constraint_tags
def is_defaults(node):
return is_element(node) \
and node.tagName in vars.defaults_tags
def rsc_constraint(rsc_id,cons_node):
if not is_element(cons_node):
return False
for attr in cons_node.attributes.keys():
if attr in vars.constraint_rsc_refs \
and rsc_id == cons_node.getAttribute(attr):
return True
for rref in cons_node.getElementsByTagName("resource_ref"):
if rsc_id == rref.getAttribute("id"):
return True
return False
def sort_container_children(node_list):
'''
Make sure that the attribute set nodes come first, followed by the
child elements (primitive/group). The order of the elements is not
disturbed; they are just shifted to the end.
'''
for node in node_list:
children = []
for c in node.childNodes:
if is_element(c) and c.tagName in vars.children_tags:
children.append(c)
for c in children:
node.removeChild(c)
for c in children:
node.appendChild(c)
def rmnode(node):
if node and node.parentNode:
node.parentNode.removeChild(node)
node.unlink()
def rmnodes(node_list):
for node in node_list:
rmnode(node)
def printid(node_list):
for node in node_list:
id = node.getAttribute("id")
if id: print "node id:",id
def sanitize_cib(doc):
xml_processnodes(doc,is_status_node,rmnodes)
#xml_processnodes(doc,is_element,printid)
xml_processnodes(doc,is_emptynvpairs,rmnodes)
xml_processnodes(doc,is_whitespace,rmnodes)
xml_processnodes(doc,is_comment,rmnodes)
xml_processnodes(doc,is_container,sort_container_children)
xmltraverse(doc,drop_attr_defaults)
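# Illustrative sketch (hypothetical, not used elsewhere): xml_processnodes()
# pairs a node filter with a processor that receives the matching children of
# each parent, which is how sanitize_cib() above prunes the document. The
# same mechanism can collect nodes instead of removing them, reusing the
# is_primitive() predicate defined earlier:
def _collect_primitive_ids(doc):
    found = []
    xml_processnodes(doc, is_primitive,
        lambda nl: found.extend([n.getAttribute("id") for n in nl]))
    return found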
def is_simpleconstraint(node):
return len(node.getElementsByTagName("resource_ref")) == 0
match_list = {
"node": ("uname",),
"crm_config": (),
"rsc_defaults": (),
"op_defaults": (),
"cluster_property_set": (),
"instance_attributes": (),
"meta_attributes": (),
"operations": (),
"nvpair": ("name",),
"op": ("name","interval"),
"rule": ("score","score-attribute","role"),
"expression": ("attribute","operation","value"),
}
def add_comment(doc,node,s):
'''
Add comment s to node from doc.
'''
if not s:
return
comm_node = doc.createComment(s)
if node.hasChildNodes():
node.insertBefore(comm_node, node.firstChild)
else:
node.appendChild(comm_node)
def set_id_used_attr(node):
node.setAttribute("__id_used", "Yes")
def is_id_used_attr(node):
return node.getAttribute("__id_used") == "Yes"
def remove_id_used_attr(node,lvl):
if is_element(node) and is_id_used_attr(node):
node.removeAttribute("__id_used")
def remove_id_used_attributes(node):
if node:
xmltraverse(node, remove_id_used_attr)
def lookup_node(node,oldnode,location_only = False):
'''
Find a child of oldnode which matches node.
This is used to "harvest" existing ids in order to prevent
irrelevant changes to the XML code.
The list of attributes to match is in the dictionary
match_list.
The "id" attribute is treated differently. In case the new node
(the first parameter here) contains the id, then the "id"
attribute is added to the match list.
'''
#print "lookup:",node.tagName,node.getAttribute("id")
if not oldnode:
return None
#print " in:",oldnode.tagName,oldnode.getAttribute("id")
try:
attr_list = list(match_list[node.tagName])
except KeyError:
attr_list = []
if node.getAttribute("id"):
#print " add id attribute"
attr_list.append("id")
for c in oldnode.childNodes:
if not is_element(c):
continue
if not location_only and is_id_used_attr(c):
continue
#print " checking:",c.tagName,c.getAttribute("id")
if node.tagName == c.tagName:
failed = False
for a in attr_list:
if node.getAttribute(a) != c.getAttribute(a):
failed = True
break
if not failed:
#print " found:",c.tagName,c.getAttribute("id")
return c
return None
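# Illustrative sketch (hypothetical, not used elsewhere): id harvesting via
# lookup_node(). An nvpair without an id is matched inside the old parent by
# its "name" attribute (per match_list), so the existing id can be reused
# instead of generating a new one.
def _harvest_id_demo():
    doc = xml.dom.minidom.parseString(
        '<instance_attributes id="ia">'
        '<nvpair id="ia-target-role" name="target-role" value="Started"/>'
        '</instance_attributes>')
    newnode = doc.createElement("nvpair")
    newnode.setAttribute("name", "target-role")
    old = lookup_node(newnode, doc.documentElement)
    return old.getAttribute("id")  # "ia-target-role"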
def find_operation(rsc_node,name,interval):
op_node_l = rsc_node.getElementsByTagName("operations")
for ops in op_node_l:
for c in ops.childNodes:
if not is_element(c):
continue
if c.tagName != "op":
continue
if c.getAttribute("name") == name \
and c.getAttribute("interval") == interval:
return c
def filter_on_tag(nl,tag):
return [node for node in nl if node.tagName == tag]
def nodes(node_list):
return filter_on_tag(node_list,"node")
def primitives(node_list):
return filter_on_tag(node_list,"primitive")
def groups(node_list):
return filter_on_tag(node_list,"group")
def clones(node_list):
return filter_on_tag(node_list,"clone")
def mss(node_list):
return filter_on_tag(node_list,"master")
def constraints(node_list):
return filter_on_tag(node_list,"rsc_location") \
+ filter_on_tag(node_list,"rsc_colocation") \
+ filter_on_tag(node_list,"rsc_order")
def properties(node_list):
return filter_on_tag(node_list,"cluster_property_set") \
+ filter_on_tag(node_list,"rsc_defaults") \
+ filter_on_tag(node_list,"op_defaults")
def processing_sort(nl):
'''
It's usually important to process cib objects in this order,
i.e. simple objects first.
'''
return nodes(nl) + primitives(nl) + groups(nl) + mss(nl) + clones(nl) \
+ constraints(nl) + properties(nl)
def obj_cmp(obj1,obj2):
return cmp(obj1.obj_id,obj2.obj_id)
def filter_on_type(cl,obj_type):
if not cl: # guard against IndexError on an empty list
return []
if type(cl[0]) == type([]):
l = [cli_list for cli_list in cl if cli_list[0][0] == obj_type]
if user_prefs.get_sort_elems():
l.sort(cmp = cmp)
else:
l = [obj for obj in cl if obj.obj_type == obj_type]
if user_prefs.get_sort_elems():
l.sort(cmp = obj_cmp)
return l
def nodes_cli(cl):
return filter_on_type(cl,"node")
def primitives_cli(cl):
return filter_on_type(cl,"primitive")
def groups_cli(cl):
return filter_on_type(cl,"group")
def clones_cli(cl):
return filter_on_type(cl,"clone")
def mss_cli(cl):
return filter_on_type(cl,"ms") + filter_on_type(cl,"master")
def constraints_cli(node_list):
return filter_on_type(node_list,"location") \
+ filter_on_type(node_list,"colocation") \
+ filter_on_type(node_list,"collocation") \
+ filter_on_type(node_list,"order")
def properties_cli(cl):
return filter_on_type(cl,"property") \
+ filter_on_type(cl,"rsc_defaults") \
+ filter_on_type(cl,"op_defaults")
def ops_cli(cl):
return filter_on_type(cl,"op")
def processing_sort_cli(cl):
'''
Return the given list in this order:
nodes, primitives, groups, ms, clones, constraints, rest
Both a list of objects (CibObject) and list of cli
representations accepted.
'''
return nodes_cli(cl) + primitives_cli(cl) + groups_cli(cl) + mss_cli(cl) + clones_cli(cl) \
+ constraints_cli(cl) + properties_cli(cl) + ops_cli(cl)
def is_resource_cli(s):
return s in olist(vars.resource_cli_names)
def is_constraint_cli(s):
return s in olist(vars.constraint_cli_names)
def referenced_resources(node):
if not is_constraint(node):
return []
xml_obj_type = node.tagName
if xml_obj_type == "rsc_location":
node_list = node.getElementsByTagName("rsc")
elif node.getElementsByTagName("resource_ref"): # resource sets
node_list = node.getElementsByTagName("resource_ref")
elif xml_obj_type == "rsc_colocation":
node_list = node.getElementsByTagName("rsc") + \
node.getElementsByTagName("with-rsc")
elif xml_obj_type == "rsc_order":
node_list = node.getElementsByTagName("first") + \
node.getElementsByTagName("then")
return [x.getAttribute("id") for x in node_list]
def rename_id(node,old_id,new_id):
if node.getAttribute("id") == old_id:
node.setAttribute("id", new_id)
def rename_rscref_simple(c_obj,old_id,new_id):
c_modified = False
for attr in c_obj.node.attributes.keys():
if attr in vars.constraint_rsc_refs and \
c_obj.node.getAttribute(attr) == old_id:
c_obj.node.setAttribute(attr, new_id)
c_obj.updated = True
c_modified = True
return c_modified
def delete_rscref_simple(c_obj,rsc_id):
c_modified = False
for attr in c_obj.node.attributes.keys():
if attr in vars.constraint_rsc_refs and \
c_obj.node.getAttribute(attr) == rsc_id:
c_obj.node.removeAttribute(attr)
c_obj.updated = True
c_modified = True
return c_modified
def rset_uniq(c_obj,d):
'''
Drop duplicate resource references.
'''
l = []
for rref in c_obj.node.getElementsByTagName("resource_ref"):
rsc_id = rref.getAttribute("id")
if d[rsc_id] > 1: # drop one
l.append(rref)
d[rsc_id] -= 1
rmnodes(l)
def delete_rscref_rset(c_obj,rsc_id):
'''
Drop all references to rsc_id.
'''
c_modified = False
l = []
for rref in c_obj.node.getElementsByTagName("resource_ref"):
if rsc_id == rref.getAttribute("id"):
l.append(rref)
c_obj.updated = True
c_modified = True
rmnodes(l)
l = []
for rset in c_obj.node.getElementsByTagName("resource_set"):
if len(rset.getElementsByTagName("resource_ref")) == 0:
l.append(rset)
c_obj.updated = True
c_modified = True
rmnodes(l)
return c_modified
def rset_convert(c_obj):
l = c_obj.node.getElementsByTagName("resource_ref")
if len(l) != 2:
return # eh?
c_obj.modified = True
cli = c_obj.repr_cli(format = -1)
newnode = c_obj.cli2node(cli)
if newnode:
c_obj.node.parentNode.replaceChild(newnode,c_obj.node)
c_obj.node.unlink()
def rename_rscref_rset(c_obj,old_id,new_id):
c_modified = False
d = {}
for rref in c_obj.node.getElementsByTagName("resource_ref"):
rsc_id = rref.getAttribute("id")
if rsc_id == old_id:
rref.setAttribute("id", new_id)
rsc_id = new_id
c_obj.updated = True
c_modified = True
if not rsc_id in d:
d[rsc_id] = 0
else:
d[rsc_id] += 1
rset_uniq(c_obj,d)
# if only two resource references remain, convert back to a simple
# constraint to preserve sanity (sigh)
cnt = 0
for key in d:
cnt += d[key]
if cnt == 2:
rset_convert(c_obj)
return c_modified
def rename_rscref(c_obj,old_id,new_id):
if rename_rscref_simple(c_obj,old_id,new_id) or \
rename_rscref_rset(c_obj,old_id,new_id):
err_buf.info("resource references in %s updated" % c_obj.obj_string())
def delete_rscref(c_obj,rsc_id):
return delete_rscref_simple(c_obj,rsc_id) or \
delete_rscref_rset(c_obj,rsc_id)
def silly_constraint(c_node,rsc_id):
'''
Check whether a constraint is "silly": it references only rsc_id
(i.e. it goes from rsc_id to rsc_id) or is otherwise invalid, so
that the caller can remove it.
'''
if c_node.getElementsByTagName("resource_ref"):
# it's a resource set
# the resource sets have already been uniq-ed
return len(c_node.getElementsByTagName("resource_ref")) <= 1
cnt = 0 # total count of referenced resources; it has to be at least two
rsc_cnt = 0
for attr in c_node.attributes.keys():
if attr in vars.constraint_rsc_refs:
cnt += 1
if c_node.getAttribute(attr) == rsc_id:
rsc_cnt += 1
if c_node.tagName == "rsc_location": # locations are never silly
return cnt < 1
else:
return rsc_cnt == 2 or cnt < 2
def get_rsc_children_ids(node):
return [x.getAttribute("id") \
for x in node.childNodes if is_child_rsc(x)]
def get_rscop_defaults_meta_node(node):
for c in node.childNodes:
if not is_element(c) or c.tagName != "meta_attributes":
continue
return c
return None
def new_cib():
doc = xml.dom.minidom.Document()
cib = doc.createElement("cib")
doc.appendChild(cib)
configuration = doc.createElement("configuration")
cib.appendChild(configuration)
for name in vars.req_sections:
node = doc.createElement(name)
configuration.appendChild(node)
return doc
def get_topnode(doc, tag):
"Get configuration element or create/append if there's none."
try:
e = doc.getElementsByTagName(tag)[0]
except:
e = doc.createElement(tag)
conf = doc.getElementsByTagName("configuration")[0]
if conf:
conf.appendChild(e)
else:
return None
return e
def new_cib_element(node,tagname,id_pfx):
base_id = node.getAttribute("id")
newnode = node.ownerDocument.createElement(tagname)
newnode.setAttribute("id", "%s-%s" % (base_id,id_pfx))
node.appendChild(newnode)
return newnode
def get_attr_in_set(node,attr):
for c in node.childNodes:
if not is_element(c):
continue
if c.tagName == "nvpair" and c.getAttribute("name") == attr:
return c
return None
def set_attr(node,attr,value):
'''
Set an attribute in the attribute set.
'''
nvpair = get_attr_in_set(node,attr)
if not nvpair:
nvpair = new_cib_element(node,"nvpair",attr)
nvpair.setAttribute("name",attr)
nvpair.setAttribute("value",value)
def get_set_nodes(node,setname,create = 0):
'Return the attribute set nodes (create one if requested)'
l = []
for c in node.childNodes:
if not is_element(c):
continue
if c.tagName == setname:
l.append(c)
if l:
return l
if create:
l.append(new_cib_element(node,setname,setname))
return l
def xml_cmp(n, m, show = False):
rc = hash(n.toxml()) == hash(m.toxml())
if not rc and show and user_prefs.get_debug():
print "original:",n.toprettyxml()
print "processed:",m.toprettyxml()
return rc
def merge_nvpairs(dnode,snode):
rc = False
add_children = []
for c in snode.childNodes:
if not is_element(c):
continue
if c.tagName == "nvpair":
dc = lookup_node(c,dnode)
if dc:
dc.setAttribute("value",c.getAttribute("value"))
else:
add_children.append(c)
rc = True
for c in add_children:
dnode.appendChild(c)
return rc
def merge_nodes(dnode,snode):
'''
Import elements from snode into dnode.
If an element is attributes set (vars.nvpairs_tags), then
merge nvpairs by the name attribute.
Otherwise, replace the whole element. (TBD)
'''
#print "1:",dnode.toprettyxml()
#print "2:",snode.toprettyxml()
#vars.nvpairs_tags
rc = False # any changes done?
if not dnode or not snode:
return rc
for c in snode.childNodes:
dc = lookup_node(c,dnode)
if not dc:
if c.tagName in vars.nvpairs_tags:
dnode.appendChild(c)
rc = True
continue
if dc.tagName in vars.nvpairs_tags:
rc = merge_nvpairs(dc,c) or rc
return rc
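# Hedged example of the nvpair merge: importing
#   <meta_attributes><nvpair name="a" value="2"/></meta_attributes>
# into a destination set that already holds nvpair a=1 rewrites a's value
# to 2, while nvpairs absent from the destination are appended as children.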
user_prefs = UserPrefs.getInstance()
vars = Vars.getInstance()
# vim:ts=4:sw=4:et:
| gpl-2.0 |
epitron/youtube-dl | youtube_dl/extractor/ndtv.py | 16 | 4606 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_unquote_plus
)
from ..utils import (
parse_duration,
remove_end,
unified_strdate,
urljoin
)
class NDTVIE(InfoExtractor):
_VALID_URL = r'https?://(?:[^/]+\.)?ndtv\.com/(?:[^/]+/)*videos?/?(?:[^/]+/)*[^/?^&]+-(?P<id>\d+)'
_TESTS = [
{
'url': 'https://khabar.ndtv.com/video/show/prime-time/prime-time-ill-system-and-poor-education-468818',
'md5': '78efcf3880ef3fd9b83d405ca94a38eb',
'info_dict': {
'id': '468818',
'ext': 'mp4',
'title': "प्राइम टाइम: सिस्टम बीमार, स्कूल बदहाल",
'description': 'md5:f410512f1b49672e5695dea16ef2731d',
'upload_date': '20170928',
'duration': 2218,
'thumbnail': r're:https?://.*\.jpg',
}
},
{
# __filename is url
'url': 'http://movies.ndtv.com/videos/cracker-free-diwali-wishes-from-karan-johar-kriti-sanon-other-stars-470304',
'md5': 'f1d709352305b44443515ac56b45aa46',
'info_dict': {
'id': '470304',
'ext': 'mp4',
'title': "Cracker-Free Diwali Wishes From Karan Johar, Kriti Sanon & Other Stars",
'description': 'md5:f115bba1adf2f6433fa7c1ade5feb465',
'upload_date': '20171019',
'duration': 137,
'thumbnail': r're:https?://.*\.jpg',
}
},
{
'url': 'https://www.ndtv.com/video/news/news/delhi-s-air-quality-status-report-after-diwali-is-very-poor-470372',
'only_matching': True
},
{
'url': 'https://auto.ndtv.com/videos/the-cnb-daily-october-13-2017-469935',
'only_matching': True
},
{
'url': 'https://sports.ndtv.com/cricket/videos/2nd-t20i-rock-thrown-at-australia-cricket-team-bus-after-win-over-india-469764',
'only_matching': True
},
{
'url': 'http://gadgets.ndtv.com/videos/uncharted-the-lost-legacy-review-465568',
'only_matching': True
},
{
'url': 'http://profit.ndtv.com/videos/news/video-indian-economy-on-very-solid-track-international-monetary-fund-chief-470040',
'only_matching': True
},
{
'url': 'http://food.ndtv.com/video-basil-seeds-coconut-porridge-419083',
'only_matching': True
},
{
'url': 'https://doctor.ndtv.com/videos/top-health-stories-of-the-week-467396',
'only_matching': True
},
{
'url': 'https://swirlster.ndtv.com/video/how-to-make-friends-at-work-469324',
'only_matching': True
}
]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
# '__title' does not contain extra words such as sub-site name, "Video" etc.
title = compat_urllib_parse_unquote_plus(
self._search_regex(r"__title\s*=\s*'([^']+)'", webpage, 'title', default=None) or
self._og_search_title(webpage))
filename = self._search_regex(
r"(?:__)?filename\s*[:=]\s*'([^']+)'", webpage, 'video filename')
# in "movies" sub-site pages, filename is URL
video_url = urljoin('https://ndtvod.bc-ssl.cdn.bitgravity.com/23372/ndtv/', filename.lstrip('/'))
# "doctor" sub-site has MM:SS format
duration = parse_duration(self._search_regex(
r"(?:__)?duration\s*[:=]\s*'([^']+)'", webpage, 'duration', fatal=False))
# "sports", "doctor", "swirlster" sub-sites don't have 'publish-date'
upload_date = unified_strdate(self._html_search_meta(
'publish-date', webpage, 'upload date', default=None) or self._html_search_meta(
'uploadDate', webpage, 'upload date', default=None) or self._search_regex(
r'datePublished"\s*:\s*"([^"]+)"', webpage, 'upload date', fatal=False))
description = remove_end(self._og_search_description(webpage), ' (Read more)')
return {
'id': video_id,
'url': video_url,
'title': title,
'description': description,
'thumbnail': self._og_search_thumbnail(webpage),
'duration': duration,
'upload_date': upload_date,
}
| unlicense |
petemoore/git-repo | git_config.py | 12 | 19430 | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import os
import re
import subprocess
import sys
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
import time
from pyversion import is_python3
if is_python3():
import urllib.request
import urllib.error
else:
import urllib2
import imp
urllib = imp.new_module('urllib')
urllib.request = urllib2
urllib.error = urllib2
from signal import SIGTERM
from error import GitError, UploadError
from trace import Trace
if is_python3():
from http.client import HTTPException
else:
from httplib import HTTPException
from git_command import GitCommand
from git_command import ssh_sock
from git_command import terminate_ssh_clients
R_HEADS = 'refs/heads/'
R_TAGS = 'refs/tags/'
ID_RE = re.compile(r'^[0-9a-f]{40}$')
REVIEW_CACHE = dict()
def IsId(rev):
return ID_RE.match(rev)
def _key(name):
parts = name.split('.')
if len(parts) < 2:
return name.lower()
parts[ 0] = parts[ 0].lower()
parts[-1] = parts[-1].lower()
return '.'.join(parts)
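# Example of the normalization above (hypothetical key):
# _key('Remote.Origin.URL') -> 'remote.Origin.url' -- section and variable
# names are case-insensitive in git, the subsection keeps its case.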
class GitConfig(object):
_ForUser = None
@classmethod
def ForUser(cls):
if cls._ForUser is None:
cls._ForUser = cls(configfile = os.path.expanduser('~/.gitconfig'))
return cls._ForUser
@classmethod
def ForRepository(cls, gitdir, defaults=None):
return cls(configfile = os.path.join(gitdir, 'config'),
defaults = defaults)
def __init__(self, configfile, defaults=None, jsonFile=None):
self.file = configfile
self.defaults = defaults
self._cache_dict = None
self._section_dict = None
self._remotes = {}
self._branches = {}
self._json = jsonFile
if self._json is None:
self._json = os.path.join(
os.path.dirname(self.file),
'.repo_' + os.path.basename(self.file) + '.json')
def Has(self, name, include_defaults = True):
"""Return true if this configuration file has the key.
"""
if _key(name) in self._cache:
return True
if include_defaults and self.defaults:
return self.defaults.Has(name, include_defaults = True)
return False
def GetBoolean(self, name):
"""Returns a boolean from the configuration file.
None : The value was not defined, or is not a boolean.
True : The value was set to true or yes.
False: The value was set to false or no.
"""
v = self.GetString(name)
if v is None:
return None
v = v.lower()
if v in ('true', 'yes'):
return True
if v in ('false', 'no'):
return False
return None
def GetString(self, name, all_keys=False):
"""Get the first value for a key, or None if it is not defined.
This configuration file is used first, if the key is not
defined or all_keys = True then the defaults are also searched.
"""
try:
v = self._cache[_key(name)]
except KeyError:
if self.defaults:
return self.defaults.GetString(name, all_keys = all_keys)
v = []
if not all_keys:
if v:
return v[0]
return None
r = []
r.extend(v)
if self.defaults:
r.extend(self.defaults.GetString(name, all_keys = True))
return r
def SetString(self, name, value):
"""Set the value(s) for a key.
Only this configuration file is modified.
The supplied value should be either a string,
or a list of strings (to store multiple values).
"""
key = _key(name)
try:
old = self._cache[key]
except KeyError:
old = []
if value is None:
if old:
del self._cache[key]
self._do('--unset-all', name)
elif isinstance(value, list):
if len(value) == 0:
self.SetString(name, None)
elif len(value) == 1:
self.SetString(name, value[0])
elif old != value:
self._cache[key] = list(value)
self._do('--replace-all', name, value[0])
for i in range(1, len(value)):
self._do('--add', name, value[i])
elif len(old) != 1 or old[0] != value:
self._cache[key] = [value]
self._do('--replace-all', name, value)
def GetRemote(self, name):
"""Get the remote.$name.* configuration values as an object.
"""
try:
r = self._remotes[name]
except KeyError:
r = Remote(self, name)
self._remotes[r.name] = r
return r
def GetBranch(self, name):
"""Get the branch.$name.* configuration values as an object.
"""
try:
b = self._branches[name]
except KeyError:
b = Branch(self, name)
self._branches[b.name] = b
return b
def GetSubSections(self, section):
"""List all subsection names matching $section.*.*
"""
return self._sections.get(section, set())
def HasSection(self, section, subsection = ''):
"""Does at least one key in section.subsection exist?
"""
try:
return subsection in self._sections[section]
except KeyError:
return False
def UrlInsteadOf(self, url):
"""Resolve any url.*.insteadof references.
"""
for new_url in self.GetSubSections('url'):
for old_url in self.GetString('url.%s.insteadof' % new_url, True):
if old_url is not None and url.startswith(old_url):
return new_url + url[len(old_url):]
return url
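# Sketch of the effect, assuming a gitconfig containing
#   [url "git@example.com:"]
#     insteadof = https://example.com/
# UrlInsteadOf('https://example.com/foo.git') -> 'git@example.com:foo.git'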
@property
def _sections(self):
d = self._section_dict
if d is None:
d = {}
for name in self._cache.keys():
p = name.split('.')
if 2 == len(p):
section = p[0]
subsect = ''
else:
section = p[0]
subsect = '.'.join(p[1:-1])
if section not in d:
d[section] = set()
d[section].add(subsect)
self._section_dict = d
return d
@property
def _cache(self):
if self._cache_dict is None:
self._cache_dict = self._Read()
return self._cache_dict
def _Read(self):
d = self._ReadJson()
if d is None:
d = self._ReadGit()
self._SaveJson(d)
return d
def _ReadJson(self):
try:
if os.path.getmtime(self._json) \
<= os.path.getmtime(self.file):
os.remove(self._json)
return None
except OSError:
return None
try:
Trace(': parsing %s', self.file)
fd = open(self._json)
try:
return json.load(fd)
finally:
fd.close()
except (IOError, ValueError):
os.remove(self._json)
return None
def _SaveJson(self, cache):
try:
fd = open(self._json, 'w')
try:
json.dump(cache, fd, indent=2)
finally:
fd.close()
except (IOError, TypeError):
if os.path.exists(self._json):
os.remove(self._json)
def _ReadGit(self):
"""
Read configuration data from git.
This internal method populates the GitConfig cache.
"""
c = {}
d = self._do('--null', '--list')
if d is None:
return c
for line in d.decode('utf-8').rstrip('\0').split('\0'): # pylint: disable=W1401
# Backslash is not anomalous
if '\n' in line:
key, val = line.split('\n', 1)
else:
key = line
val = None
if key in c:
c[key].append(val)
else:
c[key] = [val]
return c
def _do(self, *args):
command = ['config', '--file', self.file]
command.extend(args)
p = GitCommand(None,
command,
capture_stdout = True,
capture_stderr = True)
if p.Wait() == 0:
return p.stdout
else:
raise GitError('git config %s: %s' % (str(args), p.stderr))
class RefSpec(object):
"""A Git refspec line, split into its components:
forced: True if the line starts with '+'
src: Left side of the line
dst: Right side of the line
"""
@classmethod
def FromString(cls, rs):
lhs, rhs = rs.split(':', 1)
if lhs.startswith('+'):
lhs = lhs[1:]
forced = True
else:
forced = False
return cls(forced, lhs, rhs)
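# e.g. RefSpec.FromString('+refs/heads/*:refs/remotes/origin/*') yields
# forced=True, src='refs/heads/*', dst='refs/remotes/origin/*'.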
def __init__(self, forced, lhs, rhs):
self.forced = forced
self.src = lhs
self.dst = rhs
def SourceMatches(self, rev):
if self.src:
if rev == self.src:
return True
if self.src.endswith('/*') and rev.startswith(self.src[:-1]):
return True
return False
def DestMatches(self, ref):
if self.dst:
if ref == self.dst:
return True
if self.dst.endswith('/*') and ref.startswith(self.dst[:-1]):
return True
return False
def MapSource(self, rev):
if self.src.endswith('/*'):
return self.dst[:-1] + rev[len(self.src) - 1:]
return self.dst
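# With the refspec above, MapSource('refs/heads/master') maps to
# 'refs/remotes/origin/master': the part matched by the '*' is appended
# to the destination prefix.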
def __str__(self):
s = ''
if self.forced:
s += '+'
if self.src:
s += self.src
if self.dst:
s += ':'
s += self.dst
return s
_master_processes = []
_master_keys = set()
_ssh_master = True
_master_keys_lock = None
def init_ssh():
"""Should be called once at the start of repo to init ssh master handling.
At the moment, all we do is to create our lock.
"""
global _master_keys_lock
assert _master_keys_lock is None, "Should only call init_ssh once"
_master_keys_lock = _threading.Lock()
def _open_ssh(host, port=None):
global _ssh_master
# Acquire the lock. This is needed to prevent opening multiple masters for
# the same host when we're running "repo sync -jN" (for N > 1) _and_ the
# manifest <remote fetch="ssh://xyz"> specifies a different host from the
# one that was passed to repo init.
_master_keys_lock.acquire()
try:
# Check to see whether we already think that the master is running; if we
# think it's already running, return right away.
if port is not None:
key = '%s:%s' % (host, port)
else:
key = host
if key in _master_keys:
return True
if not _ssh_master \
or 'GIT_SSH' in os.environ \
or sys.platform in ('win32', 'cygwin'):
# failed earlier, or cygwin ssh can't do this
#
return False
# We will make two calls to ssh; this is the common part of both calls.
command_base = ['ssh',
'-o','ControlPath %s' % ssh_sock(),
host]
if port is not None:
command_base[1:1] = ['-p', str(port)]
# Since the key wasn't in _master_keys, we think that master isn't running.
# ...but before actually starting a master, we'll double-check. This can
# be important because we can't tell that 'git@myhost.com' is the same
# as 'myhost.com' where "User git" is set up in the user's ~/.ssh/config file.
check_command = command_base + ['-O','check']
try:
Trace(': %s', ' '.join(check_command))
check_process = subprocess.Popen(check_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
check_process.communicate() # read output, but ignore it...
isnt_running = check_process.wait()
if not isnt_running:
# Our double-check found that the master _was_ in fact running. Add to
# the list of keys.
_master_keys.add(key)
return True
except Exception:
# Ignore exceptions. We will fall back to the normal command and print
# to the log there.
pass
command = command_base[:1] + \
['-M', '-N'] + \
command_base[1:]
try:
Trace(': %s', ' '.join(command))
p = subprocess.Popen(command)
except Exception as e:
_ssh_master = False
print('\nwarn: cannot enable ssh control master for %s:%s\n%s'
% (host,port, str(e)), file=sys.stderr)
return False
_master_processes.append(p)
_master_keys.add(key)
time.sleep(1)
return True
finally:
_master_keys_lock.release()
def close_ssh():
global _master_keys_lock
terminate_ssh_clients()
for p in _master_processes:
try:
os.kill(p.pid, SIGTERM)
p.wait()
except OSError:
pass
del _master_processes[:]
_master_keys.clear()
d = ssh_sock(create=False)
if d:
try:
os.rmdir(os.path.dirname(d))
except OSError:
pass
# We're done with the lock, so we can delete it.
_master_keys_lock = None
URI_SCP = re.compile(r'^([^@:]*@?[^:/]{1,}):')
URI_ALL = re.compile(r'^([a-z][a-z+-]*)://([^@/]*@?[^/]*)/')
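# Illustration: URI_SCP matches scp-style addresses such as
# 'user@host:path/to/repo' (group 1 = 'user@host'); URI_ALL matches full
# URLs such as 'ssh://user@host:29418/project' (scheme, then user/host/port).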
def GetSchemeFromUrl(url):
m = URI_ALL.match(url)
if m:
return m.group(1)
return None
def _preconnect(url):
m = URI_ALL.match(url)
if m:
scheme = m.group(1)
host = m.group(2)
if ':' in host:
host, port = host.split(':')
else:
port = None
if scheme in ('ssh', 'git+ssh', 'ssh+git'):
return _open_ssh(host, port)
return False
m = URI_SCP.match(url)
if m:
host = m.group(1)
return _open_ssh(host)
return False
class Remote(object):
"""Configuration options related to a remote.
"""
def __init__(self, config, name):
self._config = config
self.name = name
self.url = self._Get('url')
self.review = self._Get('review')
self.projectname = self._Get('projectname')
self.fetch = list(map(RefSpec.FromString,
self._Get('fetch', all_keys=True)))
self._review_url = None
def _InsteadOf(self):
globCfg = GitConfig.ForUser()
urlList = globCfg.GetSubSections('url')
longest = ""
longestUrl = ""
for url in urlList:
key = "url." + url + ".insteadOf"
insteadOfList = globCfg.GetString(key, all_keys=True)
for insteadOf in insteadOfList:
if self.url.startswith(insteadOf) \
and len(insteadOf) > len(longest):
longest = insteadOf
longestUrl = url
if len(longest) == 0:
return self.url
return self.url.replace(longest, longestUrl, 1)
def PreConnectFetch(self):
connectionUrl = self._InsteadOf()
return _preconnect(connectionUrl)
def ReviewUrl(self, userEmail):
if self._review_url is None:
if self.review is None:
return None
u = self.review
if u.startswith('persistent-'):
u = u[len('persistent-'):]
if u.split(':')[0] not in ('http', 'https', 'sso'):
u = 'http://%s' % u
if u.endswith('/Gerrit'):
u = u[:len(u) - len('/Gerrit')]
if u.endswith('/ssh_info'):
u = u[:len(u) - len('/ssh_info')]
if not u.endswith('/'):
u += '/'
http_url = u
if u in REVIEW_CACHE:
self._review_url = REVIEW_CACHE[u]
elif 'REPO_HOST_PORT_INFO' in os.environ:
host, port = os.environ['REPO_HOST_PORT_INFO'].split()
self._review_url = self._SshReviewUrl(userEmail, host, port)
REVIEW_CACHE[u] = self._review_url
elif u.startswith('sso:'):
self._review_url = u # Assume it's right
REVIEW_CACHE[u] = self._review_url
else:
try:
info_url = u + 'ssh_info'
info = urllib.request.urlopen(info_url).read()
if info == 'NOT_AVAILABLE' or '<' in info:
# If `info` contains '<', we assume the server gave us some sort
# of HTML response back, like maybe a login page.
#
# Assume HTTP if SSH is not enabled or ssh_info doesn't look right.
self._review_url = http_url
else:
host, port = info.split()
self._review_url = self._SshReviewUrl(userEmail, host, port)
except urllib.error.HTTPError as e:
raise UploadError('%s: %s' % (self.review, str(e)))
except urllib.error.URLError as e:
raise UploadError('%s: %s' % (self.review, str(e)))
except HTTPException as e:
raise UploadError('%s: %s' % (self.review, e.__class__.__name__))
REVIEW_CACHE[u] = self._review_url
return self._review_url + self.projectname
def _SshReviewUrl(self, userEmail, host, port):
username = self._config.GetString('review.%s.username' % self.review)
if username is None:
username = userEmail.split('@')[0]
return 'ssh://%s@%s:%s/' % (username, host, port)
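# e.g. with no review.<url>.username configured, a userEmail of
# 'john@example.com' plus host 'review.host' and port '29418' produces
# 'ssh://john@review.host:29418/'.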
def ToLocal(self, rev):
"""Convert a remote revision string to something we have locally.
"""
if self.name == '.' or IsId(rev):
return rev
if not rev.startswith('refs/'):
rev = R_HEADS + rev
for spec in self.fetch:
if spec.SourceMatches(rev):
return spec.MapSource(rev)
if not rev.startswith(R_HEADS):
return rev
raise GitError('remote %s does not have %s' % (self.name, rev))
def WritesTo(self, ref):
"""True if the remote stores to the tracking ref.
"""
for spec in self.fetch:
if spec.DestMatches(ref):
return True
return False
def ResetFetch(self, mirror=False):
"""Set the fetch refspec to its default value.
"""
if mirror:
dst = 'refs/heads/*'
else:
dst = 'refs/remotes/%s/*' % self.name
self.fetch = [RefSpec(True, 'refs/heads/*', dst)]
def Save(self):
"""Save this remote to the configuration.
"""
self._Set('url', self.url)
self._Set('review', self.review)
self._Set('projectname', self.projectname)
self._Set('fetch', list(map(str, self.fetch)))
def _Set(self, key, value):
key = 'remote.%s.%s' % (self.name, key)
return self._config.SetString(key, value)
def _Get(self, key, all_keys=False):
key = 'remote.%s.%s' % (self.name, key)
return self._config.GetString(key, all_keys = all_keys)
class Branch(object):
"""Configuration options related to a single branch.
"""
def __init__(self, config, name):
self._config = config
self.name = name
self.merge = self._Get('merge')
r = self._Get('remote')
if r:
self.remote = self._config.GetRemote(r)
else:
self.remote = None
@property
def LocalMerge(self):
"""Convert the merge spec to a local name.
"""
if self.remote and self.merge:
return self.remote.ToLocal(self.merge)
return None
def Save(self):
"""Save this branch back into the configuration.
"""
if self._config.HasSection('branch', self.name):
if self.remote:
self._Set('remote', self.remote.name)
else:
self._Set('remote', None)
self._Set('merge', self.merge)
else:
fd = open(self._config.file, 'a')
try:
fd.write('[branch "%s"]\n' % self.name)
if self.remote:
fd.write('\tremote = %s\n' % self.remote.name)
if self.merge:
fd.write('\tmerge = %s\n' % self.merge)
finally:
fd.close()
def _Set(self, key, value):
key = 'branch.%s.%s' % (self.name, key)
return self._config.SetString(key, value)
def _Get(self, key, all_keys=False):
key = 'branch.%s.%s' % (self.name, key)
return self._config.GetString(key, all_keys = all_keys)
| apache-2.0 |
daniko/support-tools | wiki_to_md/impl/converter.py | 34 | 42218 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handles conversion of Wiki files."""
import urlparse
from . import constants
class Converter(object):
"""Class that handles the actual parsing and generation."""
# A map of HTML tags to a list of the supported args for that tag.
_BASIC_HTML_ARGS = ["title", "dir", "lang"]
_BASIC_HTML_SIZEABLE_ARGS = (_BASIC_HTML_ARGS +
["border", "height", "width", "align"])
_BASIC_HTML_TABLE_ARGS = (_BASIC_HTML_SIZEABLE_ARGS +
["valign", "cellspacing", "cellpadding"])
_ALLOWED_HTML_TAGS = {
"a": _BASIC_HTML_ARGS + ["href"],
"b": _BASIC_HTML_ARGS,
"br": _BASIC_HTML_ARGS,
"blockquote": _BASIC_HTML_ARGS,
"code": _BASIC_HTML_ARGS + ["language"],
"dd": _BASIC_HTML_ARGS,
"div": _BASIC_HTML_ARGS,
"dl": _BASIC_HTML_ARGS,
"dt": _BASIC_HTML_ARGS,
"em": _BASIC_HTML_ARGS,
"font": _BASIC_HTML_ARGS + ["face", "size", "color"],
"h1": _BASIC_HTML_ARGS,
"h2": _BASIC_HTML_ARGS,
"h3": _BASIC_HTML_ARGS,
"h4": _BASIC_HTML_ARGS,
"h5": _BASIC_HTML_ARGS,
"i": _BASIC_HTML_ARGS,
"img": _BASIC_HTML_SIZEABLE_ARGS + ["src", "alt"],
"li": _BASIC_HTML_ARGS,
"ol": _BASIC_HTML_ARGS + ["type", "start"],
"p": _BASIC_HTML_ARGS + ["align"],
"pre": _BASIC_HTML_ARGS,
"q": _BASIC_HTML_ARGS,
"s": _BASIC_HTML_ARGS,
"span": _BASIC_HTML_ARGS,
"strike": _BASIC_HTML_ARGS,
"strong": _BASIC_HTML_ARGS,
"sub": _BASIC_HTML_ARGS,
"sup": _BASIC_HTML_ARGS,
"table": _BASIC_HTML_TABLE_ARGS,
"tbody": _BASIC_HTML_TABLE_ARGS,
"td": _BASIC_HTML_TABLE_ARGS,
"tfoot": _BASIC_HTML_TABLE_ARGS,
"th": _BASIC_HTML_TABLE_ARGS,
"thead": _BASIC_HTML_TABLE_ARGS + ["colspan", "rowspan"],
"tr": _BASIC_HTML_TABLE_ARGS + ["colspan", "rowspan"],
"tt": _BASIC_HTML_ARGS,
"u": _BASIC_HTML_ARGS,
"ul": _BASIC_HTML_ARGS + ["type"],
"var": _BASIC_HTML_ARGS,
}
# These plugins consume raw text.
_RAW_PLUGINS = ["code", "wiki:comment", "pre"]
# Parameters supported by the g:plusone plugin.
_PLUSONE_ARGS = ["count", "size", "href"]
# Parameters supported by the wiki:video plugin.
_VIDEO_ARGS = ["url", "width", "height"]
_VIDEO_DEFAULT_WIDTH = "425"
_VIDEO_DEFAULT_HEIGHT = "344"
def __init__(
self,
pragma_handler,
formatting_handler,
warning_method,
project,
wikipages):
"""Create a converter.
Args:
pragma_handler: Handler for parsed pragmas.
formatting_handler: Handler for parsed formatting rules.
warning_method: A function to call to display a warning message.
project: The name of the Google Code project for the Wiki page.
wikipages: Wiki pages assumed to exist for auto-linking.
"""
self._pragma_handler = pragma_handler
self._formatting_handler = formatting_handler
self._warning_method = warning_method
self._wikipages = wikipages
self._project = project
def Convert(self, input_stream, output_stream):
"""Converts a file in Google Code Wiki format to Github-flavored Markdown.
Args:
input_stream: Input Wiki file.
output_stream: Output Markdown file.
"""
# For simpler processing just load the entire file into memory.
input_lines = input_stream.readlines()
input_line = 1
# First extract pragmas, which must be placed at the top of the file.
input_line = self._ExtractPragmas(input_line, input_lines, output_stream)
# Now ignore any starting vertical whitespace.
input_line = self._MoveToMain(input_line, input_lines, output_stream)
# At the main text, begin processing.
input_line = self._ProcessBody(input_line, input_lines, output_stream)
# Done, but sanity check the amount of input processed.
remaining_lines = len(input_lines) - input_line + 1
if remaining_lines != 0:
self._warning_method(
input_line,
u"Processing completed, but not all lines were processed. "
"Remaining lines: {0}.".format(remaining_lines))
def _ExtractPragmas(self, input_line, input_lines, output_stream):
"""Extracts pragmas from a given input.
Args:
input_line: Current line number being processed.
input_lines: Input Wiki file lines.
output_stream: Output Markdown file.
Returns:
The new value of input_line after processing.
"""
for line in input_lines[input_line - 1:]:
pragma_match = constants.PRAGMA_RE.match(line)
if not pragma_match:
# Found all the pragmas.
break
# Found a pragma, strip it and pass it to the handler.
pragma_type, pragma_value = pragma_match.groups()
self._pragma_handler.HandlePragma(
input_line,
output_stream,
pragma_type.strip(),
pragma_value.strip())
# Moving on to the next line.
input_line += 1
return input_line
def _MoveToMain(self, input_line, input_lines, unused_output_stream):
"""Move the input line position to the main body, after pragmas.
Args:
input_line: Current line number being processed.
input_lines: Input Wiki file lines.
Returns:
The new value of input_line after processing.
"""
for line in input_lines[input_line - 1:]:
whitespace_match = constants.WHITESPACE_RE.match(line)
if not whitespace_match:
# Skipped all the whitespace.
break
# Moving on to the next line.
input_line += 1
return input_line
def _ProcessBody(self, input_line, input_lines, output_stream):
"""The process core.
It is a simple loop that tries to match formatting rules,
then passes each match to the correct handler. It processes the matches
in the same order as Google Code's wiki parser.
Args:
input_line: Current line number being processed.
input_lines: Input Wiki file lines.
output_stream: Output Markdown file.
Returns:
The new value of input_line after processing.
"""
# State tracked during processing:
self._code_block_depth = 0 # How many code block openings we've seen.
self._code_block_lines = [] # What lines we've collected for a code block.
self._indents = [] # 2-tuple of indent position and list type.
self._open_tags = [] # List of open tags, like bold or italic.
self._table_columns = [] # Table column sizes, taken from the header row.
self._table_column = 0 # Current column in the table body, or zero if none.
self._plugin_stack = [] # Current stack of plugins and their parameters.
first_line = True
for line in input_lines[input_line - 1:]:
stripped_line = line.strip()
self._ProcessLine(
first_line,
input_line,
line,
stripped_line,
output_stream)
# Moving on to the next line.
input_line += 1
first_line = False
if self._code_block_depth:
# Forgotten code block ending, close it implicitly.
code = "".join(self._code_block_lines)
self._formatting_handler.HandleText(input_line, output_stream, code)
self._formatting_handler.HandleCodeBlockClose(input_line, output_stream)
return input_line
def _ProcessLine(
self,
first_line,
input_line,
line,
stripped_line,
output_stream):
"""Processes a single line, depending on state.
Args:
first_line: True if this is the first line, false otherwise.
input_line: Current line number being processed.
line: The raw line string.
stripped_line: The line string, stripped of surrounding whitespace.
output_stream: Output Markdown file.
Returns:
The new value of input_line after processing.
"""
# Check for the start of a code block.
if constants.START_CODEBLOCK_RE.match(stripped_line):
if self._code_block_depth == 0:
# Start a new collection of lines.
self._code_block_lines = []
else:
# Just an embedded code block.
self._code_block_lines.append(line)
self._code_block_depth += 1
return
# Check for the end of a code block.
if constants.END_CODEBLOCK_RE.match(stripped_line):
self._code_block_depth -= 1
if self._code_block_depth == 0:
# Closed the highest-level code block, handle it.
self._formatting_handler.HandleEscapedText(
input_line,
output_stream,
"\n")
self._formatting_handler.HandleCodeBlockOpen(
input_line,
output_stream,
None)
code = "".join(self._code_block_lines)
self._formatting_handler.HandleText(input_line, output_stream, code)
self._formatting_handler.HandleCodeBlockClose(input_line, output_stream)
else:
# Just closed an embedded code block.
self._code_block_lines.append(line)
return
# Check if we're in a code block.
# If we are, just put the raw text into code_block_lines.
if self._code_block_depth != 0:
self._code_block_lines.append(line)
return
# For empty lines, close all formatting.
if not stripped_line:
if not self._ConsumeTextForPlugin():
self._SetCurrentList(input_line, 0, " ", output_stream)
self._CloseTags(input_line, output_stream)
if self._table_columns:
self._formatting_handler.HandleTableClose(input_line, output_stream)
self._table_columns = []
self._table_column = 0
self._formatting_handler.HandleParagraphBreak(input_line, output_stream)
return
# Non-empty line, finish the previous line's newline.
if not first_line:
self._formatting_handler.HandleEscapedText(
input_line,
output_stream,
"\n")
# Now check if we're processing within a list.
indent_pos = constants.INDENT_RE.match(line).end()
if (indent_pos and indent_pos < len(line) and
not self._ConsumeTextForPlugin()):
list_type = constants.LIST_TYPES.get(line[indent_pos], "blockquote")
if self._SetCurrentList(input_line, indent_pos, list_type, output_stream):
# Blockquotes take the entire remainder of the line,
# but everything else skips the list symbol plus the space after.
# (In case there is no space after, the first character is skipped;
# we will warn if this is detected, as it was probably unintended.)
if list_type == "blockquote":
line = line[indent_pos:]
else:
if line[indent_pos + 1] != " ":
self._warning_method(
input_line,
u"Missing space after list symbol: {0}, "
"'{1}' was removed instead."
.format(line[indent_pos], line[indent_pos + 1]))
line = line[indent_pos + 2:]
stripped_line = line.strip()
else:
# Reset to no indent.
self._SetCurrentList(input_line, 0, " ", output_stream)
# Finally, split the line into formatting primitives.
# We do so without whitespace so we can catch line breaks across tags.
if constants.LINE_FORMAT_RE.match(stripped_line):
self._ProcessMatch(
input_line,
constants.LINE_FORMAT_RE,
stripped_line,
output_stream)
else:
self._ProcessMatch(
input_line,
constants.TEXT_FORMAT_RE,
stripped_line,
output_stream)
self._CloseTableRow(input_line, output_stream)
def _SetCurrentList(self, input_line, indent_pos, list_type, output_stream):
"""Set the current list level based on the indentation.
Args:
input_line: Current line number being processed.
indent_pos: How far into the line we are indented.
list_type: What the type of the list should be.
output_stream: Output Markdown file.
Returns:
True if we are in a list item, False otherwise.
"""
# Pop and close the lists until we hit a
# list that is at the current position and type
while self._indents and self._indents[-1][0] >= indent_pos:
indents_top = self._indents[-1]
if indents_top[0] == indent_pos and indents_top[1] == list_type:
break
self._formatting_handler.HandleListClose(input_line, output_stream)
self._indents.pop()
# If we just popped everything off, we're not in a list.
if indent_pos == 0:
return False
if not self._indents or indent_pos >= self._indents[-1][0]:
# Add a new indentation if this is the first item overall,
# or the first item at this indentation position.
if not self._indents or indent_pos > self._indents[-1][0]:
self._indents.append((indent_pos, list_type))
# Add the leading Markdown for the list.
indentation_level = len(self._indents)
if list_type == "numeric":
self._formatting_handler.HandleNumericListOpen(
input_line,
output_stream,
indentation_level)
elif list_type == "bullet":
self._formatting_handler.HandleBulletListOpen(
input_line,
output_stream,
indentation_level)
elif list_type == "blockquote":
self._formatting_handler.HandleBlockQuoteOpen(
input_line,
output_stream,
indentation_level)
else:
self._warning_method(
input_line,
u"Bad list type: '{0}'".format(list_type))
return True
def _OpenTag(self, input_line, tag, output_stream):
"""Open a tag and add it to the open tags list.
Args:
input_line: Current line number being processed.
tag: Tag to open.
output_stream: Output Markdown file.
"""
handler = getattr(
self._formatting_handler, u"Handle{0}Open".format(tag), None)
if handler:
handler(input_line, output_stream)
else:
self._warning_method(input_line, u"Bad open tag: '{0}'".format(tag))
self._open_tags.append(tag)
def _CloseTag(self, input_line, tag, output_stream):
"""Close a tag and remove it from the open tags list.
Args:
input_line: Current line number being processed.
tag: Tag to close.
output_stream: Output Markdown file.
"""
handler = getattr(
self._formatting_handler, u"Handle{0}Close".format(tag), None)
if handler:
handler(input_line, output_stream)
else:
self._warning_method(input_line, u"Bad close tag: '{0}'".format(tag))
self._open_tags.remove(tag)
def _CloseTags(self, input_line, output_stream):
"""Close all tags.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
"""
for tag in self._open_tags:
self._CloseTag(input_line, tag, output_stream)
def _CloseTableRow(self, input_line, output_stream):
"""Close table row, if any.
Args:
input_line: Current line number being processed.
output_stream: Output Markdown file.
"""
if self._table_columns:
if self._table_column != 1:
self._formatting_handler.HandleTableRowEnd(input_line, output_stream)
# Check if we just finished the header row.
if not self._table_column:
self._formatting_handler.HandleTableHeader(
input_line,
output_stream,
self._table_columns)
# In a table body, set the current column to 1.
self._table_column = 1
def _ConsumeTextForPlugin(self):
"""Check if text should be consumed raw for a plugin.
Returns:
True if the current plugin is consuming raw text, false otherwise.
"""
return (self._plugin_stack and
self._plugin_stack[-1]["id"] in self._RAW_PLUGINS)
def _ProcessMatch(self, input_line, match_regex, line, output_stream):
"""Process text, using a regex to match against.
Args:
input_line: Current line number being processed.
match_regex: Regex to match the line against.
line: The line being processed.
output_stream: Output Markdown file.
"""
lastpos = 0
for fullmatch in match_regex.finditer(line):
# Add text before the match as regular text.
if lastpos < fullmatch.start():
starting_line = line[lastpos:fullmatch.start()]
if self._ConsumeTextForPlugin():
self._formatting_handler.HandleText(
input_line,
output_stream,
starting_line)
else:
self._formatting_handler.HandleEscapedText(
input_line,
output_stream,
starting_line)
for rulename, match in fullmatch.groupdict().items():
if match is not None:
if self._ConsumeTextForPlugin() and rulename != "PluginEnd":
self._formatting_handler.HandleText(
input_line,
output_stream,
match)
else:
handler = getattr(self, u"_Handle{0}".format(rulename), None)
handler(input_line, match, output_stream)
lastpos = fullmatch.end()
# Add remainder of the line as regular text.
if lastpos < len(line):
remaining_line = line[lastpos:]
if self._ConsumeTextForPlugin():
self._formatting_handler.HandleText(
input_line,
output_stream,
remaining_line)
else:
self._formatting_handler.HandleEscapedText(
input_line,
output_stream,
remaining_line)
def _HandleHeading(self, input_line, match, output_stream):
"""Handle a heading formatter.
Args:
input_line: Current line number being processed.
match: Matched text.
output_stream: Output Markdown file.
"""
match = match.strip()
# Count the equals on the left side.
leftequalcount = 0
for char in match:
if char != "=":
break
leftequalcount += 1
# Count the equals on the right side.
rightequalcount = 0
for char in reversed(match):
if char != "=":
break
rightequalcount += 1
# Users often forget to have the same number of equals signs on
# both sides. Rather than simply error out, we say the level is
# the number of equals signs on the left side.
header_level = leftequalcount
# If the level is greater than 6, the header is invalid and the contents
# are parsed as if no header markup were provided.
if header_level > 6:
header_level = None
# Everything else is the heading text.
heading_text = match[leftequalcount:-rightequalcount].strip()
if header_level:
self._formatting_handler.HandleHeaderOpen(
input_line,
output_stream,
header_level)
self._ProcessMatch(
input_line,
constants.TEXT_FORMAT_RE,
heading_text,
output_stream)
if header_level:
self._formatting_handler.HandleHeaderClose(
input_line,
output_stream,
header_level)
def _HandleHRule(self, input_line, unused_match, output_stream):
"""Handle a heading formatter.
Args:
input_line: Current line number being processed.
unused_match: Matched text.
output_stream: Output Markdown file.
"""
self._formatting_handler.HandleHRule(input_line, output_stream)
def _HandleBold(self, input_line, unused_match, output_stream):
"""Handle a bold formatter.
Args:
input_line: Current line number being processed.
unused_match: Matched text.
output_stream: Output Markdown file.
"""
self._HandleTag(input_line, "Bold", output_stream)
def _HandleItalic(self, input_line, unused_match, output_stream):
"""Handle a italic formatter.
Args:
input_line: Current line number being processed.
unused_match: Matched text.
output_stream: Output Markdown file.
"""
self._HandleTag(input_line, "Italic", output_stream)
def _HandleStrikethrough(self, input_line, unused_match, output_stream):
"""Handle a strikethrough formatter.
Args:
input_line: Current line number being processed.
unused_match: Matched text.
output_stream: Output Markdown file.
"""
self._HandleTag(input_line, "Strikethrough", output_stream)
def _HandleSuperscript(self, input_line, match, output_stream):
"""Handle superscript.
Args:
input_line: Current line number being processed.
match: Matched text.
output_stream: Output Markdown file.
"""
self._formatting_handler.HandleSuperscript(input_line, output_stream, match)
def _HandleSubscript(self, input_line, match, output_stream):
"""Handle subscript.
Args:
input_line: Current line number being processed.
match: Matched text.
output_stream: Output Markdown file.
"""
self._formatting_handler.HandleSubscript(input_line, output_stream, match)
def _HandleInlineCode(self, input_line, match, output_stream):
"""Handle inline code, method one.
Args:
input_line: Current line number being processed.
match: Matched text.
output_stream: Output Markdown file.
"""
self._formatting_handler.HandleInlineCode(input_line, output_stream, match)
def _HandleInlineCode2(self, input_line, match, output_stream):
"""Handle inline code, method two.
Args:
input_line: Current line number being processed.
match: Matched text.
output_stream: Output Markdown file.
"""
self._formatting_handler.HandleInlineCode(input_line, output_stream, match)
def _HandleTableCell(self, input_line, match, output_stream):
"""Handle a table cell.
Args:
input_line: Current line number being processed.
match: Matched text.
output_stream: Output Markdown file.
"""
# Table cells end previous formatting.
self._CloseTags(input_line, output_stream)
# Count the pipes to calculate the column span.
pipecount = 0
for char in match:
if char != "|":
break
pipecount += 1
span = pipecount / 2
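# e.g. a cell opened with "||||" (pipecount 4) spans two columns; the
# extra columns are emitted below as empty cells, since GFM lacks colspan.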
# Now output the cell, tracking the size of the contents.
self._formatting_handler.HandleTableCellBorder(input_line, output_stream)
starting_pos = output_stream.tell()
self._ProcessMatch(
input_line,
constants.TEXT_FORMAT_RE,
match[pipecount:],
output_stream)
ending_pos = output_stream.tell()
# Handle the cell width, either tracking or padding.
cell_width = ending_pos - starting_pos
if not self._table_column:
# In the header row, track the column sizes.
self._table_columns.append(cell_width)
else:
# In the table body, pad the cell (for prettier raw text viewing).
header_cell_width = self._table_columns[self._table_column - 1]
remaining_width = header_cell_width - cell_width
if remaining_width > 0:
padding = " " * remaining_width
self._formatting_handler.HandleEscapedText(
input_line,
output_stream,
padding)
self._table_column += 1
if span > 1:
self._warning_method(
input_line,
"Multi-span cells are not directly supported in GFM. They have been "
"emulated by adding empty cells. This may give the correct rendered "
"result, but the plain-text representation may be noisy. Consider "
"removing the multi-span cells from your table, or using HTML.")
while span > 1:
# Empty cell.
self._formatting_handler.HandleTableCellBorder(
input_line,
output_stream)
self._formatting_handler.HandleEscapedText(
input_line,
output_stream,
" ")
self._table_columns.append(1)
span -= 1
def _HandleTableRowEnd(self, input_line, unused_match, output_stream):
"""Handle a table row ending.
Args:
input_line: Current line number being processed.
unused_match: Matched text.
output_stream: Output Markdown file.
"""
# Table cells end previous formatting.
self._CloseTags(input_line, output_stream)
self._CloseTableRow(input_line, output_stream)
def _HandleUrl(self, input_line, match, output_stream):
"""Handle an auto-linked URL.
Args:
input_line: Current line number being processed.
match: Matched text.
output_stream: Output Markdown file.
"""
self._formatting_handler.HandleLink(input_line, output_stream, match, None)
def _HandleUrlBracket(self, input_line, match, output_stream):
"""Handle a bracketed URL.
Args:
input_line: Current line number being processed.
match: Matched text.
output_stream: Output Markdown file.
"""
# First, strip the brackets off to get to the URL and description.
core = match[1:-1]
# Now strip out the description.
parts = constants.WHITESPACE_RE.split(core, 1)
if len(parts) == 1:
url = parts[0]
description = None
else:
url = parts[0]
description = parts[1]
self._formatting_handler.HandleLink(
input_line,
output_stream,
url,
description)
def _HandleWikiWord(self, input_line, match, output_stream):
"""Handle a wiki word.
Args:
input_line: Current line number being processed.
match: Matched text.
output_stream: Output Markdown file.
"""
if match[0] == "!":
self._formatting_handler.HandleEscapedText(
input_line,
output_stream,
match[1:])
elif match not in self._wikipages:
self._formatting_handler.HandleEscapedText(
input_line,
output_stream,
match)
else:
self._formatting_handler.HandleWiki(
input_line,
output_stream,
match,
None)
def _HandleWikiWordBracket(self, input_line, match, output_stream):
"""Handle a bracketed wiki word.
Args:
input_line: Current line number being processed.
match: Matched text.
output_stream: Output Markdown file.
"""
# First, strip the brackets off to get to the wiki and description.
core = match[1:-1]
# Now strip out the description.
parts = constants.WHITESPACE_RE.split(core, 1)
if len(parts) == 1:
wiki = parts[0]
description = None
else:
wiki = parts[0]
description = parts[1]
self._formatting_handler.HandleWiki(
input_line,
output_stream,
wiki,
description)
def _HandleIssueLink(self, input_line, match, output_stream):
"""Handle an auto-linked issue.
Args:
input_line: Current line number being processed.
match: Matched text.
output_stream: Output Markdown file.
"""
issue = match[len("issue"):].strip()
prefix = match[:-len(issue)]
self._formatting_handler.HandleIssue(
input_line,
output_stream,
prefix,
issue)
def _HandleRevisionLink(self, input_line, match, output_stream):
"""Handle an auto-linked revision.
Args:
input_line: Current line number being processed.
match: Matched text.
output_stream: Output Markdown file.
"""
if match[1].lower() == "e":
revision = match[len("revision"):].strip()
else:
revision = match[len("r"):].strip()
prefix = match[:-len(revision)]
self._formatting_handler.HandleRevision(
input_line,
output_stream,
prefix,
revision)
def _HandlePlugin(self, input_line, match, output_stream):
"""Handle a plugin tag.
Args:
input_line: Current line number being processed.
match: Matched text.
output_stream: Output Markdown file.
"""
# Plugins close formatting tags.
self._CloseTags(input_line, output_stream)
# Get the core of the tag, check if this is also an end tag.
if match.endswith("/>"):
core = match[1:-2]
has_end = True
else:
core = match[1:-1]
has_end = False
# Extract the ID for the plugin.
plugin_id = constants.PLUGIN_ID_RE.match(core).group(0)
core_params = core[len(plugin_id):].strip()
# Extract the parameters for the plugin.
params = {}
for name, value in constants.PLUGIN_PARAM_RE.findall(core_params):
# Remove quotes from the value, if they exist
if value.startswith("'"):
value = value.strip("'")
elif value.startswith("\""):
value = value.strip("\"")
params[name] = value
# Now figure out what to do with the plugin.
if plugin_id in self._ALLOWED_HTML_TAGS:
self._HandlePluginHtml(
input_line,
plugin_id,
params,
has_end,
output_stream)
elif plugin_id == "g:plusone":
self._HandlePluginGPlus(
input_line,
plugin_id,
params,
output_stream)
elif plugin_id == "wiki:comment":
self._HandlePluginWikiComment(
input_line,
plugin_id,
params,
output_stream)
elif plugin_id == "wiki:gadget":
self._HandlePluginWikiGadget(input_line, match, output_stream)
elif plugin_id == "wiki:video":
self._HandlePluginWikiVideo(
input_line,
plugin_id,
params,
output_stream)
elif plugin_id == "wiki:toc":
self._HandlePluginWikiToc(input_line, match, output_stream)
else:
self._warning_method(
input_line,
u"Unknown plugin was given, outputting "
"as plain text:\n\t{0}".format(match))
# Wiki syntax put this class of error on its own line.
self._formatting_handler.HandleEscapedText(
input_line,
output_stream,
u"\n\n{0}\n\n".format(match))
# Add plugin and parameters to the stack.
if not has_end:
plugin_info = {"id": plugin_id, "params": params}
self._plugin_stack.append(plugin_info)
def _HandlePluginHtml(
self,
input_line,
plugin_id,
params,
has_end,
output_stream):
"""Handle a plugin tag for HTML.
Args:
input_line: Current line number being processed.
plugin_id: The plugin ID.
params: The plugin params.
has_end: Plugin has an end tag.
output_stream: Output Markdown file.
"""
# Filter the parameters. These are only filtered for output,
# they still have the effect of being usable variables.
allowed_parameters = self._ALLOWED_HTML_TAGS[plugin_id]
filtered_params = {}
for name, value in params.items():
if name in allowed_parameters:
filtered_params[name] = value
else:
self._warning_method(
input_line,
u"The following parameter was given for the '{0}' tag, "
"but will not be present in the outputted HTML:\n\t'{1}': '{2}'"
.format(plugin_id, name, value))
if plugin_id == "code":
self._formatting_handler.HandleCodeBlockOpen(
input_line,
output_stream,
filtered_params.get("language"))
else:
self._formatting_handler.HandleHtmlOpen(
input_line,
output_stream,
plugin_id,
filtered_params,
has_end)
def _HandlePluginGPlus(
self,
input_line,
plugin_id,
params,
output_stream):
"""Handle a plugin tag for +1 button.
Args:
input_line: Current line number being processed.
plugin_id: The plugin ID.
params: The plugin params.
output_stream: Output Markdown file.
"""
filtered_params = {}
for name, value in params.items():
if name in self._PLUSONE_ARGS:
filtered_params[name] = value
else:
self._warning_method(
input_line,
u"The following parameter was given for the '{0}' tag, "
"but will not be present in the outputted HTML:\n\t'{1}': '{2}'"
.format(plugin_id, name, value))
self._formatting_handler.HandleGPlusOpen(
input_line,
output_stream,
filtered_params)
def _HandlePluginWikiComment(
self,
input_line,
plugin_id,
params,
output_stream):
"""Handle a plugin tag for a wiki comment.
Args:
input_line: Current line number being processed.
plugin_id: The plugin ID.
params: The plugin params.
output_stream: Output Markdown file.
"""
for name, value in params.items():
self._warning_method(
input_line,
u"The following parameter was given for the '{0}' tag, "
"but will not be present in the outputted HTML:\n\t'{1}': '{2}'"
.format(plugin_id, name, value))
self._formatting_handler.HandleCommentOpen(input_line, output_stream)
def _HandlePluginWikiGadget(self, input_line, match, output_stream):
"""Handle a plugin tag for a wiki gadget.
Args:
input_line: Current line number being processed.
match: Matched text.
output_stream: Output Markdown file.
"""
self._warning_method(
input_line,
u"A wiki gadget was used, but this must be manually converted to a "
"GFM-supported method, if possible. Outputting as plain text:\n\t{0}"
.format(match))
self._formatting_handler.HandleEscapedText(
input_line,
output_stream,
match)
def _HandlePluginWikiVideo(
self,
input_line,
plugin_id,
params,
output_stream):
"""Handle a plugin tag for a wiki video.
Args:
input_line: Current line number being processed.
plugin_id: The plugin ID.
params: The plugin params.
output_stream: Output Markdown file.
"""
filtered_params = {}
for name, value in params.items():
if name in self._VIDEO_ARGS:
filtered_params[name] = value
else:
self._warning_method(
input_line,
u"The following parameter was given for the '{0}' tag, "
"but will not be present in the outputted HTML:\n\t'{1}': '{2}'"
.format(plugin_id, name, value))
if "url" in filtered_params:
width = filtered_params.get("width", self._VIDEO_DEFAULT_WIDTH)
height = filtered_params.get("height", self._VIDEO_DEFAULT_HEIGHT)
extracted = urlparse.urlparse(filtered_params["url"])
query = urlparse.parse_qs(extracted.query)
video_id = query.get("v", [""])[0]
if not video_id and extracted.path.startswith("/v/"):
video_id = extracted.path[3:]
if not constants.YOUTUBE_VIDEO_ID_RE.match(video_id):
output = ("wiki:video: cannot find YouTube "
"video id within parameter \"url\".")
self._warning_method(
input_line,
u"Video plugin has invalid video ID, outputting error:\n\t{0}"
.format(output))
# Wiki syntax put this class of error on its own line.
self._formatting_handler.HandleEscapedText(
input_line,
output_stream,
u"\n\n{0}\n\n".format(output))
else:
self._formatting_handler.HandleVideoOpen(
input_line,
output_stream,
video_id,
width,
height)
else:
output = "wiki:video: missing mandatory parameter \"url\"."
self._warning_method(
input_line,
u"Video plugin is missing 'url' parameter, outputting error:\n\t{0}"
.format(output))
# Wiki syntax put this class of error on its own line.
self._formatting_handler.HandleEscapedText(
input_line,
output_stream,
u"\n\n{0}\n\n".format(output))
def _HandlePluginWikiToc(self, input_line, match, output_stream):
"""Handle a plugin tag for a wiki table of contents.
Args:
input_line: Current line number being processed.
match: Matched text.
output_stream: Output Markdown file.
"""
self._warning_method(
input_line,
u"A table of contents plugin was used for this wiki:\n"
"\t{0}\n"
"The Gollum wiki system supports table of content generation.\n"
"See https://github.com/gollum/gollum/wiki for more information.\n"
"It has been removed."
.format(match))
def _HandlePluginEnd(self, input_line, match, output_stream):
"""Handle a plugin ending tag.
Args:
input_line: Current line number being processed.
match: Matched text.
output_stream: Output Markdown file.
"""
core = match[2:-1]
plugin_id = constants.PLUGIN_ID_RE.match(core).group(0)
if self._plugin_stack and self._plugin_stack[-1]["id"] == plugin_id:
self._plugin_stack.pop()
if plugin_id in self._ALLOWED_HTML_TAGS:
if plugin_id == "code":
self._formatting_handler.HandleCodeBlockClose(
input_line,
output_stream)
else:
self._formatting_handler.HandleHtmlClose(
input_line,
output_stream,
plugin_id)
elif plugin_id == "g:plusone":
self._formatting_handler.HandleGPlusClose(input_line, output_stream)
elif plugin_id == "wiki:comment":
self._formatting_handler.HandleCommentClose(input_line, output_stream)
elif plugin_id == "wiki:gadget":
# A warning was already issued on the opening tag.
self._formatting_handler.HandleEscapedText(
input_line,
output_stream,
match)
elif plugin_id == "wiki:video":
self._formatting_handler.HandleVideoClose(input_line, output_stream)
elif plugin_id == "wiki:toc":
# A warning was already issued on the opening tag.
pass
else:
self._warning_method(
input_line,
u"Unknown but matching plugin end was given, outputting "
"as plain text:\n\t{0}".format(match))
# Wiki syntax put this class of error on its own line.
self._formatting_handler.HandleEscapedText(
input_line,
output_stream,
u"\n\n{0}\n\n".format(match))
else:
self._warning_method(
input_line,
u"Unknown/unmatched plugin end was given, outputting "
"as plain text with errors:\n\t{0}".format(match))
# Wiki syntax put this class of error on its own line,
# with a prefix error message, and did not display the tag namespace.
tag_without_ns = plugin_id.split(":", 1)[-1]
self._formatting_handler.HandleEscapedText(
input_line,
output_stream,
u"\n\nUnknown end tag for </{0}>\n\n".format(tag_without_ns))
def _HandleVariable(self, input_line, match, output_stream):
"""Handle a variable.
Args:
input_line: Current line number being processed.
match: Matched text.
output_stream: Output Markdown file.
"""
output = None
instructions = None
# If the variable is defined somewhere in the plugin stack, use it.
if self._plugin_stack:
value = None
for plugin_info in reversed(self._plugin_stack):
if match in plugin_info["params"]:
value = plugin_info["params"][match]
break
if value:
output = value
# Otherwise, it needs to be globally-defined.
if not output and match == "username":
output = "(TODO: Replace with username.)"
instructions = ("On Google Code this would have been replaced with the "
"username of the current user, but GitHub has no "
"direct support for equivalent behavior. It has been "
"replaced with\n\t{0}\nConsider removing this altogether."
.format(output))
elif not output and match == "email":
output = "(TODO: Replace with email address.)"
instructions = ("On Google Code this would have been replaced with the "
"email address of the current user, but GitHub has no "
"direct support for equivalent behavior. It has been "
"replaced with\n\t{0}\nConsider removing this altogether."
.format(output))
elif not output and match == "project":
if self._project:
output = self._project
instructions = (u"It has been replaced with static text containing the "
"name of the project:\n\t{0}".format(self._project))
else:
output = "(TODO: Replace with project name.)"
instructions = ("Because no project name was specified, the text has "
"been replaced with:\n\t{0}".format(output))
# Not defined anywhere, just treat as regular text.
if not output:
# Add surrounding %% back on.
output = u"%%{0}%%".format(match)
self._formatting_handler.HandleEscapedText(
input_line,
output_stream,
output)
if instructions:
self._warning_method(
input_line,
u"A variable substitution was performed with %%{0}%%. {1}"
.format(match, instructions))
def _HandleTag(self, input_line, tag, output_stream):
"""Handle a tag, which has an opening and closing.
Args:
input_line: Current line number being processed.
tag: The tag to handle.
output_stream: Output Markdown file.
"""
if tag not in self._open_tags:
self._OpenTag(input_line, tag, output_stream)
else:
self._CloseTag(input_line, tag, output_stream)
| apache-2.0 |
motion2015/a3 | lms/djangoapps/mobile_api/social_facebook/friends/views.py | 85 | 2439 | """
Views for friends info API
"""
from rest_framework import generics, status
from rest_framework.response import Response
from opaque_keys.edx.keys import CourseKey
from student.models import CourseEnrollment
from ...utils import mobile_view
from ..utils import get_friends_from_facebook, get_linked_edx_accounts, share_with_facebook_friends
from lms.djangoapps.mobile_api.social_facebook.friends import serializers
@mobile_view()
class FriendsInCourse(generics.ListAPIView):
"""
**Use Case**
    API endpoint that returns all the user's friends that are in the specified course.
Note that only friends that allow their courses to be shared will be included.
**Example request**:
GET /api/mobile/v0.5/social/facebook/friends/course/<course_id>
    where course_id is of the form edX/DemoX/Demo_Course
**Response Values**
{
"friends": [
{
"name": "test",
"id": "12345",
},
...
]
}
"""
serializer_class = serializers.FriendsInCourseSerializer
def list(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.GET, files=request.FILES)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# Get all the user's FB friends
result = get_friends_from_facebook(serializer)
if not isinstance(result, list):
return result
def is_member(friend, course_key):
"""
Return true if friend is a member of the course specified by the course_key
"""
return CourseEnrollment.objects.filter(
course_id=course_key,
user_id=friend['edX_id']
).count() == 1
# For each friend check if they are a linked edX user
friends_with_edx_users = get_linked_edx_accounts(result)
# Filter by sharing preferences and enrollment in course
course_key = CourseKey.from_string(kwargs['course_id'])
friends_with_sharing_in_course = [
{'id': friend['id'], 'name': friend['name']}
for friend in friends_with_edx_users
if share_with_facebook_friends(friend) and is_member(friend, course_key)
]
return Response({'friends': friends_with_sharing_in_course})
| agpl-3.0 |
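A hypothetical client call against the endpoint documented above; the host, both tokens, and the oauth_token parameter name are placeholders, not values taken from this codebase. The response shape follows the docstring.

import requests

course_id = "edX/DemoX/Demo_Course"
url = ("https://lms.example.com/api/mobile/v0.5/social/facebook/"
       "friends/course/{0}".format(course_id))
resp = requests.get(
    url,
    params={"oauth_token": "<facebook-oauth-token>"},  # assumed parameter name
    headers={"Authorization": "Bearer <edx-access-token>"},
)
if resp.status_code == 200:
    # Per the docstring: {"friends": [{"id": ..., "name": ...}, ...]}
    for friend in resp.json()["friends"]:
        print(friend["id"], friend["name"])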
moto-timo/ironpython3 | Src/StdLib/Lib/test/test_shutil.py | 3 | 74111 | # Copyright (C) 2003 Python Software Foundation
import unittest
import unittest.mock
import shutil
import tempfile
import sys
import stat
import os
import os.path
import errno
import functools
import subprocess
from contextlib import ExitStack
from shutil import (make_archive,
register_archive_format, unregister_archive_format,
get_archive_formats, Error, unpack_archive,
register_unpack_format, RegistryError,
unregister_unpack_format, get_unpack_formats,
SameFileError)
import tarfile
import warnings
from test import support
from test.support import TESTFN, check_warnings, captured_stdout, requires_zlib
try:
import bz2
BZ2_SUPPORTED = True
except ImportError:
BZ2_SUPPORTED = False
TESTFN2 = TESTFN + "2"
try:
import grp
import pwd
UID_GID_SUPPORT = True
except ImportError:
UID_GID_SUPPORT = False
try:
import zipfile
ZIP_SUPPORT = True
except ImportError:
ZIP_SUPPORT = shutil.which('zip')
def _fake_rename(*args, **kwargs):
# Pretend the destination path is on a different filesystem.
raise OSError(getattr(errno, 'EXDEV', 18), "Invalid cross-device link")
def mock_rename(func):
@functools.wraps(func)
def wrap(*args, **kwargs):
try:
builtin_rename = os.rename
os.rename = _fake_rename
return func(*args, **kwargs)
finally:
os.rename = builtin_rename
return wrap
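# Illustrative usage of mock_rename (not part of the original test module):
# with every os.rename failing as EXDEV, shutil.move cannot rename in place
# and falls back to its copy-then-delete path, exactly as it would across
# real filesystems. Wrapped in a function so nothing runs at import time.
@mock_rename
def _demo_cross_device_move(src, dst):
    shutil.move(src, dst)  # exercised via the copy + os.unlink fallback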
def write_file(path, content, binary=False):
"""Write *content* to a file located at *path*.
If *path* is a tuple instead of a string, os.path.join will be used to
make a path. If *binary* is true, the file will be opened in binary
mode.
"""
if isinstance(path, tuple):
path = os.path.join(*path)
with open(path, 'wb' if binary else 'w') as fp:
fp.write(content)
def read_file(path, binary=False):
"""Return contents from a file located at *path*.
If *path* is a tuple instead of a string, os.path.join will be used to
make a path. If *binary* is true, the file will be opened in binary
mode.
"""
if isinstance(path, tuple):
path = os.path.join(*path)
with open(path, 'rb' if binary else 'r') as fp:
return fp.read()
def rlistdir(path):
res = []
for name in sorted(os.listdir(path)):
p = os.path.join(path, name)
if os.path.isdir(p) and not os.path.islink(p):
res.append(name + '/')
for n in rlistdir(p):
res.append(name + '/' + n)
else:
res.append(name)
return res
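# Illustrative check of rlistdir's output shape (not part of the original
# test module): directories carry a trailing '/' and entries appear
# depth-first in sorted order.
def _demo_rlistdir():
    tmp = tempfile.mkdtemp()
    try:
        os.mkdir(os.path.join(tmp, 'sub'))
        write_file((tmp, 'file1'), 'x')
        write_file((tmp, 'sub', 'file3'), 'x')
        assert rlistdir(tmp) == ['file1', 'sub/', 'sub/file3']
    finally:
        shutil.rmtree(tmp)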
class TestShutil(unittest.TestCase):
def setUp(self):
super(TestShutil, self).setUp()
self.tempdirs = []
def tearDown(self):
super(TestShutil, self).tearDown()
while self.tempdirs:
d = self.tempdirs.pop()
shutil.rmtree(d, os.name in ('nt', 'cygwin'))
def mkdtemp(self):
"""Create a temporary directory that will be cleaned up.
Returns the path of the directory.
"""
d = tempfile.mkdtemp()
self.tempdirs.append(d)
return d
def test_rmtree_works_on_bytes(self):
tmp = self.mkdtemp()
victim = os.path.join(tmp, 'killme')
os.mkdir(victim)
write_file(os.path.join(victim, 'somefile'), 'foo')
victim = os.fsencode(victim)
self.assertIsInstance(victim, bytes)
win = (os.name == 'nt')
with self.assertWarns(DeprecationWarning) if win else ExitStack():
shutil.rmtree(victim)
@support.skip_unless_symlink
def test_rmtree_fails_on_symlink(self):
tmp = self.mkdtemp()
dir_ = os.path.join(tmp, 'dir')
os.mkdir(dir_)
link = os.path.join(tmp, 'link')
os.symlink(dir_, link)
self.assertRaises(OSError, shutil.rmtree, link)
self.assertTrue(os.path.exists(dir_))
self.assertTrue(os.path.lexists(link))
errors = []
def onerror(*args):
errors.append(args)
shutil.rmtree(link, onerror=onerror)
self.assertEqual(len(errors), 1)
self.assertIs(errors[0][0], os.path.islink)
self.assertEqual(errors[0][1], link)
self.assertIsInstance(errors[0][2][1], OSError)
@support.skip_unless_symlink
def test_rmtree_works_on_symlinks(self):
tmp = self.mkdtemp()
dir1 = os.path.join(tmp, 'dir1')
dir2 = os.path.join(dir1, 'dir2')
dir3 = os.path.join(tmp, 'dir3')
for d in dir1, dir2, dir3:
os.mkdir(d)
file1 = os.path.join(tmp, 'file1')
write_file(file1, 'foo')
link1 = os.path.join(dir1, 'link1')
os.symlink(dir2, link1)
link2 = os.path.join(dir1, 'link2')
os.symlink(dir3, link2)
link3 = os.path.join(dir1, 'link3')
os.symlink(file1, link3)
# make sure symlinks are removed but not followed
shutil.rmtree(dir1)
self.assertFalse(os.path.exists(dir1))
self.assertTrue(os.path.exists(dir3))
self.assertTrue(os.path.exists(file1))
def test_rmtree_errors(self):
# filename is guaranteed not to exist
filename = tempfile.mktemp()
self.assertRaises(FileNotFoundError, shutil.rmtree, filename)
# test that ignore_errors option is honored
shutil.rmtree(filename, ignore_errors=True)
# existing file
tmpdir = self.mkdtemp()
write_file((tmpdir, "tstfile"), "")
filename = os.path.join(tmpdir, "tstfile")
with self.assertRaises(NotADirectoryError) as cm:
shutil.rmtree(filename)
        # The reason for this rather odd construct is that Windows sprinkles
        # a \*.* at the end of file names, but only sometimes and only on
        # some buildbots.
possible_args = [filename, os.path.join(filename, '*.*')]
self.assertIn(cm.exception.filename, possible_args)
self.assertTrue(os.path.exists(filename))
# test that ignore_errors option is honored
shutil.rmtree(filename, ignore_errors=True)
self.assertTrue(os.path.exists(filename))
errors = []
def onerror(*args):
errors.append(args)
shutil.rmtree(filename, onerror=onerror)
self.assertEqual(len(errors), 2)
self.assertIs(errors[0][0], os.listdir)
self.assertEqual(errors[0][1], filename)
self.assertIsInstance(errors[0][2][1], NotADirectoryError)
self.assertIn(errors[0][2][1].filename, possible_args)
self.assertIs(errors[1][0], os.rmdir)
self.assertEqual(errors[1][1], filename)
self.assertIsInstance(errors[1][2][1], NotADirectoryError)
self.assertIn(errors[1][2][1].filename, possible_args)
@unittest.skipUnless(hasattr(os, 'chmod'), 'requires os.chmod()')
@unittest.skipIf(sys.platform[:6] == 'cygwin',
"This test can't be run on Cygwin (issue #1071513).")
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
"This test can't be run reliably as root (issue #1076467).")
def test_on_error(self):
self.errorState = 0
os.mkdir(TESTFN)
self.addCleanup(shutil.rmtree, TESTFN)
self.child_file_path = os.path.join(TESTFN, 'a')
self.child_dir_path = os.path.join(TESTFN, 'b')
support.create_empty_file(self.child_file_path)
os.mkdir(self.child_dir_path)
old_dir_mode = os.stat(TESTFN).st_mode
old_child_file_mode = os.stat(self.child_file_path).st_mode
old_child_dir_mode = os.stat(self.child_dir_path).st_mode
# Make unwritable.
new_mode = stat.S_IREAD|stat.S_IEXEC
os.chmod(self.child_file_path, new_mode)
os.chmod(self.child_dir_path, new_mode)
os.chmod(TESTFN, new_mode)
self.addCleanup(os.chmod, TESTFN, old_dir_mode)
self.addCleanup(os.chmod, self.child_file_path, old_child_file_mode)
self.addCleanup(os.chmod, self.child_dir_path, old_child_dir_mode)
shutil.rmtree(TESTFN, onerror=self.check_args_to_onerror)
# Test whether onerror has actually been called.
self.assertEqual(self.errorState, 3,
"Expected call to onerror function did not happen.")
def check_args_to_onerror(self, func, arg, exc):
# test_rmtree_errors deliberately runs rmtree
# on a directory that is chmod 500, which will fail.
# This function is run when shutil.rmtree fails.
# 99.9% of the time it initially fails to remove
# a file in the directory, so the first time through
# func is os.remove.
# However, some Linux machines running ZFS on
# FUSE experienced a failure earlier in the process
# at os.listdir. The first failure may legally
# be either.
if self.errorState < 2:
if func is os.unlink:
self.assertEqual(arg, self.child_file_path)
elif func is os.rmdir:
self.assertEqual(arg, self.child_dir_path)
else:
self.assertIs(func, os.listdir)
self.assertIn(arg, [TESTFN, self.child_dir_path])
self.assertTrue(issubclass(exc[0], OSError))
self.errorState += 1
else:
self.assertEqual(func, os.rmdir)
self.assertEqual(arg, TESTFN)
self.assertTrue(issubclass(exc[0], OSError))
self.errorState = 3
def test_rmtree_does_not_choke_on_failing_lstat(self):
try:
orig_lstat = os.lstat
def raiser(fn, *args, **kwargs):
if fn != TESTFN:
raise OSError()
else:
return orig_lstat(fn)
os.lstat = raiser
os.mkdir(TESTFN)
write_file((TESTFN, 'foo'), 'foo')
shutil.rmtree(TESTFN)
finally:
os.lstat = orig_lstat
@unittest.skipUnless(hasattr(os, 'chmod'), 'requires os.chmod')
@support.skip_unless_symlink
def test_copymode_follow_symlinks(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
dst = os.path.join(tmp_dir, 'bar')
src_link = os.path.join(tmp_dir, 'baz')
dst_link = os.path.join(tmp_dir, 'quux')
write_file(src, 'foo')
write_file(dst, 'foo')
os.symlink(src, src_link)
os.symlink(dst, dst_link)
os.chmod(src, stat.S_IRWXU|stat.S_IRWXG)
# file to file
os.chmod(dst, stat.S_IRWXO)
self.assertNotEqual(os.stat(src).st_mode, os.stat(dst).st_mode)
shutil.copymode(src, dst)
self.assertEqual(os.stat(src).st_mode, os.stat(dst).st_mode)
# On Windows, os.chmod does not follow symlinks (issue #15411)
if os.name != 'nt':
# follow src link
os.chmod(dst, stat.S_IRWXO)
shutil.copymode(src_link, dst)
self.assertEqual(os.stat(src).st_mode, os.stat(dst).st_mode)
# follow dst link
os.chmod(dst, stat.S_IRWXO)
shutil.copymode(src, dst_link)
self.assertEqual(os.stat(src).st_mode, os.stat(dst).st_mode)
# follow both links
os.chmod(dst, stat.S_IRWXO)
shutil.copymode(src_link, dst_link)
self.assertEqual(os.stat(src).st_mode, os.stat(dst).st_mode)
@unittest.skipUnless(hasattr(os, 'lchmod'), 'requires os.lchmod')
@support.skip_unless_symlink
def test_copymode_symlink_to_symlink(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
dst = os.path.join(tmp_dir, 'bar')
src_link = os.path.join(tmp_dir, 'baz')
dst_link = os.path.join(tmp_dir, 'quux')
write_file(src, 'foo')
write_file(dst, 'foo')
os.symlink(src, src_link)
os.symlink(dst, dst_link)
os.chmod(src, stat.S_IRWXU|stat.S_IRWXG)
os.chmod(dst, stat.S_IRWXU)
os.lchmod(src_link, stat.S_IRWXO|stat.S_IRWXG)
# link to link
os.lchmod(dst_link, stat.S_IRWXO)
shutil.copymode(src_link, dst_link, follow_symlinks=False)
self.assertEqual(os.lstat(src_link).st_mode,
os.lstat(dst_link).st_mode)
self.assertNotEqual(os.stat(src).st_mode, os.stat(dst).st_mode)
# src link - use chmod
os.lchmod(dst_link, stat.S_IRWXO)
shutil.copymode(src_link, dst, follow_symlinks=False)
self.assertEqual(os.stat(src).st_mode, os.stat(dst).st_mode)
# dst link - use chmod
os.lchmod(dst_link, stat.S_IRWXO)
shutil.copymode(src, dst_link, follow_symlinks=False)
self.assertEqual(os.stat(src).st_mode, os.stat(dst).st_mode)
@unittest.skipIf(hasattr(os, 'lchmod'), 'requires os.lchmod to be missing')
@support.skip_unless_symlink
def test_copymode_symlink_to_symlink_wo_lchmod(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
dst = os.path.join(tmp_dir, 'bar')
src_link = os.path.join(tmp_dir, 'baz')
dst_link = os.path.join(tmp_dir, 'quux')
write_file(src, 'foo')
write_file(dst, 'foo')
os.symlink(src, src_link)
os.symlink(dst, dst_link)
shutil.copymode(src_link, dst_link, follow_symlinks=False) # silent fail
@support.skip_unless_symlink
def test_copystat_symlinks(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
dst = os.path.join(tmp_dir, 'bar')
src_link = os.path.join(tmp_dir, 'baz')
dst_link = os.path.join(tmp_dir, 'qux')
write_file(src, 'foo')
src_stat = os.stat(src)
os.utime(src, (src_stat.st_atime,
src_stat.st_mtime - 42.0)) # ensure different mtimes
write_file(dst, 'bar')
self.assertNotEqual(os.stat(src).st_mtime, os.stat(dst).st_mtime)
os.symlink(src, src_link)
os.symlink(dst, dst_link)
if hasattr(os, 'lchmod'):
os.lchmod(src_link, stat.S_IRWXO)
if hasattr(os, 'lchflags') and hasattr(stat, 'UF_NODUMP'):
os.lchflags(src_link, stat.UF_NODUMP)
src_link_stat = os.lstat(src_link)
# follow
if hasattr(os, 'lchmod'):
shutil.copystat(src_link, dst_link, follow_symlinks=True)
self.assertNotEqual(src_link_stat.st_mode, os.stat(dst).st_mode)
# don't follow
shutil.copystat(src_link, dst_link, follow_symlinks=False)
dst_link_stat = os.lstat(dst_link)
if os.utime in os.supports_follow_symlinks:
for attr in 'st_atime', 'st_mtime':
# The modification times may be truncated in the new file.
self.assertLessEqual(getattr(src_link_stat, attr),
getattr(dst_link_stat, attr) + 1)
if hasattr(os, 'lchmod'):
self.assertEqual(src_link_stat.st_mode, dst_link_stat.st_mode)
if hasattr(os, 'lchflags') and hasattr(src_link_stat, 'st_flags'):
self.assertEqual(src_link_stat.st_flags, dst_link_stat.st_flags)
# tell to follow but dst is not a link
shutil.copystat(src_link, dst, follow_symlinks=False)
        self.assertTrue(abs(os.stat(src).st_mtime - os.stat(dst).st_mtime) < 0.1)
@unittest.skipUnless(hasattr(os, 'chflags') and
hasattr(errno, 'EOPNOTSUPP') and
hasattr(errno, 'ENOTSUP'),
"requires os.chflags, EOPNOTSUPP & ENOTSUP")
def test_copystat_handles_harmless_chflags_errors(self):
tmpdir = self.mkdtemp()
file1 = os.path.join(tmpdir, 'file1')
file2 = os.path.join(tmpdir, 'file2')
write_file(file1, 'xxx')
write_file(file2, 'xxx')
def make_chflags_raiser(err):
ex = OSError()
def _chflags_raiser(path, flags, *, follow_symlinks=True):
ex.errno = err
raise ex
return _chflags_raiser
old_chflags = os.chflags
try:
for err in errno.EOPNOTSUPP, errno.ENOTSUP:
os.chflags = make_chflags_raiser(err)
shutil.copystat(file1, file2)
            # assert other errors break it
os.chflags = make_chflags_raiser(errno.EOPNOTSUPP + errno.ENOTSUP)
self.assertRaises(OSError, shutil.copystat, file1, file2)
finally:
os.chflags = old_chflags
@support.skip_unless_xattr
def test_copyxattr(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
write_file(src, 'foo')
dst = os.path.join(tmp_dir, 'bar')
write_file(dst, 'bar')
# no xattr == no problem
shutil._copyxattr(src, dst)
# common case
os.setxattr(src, 'user.foo', b'42')
os.setxattr(src, 'user.bar', b'43')
shutil._copyxattr(src, dst)
self.assertEqual(sorted(os.listxattr(src)), sorted(os.listxattr(dst)))
self.assertEqual(
os.getxattr(src, 'user.foo'),
os.getxattr(dst, 'user.foo'))
# check errors don't affect other attrs
os.remove(dst)
write_file(dst, 'bar')
os_error = OSError(errno.EPERM, 'EPERM')
def _raise_on_user_foo(fname, attr, val, **kwargs):
if attr == 'user.foo':
raise os_error
else:
orig_setxattr(fname, attr, val, **kwargs)
try:
orig_setxattr = os.setxattr
os.setxattr = _raise_on_user_foo
shutil._copyxattr(src, dst)
self.assertIn('user.bar', os.listxattr(dst))
finally:
os.setxattr = orig_setxattr
# the source filesystem not supporting xattrs should be ok, too.
def _raise_on_src(fname, *, follow_symlinks=True):
if fname == src:
raise OSError(errno.ENOTSUP, 'Operation not supported')
return orig_listxattr(fname, follow_symlinks=follow_symlinks)
try:
orig_listxattr = os.listxattr
os.listxattr = _raise_on_src
shutil._copyxattr(src, dst)
finally:
os.listxattr = orig_listxattr
# test that shutil.copystat copies xattrs
src = os.path.join(tmp_dir, 'the_original')
write_file(src, src)
os.setxattr(src, 'user.the_value', b'fiddly')
dst = os.path.join(tmp_dir, 'the_copy')
write_file(dst, dst)
shutil.copystat(src, dst)
self.assertEqual(os.getxattr(dst, 'user.the_value'), b'fiddly')
@support.skip_unless_symlink
@support.skip_unless_xattr
@unittest.skipUnless(hasattr(os, 'geteuid') and os.geteuid() == 0,
'root privileges required')
def test_copyxattr_symlinks(self):
        # On Linux, it's only possible to access non-user xattrs for symlinks,
        # which in turn requires root privileges. This test should be expanded
        # as soon as other platforms gain support for extended attributes.
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
src_link = os.path.join(tmp_dir, 'baz')
write_file(src, 'foo')
os.symlink(src, src_link)
os.setxattr(src, 'trusted.foo', b'42')
os.setxattr(src_link, 'trusted.foo', b'43', follow_symlinks=False)
dst = os.path.join(tmp_dir, 'bar')
dst_link = os.path.join(tmp_dir, 'qux')
write_file(dst, 'bar')
os.symlink(dst, dst_link)
shutil._copyxattr(src_link, dst_link, follow_symlinks=False)
self.assertEqual(os.getxattr(dst_link, 'trusted.foo', follow_symlinks=False), b'43')
self.assertRaises(OSError, os.getxattr, dst, 'trusted.foo')
shutil._copyxattr(src_link, dst, follow_symlinks=False)
self.assertEqual(os.getxattr(dst, 'trusted.foo'), b'43')
@support.skip_unless_symlink
def test_copy_symlinks(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
dst = os.path.join(tmp_dir, 'bar')
src_link = os.path.join(tmp_dir, 'baz')
write_file(src, 'foo')
os.symlink(src, src_link)
if hasattr(os, 'lchmod'):
os.lchmod(src_link, stat.S_IRWXU | stat.S_IRWXO)
# don't follow
shutil.copy(src_link, dst, follow_symlinks=True)
self.assertFalse(os.path.islink(dst))
self.assertEqual(read_file(src), read_file(dst))
os.remove(dst)
# follow
shutil.copy(src_link, dst, follow_symlinks=False)
self.assertTrue(os.path.islink(dst))
self.assertEqual(os.readlink(dst), os.readlink(src_link))
if hasattr(os, 'lchmod'):
self.assertEqual(os.lstat(src_link).st_mode,
os.lstat(dst).st_mode)
@support.skip_unless_symlink
def test_copy2_symlinks(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
dst = os.path.join(tmp_dir, 'bar')
src_link = os.path.join(tmp_dir, 'baz')
write_file(src, 'foo')
os.symlink(src, src_link)
if hasattr(os, 'lchmod'):
os.lchmod(src_link, stat.S_IRWXU | stat.S_IRWXO)
if hasattr(os, 'lchflags') and hasattr(stat, 'UF_NODUMP'):
os.lchflags(src_link, stat.UF_NODUMP)
src_stat = os.stat(src)
src_link_stat = os.lstat(src_link)
# follow
shutil.copy2(src_link, dst, follow_symlinks=True)
self.assertFalse(os.path.islink(dst))
self.assertEqual(read_file(src), read_file(dst))
os.remove(dst)
# don't follow
shutil.copy2(src_link, dst, follow_symlinks=False)
self.assertTrue(os.path.islink(dst))
self.assertEqual(os.readlink(dst), os.readlink(src_link))
dst_stat = os.lstat(dst)
if os.utime in os.supports_follow_symlinks:
for attr in 'st_atime', 'st_mtime':
# The modification times may be truncated in the new file.
self.assertLessEqual(getattr(src_link_stat, attr),
getattr(dst_stat, attr) + 1)
if hasattr(os, 'lchmod'):
self.assertEqual(src_link_stat.st_mode, dst_stat.st_mode)
self.assertNotEqual(src_stat.st_mode, dst_stat.st_mode)
if hasattr(os, 'lchflags') and hasattr(src_link_stat, 'st_flags'):
self.assertEqual(src_link_stat.st_flags, dst_stat.st_flags)
@support.skip_unless_xattr
def test_copy2_xattr(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
dst = os.path.join(tmp_dir, 'bar')
write_file(src, 'foo')
os.setxattr(src, 'user.foo', b'42')
shutil.copy2(src, dst)
self.assertEqual(
os.getxattr(src, 'user.foo'),
os.getxattr(dst, 'user.foo'))
os.remove(dst)
@support.skip_unless_symlink
def test_copyfile_symlinks(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'src')
dst = os.path.join(tmp_dir, 'dst')
dst_link = os.path.join(tmp_dir, 'dst_link')
link = os.path.join(tmp_dir, 'link')
write_file(src, 'foo')
os.symlink(src, link)
# don't follow
shutil.copyfile(link, dst_link, follow_symlinks=False)
self.assertTrue(os.path.islink(dst_link))
self.assertEqual(os.readlink(link), os.readlink(dst_link))
# follow
shutil.copyfile(link, dst)
self.assertFalse(os.path.islink(dst))
def test_rmtree_uses_safe_fd_version_if_available(self):
_use_fd_functions = ({os.open, os.stat, os.unlink, os.rmdir} <=
os.supports_dir_fd and
os.listdir in os.supports_fd and
os.stat in os.supports_follow_symlinks)
if _use_fd_functions:
self.assertTrue(shutil._use_fd_functions)
self.assertTrue(shutil.rmtree.avoids_symlink_attacks)
tmp_dir = self.mkdtemp()
d = os.path.join(tmp_dir, 'a')
os.mkdir(d)
try:
real_rmtree = shutil._rmtree_safe_fd
class Called(Exception): pass
def _raiser(*args, **kwargs):
raise Called
shutil._rmtree_safe_fd = _raiser
self.assertRaises(Called, shutil.rmtree, d)
finally:
shutil._rmtree_safe_fd = real_rmtree
else:
self.assertFalse(shutil._use_fd_functions)
self.assertFalse(shutil.rmtree.avoids_symlink_attacks)
def test_rmtree_dont_delete_file(self):
# When called on a file instead of a directory, don't delete it.
handle, path = tempfile.mkstemp()
os.close(handle)
self.assertRaises(NotADirectoryError, shutil.rmtree, path)
os.remove(path)
def test_copytree_simple(self):
src_dir = tempfile.mkdtemp()
dst_dir = os.path.join(tempfile.mkdtemp(), 'destination')
self.addCleanup(shutil.rmtree, src_dir)
self.addCleanup(shutil.rmtree, os.path.dirname(dst_dir))
write_file((src_dir, 'test.txt'), '123')
os.mkdir(os.path.join(src_dir, 'test_dir'))
write_file((src_dir, 'test_dir', 'test.txt'), '456')
shutil.copytree(src_dir, dst_dir)
self.assertTrue(os.path.isfile(os.path.join(dst_dir, 'test.txt')))
self.assertTrue(os.path.isdir(os.path.join(dst_dir, 'test_dir')))
self.assertTrue(os.path.isfile(os.path.join(dst_dir, 'test_dir',
'test.txt')))
actual = read_file((dst_dir, 'test.txt'))
self.assertEqual(actual, '123')
actual = read_file((dst_dir, 'test_dir', 'test.txt'))
self.assertEqual(actual, '456')
@support.skip_unless_symlink
def test_copytree_symlinks(self):
tmp_dir = self.mkdtemp()
src_dir = os.path.join(tmp_dir, 'src')
dst_dir = os.path.join(tmp_dir, 'dst')
sub_dir = os.path.join(src_dir, 'sub')
os.mkdir(src_dir)
os.mkdir(sub_dir)
write_file((src_dir, 'file.txt'), 'foo')
src_link = os.path.join(sub_dir, 'link')
dst_link = os.path.join(dst_dir, 'sub/link')
os.symlink(os.path.join(src_dir, 'file.txt'),
src_link)
if hasattr(os, 'lchmod'):
os.lchmod(src_link, stat.S_IRWXU | stat.S_IRWXO)
if hasattr(os, 'lchflags') and hasattr(stat, 'UF_NODUMP'):
os.lchflags(src_link, stat.UF_NODUMP)
src_stat = os.lstat(src_link)
shutil.copytree(src_dir, dst_dir, symlinks=True)
self.assertTrue(os.path.islink(os.path.join(dst_dir, 'sub', 'link')))
self.assertEqual(os.readlink(os.path.join(dst_dir, 'sub', 'link')),
os.path.join(src_dir, 'file.txt'))
dst_stat = os.lstat(dst_link)
if hasattr(os, 'lchmod'):
self.assertEqual(dst_stat.st_mode, src_stat.st_mode)
if hasattr(os, 'lchflags'):
self.assertEqual(dst_stat.st_flags, src_stat.st_flags)
def test_copytree_with_exclude(self):
# creating data
join = os.path.join
exists = os.path.exists
src_dir = tempfile.mkdtemp()
try:
dst_dir = join(tempfile.mkdtemp(), 'destination')
write_file((src_dir, 'test.txt'), '123')
write_file((src_dir, 'test.tmp'), '123')
os.mkdir(join(src_dir, 'test_dir'))
write_file((src_dir, 'test_dir', 'test.txt'), '456')
os.mkdir(join(src_dir, 'test_dir2'))
write_file((src_dir, 'test_dir2', 'test.txt'), '456')
os.mkdir(join(src_dir, 'test_dir2', 'subdir'))
os.mkdir(join(src_dir, 'test_dir2', 'subdir2'))
write_file((src_dir, 'test_dir2', 'subdir', 'test.txt'), '456')
write_file((src_dir, 'test_dir2', 'subdir2', 'test.py'), '456')
# testing glob-like patterns
try:
patterns = shutil.ignore_patterns('*.tmp', 'test_dir2')
shutil.copytree(src_dir, dst_dir, ignore=patterns)
# checking the result: some elements should not be copied
self.assertTrue(exists(join(dst_dir, 'test.txt')))
self.assertFalse(exists(join(dst_dir, 'test.tmp')))
self.assertFalse(exists(join(dst_dir, 'test_dir2')))
finally:
shutil.rmtree(dst_dir)
try:
patterns = shutil.ignore_patterns('*.tmp', 'subdir*')
shutil.copytree(src_dir, dst_dir, ignore=patterns)
# checking the result: some elements should not be copied
self.assertFalse(exists(join(dst_dir, 'test.tmp')))
self.assertFalse(exists(join(dst_dir, 'test_dir2', 'subdir2')))
self.assertFalse(exists(join(dst_dir, 'test_dir2', 'subdir')))
finally:
shutil.rmtree(dst_dir)
# testing callable-style
try:
def _filter(src, names):
res = []
for name in names:
path = os.path.join(src, name)
                    if (os.path.isdir(path) and
                            os.path.basename(path) == 'subdir'):
                        res.append(name)
                    # ('.py',) is a one-element tuple; a bare ('.py') would
                    # make this a substring check on the string '.py'.
                    elif os.path.splitext(path)[-1] in ('.py',):
res.append(name)
return res
shutil.copytree(src_dir, dst_dir, ignore=_filter)
# checking the result: some elements should not be copied
self.assertFalse(exists(join(dst_dir, 'test_dir2', 'subdir2',
'test.py')))
self.assertFalse(exists(join(dst_dir, 'test_dir2', 'subdir')))
finally:
shutil.rmtree(dst_dir)
finally:
shutil.rmtree(src_dir)
shutil.rmtree(os.path.dirname(dst_dir))
def test_copytree_retains_permissions(self):
tmp_dir = tempfile.mkdtemp()
src_dir = os.path.join(tmp_dir, 'source')
os.mkdir(src_dir)
dst_dir = os.path.join(tmp_dir, 'destination')
self.addCleanup(shutil.rmtree, tmp_dir)
os.chmod(src_dir, 0o777)
write_file((src_dir, 'permissive.txt'), '123')
os.chmod(os.path.join(src_dir, 'permissive.txt'), 0o777)
write_file((src_dir, 'restrictive.txt'), '456')
os.chmod(os.path.join(src_dir, 'restrictive.txt'), 0o600)
restrictive_subdir = tempfile.mkdtemp(dir=src_dir)
os.chmod(restrictive_subdir, 0o600)
shutil.copytree(src_dir, dst_dir)
self.assertEqual(os.stat(src_dir).st_mode, os.stat(dst_dir).st_mode)
self.assertEqual(os.stat(os.path.join(src_dir, 'permissive.txt')).st_mode,
os.stat(os.path.join(dst_dir, 'permissive.txt')).st_mode)
self.assertEqual(os.stat(os.path.join(src_dir, 'restrictive.txt')).st_mode,
os.stat(os.path.join(dst_dir, 'restrictive.txt')).st_mode)
restrictive_subdir_dst = os.path.join(dst_dir,
os.path.split(restrictive_subdir)[1])
self.assertEqual(os.stat(restrictive_subdir).st_mode,
os.stat(restrictive_subdir_dst).st_mode)
@unittest.mock.patch('os.chmod')
def test_copytree_winerror(self, mock_patch):
# When copying to VFAT, copystat() raises OSError. On Windows, the
# exception object has a meaningful 'winerror' attribute, but not
# on other operating systems. Do not assume 'winerror' is set.
src_dir = tempfile.mkdtemp()
dst_dir = os.path.join(tempfile.mkdtemp(), 'destination')
self.addCleanup(shutil.rmtree, src_dir)
self.addCleanup(shutil.rmtree, os.path.dirname(dst_dir))
mock_patch.side_effect = PermissionError('ka-boom')
with self.assertRaises(shutil.Error):
shutil.copytree(src_dir, dst_dir)
@unittest.skipIf(os.name == 'nt', 'temporarily disabled on Windows')
@unittest.skipUnless(hasattr(os, 'link'), 'requires os.link')
def test_dont_copy_file_onto_link_to_itself(self):
# bug 851123.
os.mkdir(TESTFN)
src = os.path.join(TESTFN, 'cheese')
dst = os.path.join(TESTFN, 'shop')
try:
with open(src, 'w') as f:
f.write('cheddar')
os.link(src, dst)
self.assertRaises(shutil.SameFileError, shutil.copyfile, src, dst)
with open(src, 'r') as f:
self.assertEqual(f.read(), 'cheddar')
os.remove(dst)
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
@support.skip_unless_symlink
def test_dont_copy_file_onto_symlink_to_itself(self):
# bug 851123.
os.mkdir(TESTFN)
src = os.path.join(TESTFN, 'cheese')
dst = os.path.join(TESTFN, 'shop')
try:
with open(src, 'w') as f:
f.write('cheddar')
# Using `src` here would mean we end up with a symlink pointing
# to TESTFN/TESTFN/cheese, while it should point at
# TESTFN/cheese.
os.symlink('cheese', dst)
self.assertRaises(shutil.SameFileError, shutil.copyfile, src, dst)
with open(src, 'r') as f:
self.assertEqual(f.read(), 'cheddar')
os.remove(dst)
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
@support.skip_unless_symlink
def test_rmtree_on_symlink(self):
# bug 1669.
os.mkdir(TESTFN)
try:
src = os.path.join(TESTFN, 'cheese')
dst = os.path.join(TESTFN, 'shop')
os.mkdir(src)
os.symlink(src, dst)
self.assertRaises(OSError, shutil.rmtree, dst)
shutil.rmtree(dst, ignore_errors=True)
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
# Issue #3002: copyfile and copytree block indefinitely on named pipes
@unittest.skipUnless(hasattr(os, "mkfifo"), 'requires os.mkfifo()')
def test_copyfile_named_pipe(self):
os.mkfifo(TESTFN)
try:
self.assertRaises(shutil.SpecialFileError,
shutil.copyfile, TESTFN, TESTFN2)
self.assertRaises(shutil.SpecialFileError,
shutil.copyfile, __file__, TESTFN)
finally:
os.remove(TESTFN)
@unittest.skipUnless(hasattr(os, "mkfifo"), 'requires os.mkfifo()')
@support.skip_unless_symlink
def test_copytree_named_pipe(self):
os.mkdir(TESTFN)
try:
subdir = os.path.join(TESTFN, "subdir")
os.mkdir(subdir)
pipe = os.path.join(subdir, "mypipe")
os.mkfifo(pipe)
try:
shutil.copytree(TESTFN, TESTFN2)
except shutil.Error as e:
errors = e.args[0]
self.assertEqual(len(errors), 1)
src, dst, error_msg = errors[0]
self.assertEqual("`%s` is a named pipe" % pipe, error_msg)
else:
self.fail("shutil.Error should have been raised")
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
shutil.rmtree(TESTFN2, ignore_errors=True)
def test_copytree_special_func(self):
src_dir = self.mkdtemp()
dst_dir = os.path.join(self.mkdtemp(), 'destination')
write_file((src_dir, 'test.txt'), '123')
os.mkdir(os.path.join(src_dir, 'test_dir'))
write_file((src_dir, 'test_dir', 'test.txt'), '456')
copied = []
def _copy(src, dst):
copied.append((src, dst))
shutil.copytree(src_dir, dst_dir, copy_function=_copy)
self.assertEqual(len(copied), 2)
@support.skip_unless_symlink
def test_copytree_dangling_symlinks(self):
# a dangling symlink raises an error at the end
src_dir = self.mkdtemp()
dst_dir = os.path.join(self.mkdtemp(), 'destination')
os.symlink('IDONTEXIST', os.path.join(src_dir, 'test.txt'))
os.mkdir(os.path.join(src_dir, 'test_dir'))
write_file((src_dir, 'test_dir', 'test.txt'), '456')
self.assertRaises(Error, shutil.copytree, src_dir, dst_dir)
# a dangling symlink is ignored with the proper flag
dst_dir = os.path.join(self.mkdtemp(), 'destination2')
shutil.copytree(src_dir, dst_dir, ignore_dangling_symlinks=True)
self.assertNotIn('test.txt', os.listdir(dst_dir))
# a dangling symlink is copied if symlinks=True
dst_dir = os.path.join(self.mkdtemp(), 'destination3')
shutil.copytree(src_dir, dst_dir, symlinks=True)
self.assertIn('test.txt', os.listdir(dst_dir))
@support.skip_unless_symlink
def test_copytree_symlink_dir(self):
src_dir = self.mkdtemp()
dst_dir = os.path.join(self.mkdtemp(), 'destination')
os.mkdir(os.path.join(src_dir, 'real_dir'))
with open(os.path.join(src_dir, 'real_dir', 'test.txt'), 'w'):
pass
os.symlink(os.path.join(src_dir, 'real_dir'),
os.path.join(src_dir, 'link_to_dir'),
target_is_directory=True)
shutil.copytree(src_dir, dst_dir, symlinks=False)
self.assertFalse(os.path.islink(os.path.join(dst_dir, 'link_to_dir')))
self.assertIn('test.txt', os.listdir(os.path.join(dst_dir, 'link_to_dir')))
dst_dir = os.path.join(self.mkdtemp(), 'destination2')
shutil.copytree(src_dir, dst_dir, symlinks=True)
self.assertTrue(os.path.islink(os.path.join(dst_dir, 'link_to_dir')))
self.assertIn('test.txt', os.listdir(os.path.join(dst_dir, 'link_to_dir')))
def _copy_file(self, method):
fname = 'test.txt'
tmpdir = self.mkdtemp()
write_file((tmpdir, fname), 'xxx')
file1 = os.path.join(tmpdir, fname)
tmpdir2 = self.mkdtemp()
method(file1, tmpdir2)
file2 = os.path.join(tmpdir2, fname)
return (file1, file2)
@unittest.skipUnless(hasattr(os, 'chmod'), 'requires os.chmod')
def test_copy(self):
# Ensure that the copied file exists and has the same mode bits.
file1, file2 = self._copy_file(shutil.copy)
self.assertTrue(os.path.exists(file2))
self.assertEqual(os.stat(file1).st_mode, os.stat(file2).st_mode)
@unittest.skipUnless(hasattr(os, 'chmod'), 'requires os.chmod')
@unittest.skipUnless(hasattr(os, 'utime'), 'requires os.utime')
def test_copy2(self):
# Ensure that the copied file exists and has the same mode and
# modification time bits.
file1, file2 = self._copy_file(shutil.copy2)
self.assertTrue(os.path.exists(file2))
file1_stat = os.stat(file1)
file2_stat = os.stat(file2)
self.assertEqual(file1_stat.st_mode, file2_stat.st_mode)
for attr in 'st_atime', 'st_mtime':
# The modification times may be truncated in the new file.
self.assertLessEqual(getattr(file1_stat, attr),
getattr(file2_stat, attr) + 1)
if hasattr(os, 'chflags') and hasattr(file1_stat, 'st_flags'):
self.assertEqual(getattr(file1_stat, 'st_flags'),
getattr(file2_stat, 'st_flags'))
@requires_zlib
def test_make_tarball(self):
# creating something to tar
root_dir, base_dir = self._create_files('')
tmpdir2 = self.mkdtemp()
# force shutil to create the directory
os.rmdir(tmpdir2)
# working with relative paths
work_dir = os.path.dirname(tmpdir2)
rel_base_name = os.path.join(os.path.basename(tmpdir2), 'archive')
with support.change_cwd(work_dir):
base_name = os.path.abspath(rel_base_name)
tarball = make_archive(rel_base_name, 'gztar', root_dir, '.')
# check if the compressed tarball was created
self.assertEqual(tarball, base_name + '.tar.gz')
self.assertTrue(os.path.isfile(tarball))
self.assertTrue(tarfile.is_tarfile(tarball))
with tarfile.open(tarball, 'r:gz') as tf:
self.assertCountEqual(tf.getnames(),
['.', './sub', './sub2',
'./file1', './file2', './sub/file3'])
# trying an uncompressed one
with support.change_cwd(work_dir):
tarball = make_archive(rel_base_name, 'tar', root_dir, '.')
self.assertEqual(tarball, base_name + '.tar')
self.assertTrue(os.path.isfile(tarball))
self.assertTrue(tarfile.is_tarfile(tarball))
with tarfile.open(tarball, 'r') as tf:
self.assertCountEqual(tf.getnames(),
['.', './sub', './sub2',
'./file1', './file2', './sub/file3'])
def _tarinfo(self, path):
with tarfile.open(path) as tar:
names = tar.getnames()
names.sort()
return tuple(names)
def _create_files(self, base_dir='dist'):
# creating something to tar
root_dir = self.mkdtemp()
dist = os.path.join(root_dir, base_dir)
os.makedirs(dist, exist_ok=True)
write_file((dist, 'file1'), 'xxx')
write_file((dist, 'file2'), 'xxx')
os.mkdir(os.path.join(dist, 'sub'))
write_file((dist, 'sub', 'file3'), 'xxx')
os.mkdir(os.path.join(dist, 'sub2'))
if base_dir:
write_file((root_dir, 'outer'), 'xxx')
return root_dir, base_dir
@requires_zlib
@unittest.skipUnless(shutil.which('tar'),
'Need the tar command to run')
def test_tarfile_vs_tar(self):
root_dir, base_dir = self._create_files()
base_name = os.path.join(self.mkdtemp(), 'archive')
tarball = make_archive(base_name, 'gztar', root_dir, base_dir)
# check if the compressed tarball was created
self.assertEqual(tarball, base_name + '.tar.gz')
self.assertTrue(os.path.isfile(tarball))
# now create another tarball using `tar`
tarball2 = os.path.join(root_dir, 'archive2.tar')
tar_cmd = ['tar', '-cf', 'archive2.tar', base_dir]
subprocess.check_call(tar_cmd, cwd=root_dir,
stdout=subprocess.DEVNULL)
self.assertTrue(os.path.isfile(tarball2))
# let's compare both tarballs
self.assertEqual(self._tarinfo(tarball), self._tarinfo(tarball2))
# trying an uncompressed one
tarball = make_archive(base_name, 'tar', root_dir, base_dir)
self.assertEqual(tarball, base_name + '.tar')
self.assertTrue(os.path.isfile(tarball))
# now for a dry_run
tarball = make_archive(base_name, 'tar', root_dir, base_dir,
dry_run=True)
self.assertEqual(tarball, base_name + '.tar')
self.assertTrue(os.path.isfile(tarball))
@requires_zlib
@unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run')
def test_make_zipfile(self):
# creating something to zip
root_dir, base_dir = self._create_files()
tmpdir2 = self.mkdtemp()
# force shutil to create the directory
os.rmdir(tmpdir2)
# working with relative paths
work_dir = os.path.dirname(tmpdir2)
rel_base_name = os.path.join(os.path.basename(tmpdir2), 'archive')
with support.change_cwd(work_dir):
base_name = os.path.abspath(rel_base_name)
res = make_archive(rel_base_name, 'zip', root_dir, base_dir)
self.assertEqual(res, base_name + '.zip')
self.assertTrue(os.path.isfile(res))
self.assertTrue(zipfile.is_zipfile(res))
with zipfile.ZipFile(res) as zf:
self.assertCountEqual(zf.namelist(),
['dist/', 'dist/sub/', 'dist/sub2/',
'dist/file1', 'dist/file2', 'dist/sub/file3'])
@requires_zlib
@unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run')
@unittest.skipUnless(shutil.which('zip'),
'Need the zip command to run')
def test_zipfile_vs_zip(self):
root_dir, base_dir = self._create_files()
base_name = os.path.join(self.mkdtemp(), 'archive')
archive = make_archive(base_name, 'zip', root_dir, base_dir)
# check if ZIP file was created
self.assertEqual(archive, base_name + '.zip')
self.assertTrue(os.path.isfile(archive))
# now create another ZIP file using `zip`
archive2 = os.path.join(root_dir, 'archive2.zip')
zip_cmd = ['zip', '-q', '-r', 'archive2.zip', base_dir]
subprocess.check_call(zip_cmd, cwd=root_dir,
stdout=subprocess.DEVNULL)
self.assertTrue(os.path.isfile(archive2))
# let's compare both ZIP files
with zipfile.ZipFile(archive) as zf:
names = zf.namelist()
with zipfile.ZipFile(archive2) as zf:
names2 = zf.namelist()
self.assertEqual(sorted(names), sorted(names2))
@requires_zlib
@unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run')
@unittest.skipUnless(shutil.which('unzip'),
'Need the unzip command to run')
def test_unzip_zipfile(self):
root_dir, base_dir = self._create_files()
base_name = os.path.join(self.mkdtemp(), 'archive')
archive = make_archive(base_name, 'zip', root_dir, base_dir)
# check if ZIP file was created
self.assertEqual(archive, base_name + '.zip')
self.assertTrue(os.path.isfile(archive))
# now check the ZIP file using `unzip -t`
zip_cmd = ['unzip', '-t', archive]
with support.change_cwd(root_dir):
try:
subprocess.check_output(zip_cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
details = exc.output.decode(errors="replace")
msg = "{}\n\n**Unzip Output**\n{}"
self.fail(msg.format(exc, details))
def test_make_archive(self):
tmpdir = self.mkdtemp()
base_name = os.path.join(tmpdir, 'archive')
self.assertRaises(ValueError, make_archive, base_name, 'xxx')
@requires_zlib
def test_make_archive_owner_group(self):
        # testing make_archive with owner and group, with various combinations
        # this works even if there's no gid/uid support
if UID_GID_SUPPORT:
group = grp.getgrgid(0)[0]
owner = pwd.getpwuid(0)[0]
else:
group = owner = 'root'
root_dir, base_dir = self._create_files()
base_name = os.path.join(self.mkdtemp(), 'archive')
res = make_archive(base_name, 'zip', root_dir, base_dir, owner=owner,
group=group)
self.assertTrue(os.path.isfile(res))
res = make_archive(base_name, 'zip', root_dir, base_dir)
self.assertTrue(os.path.isfile(res))
res = make_archive(base_name, 'tar', root_dir, base_dir,
owner=owner, group=group)
self.assertTrue(os.path.isfile(res))
res = make_archive(base_name, 'tar', root_dir, base_dir,
owner='kjhkjhkjg', group='oihohoh')
self.assertTrue(os.path.isfile(res))
@requires_zlib
@unittest.skipUnless(UID_GID_SUPPORT, "Requires grp and pwd support")
def test_tarfile_root_owner(self):
root_dir, base_dir = self._create_files()
base_name = os.path.join(self.mkdtemp(), 'archive')
group = grp.getgrgid(0)[0]
owner = pwd.getpwuid(0)[0]
with support.change_cwd(root_dir):
archive_name = make_archive(base_name, 'gztar', root_dir, 'dist',
owner=owner, group=group)
# check if the compressed tarball was created
self.assertTrue(os.path.isfile(archive_name))
# now checks the rights
archive = tarfile.open(archive_name)
try:
for member in archive.getmembers():
self.assertEqual(member.uid, 0)
self.assertEqual(member.gid, 0)
finally:
archive.close()
def test_make_archive_cwd(self):
current_dir = os.getcwd()
def _breaks(*args, **kw):
raise RuntimeError()
register_archive_format('xxx', _breaks, [], 'xxx file')
try:
try:
make_archive('xxx', 'xxx', root_dir=self.mkdtemp())
except Exception:
pass
self.assertEqual(os.getcwd(), current_dir)
finally:
unregister_archive_format('xxx')
def test_make_tarfile_in_curdir(self):
# Issue #21280
root_dir = self.mkdtemp()
with support.change_cwd(root_dir):
self.assertEqual(make_archive('test', 'tar'), 'test.tar')
self.assertTrue(os.path.isfile('test.tar'))
@requires_zlib
def test_make_zipfile_in_curdir(self):
# Issue #21280
root_dir = self.mkdtemp()
with support.change_cwd(root_dir):
self.assertEqual(make_archive('test', 'zip'), 'test.zip')
self.assertTrue(os.path.isfile('test.zip'))
def test_register_archive_format(self):
self.assertRaises(TypeError, register_archive_format, 'xxx', 1)
self.assertRaises(TypeError, register_archive_format, 'xxx', lambda: x,
1)
self.assertRaises(TypeError, register_archive_format, 'xxx', lambda: x,
[(1, 2), (1, 2, 3)])
register_archive_format('xxx', lambda: x, [(1, 2)], 'xxx file')
formats = [name for name, params in get_archive_formats()]
self.assertIn('xxx', formats)
unregister_archive_format('xxx')
formats = [name for name, params in get_archive_formats()]
self.assertNotIn('xxx', formats)
@requires_zlib
def test_unpack_archive(self):
formats = ['tar', 'gztar', 'zip']
if BZ2_SUPPORTED:
formats.append('bztar')
root_dir, base_dir = self._create_files()
expected = rlistdir(root_dir)
expected.remove('outer')
for format in formats:
base_name = os.path.join(self.mkdtemp(), 'archive')
filename = make_archive(base_name, format, root_dir, base_dir)
# let's try to unpack it now
tmpdir2 = self.mkdtemp()
unpack_archive(filename, tmpdir2)
self.assertEqual(rlistdir(tmpdir2), expected)
# and again, this time with the format specified
tmpdir3 = self.mkdtemp()
unpack_archive(filename, tmpdir3, format=format)
self.assertEqual(rlistdir(tmpdir3), expected)
self.assertRaises(shutil.ReadError, unpack_archive, TESTFN)
self.assertRaises(ValueError, unpack_archive, TESTFN, format='xxx')
    def test_unpack_registry(self):
formats = get_unpack_formats()
def _boo(filename, extract_dir, extra):
self.assertEqual(extra, 1)
self.assertEqual(filename, 'stuff.boo')
self.assertEqual(extract_dir, 'xx')
register_unpack_format('Boo', ['.boo', '.b2'], _boo, [('extra', 1)])
unpack_archive('stuff.boo', 'xx')
# trying to register a .boo unpacker again
self.assertRaises(RegistryError, register_unpack_format, 'Boo2',
['.boo'], _boo)
# should work now
unregister_unpack_format('Boo')
register_unpack_format('Boo2', ['.boo'], _boo)
self.assertIn(('Boo2', ['.boo'], ''), get_unpack_formats())
self.assertNotIn(('Boo', ['.boo'], ''), get_unpack_formats())
# let's leave a clean state
unregister_unpack_format('Boo2')
self.assertEqual(get_unpack_formats(), formats)
@unittest.skipUnless(hasattr(shutil, 'disk_usage'),
"disk_usage not available on this platform")
def test_disk_usage(self):
usage = shutil.disk_usage(os.getcwd())
self.assertGreater(usage.total, 0)
self.assertGreater(usage.used, 0)
self.assertGreaterEqual(usage.free, 0)
self.assertGreaterEqual(usage.total, usage.used)
self.assertGreater(usage.total, usage.free)
@unittest.skipUnless(UID_GID_SUPPORT, "Requires grp and pwd support")
@unittest.skipUnless(hasattr(os, 'chown'), 'requires os.chown')
def test_chown(self):
# cleaned-up automatically by TestShutil.tearDown method
dirname = self.mkdtemp()
filename = tempfile.mktemp(dir=dirname)
write_file(filename, 'testing chown function')
with self.assertRaises(ValueError):
shutil.chown(filename)
with self.assertRaises(LookupError):
            shutil.chown(filename, user='non-existing username')
with self.assertRaises(LookupError):
            shutil.chown(filename, group='non-existing groupname')
with self.assertRaises(TypeError):
shutil.chown(filename, b'spam')
with self.assertRaises(TypeError):
shutil.chown(filename, 3.14)
uid = os.getuid()
gid = os.getgid()
def check_chown(path, uid=None, gid=None):
s = os.stat(filename)
if uid is not None:
self.assertEqual(uid, s.st_uid)
if gid is not None:
self.assertEqual(gid, s.st_gid)
shutil.chown(filename, uid, gid)
check_chown(filename, uid, gid)
shutil.chown(filename, uid)
check_chown(filename, uid)
shutil.chown(filename, user=uid)
check_chown(filename, uid)
shutil.chown(filename, group=gid)
check_chown(filename, gid=gid)
shutil.chown(dirname, uid, gid)
check_chown(dirname, uid, gid)
shutil.chown(dirname, uid)
check_chown(dirname, uid)
shutil.chown(dirname, user=uid)
check_chown(dirname, uid)
shutil.chown(dirname, group=gid)
check_chown(dirname, gid=gid)
user = pwd.getpwuid(uid)[0]
group = grp.getgrgid(gid)[0]
shutil.chown(filename, user, group)
check_chown(filename, uid, gid)
shutil.chown(dirname, user, group)
check_chown(dirname, uid, gid)
def test_copy_return_value(self):
# copy and copy2 both return their destination path.
for fn in (shutil.copy, shutil.copy2):
src_dir = self.mkdtemp()
dst_dir = self.mkdtemp()
src = os.path.join(src_dir, 'foo')
write_file(src, 'foo')
rv = fn(src, dst_dir)
self.assertEqual(rv, os.path.join(dst_dir, 'foo'))
rv = fn(src, os.path.join(dst_dir, 'bar'))
self.assertEqual(rv, os.path.join(dst_dir, 'bar'))
def test_copyfile_return_value(self):
        # copyfile returns its destination path.
src_dir = self.mkdtemp()
dst_dir = self.mkdtemp()
dst_file = os.path.join(dst_dir, 'bar')
src_file = os.path.join(src_dir, 'foo')
write_file(src_file, 'foo')
rv = shutil.copyfile(src_file, dst_file)
self.assertTrue(os.path.exists(rv))
self.assertEqual(read_file(src_file), read_file(dst_file))
def test_copyfile_same_file(self):
# copyfile() should raise SameFileError if the source and destination
# are the same.
src_dir = self.mkdtemp()
src_file = os.path.join(src_dir, 'foo')
write_file(src_file, 'foo')
self.assertRaises(SameFileError, shutil.copyfile, src_file, src_file)
# But Error should work too, to stay backward compatible.
self.assertRaises(Error, shutil.copyfile, src_file, src_file)
def test_copytree_return_value(self):
# copytree returns its destination path.
src_dir = self.mkdtemp()
dst_dir = src_dir + "dest"
self.addCleanup(shutil.rmtree, dst_dir, True)
src = os.path.join(src_dir, 'foo')
write_file(src, 'foo')
rv = shutil.copytree(src_dir, dst_dir)
self.assertEqual(['foo'], os.listdir(rv))
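# Illustrative round trip through the archiving helpers exercised above
# (not part of the original test module; assumes zlib is available for
# the 'gztar' format).
def _demo_archive_roundtrip():
    src = tempfile.mkdtemp()
    write_file((src, 'data.txt'), 'payload')
    base_name = os.path.join(tempfile.mkdtemp(), 'backup')
    archive = make_archive(base_name, 'gztar', root_dir=src)  # backup.tar.gz
    dest = tempfile.mkdtemp()
    unpack_archive(archive, dest)  # format inferred from the extension
    assert read_file((dest, 'data.txt')) == 'payload'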
class TestWhich(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp(prefix="Tmp")
self.addCleanup(shutil.rmtree, self.temp_dir, True)
# Give the temp_file an ".exe" suffix for all.
# It's needed on Windows and not harmful on other platforms.
self.temp_file = tempfile.NamedTemporaryFile(dir=self.temp_dir,
prefix="Tmp",
suffix=".Exe")
os.chmod(self.temp_file.name, stat.S_IXUSR)
self.addCleanup(self.temp_file.close)
self.dir, self.file = os.path.split(self.temp_file.name)
def test_basic(self):
# Given an EXE in a directory, it should be returned.
rv = shutil.which(self.file, path=self.dir)
self.assertEqual(rv, self.temp_file.name)
def test_absolute_cmd(self):
# When given the fully qualified path to an executable that exists,
# it should be returned.
rv = shutil.which(self.temp_file.name, path=self.temp_dir)
self.assertEqual(rv, self.temp_file.name)
def test_relative_cmd(self):
# When given the relative path with a directory part to an executable
# that exists, it should be returned.
base_dir, tail_dir = os.path.split(self.dir)
relpath = os.path.join(tail_dir, self.file)
with support.change_cwd(path=base_dir):
rv = shutil.which(relpath, path=self.temp_dir)
self.assertEqual(rv, relpath)
# But it shouldn't be searched in PATH directories (issue #16957).
with support.change_cwd(path=self.dir):
rv = shutil.which(relpath, path=base_dir)
self.assertIsNone(rv)
def test_cwd(self):
# Issue #16957
base_dir = os.path.dirname(self.dir)
with support.change_cwd(path=self.dir):
rv = shutil.which(self.file, path=base_dir)
if sys.platform == "win32":
# Windows: current directory implicitly on PATH
self.assertEqual(rv, os.path.join(os.curdir, self.file))
else:
# Other platforms: shouldn't match in the current directory.
self.assertIsNone(rv)
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
'non-root user required')
def test_non_matching_mode(self):
# Set the file read-only and ask for writeable files.
os.chmod(self.temp_file.name, stat.S_IREAD)
if os.access(self.temp_file.name, os.W_OK):
self.skipTest("can't set the file read-only")
rv = shutil.which(self.file, path=self.dir, mode=os.W_OK)
self.assertIsNone(rv)
def test_relative_path(self):
base_dir, tail_dir = os.path.split(self.dir)
with support.change_cwd(path=base_dir):
rv = shutil.which(self.file, path=tail_dir)
self.assertEqual(rv, os.path.join(tail_dir, self.file))
def test_nonexistent_file(self):
# Return None when no matching executable file is found on the path.
rv = shutil.which("foo.exe", path=self.dir)
self.assertIsNone(rv)
@unittest.skipUnless(sys.platform == "win32",
"pathext check is Windows-only")
def test_pathext_checking(self):
# Ask for the file without the ".exe" extension, then ensure that
# it gets found properly with the extension.
rv = shutil.which(self.file[:-4], path=self.dir)
self.assertEqual(rv, self.temp_file.name[:-4] + ".EXE")
def test_environ_path(self):
with support.EnvironmentVarGuard() as env:
env['PATH'] = self.dir
rv = shutil.which(self.file)
self.assertEqual(rv, self.temp_file.name)
def test_empty_path(self):
base_dir = os.path.dirname(self.dir)
with support.change_cwd(path=self.dir), \
support.EnvironmentVarGuard() as env:
env['PATH'] = self.dir
rv = shutil.which(self.file, path='')
self.assertIsNone(rv)
def test_empty_path_no_PATH(self):
with support.EnvironmentVarGuard() as env:
env.pop('PATH', None)
rv = shutil.which(self.file)
self.assertIsNone(rv)
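# Illustrative sketch of the lookup rules TestWhich exercises (not part of
# the original test module): which() searches the given path string for an
# executable file matching `mode` and returns None when nothing qualifies.
# Assumes no command named 'no-such-command-xyzzy' exists on this system.
def _demo_which():
    assert shutil.which('no-such-command-xyzzy') is None
    interpreter = shutil.which(os.path.basename(sys.executable),
                               path=os.path.dirname(sys.executable))
    assert interpreter is not None
    assert os.access(interpreter, os.X_OK)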
class TestMove(unittest.TestCase):
def setUp(self):
filename = "foo"
self.src_dir = tempfile.mkdtemp()
self.dst_dir = tempfile.mkdtemp()
self.src_file = os.path.join(self.src_dir, filename)
self.dst_file = os.path.join(self.dst_dir, filename)
with open(self.src_file, "wb") as f:
f.write(b"spam")
def tearDown(self):
for d in (self.src_dir, self.dst_dir):
try:
if d:
shutil.rmtree(d)
except:
pass
def _check_move_file(self, src, dst, real_dst):
with open(src, "rb") as f:
contents = f.read()
shutil.move(src, dst)
with open(real_dst, "rb") as f:
self.assertEqual(contents, f.read())
self.assertFalse(os.path.exists(src))
def _check_move_dir(self, src, dst, real_dst):
contents = sorted(os.listdir(src))
shutil.move(src, dst)
self.assertEqual(contents, sorted(os.listdir(real_dst)))
self.assertFalse(os.path.exists(src))
def test_move_file(self):
# Move a file to another location on the same filesystem.
self._check_move_file(self.src_file, self.dst_file, self.dst_file)
def test_move_file_to_dir(self):
# Move a file inside an existing dir on the same filesystem.
self._check_move_file(self.src_file, self.dst_dir, self.dst_file)
@mock_rename
def test_move_file_other_fs(self):
# Move a file to an existing dir on another filesystem.
self.test_move_file()
@mock_rename
def test_move_file_to_dir_other_fs(self):
# Move a file to another location on another filesystem.
self.test_move_file_to_dir()
def test_move_dir(self):
# Move a dir to another location on the same filesystem.
dst_dir = tempfile.mktemp()
try:
self._check_move_dir(self.src_dir, dst_dir, dst_dir)
finally:
try:
shutil.rmtree(dst_dir)
except:
pass
@mock_rename
def test_move_dir_other_fs(self):
# Move a dir to another location on another filesystem.
self.test_move_dir()
def test_move_dir_to_dir(self):
# Move a dir inside an existing dir on the same filesystem.
self._check_move_dir(self.src_dir, self.dst_dir,
os.path.join(self.dst_dir, os.path.basename(self.src_dir)))
@mock_rename
def test_move_dir_to_dir_other_fs(self):
# Move a dir inside an existing dir on another filesystem.
self.test_move_dir_to_dir()
def test_move_dir_sep_to_dir(self):
self._check_move_dir(self.src_dir + os.path.sep, self.dst_dir,
os.path.join(self.dst_dir, os.path.basename(self.src_dir)))
@unittest.skipUnless(os.path.altsep, 'requires os.path.altsep')
def test_move_dir_altsep_to_dir(self):
self._check_move_dir(self.src_dir + os.path.altsep, self.dst_dir,
os.path.join(self.dst_dir, os.path.basename(self.src_dir)))
def test_existing_file_inside_dest_dir(self):
# A file with the same name inside the destination dir already exists.
with open(self.dst_file, "wb"):
pass
self.assertRaises(shutil.Error, shutil.move, self.src_file, self.dst_dir)
def test_dont_move_dir_in_itself(self):
# Moving a dir inside itself raises an Error.
dst = os.path.join(self.src_dir, "bar")
self.assertRaises(shutil.Error, shutil.move, self.src_dir, dst)
def test_destinsrc_false_negative(self):
os.mkdir(TESTFN)
try:
for src, dst in [('srcdir', 'srcdir/dest')]:
src = os.path.join(TESTFN, src)
dst = os.path.join(TESTFN, dst)
self.assertTrue(shutil._destinsrc(src, dst),
msg='_destinsrc() wrongly concluded that '
'dst (%s) is not in src (%s)' % (dst, src))
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
def test_destinsrc_false_positive(self):
os.mkdir(TESTFN)
try:
for src, dst in [('srcdir', 'src/dest'), ('srcdir', 'srcdir.new')]:
src = os.path.join(TESTFN, src)
dst = os.path.join(TESTFN, dst)
self.assertFalse(shutil._destinsrc(src, dst),
msg='_destinsrc() wrongly concluded that '
'dst (%s) is in src (%s)' % (dst, src))
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
@support.skip_unless_symlink
@mock_rename
def test_move_file_symlink(self):
dst = os.path.join(self.src_dir, 'bar')
os.symlink(self.src_file, dst)
shutil.move(dst, self.dst_file)
self.assertTrue(os.path.islink(self.dst_file))
self.assertTrue(os.path.samefile(self.src_file, self.dst_file))
@support.skip_unless_symlink
@mock_rename
def test_move_file_symlink_to_dir(self):
filename = "bar"
dst = os.path.join(self.src_dir, filename)
os.symlink(self.src_file, dst)
shutil.move(dst, self.dst_dir)
final_link = os.path.join(self.dst_dir, filename)
self.assertTrue(os.path.islink(final_link))
self.assertTrue(os.path.samefile(self.src_file, final_link))
@support.skip_unless_symlink
@mock_rename
def test_move_dangling_symlink(self):
src = os.path.join(self.src_dir, 'baz')
dst = os.path.join(self.src_dir, 'bar')
os.symlink(src, dst)
dst_link = os.path.join(self.dst_dir, 'quux')
shutil.move(dst, dst_link)
self.assertTrue(os.path.islink(dst_link))
# On Windows, os.path.realpath does not follow symlinks (issue #9949)
if os.name == 'nt':
self.assertEqual(os.path.realpath(src), os.readlink(dst_link))
else:
self.assertEqual(os.path.realpath(src), os.path.realpath(dst_link))
@support.skip_unless_symlink
@mock_rename
def test_move_dir_symlink(self):
src = os.path.join(self.src_dir, 'baz')
dst = os.path.join(self.src_dir, 'bar')
os.mkdir(src)
os.symlink(src, dst)
dst_link = os.path.join(self.dst_dir, 'quux')
shutil.move(dst, dst_link)
self.assertTrue(os.path.islink(dst_link))
self.assertTrue(os.path.samefile(src, dst_link))
def test_move_return_value(self):
rv = shutil.move(self.src_file, self.dst_dir)
self.assertEqual(rv,
os.path.join(self.dst_dir, os.path.basename(self.src_file)))
def test_move_as_rename_return_value(self):
rv = shutil.move(self.src_file, os.path.join(self.dst_dir, 'bar'))
self.assertEqual(rv, os.path.join(self.dst_dir, 'bar'))
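# Illustrative sketch of the semantics TestMove verifies (not part of the
# original test module): when dst is an existing directory, the source is
# moved inside it and move() returns the final destination path.
def _demo_move_into_dir():
    src_dir, dst_dir = tempfile.mkdtemp(), tempfile.mkdtemp()
    src = os.path.join(src_dir, 'report.txt')
    write_file(src, 'data')
    rv = shutil.move(src, dst_dir)
    assert rv == os.path.join(dst_dir, 'report.txt')
    assert not os.path.exists(src)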
class TestCopyFile(unittest.TestCase):
_delete = False
class Faux(object):
_entered = False
_exited_with = None
_raised = False
def __init__(self, raise_in_exit=False, suppress_at_exit=True):
self._raise_in_exit = raise_in_exit
self._suppress_at_exit = suppress_at_exit
def read(self, *args):
return ''
def __enter__(self):
self._entered = True
def __exit__(self, exc_type, exc_val, exc_tb):
self._exited_with = exc_type, exc_val, exc_tb
if self._raise_in_exit:
self._raised = True
raise OSError("Cannot close")
return self._suppress_at_exit
def tearDown(self):
if self._delete:
del shutil.open
def _set_shutil_open(self, func):
shutil.open = func
self._delete = True
def test_w_source_open_fails(self):
def _open(filename, mode='r'):
if filename == 'srcfile':
raise OSError('Cannot open "srcfile"')
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
self.assertRaises(OSError, shutil.copyfile, 'srcfile', 'destfile')
def test_w_dest_open_fails(self):
srcfile = self.Faux()
def _open(filename, mode='r'):
if filename == 'srcfile':
return srcfile
if filename == 'destfile':
raise OSError('Cannot open "destfile"')
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
shutil.copyfile('srcfile', 'destfile')
self.assertTrue(srcfile._entered)
self.assertTrue(srcfile._exited_with[0] is OSError)
self.assertEqual(srcfile._exited_with[1].args,
('Cannot open "destfile"',))
def test_w_dest_close_fails(self):
srcfile = self.Faux()
destfile = self.Faux(True)
def _open(filename, mode='r'):
if filename == 'srcfile':
return srcfile
if filename == 'destfile':
return destfile
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
shutil.copyfile('srcfile', 'destfile')
self.assertTrue(srcfile._entered)
self.assertTrue(destfile._entered)
self.assertTrue(destfile._raised)
self.assertTrue(srcfile._exited_with[0] is OSError)
self.assertEqual(srcfile._exited_with[1].args,
('Cannot close',))
def test_w_source_close_fails(self):
srcfile = self.Faux(True)
destfile = self.Faux()
def _open(filename, mode='r'):
if filename == 'srcfile':
return srcfile
if filename == 'destfile':
return destfile
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
self.assertRaises(OSError,
shutil.copyfile, 'srcfile', 'destfile')
self.assertTrue(srcfile._entered)
self.assertTrue(destfile._entered)
self.assertFalse(destfile._raised)
self.assertTrue(srcfile._exited_with[0] is None)
self.assertTrue(srcfile._raised)
def test_move_dir_caseinsensitive(self):
# Renames a folder to the same name
# but a different case.
self.src_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.src_dir, True)
dst_dir = os.path.join(
os.path.dirname(self.src_dir),
os.path.basename(self.src_dir).upper())
self.assertNotEqual(self.src_dir, dst_dir)
try:
shutil.move(self.src_dir, dst_dir)
self.assertTrue(os.path.isdir(dst_dir))
finally:
os.rmdir(dst_dir)
class TermsizeTests(unittest.TestCase):
def test_does_not_crash(self):
"""Check if get_terminal_size() returns a meaningful value.
There's no easy portable way to actually check the size of the
terminal, so let's check if it returns something sensible instead.
"""
size = shutil.get_terminal_size()
self.assertGreaterEqual(size.columns, 0)
self.assertGreaterEqual(size.lines, 0)
def test_os_environ_first(self):
"Check if environment variables have precedence"
with support.EnvironmentVarGuard() as env:
env['COLUMNS'] = '777'
size = shutil.get_terminal_size()
self.assertEqual(size.columns, 777)
with support.EnvironmentVarGuard() as env:
env['LINES'] = '888'
size = shutil.get_terminal_size()
self.assertEqual(size.lines, 888)
@unittest.skipUnless(os.isatty(sys.__stdout__.fileno()), "not on tty")
def test_stty_match(self):
"""Check if stty returns the same results ignoring env
This test will fail if stdin and stdout are connected to
different terminals with different sizes. Nevertheless, such
situations should be pretty rare.
"""
try:
size = subprocess.check_output(['stty', 'size']).decode().split()
except (FileNotFoundError, subprocess.CalledProcessError):
self.skipTest("stty invocation failed")
expected = (int(size[1]), int(size[0])) # reversed order
with support.EnvironmentVarGuard() as env:
del env['LINES']
del env['COLUMNS']
actual = shutil.get_terminal_size()
self.assertEqual(expected, actual)
class PublicAPITests(unittest.TestCase):
"""Ensures that the correct values are exposed in the public API."""
def test_module_all_attribute(self):
self.assertTrue(hasattr(shutil, '__all__'))
target_api = ['copyfileobj', 'copyfile', 'copymode', 'copystat',
'copy', 'copy2', 'copytree', 'move', 'rmtree', 'Error',
'SpecialFileError', 'ExecError', 'make_archive',
'get_archive_formats', 'register_archive_format',
'unregister_archive_format', 'get_unpack_formats',
'register_unpack_format', 'unregister_unpack_format',
'unpack_archive', 'ignore_patterns', 'chown', 'which',
'get_terminal_size', 'SameFileError']
if hasattr(os, 'statvfs') or os.name == 'nt':
target_api.append('disk_usage')
self.assertEqual(set(shutil.__all__), set(target_api))
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
kvar/ansible | test/units/modules/network/fortios/test_fortios_system_virtual_wire_pair.py | 21 | 8685 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_system_virtual_wire_pair
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_system_virtual_wire_pair.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_system_virtual_wire_pair_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_virtual_wire_pair': {'name': 'default_name_3',
'vlan_filter': 'test_value_4',
'wildcard_vlan': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_virtual_wire_pair.fortios_system(input_data, fos_instance)
expected_data = {'name': 'default_name_3',
'vlan-filter': 'test_value_4',
'wildcard-vlan': 'enable'
}
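# Note: underscore option keys from the module input (vlan_filter) are mapped
# to the hyphenated field names (vlan-filter) seen in the expected API payload.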
set_method_mock.assert_called_with('system', 'virtual-wire-pair', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_system_virtual_wire_pair_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_virtual_wire_pair': {'name': 'default_name_3',
'vlan_filter': 'test_value_4',
'wildcard_vlan': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_virtual_wire_pair.fortios_system(input_data, fos_instance)
expected_data = {'name': 'default_name_3',
'vlan-filter': 'test_value_4',
'wildcard-vlan': 'enable'
}
set_method_mock.assert_called_with('system', 'virtual-wire-pair', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_system_virtual_wire_pair_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'system_virtual_wire_pair': {'name': 'default_name_3',
'vlan_filter': 'test_value_4',
'wildcard_vlan': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_virtual_wire_pair.fortios_system(input_data, fos_instance)
delete_method_mock.assert_called_with('system', 'virtual-wire-pair', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_system_virtual_wire_pair_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'system_virtual_wire_pair': {'name': 'default_name_3',
'vlan_filter': 'test_value_4',
'wildcard_vlan': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_virtual_wire_pair.fortios_system(input_data, fos_instance)
delete_method_mock.assert_called_with('system', 'virtual-wire-pair', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_system_virtual_wire_pair_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_virtual_wire_pair': {'name': 'default_name_3',
'vlan_filter': 'test_value_4',
'wildcard_vlan': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_virtual_wire_pair.fortios_system(input_data, fos_instance)
expected_data = {'name': 'default_name_3',
'vlan-filter': 'test_value_4',
'wildcard-vlan': 'enable'
}
set_method_mock.assert_called_with('system', 'virtual-wire-pair', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_system_virtual_wire_pair_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_virtual_wire_pair': {
'random_attribute_not_valid': 'tag', 'name': 'default_name_3',
'vlan_filter': 'test_value_4',
'wildcard_vlan': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_virtual_wire_pair.fortios_system(input_data, fos_instance)
expected_data = {'name': 'default_name_3',
'vlan-filter': 'test_value_4',
'wildcard-vlan': 'enable'
}
set_method_mock.assert_called_with('system', 'virtual-wire-pair', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| gpl-3.0 |
Nikea/VisTrails | contrib/NumSciPy/ArrayIO.py | 6 | 12854 | import core.modules
import core.modules.module_registry
from core.modules.vistrails_module import Module, ModuleError
import numpy
import scipy
import scipy.io
import pylab
from Array import *
from Matrix import *
class ArrayIOModule(object):
my_namespace = 'numpy|io'
class NrrdHelper(object):
def __init__(self):
self.type = {}
self.type['float32'] = 'f'
self.type['float'] = 'f'
self.type['float64'] = 'd'
self.type['double'] = 'd'
self.type['int'] = 'i'
self.type['int16'] = 'i'
self.type['long'] = 'l'
self.type['int32'] = 'l'
self.type['unsigned short'] = 's'
self.type['unsigned char'] = 'c'
self.nrrd_type = {}
self.nrrd_type['float'] = 'float'
self.nrrd_type['float32'] = 'float'
self.nrrd_type['float64'] = 'double'
self.nrrd_type['double'] = 'double'
self.nrrd_type['int64'] = 'long'
self.nrrd_type['long'] = 'long'
self.nrrd_type['int32'] = 'int'
self.nrrd_type['int16'] = 'short'
self.nrrd_type['int8'] = 'uchar'
self.nrrd_type['unsigned short'] = 'short'
self.little_endian = True
def num_bytes(self, dtype):
if self.type.has_key(dtype):
return self.type[dtype]
else:
print "Cannot find " + dtype + " in type library."
print "Assuming float32 for dtype"
return 'f'
def get_nrrd_type(self, data):
dt = data.dtype.name
if self.nrrd_type.has_key(dt):
return self.nrrd_type[dt]
else:
print "Cannot find " + dt + " in type library."
print "Assuming float32 for dtype"
return 'float'
def read_raw(self, fn, sizes, dtype, little_end=True):
try:
fid = open(fn, 'rb')
dt = self.num_bytes(dtype)
ndim = len(sizes)
num_el = 1
for i in xrange(ndim):
num_el *= sizes[i]
if little_end:
dt = '<'+dt
else:
dt = '>'+dt
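# numpy dtype strings: '<' forces little-endian, '>' big-endian
# (e.g. '<f' reads little-endian float32 regardless of host byte order).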
data = numpy.fromfile(fn, dt)
fid.close()
data.shape = sizes
return data
except:
raise ModuleError("Could not read .raw file!")
def write_raw(self, fn, data):
try:
fid = open(fn, 'wb')
scipy.io.fwrite(fid, data.size, data)
fid.close()
except:
raise ModuleError("Could not write .raw file!")
def write_nhdr(self, fn, data):
import os
l = fn.split('/')
name = l[len(l)-1]
base = name.split('.')[0]
rawname = base + '.raw'
rawpath = fn.rstrip(name)
rawpath += rawname
self.write_raw(rawpath, data)
cmd = 'unu make -h -t '
cmd += self.get_nrrd_type(data) + ' '
cmd += '-e raw -i ' + rawname + ' -s '
sh = data.shape
ndims = len(sh)
for i in xrange(ndims):
cmd += str(sh[i]) + ' '
cmd += '-o ' + fn
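# Illustrative resulting command (hypothetical file names):
#   unu make -h -t float -e raw -i base.raw -s 64 64 64 -o base.nhdr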
try:
os.system(cmd)
except:
raise ModuleError("Could not write NHDR file. Please make sure the Teem and UNU utilities are on your path.")
def read_nhdr(self, fn):
import os.path
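# A minimal detached NRRD header (.nhdr) looks roughly like this, which is
# what the field-by-field parsing below expects (illustrative values):
#   type: float
#   dimension: 3
#   sizes: 64 64 64
#   endian: little
#   encoding: raw
#   data file: volume.raw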
try:
fid = open(fn, 'r')
for line in fid:
if line.split(':')[0] == 'type':
self.dtype = line.split(':')[1].strip()
if line.split(':')[0] == 'dimension':
self.ndim = int(line.split(':')[1].strip())
if line.split(':')[0] == 'sizes':
s = line.split(':')[1].strip().split(' ')
self.sizes = []
for l in s:
self.sizes.append(int(l))
if line.split(':')[0] == 'endian':
if line.split(':')[1].strip() == 'little':
self.little_endian = True
else:
self.little_endian = False
if line.split(':')[0] == 'data file':
self.fn = line.split(':')[1].strip()
if line.split(':')[0] == 'encoding':
self.encoding = line.split(':')[1].strip()
fid.close()
except:
raise ModuleError("Could not read .nhdr file!")
if self.encoding == 'raw':
curpath = os.getcwd()
npath = os.path.dirname(fn)
os.chdir(npath)
data = self.read_raw(self.fn, self.sizes, self.dtype, little_end=self.little_endian)
os.chdir(curpath)
return data
raise ModuleError(".nhdr file contains file not in .raw format!")
class ReadPNG(ArrayIOModule, Module):
""" Load a .png type image into a Numpy Array. """
def compute(self):
fn = self.get_input("Filename")
ar = pylab.imread(fn)
out = NDArray()
out.set_array(ar)
self.set_output("Output Array", out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Filename", (basic.String, 'Filename'))
reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array'))
class WritePNG(ArrayIOModule, Module):
""" Write a .png type image from a Numpy Array. """
def compute(self):
fn = self.get_input("Filename")
ar = self.get_input("Image")
minv = self.force_get_input("Min")
maxv = self.force_get_input("Max")
if minv is None:
minv = 0
if maxv is None:
maxv = 255
da_ar = ar.get_array().squeeze()
im = scipy.misc.toimage(da_ar, cmin=minv, cmax=maxv).save(fn)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Filename", (basic.String, 'Filename'))
reg.add_input_port(cls, "Min", (basic.Integer, 'Min Value'))
reg.add_input_port(cls, "Max", (basic.Integer, 'Max Value'))
reg.add_input_port(cls, "Image", (NDArray, 'Image To Write'))
class ReadRAW(ArrayIOModule, Module):
""" Load a .raw file into a Numpy Array. The .raw files are
assumed to be in the volvis format: http://www.volvis.org """
def __init__(self):
Module.__init__(self)
self.helper = NrrdHelper()
def compute(self):
fn = self.get_input("Filename")
sizes = self.get_input_list("Sizes")
dtype = self.get_input("DataType")
ar = self.helper.read_raw(fn, sizes, dtype)
out = NDArray()
out.set_array(ar)
self.set_output("Output Array", out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Filename", (basic.String, 'Filename'))
reg.add_input_port(cls, "Sizes", (basic.Integer, 'Dimension Sizes'))
reg.add_input_port(cls, "DataType", (basic.String, 'Datatype'))
reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array'))
class WriteRAW(ArrayIOModule, Module):
""" Write a .raw file from a Numpy Array. """
def __init__(self):
Module.__init__(self)
self.helper = NrrdHelper()
def compute(self):
fn = self.get_input("Filename")
ar = self.get_input("Array").get_array()
self.helper.write_raw(fn,ar)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Filename", (basic.String, 'Filename'))
reg.add_input_port(cls, "Array", (NDArray, 'Input Array'))
class ReadNHDR(ArrayIOModule, Module):
""" Load a .nhdr/.raw pair into a Numpy Array. """
def __init__(self):
Module.__init__(self)
self.helper = NrrdHelper()
def compute(self):
fn = ''
if self.has_input("File"):
fn = self.get_input("File").name
else:
fn = self.get_input("Filename")
ar = self.helper.read_nhdr(fn)
out = NDArray()
out.set_array(ar)
self.set_output("Output Array", out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Filename", (basic.String, 'Filename'))
reg.add_input_port(cls, "File", (basic.File, 'File'))
reg.add_output_port(cls, "Output Array", (NDArray, 'Output Array'))
class WriteNHDR(ArrayIOModule, Module):
""" Write a .nhdr/.raw pair from a Numpy Array """
def __init__(self):
Module.__init__(self)
self.helper = NrrdHelper()
def compute(self):
fn = self.get_input("Filename")
ar = self.get_input("Array").get_array()
self.helper.write_nhdr(fn,ar)
self.set_output("Filename Out", fn)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Filename", (basic.String, 'Filename'))
reg.add_input_port(cls, "Array", (NDArray, 'Input Array'))
reg.add_output_port(cls, "Filename Out", (basic.String, 'Output Filename'))
class ReadStatisticalSummary(ArrayIOModule, Module):
"""
Documentation
"""
def compute(self):
fn = ''
if self.has_input("File"):
fn = self.get_input("File").name
else:
fn = self.get_input("Filename")
if self.force_get_input("Allocate Aggregated Array"):
alloc_array = True
else:
alloc_array = False
fid = open(fn, 'r')
dims = fid.readline().strip().split()
n_pts = int(dims[0])
n_bins = int(dims[1])
min_ar = numpy.zeros(n_pts)
lq_ar = numpy.zeros(n_pts)
med_ar = numpy.zeros(n_pts)
hq_ar = numpy.zeros(n_pts)
max_ar = numpy.zeros(n_pts)
mode_ar = numpy.zeros((n_pts, 4))
hist_ar = numpy.zeros((n_pts, n_bins))
if alloc_array:
ag_ar = numpy.zeros((n_pts, 5+4+n_bins))
for i in xrange(n_pts):
l = fid.readline().strip().split()
min_ar[i] = float(l[0])
lq_ar[i] = float(l[1])
med_ar[i] = float(l[2])
hq_ar[i] = float(l[3])
max_ar[i] = float(l[4])
for j in xrange(4):
mode_ar[i, j] = float(l[5+j])
for b in xrange(n_bins):
hist_ar[i, b] = float(l[9+b])
if alloc_array:
vals = numpy.array(l).astype('float')
ag_ar[i,:] += vals
fid.close()
min_ar_out = NDArray()
min_ar_out.set_array(min_ar)
self.set_output("Min Array", min_ar_out)
lq_ar_out = NDArray()
lq_ar_out.set_array(lq_ar)
self.set_output("Lower Quartile Array", lq_ar_out)
med_ar_out = NDArray()
med_ar_out.set_array(med_ar)
self.set_output("Median Array", med_ar_out)
hq_ar_out = NDArray()
hq_ar_out.set_array(hq_ar)
self.set_output("Upper Quartile Array", hq_ar_out)
max_ar_out = NDArray()
max_ar_out.set_array(max_ar)
self.set_output("Max Array", max_ar_out)
mode_ar_out = NDArray()
mode_ar_out.set_array(mode_ar)
self.set_output("Mode Array", mode_ar_out)
hist_ar_out = NDArray()
hist_ar_out.set_array(hist_ar)
self.set_output("Histogram Array", hist_ar_out)
if alloc_array:
ag_ar_out = NDArray()
ag_ar_out.set_array(ag_ar)
self.set_output("Aggregated Array", ag_ar_out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Filename", (basic.String, 'Filename'))
reg.add_input_port(cls, "File", (basic.File, 'File'))
reg.add_input_port(cls, "Allocate Aggregated Array", (basic.Boolean, 'Allocate Extra Space for Aggregated Array'), True)
reg.add_output_port(cls, "Min Array", (NDArray, 'Minima Array'))
reg.add_output_port(cls, "Lower Quartile Array", (NDArray, 'Lower Quartile Array'))
reg.add_output_port(cls, "Median Array", (NDArray, 'Median Array'))
reg.add_output_port(cls, "Upper Quartile Array", (NDArray, 'Upper Quartile Array'))
reg.add_output_port(cls, "Max Array", (NDArray, 'Maxima Array'))
reg.add_output_port(cls, "Mode Array", (NDArray, 'Mode Array'))
reg.add_output_port(cls, "Histogram Array", (NDArray, 'Histogram Array'))
reg.add_output_port(cls, "Aggregated Array", (NDArray, 'Aggregated Array'), True)
| bsd-3-clause |
tasoc/photometry | notes/pixelcoord_of_camera_centre.py | 1 | 1673 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
.. codeauthor:: Rasmus Handberg <[email protected]>
"""
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
import h5py
from astropy.wcs import WCS
import itertools
import os.path
from tqdm import trange
if __name__ == '__main__':
rootdir = r'/aadc/tasoc/archive/S01_DR01'
xycendict = {}
for camera, ccd in itertools.product((1,2,3,4), (1,2,3,4)):
if camera == 1:
camera_centre = [324.566998914166, -33.172999301379]
elif camera == 2:
camera_centre = [338.57656612933, -55.0789269350771]
elif camera == 3:
camera_centre = [19.4927827153412, -71.9781542628999]
elif camera == 4:
camera_centre = [90.0042379538484, -66.5647239768875]
with h5py.File(os.path.join(rootdir, f'sector001_camera{camera:d}_ccd{ccd:d}.hdf5'), 'r') as hdf:
N = len(hdf['images'])
a = np.full(N, np.NaN)
b = np.full(N, np.NaN)
cno = np.arange(0, N, 1)
for k in trange(N):
if hdf['quality'][k] == 0:
hdr_string = hdf['wcs'][f'{k:04d}'][0]
wcs = WCS(header=fits.Header.fromstring(hdr_string), relax=True)
xycen = wcs.all_world2pix(np.atleast_2d(camera_centre), 0, ra_dec_order=True)
a[k] = xycen[0][0]
b[k] = xycen[0][1]
am = np.nanmedian(a)
bm = np.nanmedian(b)
plt.figure()
plt.scatter(cno, a)
plt.axhline(am)
plt.figure()
plt.scatter(cno, b)
plt.axhline(bm)
plt.show()
# Save the median camera-centre pixel coordinates for this (camera, ccd).
xycendict[(camera, ccd)] = np.array([am, bm])
print("xycen = {")
for key, value in xycendict.items():
print("\t(%d, %d): [%f, %f]," % (
key[0],
key[1],
value[0],
value[1]
))
print("}.get((camera, ccd))")
| gpl-3.0 |
Anonymouslemming/ansible | lib/ansible/plugins/lookup/passwordstore.py | 36 | 7799 | # (c) 2017, Patrick Deelman <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import subprocess
import time
from distutils import util
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
# backported check_output with 'input' support for Python 2.7
# http://stackoverflow.com/questions/10103551/passing-data-to-subprocess-check-output
def check_output2(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
if 'stderr' in kwargs:
raise ValueError('stderr argument not allowed, it will be overridden.')
if 'input' in kwargs:
if 'stdin' in kwargs:
raise ValueError('stdin and input arguments may not both be used.')
inputdata = kwargs['input']
del kwargs['input']
kwargs['stdin'] = subprocess.PIPE
else:
inputdata = None
process = subprocess.Popen(*popenargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
try:
out, err = process.communicate(inputdata)
except:
process.kill()
process.wait()
raise
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, out + err)
return out
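# Illustrative use: behaves like subprocess.check_output but also accepts an
# 'input' kwarg on Python 2.7, e.g.:
#   out = check_output2(['cat'], input='hello\n')  # -> 'hello\n'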
class LookupModule(LookupBase):
def parse_params(self, term):
# I went with the "traditional" param followed with space separated KV pairs.
# Waiting for final implementation of lookup parameter parsing.
# See: https://github.com/ansible/ansible/issues/12255
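# Illustrative term (hypothetical pass-name): the first token is the
# pass-name, the rest are key=value options, e.g.
#   "web/example.com create=True length=20 subkey=user"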
params = term.split()
if len(params) > 0:
# the first param is the pass-name
self.passname = params[0]
# next parse the optional parameters in keyvalue pairs
try:
for param in params[1:]:
name, value = param.split('=')
assert(name in self.paramvals)
self.paramvals[name] = value
except (ValueError, AssertionError) as e:
raise AnsibleError(e)
# check and convert values
try:
for key in ['create', 'returnall', 'overwrite']:
if not isinstance(self.paramvals[key], bool):
self.paramvals[key] = util.strtobool(self.paramvals[key])
except (ValueError, AssertionError) as e:
raise AnsibleError(e)
if not isinstance(self.paramvals['length'], int):
if self.paramvals['length'].isdigit():
self.paramvals['length'] = int(self.paramvals['length'])
else:
raise AnsibleError("{} is not a correct value for length".format(self.paramvals['length']))
# Set PASSWORD_STORE_DIR if directory is set
if self.paramvals['directory']:
if os.path.isdir(self.paramvals['directory']):
os.environ['PASSWORD_STORE_DIR'] = self.paramvals['directory']
else:
raise AnsibleError('Passwordstore directory \'{}\' does not exist'.format(self.paramvals['directory']))
def check_pass(self):
try:
self.passoutput = check_output2(["pass", self.passname]).splitlines()
self.password = self.passoutput[0]
self.passdict = {}
for line in self.passoutput[1:]:
if ":" in line:
name, value = line.split(':', 1)
self.passdict[name.strip()] = value.strip()
except (subprocess.CalledProcessError) as e:
if e.returncode == 1 and 'not in the password store' in e.output:
# pass returned 1 and its output says the entry is not in the store;
# decide whether that is acceptable (create=True) or an error.
if not self.paramvals['create']:
raise AnsibleError('passname: {} not found, use create=True'.format(self.passname))
else:
return False
else:
raise AnsibleError(e)
return True
def get_newpass(self):
if self.paramvals['userpass']:
newpass = self.paramvals['userpass']
else:
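# pwgen flags used here: -c include capitals, -n include numerals,
# -s generate fully random "secure" passwords; the trailing '1' asks
# pwgen for a single password of the requested length.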
try:
newpass = check_output2(['pwgen', '-cns', str(self.paramvals['length']), '1']).rstrip()
except (subprocess.CalledProcessError) as e:
raise AnsibleError(e)
return newpass
def update_password(self):
# generate new password, insert old lines from current result and return new password
newpass = self.get_newpass()
datetime = time.strftime("%d/%m/%Y %H:%M:%S")
msg = newpass + '\n' + '\n'.join(self.passoutput[1:])
msg += "\nlookup_pass: old password was {} (Updated on {})\n".format(self.password, datetime)
try:
generate = check_output2(['pass', 'insert', '-f', '-m', self.passname], input=msg)
except (subprocess.CalledProcessError) as e:
raise AnsibleError(e)
return newpass
def generate_password(self):
# generate new file and insert lookup_pass: Generated by Ansible on {date}
# use pwgen to generate the password and insert values with pass -m
newpass = self.get_newpass()
datetime = time.strftime("%d/%m/%Y %H:%M:%S")
msg = newpass + '\n' + "lookup_pass: First generated by ansible on {}\n".format(datetime)
try:
generate = check_output2(['pass', 'insert', '-f', '-m', self.passname], input=msg)
except (subprocess.CalledProcessError) as e:
raise AnsibleError(e)
return newpass
def get_passresult(self):
if self.paramvals['returnall']:
return os.linesep.join(self.passoutput)
if self.paramvals['subkey'] == 'password':
return self.password
else:
if self.paramvals['subkey'] in self.passdict:
return self.passdict[self.paramvals['subkey']]
else:
return None
def run(self, terms, variables, **kwargs):
result = []
self.paramvals = {
'subkey': 'password',
'directory': variables.get('passwordstore'),
'create': False,
'returnall': False,
'overwrite': False,
'userpass': '',
'length': 16,
}
for term in terms:
self.parse_params(term) # parse the input into paramvals
if self.check_pass(): # password exists
if self.paramvals['overwrite'] and self.paramvals['subkey'] == 'password':
result.append(self.update_password())
else:
result.append(self.get_passresult())
else: # password does not exist
if self.paramvals['create']:
result.append(self.generate_password())
return result
| gpl-3.0 |
ikaee/bfr-attendant | facerecognitionlibrary/jni-build/jni/include/tensorflow/contrib/factorization/python/ops/kmeans_test.py | 23 | 14710 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for KMeans."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
import numpy as np
from sklearn.cluster import KMeans as SklearnKMeans
import tensorflow as tf
from tensorflow.python.platform import benchmark
FLAGS = tf.app.flags.FLAGS
def normalize(x):
return x / np.sqrt(np.sum(x * x, axis=-1, keepdims=True))
def cosine_similarity(x, y):
return np.dot(normalize(x), np.transpose(normalize(y)))
def make_random_centers(num_centers, num_dims, center_norm=500):
return np.round(np.random.rand(num_centers, num_dims).astype(np.float32) *
center_norm)
def make_random_points(centers, num_points, max_offset=20):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(np.random.randn(num_points, num_dims).astype(np.float32) *
max_offset)
return (centers[assignments] + offsets,
assignments,
np.add.reduce(offsets * offsets, 1))
class KMeansTest(tf.test.TestCase):
def setUp(self):
np.random.seed(3)
self.num_centers = 5
self.num_dims = 2
self.num_points = 10000
self.true_centers = make_random_centers(self.num_centers, self.num_dims)
self.points, _, self.scores = make_random_points(self.true_centers,
self.num_points)
self.true_score = np.add.reduce(self.scores)
self.kmeans = tf.contrib.factorization.KMeansClustering(
self.num_centers,
initial_clusters=tf.contrib.factorization.RANDOM_INIT,
use_mini_batch=self.use_mini_batch,
config=self.config(14),
random_seed=12)
@staticmethod
def config(tf_random_seed):
return tf.contrib.learn.RunConfig(tf_random_seed=tf_random_seed)
@property
def batch_size(self):
return self.num_points
@property
def use_mini_batch(self):
return False
def test_clusters(self):
kmeans = self.kmeans
kmeans.fit(x=self.points, steps=1, batch_size=8)
clusters = kmeans.clusters()
self.assertAllEqual(list(clusters.shape),
[self.num_centers, self.num_dims])
def test_fit(self):
if self.batch_size != self.num_points:
# TODO(agarwal): Doesn't work with mini-batch.
return
kmeans = self.kmeans
kmeans.fit(x=self.points,
steps=1, batch_size=self.batch_size)
score1 = kmeans.score(x=self.points)
kmeans.fit(x=self.points,
steps=15 * self.num_points // self.batch_size,
batch_size=self.batch_size)
score2 = kmeans.score(x=self.points)
self.assertTrue(score1 > score2)
self.assertNear(self.true_score, score2, self.true_score * 0.05)
def test_monitor(self):
if self.batch_size != self.num_points:
# TODO(agarwal): Doesn't work with mini-batch.
return
kmeans = tf.contrib.factorization.KMeansClustering(
self.num_centers,
initial_clusters=tf.contrib.factorization.RANDOM_INIT,
use_mini_batch=self.use_mini_batch,
config=tf.contrib.learn.RunConfig(tf_random_seed=14),
random_seed=12)
kmeans.fit(x=self.points,
# Force it to train forever until the monitor stops it.
steps=None,
batch_size=self.batch_size,
relative_tolerance=1e-4)
score = kmeans.score(x=self.points)
self.assertNear(self.true_score, score, self.true_score * 0.005)
def test_infer(self):
kmeans = self.kmeans
kmeans.fit(x=self.points, steps=10, batch_size=128)
clusters = kmeans.clusters()
# Make a small test set
points, true_assignments, true_offsets = make_random_points(clusters, 10)
# Test predict
assignments = kmeans.predict(points, batch_size=self.batch_size)
self.assertAllEqual(assignments, true_assignments)
# Test score
score = kmeans.score(points, batch_size=128)
self.assertNear(score, np.sum(true_offsets), 0.01 * score)
# Test transform
transform = kmeans.transform(points, batch_size=128)
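# Expected squared Euclidean distances via the identity
# ||x - c||^2 = ||x||^2 - 2*x.c + ||c||^2, clamped at zero for safety.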
true_transform = np.maximum(
0,
np.sum(np.square(points), axis=1, keepdims=True) -
2 * np.dot(points, np.transpose(clusters)) +
np.transpose(np.sum(np.square(clusters), axis=1, keepdims=True)))
self.assertAllClose(transform, true_transform, rtol=0.05, atol=10)
def test_fit_with_cosine_distance(self):
# Create points on y=x and y=1.5x lines to check the cosine similarity.
# Note that euclidean distance will give different results in this case.
points = np.array(
[[9, 9], [0.5, 0.5], [10, 15], [0.4, 0.6]], dtype=np.float32)
# true centers are the unit vectors on lines y=x and y=1.5x
true_centers = np.array(
[[0.70710678, 0.70710678], [0.5547002, 0.83205029]], dtype=np.float32)
kmeans = tf.contrib.factorization.KMeansClustering(
2,
initial_clusters=tf.contrib.factorization.RANDOM_INIT,
distance_metric=tf.contrib.factorization.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
config=self.config(2),
random_seed=12)
kmeans.fit(x=points, steps=10, batch_size=4)
centers = normalize(kmeans.clusters())
self.assertAllClose(np.sort(centers, axis=0),
np.sort(true_centers, axis=0))
def test_transform_with_cosine_distance(self):
points = np.array(
[[2.5, 0.1], [2, 0.2], [3, 0.1], [4, 0.2],
[0.1, 2.5], [0.2, 2], [0.1, 3], [0.2, 4]], dtype=np.float32)
true_centers = [normalize(np.mean(normalize(points)[4:, :], axis=0,
keepdims=True))[0],
normalize(np.mean(normalize(points)[0:4, :], axis=0,
keepdims=True))[0]]
kmeans = tf.contrib.factorization.KMeansClustering(
2,
initial_clusters=tf.contrib.factorization.RANDOM_INIT,
distance_metric=tf.contrib.factorization.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
config=self.config(5))
kmeans.fit(x=points, steps=50, batch_size=8)
centers = normalize(kmeans.clusters())
self.assertAllClose(np.sort(centers, axis=0),
np.sort(true_centers, axis=0),
atol=1e-2)
true_transform = 1 - cosine_similarity(points, centers)
transform = kmeans.transform(points, batch_size=8)
self.assertAllClose(transform, true_transform, atol=1e-3)
def test_predict_with_cosine_distance(self):
points = np.array(
[[2.5, 0.1], [2, 0.2], [3, 0.1], [4, 0.2],
[0.1, 2.5], [0.2, 2], [0.1, 3], [0.2, 4]], dtype=np.float32)
true_centers = np.array(
[normalize(np.mean(normalize(points)[0:4, :],
axis=0,
keepdims=True))[0],
normalize(np.mean(normalize(points)[4:, :],
axis=0,
keepdims=True))[0]], dtype=np.float32)
true_assignments = [0] * 4 + [1] * 4
true_score = len(points) - np.tensordot(normalize(points),
true_centers[true_assignments])
kmeans = tf.contrib.factorization.KMeansClustering(
2,
initial_clusters=tf.contrib.factorization.RANDOM_INIT,
distance_metric=tf.contrib.factorization.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
config=self.config(3))
kmeans.fit(x=points, steps=30, batch_size=8)
centers = normalize(kmeans.clusters())
self.assertAllClose(np.sort(centers, axis=0),
np.sort(true_centers, axis=0), atol=1e-2)
assignments = kmeans.predict(points, batch_size=8)
self.assertAllClose(centers[assignments],
true_centers[true_assignments], atol=1e-2)
score = kmeans.score(points, batch_size=8)
self.assertAllClose(score, true_score, atol=1e-2)
def test_predict_with_cosine_distance_and_kmeans_plus_plus(self):
# Most points are concentrated near one center. KMeans++ is likely to find
# the less populated centers.
points = np.array([[2.5, 3.5], [2.5, 3.5], [-2, 3], [-2, 3], [-3, -3],
[-3.1, -3.2], [-2.8, -3.], [-2.9, -3.1], [-3., -3.1],
[-3., -3.1], [-3.2, -3.], [-3., -3.]], dtype=np.float32)
true_centers = np.array(
[normalize(np.mean(normalize(points)[0:2, :], axis=0,
keepdims=True))[0],
normalize(np.mean(normalize(points)[2:4, :], axis=0,
keepdims=True))[0],
normalize(np.mean(normalize(points)[4:, :], axis=0,
keepdims=True))[0]], dtype=np.float32)
true_assignments = [0] * 2 + [1] * 2 + [2] * 8
true_score = len(points) - np.tensordot(normalize(points),
true_centers[true_assignments])
kmeans = tf.contrib.factorization.KMeansClustering(
3,
initial_clusters=tf.contrib.factorization.KMEANS_PLUS_PLUS_INIT,
distance_metric=tf.contrib.factorization.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
config=self.config(3))
kmeans.fit(x=points, steps=30, batch_size=12)
centers = normalize(kmeans.clusters())
self.assertAllClose(sorted(centers.tolist()),
sorted(true_centers.tolist()),
atol=1e-2)
assignments = kmeans.predict(points, batch_size=12)
self.assertAllClose(centers[assignments],
true_centers[true_assignments], atol=1e-2)
score = kmeans.score(points, batch_size=12)
self.assertAllClose(score, true_score, atol=1e-2)
def test_fit_raise_if_num_clusters_larger_than_num_points_random_init(self):
points = np.array([[2.0, 3.0], [1.6, 8.2]], dtype=np.float32)
with self.assertRaisesOpError('less'):
kmeans = tf.contrib.factorization.KMeansClustering(
num_clusters=3, initial_clusters=tf.contrib.factorization.RANDOM_INIT)
kmeans.fit(x=points, steps=10, batch_size=8)
def test_fit_raise_if_num_clusters_larger_than_num_points_kmeans_plus_plus(
self):
points = np.array([[2.0, 3.0], [1.6, 8.2]], dtype=np.float32)
with self.assertRaisesOpError(AssertionError):
kmeans = tf.contrib.factorization.KMeansClustering(
num_clusters=3,
initial_clusters=tf.contrib.factorization.KMEANS_PLUS_PLUS_INIT)
kmeans.fit(x=points, steps=10, batch_size=8)
class MiniBatchKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
class KMeansBenchmark(benchmark.Benchmark):
"""Base class for benchmarks."""
def SetUp(self, dimension=50, num_clusters=50, points_per_cluster=10000,
center_norm=500, cluster_width=20):
np.random.seed(123456)
self.num_clusters = num_clusters
self.num_points = num_clusters * points_per_cluster
self.centers = make_random_centers(self.num_clusters, dimension,
center_norm=center_norm)
self.points, _, scores = make_random_points(self.centers, self.num_points,
max_offset=cluster_width)
self.score = float(np.sum(scores))
def _report(self, num_iters, start, end, scores):
print(scores)
self.report_benchmark(iters=num_iters, wall_time=(end - start) / num_iters,
extras={'true_sum_squared_distances': self.score,
'fit_scores': scores})
def _fit(self, num_iters=10):
pass
def benchmark_01_2dim_5center_500point(self):
self.SetUp(dimension=2, num_clusters=5, points_per_cluster=100)
self._fit()
def benchmark_02_20dim_20center_10kpoint(self):
self.SetUp(dimension=20, num_clusters=20, points_per_cluster=500)
self._fit()
def benchmark_03_100dim_50center_50kpoint(self):
self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000)
self._fit()
def benchmark_03_100dim_50center_50kpoint_unseparated(self):
self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000,
cluster_width=250)
self._fit()
def benchmark_04_100dim_500center_500kpoint(self):
self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000)
self._fit(num_iters=4)
def benchmark_05_100dim_500center_500kpoint_unseparated(self):
self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000,
cluster_width=250)
self._fit(num_iters=4)
class TensorflowKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting tensorflow KMeans: %d' % i)
tf_kmeans = tf.contrib.factorization.KMeansClustering(
self.num_clusters,
initial_clusters=tf.contrib.factorization.KMEANS_PLUS_PLUS_INIT,
kmeans_plus_plus_num_retries=int(math.log(self.num_clusters) + 2),
random_seed=i * 42,
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
tf_kmeans.fit(x=self.points, batch_size=self.num_points, steps=50,
relative_tolerance=1e-6)
_ = tf_kmeans.clusters()
scores.append(tf_kmeans.score(self.points))
self._report(num_iters, start, time.time(), scores)
class SklearnKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting sklearn KMeans: %d' % i)
sklearn_kmeans = SklearnKMeans(n_clusters=self.num_clusters,
init='k-means++',
max_iter=50, n_init=1, tol=1e-4,
random_state=i * 42)
sklearn_kmeans.fit(self.points)
scores.append(sklearn_kmeans.inertia_)
self._report(num_iters, start, time.time(), scores)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
Blizzard/heroprotocol | heroprotocol/versions/protocol49278.py | 27 | 25984 | #!/usr/bin/env python
#
# Copyright 2015-2021 Blizzard Entertainment. Subject to the MIT license.
# See the included LICENSE file for more information.
#
import six
from heroprotocol.decoders import CorruptedError, BitPackedBuffer, BitPackedDecoder, VersionedDecoder
# Decoding instructions for each protocol type.
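# Each entry is (decoder method, arguments); e.g. ('_int',[(0,7)]) decodes an
# integer as offset 0 plus a 7-bit value, and later entries refer to earlier
# ones by index (a sketch of the convention used by heroprotocol.decoders).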
typeinfos = [
('_int',[(0,7)]), #0
('_int',[(0,4)]), #1
('_int',[(0,5)]), #2
('_int',[(0,6)]), #3
('_int',[(0,14)]), #4
('_int',[(0,22)]), #5
('_int',[(0,32)]), #6
('_choice',[(0,2),{0:('m_uint6',3),1:('m_uint14',4),2:('m_uint22',5),3:('m_uint32',6)}]), #7
('_struct',[[('m_userId',2,-1)]]), #8
('_blob',[(0,8)]), #9
('_int',[(0,8)]), #10
('_struct',[[('m_flags',10,0),('m_major',10,1),('m_minor',10,2),('m_revision',10,3),('m_build',6,4),('m_baseBuild',6,5)]]), #11
('_int',[(0,3)]), #12
('_bool',[]), #13
('_array',[(16,0),10]), #14
('_optional',[14]), #15
('_blob',[(16,0)]), #16
('_struct',[[('m_dataDeprecated',15,0),('m_data',16,1)]]), #17
('_struct',[[('m_signature',9,0),('m_version',11,1),('m_type',12,2),('m_elapsedGameLoops',6,3),('m_useScaledTime',13,4),('m_ngdpRootKey',17,5),('m_dataBuildNum',6,6),('m_fixedFileHash',17,7)]]), #18
('_fourcc',[]), #19
('_blob',[(0,7)]), #20
('_int',[(0,64)]), #21
('_struct',[[('m_region',10,0),('m_programId',19,1),('m_realm',6,2),('m_name',20,3),('m_id',21,4)]]), #22
('_struct',[[('m_a',10,0),('m_r',10,1),('m_g',10,2),('m_b',10,3)]]), #23
('_int',[(0,2)]), #24
('_optional',[10]), #25
('_struct',[[('m_name',9,0),('m_toon',22,1),('m_race',9,2),('m_color',23,3),('m_control',10,4),('m_teamId',1,5),('m_handicap',0,6),('m_observe',24,7),('m_result',24,8),('m_workingSetSlotId',25,9),('m_hero',9,10)]]), #26
('_array',[(0,5),26]), #27
('_optional',[27]), #28
('_blob',[(0,10)]), #29
('_blob',[(0,11)]), #30
('_struct',[[('m_file',30,0)]]), #31
('_optional',[13]), #32
('_int',[(-9223372036854775808,64)]), #33
('_blob',[(0,12)]), #34
('_blob',[(40,0)]), #35
('_array',[(0,6),35]), #36
('_optional',[36]), #37
('_array',[(0,6),30]), #38
('_optional',[38]), #39
('_struct',[[('m_playerList',28,0),('m_title',29,1),('m_difficulty',9,2),('m_thumbnail',31,3),('m_isBlizzardMap',13,4),('m_restartAsTransitionMap',32,16),('m_timeUTC',33,5),('m_timeLocalOffset',33,6),('m_description',34,7),('m_imageFilePath',30,8),('m_campaignIndex',10,15),('m_mapFileName',30,9),('m_cacheHandles',37,10),('m_miniSave',13,11),('m_gameSpeed',12,12),('m_defaultDifficulty',3,13),('m_modPaths',39,14)]]), #40
('_optional',[9]), #41
('_optional',[35]), #42
('_optional',[6]), #43
('_struct',[[('m_race',25,-1)]]), #44
('_struct',[[('m_team',25,-1)]]), #45
('_blob',[(0,9)]), #46
('_struct',[[('m_name',9,-18),('m_clanTag',41,-17),('m_clanLogo',42,-16),('m_highestLeague',25,-15),('m_combinedRaceLevels',43,-14),('m_randomSeed',6,-13),('m_racePreference',44,-12),('m_teamPreference',45,-11),('m_testMap',13,-10),('m_testAuto',13,-9),('m_examine',13,-8),('m_customInterface',13,-7),('m_testType',6,-6),('m_observe',24,-5),('m_hero',46,-4),('m_skin',46,-3),('m_mount',46,-2),('m_toonHandle',20,-1)]]), #47
('_array',[(0,5),47]), #48
('_struct',[[('m_lockTeams',13,-16),('m_teamsTogether',13,-15),('m_advancedSharedControl',13,-14),('m_randomRaces',13,-13),('m_battleNet',13,-12),('m_amm',13,-11),('m_competitive',13,-10),('m_practice',13,-9),('m_cooperative',13,-8),('m_noVictoryOrDefeat',13,-7),('m_heroDuplicatesAllowed',13,-6),('m_fog',24,-5),('m_observers',24,-4),('m_userDifficulty',24,-3),('m_clientDebugFlags',21,-2),('m_ammId',43,-1)]]), #49
('_int',[(1,4)]), #50
('_int',[(1,8)]), #51
('_bitarray',[(0,6)]), #52
('_bitarray',[(0,8)]), #53
('_bitarray',[(0,2)]), #54
('_bitarray',[(0,7)]), #55
('_struct',[[('m_allowedColors',52,-6),('m_allowedRaces',53,-5),('m_allowedDifficulty',52,-4),('m_allowedControls',53,-3),('m_allowedObserveTypes',54,-2),('m_allowedAIBuilds',55,-1)]]), #56
('_array',[(0,5),56]), #57
('_struct',[[('m_randomValue',6,-26),('m_gameCacheName',29,-25),('m_gameOptions',49,-24),('m_gameSpeed',12,-23),('m_gameType',12,-22),('m_maxUsers',2,-21),('m_maxObservers',2,-20),('m_maxPlayers',2,-19),('m_maxTeams',50,-18),('m_maxColors',3,-17),('m_maxRaces',51,-16),('m_maxControls',10,-15),('m_mapSizeX',10,-14),('m_mapSizeY',10,-13),('m_mapFileSyncChecksum',6,-12),('m_mapFileName',30,-11),('m_mapAuthorName',9,-10),('m_modFileSyncChecksum',6,-9),('m_slotDescriptions',57,-8),('m_defaultDifficulty',3,-7),('m_defaultAIBuild',0,-6),('m_cacheHandles',36,-5),('m_hasExtensionMod',13,-4),('m_isBlizzardMap',13,-3),('m_isPremadeFFA',13,-2),('m_isCoopMode',13,-1)]]), #58
('_optional',[1]), #59
('_optional',[2]), #60
('_struct',[[('m_color',60,-1)]]), #61
('_array',[(0,4),46]), #62
('_array',[(0,17),6]), #63
('_array',[(0,9),6]), #64
('_struct',[[('m_control',10,-20),('m_userId',59,-19),('m_teamId',1,-18),('m_colorPref',61,-17),('m_racePref',44,-16),('m_difficulty',3,-15),('m_aiBuild',0,-14),('m_handicap',0,-13),('m_observe',24,-12),('m_logoIndex',6,-11),('m_hero',46,-10),('m_skin',46,-9),('m_mount',46,-8),('m_artifacts',62,-7),('m_workingSetSlotId',25,-6),('m_rewards',63,-5),('m_toonHandle',20,-4),('m_licenses',64,-3),('m_tandemLeaderUserId',59,-2),('m_hasSilencePenalty',13,-1)]]), #65
('_array',[(0,5),65]), #66
('_struct',[[('m_phase',12,-11),('m_maxUsers',2,-10),('m_maxObservers',2,-9),('m_slots',66,-8),('m_randomSeed',6,-7),('m_hostUserId',59,-6),('m_isSinglePlayer',13,-5),('m_pickedMapTag',10,-4),('m_gameDuration',6,-3),('m_defaultDifficulty',3,-2),('m_defaultAIBuild',0,-1)]]), #67
('_struct',[[('m_userInitialData',48,-3),('m_gameDescription',58,-2),('m_lobbyState',67,-1)]]), #68
('_struct',[[('m_syncLobbyState',68,-1)]]), #69
('_struct',[[('m_name',20,-1)]]), #70
('_blob',[(0,6)]), #71
('_struct',[[('m_name',71,-1)]]), #72
('_struct',[[('m_name',71,-3),('m_type',6,-2),('m_data',20,-1)]]), #73
('_struct',[[('m_type',6,-3),('m_name',71,-2),('m_data',34,-1)]]), #74
('_array',[(0,5),10]), #75
('_struct',[[('m_signature',75,-2),('m_toonHandle',20,-1)]]), #76
('_struct',[[('m_gameFullyDownloaded',13,-14),('m_developmentCheatsEnabled',13,-13),('m_testCheatsEnabled',13,-12),('m_multiplayerCheatsEnabled',13,-11),('m_syncChecksummingEnabled',13,-10),('m_isMapToMapTransition',13,-9),('m_debugPauseEnabled',13,-8),('m_useGalaxyAsserts',13,-7),('m_platformMac',13,-6),('m_cameraFollow',13,-5),('m_baseBuildNum',6,-4),('m_buildNum',6,-3),('m_versionFlags',6,-2),('m_hotkeyProfile',46,-1)]]), #77
('_struct',[[]]), #78
('_int',[(0,16)]), #79
('_struct',[[('x',79,-2),('y',79,-1)]]), #80
('_struct',[[('m_which',12,-2),('m_target',80,-1)]]), #81
('_struct',[[('m_fileName',30,-5),('m_automatic',13,-4),('m_overwrite',13,-3),('m_name',9,-2),('m_description',29,-1)]]), #82
('_int',[(1,32)]), #83
('_struct',[[('m_sequence',83,-1)]]), #84
('_null',[]), #85
('_int',[(0,20)]), #86
('_int',[(-2147483648,32)]), #87
('_struct',[[('x',86,-3),('y',86,-2),('z',87,-1)]]), #88
('_struct',[[('m_targetUnitFlags',79,-7),('m_timer',10,-6),('m_tag',6,-5),('m_snapshotUnitLink',79,-4),('m_snapshotControlPlayerId',59,-3),('m_snapshotUpkeepPlayerId',59,-2),('m_snapshotPoint',88,-1)]]), #89
('_choice',[(0,2),{0:('None',85),1:('TargetPoint',88),2:('TargetUnit',89)}]), #90
('_struct',[[('m_target',90,-4),('m_time',87,-3),('m_verb',29,-2),('m_arguments',29,-1)]]), #91
('_struct',[[('m_data',91,-1)]]), #92
('_int',[(0,25)]), #93
('_struct',[[('m_abilLink',79,-3),('m_abilCmdIndex',2,-2),('m_abilCmdData',25,-1)]]), #94
('_optional',[94]), #95
('_choice',[(0,2),{0:('None',85),1:('TargetPoint',88),2:('TargetUnit',89),3:('Data',6)}]), #96
('_optional',[88]), #97
('_struct',[[('m_cmdFlags',93,-7),('m_abil',95,-6),('m_data',96,-5),('m_vector',97,-4),('m_sequence',83,-3),('m_otherUnit',43,-2),('m_unitGroup',43,-1)]]), #98
('_int',[(0,9)]), #99
('_bitarray',[(0,9)]), #100
('_array',[(0,9),99]), #101
('_choice',[(0,2),{0:('None',85),1:('Mask',100),2:('OneIndices',101),3:('ZeroIndices',101)}]), #102
('_struct',[[('m_unitLink',79,-4),('m_subgroupPriority',10,-3),('m_intraSubgroupPriority',10,-2),('m_count',99,-1)]]), #103
('_array',[(0,9),103]), #104
('_struct',[[('m_subgroupIndex',99,-4),('m_removeMask',102,-3),('m_addSubgroups',104,-2),('m_addUnitTags',64,-1)]]), #105
('_struct',[[('m_controlGroupId',1,-2),('m_delta',105,-1)]]), #106
('_struct',[[('m_controlGroupIndex',1,-3),('m_controlGroupUpdate',12,-2),('m_mask',102,-1)]]), #107
('_struct',[[('m_count',99,-6),('m_subgroupCount',99,-5),('m_activeSubgroupIndex',99,-4),('m_unitTagsChecksum',6,-3),('m_subgroupIndicesChecksum',6,-2),('m_subgroupsChecksum',6,-1)]]), #108
('_struct',[[('m_controlGroupId',1,-2),('m_selectionSyncData',108,-1)]]), #109
('_struct',[[('m_chatMessage',29,-1)]]), #110
('_struct',[[('m_speed',12,-1)]]), #111
('_int',[(-128,8)]), #112
('_struct',[[('m_delta',112,-1)]]), #113
('_struct',[[('x',87,-2),('y',87,-1)]]), #114
('_struct',[[('m_point',114,-4),('m_unit',6,-3),('m_pingedMinimap',13,-2),('m_option',87,-1)]]), #115
('_struct',[[('m_verb',29,-2),('m_arguments',29,-1)]]), #116
('_struct',[[('m_alliance',6,-2),('m_control',6,-1)]]), #117
('_struct',[[('m_unitTag',6,-1)]]), #118
('_struct',[[('m_unitTag',6,-2),('m_flags',10,-1)]]), #119
('_struct',[[('m_conversationId',87,-2),('m_replyId',87,-1)]]), #120
('_optional',[20]), #121
('_struct',[[('m_gameUserId',1,-6),('m_observe',24,-5),('m_name',9,-4),('m_toonHandle',121,-3),('m_clanTag',41,-2),('m_clanLogo',42,-1)]]), #122
('_array',[(0,5),122]), #123
('_int',[(0,1)]), #124
('_struct',[[('m_userInfos',123,-2),('m_method',124,-1)]]), #125
('_choice',[(0,3),{0:('None',85),1:('Checked',13),2:('ValueChanged',6),3:('SelectionChanged',87),4:('TextChanged',30),5:('MouseButton',6)}]), #126
('_struct',[[('m_controlId',87,-3),('m_eventType',87,-2),('m_eventData',126,-1)]]), #127
('_struct',[[('m_soundHash',6,-2),('m_length',6,-1)]]), #128
('_array',[(0,7),6]), #129
('_struct',[[('m_soundHash',129,-2),('m_length',129,-1)]]), #130
('_struct',[[('m_syncInfo',130,-1)]]), #131
('_struct',[[('m_queryId',79,-3),('m_lengthMs',6,-2),('m_finishGameLoop',6,-1)]]), #132
('_struct',[[('m_queryId',79,-2),('m_lengthMs',6,-1)]]), #133
('_struct',[[('m_animWaitQueryId',79,-1)]]), #134
('_struct',[[('m_sound',6,-1)]]), #135
('_struct',[[('m_transmissionId',87,-2),('m_thread',6,-1)]]), #136
('_struct',[[('m_transmissionId',87,-1)]]), #137
('_optional',[80]), #138
('_optional',[79]), #139
('_optional',[112]), #140
('_struct',[[('m_target',138,-6),('m_distance',139,-5),('m_pitch',139,-4),('m_yaw',139,-3),('m_reason',140,-2),('m_follow',13,-1)]]), #141
('_struct',[[('m_skipType',124,-1)]]), #142
('_int',[(0,11)]), #143
('_struct',[[('x',143,-2),('y',143,-1)]]), #144
('_struct',[[('m_button',6,-5),('m_down',13,-4),('m_posUI',144,-3),('m_posWorld',88,-2),('m_flags',112,-1)]]), #145
('_struct',[[('m_posUI',144,-3),('m_posWorld',88,-2),('m_flags',112,-1)]]), #146
('_struct',[[('m_achievementLink',79,-1)]]), #147
('_struct',[[('m_hotkey',6,-2),('m_down',13,-1)]]), #148
('_struct',[[('m_abilLink',79,-3),('m_abilCmdIndex',2,-2),('m_state',112,-1)]]), #149
('_struct',[[('m_soundtrack',6,-1)]]), #150
('_struct',[[('m_key',112,-2),('m_flags',112,-1)]]), #151
('_struct',[[('m_error',87,-2),('m_abil',95,-1)]]), #152
('_int',[(0,19)]), #153
('_struct',[[('m_decrementMs',153,-1)]]), #154
('_struct',[[('m_portraitId',87,-1)]]), #155
('_struct',[[('m_functionName',20,-1)]]), #156
('_struct',[[('m_result',87,-1)]]), #157
('_struct',[[('m_gameMenuItemIndex',87,-1)]]), #158
('_int',[(-32768,16)]), #159
('_struct',[[('m_wheelSpin',159,-2),('m_flags',112,-1)]]), #160
('_struct',[[('m_button',79,-1)]]), #161
('_struct',[[('m_cutsceneId',87,-2),('m_bookmarkName',20,-1)]]), #162
('_struct',[[('m_cutsceneId',87,-1)]]), #163
('_struct',[[('m_cutsceneId',87,-3),('m_conversationLine',20,-2),('m_altConversationLine',20,-1)]]), #164
('_struct',[[('m_cutsceneId',87,-2),('m_conversationLine',20,-1)]]), #165
('_struct',[[('m_leaveReason',1,-1)]]), #166
('_struct',[[('m_observe',24,-7),('m_name',9,-6),('m_toonHandle',121,-5),('m_clanTag',41,-4),('m_clanLogo',42,-3),('m_hijack',13,-2),('m_hijackCloneGameUserId',59,-1)]]), #167
('_optional',[83]), #168
('_struct',[[('m_state',24,-2),('m_sequence',168,-1)]]), #169
('_struct',[[('m_sequence',168,-2),('m_target',88,-1)]]), #170
('_struct',[[('m_sequence',168,-2),('m_target',89,-1)]]), #171
('_struct',[[('m_catalog',10,-4),('m_entry',79,-3),('m_field',9,-2),('m_value',9,-1)]]), #172
('_struct',[[('m_index',6,-1)]]), #173
('_struct',[[('m_shown',13,-1)]]), #174
('_struct',[[('m_recipient',12,-2),('m_string',30,-1)]]), #175
('_struct',[[('m_recipient',12,-2),('m_point',114,-1)]]), #176
('_struct',[[('m_progress',87,-1)]]), #177
('_struct',[[('m_status',24,-1)]]), #178
('_struct',[[('m_abilLink',79,-3),('m_abilCmdIndex',2,-2),('m_buttonLink',79,-1)]]), #179
('_struct',[[('m_behaviorLink',79,-2),('m_buttonLink',79,-1)]]), #180
('_choice',[(0,2),{0:('None',85),1:('Ability',179),2:('Behavior',180),3:('Vitals',159)}]), #181
('_struct',[[('m_announcement',181,-4),('m_announceLink',79,-3),('m_otherUnitTag',6,-2),('m_unitTag',6,-1)]]), #182
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_unitTypeName',29,2),('m_controlPlayerId',1,3),('m_upkeepPlayerId',1,4),('m_x',10,5),('m_y',10,6)]]), #183
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_x',10,2),('m_y',10,3)]]), #184
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_killerPlayerId',59,2),('m_x',10,3),('m_y',10,4),('m_killerUnitTagIndex',43,5),('m_killerUnitTagRecycle',43,6)]]), #185
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_controlPlayerId',1,2),('m_upkeepPlayerId',1,3)]]), #186
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_unitTypeName',29,2)]]), #187
('_struct',[[('m_playerId',1,0),('m_upgradeTypeName',29,1),('m_count',87,2)]]), #188
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1)]]), #189
('_array',[(0,10),87]), #190
('_struct',[[('m_firstUnitIndex',6,0),('m_items',190,1)]]), #191
('_struct',[[('m_playerId',1,0),('m_type',6,1),('m_userId',43,2),('m_slotId',43,3)]]), #192
('_struct',[[('m_key',29,0)]]), #193
('_struct',[[('__parent',193,0),('m_value',29,1)]]), #194
('_array',[(0,6),194]), #195
('_optional',[195]), #196
('_struct',[[('__parent',193,0),('m_value',87,1)]]), #197
('_array',[(0,6),197]), #198
('_optional',[198]), #199
('_struct',[[('m_eventName',29,0),('m_stringData',196,1),('m_intData',199,2),('m_fixedData',199,3)]]), #200
('_struct',[[('m_value',6,0),('m_time',6,1)]]), #201
('_array',[(0,6),201]), #202
('_array',[(0,5),202]), #203
('_struct',[[('m_name',29,0),('m_values',203,1)]]), #204
('_array',[(0,21),204]), #205
('_struct',[[('m_instanceList',205,0)]]), #206
]
# Map from protocol NNet.Game.*Event eventid to (typeid, name)
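# (As a sketch: a decoder reads an event id from the stream, looks it up here
# to get (typeid, name), then decodes the payload using typeinfos[typeid].)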
game_event_types = {
5: (78, 'NNet.Game.SUserFinishedLoadingSyncEvent'),
7: (77, 'NNet.Game.SUserOptionsEvent'),
9: (70, 'NNet.Game.SBankFileEvent'),
10: (72, 'NNet.Game.SBankSectionEvent'),
11: (73, 'NNet.Game.SBankKeyEvent'),
12: (74, 'NNet.Game.SBankValueEvent'),
13: (76, 'NNet.Game.SBankSignatureEvent'),
14: (81, 'NNet.Game.SCameraSaveEvent'),
21: (82, 'NNet.Game.SSaveGameEvent'),
22: (78, 'NNet.Game.SSaveGameDoneEvent'),
23: (78, 'NNet.Game.SLoadGameDoneEvent'),
25: (84, 'NNet.Game.SCommandManagerResetEvent'),
26: (92, 'NNet.Game.SGameCheatEvent'),
27: (98, 'NNet.Game.SCmdEvent'),
28: (106, 'NNet.Game.SSelectionDeltaEvent'),
29: (107, 'NNet.Game.SControlGroupUpdateEvent'),
30: (109, 'NNet.Game.SSelectionSyncCheckEvent'),
32: (110, 'NNet.Game.STriggerChatMessageEvent'),
34: (111, 'NNet.Game.SSetAbsoluteGameSpeedEvent'),
35: (113, 'NNet.Game.SAddAbsoluteGameSpeedEvent'),
36: (115, 'NNet.Game.STriggerPingEvent'),
37: (116, 'NNet.Game.SBroadcastCheatEvent'),
38: (117, 'NNet.Game.SAllianceEvent'),
39: (118, 'NNet.Game.SUnitClickEvent'),
40: (119, 'NNet.Game.SUnitHighlightEvent'),
41: (120, 'NNet.Game.STriggerReplySelectedEvent'),
43: (125, 'NNet.Game.SHijackReplayGameEvent'),
44: (78, 'NNet.Game.STriggerSkippedEvent'),
45: (128, 'NNet.Game.STriggerSoundLengthQueryEvent'),
46: (135, 'NNet.Game.STriggerSoundOffsetEvent'),
47: (136, 'NNet.Game.STriggerTransmissionOffsetEvent'),
48: (137, 'NNet.Game.STriggerTransmissionCompleteEvent'),
49: (141, 'NNet.Game.SCameraUpdateEvent'),
50: (78, 'NNet.Game.STriggerAbortMissionEvent'),
55: (127, 'NNet.Game.STriggerDialogControlEvent'),
56: (131, 'NNet.Game.STriggerSoundLengthSyncEvent'),
57: (142, 'NNet.Game.STriggerConversationSkippedEvent'),
58: (145, 'NNet.Game.STriggerMouseClickedEvent'),
59: (146, 'NNet.Game.STriggerMouseMovedEvent'),
60: (147, 'NNet.Game.SAchievementAwardedEvent'),
61: (148, 'NNet.Game.STriggerHotkeyPressedEvent'),
62: (149, 'NNet.Game.STriggerTargetModeUpdateEvent'),
64: (150, 'NNet.Game.STriggerSoundtrackDoneEvent'),
66: (151, 'NNet.Game.STriggerKeyPressedEvent'),
67: (156, 'NNet.Game.STriggerMovieFunctionEvent'),
76: (152, 'NNet.Game.STriggerCommandErrorEvent'),
86: (78, 'NNet.Game.STriggerMovieStartedEvent'),
87: (78, 'NNet.Game.STriggerMovieFinishedEvent'),
88: (154, 'NNet.Game.SDecrementGameTimeRemainingEvent'),
89: (155, 'NNet.Game.STriggerPortraitLoadedEvent'),
90: (157, 'NNet.Game.STriggerCustomDialogDismissedEvent'),
91: (158, 'NNet.Game.STriggerGameMenuItemSelectedEvent'),
92: (160, 'NNet.Game.STriggerMouseWheelEvent'),
95: (161, 'NNet.Game.STriggerButtonPressedEvent'),
96: (78, 'NNet.Game.STriggerGameCreditsFinishedEvent'),
97: (162, 'NNet.Game.STriggerCutsceneBookmarkFiredEvent'),
98: (163, 'NNet.Game.STriggerCutsceneEndSceneFiredEvent'),
99: (164, 'NNet.Game.STriggerCutsceneConversationLineEvent'),
100: (165, 'NNet.Game.STriggerCutsceneConversationLineMissingEvent'),
101: (166, 'NNet.Game.SGameUserLeaveEvent'),
102: (167, 'NNet.Game.SGameUserJoinEvent'),
103: (169, 'NNet.Game.SCommandManagerStateEvent'),
104: (170, 'NNet.Game.SCmdUpdateTargetPointEvent'),
105: (171, 'NNet.Game.SCmdUpdateTargetUnitEvent'),
106: (132, 'NNet.Game.STriggerAnimLengthQueryByNameEvent'),
107: (133, 'NNet.Game.STriggerAnimLengthQueryByPropsEvent'),
108: (134, 'NNet.Game.STriggerAnimOffsetEvent'),
109: (172, 'NNet.Game.SCatalogModifyEvent'),
110: (173, 'NNet.Game.SHeroTalentTreeSelectedEvent'),
111: (78, 'NNet.Game.STriggerProfilerLoggingFinishedEvent'),
112: (174, 'NNet.Game.SHeroTalentTreeSelectionPanelToggledEvent'),
}
# The typeid of the NNet.Game.EEventId enum.
game_eventid_typeid = 0
# Map from protocol NNet.Game.*Message eventid to (typeid, name)
message_event_types = {
0: (175, 'NNet.Game.SChatMessage'),
1: (176, 'NNet.Game.SPingMessage'),
2: (177, 'NNet.Game.SLoadingProgressMessage'),
3: (78, 'NNet.Game.SServerPingMessage'),
4: (178, 'NNet.Game.SReconnectNotifyMessage'),
5: (182, 'NNet.Game.SPlayerAnnounceMessage'),
}
# The typeid of the NNet.Game.EMessageId enum.
message_eventid_typeid = 1
# Map from protocol NNet.Replay.Tracker.*Event eventid to (typeid, name)
tracker_event_types = {
1: (183, 'NNet.Replay.Tracker.SUnitBornEvent'),
2: (185, 'NNet.Replay.Tracker.SUnitDiedEvent'),
3: (186, 'NNet.Replay.Tracker.SUnitOwnerChangeEvent'),
4: (187, 'NNet.Replay.Tracker.SUnitTypeChangeEvent'),
5: (188, 'NNet.Replay.Tracker.SUpgradeEvent'),
6: (183, 'NNet.Replay.Tracker.SUnitInitEvent'),
7: (189, 'NNet.Replay.Tracker.SUnitDoneEvent'),
8: (191, 'NNet.Replay.Tracker.SUnitPositionsEvent'),
9: (192, 'NNet.Replay.Tracker.SPlayerSetupEvent'),
10: (200, 'NNet.Replay.Tracker.SStatGameEvent'),
11: (206, 'NNet.Replay.Tracker.SScoreResultEvent'),
12: (184, 'NNet.Replay.Tracker.SUnitRevivedEvent'),
}
# The typeid of the NNet.Replay.Tracker.EEventId enum.
tracker_eventid_typeid = 2
# The typeid of NNet.SVarUint32 (the type used to encode gameloop deltas).
svaruint32_typeid = 7
# The typeid of NNet.Replay.SGameUserId (the type used to encode player ids).
replay_userid_typeid = 8
# The typeid of NNet.Replay.SHeader (the type used to store replay game version and length).
replay_header_typeid = 18
# The typeid of NNet.Game.SDetails (the type used to store overall replay details).
game_details_typeid = 40
# The typeid of NNet.Replay.SInitData (the type used to store the initial lobby).
replay_initdata_typeid = 69
def _varuint32_value(value):
# Returns the numeric value from a SVarUint32 instance.
for v in six.itervalues(value):
return v
return 0
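# Illustrative only: an SVarUint32 decodes to a single-key dict, so the helper
# above just returns its only value, e.g. (the key name here is assumed)
#   _varuint32_value({'m_uint6': 12}) == 12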
def _decode_event_stream(decoder, eventid_typeid, event_types, decode_user_id):
# Decodes events prefixed with a gameloop and possibly userid
gameloop = 0
while not decoder.done():
start_bits = decoder.used_bits()
# decode the gameloop delta before each event
delta = _varuint32_value(decoder.instance(svaruint32_typeid))
gameloop += delta
# decode the userid before each event
if decode_user_id:
userid = decoder.instance(replay_userid_typeid)
# decode the event id
eventid = decoder.instance(eventid_typeid)
typeid, typename = event_types.get(eventid, (None, None))
if typeid is None:
raise CorruptedError('eventid(%d) at %s' % (eventid, decoder))
# decode the event struct instance
event = decoder.instance(typeid)
event['_event'] = typename
event['_eventid'] = eventid
# insert gameloop and userid
event['_gameloop'] = gameloop
if decode_user_id:
event['_userid'] = userid
# the next event is byte aligned
decoder.byte_align()
# insert bits used in stream
event['_bits'] = decoder.used_bits() - start_bits
yield event
def decode_replay_game_events(contents):
"""Decodes and yields each game event from the contents byte string."""
decoder = BitPackedDecoder(contents, typeinfos)
for event in _decode_event_stream(decoder,
game_eventid_typeid,
game_event_types,
decode_user_id=True):
yield event
def decode_replay_message_events(contents):
"""Decodes and yields each message event from the contents byte string."""
decoder = BitPackedDecoder(contents, typeinfos)
for event in _decode_event_stream(decoder,
message_eventid_typeid,
message_event_types,
decode_user_id=True):
yield event
def decode_replay_tracker_events(contents):
"""Decodes and yields each tracker event from the contents byte string."""
decoder = VersionedDecoder(contents, typeinfos)
for event in _decode_event_stream(decoder,
tracker_eventid_typeid,
tracker_event_types,
decode_user_id=False):
yield event
def decode_replay_header(contents):
"""Decodes and return the replay header from the contents byte string."""
decoder = VersionedDecoder(contents, typeinfos)
return decoder.instance(replay_header_typeid)
def decode_replay_details(contents):
"""Decodes and returns the game details from the contents byte string."""
decoder = VersionedDecoder(contents, typeinfos)
return decoder.instance(game_details_typeid)
def decode_replay_initdata(contents):
"""Decodes and return the replay init data from the contents byte string."""
decoder = BitPackedDecoder(contents, typeinfos)
return decoder.instance(replay_initdata_typeid)
def decode_replay_attributes_events(contents):
"""Decodes and yields each attribute from the contents byte string."""
buffer = BitPackedBuffer(contents, 'little')
attributes = {}
if not buffer.done():
attributes['source'] = buffer.read_bits(8)
attributes['mapNamespace'] = buffer.read_bits(32)
_ = buffer.read_bits(32)
attributes['scopes'] = {}
while not buffer.done():
value = {}
value['namespace'] = buffer.read_bits(32)
value['attrid'] = attrid = buffer.read_bits(32)
scope = buffer.read_bits(8)
value['value'] = buffer.read_aligned_bytes(4)[::-1].strip(b'\x00')
if not scope in attributes['scopes']:
attributes['scopes'][scope] = {}
if not attrid in attributes['scopes'][scope]:
attributes['scopes'][scope][attrid] = []
attributes['scopes'][scope][attrid].append(value)
return attributes
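# Illustrative shape of the returned mapping (all field values assumed):
#   {'source': 2, 'mapNamespace': 999, 'scopes':
#       {16: {500: [{'namespace': 999, 'attrid': 500, 'value': b'Humn'}]}}}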
def unit_tag(unitTagIndex, unitTagRecycle):
return (unitTagIndex << 18) + unitTagRecycle
def unit_tag_index(unitTag):
return (unitTag >> 18) & 0x00003fff
def unit_tag_recycle(unitTag):
return (unitTag) & 0x0003ffff
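# Round-trip sanity check (example values assumed): a tag packs the index in
# the high bits and the recycle counter in the low 18 bits, so
#   tag = unit_tag(5, 2)               # (5 << 18) + 2 == 1310722
#   unit_tag_index(tag) == 5 and unit_tag_recycle(tag) == 2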
| mit |
agx/linux-wpan-next | tools/perf/scripts/python/event_analyzing_sample.py | 4719 | 7393 | # event_analyzing_sample.py: general event handler in python
#
# The current perf report is already very powerful with the annotation
# integrated, and this script is not trying to be as powerful as perf report;
# instead it provides end users/developers a flexible way to analyze events
# other than trace points.
#
# The two database-related functions in this script just show how to gather
# the basic information, and users can modify and write their own functions
# according to their specific requirements.
#
# The first function "show_general_events" just does a basic grouping for all
# generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is
# for a x86 HW PMU event: PEBS with load latency data.
#
import os
import sys
import math
import struct
import sqlite3
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from EventClass import *
#
# If the perf.data file has a large number of samples, the insert operations
# will be very time-consuming (about 10+ minutes for 10000 samples) if the
# .db database is on disk. Move the .db file to a RAM-based FS to speed up
# the handling, which cuts the time down to several seconds.
#
con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None
def trace_begin():
print "In trace_begin:\n"
#
# Will create several tables at the start, pebs_ll is for PEBS data with
# load latency info, while gen_events is for general event.
#
con.execute("""
create table if not exists gen_events (
name text,
symbol text,
comm text,
dso text
);""")
con.execute("""
create table if not exists pebs_ll (
name text,
symbol text,
comm text,
dso text,
flags integer,
ip integer,
status integer,
dse integer,
dla integer,
lat integer
);""")
#
# Create an event object and insert it into the database so that the user
# can do more analysis with simple database commands.
#
def process_event(param_dict):
event_attr = param_dict["attr"]
sample = param_dict["sample"]
raw_buf = param_dict["raw_buf"]
comm = param_dict["comm"]
name = param_dict["ev_name"]
# Symbol and dso info are not always resolved
if (param_dict.has_key("dso")):
dso = param_dict["dso"]
else:
dso = "Unknown_dso"
if (param_dict.has_key("symbol")):
symbol = param_dict["symbol"]
else:
symbol = "Unknown_symbol"
# Create the event object and insert it to the right table in database
event = create_event(name, comm, dso, symbol, raw_buf)
insert_db(event)
def insert_db(event):
if event.ev_type == EVTYPE_GENERIC:
con.execute("insert into gen_events values(?, ?, ?, ?)",
(event.name, event.symbol, event.comm, event.dso))
elif event.ev_type == EVTYPE_PEBS_LL:
event.ip &= 0x7fffffffffffffff
event.dla &= 0x7fffffffffffffff
con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
(event.name, event.symbol, event.comm, event.dso, event.flags,
event.ip, event.status, event.dse, event.dla, event.lat))
def trace_end():
print "In trace_end:\n"
# We show the basic info for the 2 type of event classes
show_general_events()
show_pebs_ll()
con.close()
#
# As the event count may be very big, we can't use a linear scale to show
# the histogram in real numbers, so a log2 algorithm is used instead.
#
def num2sym(num):
# Each number will have at least one '#'
snum = '#' * (int)(math.log(num, 2) + 1)
return snum
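# For example, on this log2 scale num2sym(1) == '#', num2sym(8) == '####' and
# num2sym(1000) == '#' * 10, so each extra '#' roughly doubles the count.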
def show_general_events():
# Check the total record number in the table
count = con.execute("select count(*) from gen_events")
for t in count:
print "There is %d records in gen_events table" % t[0]
if t[0] == 0:
return
print "Statistics about the general events grouped by thread/symbol/dso: \n"
# Group by thread
commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
for row in commq:
print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by symbol
print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
for row in symbolq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by dso
print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
for row in dsoq:
print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
#
# This function just shows the basic info, and we could do more with the
# data in the tables, like checking the function parameters when some
# big latency events happen.
#
def show_pebs_ll():
count = con.execute("select count(*) from pebs_ll")
for t in count:
print "There is %d records in pebs_ll table" % t[0]
if t[0] == 0:
return
print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
# Group by thread
commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
for row in commq:
print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by symbol
print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
for row in symbolq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by dse
dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
for row in dseq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by latency
latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
for row in latq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
def trace_unhandled(event_name, context, event_fields_dict):
print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
| gpl-2.0 |
windj007/bulbs | docs/conf.py | 2 | 7426 | # -*- coding: utf-8 -*-
#
# Bulbflow documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 7 02:02:14 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
#sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "_ext")))
sys.path.append(os.path.abspath('_ext'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode','bulbsdoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Bulbflow'
copyright = u'2011, James Thornton'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bulbflow'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Bulbflowdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Bulbflow.tex', u'Bulbflow Documentation',
u'James Thornton', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bulbflow', u'Bulbflow Documentation',
[u'James Thornton'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
autodoc_member_order = 'bysource'
| bsd-3-clause |
shahar-stratoscale/nova | nova/api/openstack/compute/plugins/v3/certificates.py | 15 | 2966 | # Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
import nova.cert.rpcapi
from nova import exception
from nova import network
from nova.openstack.common.gettextutils import _
ALIAS = "os-certificates"
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
def _translate_certificate_view(certificate, private_key=None):
return {
'data': certificate,
'private_key': private_key,
}
class CertificatesController(object):
"""The x509 Certificates API controller for the OpenStack API."""
def __init__(self):
self.network_api = network.API()
self.cert_rpcapi = nova.cert.rpcapi.CertAPI()
super(CertificatesController, self).__init__()
@extensions.expected_errors((404, 501))
def show(self, req, id):
"""Return certificate information."""
context = req.environ['nova.context']
authorize(context, action='show')
if id != 'root':
msg = _("Only root certificate can be retrieved.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
try:
cert = self.cert_rpcapi.fetch_ca(context,
project_id=context.project_id)
except exception.CryptoCAFileNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return {'certificate': _translate_certificate_view(cert)}
@extensions.expected_errors(())
@wsgi.response(201)
def create(self, req, body=None):
"""Create a certificate."""
context = req.environ['nova.context']
authorize(context, action='create')
pk, cert = self.cert_rpcapi.generate_x509_cert(context,
user_id=context.user_id, project_id=context.project_id)
return {'certificate': _translate_certificate_view(cert, pk)}
class Certificates(extensions.V3APIExtensionBase):
"""Certificates support."""
name = "Certificates"
alias = ALIAS
version = 1
def get_resources(self):
resources = [
extensions.ResourceExtension('os-certificates',
CertificatesController(),
member_actions={})]
return resources
def get_controller_extensions(self):
return []
| apache-2.0 |
pinry/pinry | core/models.py | 1 | 3376 | import requests
from io import BytesIO
from django.conf import settings
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.db import models
from django.dispatch import receiver
from django_images.models import Image as BaseImage, Thumbnail
from taggit.managers import TaggableManager
from users.models import User
class ImageManager(models.Manager):
_default_ua = {
'User-Agent': 'Mozilla/5.0 (Windows NT 5.1) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/48.0.2564.82 Safari/537.36',
}
# FIXME: Move this into an asynchronous task
def create_for_url(self, url, referer=None):
file_name = url.split("/")[-1].split('#')[0].split('?')[0]
buf = BytesIO()
headers = dict(self._default_ua)
if referer is not None:
headers["Referer"] = referer
response = requests.get(url, headers=headers)
buf.write(response.content)
obj = InMemoryUploadedFile(buf, 'image', file_name,
None, buf.tell(), None)
        # create the image and its thumbnails in one transaction, removing
        # the chance of getting the database into an inconsistent state when
        # we try to create the thumbnails one by one later
image = self.create(image=obj)
Thumbnail.objects.get_or_create_at_sizes(image, settings.IMAGE_SIZES.keys())
return image
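# Illustrative usage only (the URL is assumed and IMAGE_SIZES must be
# configured in settings):
#   image = Image.objects.create_for_url(
#       "https://example.com/pin.jpg", referer="https://example.com/")
#   image.thumbnail  # resized copies were created alongside the original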
class Image(BaseImage):
objects = ImageManager()
class Sizes:
standard = "standard"
thumbnail = "thumbnail"
square = "square"
class Meta:
proxy = True
@property
def standard(self):
return Thumbnail.objects.get(
original=self, size=self.Sizes.standard
)
@property
def thumbnail(self):
return Thumbnail.objects.get(
original=self, size=self.Sizes.thumbnail
)
@property
def square(self):
return Thumbnail.objects.get(
original=self, size=self.Sizes.square
)
class Board(models.Model):
class Meta:
unique_together = ("submitter", "name")
index_together = ("submitter", "name")
submitter = models.ForeignKey(User, on_delete=models.CASCADE)
name = models.CharField(max_length=128, blank=False, null=False)
private = models.BooleanField(default=False, blank=False)
pins = models.ManyToManyField("Pin", related_name="pins", blank=True)
published = models.DateTimeField(auto_now_add=True)
class Pin(models.Model):
submitter = models.ForeignKey(User, on_delete=models.CASCADE)
private = models.BooleanField(default=False, blank=False)
url = models.CharField(null=True, blank=True, max_length=2048)
referer = models.CharField(null=True, blank=True, max_length=2048)
description = models.TextField(blank=True, null=True)
image = models.ForeignKey(Image, related_name='pin', on_delete=models.CASCADE)
published = models.DateTimeField(auto_now_add=True)
tags = TaggableManager()
def tag_list(self):
return self.tags.all()
def __unicode__(self):
return '%s - %s' % (self.submitter, self.published)
@receiver(models.signals.post_delete, sender=Pin)
def delete_pin_images(sender, instance, **kwargs):
try:
instance.image.delete()
except Image.DoesNotExist:
pass
| bsd-2-clause |
mariaantoanelam/Licenta | Lib/test/test_thread.py | 7 | 2494 | # Very rudimentary test of thread module
# Create a bunch of threads, let each do some work, wait until all are done
from test_support import *
import whrandom
import thread
import time
print_test('thread (test_thread.py)', 1)
mutex = thread.allocate_lock()
whmutex = thread.allocate_lock() # for calls to whrandom
running = 0
done = thread.allocate_lock()
done.acquire()
verbose = 0
numtasks = 5
def task(ident):
global running
whmutex.acquire()
delay = whrandom.random() * numtasks
whmutex.release()
if verbose:
print 'task', ident, 'will run for', round(delay, 1), 'sec'
time.sleep(delay)
if verbose:
print 'task', ident, 'done'
mutex.acquire()
running = running - 1
if running == 0:
done.release()
mutex.release()
next_ident = 0
def newtask():
global next_ident, running
mutex.acquire()
next_ident = next_ident + 1
if verbose:
print 'creating task', next_ident
thread.start_new_thread(task, (next_ident,))
running = running + 1
mutex.release()
print_test('concurrent threads', 2)
for i in range(numtasks):
newtask()
done.acquire()
class barrier:
def __init__(self, n):
self.n = n
self.waiting = 0
self.checkin = thread.allocate_lock()
self.checkout = thread.allocate_lock()
self.checkout.acquire()
def enter(self):
checkin, checkout = self.checkin, self.checkout
checkin.acquire()
self.waiting = self.waiting + 1
if self.waiting == self.n:
self.waiting = self.n - 1
checkout.release()
return
checkin.release()
checkout.acquire()
self.waiting = self.waiting - 1
if self.waiting == 0:
checkin.release()
return
checkout.release()
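# The class above is a reusable two-lock ("checkin"/"checkout") barrier: the
# last of the n tasks to call enter() opens checkout and lets everyone
# through, and the last task to leave re-opens checkin for the next trip.
# For example, barrier(3) blocks the first two callers of enter() until a
# third one arrives.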
numtrips = 3
def task2(ident):
global running
for i in range(numtrips):
if ident == 0:
# give it a good chance to enter the next
# barrier before the others are all out
# of the current one
delay = 0.001
else:
whmutex.acquire()
delay = whrandom.random() * numtasks
whmutex.release()
if verbose:
print 'task', ident, 'will run for', round(delay, 1), 'sec'
time.sleep(delay)
if verbose:
print 'task', ident, 'entering barrier', i
bar.enter()
if verbose:
print 'task', ident, 'leaving barrier', i
mutex.acquire()
running = running - 1
if running == 0:
done.release()
mutex.release()
print_test('locks', 2)
if done.acquire(0):
raise ValueError, "'done' should have remained acquired"
bar = barrier(numtasks)
running = numtasks
for i in range(numtasks):
thread.start_new_thread(task2, (i,))
done.acquire()
| mit |
gregdek/ansible | lib/ansible/modules/network/cloudengine/ce_bgp_neighbor_af.py | 26 | 107417 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_bgp_neighbor_af
version_added: "2.4"
short_description: Manages BGP neighbor Address-family configuration on HUAWEI CloudEngine switches.
description:
- Manages BGP neighbor Address-family configurations on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@QijunPan)
options:
vrf_name:
description:
- Name of a BGP instance. The name is a case-sensitive string of characters.
The BGP instance can be used only after the corresponding VPN instance is created.
required: true
af_type:
description:
- Address family type of a BGP instance.
required: true
choices: ['ipv4uni', 'ipv4multi', 'ipv4vpn', 'ipv6uni', 'ipv6vpn', 'evpn']
remote_address:
description:
- IPv4 or IPv6 peer connection address.
required: true
advertise_irb:
description:
- If the value is true, advertised IRB routes are distinguished.
If the value is false, advertised IRB routes are not distinguished.
default: no_use
choices: ['no_use','true','false']
advertise_arp:
description:
- If the value is true, advertised ARP routes are distinguished.
If the value is false, advertised ARP routes are not distinguished.
default: no_use
choices: ['no_use','true','false']
advertise_remote_nexthop:
description:
- If the value is true, the remote next-hop attribute is advertised to peers.
If the value is false, the remote next-hop attribute is not advertised to any peers.
default: no_use
choices: ['no_use','true','false']
advertise_community:
description:
- If the value is true, the community attribute is advertised to peers.
If the value is false, the community attribute is not advertised to peers.
default: no_use
choices: ['no_use','true','false']
advertise_ext_community:
description:
- If the value is true, the extended community attribute is advertised to peers.
If the value is false, the extended community attribute is not advertised to peers.
default: no_use
choices: ['no_use','true','false']
discard_ext_community:
description:
- If the value is true, the extended community attribute in the peer route information is discarded.
If the value is false, the extended community attribute in the peer route information is not discarded.
default: no_use
choices: ['no_use','true','false']
allow_as_loop_enable:
description:
- If the value is true, repetitive local AS numbers are allowed.
If the value is false, repetitive local AS numbers are not allowed.
default: no_use
choices: ['no_use','true','false']
allow_as_loop_limit:
description:
- Set the maximum number of repetitive local AS number.
The value is an integer ranging from 1 to 10.
keep_all_routes:
description:
- If the value is true, the system stores all route update messages received from all peers (groups)
after BGP connection setup.
If the value is false, the system stores only BGP update messages that are received from peers
and pass the configured import policy.
default: no_use
choices: ['no_use','true','false']
nexthop_configure:
description:
- null, The next hop is not changed.
local, The next hop is changed to the local IP address.
invariable, Prevent the device from changing the next hop of each imported IGP route
when advertising it to its BGP peers.
choices: ['null', 'local', 'invariable']
preferred_value:
description:
- Assign a preferred value for the routes learned from a specified peer.
The value is an integer ranging from 0 to 65535.
public_as_only:
description:
- If the value is true, sent BGP update messages carry only the public AS number but do not carry
private AS numbers.
If the value is false, sent BGP update messages can carry private AS numbers.
default: no_use
choices: ['no_use','true','false']
public_as_only_force:
description:
- If the value is true, sent BGP update messages carry only the public AS number but do not carry
private AS numbers.
If the value is false, sent BGP update messages can carry private AS numbers.
default: no_use
choices: ['no_use','true','false']
public_as_only_limited:
description:
- Limited use public as number.
default: no_use
choices: ['no_use','true','false']
public_as_only_replace:
description:
- Private as replaced by public as number.
default: no_use
choices: ['no_use','true','false']
public_as_only_skip_peer_as:
description:
- Public as only skip peer as.
default: no_use
choices: ['no_use','true','false']
route_limit:
description:
- Configure the maximum number of routes that can be accepted from a peer.
The value is an integer ranging from 1 to 4294967295.
route_limit_percent:
description:
- Specify the percentage of routes when a router starts to generate an alarm.
The value is an integer ranging from 1 to 100.
route_limit_type:
description:
            - Noparameter, After the number of received routes exceeds the threshold and the timeout
              timer expires, no action is taken.
AlertOnly, An alarm is generated and no additional routes will be accepted if the maximum
number of routes allowed have been received.
IdleForever, The connection that is interrupted is not automatically re-established if the
maximum number of routes allowed have been received.
IdleTimeout, After the number of received routes exceeds the threshold and the timeout timer
expires, the connection that is interrupted is automatically re-established.
choices: ['noparameter', 'alertOnly', 'idleForever', 'idleTimeout']
route_limit_idle_timeout:
description:
- Specify the value of the idle-timeout timer to automatically reestablish the connections after
they are cut off when the number of routes exceeds the set threshold.
The value is an integer ranging from 1 to 1200.
rt_updt_interval:
description:
- Specify the minimum interval at which Update packets are sent. The value is an integer, in seconds.
The value is an integer ranging from 0 to 600.
redirect_ip:
description:
            - Redirect IP.
default: no_use
choices: ['no_use','true','false']
redirect_ip_vaildation:
description:
            - Redirect IP validation.
default: no_use
choices: ['no_use','true','false']
reflect_client:
description:
- If the value is true, the local device functions as the route reflector and a peer functions
as a client of the route reflector.
If the value is false, the route reflector and client functions are not configured.
default: no_use
choices: ['no_use','true','false']
substitute_as_enable:
description:
- If the value is true, the function to replace a specified peer's AS number in the AS-Path attribute with
the local AS number is enabled.
If the value is false, the function to replace a specified peer's AS number in the AS-Path attribute with
the local AS number is disabled.
default: no_use
choices: ['no_use','true','false']
import_rt_policy_name:
description:
- Specify the filtering policy applied to the routes learned from a peer.
The value is a string of 1 to 40 characters.
export_rt_policy_name:
description:
- Specify the filtering policy applied to the routes to be advertised to a peer.
The value is a string of 1 to 40 characters.
import_pref_filt_name:
description:
- Specify the IPv4 filtering policy applied to the routes received from a specified peer.
The value is a string of 1 to 169 characters.
export_pref_filt_name:
description:
- Specify the IPv4 filtering policy applied to the routes to be advertised to a specified peer.
The value is a string of 1 to 169 characters.
import_as_path_filter:
description:
- Apply an AS_Path-based filtering policy to the routes received from a specified peer.
The value is an integer ranging from 1 to 256.
export_as_path_filter:
description:
- Apply an AS_Path-based filtering policy to the routes to be advertised to a specified peer.
The value is an integer ranging from 1 to 256.
import_as_path_name_or_num:
description:
            - Apply an AS_Path list based filtering policy to the routes received from a specified peer.
export_as_path_name_or_num:
description:
            - Apply an AS_Path list based filtering policy to the routes to be advertised to a specified peer.
import_acl_name_or_num:
description:
- Apply an IPv4 ACL-based filtering policy to the routes received from a specified peer.
The value is a string of 1 to 32 characters.
export_acl_name_or_num:
description:
- Apply an IPv4 ACL-based filtering policy to the routes to be advertised to a specified peer.
The value is a string of 1 to 32 characters.
ipprefix_orf_enable:
description:
- If the value is true, the address prefix-based Outbound Route Filter (ORF) capability is
enabled for peers.
If the value is false, the address prefix-based Outbound Route Filter (ORF) capability is
disabled for peers.
default: no_use
choices: ['no_use','true','false']
is_nonstd_ipprefix_mod:
description:
- If the value is true, Non-standard capability codes are used during capability negotiation.
If the value is false, RFC-defined standard ORF capability codes are used during capability negotiation.
default: no_use
choices: ['no_use','true','false']
orftype:
description:
- ORF Type.
The value is an integer ranging from 0 to 65535.
orf_mode:
description:
- ORF mode.
null, Default value.
receive, ORF for incoming packets.
send, ORF for outgoing packets.
both, ORF for incoming and outgoing packets.
choices: ['null', 'receive', 'send', 'both']
soostring:
description:
- Configure the Site-of-Origin (SoO) extended community attribute.
The value is a string of 3 to 21 characters.
default_rt_adv_enable:
description:
- If the value is true, the function to advertise default routes to peers is enabled.
If the value is false, the function to advertise default routes to peers is disabled.
default: no_use
choices: ['no_use','true', 'false']
default_rt_adv_policy:
description:
- Specify the name of a used policy. The value is a string.
The value is a string of 1 to 40 characters.
default_rt_match_mode:
description:
- null, Null.
matchall, Advertise the default route if all matching conditions are met.
matchany, Advertise the default route if any matching condition is met.
choices: ['null', 'matchall', 'matchany']
add_path_mode:
description:
- null, Null.
receive, Support receiving Add-Path routes.
send, Support sending Add-Path routes.
both, Support receiving and sending Add-Path routes.
choices: ['null', 'receive', 'send', 'both']
adv_add_path_num:
description:
            - The number of Add-Path routes to advertise.
The value is an integer ranging from 2 to 64.
origin_as_valid:
description:
            - If the value is true, origin AS validation results are applied when routes are advertised.
              If the value is false, origin AS validation results are not applied.
default: no_use
choices: ['no_use','true', 'false']
vpls_enable:
description:
- If the value is true, vpls enable.
If the value is false, vpls disable.
default: no_use
choices: ['no_use','true', 'false']
vpls_ad_disable:
description:
- If the value is true, enable vpls-ad.
If the value is false, disable vpls-ad.
default: no_use
choices: ['no_use','true', 'false']
update_pkt_standard_compatible:
description:
            - If the value is true, Update messages received by a vpnv4 multicast neighbor carry
              no label.
              If the value is false, Update messages received by a vpnv4 multicast neighbor carry
              a label.
default: no_use
choices: ['no_use','true', 'false']
'''
EXAMPLES = '''
- name: CloudEngine BGP neighbor address family test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Config BGP peer Address_Family"
ce_bgp_neighbor_af:
state: present
vrf_name: js
af_type: ipv4uni
remote_address: 192.168.10.10
nexthop_configure: local
provider: "{{ cli }}"
- name: "Undo BGP peer Address_Family"
ce_bgp_neighbor_af:
state: absent
vrf_name: js
af_type: ipv4uni
remote_address: 192.168.10.10
nexthop_configure: local
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"af_type": "ipv4uni", "nexthop_configure": "local",
"remote_address": "192.168.10.10",
"state": "present", "vrf_name": "js"}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: {"bgp neighbor af": {"af_type": "ipv4uni", "remote_address": "192.168.10.10",
"vrf_name": "js"},
"bgp neighbor af other": {"af_type": "ipv4uni", "nexthop_configure": "null",
"vrf_name": "js"}}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"bgp neighbor af": {"af_type": "ipv4uni", "remote_address": "192.168.10.10",
"vrf_name": "js"},
"bgp neighbor af other": {"af_type": "ipv4uni", "nexthop_configure": "local",
"vrf_name": "js"}}
updates:
description: command sent to the device
returned: always
type: list
sample: ["peer 192.168.10.10 next-hop-local"]
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec, check_ip_addr
# get bgp peer af
CE_GET_BGP_PEER_AF_HEADER = """
<filter type="subtree">
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
<vrfName>%s</vrfName>
<bgpVrfAFs>
<bgpVrfAF>
<afType>%s</afType>
<peerAFs>
<peerAF>
<remoteAddress></remoteAddress>
"""
CE_GET_BGP_PEER_AF_TAIL = """
</peerAF>
</peerAFs>
</bgpVrfAF>
</bgpVrfAFs>
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</filter>
"""
# merge bgp peer af
CE_MERGE_BGP_PEER_AF_HEADER = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
<vrfName>%s</vrfName>
<bgpVrfAFs>
<bgpVrfAF>
<afType>%s</afType>
<peerAFs>
<peerAF operation="merge">
<remoteAddress>%s</remoteAddress>
"""
CE_MERGE_BGP_PEER_AF_TAIL = """
</peerAF>
</peerAFs>
</bgpVrfAF>
</bgpVrfAFs>
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</config>
"""
# create bgp peer af
CE_CREATE_BGP_PEER_AF = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
<vrfName>%s</vrfName>
<bgpVrfAFs>
<bgpVrfAF>
<afType>%s</afType>
<peerAFs>
<peerAF operation="create">
<remoteAddress>%s</remoteAddress>
</peerAF>
</peerAFs>
</bgpVrfAF>
</bgpVrfAFs>
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</config>
"""
# delete bgp peer af
CE_DELETE_BGP_PEER_AF = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
<vrfName>%s</vrfName>
<bgpVrfAFs>
<bgpVrfAF>
<afType>%s</afType>
<peerAFs>
<peerAF operation="delete">
<remoteAddress>%s</remoteAddress>
</peerAF>
</peerAFs>
</bgpVrfAF>
</bgpVrfAFs>
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</config>
"""
class BgpNeighborAf(object):
""" Manages BGP neighbor Address-family configuration """
def netconf_get_config(self, **kwargs):
""" netconf_get_config """
module = kwargs["module"]
conf_str = kwargs["conf_str"]
xml_str = get_nc_config(module, conf_str)
return xml_str
def netconf_set_config(self, **kwargs):
""" netconf_set_config """
module = kwargs["module"]
conf_str = kwargs["conf_str"]
xml_str = set_nc_config(module, conf_str)
return xml_str
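    # Illustrative only (instance name and address family assumed): the XML
    # templates above are filled in before being sent over NETCONF, e.g.
    #   conf_str = CE_GET_BGP_PEER_AF_HEADER % ("js", "ipv4uni") \
    #       + CE_GET_BGP_PEER_AF_TAIL
    #   recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)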
def check_bgp_neighbor_af_args(self, **kwargs):
""" check_bgp_neighbor_af_args """
module = kwargs["module"]
result = dict()
need_cfg = False
vrf_name = module.params['vrf_name']
if vrf_name:
if len(vrf_name) > 31 or len(vrf_name) == 0:
module.fail_json(
msg='Error: The len of vrf_name %s is out of [1 - 31].' % vrf_name)
state = module.params['state']
af_type = module.params['af_type']
remote_address = module.params['remote_address']
if not check_ip_addr(ipaddr=remote_address):
module.fail_json(
msg='Error: The remote_address %s is invalid.' % remote_address)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if state == "present":
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<remoteAddress>(.*)</remoteAddress>.*', recv_xml)
if re_find:
result["remote_address"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != remote_address:
need_cfg = True
else:
need_cfg = True
else:
if "<data/>" in recv_xml:
pass
else:
re_find = re.findall(
r'.*<remoteAddress>(.*)</remoteAddress>.*', recv_xml)
if re_find:
result["remote_address"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] == remote_address:
need_cfg = True
result["need_cfg"] = need_cfg
return result
def check_bgp_neighbor_af_other(self, **kwargs):
""" check_bgp_neighbor_af_other """
module = kwargs["module"]
result = dict()
need_cfg = False
state = module.params['state']
vrf_name = module.params['vrf_name']
af_type = module.params['af_type']
if state == "absent":
result["need_cfg"] = need_cfg
return result
advertise_irb = module.params['advertise_irb']
if advertise_irb != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<advertiseIrb></advertiseIrb>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<advertiseIrb>(.*)</advertiseIrb>.*', recv_xml)
if re_find:
result["advertise_irb"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != advertise_irb:
need_cfg = True
else:
need_cfg = True
advertise_arp = module.params['advertise_arp']
if advertise_arp != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<advertiseArp></advertiseArp>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<advertiseArp>(.*)</advertiseArp>.*', recv_xml)
if re_find:
result["advertise_arp"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != advertise_arp:
need_cfg = True
else:
need_cfg = True
advertise_remote_nexthop = module.params['advertise_remote_nexthop']
if advertise_remote_nexthop != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<advertiseRemoteNexthop></advertiseRemoteNexthop>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<advertiseRemoteNexthop>(.*)</advertiseRemoteNexthop>.*', recv_xml)
if re_find:
result["advertise_remote_nexthop"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != advertise_remote_nexthop:
need_cfg = True
else:
need_cfg = True
advertise_community = module.params['advertise_community']
if advertise_community != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<advertiseCommunity></advertiseCommunity>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<advertiseCommunity>(.*)</advertiseCommunity>.*', recv_xml)
if re_find:
result["advertise_community"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != advertise_community:
need_cfg = True
else:
need_cfg = True
advertise_ext_community = module.params['advertise_ext_community']
if advertise_ext_community != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<advertiseExtCommunity></advertiseExtCommunity>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<advertiseExtCommunity>(.*)</advertiseExtCommunity>.*', recv_xml)
if re_find:
result["advertise_ext_community"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != advertise_ext_community:
need_cfg = True
else:
need_cfg = True
discard_ext_community = module.params['discard_ext_community']
if discard_ext_community != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<discardExtCommunity></discardExtCommunity>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<discardExtCommunity>(.*)</discardExtCommunity>.*', recv_xml)
if re_find:
result["discard_ext_community"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != discard_ext_community:
need_cfg = True
else:
need_cfg = True
allow_as_loop_enable = module.params['allow_as_loop_enable']
if allow_as_loop_enable != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<allowAsLoopEnable></allowAsLoopEnable>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<allowAsLoopEnable>(.*)</allowAsLoopEnable>.*', recv_xml)
if re_find:
result["allow_as_loop_enable"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != allow_as_loop_enable:
need_cfg = True
else:
need_cfg = True
allow_as_loop_limit = module.params['allow_as_loop_limit']
if allow_as_loop_limit:
if int(allow_as_loop_limit) > 10 or int(allow_as_loop_limit) < 1:
module.fail_json(
                    msg='Error: The value of allow_as_loop_limit %s is out of [1 - 10].' % allow_as_loop_limit)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<allowAsLoopLimit></allowAsLoopLimit>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<allowAsLoopLimit>(.*)</allowAsLoopLimit>.*', recv_xml)
if re_find:
result["allow_as_loop_limit"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != allow_as_loop_limit:
need_cfg = True
else:
need_cfg = True
keep_all_routes = module.params['keep_all_routes']
if keep_all_routes != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<keepAllRoutes></keepAllRoutes>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<keepAllRoutes>(.*)</keepAllRoutes>.*', recv_xml)
if re_find:
result["keep_all_routes"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != keep_all_routes:
need_cfg = True
else:
need_cfg = True
nexthop_configure = module.params['nexthop_configure']
if nexthop_configure:
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<nextHopConfigure></nextHopConfigure>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<nextHopConfigure>(.*)</nextHopConfigure>.*', recv_xml)
if re_find:
result["nexthop_configure"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != nexthop_configure:
need_cfg = True
else:
need_cfg = True
preferred_value = module.params['preferred_value']
if preferred_value:
if int(preferred_value) > 65535 or int(preferred_value) < 0:
module.fail_json(
                    msg='Error: The value of preferred_value %s is out of [0 - 65535].' % preferred_value)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<preferredValue></preferredValue>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<preferredValue>(.*)</preferredValue>.*', recv_xml)
if re_find:
result["preferred_value"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != preferred_value:
need_cfg = True
else:
need_cfg = True
public_as_only = module.params['public_as_only']
if public_as_only != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<publicAsOnly></publicAsOnly>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<publicAsOnly>(.*)</publicAsOnly>.*', recv_xml)
if re_find:
result["public_as_only"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != public_as_only:
need_cfg = True
else:
need_cfg = True
public_as_only_force = module.params['public_as_only_force']
if public_as_only_force != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<publicAsOnlyForce></publicAsOnlyForce>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<publicAsOnlyForce>(.*)</publicAsOnlyForce>.*', recv_xml)
if re_find:
result["public_as_only_force"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != public_as_only_force:
need_cfg = True
else:
need_cfg = True
public_as_only_limited = module.params['public_as_only_limited']
if public_as_only_limited != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<publicAsOnlyLimited></publicAsOnlyLimited>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<publicAsOnlyLimited>(.*)</publicAsOnlyLimited>.*', recv_xml)
if re_find:
result["public_as_only_limited"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != public_as_only_limited:
need_cfg = True
else:
need_cfg = True
public_as_only_replace = module.params['public_as_only_replace']
if public_as_only_replace != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<publicAsOnlyReplace></publicAsOnlyReplace>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<publicAsOnlyReplace>(.*)</publicAsOnlyReplace>.*', recv_xml)
if re_find:
result["public_as_only_replace"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != public_as_only_replace:
need_cfg = True
else:
need_cfg = True
public_as_only_skip_peer_as = module.params[
'public_as_only_skip_peer_as']
if public_as_only_skip_peer_as != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<publicAsOnlySkipPeerAs></publicAsOnlySkipPeerAs>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<publicAsOnlySkipPeerAs>(.*)</publicAsOnlySkipPeerAs>.*', recv_xml)
if re_find:
result["public_as_only_skip_peer_as"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != public_as_only_skip_peer_as:
need_cfg = True
else:
need_cfg = True
route_limit = module.params['route_limit']
if route_limit:
            if int(route_limit) < 1 or int(route_limit) > 4294967295:
                module.fail_json(
                    msg='Error: The value of route_limit %s is out of [1 - 4294967295].' % route_limit)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<routeLimit></routeLimit>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<routeLimit>(.*)</routeLimit>.*', recv_xml)
if re_find:
result["route_limit"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != route_limit:
need_cfg = True
else:
need_cfg = True
route_limit_percent = module.params['route_limit_percent']
if route_limit_percent:
if int(route_limit_percent) < 1 or int(route_limit_percent) > 100:
module.fail_json(
msg='Error: The value of route_limit_percent %s is out of [1 - 100].' % route_limit_percent)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<routeLimitPercent></routeLimitPercent>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<routeLimitPercent>(.*)</routeLimitPercent>.*', recv_xml)
if re_find:
result["route_limit_percent"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != route_limit_percent:
need_cfg = True
else:
need_cfg = True
route_limit_type = module.params['route_limit_type']
if route_limit_type:
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<routeLimitType></routeLimitType>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<routeLimitType>(.*)</routeLimitType>.*', recv_xml)
if re_find:
result["route_limit_type"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != route_limit_type:
need_cfg = True
else:
need_cfg = True
route_limit_idle_timeout = module.params['route_limit_idle_timeout']
if route_limit_idle_timeout:
if int(route_limit_idle_timeout) < 1 or int(route_limit_idle_timeout) > 1200:
module.fail_json(
msg='Error: The value of route_limit_idle_timeout %s is out of '
'[1 - 1200].' % route_limit_idle_timeout)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<routeLimitIdleTimeout></routeLimitPercent>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<routeLimitIdleTimeout>(.*)</routeLimitIdleTimeout>.*', recv_xml)
if re_find:
result["route_limit_idle_timeout"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != route_limit_idle_timeout:
need_cfg = True
else:
need_cfg = True
rt_updt_interval = module.params['rt_updt_interval']
if rt_updt_interval:
if int(rt_updt_interval) < 0 or int(rt_updt_interval) > 600:
module.fail_json(
msg='Error: The value of rt_updt_interval %s is out of [0 - 600].' % rt_updt_interval)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<rtUpdtInterval></rtUpdtInterval>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<rtUpdtInterval>(.*)</rtUpdtInterval>.*', recv_xml)
if re_find:
result["rt_updt_interval"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != rt_updt_interval:
need_cfg = True
else:
need_cfg = True
redirect_ip = module.params['redirect_ip']
if redirect_ip != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<redirectIP></redirectIP>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<redirectIP>(.*)</redirectIP>.*', recv_xml)
if re_find:
result["redirect_ip"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != redirect_ip:
need_cfg = True
else:
need_cfg = True
redirect_ip_vaildation = module.params['redirect_ip_vaildation']
if redirect_ip_vaildation != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<redirectIPVaildation></redirectIPVaildation>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<redirectIPVaildation>(.*)</redirectIPVaildation>.*', recv_xml)
if re_find:
result["redirect_ip_vaildation"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != redirect_ip_vaildation:
need_cfg = True
else:
need_cfg = True
reflect_client = module.params['reflect_client']
if reflect_client != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<reflectClient></reflectClient>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<reflectClient>(.*)</reflectClient>.*', recv_xml)
if re_find:
result["reflect_client"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != reflect_client:
need_cfg = True
else:
need_cfg = True
substitute_as_enable = module.params['substitute_as_enable']
if substitute_as_enable != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<substituteAsEnable></substituteAsEnable>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<substituteAsEnable>(.*)</substituteAsEnable>.*', recv_xml)
if re_find:
result["substitute_as_enable"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != substitute_as_enable:
need_cfg = True
else:
need_cfg = True
import_rt_policy_name = module.params['import_rt_policy_name']
if import_rt_policy_name:
if len(import_rt_policy_name) < 1 or len(import_rt_policy_name) > 40:
module.fail_json(
msg='Error: The len of import_rt_policy_name %s is out of [1 - 40].' % import_rt_policy_name)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<importRtPolicyName></importRtPolicyName>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<importRtPolicyName>(.*)</importRtPolicyName>.*', recv_xml)
if re_find:
result["import_rt_policy_name"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != import_rt_policy_name:
need_cfg = True
else:
need_cfg = True
export_rt_policy_name = module.params['export_rt_policy_name']
if export_rt_policy_name:
if len(export_rt_policy_name) < 1 or len(export_rt_policy_name) > 40:
module.fail_json(
msg='Error: The len of export_rt_policy_name %s is out of [1 - 40].' % export_rt_policy_name)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<exportRtPolicyName></exportRtPolicyName>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<exportRtPolicyName>(.*)</exportRtPolicyName>.*', recv_xml)
if re_find:
result["export_rt_policy_name"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != export_rt_policy_name:
need_cfg = True
else:
need_cfg = True
import_pref_filt_name = module.params['import_pref_filt_name']
if import_pref_filt_name:
if len(import_pref_filt_name) < 1 or len(import_pref_filt_name) > 169:
module.fail_json(
msg='Error: The len of import_pref_filt_name %s is out of [1 - 169].' % import_pref_filt_name)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<importPrefFiltName></importPrefFiltName>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<importPrefFiltName>(.*)</importPrefFiltName>.*', recv_xml)
if re_find:
result["import_pref_filt_name"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != import_pref_filt_name:
need_cfg = True
else:
need_cfg = True
export_pref_filt_name = module.params['export_pref_filt_name']
if export_pref_filt_name:
if len(export_pref_filt_name) < 1 or len(export_pref_filt_name) > 169:
module.fail_json(
msg='Error: The len of export_pref_filt_name %s is out of [1 - 169].' % export_pref_filt_name)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<exportPrefFiltName></exportPrefFiltName>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<exportPrefFiltName>(.*)</exportPrefFiltName>.*', recv_xml)
if re_find:
result["export_pref_filt_name"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != export_pref_filt_name:
need_cfg = True
else:
need_cfg = True
import_as_path_filter = module.params['import_as_path_filter']
if import_as_path_filter:
if int(import_as_path_filter) < 1 or int(import_as_path_filter) > 256:
module.fail_json(
msg='Error: The value of import_as_path_filter %s is out of [1 - 256].' % import_as_path_filter)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<importAsPathFilter></importAsPathFilter>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<importAsPathFilter>(.*)</importAsPathFilter>.*', recv_xml)
if re_find:
result["import_as_path_filter"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != import_as_path_filter:
need_cfg = True
else:
need_cfg = True
export_as_path_filter = module.params['export_as_path_filter']
if export_as_path_filter:
if int(export_as_path_filter) < 1 or int(export_as_path_filter) > 256:
module.fail_json(
msg='Error: The value of export_as_path_filter %s is out of [1 - 256].' % export_as_path_filter)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<exportAsPathFilter></exportAsPathFilter>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<exportAsPathFilter>(.*)</exportAsPathFilter>.*', recv_xml)
if re_find:
result["export_as_path_filter"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != export_as_path_filter:
need_cfg = True
else:
need_cfg = True
import_as_path_name_or_num = module.params[
'import_as_path_name_or_num']
if import_as_path_name_or_num:
if len(import_as_path_name_or_num) < 1 or len(import_as_path_name_or_num) > 51:
module.fail_json(
msg='Error: The len of import_as_path_name_or_num %s is out '
'of [1 - 51].' % import_as_path_name_or_num)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<importAsPathNameOrNum></importAsPathNameOrNum>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<importAsPathNameOrNum>(.*)</importAsPathNameOrNum>.*', recv_xml)
if re_find:
result["import_as_path_name_or_num"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != import_as_path_name_or_num:
need_cfg = True
else:
need_cfg = True
export_as_path_name_or_num = module.params[
'export_as_path_name_or_num']
if export_as_path_name_or_num:
if len(export_as_path_name_or_num) < 1 or len(export_as_path_name_or_num) > 51:
module.fail_json(
msg='Error: The len of export_as_path_name_or_num %s is out '
'of [1 - 51].' % export_as_path_name_or_num)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<exportAsPathNameOrNum></exportAsPathNameOrNum>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<exportAsPathNameOrNum>(.*)</exportAsPathNameOrNum>.*', recv_xml)
if re_find:
result["export_as_path_name_or_num"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != export_as_path_name_or_num:
need_cfg = True
else:
need_cfg = True
import_acl_name_or_num = module.params['import_acl_name_or_num']
if import_acl_name_or_num:
if len(import_acl_name_or_num) < 1 or len(import_acl_name_or_num) > 32:
module.fail_json(
msg='Error: The len of import_acl_name_or_num %s is out of [1 - 32].' % import_acl_name_or_num)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<importAclNameOrNum></importAclNameOrNum>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<importAclNameOrNum>(.*)</importAclNameOrNum>.*', recv_xml)
if re_find:
result["import_acl_name_or_num"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != import_acl_name_or_num:
need_cfg = True
else:
need_cfg = True
export_acl_name_or_num = module.params['export_acl_name_or_num']
if export_acl_name_or_num:
if len(export_acl_name_or_num) < 1 or len(export_acl_name_or_num) > 32:
module.fail_json(
msg='Error: The len of export_acl_name_or_num %s is out of [1 - 32].' % export_acl_name_or_num)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<exportAclNameOrNum></exportAclNameOrNum>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<exportAclNameOrNum>(.*)</exportAclNameOrNum>.*', recv_xml)
if re_find:
result["export_acl_name_or_num"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != export_acl_name_or_num:
need_cfg = True
else:
need_cfg = True
ipprefix_orf_enable = module.params['ipprefix_orf_enable']
if ipprefix_orf_enable != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<ipprefixOrfEnable></ipprefixOrfEnable>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<ipprefixOrfEnable>(.*)</ipprefixOrfEnable>.*', recv_xml)
if re_find:
result["ipprefix_orf_enable"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != ipprefix_orf_enable:
need_cfg = True
else:
need_cfg = True
is_nonstd_ipprefix_mod = module.params['is_nonstd_ipprefix_mod']
if is_nonstd_ipprefix_mod != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<isNonstdIpprefixMod></isNonstdIpprefixMod>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<isNonstdIpprefixMod>(.*)</isNonstdIpprefixMod>.*', recv_xml)
if re_find:
result["is_nonstd_ipprefix_mod"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != is_nonstd_ipprefix_mod:
need_cfg = True
else:
need_cfg = True
orftype = module.params['orftype']
if orftype:
if int(orftype) < 0 or int(orftype) > 65535:
module.fail_json(
msg='Error: The value of orftype %s is out of [0 - 65535].' % orftype)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<orftype></orftype>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<orftype>(.*)</orftype>.*', recv_xml)
if re_find:
result["orftype"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != orftype:
need_cfg = True
else:
need_cfg = True
orf_mode = module.params['orf_mode']
if orf_mode:
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<orfMode></orfMode>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<orfMode>(.*)</orfMode>.*', recv_xml)
if re_find:
result["orf_mode"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != orf_mode:
need_cfg = True
else:
need_cfg = True
soostring = module.params['soostring']
if soostring:
if len(soostring) < 3 or len(soostring) > 21:
module.fail_json(
msg='Error: The len of soostring %s is out of [3 - 21].' % soostring)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<soostring></soostring>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<soostring>(.*)</soostring>.*', recv_xml)
if re_find:
result["soostring"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != soostring:
need_cfg = True
else:
need_cfg = True
default_rt_adv_enable = module.params['default_rt_adv_enable']
if default_rt_adv_enable != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<defaultRtAdvEnable></defaultRtAdvEnable>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<defaultRtAdvEnable>(.*)</defaultRtAdvEnable>.*', recv_xml)
if re_find:
result["default_rt_adv_enable"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != default_rt_adv_enable:
need_cfg = True
else:
need_cfg = True
default_rt_adv_policy = module.params['default_rt_adv_policy']
if default_rt_adv_policy:
if len(default_rt_adv_policy) < 1 or len(default_rt_adv_policy) > 40:
module.fail_json(
msg='Error: The len of default_rt_adv_policy %s is out of [1 - 40].' % default_rt_adv_policy)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<defaultRtAdvPolicy></defaultRtAdvPolicy>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<defaultRtAdvPolicy>(.*)</defaultRtAdvPolicy>.*', recv_xml)
if re_find:
result["default_rt_adv_policy"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != default_rt_adv_policy:
need_cfg = True
else:
need_cfg = True
default_rt_match_mode = module.params['default_rt_match_mode']
if default_rt_match_mode:
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<defaultRtMatchMode></defaultRtMatchMode>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<defaultRtMatchMode>(.*)</defaultRtMatchMode>.*', recv_xml)
if re_find:
result["default_rt_match_mode"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != default_rt_match_mode:
need_cfg = True
else:
need_cfg = True
add_path_mode = module.params['add_path_mode']
if add_path_mode:
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<addPathMode></addPathMode>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<addPathMode>(.*)</addPathMode>.*', recv_xml)
if re_find:
result["add_path_mode"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != add_path_mode:
need_cfg = True
else:
need_cfg = True
adv_add_path_num = module.params['adv_add_path_num']
if adv_add_path_num:
            if int(adv_add_path_num) < 2 or int(adv_add_path_num) > 64:
module.fail_json(
msg='Error: The value of adv_add_path_num %s is out of [2 - 64].' % adv_add_path_num)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<advAddPathNum></advAddPathNum>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<advAddPathNum>(.*)</advAddPathNum>.*', recv_xml)
if re_find:
result["adv_add_path_num"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != adv_add_path_num:
need_cfg = True
else:
need_cfg = True
origin_as_valid = module.params['origin_as_valid']
if origin_as_valid != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<originAsValid></originAsValid>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<originAsValid>(.*)</originAsValid>.*', recv_xml)
if re_find:
result["origin_as_valid"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != origin_as_valid:
need_cfg = True
else:
need_cfg = True
vpls_enable = module.params['vpls_enable']
if vpls_enable != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<vplsEnable></vplsEnable>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<vplsEnable>(.*)</vplsEnable>.*', recv_xml)
if re_find:
result["vpls_enable"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != vpls_enable:
need_cfg = True
else:
need_cfg = True
vpls_ad_disable = module.params['vpls_ad_disable']
if vpls_ad_disable != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<vplsAdDisable></vplsAdDisable>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<vplsAdDisable>(.*)</vplsAdDisable>.*', recv_xml)
if re_find:
result["vpls_ad_disable"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != vpls_ad_disable:
need_cfg = True
else:
need_cfg = True
update_pkt_standard_compatible = module.params[
'update_pkt_standard_compatible']
if update_pkt_standard_compatible != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<updatePktStandardCompatible></updatePktStandardCompatible>" + \
CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<updatePktStandardCompatible>(.*)</updatePktStandardCompatible>.*', recv_xml)
if re_find:
result["update_pkt_standard_compatible"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != update_pkt_standard_compatible:
need_cfg = True
else:
need_cfg = True
result["need_cfg"] = need_cfg
return result
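    # The merge/create/delete helpers below build a NETCONF <config> payload
    # and, in parallel, the equivalent CLI commands, which are returned so the
    # caller can report them in the module's 'updates' output.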
def merge_bgp_peer_af(self, **kwargs):
""" merge_bgp_peer_af """
module = kwargs["module"]
vrf_name = module.params['vrf_name']
af_type = module.params['af_type']
remote_address = module.params['remote_address']
conf_str = CE_MERGE_BGP_PEER_AF_HEADER % (
vrf_name, af_type, remote_address) + CE_MERGE_BGP_PEER_AF_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(msg='Error: Merge bgp peer address family failed.')
cmds = []
if af_type == "ipv4uni":
cmd = "ipv4-family unicast"
elif af_type == "ipv4multi":
cmd = "ipv4-family multicast"
elif af_type == "ipv6uni":
cmd = "ipv6-family unicast"
cmds.append(cmd)
cmd = "peer %s" % remote_address
cmds.append(cmd)
return cmds
def create_bgp_peer_af(self, **kwargs):
""" create_bgp_peer_af """
module = kwargs["module"]
vrf_name = module.params['vrf_name']
af_type = module.params['af_type']
remote_address = module.params['remote_address']
conf_str = CE_CREATE_BGP_PEER_AF % (vrf_name, af_type, remote_address)
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(msg='Error: Create bgp peer address family failed.')
cmds = []
if af_type == "ipv4uni":
cmd = "ipv4-family unicast"
elif af_type == "ipv4multi":
cmd = "ipv4-family multicast"
elif af_type == "ipv6uni":
cmd = "ipv6-family unicast"
cmds.append(cmd)
cmd = "peer %s" % remote_address
cmds.append(cmd)
return cmds
def delete_bgp_peer_af(self, **kwargs):
""" delete_bgp_peer_af """
module = kwargs["module"]
vrf_name = module.params['vrf_name']
af_type = module.params['af_type']
remote_address = module.params['remote_address']
conf_str = CE_DELETE_BGP_PEER_AF % (vrf_name, af_type, remote_address)
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(msg='Error: Delete bgp peer address family failed.')
cmds = []
if af_type == "ipv4uni":
cmd = "ipv4-family unicast"
elif af_type == "ipv4multi":
cmd = "ipv4-family multicast"
elif af_type == "ipv6uni":
cmd = "ipv6-family unicast"
cmds.append(cmd)
cmd = "undo peer %s" % remote_address
cmds.append(cmd)
return cmds
def merge_bgp_peer_af_other(self, **kwargs):
""" merge_bgp_peer_af_other """
module = kwargs["module"]
vrf_name = module.params['vrf_name']
af_type = module.params['af_type']
remote_address = module.params['remote_address']
conf_str = CE_MERGE_BGP_PEER_AF_HEADER % (
vrf_name, af_type, remote_address)
cmds = []
advertise_irb = module.params['advertise_irb']
if advertise_irb != 'no_use':
conf_str += "<advertiseIrb>%s</advertiseIrb>" % advertise_irb
if advertise_irb == "ture":
cmd = "peer %s advertise irb" % remote_address
else:
cmd = "undo peer %s advertise irb" % remote_address
cmds.append(cmd)
advertise_arp = module.params['advertise_arp']
if advertise_arp != 'no_use':
conf_str += "<advertiseArp>%s</advertiseArp>" % advertise_arp
if advertise_arp == "ture":
cmd = "peer %s advertise arp" % remote_address
else:
cmd = "undo peer %s advertise arp" % remote_address
cmds.append(cmd)
advertise_remote_nexthop = module.params['advertise_remote_nexthop']
if advertise_remote_nexthop != 'no_use':
conf_str += "<advertiseRemoteNexthop>%s</advertiseRemoteNexthop>" % advertise_remote_nexthop
if advertise_remote_nexthop == "true":
cmd = "peer %s advertise remote-nexthop" % remote_address
else:
cmd = "undo peer %s advertise remote-nexthop" % remote_address
cmds.append(cmd)
advertise_community = module.params['advertise_community']
if advertise_community != 'no_use':
conf_str += "<advertiseCommunity>%s</advertiseCommunity>" % advertise_community
if advertise_community == "true":
cmd = "peer %s advertise-community" % remote_address
else:
cmd = "undo peer %s advertise-community" % remote_address
cmds.append(cmd)
advertise_ext_community = module.params['advertise_ext_community']
if advertise_ext_community != 'no_use':
conf_str += "<advertiseExtCommunity>%s</advertiseExtCommunity>" % advertise_ext_community
if advertise_ext_community == "true":
cmd = "peer %s advertise-ext-community" % remote_address
else:
cmd = "undo peer %s advertise-ext-community" % remote_address
cmds.append(cmd)
discard_ext_community = module.params['discard_ext_community']
if discard_ext_community != 'no_use':
conf_str += "<discardExtCommunity>%s</discardExtCommunity>" % discard_ext_community
if discard_ext_community == "true":
cmd = "peer %s discard-ext-community" % remote_address
else:
cmd = "undo peer %s discard-ext-community" % remote_address
cmds.append(cmd)
allow_as_loop_enable = module.params['allow_as_loop_enable']
if allow_as_loop_enable != 'no_use':
conf_str += "<allowAsLoopEnable>%s</allowAsLoopEnable>" % allow_as_loop_enable
if allow_as_loop_enable == "true":
cmd = "peer %s allow-as-loop" % remote_address
else:
cmd = "undo peer %s allow-as-loop" % remote_address
cmds.append(cmd)
allow_as_loop_limit = module.params['allow_as_loop_limit']
if allow_as_loop_limit:
conf_str += "<allowAsLoopLimit>%s</allowAsLoopLimit>" % allow_as_loop_limit
if allow_as_loop_enable == "true":
cmd = "peer %s allow-as-loop %s" % (remote_address, allow_as_loop_limit)
else:
cmd = "undo peer %s allow-as-loop" % remote_address
cmds.append(cmd)
keep_all_routes = module.params['keep_all_routes']
if keep_all_routes != 'no_use':
conf_str += "<keepAllRoutes>%s</keepAllRoutes>" % keep_all_routes
if keep_all_routes == "true":
cmd = "peer %s keep-all-routes" % remote_address
else:
cmd = "undo peer %s keep-all-routes" % remote_address
cmds.append(cmd)
nexthop_configure = module.params['nexthop_configure']
if nexthop_configure:
conf_str += "<nextHopConfigure>%s</nextHopConfigure>" % nexthop_configure
if nexthop_configure == "local":
cmd = "peer %s next-hop-local" % remote_address
cmds.append(cmd)
elif nexthop_configure == "invariable":
cmd = "peer %s next-hop-invariable" % remote_address
cmds.append(cmd)
preferred_value = module.params['preferred_value']
if preferred_value:
conf_str += "<preferredValue>%s</preferredValue>" % preferred_value
cmd = "peer %s preferred-value %s" % (remote_address, preferred_value)
cmds.append(cmd)
public_as_only = module.params['public_as_only']
if public_as_only != 'no_use':
conf_str += "<publicAsOnly>%s</publicAsOnly>" % public_as_only
if public_as_only == "true":
cmd = "peer %s public-as-only" % remote_address
else:
cmd = "undo peer %s public-as-only" % remote_address
cmds.append(cmd)
public_as_only_force = module.params['public_as_only_force']
if public_as_only_force != 'no_use':
conf_str += "<publicAsOnlyForce>%s</publicAsOnlyForce>" % public_as_only_force
if public_as_only_force == "true":
cmd = "peer %s public-as-only force" % remote_address
else:
cmd = "undo peer %s public-as-only force" % remote_address
cmds.append(cmd)
public_as_only_limited = module.params['public_as_only_limited']
if public_as_only_limited != 'no_use':
conf_str += "<publicAsOnlyLimited>%s</publicAsOnlyLimited>" % public_as_only_limited
if public_as_only_limited == "true":
cmd = "peer %s public-as-only limited" % remote_address
else:
cmd = "undo peer %s public-as-only limited" % remote_address
cmds.append(cmd)
public_as_only_replace = module.params['public_as_only_replace']
if public_as_only_replace != 'no_use':
conf_str += "<publicAsOnlyReplace>%s</publicAsOnlyReplace>" % public_as_only_replace
if public_as_only_replace == "true":
cmd = "peer %s public-as-only force replace" % remote_address
else:
cmd = "undo peer %s public-as-only force replace" % remote_address
cmds.append(cmd)
public_as_only_skip_peer_as = module.params[
'public_as_only_skip_peer_as']
if public_as_only_skip_peer_as != 'no_use':
conf_str += "<publicAsOnlySkipPeerAs>%s</publicAsOnlySkipPeerAs>" % public_as_only_skip_peer_as
if public_as_only_skip_peer_as == "true":
cmd = "peer %s public-as-only force include-peer-as" % remote_address
else:
cmd = "undo peer %s public-as-only force include-peer-as" % remote_address
cmds.append(cmd)
route_limit = module.params['route_limit']
if route_limit:
conf_str += "<routeLimit>%s</routeLimit>" % route_limit
cmd = "peer %s route-limit %s" % (remote_address, route_limit)
cmds.append(cmd)
route_limit_percent = module.params['route_limit_percent']
if route_limit_percent:
conf_str += "<routeLimitPercent>%s</routeLimitPercent>" % route_limit_percent
cmd = "peer %s route-limit %s %s" % (remote_address, route_limit, route_limit_percent)
cmds.append(cmd)
route_limit_type = module.params['route_limit_type']
if route_limit_type:
conf_str += "<routeLimitType>%s</routeLimitType>" % route_limit_type
if route_limit_type == "alertOnly":
cmd = "peer %s route-limit %s %s alert-only" % (remote_address, route_limit, route_limit_percent)
cmds.append(cmd)
elif route_limit_type == "idleForever":
cmd = "peer %s route-limit %s %s idle-forever" % (remote_address, route_limit, route_limit_percent)
cmds.append(cmd)
elif route_limit_type == "idleTimeout":
cmd = "peer %s route-limit %s %s idle-timeout" % (remote_address, route_limit, route_limit_percent)
cmds.append(cmd)
route_limit_idle_timeout = module.params['route_limit_idle_timeout']
if route_limit_idle_timeout:
conf_str += "<routeLimitIdleTimeout>%s</routeLimitIdleTimeout>" % route_limit_idle_timeout
cmd = "peer %s route-limit %s %s idle-timeout %s" % (remote_address, route_limit,
route_limit_percent, route_limit_idle_timeout)
cmds.append(cmd)
rt_updt_interval = module.params['rt_updt_interval']
if rt_updt_interval:
conf_str += "<rtUpdtInterval>%s</rtUpdtInterval>" % rt_updt_interval
cmd = "peer %s route-update-interval %s" % (remote_address, rt_updt_interval)
cmds.append(cmd)
redirect_ip = module.params['redirect_ip']
if redirect_ip != 'no_use':
conf_str += "<redirectIP>%s</redirectIP>" % redirect_ip
redirect_ip_vaildation = module.params['redirect_ip_vaildation']
if redirect_ip_vaildation != 'no_use':
conf_str += "<redirectIPVaildation>%s</redirectIPVaildation>" % redirect_ip_vaildation
reflect_client = module.params['reflect_client']
if reflect_client != 'no_use':
conf_str += "<reflectClient>%s</reflectClient>" % reflect_client
if reflect_client == "true":
cmd = "peer %s reflect-client" % remote_address
else:
cmd = "undo peer %s reflect-client" % remote_address
cmds.append(cmd)
substitute_as_enable = module.params['substitute_as_enable']
if substitute_as_enable != 'no_use':
conf_str += "<substituteAsEnable>%s</substituteAsEnable>" % substitute_as_enable
import_rt_policy_name = module.params['import_rt_policy_name']
if import_rt_policy_name:
conf_str += "<importRtPolicyName>%s</importRtPolicyName>" % import_rt_policy_name
cmd = "peer %s route-policy %s import" % (remote_address, import_rt_policy_name)
cmds.append(cmd)
export_rt_policy_name = module.params['export_rt_policy_name']
if export_rt_policy_name:
conf_str += "<exportRtPolicyName>%s</exportRtPolicyName>" % export_rt_policy_name
cmd = "peer %s route-policy %s export" % (remote_address, export_rt_policy_name)
cmds.append(cmd)
import_pref_filt_name = module.params['import_pref_filt_name']
if import_pref_filt_name:
conf_str += "<importPrefFiltName>%s</importPrefFiltName>" % import_pref_filt_name
cmd = "peer %s filter-policy %s import" % (remote_address, import_pref_filt_name)
cmds.append(cmd)
export_pref_filt_name = module.params['export_pref_filt_name']
if export_pref_filt_name:
conf_str += "<exportPrefFiltName>%s</exportPrefFiltName>" % export_pref_filt_name
cmd = "peer %s filter-policy %s export" % (remote_address, export_pref_filt_name)
cmds.append(cmd)
import_as_path_filter = module.params['import_as_path_filter']
if import_as_path_filter:
conf_str += "<importAsPathFilter>%s</importAsPathFilter>" % import_as_path_filter
cmd = "peer %s as-path-filter %s import" % (remote_address, import_as_path_filter)
cmds.append(cmd)
export_as_path_filter = module.params['export_as_path_filter']
if export_as_path_filter:
conf_str += "<exportAsPathFilter>%s</exportAsPathFilter>" % export_as_path_filter
cmd = "peer %s as-path-filter %s export" % (remote_address, export_as_path_filter)
cmds.append(cmd)
import_as_path_name_or_num = module.params[
'import_as_path_name_or_num']
if import_as_path_name_or_num:
conf_str += "<importAsPathNameOrNum>%s</importAsPathNameOrNum>" % import_as_path_name_or_num
cmd = "peer %s as-path-filter %s import" % (remote_address, import_as_path_name_or_num)
cmds.append(cmd)
export_as_path_name_or_num = module.params[
'export_as_path_name_or_num']
if export_as_path_name_or_num:
conf_str += "<exportAsPathNameOrNum>%s</exportAsPathNameOrNum>" % export_as_path_name_or_num
cmd = "peer %s as-path-filter %s export" % (remote_address, export_as_path_name_or_num)
cmds.append(cmd)
import_acl_name_or_num = module.params['import_acl_name_or_num']
if import_acl_name_or_num:
conf_str += "<importAclNameOrNum>%s</importAclNameOrNum>" % import_acl_name_or_num
cmd = "peer %s filter-policy %s import" % (remote_address, import_acl_name_or_num)
cmds.append(cmd)
export_acl_name_or_num = module.params['export_acl_name_or_num']
if export_acl_name_or_num:
conf_str += "<exportAclNameOrNum>%s</exportAclNameOrNum>" % export_acl_name_or_num
cmd = "peer %s filter-policy %s export" % (remote_address, export_acl_name_or_num)
cmds.append(cmd)
ipprefix_orf_enable = module.params['ipprefix_orf_enable']
if ipprefix_orf_enable != 'no_use':
conf_str += "<ipprefixOrfEnable>%s</ipprefixOrfEnable>" % ipprefix_orf_enable
if ipprefix_orf_enable == "true":
cmd = "peer %s capability-advertise orf ip-prefix" % remote_address
else:
cmd = "undo peer %s capability-advertise orf ip-prefix" % remote_address
cmds.append(cmd)
is_nonstd_ipprefix_mod = module.params['is_nonstd_ipprefix_mod']
if is_nonstd_ipprefix_mod != 'no_use':
conf_str += "<isNonstdIpprefixMod>%s</isNonstdIpprefixMod>" % is_nonstd_ipprefix_mod
if is_nonstd_ipprefix_mod == "true":
if ipprefix_orf_enable == "true":
cmd = "peer %s capability-advertise orf non-standard-compatible" % remote_address
else:
cmd = "undo peer %s capability-advertise orf non-standard-compatible" % remote_address
cmds.append(cmd)
else:
if ipprefix_orf_enable == "true":
cmd = "peer %s capability-advertise orf" % remote_address
else:
cmd = "undo peer %s capability-advertise orf" % remote_address
cmds.append(cmd)
orftype = module.params['orftype']
if orftype:
conf_str += "<orftype>%s</orftype>" % orftype
orf_mode = module.params['orf_mode']
if orf_mode:
conf_str += "<orfMode>%s</orfMode>" % orf_mode
if ipprefix_orf_enable == "true":
cmd = "peer %s capability-advertise orf ip-prefix %s" % (remote_address, orf_mode)
else:
cmd = "undo peer %s capability-advertise orf ip-prefix %s" % (remote_address, orf_mode)
cmds.append(cmd)
soostring = module.params['soostring']
if soostring:
conf_str += "<soostring>%s</soostring>" % soostring
cmd = "peer %s soo %s" % (remote_address, soostring)
cmds.append(cmd)
cmd = ""
default_rt_adv_enable = module.params['default_rt_adv_enable']
if default_rt_adv_enable != 'no_use':
conf_str += "<defaultRtAdvEnable>%s</defaultRtAdvEnable>" % default_rt_adv_enable
if default_rt_adv_enable == "true":
cmd += "peer %s default-route-advertise" % remote_address
else:
cmd += "undo peer %s default-route-advertise" % remote_address
default_rt_adv_policy = module.params['default_rt_adv_policy']
if default_rt_adv_policy:
conf_str += "<defaultRtAdvPolicy>%s</defaultRtAdvPolicy>" % default_rt_adv_policy
cmd += " route-policy %s" % default_rt_adv_policy
default_rt_match_mode = module.params['default_rt_match_mode']
if default_rt_match_mode:
conf_str += "<defaultRtMatchMode>%s</defaultRtMatchMode>" % default_rt_match_mode
if default_rt_match_mode == "matchall":
cmd += " conditional-route-match-all"
elif default_rt_match_mode == "matchany":
cmd += " conditional-route-match-any"
if cmd:
cmds.append(cmd)
add_path_mode = module.params['add_path_mode']
if add_path_mode:
conf_str += "<addPathMode>%s</addPathMode>" % add_path_mode
adv_add_path_num = module.params['adv_add_path_num']
if adv_add_path_num:
conf_str += "<advAddPathNum>%s</advAddPathNum>" % adv_add_path_num
origin_as_valid = module.params['origin_as_valid']
if origin_as_valid != 'no_use':
conf_str += "<originAsValid>%s</originAsValid>" % origin_as_valid
vpls_enable = module.params['vpls_enable']
if vpls_enable != 'no_use':
conf_str += "<vplsEnable>%s</vplsEnable>" % vpls_enable
vpls_ad_disable = module.params['vpls_ad_disable']
if vpls_ad_disable != 'no_use':
conf_str += "<vplsAdDisable>%s</vplsAdDisable>" % vpls_ad_disable
update_pkt_standard_compatible = module.params[
'update_pkt_standard_compatible']
if update_pkt_standard_compatible != 'no_use':
conf_str += "<updatePktStandardCompatible>%s</updatePktStandardCompatible>" % update_pkt_standard_compatible
conf_str += CE_MERGE_BGP_PEER_AF_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(msg='Error: Merge bgp peer address family other failed.')
return cmds
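# Example playbook task (sketch only; the module name and all values here are
# illustrative assumptions, not taken from this file):
#
#   - name: Configure a BGP peer address family on a CloudEngine switch
#     ce_bgp_neighbor_af:
#       state: present
#       vrf_name: _public_
#       af_type: ipv4uni
#       remote_address: 10.10.10.10
#       advertise_community: 'true'
#       route_limit: '10000'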
def main():
""" main """
argument_spec = dict(
state=dict(choices=['present', 'absent'], default='present'),
vrf_name=dict(type='str', required=True),
af_type=dict(choices=['ipv4uni', 'ipv4multi', 'ipv4vpn',
'ipv6uni', 'ipv6vpn', 'evpn'], required=True),
remote_address=dict(type='str', required=True),
advertise_irb=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
advertise_arp=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
advertise_remote_nexthop=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
advertise_community=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
advertise_ext_community=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
discard_ext_community=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
allow_as_loop_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
allow_as_loop_limit=dict(type='str'),
keep_all_routes=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
nexthop_configure=dict(choices=['null', 'local', 'invariable']),
preferred_value=dict(type='str'),
public_as_only=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
public_as_only_force=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
public_as_only_limited=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
public_as_only_replace=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
public_as_only_skip_peer_as=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
route_limit=dict(type='str'),
route_limit_percent=dict(type='str'),
route_limit_type=dict(
choices=['noparameter', 'alertOnly', 'idleForever', 'idleTimeout']),
route_limit_idle_timeout=dict(type='str'),
rt_updt_interval=dict(type='str'),
redirect_ip=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
redirect_ip_vaildation=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
reflect_client=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
substitute_as_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
import_rt_policy_name=dict(type='str'),
export_rt_policy_name=dict(type='str'),
import_pref_filt_name=dict(type='str'),
export_pref_filt_name=dict(type='str'),
import_as_path_filter=dict(type='str'),
export_as_path_filter=dict(type='str'),
import_as_path_name_or_num=dict(type='str'),
export_as_path_name_or_num=dict(type='str'),
import_acl_name_or_num=dict(type='str'),
export_acl_name_or_num=dict(type='str'),
ipprefix_orf_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
is_nonstd_ipprefix_mod=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
orftype=dict(type='str'),
orf_mode=dict(choices=['null', 'receive', 'send', 'both']),
soostring=dict(type='str'),
default_rt_adv_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
default_rt_adv_policy=dict(type='str'),
default_rt_match_mode=dict(choices=['null', 'matchall', 'matchany']),
add_path_mode=dict(choices=['null', 'receive', 'send', 'both']),
adv_add_path_num=dict(type='str'),
origin_as_valid=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
vpls_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
vpls_ad_disable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
update_pkt_standard_compatible=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']))
argument_spec.update(ce_argument_spec)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
changed = False
proposed = dict()
existing = dict()
end_state = dict()
updates = []
state = module.params['state']
vrf_name = module.params['vrf_name']
af_type = module.params['af_type']
remote_address = module.params['remote_address']
advertise_irb = module.params['advertise_irb']
advertise_arp = module.params['advertise_arp']
advertise_remote_nexthop = module.params['advertise_remote_nexthop']
advertise_community = module.params['advertise_community']
advertise_ext_community = module.params['advertise_ext_community']
discard_ext_community = module.params['discard_ext_community']
allow_as_loop_enable = module.params['allow_as_loop_enable']
allow_as_loop_limit = module.params['allow_as_loop_limit']
keep_all_routes = module.params['keep_all_routes']
nexthop_configure = module.params['nexthop_configure']
preferred_value = module.params['preferred_value']
public_as_only = module.params['public_as_only']
public_as_only_force = module.params['public_as_only_force']
public_as_only_limited = module.params['public_as_only_limited']
public_as_only_replace = module.params['public_as_only_replace']
public_as_only_skip_peer_as = module.params['public_as_only_skip_peer_as']
route_limit = module.params['route_limit']
route_limit_percent = module.params['route_limit_percent']
route_limit_type = module.params['route_limit_type']
route_limit_idle_timeout = module.params['route_limit_idle_timeout']
rt_updt_interval = module.params['rt_updt_interval']
redirect_ip = module.params['redirect_ip']
redirect_ip_vaildation = module.params['redirect_ip_vaildation']
reflect_client = module.params['reflect_client']
substitute_as_enable = module.params['substitute_as_enable']
import_rt_policy_name = module.params['import_rt_policy_name']
export_rt_policy_name = module.params['export_rt_policy_name']
import_pref_filt_name = module.params['import_pref_filt_name']
export_pref_filt_name = module.params['export_pref_filt_name']
import_as_path_filter = module.params['import_as_path_filter']
export_as_path_filter = module.params['export_as_path_filter']
import_as_path_name_or_num = module.params['import_as_path_name_or_num']
export_as_path_name_or_num = module.params['export_as_path_name_or_num']
import_acl_name_or_num = module.params['import_acl_name_or_num']
export_acl_name_or_num = module.params['export_acl_name_or_num']
ipprefix_orf_enable = module.params['ipprefix_orf_enable']
is_nonstd_ipprefix_mod = module.params['is_nonstd_ipprefix_mod']
orftype = module.params['orftype']
orf_mode = module.params['orf_mode']
soostring = module.params['soostring']
default_rt_adv_enable = module.params['default_rt_adv_enable']
default_rt_adv_policy = module.params['default_rt_adv_policy']
default_rt_match_mode = module.params['default_rt_match_mode']
add_path_mode = module.params['add_path_mode']
adv_add_path_num = module.params['adv_add_path_num']
origin_as_valid = module.params['origin_as_valid']
vpls_enable = module.params['vpls_enable']
vpls_ad_disable = module.params['vpls_ad_disable']
update_pkt_standard_compatible = module.params[
'update_pkt_standard_compatible']
ce_bgp_peer_af_obj = BgpNeighborAf()
# get proposed
proposed["state"] = state
if vrf_name:
proposed["vrf_name"] = vrf_name
if af_type:
proposed["af_type"] = af_type
if remote_address:
proposed["remote_address"] = remote_address
if advertise_irb != 'no_use':
proposed["advertise_irb"] = advertise_irb
if advertise_arp != 'no_use':
proposed["advertise_arp"] = advertise_arp
if advertise_remote_nexthop != 'no_use':
proposed["advertise_remote_nexthop"] = advertise_remote_nexthop
if advertise_community != 'no_use':
proposed["advertise_community"] = advertise_community
if advertise_ext_community != 'no_use':
proposed["advertise_ext_community"] = advertise_ext_community
if discard_ext_community != 'no_use':
proposed["discard_ext_community"] = discard_ext_community
if allow_as_loop_enable != 'no_use':
proposed["allow_as_loop_enable"] = allow_as_loop_enable
if allow_as_loop_limit:
proposed["allow_as_loop_limit"] = allow_as_loop_limit
if keep_all_routes != 'no_use':
proposed["keep_all_routes"] = keep_all_routes
if nexthop_configure:
proposed["nexthop_configure"] = nexthop_configure
if preferred_value:
proposed["preferred_value"] = preferred_value
if public_as_only != 'no_use':
proposed["public_as_only"] = public_as_only
if public_as_only_force != 'no_use':
proposed["public_as_only_force"] = public_as_only_force
if public_as_only_limited != 'no_use':
proposed["public_as_only_limited"] = public_as_only_limited
if public_as_only_replace != 'no_use':
proposed["public_as_only_replace"] = public_as_only_replace
if public_as_only_skip_peer_as != 'no_use':
proposed["public_as_only_skip_peer_as"] = public_as_only_skip_peer_as
if route_limit:
proposed["route_limit"] = route_limit
if route_limit_percent:
proposed["route_limit_percent"] = route_limit_percent
if route_limit_type:
proposed["route_limit_type"] = route_limit_type
if route_limit_idle_timeout:
proposed["route_limit_idle_timeout"] = route_limit_idle_timeout
if rt_updt_interval:
proposed["rt_updt_interval"] = rt_updt_interval
if redirect_ip != 'no_use':
proposed["redirect_ip"] = redirect_ip
if redirect_ip_vaildation != 'no_use':
proposed["redirect_ip_vaildation"] = redirect_ip_vaildation
if reflect_client != 'no_use':
proposed["reflect_client"] = reflect_client
if substitute_as_enable != 'no_use':
proposed["substitute_as_enable"] = substitute_as_enable
if import_rt_policy_name:
proposed["import_rt_policy_name"] = import_rt_policy_name
if export_rt_policy_name:
proposed["export_rt_policy_name"] = export_rt_policy_name
if import_pref_filt_name:
proposed["import_pref_filt_name"] = import_pref_filt_name
if export_pref_filt_name:
proposed["export_pref_filt_name"] = export_pref_filt_name
if import_as_path_filter:
proposed["import_as_path_filter"] = import_as_path_filter
if export_as_path_filter:
proposed["export_as_path_filter"] = export_as_path_filter
if import_as_path_name_or_num:
proposed["import_as_path_name_or_num"] = import_as_path_name_or_num
if export_as_path_name_or_num:
proposed["export_as_path_name_or_num"] = export_as_path_name_or_num
if import_acl_name_or_num:
proposed["import_acl_name_or_num"] = import_acl_name_or_num
if export_acl_name_or_num:
proposed["export_acl_name_or_num"] = export_acl_name_or_num
if ipprefix_orf_enable != 'no_use':
proposed["ipprefix_orf_enable"] = ipprefix_orf_enable
if is_nonstd_ipprefix_mod != 'no_use':
proposed["is_nonstd_ipprefix_mod"] = is_nonstd_ipprefix_mod
if orftype:
proposed["orftype"] = orftype
if orf_mode:
proposed["orf_mode"] = orf_mode
if soostring:
proposed["soostring"] = soostring
if default_rt_adv_enable != 'no_use':
proposed["default_rt_adv_enable"] = default_rt_adv_enable
if default_rt_adv_policy:
proposed["default_rt_adv_policy"] = default_rt_adv_policy
if default_rt_match_mode:
proposed["default_rt_match_mode"] = default_rt_match_mode
if add_path_mode:
proposed["add_path_mode"] = add_path_mode
if adv_add_path_num:
proposed["adv_add_path_num"] = adv_add_path_num
if origin_as_valid != 'no_use':
proposed["origin_as_valid"] = origin_as_valid
if vpls_enable != 'no_use':
proposed["vpls_enable"] = vpls_enable
if vpls_ad_disable != 'no_use':
proposed["vpls_ad_disable"] = vpls_ad_disable
if update_pkt_standard_compatible != 'no_use':
proposed["update_pkt_standard_compatible"] = update_pkt_standard_compatible
if not ce_bgp_peer_af_obj:
module.fail_json(msg='Error: Init module failed.')
bgp_peer_af_rst = ce_bgp_peer_af_obj.check_bgp_neighbor_af_args(
module=module)
bgp_peer_af_other_rst = ce_bgp_peer_af_obj.check_bgp_neighbor_af_other(
module=module)
# state exist bgp peer address family config
exist_tmp = dict()
for item in bgp_peer_af_rst:
if item != "need_cfg":
exist_tmp[item] = bgp_peer_af_rst[item]
if exist_tmp:
existing["bgp neighbor af"] = exist_tmp
# state exist bgp peer address family other config
exist_tmp = dict()
for item in bgp_peer_af_other_rst:
if item != "need_cfg":
exist_tmp[item] = bgp_peer_af_other_rst[item]
if exist_tmp:
existing["bgp neighbor af other"] = exist_tmp
if state == "present":
if bgp_peer_af_rst["need_cfg"]:
if "remote_address" in bgp_peer_af_rst.keys():
cmd = ce_bgp_peer_af_obj.merge_bgp_peer_af(module=module)
changed = True
for item in cmd:
updates.append(item)
else:
cmd = ce_bgp_peer_af_obj.create_bgp_peer_af(module=module)
changed = True
for item in cmd:
updates.append(item)
if bgp_peer_af_other_rst["need_cfg"]:
cmd = ce_bgp_peer_af_obj.merge_bgp_peer_af_other(module=module)
changed = True
for item in cmd:
updates.append(item)
else:
if bgp_peer_af_rst["need_cfg"]:
cmd = ce_bgp_peer_af_obj.delete_bgp_peer_af(module=module)
changed = True
for item in cmd:
updates.append(item)
if bgp_peer_af_other_rst["need_cfg"]:
pass
# state end bgp peer address family config
bgp_peer_af_rst = ce_bgp_peer_af_obj.check_bgp_neighbor_af_args(
module=module)
end_tmp = dict()
for item in bgp_peer_af_rst:
if item != "need_cfg":
end_tmp[item] = bgp_peer_af_rst[item]
if end_tmp:
end_state["bgp neighbor af"] = end_tmp
# state end bgp peer address family other config
bgp_peer_af_other_rst = ce_bgp_peer_af_obj.check_bgp_neighbor_af_other(
module=module)
end_tmp = dict()
for item in bgp_peer_af_other_rst:
if item != "need_cfg":
end_tmp[item] = bgp_peer_af_other_rst[item]
if end_tmp:
end_state["bgp neighbor af other"] = end_tmp
results = dict()
results['proposed'] = proposed
results['existing'] = existing
results['changed'] = changed
results['end_state'] = end_state
results['updates'] = updates
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
moniqx4/bite-project | deps/gdata-python-client/src/gdata/tlslite/SessionCache.py | 358 | 3470 | """Class for caching TLS sessions."""
import thread
import time
class SessionCache:
"""This class is used by the server to cache TLS sessions.
Caching sessions allows the client to use TLS session resumption
and avoid the expense of a full handshake. To use this class,
simply pass a SessionCache instance into the server handshake
function.
This class is thread-safe.
"""
#References to these instances
#are also held by the caller, who may change the 'resumable'
#flag, so the SessionCache must return the same instances
#it was passed in.
def __init__(self, maxEntries=10000, maxAge=14400):
"""Create a new SessionCache.
@type maxEntries: int
@param maxEntries: The maximum size of the cache. When this
limit is reached, the oldest sessions will be deleted as
necessary to make room for new ones. The default is 10000.
@type maxAge: int
@param maxAge: The number of seconds before a session expires
from the cache. The default is 14400 (i.e. 4 hours)."""
self.lock = thread.allocate_lock()
# Maps sessionIDs to sessions
self.entriesDict = {}
#Circular list of (sessionID, timestamp) pairs
self.entriesList = [(None,None)] * maxEntries
self.firstIndex = 0
self.lastIndex = 0
self.maxAge = maxAge
def __getitem__(self, sessionID):
self.lock.acquire()
try:
            self._purge() #Delete expired items first so we don't return a stale session
session = self.entriesDict[sessionID]
#When we add sessions they're resumable, but it's possible
#for the session to be invalidated later on (if a fatal alert
#is returned), so we have to check for resumability before
#returning the session.
if session.valid():
return session
else:
raise KeyError()
finally:
self.lock.release()
def __setitem__(self, sessionID, session):
self.lock.acquire()
try:
#Add the new element
self.entriesDict[sessionID] = session
self.entriesList[self.lastIndex] = (sessionID, time.time())
self.lastIndex = (self.lastIndex+1) % len(self.entriesList)
#If the cache is full, we delete the oldest element to make an
#empty space
if self.lastIndex == self.firstIndex:
del(self.entriesDict[self.entriesList[self.firstIndex][0]])
self.firstIndex = (self.firstIndex+1) % len(self.entriesList)
finally:
self.lock.release()
#Delete expired items
def _purge(self):
currentTime = time.time()
#Search through the circular list, deleting expired elements until
#we reach a non-expired element. Since elements in list are
#ordered in time, we can break once we reach the first non-expired
#element
index = self.firstIndex
while index != self.lastIndex:
if currentTime - self.entriesList[index][1] > self.maxAge:
del(self.entriesDict[self.entriesList[index][0]])
index = (index+1) % len(self.entriesList)
else:
break
self.firstIndex = index
def _test():
import doctest, SessionCache
return doctest.testmod(SessionCache)
if __name__ == "__main__":
_test()
| apache-2.0 |
Andrew-McNab-UK/DIRAC | DataManagementSystem/scripts/dirac-dms-user-lfns.py | 1 | 5368 | #!/usr/bin/env python
########################################################################
# $HeadURL$
########################################################################
"""
Get the list of all the user files.
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Base import Script
days = 0
months = 0
years = 0
wildcard = None
baseDir = ''
emptyDirsFlag = False
Script.registerSwitch( "D:", "Days=", "Match files older than number of days [%s]" % days )
Script.registerSwitch( "M:", "Months=", "Match files older than number of months [%s]" % months )
Script.registerSwitch( "Y:", "Years=", "Match files older than number of years [%s]" % years )
Script.registerSwitch( "w:", "Wildcard=", "Wildcard for matching filenames [All]" )
Script.registerSwitch( "b:", "BaseDir=", "Base directory to begin search (default /[vo]/user/[initial]/[username])" )
Script.registerSwitch( "e", "EmptyDirs", "Create a list of empty directories" )
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ...' % Script.scriptName, ] ) )
Script.parseCommandLine( ignoreErrors = False )
for switch in Script.getUnprocessedSwitches():
if switch[0] == "D" or switch[0].lower() == "days":
days = int( switch[1] )
if switch[0] == "M" or switch[0].lower() == "months":
months = int( switch[1] )
if switch[0] == "Y" or switch[0].lower() == "years":
years = int( switch[1] )
if switch[0].lower() == "w" or switch[0].lower() == "wildcard":
wildcard = switch[1]
if switch[0].lower() == "b" or switch[0].lower() == "basedir":
baseDir = switch[1]
if switch[0].lower() == "e" or switch[0].lower() == "emptydirs":
emptyDirsFlag = True
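# Example invocations (illustrative only; a valid DIRAC proxy is assumed and
# the VO/base directory below are placeholders):
#   dirac-dms-user-lfns
#   dirac-dms-user-lfns --Months=6 --Wildcard='*.root'
#   dirac-dms-user-lfns --BaseDir=/somevo/user/j/jsmith --EmptyDirs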
import DIRAC
from DIRAC import gLogger
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from datetime import datetime, timedelta
import sys, os, time, fnmatch
fc = FileCatalog()
def isOlderThan( cTimeStruct, days ):
timeDelta = timedelta( days = days )
maxCTime = datetime.utcnow() - timeDelta
if cTimeStruct < maxCTime:
return True
return False
withMetadata = False
if days or months or years:
withMetadata = True
totalDays = 0
if years:
totalDays += 365 * years
if months:
totalDays += 30 * months
if days:
totalDays += days
res = getProxyInfo( False, False )
if not res['OK']:
gLogger.error( "Failed to get client proxy information.", res['Message'] )
DIRAC.exit( 2 )
proxyInfo = res['Value']
if proxyInfo['secondsLeft'] == 0:
gLogger.error( "Proxy expired" )
DIRAC.exit( 2 )
username = proxyInfo['username']
vo = ''
if 'group' in proxyInfo:
vo = getVOForGroup( proxyInfo['group'] )
if not baseDir:
if not vo:
gLogger.error( 'Could not determine VO' )
Script.showHelp()
baseDir = '/%s/user/%s/%s' % ( vo, username[0], username )
baseDir = baseDir.rstrip( '/' )
gLogger.notice( 'Will search for files in %s%s' % ( baseDir, ( ' matching %s' % wildcard ) if wildcard else '' ) )
activeDirs = [baseDir]
allFiles = []
emptyDirs = []
while len( activeDirs ) > 0:
currentDir = activeDirs.pop()
res = fc.listDirectory( currentDir, withMetadata, timeout = 360 )
if not res['OK']:
gLogger.error( "Error retrieving directory contents", "%s %s" % ( currentDir, res['Message'] ) )
elif currentDir in res['Value']['Failed']:
gLogger.error( "Error retrieving directory contents", "%s %s" % ( currentDir, res['Value']['Failed'][currentDir] ) )
else:
dirContents = res['Value']['Successful'][currentDir]
subdirs = dirContents['SubDirs']
files = dirContents['Files']
if not subdirs and not files:
emptyDirs.append( currentDir )
gLogger.notice( '%s: empty directory' % currentDir )
else:
for subdir in sorted( subdirs, reverse = True ):
if ( not withMetadata ) or isOlderThan( subdirs[subdir]['CreationDate'], totalDays ):
activeDirs.append( subdir )
for filename in sorted( files ):
fileOK = False
if ( not withMetadata ) or isOlderThan( files[filename]['MetaData']['CreationDate'], totalDays ):
if wildcard is None or fnmatch.fnmatch( filename, wildcard ):
fileOK = True
if not fileOK:
files.pop( filename )
allFiles += sorted( files )
if len( files ) or len( subdirs ):
gLogger.notice( "%s: %d files%s, %d sub-directories" % ( currentDir, len( files ), ' matching' if withMetadata or wildcard else '', len( subdirs ) ) )
outputFileName = '%s.lfns' % baseDir.replace( '/%s' % vo, '%s' % vo ).replace( '/', '-' )
outputFile = open( outputFileName, 'w' )
for lfn in sorted( allFiles ):
outputFile.write( lfn + '\n' )
outputFile.close()
gLogger.notice( '%d matched files have been put in %s' % ( len( allFiles ), outputFileName ) )
if emptyDirsFlag:
outputFileName = '%s.emptydirs' % baseDir.replace( '/%s' % vo, '%s' % vo ).replace( '/', '-' )
outputFile = open( outputFileName, 'w' )
for dir in sorted( emptyDirs ):
outputFile.write( dir + '\n' )
outputFile.close()
gLogger.notice( '%d empty directories have been put in %s' % ( len( emptyDirs ), outputFileName ) )
DIRAC.exit( 0 )
| gpl-3.0 |
noox-/stbgui-1 | lib/python/Components/opkg.py | 1 | 1470 | import os
def enumFeeds():
for fn in os.listdir('/etc/opkg'):
if fn.endswith('-feed.conf'):
try:
for feed in open(os.path.join('/etc/opkg', fn)):
yield feed.split()[1]
except IndexError:
pass
except IOError:
pass
def enumPlugins(filter_start=''):
for feed in enumFeeds():
package = None
try:
for line in open('/var/lib/opkg/lists/%s' % feed, 'r'):
if line.startswith('Package:'):
package = line.split(":",1)[1].strip()
version = ''
description = ''
if package.startswith(filter_start) and not package.endswith('-dev') and not package.endswith('-staticdev') and not package.endswith('-dbg') and not package.endswith('-doc'):
continue
package = None
if package is None:
continue
if line.startswith('Version:'):
version = line.split(":",1)[1].strip()
elif line.startswith('Description:'):
description = line.split(":",1)[1].strip()
elif description and line.startswith(' '):
description += line[:-1]
elif len(line) <= 1:
d = description.split(' ',3)
if len(d) > 3:
# Get rid of annoying "version" and package repeating strings
if d[1] == 'version':
description = d[3]
if description.startswith('gitAUTOINC'):
description = description.split(' ',1)[1]
yield package, version, description.strip()
package = None
except IOError:
pass
if __name__ == '__main__':
for p in enumPlugins('enigma'):
print p
| gpl-2.0 |
StratusLab/client | api/code/src/test/python/ConfigHolderTest.py | 1 | 2592 | #
# Created as part of the StratusLab project (http://stratuslab.eu),
# co-funded by the European Commission under the Grant Agreement
# INFSO-RI-261552."
#
# Copyright (c) 2011, SixSq Sarl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from stratuslab.ConfigHolder import ConfigHolder
class ConfigHolderTest(unittest.TestCase):
def testCamelCase(self):
configHolder = ConfigHolder()
self.assertEqual('oneTwoThree', configHolder._camelCase('One_Two_Three'))
self.assertEqual('oneTwoThree', configHolder._camelCase('one_two_three'))
self.assertEqual('a', configHolder._camelCase('a'))
self.assertEqual('', configHolder._camelCase(''))
def testConfigToDict(self):
configHolder = ConfigHolder()
config = {'one_two_three': '123'}
key, value = configHolder._formatConfigKeys(config).items()[0]
self.assertEqual('oneTwoThree', key)
self.assertEqual('123', value)
def testCopy(self):
original = ConfigHolder({'a':'A'},{'b':'B'})
copy = original.copy()
copy.options['a'] = '_A'
copy.config['b'] = '_B'
self.assertEqual('A', original.options['a'])
self.assertEqual('B', original.config['b'])
def testToString(self):
configHolder = ConfigHolder({'a':'A'},{'b':'B'})
result = """* ConfigHolder:
** OPTIONS:
a = A
** CONFIG:
b = B
"""
self.assertEqual(str(configHolder), result)
def testGetterSetter(self):
configHolder = ConfigHolder({'a':'A'},{'b':'B'})
configHolder.c = 'C'
self.assertEqual('A', configHolder.a)
self.assertEqual('B', configHolder.b)
self.assertEqual('C', configHolder.c)
def testGetterSetterEmpty(self):
configHolder = ConfigHolder()
try:
configHolder.a
except AttributeError:
pass
else:
self.fail()
def testGetterSetterEmpty2(self):
self.assertRaises(TypeError, ConfigHolder, None, None)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
chugunovyar/factoryForBuild | env/lib/python2.7/site-packages/scipy/io/arff/arffread.py | 25 | 19838 | #! /usr/bin/env python
# Last Change: Mon Aug 20 08:00 PM 2007 J
from __future__ import division, print_function, absolute_import
import re
import itertools
import datetime
from functools import partial
import numpy as np
from scipy._lib.six import next
"""A module to read arff files."""
__all__ = ['MetaData', 'loadarff', 'ArffError', 'ParseArffError']
# An Arff file is basically two parts:
# - header
# - data
#
# A header has each of its components starting by @META where META is one of
# the keyword (attribute of relation, for now).
# TODO:
# - both integer and reals are treated as numeric -> the integer info
# is lost!
# - Replace ValueError by ParseError or something
# We can now handle the following:
# - numeric and nominal attributes
# - missing values for numeric attributes
r_meta = re.compile(r'^\s*@')
# Match a comment
r_comment = re.compile(r'^%')
# Match an empty line
r_empty = re.compile(r'^\s+$')
# Match a header line, that is a line which starts by @ + a word
r_headerline = re.compile(r'^@\S*')
r_datameta = re.compile(r'^@[Dd][Aa][Tt][Aa]')
r_relation = re.compile(r'^@[Rr][Ee][Ll][Aa][Tt][Ii][Oo][Nn]\s*(\S*)')
r_attribute = re.compile(r'^@[Aa][Tt][Tt][Rr][Ii][Bb][Uu][Tt][Ee]\s*(..*$)')
# To get attributes name enclosed with ''
r_comattrval = re.compile(r"'(..+)'\s+(..+$)")
# To get normal attributes
r_wcomattrval = re.compile(r"(\S+)\s+(..+$)")
#-------------------------
# Module defined exception
#-------------------------
class ArffError(IOError):
pass
class ParseArffError(ArffError):
pass
#------------------
# Various utilities
#------------------
# An attribute is defined as @attribute name value
def parse_type(attrtype):
"""Given an arff attribute value (meta data), returns its type.
Expect the value to be a name."""
uattribute = attrtype.lower().strip()
if uattribute[0] == '{':
return 'nominal'
elif uattribute[:len('real')] == 'real':
return 'numeric'
elif uattribute[:len('integer')] == 'integer':
return 'numeric'
elif uattribute[:len('numeric')] == 'numeric':
return 'numeric'
elif uattribute[:len('string')] == 'string':
return 'string'
elif uattribute[:len('relational')] == 'relational':
return 'relational'
elif uattribute[:len('date')] == 'date':
return 'date'
else:
raise ParseArffError("unknown attribute %s" % uattribute)
def get_nominal(attribute):
"""If attribute is nominal, returns a list of the values"""
return attribute.split(',')
def read_data_list(ofile):
"""Read each line of the iterable and put it in a list."""
data = [next(ofile)]
if data[0].strip()[0] == '{':
raise ValueError("This looks like a sparse ARFF: not supported yet")
data.extend([i for i in ofile])
return data
def get_ndata(ofile):
"""Read the whole file to get number of data attributes."""
data = [next(ofile)]
loc = 1
if data[0].strip()[0] == '{':
raise ValueError("This looks like a sparse ARFF: not supported yet")
for i in ofile:
loc += 1
return loc
def maxnomlen(atrv):
"""Given a string containing a nominal type definition, returns the
string len of the biggest component.
    A nominal type is defined as something framed between braces ({}).
Parameters
----------
atrv : str
Nominal type definition
Returns
-------
slen : int
length of longest component
Examples
--------
maxnomlen("{floup, bouga, fl, ratata}") returns 6 (the size of
ratata, the longest nominal value).
>>> maxnomlen("{floup, bouga, fl, ratata}")
6
"""
nomtp = get_nom_val(atrv)
return max(len(i) for i in nomtp)
def get_nom_val(atrv):
"""Given a string containing a nominal type, returns a tuple of the
possible values.
A nominal type is defined as something framed between braces ({}).
Parameters
----------
atrv : str
Nominal type definition
Returns
-------
poss_vals : tuple
possible values
Examples
--------
>>> get_nom_val("{floup, bouga, fl, ratata}")
('floup', 'bouga', 'fl', 'ratata')
"""
r_nominal = re.compile('{(.+)}')
m = r_nominal.match(atrv)
if m:
return tuple(i.strip() for i in m.group(1).split(','))
else:
raise ValueError("This does not look like a nominal string")
def get_date_format(atrv):
r_date = re.compile(r"[Dd][Aa][Tt][Ee]\s+[\"']?(.+?)[\"']?$")
m = r_date.match(atrv)
if m:
pattern = m.group(1).strip()
# convert time pattern from Java's SimpleDateFormat to C's format
datetime_unit = None
if "yyyy" in pattern:
pattern = pattern.replace("yyyy", "%Y")
datetime_unit = "Y"
elif "yy":
pattern = pattern.replace("yy", "%y")
datetime_unit = "Y"
if "MM" in pattern:
pattern = pattern.replace("MM", "%m")
datetime_unit = "M"
if "dd" in pattern:
pattern = pattern.replace("dd", "%d")
datetime_unit = "D"
if "HH" in pattern:
pattern = pattern.replace("HH", "%H")
datetime_unit = "h"
if "mm" in pattern:
pattern = pattern.replace("mm", "%M")
datetime_unit = "m"
if "ss" in pattern:
pattern = pattern.replace("ss", "%S")
datetime_unit = "s"
if "z" in pattern or "Z" in pattern:
raise ValueError("Date type attributes with time zone not "
"supported, yet")
if datetime_unit is None:
raise ValueError("Invalid or unsupported date format")
return pattern, datetime_unit
else:
raise ValueError("Invalid or no date format")
def go_data(ofile):
"""Skip header.
the first next() call of the returned iterator will be the @data line"""
return itertools.dropwhile(lambda x: not r_datameta.match(x), ofile)
#----------------
# Parsing header
#----------------
def tokenize_attribute(iterable, attribute):
"""Parse a raw string in header (eg starts by @attribute).
Given a raw string attribute, try to get the name and type of the
attribute. Constraints:
* The first line must start with @attribute (case insensitive, and
space like characters before @attribute are allowed)
* Works also if the attribute is spread on multilines.
* Works if empty lines or comments are in between
Parameters
----------
attribute : str
the attribute string.
Returns
-------
name : str
name of the attribute
value : str
value of the attribute
next : str
next line to be parsed
Examples
--------
If attribute is a string defined in python as r"floupi real", will
return floupi as name, and real as value.
>>> iterable = iter([0] * 10) # dummy iterator
>>> tokenize_attribute(iterable, r"@attribute floupi real")
('floupi', 'real', 0)
If attribute is r"'floupi 2' real", will return 'floupi 2' as name,
and real as value.
>>> tokenize_attribute(iterable, r" @attribute 'floupi 2' real ")
('floupi 2', 'real', 0)
"""
sattr = attribute.strip()
mattr = r_attribute.match(sattr)
if mattr:
# atrv is everything after @attribute
atrv = mattr.group(1)
if r_comattrval.match(atrv):
name, type = tokenize_single_comma(atrv)
next_item = next(iterable)
elif r_wcomattrval.match(atrv):
name, type = tokenize_single_wcomma(atrv)
next_item = next(iterable)
else:
# Not sure we should support this, as it does not seem supported by
# weka.
raise ValueError("multi line not supported yet")
#name, type, next_item = tokenize_multilines(iterable, atrv)
else:
raise ValueError("First line unparsable: %s" % sattr)
if type == 'relational':
raise ValueError("relational attributes not supported yet")
return name, type, next_item
def tokenize_single_comma(val):
# XXX we match twice the same string (here and at the caller level). It is
# stupid, but it is easier for now...
m = r_comattrval.match(val)
if m:
try:
name = m.group(1).strip()
type = m.group(2).strip()
except IndexError:
raise ValueError("Error while tokenizing attribute")
else:
raise ValueError("Error while tokenizing single %s" % val)
return name, type
def tokenize_single_wcomma(val):
# XXX we match twice the same string (here and at the caller level). It is
# stupid, but it is easier for now...
m = r_wcomattrval.match(val)
if m:
try:
name = m.group(1).strip()
type = m.group(2).strip()
except IndexError:
raise ValueError("Error while tokenizing attribute")
else:
raise ValueError("Error while tokenizing single %s" % val)
return name, type
def read_header(ofile):
"""Read the header of the iterable ofile."""
i = next(ofile)
# Pass first comments
while r_comment.match(i):
i = next(ofile)
# Header is everything up to DATA attribute ?
relation = None
attributes = []
while not r_datameta.match(i):
m = r_headerline.match(i)
if m:
isattr = r_attribute.match(i)
if isattr:
name, type, i = tokenize_attribute(ofile, i)
attributes.append((name, type))
else:
isrel = r_relation.match(i)
if isrel:
relation = isrel.group(1)
else:
raise ValueError("Error parsing line %s" % i)
i = next(ofile)
else:
i = next(ofile)
return relation, attributes
#--------------------
# Parsing actual data
#--------------------
def safe_float(x):
"""given a string x, convert it to a float. If the stripped string is a ?,
return a Nan (missing value).
Parameters
----------
x : str
string to convert
Returns
-------
f : float
where float can be nan
Examples
--------
>>> safe_float('1')
1.0
>>> safe_float('1\\n')
1.0
>>> safe_float('?\\n')
nan
"""
if '?' in x:
return np.nan
else:
return float(x)
def safe_nominal(value, pvalue):
svalue = value.strip()
if svalue in pvalue:
return svalue
elif svalue == '?':
return svalue
else:
raise ValueError("%s value not in %s" % (str(svalue), str(pvalue)))
def safe_date(value, date_format, datetime_unit):
date_str = value.strip().strip("'").strip('"')
if date_str == '?':
return np.datetime64('NaT', datetime_unit)
else:
dt = datetime.datetime.strptime(date_str, date_format)
return np.datetime64(dt).astype("datetime64[%s]" % datetime_unit)
class MetaData(object):
"""Small container to keep useful informations on a ARFF dataset.
Knows about attributes names and types.
Examples
--------
::
data, meta = loadarff('iris.arff')
# This will print the attributes names of the iris.arff dataset
for i in meta:
print i
# This works too
meta.names()
# Getting attribute type
types = meta.types()
Notes
-----
Also maintains the list of attributes in order, i.e. doing for i in
meta, where meta is an instance of MetaData, will return the
different attribute names in the order they were defined.
"""
def __init__(self, rel, attr):
self.name = rel
# We need the dictionary to be ordered
# XXX: may be better to implement an ordered dictionary
self._attributes = {}
self._attrnames = []
for name, value in attr:
tp = parse_type(value)
self._attrnames.append(name)
if tp == 'nominal':
self._attributes[name] = (tp, get_nom_val(value))
elif tp == 'date':
self._attributes[name] = (tp, get_date_format(value)[0])
else:
self._attributes[name] = (tp, None)
def __repr__(self):
msg = ""
msg += "Dataset: %s\n" % self.name
for i in self._attrnames:
msg += "\t%s's type is %s" % (i, self._attributes[i][0])
if self._attributes[i][1]:
msg += ", range is %s" % str(self._attributes[i][1])
msg += '\n'
return msg
def __iter__(self):
return iter(self._attrnames)
def __getitem__(self, key):
return self._attributes[key]
def names(self):
"""Return the list of attribute names."""
return self._attrnames
def types(self):
"""Return the list of attribute types."""
attr_types = [self._attributes[name][0] for name in self._attrnames]
return attr_types
def loadarff(f):
"""
Read an arff file.
The data is returned as a record array, which can be accessed much like
a dictionary of numpy arrays. For example, if one of the attributes is
called 'pressure', then its first 10 data points can be accessed from the
``data`` record array like so: ``data['pressure'][0:10]``
Parameters
----------
f : file-like or str
File-like object to read from, or filename to open.
Returns
-------
data : record array
The data of the arff file, accessible by attribute names.
meta : `MetaData`
Contains information about the arff file such as name and
type of attributes, the relation (name of the dataset), etc...
Raises
------
ParseArffError
This is raised if the given file is not ARFF-formatted.
NotImplementedError
The ARFF file has an attribute which is not supported yet.
Notes
-----
This function should be able to read most arff files. Not
implemented functionality include:
* date type attributes
* string type attributes
It can read files with numeric and nominal attributes. It cannot read
files with sparse data ({} in the file). However, this function can
read files with missing data (? in the file), representing the data
points as NaNs.
Examples
--------
>>> from scipy.io import arff
>>> from cStringIO import StringIO
>>> content = \"\"\"
... @relation foo
... @attribute width numeric
... @attribute height numeric
... @attribute color {red,green,blue,yellow,black}
... @data
... 5.0,3.25,blue
... 4.5,3.75,green
... 3.0,4.00,red
... \"\"\"
>>> f = StringIO(content)
>>> data, meta = arff.loadarff(f)
>>> data
array([(5.0, 3.25, 'blue'), (4.5, 3.75, 'green'), (3.0, 4.0, 'red')],
dtype=[('width', '<f8'), ('height', '<f8'), ('color', '|S6')])
>>> meta
Dataset: foo
\twidth's type is numeric
\theight's type is numeric
\tcolor's type is nominal, range is ('red', 'green', 'blue', 'yellow', 'black')
"""
if hasattr(f, 'read'):
ofile = f
else:
ofile = open(f, 'rt')
try:
return _loadarff(ofile)
finally:
if ofile is not f: # only close what we opened
ofile.close()
def _loadarff(ofile):
# Parse the header file
try:
rel, attr = read_header(ofile)
except ValueError as e:
msg = "Error while parsing header, error was: " + str(e)
raise ParseArffError(msg)
# Check whether we have a string attribute (not supported yet)
hasstr = False
for name, value in attr:
type = parse_type(value)
if type == 'string':
hasstr = True
meta = MetaData(rel, attr)
# XXX The following code is not great
# Build the type descriptor descr and the list of convertors to convert
# each attribute to the suitable type (which should match the one in
# descr).
# This can be used once we want to support integer as integer values and
# not as numeric anymore (using masked arrays ?).
acls2dtype = {'real': float, 'integer': float, 'numeric': float}
acls2conv = {'real': safe_float,
'integer': safe_float,
'numeric': safe_float}
descr = []
convertors = []
if not hasstr:
for name, value in attr:
type = parse_type(value)
if type == 'date':
date_format, datetime_unit = get_date_format(value)
descr.append((name, "datetime64[%s]" % datetime_unit))
convertors.append(partial(safe_date, date_format=date_format,
datetime_unit=datetime_unit))
elif type == 'nominal':
n = maxnomlen(value)
descr.append((name, 'S%d' % n))
pvalue = get_nom_val(value)
convertors.append(partial(safe_nominal, pvalue=pvalue))
else:
descr.append((name, acls2dtype[type]))
convertors.append(safe_float)
#dc.append(acls2conv[type])
#sdescr.append((name, acls2sdtype[type]))
else:
# How to support string efficiently ? Ideally, we should know the max
# size of the string before allocating the numpy array.
raise NotImplementedError("String attributes not supported yet, sorry")
ni = len(convertors)
def generator(row_iter, delim=','):
# TODO: this is where we are spending times (~80%). I think things
# could be made more efficiently:
# - We could for example "compile" the function, because some values
# do not change here.
# - The function to convert a line to dtyped values could also be
# generated on the fly from a string and be executed instead of
# looping.
# - The regex are overkill: for comments, checking that a line starts
# by % should be enough and faster, and for empty lines, same thing
# --> this does not seem to change anything.
# 'compiling' the range since it does not change
# Note, I have already tried zipping the converters and
# row elements and got slightly worse performance.
elems = list(range(ni))
for raw in row_iter:
# We do not abstract skipping comments and empty lines for
# performance reasons.
if r_comment.match(raw) or r_empty.match(raw):
continue
row = raw.split(delim)
yield tuple([convertors[i](row[i]) for i in elems])
a = generator(ofile)
# No error should happen here: it is a bug otherwise
data = np.fromiter(a, descr)
return data, meta
#-----
# Misc
#-----
def basic_stats(data):
nbfac = data.size * 1. / (data.size - 1)
return np.nanmin(data), np.nanmax(data), np.mean(data), np.std(data) * nbfac
def print_attribute(name, tp, data):
type = tp[0]
if type == 'numeric' or type == 'real' or type == 'integer':
min, max, mean, std = basic_stats(data)
print("%s,%s,%f,%f,%f,%f" % (name, type, min, max, mean, std))
else:
msg = name + ",{"
for i in range(len(tp[1])-1):
msg += tp[1][i] + ","
msg += tp[1][-1]
msg += "}"
print(msg)
def test_weka(filename):
data, meta = loadarff(filename)
print(len(data.dtype))
print(data.size)
for i in meta:
print_attribute(i, meta[i], data[i])
# make sure nose does not find this as a test
test_weka.__test__ = False
if __name__ == '__main__':
import sys
filename = sys.argv[1]
test_weka(filename)
| gpl-3.0 |
Stranger6667/pytest-browserstack | setup.py | 1 | 2618 | #!/usr/bin/env python
# coding: utf-8
import sys
import platform
from setuptools import setup
from setuptools.command.test import test as TestCommand
PYPY = hasattr(sys, 'pypy_translation_info')
PYPY3 = PYPY and sys.version_info[0] == 3
JYTHON = platform.system() == 'Java'
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', 'Arguments to pass into py.test')]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
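# Typical invocation (a sketch; extra arguments are forwarded to py.test via
# the --pytest-args/-a option declared above):
#   python setup.py test -a "-v tests/"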
requirements = [
'browserstacker',
'pytest>=2.8.5',
'requests',
'selenium>=2.49.2',
]
test_requirements = []
if sys.version_info < (3, 3):
test_requirements.append('mock==1.0.1')
if sys.version_info[:2] == (3, 2):
test_requirements.append('coverage==3.7.1')
if not JYTHON:
test_requirements.append('pytest-cov==1.8')
setup(
name='pytest_browserstack',
version='0.1',
packages=['pytest_browserstack'],
url='https://github.com/Stranger6667/pytest-browserstack',
license='MIT',
author='Dmitry Dygalo',
author_email='[email protected]',
maintainer='Dmitry Dygalo',
maintainer_email='[email protected]',
description='Py.test plugin for BrowserStack',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Programming Language :: Python :: Implementation :: Jython',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: User Interfaces',
],
cmdclass={'test': PyTest},
include_package_data=True,
install_requires=requirements,
tests_require=test_requirements,
entry_points={
'pytest11': [
'pytest_browserstack = pytest_browserstack',
]
},
)
| mit |
honglioslo/NorwayIndexFlood | General10.py | 1 | 10720 | # -*- coding: latin_1 -*-
# General.py
#-----------------------------------------------------------------------------------
# Created: 26.09.2008, IOPE
# Revised:
#-----------------------------------------------------------------------------------
# Description:
# Module with various general utility functions
#-----------------------------------------------------------------------------------
# Context: General
#-----------------------------------------------------------------------------------
# Arguments:
# <argument 1>: ScriptName, the name of the script this module is called from
# <argument 2>: logtekst, the text to be printed in the logfile
#-----------------------------------------------------------------------------------
# Results: logfile with the name of the script it is called from
#-----------------------------------------------------------------------------------
# Import system modules
import sys, os, smtplib, arcpy, time, ConfigParser
#***********************************************************************************
def Log(LogFile, logtekst):
try:
#---Open the logfile
if not arcpy.Exists(LogFile):
OpenLogFile = open(LogFile, 'w')
else:
OpenLogFile = open(LogFile, 'a')
#---Write tekst to the logfile
Time = time.strftime("%Y-%m-%d,%I:%M:%S", time.localtime())
OpenLogFile.write(Time+" ")
OpenLogFile.write(logtekst)
OpenLogFile.write("\n")
#---Close the logfile
OpenLogFile.close()
except:
        #If an error occurred print the message to the screen
print "an error has occurred in function " + "Log"
sys.exit(1)
#***********************************************************************************
def Kill(GeoDatasetList):
try:
for GeoDataset in GeoDatasetList:
if arcpy.Exists(GeoDataset):
arcpy.Delete_management(GeoDataset)
#print GeoDataset + " is pushing up the daisies"
if arcpy.Exists(GeoDataset + ".shp"):
arcpy.Delete_management(GeoDataset + ".shp")
#print GeoDataset + ".shp is pushing up the daisies"
except:
        #If an error occurred print the message to the screen
print "an error has occurred in function " + "Kill"
print arcpy.GetMessages()
sys.exit(2)
#*****************************************************************************
def sett_snapextent(lExtent, lRaster):
try:
dsc = arcpy.Describe(lRaster)
iCell = dsc.MeanCellWidth
xmin = round(float(lExtent.XMin) / iCell) * iCell
ymin = round(float(lExtent.YMin) / iCell) * iCell
xmax = round(float(lExtent.XMax) / iCell) * iCell
ymax = round(float(lExtent.YMax) / iCell) * iCell
extent = "%s %s %s %s" %(xmin,ymin,xmax,ymax)
return extent
except:
        #If an error occurred print the message to the screen
print "an error has occurred in function " + "sett_snapextent"
print arcpy.GetMessages()
sys.exit(3)
#***********************************************************************************
def FieldExist(Featureclass, FieldName):
try:
FieldList = arcpy.ListFields(Featureclass, FieldName)
FieldCount = len(FieldList)
if FieldCount == 1:
return True
else:
return False
except:
        #If an error occurred print the message to the screen
print "an error has occurred in function " + "FieldExist"
print arcpy.GetMessages()
return False
#***********************************************************************************
def NeededFieldWidth(InFeatureClass,FieldName,LogFile):
try:
MaxFieldWidth = 0
cur = arcpy.SearchCursor(InFeatureClass,"","",FieldName)
for row in cur:
if row.getValue(FieldName):
if len(row.getValue(FieldName)) > MaxFieldWidth:
MaxFieldWidth = len(row.getValue(FieldName))
del cur
print MaxFieldWidth
return MaxFieldWidth
except:
ErrorHandling(LogFile)
sys.exit(1)
#***********************************************************************************
def CheckWorkspaceForEmptyFeatureClasses(Workspace):
try:
# get the current workspace
CurrentWorkspace = arcpy.env.workspace
arcpy.env.workspace = Workspace
EmptyFeatureClassList = []
for FeatureClass in arcpy.ListFeatureClasses():
if arcpy.GetCount_management(FeatureClass).getOutput(0) == 0:
EmptyFeatureClassList.append(FeatureClass)
if len(EmptyFeatureClassList) > 0:
return True
else:
return False
except:
        #If an error occurred print the message to the screen
print "an error has occurred in function " + "CheckWorkspaceForEmptyFeatureClasses"
print arcpy.GetMessages()
return False
#***********************************************************************************
def StringIsAap(String):
try:
if String == "Aap":
return True
else:
return False
except:
        #If an error occurred print the message to the screen
print "an error has occurred in function " + "StringIsAap"
print arcpy.GetMessages()
return False
#***********************************************************************************
def ReadIni(IniFile,Var):
    #-------------------------------------------------------------------#
    # The module needs the name of the ini file and a variable          #
    # The ini file must look like this:                                 #
    #                                                                   #
    # [Sectionname]                                                     #
    # variable:value  or  variable = value                              #
    #                                                                   #
    # Sections can be used e.g. to categorize paths, files, values      #
    #-------------------------------------------------------------------#
try:
if not arcpy.Exists(IniFile):
print "%s does not exist" %(IniFile)
sys.exit(2)
else:
var = Var.lower()
ItemCheck = 0
config = ConfigParser.ConfigParser()
config.read(IniFile)
# loop through all sections"
for section in config.sections():
# and listet variables to find the value of Var
for Item in config.items(section):
if Item[0] == var:
Item = config.get(section,var)
ItemCheck = 1
#print Item
return Item
if ItemCheck == 0:
Item = "Variable %s does not exist in %s" %(Var,IniFile)
return Item
except:
        #If an error occurred print the message to the screen
print "an error has occurred in function " + "ReadIni"
print "OBS! sjekk om inifilen er en ren txt og ikke rft!!!!"
print arcpy.GetMessages()
sys.exit(1)
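# Example (a sketch; the path and variable name are placeholders):
#   LogDir = ReadIni(r"C:\temp\settings.ini", "LogDir")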
#***********************************************************************************
def Email(FromEmailAdress,ToEmailAdress,Subject,EmailText):
try:
TEXT = """
%s""" %(EmailText)
print TEXT
#----------- Prepare actual message --------------
msg = """\
From: %s
To: %s
Subject: %s
%s
""" % (FromEmailAdress, ToEmailAdress, Subject, TEXT)
# The actual mail send
# server = smtplib.SMTP("exch-post.nve.no")
server = smtplib.SMTP("smtput.nve.no")
server.sendmail(FromEmailAdress, ToEmailAdress, msg)
server.quit()
# -------------------------------------------------
except:
        #If an error occurred print the message to the screen
print "an error has occurred in function " + "Email"
print "if i remember correctly so works this only from GISEN"
sys.exit(1)
#***********************************************************************************
def ExtentSelected(lyrFeatures, LogFile):
try:
cur = arcpy.SearchCursor(lyrFeatures)
lstExt = [0,0,0,0]
for row in cur:
FeatureExt = row.Shape.Extent
if lstExt[0] > FeatureExt.XMin: lstExt[0] = FeatureExt.XMin
if lstExt[1] > FeatureExt.YMin: lstExt[1] = FeatureExt.YMin
if lstExt[2] < FeatureExt.XMax: lstExt[2] = FeatureExt.XMax
if lstExt[3] < FeatureExt.YMax: lstExt[3] = FeatureExt.YMax
del cur
extent = "%s %s %s %s" % tuple(lstExt)
return extent
except:
ErrorHandling(LogFile)
sys.exit(1)
| gpl-3.0 |
evanbiederstedt/RRBSfun | scripts/repeat_finder_scripts/faster_repeats/temp_RRBS_NormalBCD19pcell45_66.py | 1 | 1380 | import glob
import pandas as pd
import numpy as np
df1 = pd.read_csv("repeats_hg19.csv")
RRBS_files = glob.glob("RRBS_NormalBCD19pcell45_66*")
df_dict = {group : df for group, df in df1.groupby(by="chr")}
from numpy import nan
def between_range(row, group_dict):
    # get sub dataframe from the dictionary; if the key is not found return an empty string
subset = group_dict.get(row['chr'], None)
if subset is None:
return ''
# slice subset to get even smaller group
subset = subset.loc[subset['start'] <= row['start'], :]
# check if subset has any values
if subset.empty:
return ''
# now slice the second time and return only a series of 'labels'
subset = subset.loc[subset['end'] >= row['start'], 'repeat_class']
    # if there is more than one label, only the first one is taken here
    # return the first label; if the slice is empty, return an empty string
if subset.empty:
return ''
else:
return subset.iloc[0]
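# Illustration (hypothetical values): for a row with chr='chr1' and
# start=1500, and a 'chr1' subset holding a repeat with start=1000,
# end=2000, repeat_class='LINE/L1', between_range returns 'LINE/L1';
# rows with no enclosing repeat (or an unknown chromosome) return ''.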
from functools import partial
from time import time
between_range_partial = partial(between_range, group_dict=df_dict)
cols = ['chr', 'start']
for filename in RRBS_files:
df2 = pd.read_csv(filename, sep="\t")
labels = df2.loc[:, cols].apply(between_range_partial, axis=1)
df2["repeat_class"] = labels
df2.to_csv(filename, sep='\t', index=False)
| mit |
bright-sparks/wpull | wpull/item.py | 1 | 8298 | # encoding=utf-8
'''URL items.'''
import collections
import gettext
import logging
from wpull.backport.logging import BraceMessage as __
from wpull.url import URLInfo
_ = gettext.gettext
_logger = logging.getLogger(__name__)
class Status(object):
'''URL status.'''
todo = 'todo'
'''The item has not yet been processed.'''
in_progress = 'in_progress'
'''The item is in progress of being processed.'''
done = 'done'
'''The item has been processed successfully.'''
error = 'error'
'''The item encountered an error during processing.'''
skipped = 'skipped'
'''The item was excluded from processing due to some rejection filters.'''
_URLRecordType = collections.namedtuple(
'URLRecordType',
[
'url',
'status',
'try_count',
'level',
'top_url',
'status_code',
'referrer',
'inline',
'link_type',
'post_data',
'filename',
]
)
class LinkType(object):
'''The type of contents that a link is expected to have.'''
html = 'html'
'''HTML document.'''
css = 'css'
'''Stylesheet file. Recursion on links is usually safe.'''
javascript = 'javascript'
'''JavaScript file. Possible to recurse links on this file.'''
media = 'media'
'''Image or video file. Recursion on this type will not be useful.'''
sitemap = 'sitemap'
'''A Sitemap.xml file.'''
file = 'file'
'''FTP File.'''
directory = 'directory'
'''FTP directory.'''
class URLRecord(_URLRecordType):
'''An entry in the URL table describing a URL to be downloaded.
Attributes:
url (str): The URL.
status (str): The status as specified from :class:`Status`.
try_count (int): The number of attempts on this URL.
level (int): The recursive depth of this URL. A level of ``0``
indicates the URL was initially supplied to the program (the
top URL).
Level ``1`` means the URL was linked from the top URL.
top_url (str): The earliest ancestor URL of this URL. The `top_url`
is typically the URL supplied at the start of the program.
status_code (int): The HTTP status code.
referrer (str): The parent URL that linked to this URL.
inline (int): Whether this URL was an embedded object (such as an
image or a stylesheet) of the parent URL.
The value represents the recursive depth of the object. For
example, an iframe is depth 1 and the images in the iframe
is depth 2.
link_type (str): Describes the document type. Values are:
* ``html``: HTML document
* ``css``: CSS document
post_data (str): If given, the URL should be fetched as a
POST request containing `post_data`.
filename (str): The path to where the file was saved.
'''
@property
def url_info(self):
'''Return an :class:`.url.URLInfo` for the ``url``.'''
return URLInfo.parse(self.url)
@property
def referrer_info(self):
'''Return an :class:`.url.URLInfo` for the ``referrer``.'''
return URLInfo.parse(self.referrer)
def to_dict(self):
'''Return the values as a ``dict``.
In addition to the attributes, it also includes the ``url_info`` and
``referrer_info`` properties converted to ``dict`` as well.
'''
return {
'url': self.url,
'status': self.status,
'url_info': self.url_info.to_dict(),
'try_count': self.try_count,
'level': self.level,
'top_url': self.top_url,
'status_code': self.status_code,
'referrer': self.referrer,
'referrer_info':
self.referrer_info.to_dict() if self.referrer else None,
'inline': self.inline,
'link_type': self.link_type,
'post_data': self.post_data,
'filename': self.filename,
}
class URLItem(object):
'''Item for a URL that needs to processed.'''
def __init__(self, url_table, url_info, url_record):
self._url_table = url_table
self._url_info = url_info
self._url_record = url_record
self._url = self._url_record.url
self._processed = False
self._try_count_incremented = False
@property
def url_info(self):
'''Return the :class:`.url.URLInfo`.'''
return self._url_info
@property
def url_record(self):
'''Return the :class:`URLRecord`.'''
return self._url_record
@property
def url_table(self):
'''Return the :class:`.database.URLTable`.'''
return self._url_table
@property
def is_processed(self):
'''Return whether the item has been processed.'''
return self._processed
def skip(self):
'''Mark the item as processed without download.'''
_logger.debug(__(_('Skipping ‘{url}’.'), url=self._url))
self._url_table.check_in(self._url, Status.skipped)
self._processed = True
def set_status(self, status, increment_try_count=True, filename=None):
'''Mark the item with the given status.
Args:
status (int): a value from :class:`Status`.
increment_try_count (bool): if True, increment the ``try_count``
value
'''
assert not self._try_count_incremented, (self._url, status)
if increment_try_count:
self._try_count_incremented = True
_logger.debug(__('Marking URL {0} status {1}.', self._url, status))
self._url_table.check_in(
self._url,
status,
increment_try_count=increment_try_count,
filename=filename,
)
self._processed = True
def set_value(self, **kwargs):
'''Set values for the URL in table.'''
self._url_table.update_one(self._url, **kwargs)
def add_child_url(self, url, inline=False, **kwargs):
'''Add a single URL as a child of this item.
See :meth:`add_child_urls` for argument details.
'''
self.add_child_urls([{'url': url}], inline=inline, **kwargs)
def add_child_urls(self, urls, inline=False, level=None, **kwargs):
'''Add links scraped from the document with automatic values.
Args:
urls: An iterable of `str` or `dict`. When a `str` is provided,
it is a URL. When a `dict` is provided, it is a mapping
of table column names to values.
inline (bool): Whether the URL is an embedded object. This
function automatically calculates the value needed for
the table column "inline".
            kwargs: Additional column values to be applied to all URLs
provided.
This function provides values automatically for:
* ``inline``
* ``level``
* ``referrer``
* ``top_url``
See also :meth:`.database.base.BaseSQLURLTable.add_many`.
'''
self._url_table.add_many(
[item if isinstance(item, dict) else {'url': item} for item in urls],
inline=(self._url_record.inline or 0) + 1 if inline else None,
level=self._url_record.level + 1 if level is None else level,
referrer=self._url_record.url,
top_url=self._url_record.top_url or self._url_record.url,
**kwargs
)
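    # A minimal sketch (URLs and column values are placeholders): strings and
    # dicts may be mixed, and extra keyword values apply to every added row:
    #
    #   url_item.add_child_urls(
    #       ['http://example.com/page2',
    #        {'url': 'http://example.com/logo.png', 'link_type': 'media'}],
    #       inline=True,
    #   )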
def child_url_record(self, url_info, inline=False,
link_type=None, post_data=None, level=None):
'''Return a child URLRecord.
This function is useful for testing filters before adding to table.
'''
return URLRecord(
url_info.url, # url
Status.todo, # status
0, # try_count
self._url_record.level + 1 if level is None else level, # level
self._url_record.top_url or self._url_record.url, # top_url
None, # status_code
self._url_record.url, # referrer
(self._url_record.inline or 0) + 1 if inline else 0, # inline
link_type, # link_type
post_data, # post_data
None # filename
)
| gpl-3.0 |
Mozu/mozu-python-sdk | mozurestsdk/commerce/catalog/admin/pricelists/pricelistentry.py | 1 | 8239 |
"""
This code was generated by Codezu.
Changes to this file may cause incorrect behavior and will be lost if
the code is regenerated.
"""
from mozurestsdk.mozuclient import default as default_client
from mozurestsdk.mozuurl import MozuUrl;
from mozurestsdk.urllocation import UrlLocation
from mozurestsdk.apicontext import ApiContext;
class PriceListEntry(object):
def __init__(self, apiContext: ApiContext = None, mozuClient = None):
self.client = mozuClient or default_client();
if (apiContext is not None):
self.client.withApiContext(apiContext);
else:
self.client.withApiContext(ApiContext());
def getPriceListEntry(self,priceListCode, productCode, currencyCode, startDate = None, responseFields = None):
""" Retrieves the details of a price list entry.
Args:
| priceListCode (string) - The unique code of the price list associated with the price list entry.
| productCode (string) - The unique, user-defined product code of a product, used throughout to reference and associate to a product.
| currencyCode (string) - The three character ISO currency code, such as USD for US Dollars.
| startDate (DateTime) - The start date of the price list entry.
| responseFields (string) - Filtering syntax appended to an API call to increase or decrease the amount of data returned inside a JSON object. This parameter should only be used to retrieve data. Attempting to update data using this parameter may cause data loss.
Returns:
| PriceListEntry
Raises:
| ApiException
"""
url = MozuUrl("/api/commerce/catalog/admin/pricelists/{priceListCode}/entries/{productCode}/{currencyCode}?startDate={startDate}&responseFields={responseFields}", "GET", UrlLocation.TenantPod, False);
url.formatUrl("currencyCode", currencyCode);
url.formatUrl("priceListCode", priceListCode);
url.formatUrl("productCode", productCode);
url.formatUrl("responseFields", responseFields);
url.formatUrl("startDate", startDate);
self.client.withResourceUrl(url).execute();
return self.client.result();
def getPriceListEntries(self,priceListCode, startIndex = None, pageSize = None, sortBy = None, filter = None, responseFields = None):
""" Retrieves a list of price list entries associated with the specified price list according to any specified facets, filter criteria, and sort options.
Args:
| priceListCode (string) - The unique code of the price list associated with the price list entry.
| startIndex (int) - When creating paged results from a query, this value indicates the zero-based offset in the complete result set where the returned entities begin. For example, with pageSize set to 25, to get the 51st through the 75th items, set this parameter to 50.
		| pageSize (int) - When creating paged results from a query, this value indicates the number of results to return per page. For example, with this parameter set to 25, to get the 51st through the 75th items, set startIndex to 50.
		| sortBy (string) - The element to sort the results by and the order in which the results appear. Either ascending (a-z) or descending (z-a) order. Optional. Refer to [Sorting and Filtering](../../../../Developer/api-guides/sorting-filtering.htm) for more information.
| filter (string) - A set of filter expressions representing the search parameters for a query. This parameter is optional. Refer to [Sorting and Filtering](../../../../Developer/api-guides/sorting-filtering.htm) for a list of supported filters.
| responseFields (string) - Filtering syntax appended to an API call to increase or decrease the amount of data returned inside a JSON object. This parameter should only be used to retrieve data. Attempting to update data using this parameter may cause data loss.
Returns:
| PriceListEntryCollection
Raises:
| ApiException
"""
url = MozuUrl("/api/commerce/catalog/admin/pricelists/{priceListCode}/entries?startIndex={startIndex}&pageSize={pageSize}&sortBy={sortBy}&filter={filter}&responseFields={responseFields}", "GET", UrlLocation.TenantPod, False);
url.formatUrl("filter", filter);
url.formatUrl("pageSize", pageSize);
url.formatUrl("priceListCode", priceListCode);
url.formatUrl("responseFields", responseFields);
url.formatUrl("sortBy", sortBy);
url.formatUrl("startIndex", startIndex);
self.client.withResourceUrl(url).execute();
return self.client.result();
def addPriceListEntry(self,priceListEntryIn, priceListCode, responseFields = None):
""" Adds a new price list entry to the specified price list.
Args:
| priceListEntryIn(priceListEntryIn) - The details of the new price list entry.
| priceListCode (string) - The specified price list to which you want to add the price list entry.
| responseFields (string) - Filtering syntax appended to an API call to increase or decrease the amount of data returned inside a JSON object. This parameter should only be used to retrieve data. Attempting to update data using this parameter may cause data loss.
Returns:
| PriceListEntry
Raises:
| ApiException
"""
url = MozuUrl("/api/commerce/catalog/admin/pricelists/{priceListCode}/entries?responseFields={responseFields}", "POST", UrlLocation.TenantPod, False);
url.formatUrl("priceListCode", priceListCode);
url.formatUrl("responseFields", responseFields);
self.client.withResourceUrl(url).withBody(priceListEntryIn).execute();
return self.client.result();
def updatePriceListEntry(self,priceListEntryIn, priceListCode, productCode, currencyCode, startDate = None, responseFields = None):
""" Updates the details of a price list entry.
Args:
| priceListEntryIn(priceListEntryIn) - The updated details of the price list entry.
| priceListCode (string) - The unique code of the price list associated with the price list entry.
| productCode (string) - The unique, user-defined product code of a product, used throughout to reference and associate to a product.
| currencyCode (string) - The three character ISO currency code, such as USD for US Dollars.
| startDate (DateTime) - The start date of the price list entry.
| responseFields (string) - Filtering syntax appended to an API call to increase or decrease the amount of data returned inside a JSON object. This parameter should only be used to retrieve data. Attempting to update data using this parameter may cause data loss.
Returns:
| PriceListEntry
Raises:
| ApiException
"""
url = MozuUrl("/api/commerce/catalog/admin/pricelists/{priceListCode}/entries/{productCode}/{currencyCode}?startDate={startDate}&responseFields={responseFields}", "PUT", UrlLocation.TenantPod, False);
url.formatUrl("currencyCode", currencyCode);
url.formatUrl("priceListCode", priceListCode);
url.formatUrl("productCode", productCode);
url.formatUrl("responseFields", responseFields);
url.formatUrl("startDate", startDate);
self.client.withResourceUrl(url).withBody(priceListEntryIn).execute();
return self.client.result();
def deletePriceListEntry(self,priceListCode, productCode, currencyCode, startDate = None):
""" Deletes a price list entry.
Args:
| priceListCode (string) - The code of the specified price list associated with the price list entry.
| productCode (string) - The unique, user-defined product code of a product, used throughout to reference and associate to a product.
| currencyCode (string) - The three character ISO currency code, such as USD for US Dollars.
| startDate (DateTime) - The start date of the price list entry.
Raises:
| ApiException
"""
url = MozuUrl("/api/commerce/catalog/admin/pricelists/{priceListCode}/entries/{productCode}/{currencyCode}?startDate={startDate}", "DELETE", UrlLocation.TenantPod, False);
url.formatUrl("currencyCode", currencyCode);
url.formatUrl("priceListCode", priceListCode);
url.formatUrl("productCode", productCode);
url.formatUrl("startDate", startDate);
self.client.withResourceUrl(url).execute();
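# Hedged usage sketch (editor's illustration; the price list and product
# codes are placeholders, and a configured ApiContext with valid Mozu
# credentials is assumed):
#
#   resource = PriceListEntry(apiContext)
#   entry = resource.getPriceListEntry("SALE2016", "PROD-001", "USD")
#   page = resource.getPriceListEntries("SALE2016", pageSize=25)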
| apache-2.0 |
5outh/nanoscope-site | _site/bower_components/bootstrap/node_modules/npm-shrinkwrap/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/ninja.py | 372 | 89149 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import hashlib
import json
import multiprocessing
import os.path
import re
import signal
import subprocess
import sys
import gyp
import gyp.common
import gyp.msvs_emulation
import gyp.MSVSUtil as MSVSUtil
import gyp.xcode_emulation
from cStringIO import StringIO
from gyp.common import GetEnvironFallback
import gyp.ninja_syntax as ninja_syntax
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
# Gyp expects the following variables to be expandable by the build
# system to the appropriate locations. Ninja prefers paths to be
# known at gyp time. To resolve this, introduce special
# variables starting with $! and $| (which begin with a $ so gyp knows it
# should be treated specially, but is otherwise an invalid
# ninja/shell variable) that are passed to gyp here but expanded
# before writing out into the target .ninja files; see
# ExpandSpecial.
# $! is used for variables that represent a path and that can only appear at
# the start of a string, while $| is used for variables that can appear
# anywhere in a string.
'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
'PRODUCT_DIR': '$!PRODUCT_DIR',
'CONFIGURATION_NAME': '$|CONFIGURATION_NAME',
# Special variables that may be used by gyp 'rule' targets.
# We generate definitions for these variables on the fly when processing a
# rule.
'RULE_INPUT_ROOT': '${root}',
'RULE_INPUT_DIRNAME': '${dirname}',
'RULE_INPUT_PATH': '${source}',
'RULE_INPUT_EXT': '${ext}',
'RULE_INPUT_NAME': '${name}',
}
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
# TODO: figure out how to not build extra host objects in the non-cross-compile
# case when this is enabled, and enable unconditionally.
generator_supports_multiple_toolsets = (
os.environ.get('GYP_CROSSCOMPILE') or
os.environ.get('AR_host') or
os.environ.get('CC_host') or
os.environ.get('CXX_host') or
os.environ.get('AR_target') or
os.environ.get('CC_target') or
os.environ.get('CXX_target'))
def StripPrefix(arg, prefix):
if arg.startswith(prefix):
return arg[len(prefix):]
return arg
def QuoteShellArgument(arg, flavor):
"""Quote a string such that it will be interpreted as a single argument
by the shell."""
# Rather than attempting to enumerate the bad shell characters, just
# whitelist common OK ones and quote anything else.
if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
return arg # No quoting necessary.
if flavor == 'win':
return gyp.msvs_emulation.QuoteForRspFile(arg)
return "'" + arg.replace("'", "'" + '"\'"' + "'") + "'"
def Define(d, flavor):
"""Takes a preprocessor define and returns a -D parameter that's ninja- and
shell-escaped."""
if flavor == 'win':
    # cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
d = d.replace('#', '\\%03o' % ord('#'))
return QuoteShellArgument(ninja_syntax.escape('-D' + d), flavor)
def AddArch(output, arch):
"""Adds an arch string to an output path."""
output, extension = os.path.splitext(output)
return '%s.%s%s' % (output, arch, extension)
class Target:
"""Target represents the paths used within a single gyp target.
Conceptually, building a single target A is a series of steps:
1) actions/rules/copies generates source/resources/etc.
2) compiles generates .o files
3) link generates a binary (library/executable)
4) bundle merges the above in a mac bundle
(Any of these steps can be optional.)
From a build ordering perspective, a dependent target B could just
depend on the last output of this series of steps.
But some dependent commands sometimes need to reach inside the box.
For example, when linking B it needs to get the path to the static
library generated by A.
This object stores those paths. To keep things simple, member
variables only store concrete paths to single files, while methods
compute derived values like "the last output of the target".
"""
def __init__(self, type):
# Gyp type ("static_library", etc.) of this target.
self.type = type
# File representing whether any input dependencies necessary for
# dependent actions have completed.
self.preaction_stamp = None
# File representing whether any input dependencies necessary for
# dependent compiles have completed.
self.precompile_stamp = None
# File representing the completion of actions/rules/copies, if any.
self.actions_stamp = None
# Path to the output of the link step, if any.
self.binary = None
# Path to the file representing the completion of building the bundle,
# if any.
self.bundle = None
# On Windows, incremental linking requires linking against all the .objs
# that compose a .lib (rather than the .lib itself). That list is stored
# here.
self.component_objs = None
# Windows only. The import .lib is the output of a build step, but
# because dependents only link against the lib (not both the lib and the
# dll) we keep track of the import library here.
self.import_lib = None
def Linkable(self):
"""Return true if this is a target that can be linked against."""
return self.type in ('static_library', 'shared_library')
def UsesToc(self, flavor):
"""Return true if the target should produce a restat rule based on a TOC
file."""
# For bundles, the .TOC should be produced for the binary, not for
# FinalOutput(). But the naive approach would put the TOC file into the
# bundle, so don't do this for bundles for now.
if flavor == 'win' or self.bundle:
return False
return self.type in ('shared_library', 'loadable_module')
def PreActionInput(self, flavor):
"""Return the path, if any, that should be used as a dependency of
any dependent action step."""
if self.UsesToc(flavor):
return self.FinalOutput() + '.TOC'
return self.FinalOutput() or self.preaction_stamp
def PreCompileInput(self):
"""Return the path, if any, that should be used as a dependency of
any dependent compile step."""
return self.actions_stamp or self.precompile_stamp
def FinalOutput(self):
"""Return the last output of the target, which depends on all prior
steps."""
return self.bundle or self.binary or self.actions_stamp
# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file. Call these "gyp paths". This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from. We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory. Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
# into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
# an output file; the result can be namespaced such that it is unique
# to the input file name as well as the output target name.
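#
# A worked example (hypothetical paths): for a target defined in
# chrome/chrome.gyp and built into out/Debug, base_dir is 'chrome' and
# build_to_base is '../../chrome', so:
#   GypPathToNinja('app/main.cc')        => '../../chrome/app/main.cc'
#   GypPathToUniqueOutput('app/main.o')  => 'obj/chrome/app/<target>.main.o'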
class NinjaWriter:
def __init__(self, qualified_target, target_outputs, base_dir, build_dir,
output_file, toplevel_build, output_file_name, flavor,
toplevel_dir=None):
"""
base_dir: path from source root to directory containing this gyp file,
by gyp semantics, all input paths are relative to this
build_dir: path from source root to build output
toplevel_dir: path to the toplevel directory
"""
self.qualified_target = qualified_target
self.target_outputs = target_outputs
self.base_dir = base_dir
self.build_dir = build_dir
self.ninja = ninja_syntax.Writer(output_file)
self.toplevel_build = toplevel_build
self.output_file_name = output_file_name
self.flavor = flavor
self.abs_build_dir = None
if toplevel_dir is not None:
self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir,
build_dir))
self.obj_ext = '.obj' if flavor == 'win' else '.o'
if flavor == 'win':
# See docstring of msvs_emulation.GenerateEnvironmentFiles().
self.win_env = {}
for arch in ('x86', 'x64'):
self.win_env[arch] = 'environment.' + arch
# Relative path from build output dir to base dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir)
self.build_to_base = os.path.join(build_to_top, base_dir)
# Relative path from base dir to build dir.
base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir)
self.base_to_build = os.path.join(base_to_top, build_dir)
def ExpandSpecial(self, path, product_dir=None):
"""Expand specials like $!PRODUCT_DIR in |path|.
If |product_dir| is None, assumes the cwd is already the product
dir. Otherwise, |product_dir| is the relative path to the product
dir.
"""
PRODUCT_DIR = '$!PRODUCT_DIR'
if PRODUCT_DIR in path:
if product_dir:
path = path.replace(PRODUCT_DIR, product_dir)
else:
path = path.replace(PRODUCT_DIR + '/', '')
path = path.replace(PRODUCT_DIR + '\\', '')
path = path.replace(PRODUCT_DIR, '.')
INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR'
if INTERMEDIATE_DIR in path:
int_dir = self.GypPathToUniqueOutput('gen')
# GypPathToUniqueOutput generates a path relative to the product dir,
# so insert product_dir in front if it is provided.
path = path.replace(INTERMEDIATE_DIR,
os.path.join(product_dir or '', int_dir))
CONFIGURATION_NAME = '$|CONFIGURATION_NAME'
path = path.replace(CONFIGURATION_NAME, self.config_name)
return path
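  # Sketch of ExpandSpecial above: with no |product_dir| the prefix simply
  # disappears, since the cwd is assumed to be the product dir already.
  #   ExpandSpecial('$!PRODUCT_DIR/gen/foo')            => 'gen/foo'
  #   ExpandSpecial('$!PRODUCT_DIR/gen/foo', '../out')  => '../out/gen/foo'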
def ExpandRuleVariables(self, path, root, dirname, source, ext, name):
if self.flavor == 'win':
path = self.msvs_settings.ConvertVSMacros(
path, config=self.config_name)
path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root)
path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'],
dirname)
path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source)
path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext)
path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name)
return path
def GypPathToNinja(self, path, env=None):
"""Translate a gyp path to a ninja path, optionally expanding environment
variable references in |path| with |env|.
See the above discourse on path conversions."""
if env:
if self.flavor == 'mac':
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
elif self.flavor == 'win':
path = gyp.msvs_emulation.ExpandMacros(path, env)
if path.startswith('$!'):
expanded = self.ExpandSpecial(path)
if self.flavor == 'win':
expanded = os.path.normpath(expanded)
return expanded
if '$|' in path:
path = self.ExpandSpecial(path)
assert '$' not in path, path
return os.path.normpath(os.path.join(self.build_to_base, path))
def GypPathToUniqueOutput(self, path, qualified=True):
"""Translate a gyp path to a ninja path for writing output.
If qualified is True, qualify the resulting filename with the name
of the target. This is necessary when e.g. compiling the same
path twice for two separate output targets.
See the above discourse on path conversions."""
path = self.ExpandSpecial(path)
assert not path.startswith('$'), path
# Translate the path following this scheme:
# Input: foo/bar.gyp, target targ, references baz/out.o
# Output: obj/foo/baz/targ.out.o (if qualified)
# obj/foo/baz/out.o (otherwise)
# (and obj.host instead of obj for cross-compiles)
#
# Why this scheme and not some other one?
# 1) for a given input, you can compute all derived outputs by matching
# its path, even if the input is brought via a gyp file with '..'.
# 2) simple files like libraries and stamps have a simple filename.
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
path_dir, path_basename = os.path.split(path)
if qualified:
path_basename = self.name + '.' + path_basename
return os.path.normpath(os.path.join(obj, self.base_dir, path_dir,
path_basename))
def WriteCollapsedDependencies(self, name, targets):
"""Given a list of targets, return a path for a single file
representing the result of building all the targets or None.
Uses a stamp file if necessary."""
assert targets == filter(None, targets), targets
if len(targets) == 0:
return None
if len(targets) > 1:
stamp = self.GypPathToUniqueOutput(name + '.stamp')
targets = self.ninja.build(stamp, 'stamp', targets)
self.ninja.newline()
return targets[0]
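  # Behavior of WriteCollapsedDependencies, summarized:
  #   []         => None
  #   ['a']      => 'a'
  #   ['a', 'b'] => path of a fresh '<name>.stamp' that depends on both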
def _SubninjaNameForArch(self, arch):
output_file_base = os.path.splitext(self.output_file_name)[0]
return '%s.%s.ninja' % (output_file_base, arch)
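  # e.g. with output_file_name 'obj/chrome.ninja' and arch 'x64', the
  # per-arch subninja is 'obj/chrome.x64.ninja'.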
def WriteSpec(self, spec, config_name, generator_flags):
"""The main entry point for NinjaWriter: write the build rules for a spec.
Returns a Target object, which represents the output paths for this spec.
Returns None if there are no outputs (e.g. a settings-only 'none' type
target)."""
self.config_name = config_name
self.name = spec['target_name']
self.toolset = spec['toolset']
config = spec['configurations'][config_name]
self.target = Target(spec['type'])
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
# Track if this target contains any C++ files, to decide if gcc or g++
# should be used for linking.
self.uses_cpp = False
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
self.xcode_settings = self.msvs_settings = None
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
if self.flavor == 'win':
self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec,
generator_flags)
arch = self.msvs_settings.GetArch(config_name)
self.ninja.variable('arch', self.win_env[arch])
self.ninja.variable('cc', '$cl_' + arch)
self.ninja.variable('cxx', '$cl_' + arch)
if self.flavor == 'mac':
self.archs = self.xcode_settings.GetActiveArchs(config_name)
if len(self.archs) > 1:
self.arch_subninjas = dict(
(arch, ninja_syntax.Writer(
OpenOutput(os.path.join(self.toplevel_build,
self._SubninjaNameForArch(arch)),
'w')))
for arch in self.archs)
# Compute predepends for all rules.
# actions_depends is the dependencies this target depends on before running
# any of its action/rule/copy steps.
# compile_depends is the dependencies this target depends on before running
# any of its compile steps.
actions_depends = []
compile_depends = []
# TODO(evan): it is rather confusing which things are lists and which
# are strings. Fix these.
if 'dependencies' in spec:
for dep in spec['dependencies']:
if dep in self.target_outputs:
target = self.target_outputs[dep]
actions_depends.append(target.PreActionInput(self.flavor))
compile_depends.append(target.PreCompileInput())
actions_depends = filter(None, actions_depends)
compile_depends = filter(None, compile_depends)
actions_depends = self.WriteCollapsedDependencies('actions_depends',
actions_depends)
compile_depends = self.WriteCollapsedDependencies('compile_depends',
compile_depends)
self.target.preaction_stamp = actions_depends
self.target.precompile_stamp = compile_depends
# Write out actions, rules, and copies. These must happen before we
# compile any sources, so compute a list of predependencies for sources
# while we do it.
extra_sources = []
mac_bundle_depends = []
self.target.actions_stamp = self.WriteActionsRulesCopies(
spec, extra_sources, actions_depends, mac_bundle_depends)
    # If we have actions/rules/copies, we depend directly on those, but
    # otherwise we depend on our dependencies' actions/rules/copies etc.
    # We never need to depend explicitly on a dependency's link step,
    # because no compile ever depends on it.
compile_depends_stamp = (self.target.actions_stamp or compile_depends)
# Write out the compilation steps, if any.
link_deps = []
sources = extra_sources + spec.get('sources', [])
if sources:
if self.flavor == 'mac' and len(self.archs) > 1:
# Write subninja file containing compile and link commands scoped to
# a single arch if a fat binary is being built.
for arch in self.archs:
self.ninja.subninja(self._SubninjaNameForArch(arch))
pch = None
if self.flavor == 'win':
gyp.msvs_emulation.VerifyMissingSources(
sources, self.abs_build_dir, generator_flags, self.GypPathToNinja)
pch = gyp.msvs_emulation.PrecompiledHeader(
self.msvs_settings, config_name, self.GypPathToNinja,
self.GypPathToUniqueOutput, self.obj_ext)
else:
pch = gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, self.GypPathToNinja,
lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang))
link_deps = self.WriteSources(
self.ninja, config_name, config, sources, compile_depends_stamp, pch,
spec)
# Some actions/rules output 'sources' that are already object files.
obj_outputs = [f for f in sources if f.endswith(self.obj_ext)]
if obj_outputs:
if self.flavor != 'mac' or len(self.archs) == 1:
link_deps += [self.GypPathToNinja(o) for o in obj_outputs]
else:
print "Warning: Actions/rules writing object files don't work with " \
"multiarch targets, dropping. (target %s)" % spec['target_name']
if self.flavor == 'win' and self.target.type == 'static_library':
self.target.component_objs = link_deps
# Write out a link step, if needed.
output = None
is_empty_bundle = not link_deps and not mac_bundle_depends
if link_deps or self.target.actions_stamp or actions_depends:
output = self.WriteTarget(spec, config_name, config, link_deps,
self.target.actions_stamp or actions_depends)
if self.is_mac_bundle:
mac_bundle_depends.append(output)
# Bundle all of the above together, if needed.
if self.is_mac_bundle:
output = self.WriteMacBundle(spec, mac_bundle_depends, is_empty_bundle)
if not output:
return None
assert self.target.FinalOutput(), output
return self.target
def _WinIdlRule(self, source, prebuild, outputs):
"""Handle the implicit VS .idl rule for one source file. Fills |outputs|
with files that are generated."""
outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData(
source, self.config_name)
outdir = self.GypPathToNinja(outdir)
def fix_path(path, rel=None):
path = os.path.join(outdir, path)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
path = self.ExpandRuleVariables(
path, root, dirname, source, ext, basename)
if rel:
path = os.path.relpath(path, rel)
return path
vars = [(name, fix_path(value, outdir)) for name, value in vars]
output = [fix_path(p) for p in output]
vars.append(('outdir', outdir))
vars.append(('idlflags', flags))
input = self.GypPathToNinja(source)
self.ninja.build(output, 'idl', input,
variables=vars, order_only=prebuild)
outputs.extend(output)
def WriteWinIdlFiles(self, spec, prebuild):
"""Writes rules to match MSVS's implicit idl handling."""
assert self.flavor == 'win'
if self.msvs_settings.HasExplicitIdlRules(spec):
return []
outputs = []
for source in filter(lambda x: x.endswith('.idl'), spec['sources']):
self._WinIdlRule(source, prebuild, outputs)
return outputs
def WriteActionsRulesCopies(self, spec, extra_sources, prebuild,
mac_bundle_depends):
"""Write out the Actions, Rules, and Copies steps. Return a path
representing the outputs of these steps."""
outputs = []
if self.is_mac_bundle:
mac_bundle_resources = spec.get('mac_bundle_resources', [])[:]
else:
mac_bundle_resources = []
extra_mac_bundle_resources = []
if 'actions' in spec:
outputs += self.WriteActions(spec['actions'], extra_sources, prebuild,
extra_mac_bundle_resources)
if 'rules' in spec:
outputs += self.WriteRules(spec['rules'], extra_sources, prebuild,
mac_bundle_resources,
extra_mac_bundle_resources)
if 'copies' in spec:
outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends)
if 'sources' in spec and self.flavor == 'win':
outputs += self.WriteWinIdlFiles(spec, prebuild)
stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs)
if self.is_mac_bundle:
self.WriteMacBundleResources(
extra_mac_bundle_resources + mac_bundle_resources, mac_bundle_depends)
self.WriteMacInfoPlist(mac_bundle_depends)
return stamp
def GenerateDescription(self, verb, message, fallback):
"""Generate and return a description of a build step.
|verb| is the short summary, e.g. ACTION or RULE.
|message| is a hand-written description, or None if not available.
|fallback| is the gyp-level name of the step, usable as a fallback.
"""
if self.toolset != 'target':
verb += '(%s)' % self.toolset
if message:
return '%s %s' % (verb, self.ExpandSpecial(message))
else:
return '%s %s: %s' % (verb, self.name, fallback)
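  # For example, a host-toolset action 'gen_foo' on target 'base' with no
  # message yields the description 'ACTION(host) base: gen_foo'.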
def WriteActions(self, actions, extra_sources, prebuild,
extra_mac_bundle_resources):
# Actions cd into the base directory.
env = self.GetSortedXcodeEnv()
if self.flavor == 'win':
env = self.msvs_settings.GetVSMacroEnv(
'$!PRODUCT_DIR', config=self.config_name)
all_outputs = []
for action in actions:
# First write out a rule for the action.
name = '%s_%s' % (action['action_name'],
hashlib.md5(self.qualified_target).hexdigest())
description = self.GenerateDescription('ACTION',
action.get('message', None),
name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action)
if self.flavor == 'win' else False)
args = action['action']
rule_name, _ = self.WriteNewNinjaRule(name, args, description,
is_cygwin, env=env)
inputs = [self.GypPathToNinja(i, env) for i in action['inputs']]
if int(action.get('process_outputs_as_sources', False)):
extra_sources += action['outputs']
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += action['outputs']
outputs = [self.GypPathToNinja(o, env) for o in action['outputs']]
# Then write out an edge using the rule.
self.ninja.build(outputs, rule_name, inputs,
order_only=prebuild)
all_outputs += outputs
self.ninja.newline()
return all_outputs
def WriteRules(self, rules, extra_sources, prebuild,
mac_bundle_resources, extra_mac_bundle_resources):
env = self.GetSortedXcodeEnv()
all_outputs = []
for rule in rules:
# First write out a rule for the rule action.
name = '%s_%s' % (rule['rule_name'],
hashlib.md5(self.qualified_target).hexdigest())
# Skip a rule with no action and no inputs.
if 'action' not in rule and not rule.get('rule_sources', []):
continue
args = rule['action']
description = self.GenerateDescription(
'RULE',
rule.get('message', None),
('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule)
if self.flavor == 'win' else False)
rule_name, args = self.WriteNewNinjaRule(
name, args, description, is_cygwin, env=env)
# TODO: if the command references the outputs directly, we should
# simplify it to just use $out.
# Rules can potentially make use of some special variables which
# must vary per source file.
# Compute the list of variables we'll need to provide.
special_locals = ('source', 'root', 'dirname', 'ext', 'name')
needed_variables = set(['source'])
for argument in args:
for var in special_locals:
if ('${%s}' % var) in argument:
needed_variables.add(var)
def cygwin_munge(path):
if is_cygwin:
return path.replace('\\', '/')
return path
# For each source file, write an edge that generates all the outputs.
for source in rule.get('rule_sources', []):
source = os.path.normpath(source)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
# Gather the list of inputs and outputs, expanding $vars if possible.
outputs = [self.ExpandRuleVariables(o, root, dirname,
source, ext, basename)
for o in rule['outputs']]
inputs = [self.ExpandRuleVariables(i, root, dirname,
source, ext, basename)
for i in rule.get('inputs', [])]
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
was_mac_bundle_resource = source in mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Note: This is n_resources * n_outputs_in_rule. Put to-be-removed
# items in a set and remove them all in a single pass if this becomes
# a performance issue.
if was_mac_bundle_resource:
mac_bundle_resources.remove(source)
extra_bindings = []
for var in needed_variables:
if var == 'root':
extra_bindings.append(('root', cygwin_munge(root)))
elif var == 'dirname':
# '$dirname' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build)
extra_bindings.append(('dirname', cygwin_munge(dirname_expanded)))
elif var == 'source':
# '$source' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
source_expanded = self.ExpandSpecial(source, self.base_to_build)
extra_bindings.append(('source', cygwin_munge(source_expanded)))
elif var == 'ext':
extra_bindings.append(('ext', ext))
elif var == 'name':
extra_bindings.append(('name', cygwin_munge(basename)))
else:
            assert var is None, repr(var)
inputs = [self.GypPathToNinja(i, env) for i in inputs]
outputs = [self.GypPathToNinja(o, env) for o in outputs]
extra_bindings.append(('unique_name',
hashlib.md5(outputs[0]).hexdigest()))
self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
implicit=inputs,
order_only=prebuild,
variables=extra_bindings)
all_outputs.extend(outputs)
return all_outputs
def WriteCopies(self, copies, prebuild, mac_bundle_depends):
outputs = []
env = self.GetSortedXcodeEnv()
for copy in copies:
for path in copy['files']:
# Normalize the path so trailing slashes don't confuse us.
path = os.path.normpath(path)
basename = os.path.split(path)[1]
src = self.GypPathToNinja(path, env)
dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
env)
outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
if self.is_mac_bundle:
# gyp has mac_bundle_resources to copy things into a bundle's
# Resources folder, but there's no built-in way to copy files to other
# places in the bundle. Hence, some targets use copies for this. Check
# if this file is copied into the current bundle, and if so add it to
# the bundle depends so that dependent targets get rebuilt if the copy
# input changes.
if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()):
mac_bundle_depends.append(dst)
return outputs
def WriteMacBundleResources(self, resources, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources'."""
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, map(self.GypPathToNinja, resources)):
output = self.ExpandSpecial(output)
self.ninja.build(output, 'mac_tool', res,
variables=[('mactool_cmd', 'copy-bundle-resource')])
bundle_depends.append(output)
def WriteMacInfoPlist(self, bundle_depends):
"""Write build rules for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, self.GypPathToNinja)
if not info_plist:
return
out = self.ExpandSpecial(out)
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = self.GypPathToUniqueOutput(
os.path.basename(info_plist))
defines = ' '.join([Define(d, self.flavor) for d in defines])
info_plist = self.ninja.build(
intermediate_plist, 'preprocess_infoplist', info_plist,
variables=[('defines',defines)])
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
keys = self.xcode_settings.GetExtraPlistItems(self.config_name)
keys = QuoteShellArgument(json.dumps(keys), self.flavor)
self.ninja.build(out, 'copy_infoplist', info_plist,
variables=[('env', env), ('keys', keys)])
bundle_depends.append(out)
def WriteSources(self, ninja_file, config_name, config, sources, predepends,
precompiled_header, spec):
"""Write build rules to compile all of |sources|."""
if self.toolset == 'host':
self.ninja.variable('ar', '$ar_host')
self.ninja.variable('cc', '$cc_host')
self.ninja.variable('cxx', '$cxx_host')
self.ninja.variable('ld', '$ld_host')
self.ninja.variable('ldxx', '$ldxx_host')
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteSourcesForArch(
self.ninja, config_name, config, sources, predepends,
precompiled_header, spec)
else:
return dict((arch, self.WriteSourcesForArch(
self.arch_subninjas[arch], config_name, config, sources, predepends,
precompiled_header, spec, arch=arch))
for arch in self.archs)
def WriteSourcesForArch(self, ninja_file, config_name, config, sources,
predepends, precompiled_header, spec, arch=None):
"""Write build rules to compile all of |sources|."""
extra_defines = []
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(config_name, arch=arch)
cflags_c = self.xcode_settings.GetCflagsC(config_name)
cflags_cc = self.xcode_settings.GetCflagsCC(config_name)
cflags_objc = ['$cflags_c'] + \
self.xcode_settings.GetCflagsObjC(config_name)
cflags_objcc = ['$cflags_cc'] + \
self.xcode_settings.GetCflagsObjCC(config_name)
elif self.flavor == 'win':
cflags = self.msvs_settings.GetCflags(config_name)
cflags_c = self.msvs_settings.GetCflagsC(config_name)
cflags_cc = self.msvs_settings.GetCflagsCC(config_name)
extra_defines = self.msvs_settings.GetComputedDefines(config_name)
      # See comment at cc_command for why there are two .pdb files.
pdbpath_c = pdbpath_cc = self.msvs_settings.GetCompilerPdbName(
config_name, self.ExpandSpecial)
if not pdbpath_c:
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
pdbpath = os.path.normpath(os.path.join(obj, self.base_dir, self.name))
pdbpath_c = pdbpath + '.c.pdb'
pdbpath_cc = pdbpath + '.cc.pdb'
self.WriteVariableList(ninja_file, 'pdbname_c', [pdbpath_c])
self.WriteVariableList(ninja_file, 'pdbname_cc', [pdbpath_cc])
self.WriteVariableList(ninja_file, 'pchprefix', [self.name])
else:
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cc = config.get('cflags_cc', [])
# Respect environment variables related to build, but target-specific
# flags can still override them.
if self.toolset == 'target':
cflags_c = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CFLAGS', '').split() + cflags_c)
cflags_cc = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CXXFLAGS', '').split() + cflags_cc)
defines = config.get('defines', []) + extra_defines
self.WriteVariableList(ninja_file, 'defines',
[Define(d, self.flavor) for d in defines])
if self.flavor == 'win':
self.WriteVariableList(ninja_file, 'rcflags',
[QuoteShellArgument(self.ExpandSpecial(f), self.flavor)
for f in self.msvs_settings.GetRcflags(config_name,
self.GypPathToNinja)])
include_dirs = config.get('include_dirs', [])
env = self.GetSortedXcodeEnv()
if self.flavor == 'win':
env = self.msvs_settings.GetVSMacroEnv('$!PRODUCT_DIR',
config=config_name)
include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs,
config_name)
self.WriteVariableList(ninja_file, 'includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in include_dirs])
pch_commands = precompiled_header.GetPchBuildCommands(arch)
if self.flavor == 'mac':
# Most targets use no precompiled headers, so only write these if needed.
for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'),
('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]:
include = precompiled_header.GetInclude(ext, arch)
if include: ninja_file.variable(var, include)
self.WriteVariableList(ninja_file, 'cflags',
map(self.ExpandSpecial, cflags))
self.WriteVariableList(ninja_file, 'cflags_c',
map(self.ExpandSpecial, cflags_c))
self.WriteVariableList(ninja_file, 'cflags_cc',
map(self.ExpandSpecial, cflags_cc))
if self.flavor == 'mac':
self.WriteVariableList(ninja_file, 'cflags_objc',
map(self.ExpandSpecial, cflags_objc))
self.WriteVariableList(ninja_file, 'cflags_objcc',
map(self.ExpandSpecial, cflags_objcc))
ninja_file.newline()
outputs = []
has_rc_source = False
for source in sources:
filename, ext = os.path.splitext(source)
ext = ext[1:]
obj_ext = self.obj_ext
if ext in ('cc', 'cpp', 'cxx'):
command = 'cxx'
self.uses_cpp = True
elif ext == 'c' or (ext == 'S' and self.flavor != 'win'):
command = 'cc'
elif ext == 's' and self.flavor != 'win': # Doesn't generate .o.d files.
command = 'cc_s'
elif (self.flavor == 'win' and ext == 'asm' and
self.msvs_settings.GetArch(config_name) == 'x86' and
not self.msvs_settings.HasExplicitAsmRules(spec)):
# Asm files only get auto assembled for x86 (not x64).
command = 'asm'
# Add the _asm suffix as msvs is capable of handling .cc and
# .asm files of the same name without collision.
obj_ext = '_asm.obj'
elif self.flavor == 'mac' and ext == 'm':
command = 'objc'
elif self.flavor == 'mac' and ext == 'mm':
command = 'objcxx'
self.uses_cpp = True
elif self.flavor == 'win' and ext == 'rc':
command = 'rc'
obj_ext = '.res'
has_rc_source = True
else:
# Ignore unhandled extensions.
continue
input = self.GypPathToNinja(source)
output = self.GypPathToUniqueOutput(filename + obj_ext)
if arch is not None:
output = AddArch(output, arch)
implicit = precompiled_header.GetObjDependencies([input], [output], arch)
variables = []
if self.flavor == 'win':
variables, output, implicit = precompiled_header.GetFlagsModifications(
input, output, implicit, command, cflags_c, cflags_cc,
self.ExpandSpecial)
ninja_file.build(output, command, input,
implicit=[gch for _, _, gch in implicit],
order_only=predepends, variables=variables)
outputs.append(output)
if has_rc_source:
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
self.WriteVariableList(ninja_file, 'resource_includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in resource_include_dirs])
self.WritePchTargets(ninja_file, pch_commands)
ninja_file.newline()
return outputs
def WritePchTargets(self, ninja_file, pch_commands):
"""Writes ninja rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
var_name = {
'c': 'cflags_pch_c',
'cc': 'cflags_pch_cc',
'm': 'cflags_pch_objc',
'mm': 'cflags_pch_objcc',
}[lang]
      command_map = {'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx'}
      cmd = command_map.get(lang)
ninja_file.build(gch, cmd, input, variables=[(var_name, lang_flag)])
def WriteLink(self, spec, config_name, config, link_deps):
"""Write out a link step. Fills out target.binary. """
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteLinkForArch(
self.ninja, spec, config_name, config, link_deps)
else:
output = self.ComputeOutput(spec)
inputs = [self.WriteLinkForArch(self.arch_subninjas[arch], spec,
config_name, config, link_deps[arch],
arch=arch)
for arch in self.archs]
extra_bindings = []
if not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
self.ninja.build(output, 'lipo', inputs, variables=extra_bindings)
return output
def WriteLinkForArch(self, ninja_file, spec, config_name, config,
link_deps, arch=None):
"""Write out a link step. Fills out target.binary. """
command = {
'executable': 'link',
'loadable_module': 'solink_module',
'shared_library': 'solink',
}[spec['type']]
command_suffix = ''
implicit_deps = set()
solibs = set()
if 'dependencies' in spec:
# Two kinds of dependencies:
# - Linkable dependencies (like a .a or a .so): add them to the link line.
# - Non-linkable dependencies (like a rule that generates a file
# and writes a stamp file): add them to implicit_deps
extra_link_deps = set()
for dep in spec['dependencies']:
target = self.target_outputs.get(dep)
if not target:
continue
linkable = target.Linkable()
if linkable:
new_deps = []
if (self.flavor == 'win' and
target.component_objs and
self.msvs_settings.IsUseLibraryDependencyInputs(config_name)):
new_deps = target.component_objs
elif self.flavor == 'win' and target.import_lib:
new_deps = [target.import_lib]
elif target.UsesToc(self.flavor):
solibs.add(target.binary)
implicit_deps.add(target.binary + '.TOC')
else:
new_deps = [target.binary]
for new_dep in new_deps:
if new_dep not in extra_link_deps:
extra_link_deps.add(new_dep)
link_deps.append(new_dep)
final_output = target.FinalOutput()
if not linkable or final_output != target.binary:
implicit_deps.add(final_output)
extra_bindings = []
if self.uses_cpp and self.flavor != 'win':
extra_bindings.append(('ld', '$ldxx'))
output = self.ComputeOutput(spec, arch)
if arch is None and not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
is_executable = spec['type'] == 'executable'
# The ldflags config key is not used on mac or win. On those platforms
# linker flags are set via xcode_settings and msvs_settings, respectively.
env_ldflags = os.environ.get('LDFLAGS', '').split()
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(config_name,
self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
self.GypPathToNinja, arch)
ldflags = env_ldflags + ldflags
elif self.flavor == 'win':
manifest_base_name = self.GypPathToUniqueOutput(
self.ComputeOutputFileName(spec))
ldflags, intermediate_manifest, manifest_files = \
self.msvs_settings.GetLdflags(config_name, self.GypPathToNinja,
self.ExpandSpecial, manifest_base_name,
output, is_executable,
self.toplevel_build)
ldflags = env_ldflags + ldflags
self.WriteVariableList(ninja_file, 'manifests', manifest_files)
implicit_deps = implicit_deps.union(manifest_files)
if intermediate_manifest:
self.WriteVariableList(
ninja_file, 'intermediatemanifest', [intermediate_manifest])
command_suffix = _GetWinLinkRuleNameSuffix(
self.msvs_settings.IsEmbedManifest(config_name))
def_file = self.msvs_settings.GetDefFile(self.GypPathToNinja)
if def_file:
implicit_deps.add(def_file)
else:
# Respect environment variables related to build, but target-specific
# flags can still override them.
ldflags = env_ldflags + config.get('ldflags', [])
if is_executable and len(solibs):
rpath = 'lib/'
if self.toolset != 'target':
rpath += self.toolset
ldflags.append('-Wl,-rpath=\$$ORIGIN/%s' % rpath)
ldflags.append('-Wl,-rpath-link=%s' % rpath)
self.WriteVariableList(ninja_file, 'ldflags',
gyp.common.uniquer(map(self.ExpandSpecial, ldflags)))
library_dirs = config.get('library_dirs', [])
if self.flavor == 'win':
library_dirs = [self.msvs_settings.ConvertVSMacros(l, config_name)
for l in library_dirs]
library_dirs = ['/LIBPATH:' + QuoteShellArgument(self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
else:
library_dirs = [QuoteShellArgument('-L' + self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
libraries = gyp.common.uniquer(map(self.ExpandSpecial,
spec.get('libraries', [])))
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries, config_name)
elif self.flavor == 'win':
libraries = self.msvs_settings.AdjustLibraries(libraries)
self.WriteVariableList(ninja_file, 'libs', library_dirs + libraries)
linked_binary = output
if command in ('solink', 'solink_module'):
extra_bindings.append(('soname', os.path.split(output)[1]))
extra_bindings.append(('lib',
gyp.common.EncodePOSIXShellArgument(output)))
if self.flavor == 'win':
extra_bindings.append(('binary', output))
if '/NOENTRY' not in ldflags:
self.target.import_lib = output + '.lib'
extra_bindings.append(('implibflag',
'/IMPLIB:%s' % self.target.import_lib))
pdbname = self.msvs_settings.GetPDBName(
config_name, self.ExpandSpecial, output + '.pdb')
output = [output, self.target.import_lib]
if pdbname:
output.append(pdbname)
elif not self.is_mac_bundle:
output = [output, output + '.TOC']
else:
command = command + '_notoc'
elif self.flavor == 'win':
extra_bindings.append(('binary', output))
pdbname = self.msvs_settings.GetPDBName(
config_name, self.ExpandSpecial, output + '.pdb')
if pdbname:
output = [output, pdbname]
if len(solibs):
extra_bindings.append(('solibs', gyp.common.EncodePOSIXShellList(solibs)))
ninja_file.build(output, command + command_suffix, link_deps,
implicit=list(implicit_deps),
variables=extra_bindings)
return linked_binary
def WriteTarget(self, spec, config_name, config, link_deps, compile_deps):
extra_link_deps = any(self.target_outputs.get(dep).Linkable()
for dep in spec.get('dependencies', [])
if dep in self.target_outputs)
if spec['type'] == 'none' or (not link_deps and not extra_link_deps):
# TODO(evan): don't call this function for 'none' target types, as
# it doesn't do anything, and we fake out a 'binary' with a stamp file.
self.target.binary = compile_deps
self.target.type = 'none'
elif spec['type'] == 'static_library':
self.target.binary = self.ComputeOutput(spec)
if (self.flavor not in ('mac', 'openbsd', 'win') and not
self.is_standalone_static_library):
self.ninja.build(self.target.binary, 'alink_thin', link_deps,
order_only=compile_deps)
else:
variables = []
if self.xcode_settings:
libtool_flags = self.xcode_settings.GetLibtoolflags(config_name)
if libtool_flags:
variables.append(('libtool_flags', libtool_flags))
if self.msvs_settings:
libflags = self.msvs_settings.GetLibFlags(config_name,
self.GypPathToNinja)
variables.append(('libflags', libflags))
if self.flavor != 'mac' or len(self.archs) == 1:
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', link_deps,
order_only=compile_deps, variables=variables)
else:
inputs = []
for arch in self.archs:
output = self.ComputeOutput(spec, arch)
self.arch_subninjas[arch].build(output, 'alink', link_deps[arch],
order_only=compile_deps,
variables=variables)
inputs.append(output)
# TODO: It's not clear if libtool_flags should be passed to the alink
# call that combines single-arch .a files into a fat .a file.
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', inputs,
# FIXME: test proving order_only=compile_deps isn't
# needed.
variables=variables)
else:
self.target.binary = self.WriteLink(spec, config_name, config, link_deps)
return self.target.binary
def WriteMacBundle(self, spec, mac_bundle_depends, is_empty):
assert self.is_mac_bundle
package_framework = spec['type'] in ('shared_library', 'loadable_module')
output = self.ComputeMacBundleOutput()
if is_empty:
output += '.stamp'
variables = []
self.AppendPostbuildVariable(variables, spec, output, self.target.binary,
is_command_start=not package_framework)
if package_framework and not is_empty:
variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
self.ninja.build(output, 'package_framework', mac_bundle_depends,
variables=variables)
else:
self.ninja.build(output, 'stamp', mac_bundle_depends,
variables=variables)
self.target.bundle = output
return output
def GetSortedXcodeEnv(self, additional_settings=None):
"""Returns the variables Xcode would set for build steps."""
assert self.abs_build_dir
abs_build_dir = self.abs_build_dir
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, abs_build_dir,
os.path.join(abs_build_dir, self.build_to_base), self.config_name,
additional_settings)
def GetSortedXcodePostbuildEnv(self):
"""Returns the variables Xcode would set for postbuild steps."""
postbuild_settings = {}
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE')
if strip_save_file:
postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file
return self.GetSortedXcodeEnv(additional_settings=postbuild_settings)
def AppendPostbuildVariable(self, variables, spec, output, binary,
is_command_start=False):
"""Adds a 'postbuild' variable if there is a postbuild for |output|."""
postbuild = self.GetPostbuildCommand(spec, output, binary, is_command_start)
if postbuild:
variables.append(('postbuilds', postbuild))
def GetPostbuildCommand(self, spec, output, output_binary, is_command_start):
"""Returns a shell command that runs all the postbuilds, and removes
|output| if any of them fails. If |is_command_start| is False, then the
returned string will start with ' && '."""
if not self.xcode_settings or spec['type'] == 'none' or not output:
return ''
output = QuoteShellArgument(output, self.flavor)
postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True)
if output_binary is not None:
postbuilds = self.xcode_settings.AddImplicitPostbuilds(
self.config_name,
os.path.normpath(os.path.join(self.base_to_build, output)),
QuoteShellArgument(
os.path.normpath(os.path.join(self.base_to_build, output_binary)),
self.flavor),
postbuilds, quiet=True)
if not postbuilds:
return ''
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(
['cd', self.build_to_base]))
env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv())
    # G will be nonzero (the failing exit status) if any postbuild fails.
    # Run all the postbuilds in a subshell.
commands = env + ' (' + \
' && '.join([ninja_syntax.escape(command) for command in postbuilds])
command_string = (commands + '); G=$$?; '
# Remove the final output if any postbuild failed.
'((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)')
if is_command_start:
return '(' + command_string + ' && '
else:
return '$ && (' + command_string
def ComputeExportEnvString(self, env):
"""Given an environment, returns a string looking like
    'export FOO=foo; export BAR="${FOO} bar";'
that exports |env| to the shell."""
export_str = []
for k, v in env:
export_str.append('export %s=%s;' %
(k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(v))))
return ' '.join(export_str)
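  # e.g. ComputeExportEnvString([('FOO', 'a b'), ('N', '1')]) yields
  # 'export FOO="a b"; export N=1;' (quoting comes from
  # gyp.common.EncodePOSIXShellArgument).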
def ComputeMacBundleOutput(self):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return self.ExpandSpecial(
os.path.join(path, self.xcode_settings.GetWrapperName()))
def ComputeOutputFileName(self, spec, type=None):
"""Compute the filename of the final output for the current target."""
if not type:
type = spec['type']
default_variables = copy.copy(generator_default_variables)
CalculateVariables(default_variables, {'flavor': self.flavor})
# Compute filename prefix: the product prefix, or a default for
# the product type.
DEFAULT_PREFIX = {
'loadable_module': default_variables['SHARED_LIB_PREFIX'],
'shared_library': default_variables['SHARED_LIB_PREFIX'],
'static_library': default_variables['STATIC_LIB_PREFIX'],
'executable': default_variables['EXECUTABLE_PREFIX'],
}
prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))
# Compute filename extension: the product extension, or a default
# for the product type.
DEFAULT_EXTENSION = {
'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
'shared_library': default_variables['SHARED_LIB_SUFFIX'],
'static_library': default_variables['STATIC_LIB_SUFFIX'],
'executable': default_variables['EXECUTABLE_SUFFIX'],
}
extension = spec.get('product_extension')
if extension:
extension = '.' + extension
else:
extension = DEFAULT_EXTENSION.get(type, '')
if 'product_name' in spec:
# If we were given an explicit name, use that.
target = spec['product_name']
else:
# Otherwise, derive a name from the target name.
target = spec['target_name']
if prefix == 'lib':
# Snip out an extra 'lib' from libs if appropriate.
target = StripPrefix(target, 'lib')
if type in ('static_library', 'loadable_module', 'shared_library',
'executable'):
return '%s%s%s' % (prefix, target, extension)
elif type == 'none':
return '%s.stamp' % target
else:
raise Exception('Unhandled output type %s' % type)
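  # Illustrative defaults (assuming the usual linux prefixes/suffixes): a
  # 'shared_library' target 'foo' becomes 'libfoo.so', a 'static_library'
  # becomes 'libfoo.a', and a 'none' target produces 'foo.stamp'.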
def ComputeOutput(self, spec, arch=None):
"""Compute the path for the final output of the spec."""
type = spec['type']
if self.flavor == 'win':
override = self.msvs_settings.GetOutputName(self.config_name,
self.ExpandSpecial)
if override:
return override
if arch is None and self.flavor == 'mac' and type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
filename = self.xcode_settings.GetExecutablePath()
else:
filename = self.ComputeOutputFileName(spec, type)
if arch is None and 'product_dir' in spec:
path = os.path.join(spec['product_dir'], filename)
return self.ExpandSpecial(path)
# Some products go into the output root, libraries go into shared library
# dir, and everything else goes into the normal place.
type_in_output_root = ['executable', 'loadable_module']
if self.flavor == 'mac' and self.toolset == 'target':
type_in_output_root += ['shared_library', 'static_library']
elif self.flavor == 'win' and self.toolset == 'target':
type_in_output_root += ['shared_library']
if arch is not None:
# Make sure partial executables don't end up in a bundle or the regular
# output directory.
archdir = 'arch'
if self.toolset != 'target':
archdir = os.path.join('arch', '%s' % self.toolset)
return os.path.join(archdir, AddArch(filename, arch))
elif type in type_in_output_root or self.is_standalone_static_library:
return filename
elif type == 'shared_library':
libdir = 'lib'
if self.toolset != 'target':
libdir = os.path.join('lib', '%s' % self.toolset)
return os.path.join(libdir, filename)
else:
return self.GypPathToUniqueOutput(filename, qualified=False)
def WriteVariableList(self, ninja_file, var, values):
assert not isinstance(values, str)
if values is None:
values = []
ninja_file.variable(var, ' '.join(values))
def WriteNewNinjaRule(self, name, args, description, is_cygwin, env):
"""Write out a new ninja "rule" statement for a given command.
Returns the name of the new rule, and a copy of |args| with variables
expanded."""
if self.flavor == 'win':
args = [self.msvs_settings.ConvertVSMacros(
arg, self.base_to_build, config=self.config_name)
for arg in args]
description = self.msvs_settings.ConvertVSMacros(
description, config=self.config_name)
elif self.flavor == 'mac':
# |env| is an empty list on non-mac.
args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args]
description = gyp.xcode_emulation.ExpandEnvVars(description, env)
# TODO: we shouldn't need to qualify names; we do it because
# currently the ninja rule namespace is global, but it really
# should be scoped to the subninja.
rule_name = self.name
if self.toolset == 'target':
rule_name += '.' + self.toolset
rule_name += '.' + name
rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name)
# Remove variable references, but not if they refer to the magic rule
# variables. This is not quite right, as it also protects these for
# actions, not just for rules where they are valid. Good enough.
protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ]
protect = '(?!' + '|'.join(map(re.escape, protect)) + ')'
description = re.sub(protect + r'\$', '_', description)
# gyp dictates that commands are run from the base directory.
# cd into the directory before running, and adjust paths in
# the arguments to point to the proper locations.
rspfile = None
rspfile_content = None
args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
if self.flavor == 'win':
rspfile = rule_name + '.$unique_name.rsp'
# The cygwin case handles this inside the bash sub-shell.
run_in = '' if is_cygwin else ' ' + self.build_to_base
if is_cygwin:
rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine(
args, self.build_to_base)
else:
rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args)
command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable +
rspfile + run_in)
else:
env = self.ComputeExportEnvString(env)
command = gyp.common.EncodePOSIXShellList(args)
command = 'cd %s; ' % self.build_to_base + env + command
# GYP rules/actions express being no-ops by not touching their outputs.
# Avoid executing downstream dependencies in this case by specifying
# restat=1 to ninja.
self.ninja.rule(rule_name, command, description, restat=True,
rspfile=rspfile, rspfile_content=rspfile_content)
self.ninja.newline()
return rule_name, args
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
global generator_additional_non_configuration_keys
global generator_additional_path_sections
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Ninja generator.
import gyp.generator.xcode as xcode_generator
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
elif flavor == 'win':
default_variables.setdefault('OS', 'win')
default_variables['EXECUTABLE_SUFFIX'] = '.exe'
default_variables['STATIC_LIB_PREFIX'] = ''
default_variables['STATIC_LIB_SUFFIX'] = '.lib'
default_variables['SHARED_LIB_PREFIX'] = ''
default_variables['SHARED_LIB_SUFFIX'] = '.dll'
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'lib'))
default_variables.setdefault('LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'obj'))
def ComputeOutputDir(params):
"""Returns the path from the toplevel_dir to the build output directory."""
# generator_dir: relative path from pwd to where make puts build files.
  # Makes migrating from make to ninja easier; ninja doesn't put anything here.
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = params.get('generator_flags', {}).get('output_dir', 'out')
# Relative path from source root to our output files. e.g. "out"
return os.path.normpath(os.path.join(generator_dir, output_dir))
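# e.g. with no generator_output and the default 'out' flag this returns 'out';
# with generator_output 'gypfiles' it would return 'gypfiles/out'.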
def CalculateGeneratorInputInfo(params):
"""Called by __init__ to initialize generator values based on params."""
# E.g. "out/gypfiles"
toplevel = params['options'].toplevel_dir
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, ComputeOutputDir(params), 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def OpenOutput(path, mode='w'):
"""Open |path| for writing, creating directories if necessary."""
gyp.common.EnsureDirExists(path)
return open(path, mode)
def CommandWithWrapper(cmd, wrappers, prog):
wrapper = wrappers.get(cmd, '')
if wrapper:
return wrapper + ' ' + prog
return prog
def GetDefaultConcurrentLinks():
"""Returns a best-guess for a number of concurrent links."""
if sys.platform in ('win32', 'cygwin'):
import ctypes
class MEMORYSTATUSEX(ctypes.Structure):
_fields_ = [
("dwLength", ctypes.c_ulong),
("dwMemoryLoad", ctypes.c_ulong),
("ullTotalPhys", ctypes.c_ulonglong),
("ullAvailPhys", ctypes.c_ulonglong),
("ullTotalPageFile", ctypes.c_ulonglong),
("ullAvailPageFile", ctypes.c_ulonglong),
("ullTotalVirtual", ctypes.c_ulonglong),
("ullAvailVirtual", ctypes.c_ulonglong),
("sullAvailExtendedVirtual", ctypes.c_ulonglong),
]
stat = MEMORYSTATUSEX()
stat.dwLength = ctypes.sizeof(stat)
ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
mem_limit = max(1, stat.ullTotalPhys / (4 * (2 ** 30))) # total / 4GB
hard_cap = max(1, int(os.getenv('GYP_LINK_CONCURRENCY_MAX', 2**32)))
# return min(mem_limit, hard_cap)
# TODO(scottmg): Temporary speculative fix for OOM on builders
# See http://crbug.com/333000.
return 2
elif sys.platform.startswith('linux'):
with open("/proc/meminfo") as meminfo:
memtotal_re = re.compile(r'^MemTotal:\s*(\d*)\s*kB')
for line in meminfo:
match = memtotal_re.match(line)
if not match:
continue
        # Allow 8 GB per link on Linux because Gold is quite memory hungry.
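        # Worked example: a MemTotal of 16777216 kB (16 GB) divided by
        # 8 * 2**20 kB (8 GB) allows 2 concurrent links.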
return max(1, int(match.group(1)) / (8 * (2 ** 20)))
return 1
elif sys.platform == 'darwin':
try:
avail_bytes = int(subprocess.check_output(['sysctl', '-n', 'hw.memsize']))
# A static library debug build of Chromium's unit_tests takes ~2.7GB, so
# 4GB per ld process allows for some more bloat.
return max(1, avail_bytes / (4 * (2 ** 30))) # total / 4GB
    except Exception:
return 1
else:
# TODO(scottmg): Implement this for other platforms.
return 1
def _GetWinLinkRuleNameSuffix(embed_manifest):
"""Returns the suffix used to select an appropriate linking rule depending on
whether the manifest embedding is enabled."""
return '_embed' if embed_manifest else ''
def _AddWinLinkRules(master_ninja, embed_manifest):
"""Adds link rules for Windows platform to |master_ninja|."""
def FullLinkCommand(ldcmd, out, binary_type):
resource_name = {
'exe': '1',
'dll': '2',
}[binary_type]
return '%(python)s gyp-win-tool link-with-manifests $arch %(embed)s ' \
'%(out)s "%(ldcmd)s" %(resname)s $mt $rc "$intermediatemanifest" ' \
'$manifests' % {
'python': sys.executable,
'out': out,
'ldcmd': ldcmd,
'resname': resource_name,
'embed': embed_manifest }
rule_name_suffix = _GetWinLinkRuleNameSuffix(embed_manifest)
use_separate_mspdbsrv = (
int(os.environ.get('GYP_USE_SEPARATE_MSPDBSRV', '0')) != 0)
dlldesc = 'LINK%s(DLL) $binary' % rule_name_suffix.upper()
dllcmd = ('%s gyp-win-tool link-wrapper $arch %s '
'$ld /nologo $implibflag /DLL /OUT:$binary '
'@$binary.rsp' % (sys.executable, use_separate_mspdbsrv))
dllcmd = FullLinkCommand(dllcmd, '$binary', 'dll')
master_ninja.rule('solink' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$binary.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
master_ninja.rule('solink_module' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$binary.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
# Note that ldflags goes at the end so that it has the option of
# overriding default settings earlier in the command line.
exe_cmd = ('%s gyp-win-tool link-wrapper $arch %s '
'$ld /nologo /OUT:$binary @$binary.rsp' %
(sys.executable, use_separate_mspdbsrv))
exe_cmd = FullLinkCommand(exe_cmd, '$binary', 'exe')
master_ninja.rule('link' + rule_name_suffix,
description='LINK%s $binary' % rule_name_suffix.upper(),
command=exe_cmd,
rspfile='$binary.rsp',
rspfile_content='$in_newline $libs $ldflags',
pool='link_pool')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(
os.path.join(ComputeOutputDir(params), config_name))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
master_ninja_file = OpenOutput(os.path.join(toplevel_build, 'build.ninja'))
master_ninja = ninja_syntax.Writer(master_ninja_file, width=120)
# Put build-time support tools in out/{config_name}.
gyp.common.CopyTool(flavor, toplevel_build)
# Grab make settings for CC/CXX.
  # The rules are:
  # - The priority, from lowest to highest, is: the default gcc/g++, the
  #   'make_global_settings' in gyp, then the environment variables.
  # - If there is no 'make_global_settings' for CC.host/CXX.host and no
  #   'CC_host'/'CXX_host' environment variable, cc_host/cxx_host should be
  #   set to cc/cxx.
if flavor == 'win':
# Overridden by local arch choice in the use_deps case.
# Chromium's ffmpeg c99conv.py currently looks for a 'cc =' line in
# build.ninja so needs something valid here. http://crbug.com/233985
cc = 'cl.exe'
cxx = 'cl.exe'
ld = 'link.exe'
ld_host = '$ld'
else:
cc = 'cc'
cxx = 'c++'
ld = '$cc'
ldxx = '$cxx'
ld_host = '$cc_host'
ldxx_host = '$cxx_host'
cc_host = None
cxx_host = None
cc_host_global_setting = None
cxx_host_global_setting = None
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings = data[build_file].get('make_global_settings', [])
build_to_root = gyp.common.InvertRelativePath(build_dir,
options.toplevel_dir)
wrappers = {}
for key, value in make_global_settings:
if key == 'CC':
cc = os.path.join(build_to_root, value)
if key == 'CXX':
cxx = os.path.join(build_to_root, value)
if key == 'CC.host':
cc_host = os.path.join(build_to_root, value)
cc_host_global_setting = value
if key == 'CXX.host':
cxx_host = os.path.join(build_to_root, value)
cxx_host_global_setting = value
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value)
# Support wrappers from environment variables too.
for key, value in os.environ.iteritems():
if key.lower().endswith('_wrapper'):
key_prefix = key[:-len('_wrapper')]
key_prefix = re.sub(r'\.HOST$', '.host', key_prefix)
wrappers[key_prefix] = os.path.join(build_to_root, value)
if flavor == 'win':
cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles(
toplevel_build, generator_flags, OpenOutput)
for arch, path in cl_paths.iteritems():
master_ninja.variable(
'cl_' + arch, CommandWithWrapper('CC', wrappers,
QuoteShellArgument(path, flavor)))
cc = GetEnvironFallback(['CC_target', 'CC'], cc)
master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc))
cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx)
master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx))
if flavor == 'win':
master_ninja.variable('ld', ld)
master_ninja.variable('idl', 'midl.exe')
master_ninja.variable('ar', 'lib.exe')
master_ninja.variable('rc', 'rc.exe')
master_ninja.variable('asm', 'ml.exe')
master_ninja.variable('mt', 'mt.exe')
else:
master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld))
master_ninja.variable('ldxx', CommandWithWrapper('LINK', wrappers, ldxx))
master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], 'ar'))
if generator_supports_multiple_toolsets:
if not cc_host:
cc_host = cc
if not cxx_host:
cxx_host = cxx
master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], 'ar'))
cc_host = GetEnvironFallback(['CC_host'], cc_host)
cxx_host = GetEnvironFallback(['CXX_host'], cxx_host)
# The environment variable could be used in 'make_global_settings', like
# ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)'], transform them here.
if '$(CC)' in cc_host and cc_host_global_setting:
cc_host = cc_host_global_setting.replace('$(CC)', cc)
if '$(CXX)' in cxx_host and cxx_host_global_setting:
cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx)
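    # For example, with make_global_settings = [['CC', 'gcc'],
    # ['CC.host', '$(CC) -m32']] and no CC_host in the environment, cc_host
    # ends up as roughly 'gcc -m32' (modulo the build_to_root rebasing
    # applied to cc above).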
master_ninja.variable('cc_host',
CommandWithWrapper('CC.host', wrappers, cc_host))
master_ninja.variable('cxx_host',
CommandWithWrapper('CXX.host', wrappers, cxx_host))
if flavor == 'win':
master_ninja.variable('ld_host', ld_host)
else:
master_ninja.variable('ld_host', CommandWithWrapper(
'LINK', wrappers, ld_host))
master_ninja.variable('ldxx_host', CommandWithWrapper(
'LINK', wrappers, ldxx_host))
master_ninja.newline()
master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks())
master_ninja.newline()
deps = 'msvc' if flavor == 'win' else 'gcc'
if flavor != 'win':
master_ninja.rule(
'cc',
description='CC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'cc_s',
description='CC $out',
command=('$cc $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'))
master_ninja.rule(
'cxx',
description='CXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
'$cflags_pch_cc -c $in -o $out'),
depfile='$out.d',
deps=deps)
else:
    # TODO(scottmg) Using separate pdb names is an experiment to see if it
    # works around http://crbug.com/142362. There seems to be a race between
    # the creation of the .pdb by the precompiled header step for .cc files
    # and the compilation of .c files. This should be handled by mspdbsrv,
    # but it still occasionally errors out with
    #   c1xx : fatal error C1033: cannot open program database
    # Making the rules target separate pdb files may avoid this.
cc_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cc /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_c ')
cxx_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cxx /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_cc ')
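    # Both commands run under ninja's built-in msvc helper tool (a sketch of
    # the mechanism, not a full description): '-e $arch' names the
    # environment.<arch> file written by GenerateEnvironmentFiles() above,
    # from which the tool loads the compiler environment before running the
    # wrapped command; it also rewrites /showIncludes output into dependency
    # information that ninja's deps handling understands.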
master_ninja.rule(
'cc',
description='CC $out',
command=cc_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_c',
deps=deps)
master_ninja.rule(
'cxx',
description='CXX $out',
command=cxx_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_cc',
deps=deps)
master_ninja.rule(
'idl',
description='IDL $in',
command=('%s gyp-win-tool midl-wrapper $arch $outdir '
'$tlb $h $dlldata $iid $proxy $in '
'$idlflags' % sys.executable))
master_ninja.rule(
'rc',
description='RC $in',
# Note: $in must be last otherwise rc.exe complains.
command=('%s gyp-win-tool rc-wrapper '
'$arch $rc $defines $resource_includes $rcflags /fo$out $in' %
sys.executable))
master_ninja.rule(
'asm',
description='ASM $in',
command=('%s gyp-win-tool asm-wrapper '
'$arch $asm $defines $includes /c /Fo $out $in' %
sys.executable))
if flavor != 'mac' and flavor != 'win':
master_ninja.rule(
'alink',
description='AR $out',
command='rm -f $out && $ar rcs $out $in')
master_ninja.rule(
'alink_thin',
description='AR $out',
command='rm -f $out && $ar rcsT $out $in')
# This allows targets that only need to depend on $lib's API to declare an
# order-only dependency on $lib.TOC and avoid relinking such downstream
# dependencies when $lib changes only in non-public ways.
# The resulting string leaves an uninterpolated %{suffix} which
# is used in the final substitution below.
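    # A sketch of the two-pass substitution: the '%' below fills in
    # %(solink)s and %(extract_toc)s; %(suffix)s occurs only inside the
    # substituted 'solink' value, so it survives the first pass, and each
    # rule definition fills it in later. The final 'solink' command then
    # reads roughly: if ${lib}.TOC is missing, link and dump the TOC;
    # otherwise link, dump the TOC to ${lib}.tmp, and replace ${lib}.TOC
    # only when the exported interface changed, so restat=True stops
    # dependents from relinking.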
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e ${lib}.TOC ]; then '
'%(solink)s && %(extract_toc)s > ${lib}.TOC; else '
'%(solink)s && %(extract_toc)s > ${lib}.tmp && '
'if ! cmp -s ${lib}.tmp ${lib}.TOC; then mv ${lib}.tmp ${lib}.TOC ; '
'fi; fi'
% { 'solink':
'$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
'extract_toc':
('{ readelf -d ${lib} | grep SONAME ; '
'nm -gD -f p ${lib} | cut -f1-2 -d\' \'; }')})
master_ninja.rule(
'solink',
description='SOLINK $lib',
restat=True,
command=(mtime_preserving_solink_base % {
'suffix': '-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive '
'$libs'}),
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib',
restat=True,
command=(mtime_preserving_solink_base % {
'suffix': '-Wl,--start-group $in $solibs -Wl,--end-group '
'$libs'}),
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out',
command=('$ld $ldflags -o $out '
'-Wl,--start-group $in $solibs -Wl,--end-group $libs'),
pool='link_pool')
elif flavor == 'win':
master_ninja.rule(
'alink',
description='LIB $out',
command=('%s gyp-win-tool link-wrapper $arch False '
'$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' %
sys.executable),
rspfile='$out.rsp',
rspfile_content='$in_newline $libflags')
_AddWinLinkRules(master_ninja, embed_manifest=True)
_AddWinLinkRules(master_ninja, embed_manifest=False)
else:
master_ninja.rule(
'objc',
description='OBJC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
'$cflags_pch_objc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'objcxx',
description='OBJCXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
'$cflags_pch_objcc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'alink',
description='LIBTOOL-STATIC $out, POSTBUILDS',
command='rm -f $out && '
'./gyp-mac-tool filter-libtool libtool $libtool_flags '
'-static -o $out $in'
'$postbuilds')
master_ninja.rule(
'lipo',
description='LIPO $out, POSTBUILDS',
command='rm -f $out && lipo -create $in -output $out$postbuilds')
# Record the public interface of $lib in $lib.TOC. See the corresponding
# comment in the posix section above for details.
solink_base = '$ld %(type)s $ldflags -o $lib %(suffix)s'
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e ${lib}.TOC ] || '
# Always force dependent targets to relink if this library
# reexports something. Handling this correctly would require
# recursive TOC dumping but this is rare in practice, so punt.
'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then '
'%(solink)s && %(extract_toc)s > ${lib}.TOC; '
'else '
'%(solink)s && %(extract_toc)s > ${lib}.tmp && '
'if ! cmp -s ${lib}.tmp ${lib}.TOC; then '
'mv ${lib}.tmp ${lib}.TOC ; '
'fi; '
'fi'
% { 'solink': solink_base,
'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})
solink_suffix = '$in $solibs $libs$postbuilds'
master_ninja.rule(
'solink',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_suffix,
'type': '-shared'},
pool='link_pool')
master_ninja.rule(
'solink_notoc',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix':solink_suffix, 'type': '-shared'},
pool='link_pool')
solink_module_suffix = '$in $solibs $libs$postbuilds'
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_module_suffix,
'type': '-bundle'},
pool='link_pool')
master_ninja.rule(
'solink_module_notoc',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix': solink_module_suffix, 'type': '-bundle'},
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out, POSTBUILDS',
command=('$ld $ldflags -o $out '
'$in $solibs $libs$postbuilds'),
pool='link_pool')
master_ninja.rule(
'preprocess_infoplist',
description='PREPROCESS INFOPLIST $out',
command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
'plutil -convert xml1 $out $out'))
master_ninja.rule(
'copy_infoplist',
description='COPY INFOPLIST $in',
command='$env ./gyp-mac-tool copy-info-plist $in $out $keys')
master_ninja.rule(
'mac_tool',
description='MACTOOL $mactool_cmd $in',
command='$env ./gyp-mac-tool $mactool_cmd $in $out')
master_ninja.rule(
'package_framework',
description='PACKAGE FRAMEWORK $out, POSTBUILDS',
command='./gyp-mac-tool package-framework $out $version$postbuilds '
'&& touch $out')
if flavor == 'win':
master_ninja.rule(
'stamp',
description='STAMP $out',
command='%s gyp-win-tool stamp $out' % sys.executable)
master_ninja.rule(
'copy',
description='COPY $in $out',
command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable)
else:
master_ninja.rule(
'stamp',
description='STAMP $out',
command='${postbuilds}touch $out')
master_ninja.rule(
'copy',
description='COPY $in $out',
command='rm -rf $out && cp -af $in $out')
master_ninja.newline()
all_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_targets.add(target)
all_outputs = set()
# target_outputs is a map from qualified target name to a Target object.
target_outputs = {}
# target_short_names is a map from target short name to a list of Target
# objects.
target_short_names = {}
for qualified_target in target_list:
# qualified_target is like: third_party/icu/icu.gyp:icui18n#target
build_file, name, toolset = \
gyp.common.ParseQualifiedTarget(qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings == this_make_global_settings, (
"make_global_settings needs to be the same for all targets. %s vs. %s" %
(this_make_global_settings, make_global_settings))
spec = target_dicts[qualified_target]
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
build_file = gyp.common.RelativePath(build_file, options.toplevel_dir)
base_path = os.path.dirname(build_file)
obj = 'obj'
if toolset != 'target':
obj += '.' + toolset
output_file = os.path.join(obj, base_path, name + '.ninja')
ninja_output = StringIO()
writer = NinjaWriter(qualified_target, target_outputs, base_path, build_dir,
ninja_output,
toplevel_build, output_file,
flavor, toplevel_dir=options.toplevel_dir)
target = writer.WriteSpec(spec, config_name, generator_flags)
if ninja_output.tell() > 0:
      # Only create the ninja file if it actually has contents.
with OpenOutput(os.path.join(toplevel_build, output_file)) as ninja_file:
ninja_file.write(ninja_output.getvalue())
ninja_output.close()
master_ninja.subninja(output_file)
if target:
if name != target.FinalOutput() and spec['toolset'] == 'target':
target_short_names.setdefault(name, []).append(target)
target_outputs[qualified_target] = target
if qualified_target in all_targets:
all_outputs.add(target.FinalOutput())
if target_short_names:
    # Write a short name to build this target. This benefits both the
    # "build chrome" case and the gyp tests, which expect to be able to run
    # actions and build libraries by their short name.
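    # The generated edges look roughly like this (the target name is
    # illustrative):
    #   build base_unittests: phony obj/base/base_unittests.stamp
    # so `ninja base_unittests` works without knowing the real output path.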
master_ninja.newline()
master_ninja.comment('Short names for targets.')
for short_name in target_short_names:
master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in
target_short_names[short_name]])
if all_outputs:
master_ninja.newline()
master_ninja.build('all', 'phony', list(all_outputs))
master_ninja.default(generator_flags.get('default_target', 'all'))
master_ninja_file.close()


def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
builddir = os.path.join(options.toplevel_dir, 'out', config)
arguments = ['ninja', '-C', builddir]
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)


def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
(target_list, target_dicts, data, params, config_name) = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)


def GenerateOutput(target_list, target_dicts, data, params):
# Update target_dicts for iOS device builds.
target_dicts = gyp.xcode_emulation.CloneConfigurationForDeviceAndEmulator(
target_dicts)
user_config = params.get('generator_flags', {}).get('config', None)
if gyp.common.GetFlavor(params) == 'win':
target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts)
target_list, target_dicts = MSVSUtil.InsertLargePdbShims(
target_list, target_dicts, generator_default_variables)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append(
(target_list, target_dicts, data, params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt, e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
| apache-2.0 |
dockeroo/dockeroo | dockeroo/docker/gentoo_diskimage.py | 1 | 7879 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, Giacomo Cariello. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os

from zc.buildout import UserError  # UserError is raised in install() below

from dockeroo import BaseGroupRecipe
from dockeroo.docker import BaseDockerSubRecipe
from dockeroo.utils import string_as_bool


class GentooDiskImageSubRecipe(BaseDockerSubRecipe):  # pylint: disable=too-many-instance-attributes
def initialize(self):
super(GentooDiskImageSubRecipe, self).initialize()
self.build_command = self.options.get('build-command', "/bin/freeze")
self.build_container = "{}_build".format(self.name)
self.build_image = self.options['build-image']
self.build_volumes_from = self.options.get('build-volumes-from', None)
self.build_script_user = self.options.get('build-script-user', None)
self.build_script_shell = self.options.get(
'build-script-shell', self.shell)
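        # The scripts below are normalized before use: '$$' is unescaped to
        # '$' (buildout-style escaping of a literal dollar sign), every line
        # is stripped, blank lines are dropped, and a '#!<shell>' line is
        # prepended. For example, an option line of 'echo $$HOME' ends up in
        # the generated script as 'echo $HOME'.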
self.prepare_script = "#!{}\n{}".format(
self.build_script_shell, '\n'.join(
[_f for _f in
[x.strip() for x in
self.options.get('prepare-script').replace('$$', '$').splitlines()]
if _f])) if self.options.get('prepare-script', None) is not None else None
self.build_script = "#!{}\n{}".format(
self.build_script_shell, '\n'.join(
[_f for _f in
[x.strip() for x in
self.options.get('build-script').replace('$$', '$').splitlines()]
if _f])) if self.options.get('build-script', None) is not None else None
self.build_root = self.options['build-root']
self.base_image = self.options['base-image']
self.image_file = self.options['image-file']
self.platform = self.options.get('platform', self.engine.platform)
self.arch = self.options.get('arch', self.platform)
self.tty = string_as_bool(self.options.get('tty', False))

    def install(self):
if self.platform != self.engine.platform:
if self.engine.machine is not None:
self.engine.machine.config_binfmt(self.platform)
else:
raise UserError("docker-machine is not defined but binfmt configuration is needed.")
self.engine.remove_container(self.build_container)
self.engine.create_container(self.build_container, self.build_image,
command=self.build_command,
privileged=True, tty=self.tty,
volumes_from=self.build_volumes_from)
self.engine.start_container(self.build_container)
if self.prepare_script:
self.engine.run_script(self.build_container, self.prepare_script,
shell=self.build_script_shell,
user=self.build_script_user)
self.engine.copy_image_to_container(
self.base_image, self.build_container, "/", dst=self.build_root)
if self.build_script:
self.engine.run_script(self.build_container, self.build_script,
shell=self.build_script_shell,
user=self.build_script_user)
self.recipe.mkdir(self.location)
self.engine.export_files(self.build_container, self.image_file, self.location)
self.engine.remove_container(self.build_container)
self.engine.clean_stale_images()
return self.mark_completed()

    def update(self):
if self.is_image_updated(self.build_image) or \
self.is_image_updated(self.base_image):
return self.install()
else:
return (self.completed, )

    def uninstall(self):
self.engine.remove_container(self.build_container)


class DockerGentooDiskImageRecipe(BaseGroupRecipe):
"""
This recipe executes the following tasks:
    1. Creates a temporary container from the **build-image** docker image.
2. Executes **prepare-script** on the builder container.
3. Extracts **base-image** docker image into **build-root** folder.
4. Executes **build-script** on the builder container.
5. Extracts **image-file** from the builder container and saves it into **${:location}**.
.. describe:: Usage
The following example buildout part shows how to build a linux disk image
from a **base** image using a **builder** image produced with :py:class:`dockeroo.docker.gentoo_bootstrap.DockerGentooBootstrapRecipe`.
.. code-block:: ini
[disk-image]
recipe = dockeroo:docker.gentoo-diskimage
build-image = builder:latest
base-image = base:latest
build-root = /mnt/
image-file = /tmp/disk.img
prepare-script =
mkdir -p /tmp && dd if=/dev/zero of=${:image-file} bs=1M count=2048
parted -a optimal ${:image-file} mklabel msdos
parted -a optimal ${:image-file} unit mib mkpart primary fat32 1 131
parted -a optimal ${:image-file} set 1 boot on
parted -a optimal ${:image-file} unit mib mkpart primary linux-swap 131 643
parted -a optimal ${:image-file} unit mib mkpart primary ext2 643 100%
rm -f /dev/loop0; mknod /dev/loop0 b 7 0
rm -f /dev/loop0p1
rm -f /dev/loop0p2
rm -f /dev/loop0p3
losetup --show -P /dev/loop0 ${:image-file}
mknod /dev/loop0p1 b 259 0
mknod /dev/loop0p2 b 259 1
mknod /dev/loop0p3 b 259 2
mkfs.vfat -F 32 -n BOOT /dev/loop0p1
mkswap /dev/loop0p2
mkfs.ext4 -T small /dev/loop0p3
mount -t ext4 /dev/loop0p3 /mnt
mkdir -p /mnt/boot
mount -t vfat /dev/loop0p1 /mnt/boot
build-script =
umount /dev/loop0p1
umount /dev/loop0p3
losetup -d /dev/loop0 >/dev/null 2>&1
.. describe:: Configuration options
This recipe accepts the following options:
base-image
Docker image to use as base for disk creation.
build-command
Command to launch on builder container upon creation. Defaults to "/bin/freeze".
build-image
Docker image to use as builder.
build-root
Root folder where **base-image** is extracted.
build-script
This shell script is executed after **base-image** extraction.
build-script-shell
Shell to use for script execution. Defaults to "/bin/sh".
build-script-user
User which executes the **prepare-script** and **build-script**. If unset, docker default is applied.
build-volumes-from
Volumes to be mounted on build container upon creation.
image-file
Disk image file which is extracted from build container.
location
Path where disk image will be saved. Defaults to ${buildout:parts-directory}/${:name}.
machine-name
Docker machine where **build-image** and **base-image** reside.
Defaults to DOCKER_MACHINE_NAME environment variable or "default" if unset.
prepare-script
This shell script is executed before **base-image** extraction.
timeout
**docker** command timeout.
"""
subrecipe_class = GentooDiskImageSubRecipe
| apache-2.0 |