repo_name (stringlengths 5–100) | path (stringlengths 4–375) | copies (stringclasses, 991 values) | size (stringlengths 4–7) | content (stringlengths 666–1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
mansilladev/zulip | zerver/lib/context_managers.py | 120 | 1090 | """
Context managers, i.e. things you can use with the 'with' statement.
"""
from __future__ import absolute_import
import fcntl
import os
from contextlib import contextmanager
@contextmanager
def flock(lockfile, shared=False):
"""Lock a file object using flock(2) for the duration of a 'with' statement.
If shared is True, use a LOCK_SH lock, otherwise LOCK_EX."""
fcntl.flock(lockfile, fcntl.LOCK_SH if shared else fcntl.LOCK_EX)
try:
yield
finally:
fcntl.flock(lockfile, fcntl.LOCK_UN)
@contextmanager
def lockfile(filename, shared=False):
"""Lock a file using flock(2) for the duration of a 'with' statement.
If shared is True, use a LOCK_SH lock, otherwise LOCK_EX.
The file is given by name and will be created if it does not exist."""
if not os.path.exists(filename):
with open(filename, 'w') as lock:
lock.write('0')
# TODO: Can we just open the file for writing, and skip the above check?
with open(filename, 'r') as lock:
with flock(lock, shared=shared):
yield
| apache-2.0 |
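The two helpers in the file above compose: lockfile creates the named file if it does not exist, opens it, and delegates to flock, which holds the flock(2) lock only for the body of the 'with' block. A minimal usage sketch, assuming a POSIX system and that the module is importable as zerver.lib.context_managers; the lock path below is illustrative, not part of the original module:

import os
import tempfile
from zerver.lib.context_managers import lockfile

# Illustrative path for the demo; any writable location works.
lock_path = os.path.join(tempfile.gettempdir(), 'example.lock')

with lockfile(lock_path):
    # Exclusive (LOCK_EX) section: only one process runs this at a time.
    print('holding the exclusive lock')

with lockfile(lock_path, shared=True):
    # Shared (LOCK_SH) section: multiple readers may hold this concurrently.
    print('holding a shared lock')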
MinchinWeb/topydo | test/DeleteCommandTest.py | 1 | 9064 | # Topydo - A todo.txt client written in Python.
# Copyright (C) 2014 - 2015 Bram Schoenmakers <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from six import u
from test.CommandTest import CommandTest
from topydo.lib.Config import config
from topydo.commands.DeleteCommand import DeleteCommand
from topydo.lib.TodoList import TodoList
from topydo.lib.TodoListBase import InvalidTodoException
def _yes_prompt(self):
return "y"
def _no_prompt(self):
return "n"
class DeleteCommandTest(CommandTest):
def setUp(self):
super(DeleteCommandTest, self).setUp()
todos = [
"Foo id:1",
"Bar p:1",
"a @test with due:2015-06-03",
"a @test with +project",
]
self.todolist = TodoList(todos)
def test_del1(self):
command = DeleteCommand(["1"], self.todolist, self.out, self.error, _no_prompt)
command.execute()
self.assertTrue(self.todolist.is_dirty())
self.assertEqual(self.todolist.todo(1).source(), "Bar")
self.assertEqual(self.output, "| 2| Bar p:1\nRemoved: Foo id:1\n")
self.assertEqual(self.errors, "")
def test_del1_regex(self):
command = DeleteCommand(["Foo"], self.todolist, self.out, self.error, _no_prompt)
command.execute()
self.assertTrue(self.todolist.is_dirty())
self.assertEqual(self.todolist.todo(1).source(), "Bar")
self.assertEqual(self.output, "| 2| Bar p:1\nRemoved: Foo id:1\n")
self.assertEqual(self.errors, "")
def test_del2(self):
command = DeleteCommand(["1"], self.todolist, self.out, self.error, _yes_prompt)
command.execute()
self.assertTrue(self.todolist.is_dirty())
self.assertEqual(self.todolist.count(), 2)
self.assertEqual(self.output, "| 2| Bar p:1\nRemoved: Bar\nRemoved: Foo\n")
self.assertEqual(self.errors, "")
def test_del3(self):
command = DeleteCommand(["-f", "1"], self.todolist, self.out, self.error, _yes_prompt)
command.execute()
self.assertTrue(self.todolist.is_dirty())
self.assertEqual(self.todolist.count(), 3) # force won't delete subtasks
self.assertEqual(self.output, "| 2| Bar p:1\nRemoved: Foo id:1\n")
self.assertEqual(self.errors, "")
def test_del4(self):
command = DeleteCommand(["--force", "1"], self.todolist, self.out, self.error, _yes_prompt)
command.execute()
self.assertTrue(self.todolist.is_dirty())
self.assertEqual(self.todolist.count(), 3) # force won't delete subtasks
self.assertEqual(self.output, "| 2| Bar p:1\nRemoved: Foo id:1\n")
self.assertEqual(self.errors, "")
def test_del5(self):
command = DeleteCommand(["2"], self.todolist, self.out, self.error)
command.execute()
self.assertTrue(self.todolist.is_dirty())
self.assertEqual(self.todolist.todo(1).source(), "Foo")
self.assertEqual(self.output, "Removed: Bar p:1\nThe following todo item(s) became active:\n| 1| Foo\n")
self.assertEqual(self.errors, "")
def test_del7(self):
command = DeleteCommand(["99"], self.todolist, self.out, self.error)
command.execute()
self.assertFalse(self.todolist.is_dirty())
self.assertEqual(self.output, "")
self.assertEqual(self.errors, "Invalid todo number given.\n")
def test_del8(self):
command = DeleteCommand(["A"], self.todolist, self.out, self.error)
command.execute()
self.assertFalse(self.todolist.is_dirty())
self.assertEqual(self.output, "")
self.assertEqual(self.errors, "Invalid todo number given.\n")
def test_del9(self):
""" Test deletion with textual IDs. """
config("test/data/todolist-uid.conf")
command = DeleteCommand(["8to"], self.todolist, self.out, self.error)
command.execute()
result = "Foo\na @test with due:2015-06-03\na @test with +project"
self.assertEqual(self.todolist.print_todos(), result)
self.assertRaises(InvalidTodoException, self.todolist.todo, 'b0n')
def test_multi_del1(self):
""" Test deletion of multiple items. """
command = DeleteCommand(["1", "2"], self.todolist, self.out, self.error, _no_prompt)
command.execute()
result = "a @test with due:2015-06-03\na @test with +project"
self.assertEqual(self.todolist.count(), 2)
self.assertEqual(self.todolist.print_todos(), result)
def test_multi_del2(self):
""" Test deletion of multiple items. """
command = DeleteCommand(["1", "2"], self.todolist, self.out, self.error, _yes_prompt)
command.execute()
result = "a @test with due:2015-06-03\na @test with +project"
self.assertEqual(self.todolist.count(), 2)
self.assertEqual(self.todolist.print_todos(), result)
def test_multi_del3(self):
""" Fail if any of supplied todo numbers is invalid. """
command = DeleteCommand(["99", "2"], self.todolist, self.out, self.error, _yes_prompt)
command.execute()
self.assertFalse(self.todolist.is_dirty())
self.assertEqual(self.output, "")
self.assertEqual(self.errors, "Invalid todo number given: 99.\n")
def test_multi_del4(self):
""" Check output when all supplied todo numbers are invalid. """
command = DeleteCommand(["99", "A"], self.todolist, self.out, self.error, _yes_prompt)
command.execute()
self.assertFalse(self.todolist.is_dirty())
self.assertEqual(self.output, "")
self.assertEqual(self.errors, "Invalid todo number given: 99.\nInvalid todo number given: A.\n")
def test_multi_del5(self):
""" Throw an error with invalid argument containing special characters. """
command = DeleteCommand([u("Fo\u00d3B\u0105r"), "Bar"], self.todolist, self.out, self.error, None)
command.execute()
self.assertFalse(self.todolist.is_dirty())
self.assertEqual(self.output, "")
self.assertEqual(self.errors, u("Invalid todo number given: Fo\u00d3B\u0105r.\n"))
def test_expr_del1(self):
command = DeleteCommand(["-e", "@test"], self.todolist, self.out, self.error, None)
command.execute()
result = "Removed: a @test with due:2015-06-03\nRemoved: a @test with +project\n"
self.assertTrue(self.todolist.is_dirty())
self.assertEqual(self.todolist.count(), 2)
self.assertEqual(self.output, result)
self.assertEqual(self.errors, "")
def test_expr_del2(self):
command = DeleteCommand(["-e", "@test", "due:2015-06-03"], self.todolist, self.out, self.error, None)
command.execute()
self.assertTrue(self.todolist.is_dirty())
self.assertEqual(self.output, "Removed: a @test with due:2015-06-03\n")
self.assertEqual(self.errors, "")
def test_expr_del3(self):
command = DeleteCommand(["-e", "@test", "due:2015-06-03", "+project"], self.todolist, self.out, self.error, None)
command.execute()
self.assertFalse(self.todolist.is_dirty())
def test_expr_del4(self):
""" Remove only relevant todo items. """
command = DeleteCommand(["-e", ""], self.todolist, self.out, self.error, None)
command.execute()
result = "Foo"
self.assertTrue(self.todolist.is_dirty())
self.assertEqual(self.todolist.count(), 1)
self.assertEqual(self.todolist.print_todos(), result)
def test_expr_del5(self):
""" Force deleting unrelevant items with additional -x flag. """
command = DeleteCommand(["-xe", ""], self.todolist, self.out, self.error, _yes_prompt)
command.execute()
self.assertTrue(self.todolist.is_dirty())
self.assertEqual(self.todolist.count(), 0)
def test_empty(self):
command = DeleteCommand([], self.todolist, self.out, self.error)
command.execute()
self.assertFalse(self.todolist.is_dirty())
self.assertFalse(self.output)
self.assertEqual(self.errors, command.usage() + "\n")
def test_help(self):
command = DeleteCommand(["help"], self.todolist, self.out, self.error)
command.execute()
self.assertEqual(self.output, "")
self.assertEqual(self.errors, command.usage() + "\n\n" + command.help() + "\n")
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
thomazs/geraldo | site/newsite/django_1_0/django/contrib/auth/tests/forms.py | 10 | 2992 |
FORM_TESTS = """
>>> from django.contrib.auth.models import User
>>> from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
>>> from django.contrib.auth.forms import PasswordChangeForm
The user already exists.
>>> user = User.objects.create_user("jsmith", "[email protected]", "test123")
>>> data = {
... 'username': 'jsmith',
... 'password1': 'test123',
... 'password2': 'test123',
... }
>>> form = UserCreationForm(data)
>>> form.is_valid()
False
>>> form["username"].errors
[u'A user with that username already exists.']
The username contains invalid data.
>>> data = {
... 'username': '[email protected]',
... 'password1': 'test123',
... 'password2': 'test123',
... }
>>> form = UserCreationForm(data)
>>> form.is_valid()
False
>>> form["username"].errors
[u'This value must contain only letters, numbers and underscores.']
The verification password is incorrect.
>>> data = {
... 'username': 'jsmith2',
... 'password1': 'test123',
... 'password2': 'test',
... }
>>> form = UserCreationForm(data)
>>> form.is_valid()
False
>>> form["password2"].errors
[u"The two password fields didn't match."]
The success case.
>>> data = {
... 'username': 'jsmith2',
... 'password1': 'test123',
... 'password2': 'test123',
... }
>>> form = UserCreationForm(data)
>>> form.is_valid()
True
>>> form.save()
<User: jsmith2>
The user submits an invalid username.
>>> data = {
... 'username': 'jsmith_does_not_exist',
... 'password': 'test123',
... }
>>> form = AuthenticationForm(None, data)
>>> form.is_valid()
False
>>> form.non_field_errors()
[u'Please enter a correct username and password. Note that both fields are case-sensitive.']
The user is inactive.
>>> data = {
... 'username': 'jsmith',
... 'password': 'test123',
... }
>>> user.is_active = False
>>> user.save()
>>> form = AuthenticationForm(None, data)
>>> form.is_valid()
False
>>> form.non_field_errors()
[u'This account is inactive.']
>>> user.is_active = True
>>> user.save()
The success case
>>> form = AuthenticationForm(None, data)
>>> form.is_valid()
True
>>> form.non_field_errors()
[]
The old password is incorrect.
>>> data = {
... 'old_password': 'test',
... 'new_password1': 'abc123',
... 'new_password2': 'abc123',
... }
>>> form = PasswordChangeForm(user, data)
>>> form.is_valid()
False
>>> form["old_password"].errors
[u'Your old password was entered incorrectly. Please enter it again.']
The two new passwords do not match.
>>> data = {
... 'old_password': 'test123',
... 'new_password1': 'abc123',
... 'new_password2': 'abc',
... }
>>> form = PasswordChangeForm(user, data)
>>> form.is_valid()
False
>>> form["new_password2"].errors
[u"The two password fields didn't match."]
The success case.
>>> data = {
... 'old_password': 'test123',
... 'new_password1': 'abc123',
... 'new_password2': 'abc123',
... }
>>> form = PasswordChangeForm(user, data)
>>> form.is_valid()
True
"""
| lgpl-3.0 |
yan12125/youtube-dl | youtube_dl/extractor/vrak.py | 61 | 2943 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .brightcove import BrightcoveNewIE
from ..utils import (
int_or_none,
parse_age_limit,
smuggle_url,
unescapeHTML,
)
class VrakIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?vrak\.tv/videos\?.*?\btarget=(?P<id>[\d.]+)'
_TEST = {
'url': 'http://www.vrak.tv/videos?target=1.2306782&filtre=emission&id=1.1806721',
'info_dict': {
'id': '5345661243001',
'ext': 'mp4',
'title': 'Obésité, film de hockey et Roseline Filion',
'timestamp': 1488492126,
'upload_date': '20170302',
'uploader_id': '2890187628001',
'creator': 'VRAK.TV',
'age_limit': 8,
'series': 'ALT (Actualité Légèrement Tordue)',
'episode': 'Obésité, film de hockey et Roseline Filion',
'tags': list,
},
'params': {
'skip_download': True,
},
}
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/2890187628001/default_default/index.html?videoId=%s'
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
r'<h\d\b[^>]+\bclass=["\']videoTitle["\'][^>]*>([^<]+)',
webpage, 'title', default=None) or self._og_search_title(webpage)
content = self._parse_json(
self._search_regex(
r'data-player-options-content=(["\'])(?P<content>{.+?})\1',
webpage, 'content', default='{}', group='content'),
video_id, transform_source=unescapeHTML)
ref_id = content.get('refId') or self._search_regex(
r'refId":"([^&]+)"', webpage, 'ref id')
brightcove_id = self._search_regex(
r'''(?x)
java\.lang\.String\s+value\s*=\s*["']brightcove\.article\.\d+\.%s
[^>]*
java\.lang\.String\s+value\s*=\s*["'](\d+)
''' % re.escape(ref_id), webpage, 'brightcove id')
return {
'_type': 'url_transparent',
'ie_key': BrightcoveNewIE.ie_key(),
'url': smuggle_url(
self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id,
{'geo_countries': ['CA']}),
'id': brightcove_id,
'description': content.get('description'),
'creator': content.get('brand'),
'age_limit': parse_age_limit(content.get('rating')),
'series': content.get('showName') or content.get(
'episodeName'), # this is intentional
'season_number': int_or_none(content.get('seasonNumber')),
'episode': title,
'episode_number': int_or_none(content.get('episodeNumber')),
'tags': content.get('tags', []),
}
| unlicense |
ataylor32/django | django/db/transaction.py | 186 | 11823 | from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, Error, ProgrammingError, connections,
)
from django.utils.decorators import ContextDecorator
class TransactionManagementError(ProgrammingError):
"""
This exception is thrown when transaction management is used improperly.
"""
pass
def get_connection(using=None):
"""
Get a database connection by name, or the default database connection
if no name is provided. This is a private API.
"""
if using is None:
using = DEFAULT_DB_ALIAS
return connections[using]
def get_autocommit(using=None):
"""
Get the autocommit status of the connection.
"""
return get_connection(using).get_autocommit()
def set_autocommit(autocommit, using=None):
"""
Set the autocommit status of the connection.
"""
return get_connection(using).set_autocommit(autocommit)
def commit(using=None):
"""
Commits a transaction.
"""
get_connection(using).commit()
def rollback(using=None):
"""
Rolls back a transaction.
"""
get_connection(using).rollback()
def savepoint(using=None):
"""
Creates a savepoint (if supported and required by the backend) inside the
current transaction. Returns an identifier for the savepoint that will be
used for the subsequent rollback or commit.
"""
return get_connection(using).savepoint()
def savepoint_rollback(sid, using=None):
"""
Rolls back the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
get_connection(using).savepoint_rollback(sid)
def savepoint_commit(sid, using=None):
"""
Commits the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
get_connection(using).savepoint_commit(sid)
def clean_savepoints(using=None):
"""
Resets the counter used to generate unique savepoint ids in this thread.
"""
get_connection(using).clean_savepoints()
def get_rollback(using=None):
"""
Gets the "needs rollback" flag -- for *advanced use* only.
"""
return get_connection(using).get_rollback()
def set_rollback(rollback, using=None):
"""
Sets or unsets the "needs rollback" flag -- for *advanced use* only.
When `rollback` is `True`, it triggers a rollback when exiting the
innermost enclosing atomic block that has `savepoint=True` (that's the
default). Use this to force a rollback without raising an exception.
When `rollback` is `False`, it prevents such a rollback. Use this only
after rolling back to a known-good state! Otherwise, you break the atomic
block and data corruption may occur.
"""
return get_connection(using).set_rollback(rollback)
def on_commit(func, using=None):
"""
Register `func` to be called when the current transaction is committed.
If the current transaction is rolled back, `func` will not be called.
"""
get_connection(using).on_commit(func)
#################################
# Decorators / context managers #
#################################
class Atomic(ContextDecorator):
"""
This class guarantees the atomic execution of a given block.
An instance can be used either as a decorator or as a context manager.
When it's used as a decorator, __call__ wraps the execution of the
decorated function in the instance itself, used as a context manager.
When it's used as a context manager, __enter__ creates a transaction or a
savepoint, depending on whether a transaction is already in progress, and
__exit__ commits the transaction or releases the savepoint on normal exit,
and rolls back the transaction or to the savepoint on exceptions.
It's possible to disable the creation of savepoints if the goal is to
ensure that some code runs within a transaction without creating overhead.
A stack of savepoints identifiers is maintained as an attribute of the
connection. None denotes the absence of a savepoint.
This allows reentrancy even if the same AtomicWrapper is reused. For
example, it's possible to define `oa = atomic('other')` and use `@oa` or
`with oa:` multiple times.
Since database connections are thread-local, this is thread-safe.
This is a private API.
"""
def __init__(self, using, savepoint):
self.using = using
self.savepoint = savepoint
def __enter__(self):
connection = get_connection(self.using)
if not connection.in_atomic_block:
# Reset state when entering an outermost atomic block.
connection.commit_on_exit = True
connection.needs_rollback = False
if not connection.get_autocommit():
# Some database adapters (namely sqlite3) don't handle
# transactions and savepoints properly when autocommit is off.
# Turning autocommit back on isn't an option; it would trigger
# a premature commit. Give up if that happens.
if connection.features.autocommits_when_autocommit_is_off:
raise TransactionManagementError(
"Your database backend doesn't behave properly when "
"autocommit is off. Turn it on before using 'atomic'.")
# When entering an atomic block with autocommit turned off,
# Django should only use savepoints and shouldn't commit.
# This requires at least a savepoint for the outermost block.
if not self.savepoint:
raise TransactionManagementError(
"The outermost 'atomic' block cannot use "
"savepoint = False when autocommit is off.")
# Pretend we're already in an atomic block to bypass the code
# that disables autocommit to enter a transaction, and make a
# note to deal with this case in __exit__.
connection.in_atomic_block = True
connection.commit_on_exit = False
if connection.in_atomic_block:
# We're already in a transaction; create a savepoint, unless we
# were told not to or we're already waiting for a rollback. The
# second condition avoids creating useless savepoints and prevents
# overwriting needs_rollback until the rollback is performed.
if self.savepoint and not connection.needs_rollback:
sid = connection.savepoint()
connection.savepoint_ids.append(sid)
else:
connection.savepoint_ids.append(None)
else:
connection.set_autocommit(False, force_begin_transaction_with_broken_autocommit=True)
connection.in_atomic_block = True
def __exit__(self, exc_type, exc_value, traceback):
connection = get_connection(self.using)
if connection.savepoint_ids:
sid = connection.savepoint_ids.pop()
else:
# Prematurely unset this flag to allow using commit or rollback.
connection.in_atomic_block = False
try:
if connection.closed_in_transaction:
# The database will perform a rollback by itself.
# Wait until we exit the outermost block.
pass
elif exc_type is None and not connection.needs_rollback:
if connection.in_atomic_block:
# Release savepoint if there is one
if sid is not None:
try:
connection.savepoint_commit(sid)
except DatabaseError:
try:
connection.savepoint_rollback(sid)
# The savepoint won't be reused. Release it to
# minimize overhead for the database server.
connection.savepoint_commit(sid)
except Error:
# If rolling back to a savepoint fails, mark for
# rollback at a higher level and avoid shadowing
# the original exception.
connection.needs_rollback = True
raise
else:
# Commit transaction
try:
connection.commit()
except DatabaseError:
try:
connection.rollback()
except Error:
# An error during rollback means that something
# went wrong with the connection. Drop it.
connection.close()
raise
else:
# This flag will be set to True again if there isn't a savepoint
# allowing to perform the rollback at this level.
connection.needs_rollback = False
if connection.in_atomic_block:
# Roll back to savepoint if there is one, mark for rollback
# otherwise.
if sid is None:
connection.needs_rollback = True
else:
try:
connection.savepoint_rollback(sid)
# The savepoint won't be reused. Release it to
# minimize overhead for the database server.
connection.savepoint_commit(sid)
except Error:
# If rolling back to a savepoint fails, mark for
# rollback at a higher level and avoid shadowing
# the original exception.
connection.needs_rollback = True
else:
# Roll back transaction
try:
connection.rollback()
except Error:
# An error during rollback means that something
# went wrong with the connection. Drop it.
connection.close()
finally:
# Outermost block exit when autocommit was enabled.
if not connection.in_atomic_block:
if connection.closed_in_transaction:
connection.connection = None
else:
connection.set_autocommit(True)
# Outermost block exit when autocommit was disabled.
elif not connection.savepoint_ids and not connection.commit_on_exit:
if connection.closed_in_transaction:
connection.connection = None
else:
connection.in_atomic_block = False
def atomic(using=None, savepoint=True):
# Bare decorator: @atomic -- although the first argument is called
# `using`, it's actually the function being decorated.
if callable(using):
return Atomic(DEFAULT_DB_ALIAS, savepoint)(using)
# Decorator: @atomic(...) or context manager: with atomic(...): ...
else:
return Atomic(using, savepoint)
def _non_atomic_requests(view, using):
try:
view._non_atomic_requests.add(using)
except AttributeError:
view._non_atomic_requests = {using}
return view
def non_atomic_requests(using=None):
if callable(using):
return _non_atomic_requests(using, DEFAULT_DB_ALIAS)
else:
if using is None:
using = DEFAULT_DB_ALIAS
return lambda view: _non_atomic_requests(view, using)
| bsd-3-clause |
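The Atomic class and the atomic() factory above are the machinery behind Django's public transaction API. A hedged usage sketch (not part of the file above), assuming a configured Django project; transfer, withdraw, and deposit are hypothetical names used only for illustration:

from django.db import transaction

@transaction.atomic
def transfer(source, target, amount):
    # Decorator form: the whole function body commits or rolls back as one unit.
    source.withdraw(amount)   # hypothetical model method
    target.deposit(amount)    # hypothetical model method

def transfer_with_block(source, target, amount):
    # Context-manager form: only the indented block is atomic; a nested
    # atomic() inside an outer one becomes a savepoint, as the docstring above describes.
    with transaction.atomic():
        source.withdraw(amount)
        target.deposit(amount)

Inside such a block, transaction.set_rollback(True) marks the innermost atomic block for rollback without raising, matching the set_rollback docstring above.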
Architektor/PySnip | venv/lib/python2.7/site-packages/twisted/python/test/test_url.py | 3 | 29330 | # -*- test-case-name: twisted.python.test.test_url -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.url}.
"""
from __future__ import unicode_literals
from ..url import URL
unicode = type(u'')
from unittest import TestCase
theurl = "http://www.foo.com/a/nice/path/?zot=23&zut"
# Examples from RFC 3986 section 5.4, Reference Resolution Examples
relativeLinkBaseForRFC3986 = 'http://a/b/c/d;p?q'
relativeLinkTestsForRFC3986 = [
# "Normal"
#('g:h', 'g:h'), # Not supported: scheme with relative path
('g', 'http://a/b/c/g'),
('./g', 'http://a/b/c/g'),
('g/', 'http://a/b/c/g/'),
('/g', 'http://a/g'),
('//g', 'http://g'),
('?y', 'http://a/b/c/d;p?y'),
('g?y', 'http://a/b/c/g?y'),
('#s', 'http://a/b/c/d;p?q#s'),
('g#s', 'http://a/b/c/g#s'),
('g?y#s', 'http://a/b/c/g?y#s'),
(';x', 'http://a/b/c/;x'),
('g;x', 'http://a/b/c/g;x'),
('g;x?y#s', 'http://a/b/c/g;x?y#s'),
('', 'http://a/b/c/d;p?q'),
('.', 'http://a/b/c/'),
('./', 'http://a/b/c/'),
('..', 'http://a/b/'),
('../', 'http://a/b/'),
('../g', 'http://a/b/g'),
('../..', 'http://a/'),
('../../', 'http://a/'),
('../../g', 'http://a/g'),
# Abnormal examples
# ".." cannot be used to change the authority component of a URI.
('../../../g', 'http://a/g'),
('../../../../g', 'http://a/g'),
# Only include "." and ".." when they are only part of a larger segment,
# not by themselves.
('/./g', 'http://a/g'),
('/../g', 'http://a/g'),
('g.', 'http://a/b/c/g.'),
('.g', 'http://a/b/c/.g'),
('g..', 'http://a/b/c/g..'),
('..g', 'http://a/b/c/..g'),
# Unnecessary or nonsensical forms of "." and "..".
('./../g', 'http://a/b/g'),
('./g/.', 'http://a/b/c/g/'),
('g/./h', 'http://a/b/c/g/h'),
('g/../h', 'http://a/b/c/h'),
('g;x=1/./y', 'http://a/b/c/g;x=1/y'),
('g;x=1/../y', 'http://a/b/c/y'),
# Separating the reference's query and fragment components from the path.
('g?y/./x', 'http://a/b/c/g?y/./x'),
('g?y/../x', 'http://a/b/c/g?y/../x'),
('g#s/./x', 'http://a/b/c/g#s/./x'),
('g#s/../x', 'http://a/b/c/g#s/../x'),
# Not supported: scheme with relative path
#("http:g", "http:g"), # strict
#("http:g", "http://a/b/c/g"), # non-strict
]
_percentenc = lambda s: ''.join('%%%02X' % ord(c) for c in s)
class TestURL(TestCase):
"""
Tests for L{URL}.
"""
def assertUnicoded(self, u):
"""
The given L{URL}'s components should be L{unicode}.
@param u: The L{URL} to test.
"""
self.assertTrue(isinstance(u.scheme, unicode)
or u.scheme is None, repr(u))
self.assertTrue(isinstance(u.host, unicode)
or u.host is None, repr(u))
for seg in u.path:
self.assertTrue(isinstance(seg, unicode), repr(u))
for (k, v) in u.query:
self.assertTrue(isinstance(k, unicode), repr(u))
self.assertTrue(v is None or isinstance(v, unicode), repr(u))
self.assertTrue(isinstance(u.fragment, unicode), repr(u))
def assertURL(self, u, scheme, host, path, query,
fragment, port, userinfo=u''):
"""
The given L{URL} should have the given components.
@param u: The actual L{URL} to examine.
@param scheme: The expected scheme.
@param host: The expected host.
@param path: The expected path.
@param query: The expected query.
@param fragment: The expected fragment.
@param port: The expected port.
@param userinfo: The expected userinfo.
"""
actual = (u.scheme, u.host, u.path, u.query,
u.fragment, u.port, u.userinfo)
expected = (scheme, host, tuple(path), tuple(query),
fragment, port, u.userinfo)
self.assertEqual(actual, expected)
def test_initDefaults(self):
"""
L{URL} should have appropriate default values.
"""
def check(u):
self.assertUnicoded(u)
self.assertURL(u, u'http', u'', [], [], u'', 80, u'')
check(URL(u'http', u''))
check(URL(u'http', u'', [], []))
check(URL(u'http', u'', [], [], u''))
def test_init(self):
"""
L{URL} should accept L{unicode} parameters.
"""
u = URL(u's', u'h', [u'p'], [(u'k', u'v'), (u'k', None)], u'f')
self.assertUnicoded(u)
self.assertURL(u, u's', u'h', [u'p'], [(u'k', u'v'), (u'k', None)],
u'f', None)
self.assertURL(URL(u'http', u'\xe0', [u'\xe9'],
[(u'\u03bb', u'\u03c0')], u'\u22a5'),
u'http', u'\xe0', [u'\xe9'],
[(u'\u03bb', u'\u03c0')], u'\u22a5', 80)
def test_initPercent(self):
"""
L{URL} should accept (and not interpret) percent characters.
"""
u = URL(u's', u'%68', [u'%70'], [(u'%6B', u'%76'), (u'%6B', None)],
u'%66')
self.assertUnicoded(u)
self.assertURL(u,
u's', u'%68', [u'%70'],
[(u'%6B', u'%76'), (u'%6B', None)],
u'%66', None)
def test_repr(self):
"""
L{URL.__repr__} will display the canonical form of the URL, wrapped in
a L{URL.fromText} invocation, so that it is C{eval}-able but still easy
to read.
"""
self.assertEqual(
repr(URL(scheme=u'http', host=u'foo', path=[u'bar'],
query=[(u'baz', None), (u'k', u'v')],
fragment=u'frob')),
"URL.fromText(%s)" % (repr(u"http://foo/bar?baz&k=v#frob"),)
)
def test_fromText(self):
"""
Round-tripping L{URL.fromText} with C{str} results in an equivalent
URL.
"""
urlpath = URL.fromText(theurl)
self.assertEqual(theurl, urlpath.asText())
def test_roundtrip(self):
"""
L{URL.asText} should invert L{URL.fromText}.
"""
tests = (
"http://localhost",
"http://localhost/",
"http://localhost/foo",
"http://localhost/foo/",
"http://localhost/foo!!bar/",
"http://localhost/foo%20bar/",
"http://localhost/foo%2Fbar/",
"http://localhost/foo?n",
"http://localhost/foo?n=v",
"http://localhost/foo?n=/a/b",
"http://example.com/foo!@$bar?b!@z=123",
"http://localhost/asd?a=asd%20sdf/345",
"http://(%2525)/(%2525)?(%2525)&(%2525)=(%2525)#(%2525)",
"http://(%C3%A9)/(%C3%A9)?(%C3%A9)&(%C3%A9)=(%C3%A9)#(%C3%A9)",
)
for test in tests:
result = URL.fromText(test).asText()
self.assertEqual(test, result)
def test_equality(self):
"""
Two URLs decoded using L{URL.fromText} will be equal (C{==}) if they
decoded the same URL string, and unequal (C{!=}) if they decoded different
strings.
"""
urlpath = URL.fromText(theurl)
self.assertEqual(urlpath, URL.fromText(theurl))
self.assertNotEqual(
urlpath,
URL.fromText('ftp://www.anotherinvaliddomain.com/'
'foo/bar/baz/?zot=21&zut')
)
def test_fragmentEquality(self):
"""
An URL created with the empty string for a fragment compares equal
to an URL created with an unspecified fragment.
"""
self.assertEqual(URL(fragment=u''), URL())
self.assertEqual(URL.fromText(u"http://localhost/#"),
URL.fromText(u"http://localhost/"))
def test_child(self):
"""
L{URL.child} appends a new path segment, but does not affect the query
or fragment.
"""
urlpath = URL.fromText(theurl)
self.assertEqual("http://www.foo.com/a/nice/path/gong?zot=23&zut",
urlpath.child(u'gong').asText())
self.assertEqual("http://www.foo.com/a/nice/path/gong%2F?zot=23&zut",
urlpath.child(u'gong/').asText())
self.assertEqual(
"http://www.foo.com/a/nice/path/gong%2Fdouble?zot=23&zut",
urlpath.child(u'gong/double').asText()
)
self.assertEqual(
"http://www.foo.com/a/nice/path/gong%2Fdouble%2F?zot=23&zut",
urlpath.child(u'gong/double/').asText()
)
def test_multiChild(self):
"""
L{URL.child} receives multiple segments as C{*args} and appends each in
turn.
"""
self.assertEqual(URL.fromText('http://example.com/a/b')
.child('c', 'd', 'e').asText(),
'http://example.com/a/b/c/d/e')
def test_childInitRoot(self):
"""
L{URL.child} of a L{URL} without a path produces a L{URL} with a single
path segment.
"""
childURL = URL(host=u"www.foo.com").child(u"c")
self.assertEqual(childURL.rooted, True)
self.assertEqual("http://www.foo.com/c", childURL.asText())
def test_sibling(self):
"""
L{URL.sibling} of a L{URL} replaces the last path segment, but does not
affect the query or fragment.
"""
urlpath = URL.fromText(theurl)
self.assertEqual(
"http://www.foo.com/a/nice/path/sister?zot=23&zut",
urlpath.sibling(u'sister').asText()
)
# Use an url without trailing '/' to check child removal.
theurl2 = "http://www.foo.com/a/nice/path?zot=23&zut"
urlpath = URL.fromText(theurl2)
self.assertEqual(
"http://www.foo.com/a/nice/sister?zot=23&zut",
urlpath.sibling(u'sister').asText()
)
def test_click(self):
"""
L{URL.click} interprets the given string as a relative URI-reference
and returns a new L{URL} interpreting C{self} as the base absolute URI.
"""
urlpath = URL.fromText(theurl)
# A null uri should be valid (return here).
self.assertEqual("http://www.foo.com/a/nice/path/?zot=23&zut",
urlpath.click("").asText())
# A simple relative path removes the query.
self.assertEqual("http://www.foo.com/a/nice/path/click",
urlpath.click("click").asText())
# An absolute path replaces the path and query.
self.assertEqual("http://www.foo.com/click",
urlpath.click("/click").asText())
# Replace just the query.
self.assertEqual("http://www.foo.com/a/nice/path/?burp",
urlpath.click("?burp").asText())
# One full url to another should not generate '//' between authority
# and path.
self.assertNotIn("//foobar",
urlpath.click('http://www.foo.com/foobar').asText())
# From a url with no query clicking a url with a query, the query
# should be handled properly.
u = URL.fromText('http://www.foo.com/me/noquery')
self.assertEqual('http://www.foo.com/me/17?spam=158',
u.click('/me/17?spam=158').asText())
# Check that everything from the path onward is removed when the click
# link has no path.
u = URL.fromText('http://localhost/foo?abc=def')
self.assertEqual(u.click('http://www.python.org').asText(),
'http://www.python.org')
def test_clickRFC3986(self):
"""
L{URL.click} should correctly resolve the examples in RFC 3986.
"""
base = URL.fromText(relativeLinkBaseForRFC3986)
for (ref, expected) in relativeLinkTestsForRFC3986:
self.assertEqual(base.click(ref).asText(), expected)
def test_clickSchemeRelPath(self):
"""
L{URL.click} should not accept schemes with relative paths.
"""
base = URL.fromText(relativeLinkBaseForRFC3986)
self.assertRaises(NotImplementedError, base.click, 'g:h')
self.assertRaises(NotImplementedError, base.click, 'http:h')
def test_cloneUnchanged(self):
"""
Verify that L{URL.replace} doesn't change any of the arguments it
is passed.
"""
urlpath = URL.fromText('https://x:1/y?z=1#A')
self.assertEqual(
urlpath.replace(urlpath.scheme,
urlpath.host,
urlpath.path,
urlpath.query,
urlpath.fragment,
urlpath.port),
urlpath)
self.assertEqual(
urlpath.replace(),
urlpath)
def test_clickCollapse(self):
"""
L{URL.click} collapses C{.} and C{..} according to RFC 3986 section
5.2.4.
"""
tests = [
['http://localhost/', '.', 'http://localhost/'],
['http://localhost/', '..', 'http://localhost/'],
['http://localhost/a/b/c', '.', 'http://localhost/a/b/'],
['http://localhost/a/b/c', '..', 'http://localhost/a/'],
['http://localhost/a/b/c', './d/e', 'http://localhost/a/b/d/e'],
['http://localhost/a/b/c', '../d/e', 'http://localhost/a/d/e'],
['http://localhost/a/b/c', '/./d/e', 'http://localhost/d/e'],
['http://localhost/a/b/c', '/../d/e', 'http://localhost/d/e'],
['http://localhost/a/b/c/', '../../d/e/',
'http://localhost/a/d/e/'],
['http://localhost/a/./c', '../d/e', 'http://localhost/d/e'],
['http://localhost/a/./c/', '../d/e', 'http://localhost/a/d/e'],
['http://localhost/a/b/c/d', './e/../f/../g',
'http://localhost/a/b/c/g'],
['http://localhost/a/b/c', 'd//e', 'http://localhost/a/b/d//e'],
]
for start, click, expected in tests:
actual = URL.fromText(start).click(click).asText()
self.assertEqual(
actual,
expected,
"{start}.click({click}) => {actual} not {expected}".format(
start=start,
click=repr(click),
actual=actual,
expected=expected,
)
)
def test_queryAdd(self):
"""
L{URL.add} adds query parameters.
"""
self.assertEqual(
"http://www.foo.com/a/nice/path/?foo=bar",
URL.fromText("http://www.foo.com/a/nice/path/")
.add(u"foo", u"bar").asText())
self.assertEqual(
"http://www.foo.com/?foo=bar",
URL(host=u"www.foo.com").add(u"foo", u"bar")
.asText())
urlpath = URL.fromText(theurl)
self.assertEqual(
"http://www.foo.com/a/nice/path/?zot=23&zut&burp",
urlpath.add(u"burp").asText())
self.assertEqual(
"http://www.foo.com/a/nice/path/?zot=23&zut&burp=xxx",
urlpath.add(u"burp", u"xxx").asText())
self.assertEqual(
"http://www.foo.com/a/nice/path/?zot=23&zut&burp=xxx&zing",
urlpath.add(u"burp", u"xxx").add(u"zing").asText())
# Note the inversion!
self.assertEqual(
"http://www.foo.com/a/nice/path/?zot=23&zut&zing&burp=xxx",
urlpath.add(u"zing").add(u"burp", u"xxx").asText())
# Note the two values for the same name.
self.assertEqual(
"http://www.foo.com/a/nice/path/?zot=23&zut&burp=xxx&zot=32",
urlpath.add(u"burp", u"xxx").add(u"zot", u'32')
.asText())
def test_querySet(self):
"""
L{URL.set} replaces query parameters by name.
"""
urlpath = URL.fromText(theurl)
self.assertEqual(
"http://www.foo.com/a/nice/path/?zot=32&zut",
urlpath.set(u"zot", u'32').asText())
# Replace name without value with name/value and vice-versa.
self.assertEqual(
"http://www.foo.com/a/nice/path/?zot&zut=itworked",
urlpath.set(u"zot").set(u"zut", u"itworked").asText()
)
# Q: what happens when the query has two values and we replace?
# A: we replace both values with a single one
self.assertEqual(
"http://www.foo.com/a/nice/path/?zot=32&zut",
urlpath.add(u"zot", u"xxx").set(u"zot", u'32').asText()
)
def test_queryRemove(self):
"""
L{URL.remove} removes all instances of a query parameter.
"""
url = URL.fromText(u"https://example.com/a/b/?foo=1&bar=2&foo=3")
self.assertEqual(
url.remove(u"foo"),
URL.fromText(u"https://example.com/a/b/?bar=2")
)
def test_parseEqualSignInParamValue(self):
"""
Every C{=}-sign after the first in a query parameter is simply included
in the value of the parameter.
"""
u = URL.fromText('http://localhost/?=x=x=x')
self.assertEqual(u.get(u''), ['x=x=x'])
self.assertEqual(u.asText(), 'http://localhost/?=x%3Dx%3Dx')
u = URL.fromText('http://localhost/?foo=x=x=x&bar=y')
self.assertEqual(u.query, (('foo', 'x=x=x'),
('bar', 'y')))
self.assertEqual(u.asText(), 'http://localhost/?foo=x%3Dx%3Dx&bar=y')
def test_empty(self):
"""
An empty L{URL} should serialize as the empty string.
"""
self.assertEqual(URL().asText(), u'')
def test_justQueryText(self):
"""
An L{URL} with query text should serialize as just query text.
"""
u = URL(query=[(u"hello", u"world")])
self.assertEqual(u.asText(), u'?hello=world')
def test_identicalEqual(self):
"""
L{URL} compares equal to itself.
"""
u = URL.fromText('http://localhost/')
self.assertEqual(u, u)
def test_similarEqual(self):
"""
URLs with equivalent components should compare equal.
"""
u1 = URL.fromText('http://localhost/')
u2 = URL.fromText('http://localhost/')
self.assertEqual(u1, u2)
def test_differentNotEqual(self):
"""
L{URL}s that refer to different resources are both unequal (C{!=}) and
also not equal (not C{==}).
"""
u1 = URL.fromText('http://localhost/a')
u2 = URL.fromText('http://localhost/b')
self.assertFalse(u1 == u2, "%r != %r" % (u1, u2))
self.assertNotEqual(u1, u2)
def test_otherTypesNotEqual(self):
"""
L{URL} is not equal (C{==}) to other types.
"""
u = URL.fromText('http://localhost/')
self.assertFalse(u == 42, "URL must not equal a number.")
self.assertFalse(u == object(), "URL must not equal an object.")
self.assertNotEqual(u, 42)
self.assertNotEqual(u, object())
def test_identicalNotUnequal(self):
"""
Identical L{URL}s are not unequal (C{!=}) to each other.
"""
u = URL.fromText('http://localhost/')
self.assertFalse(u != u, "%r == itself" % u)
def test_similarNotUnequal(self):
"""
Structurally similar L{URL}s are not unequal (C{!=}) to each other.
"""
u1 = URL.fromText('http://localhost/')
u2 = URL.fromText('http://localhost/')
self.assertFalse(u1 != u2, "%r == %r" % (u1, u2))
def test_differentUnequal(self):
"""
Structurally different L{URL}s are unequal (C{!=}) to each other.
"""
u1 = URL.fromText('http://localhost/a')
u2 = URL.fromText('http://localhost/b')
self.assertTrue(u1 != u2, "%r == %r" % (u1, u2))
def test_otherTypesUnequal(self):
"""
L{URL} is unequal (C{!=}) to other types.
"""
u = URL.fromText('http://localhost/')
self.assertTrue(u != 42, "URL must differ from a number.")
self.assertTrue(u != object(), "URL must differ from an object.")
def test_asURI(self):
"""
L{URL.asURI} produces an URI which converts any URI unicode encoding
into pure US-ASCII and returns a new L{URL}.
"""
unicodey = ('http://\N{LATIN SMALL LETTER E WITH ACUTE}.com/'
'\N{LATIN SMALL LETTER E}\N{COMBINING ACUTE ACCENT}'
'?\N{LATIN SMALL LETTER A}\N{COMBINING ACUTE ACCENT}='
'\N{LATIN SMALL LETTER I}\N{COMBINING ACUTE ACCENT}'
'#\N{LATIN SMALL LETTER U}\N{COMBINING ACUTE ACCENT}')
iri = URL.fromText(unicodey)
uri = iri.asURI()
self.assertEqual(iri.host, '\N{LATIN SMALL LETTER E WITH ACUTE}.com')
self.assertEqual(iri.path[0],
'\N{LATIN SMALL LETTER E}\N{COMBINING ACUTE ACCENT}')
self.assertEqual(iri.asText(), unicodey)
expectedURI = 'http://xn--9ca.com/%C3%A9?%C3%A1=%C3%AD#%C3%BA'
actualURI = uri.asText()
self.assertEqual(actualURI, expectedURI,
'%r != %r' % (actualURI, expectedURI))
def test_asIRI(self):
"""
L{URL.asIRI} decodes any percent-encoded text in the URI, making it
more suitable for reading by humans, and returns a new L{URL}.
"""
asciiish = 'http://xn--9ca.com/%C3%A9?%C3%A1=%C3%AD#%C3%BA'
uri = URL.fromText(asciiish)
iri = uri.asIRI()
self.assertEqual(uri.host, 'xn--9ca.com')
self.assertEqual(uri.path[0], '%C3%A9')
self.assertEqual(uri.asText(), asciiish)
expectedIRI = ('http://\N{LATIN SMALL LETTER E WITH ACUTE}.com/'
'\N{LATIN SMALL LETTER E WITH ACUTE}'
'?\N{LATIN SMALL LETTER A WITH ACUTE}='
'\N{LATIN SMALL LETTER I WITH ACUTE}'
'#\N{LATIN SMALL LETTER U WITH ACUTE}')
actualIRI = iri.asText()
self.assertEqual(actualIRI, expectedIRI,
'%r != %r' % (actualIRI, expectedIRI))
def test_badUTF8AsIRI(self):
"""
Bad UTF-8 in a path segment, query parameter, or fragment results in
that portion of the URI remaining percent-encoded in the IRI.
"""
urlWithBinary = 'http://xn--9ca.com/%00%FF/%C3%A9'
uri = URL.fromText(urlWithBinary)
iri = uri.asIRI()
expectedIRI = ('http://\N{LATIN SMALL LETTER E WITH ACUTE}.com/'
'%00%FF/'
'\N{LATIN SMALL LETTER E WITH ACUTE}')
actualIRI = iri.asText()
self.assertEqual(actualIRI, expectedIRI,
'%r != %r' % (actualIRI, expectedIRI))
def test_alreadyIRIAsIRI(self):
"""
A L{URL} composed of non-ASCII text will result in non-ASCII text.
"""
unicodey = ('http://\N{LATIN SMALL LETTER E WITH ACUTE}.com/'
'\N{LATIN SMALL LETTER E}\N{COMBINING ACUTE ACCENT}'
'?\N{LATIN SMALL LETTER A}\N{COMBINING ACUTE ACCENT}='
'\N{LATIN SMALL LETTER I}\N{COMBINING ACUTE ACCENT}'
'#\N{LATIN SMALL LETTER U}\N{COMBINING ACUTE ACCENT}')
iri = URL.fromText(unicodey)
alsoIRI = iri.asIRI()
self.assertEqual(alsoIRI.asText(), unicodey)
def test_alreadyURIAsURI(self):
"""
A L{URL} composed of encoded text will remain encoded.
"""
expectedURI = 'http://xn--9ca.com/%C3%A9?%C3%A1=%C3%AD#%C3%BA'
uri = URL.fromText(expectedURI)
actualURI = uri.asURI().asText()
self.assertEqual(actualURI, expectedURI)
def test_userinfo(self):
"""
L{URL.fromText} will parse the C{userinfo} portion of the URI
separately from the host and port.
"""
url = URL.fromText(
'http://someuser:[email protected]/some-segment@ignore'
)
self.assertEqual(url.authority(True),
'someuser:[email protected]')
self.assertEqual(url.authority(False), 'someuser:@example.com')
self.assertEqual(url.userinfo, 'someuser:somepassword')
self.assertEqual(url.user, 'someuser')
self.assertEqual(url.asText(),
'http://someuser:@example.com/some-segment@ignore')
self.assertEqual(
url.replace(userinfo=u"someuser").asText(),
'http://[email protected]/some-segment@ignore'
)
def test_portText(self):
"""
L{URL.fromText} parses custom port numbers as integers.
"""
portURL = URL.fromText(u"http://www.example.com:8080/")
self.assertEqual(portURL.port, 8080)
self.assertEqual(portURL.asText(), u"http://www.example.com:8080/")
def test_mailto(self):
"""
Although L{URL} instances are mainly for dealing with HTTP, other
schemes (such as C{mailto:}) should work as well. For example,
L{URL.fromText}/L{URL.asText} round-trips cleanly for a C{mailto:} URL
representing an email address.
"""
self.assertEqual(URL.fromText(u"mailto:[email protected]").asText(),
u"mailto:[email protected]")
def test_queryIterable(self):
"""
When a L{URL} is created with a C{query} argument, the C{query}
argument is converted into an N-tuple of 2-tuples.
"""
url = URL(query=[[u'alpha', u'beta']])
self.assertEqual(url.query, ((u'alpha', u'beta'),))
def test_pathIterable(self):
"""
When a L{URL} is created with a C{path} argument, the C{path} is
converted into a tuple.
"""
url = URL(path=[u'hello', u'world'])
self.assertEqual(url.path, (u'hello', u'world'))
def test_invalidArguments(self):
"""
Passing an argument of the wrong type to any of the constructor
arguments of L{URL} will raise a descriptive L{TypeError}.
L{URL} typechecks very aggressively to ensure that its constituent
parts are all properly immutable and to prevent confusing errors when
bad data crops up in a method call long after the code that called the
constructor is off the stack.
"""
class Unexpected(object):
def __str__(self):
return "wrong"
def __repr__(self):
return "<unexpected>"
defaultExpectation = "unicode" if bytes is str else "str"
def assertRaised(raised, expectation, name):
self.assertEqual(str(raised.exception),
"expected {} for {}, got {}".format(
expectation,
name, "<unexpected>"))
def check(param, expectation=defaultExpectation):
with self.assertRaises(TypeError) as raised:
URL(**{param: Unexpected()})
assertRaised(raised, expectation, param)
check("scheme")
check("host")
check("fragment")
check("rooted", "bool")
check("userinfo")
check("port", "int or NoneType")
with self.assertRaises(TypeError) as raised:
URL(path=[Unexpected(),])
assertRaised(raised, defaultExpectation, "path segment")
with self.assertRaises(TypeError) as raised:
URL(query=[(u"name", Unexpected()),])
assertRaised(raised, defaultExpectation + " or NoneType",
"query parameter value")
with self.assertRaises(TypeError) as raised:
URL(query=[(Unexpected(), u"value"),])
assertRaised(raised, defaultExpectation, "query parameter name")
# No custom error message for this one, just want to make sure
# non-2-tuples don't get through.
with self.assertRaises(TypeError):
URL(query=[Unexpected()])
with self.assertRaises(ValueError):
URL(query=[(u'k', u'v', u'vv')])
with self.assertRaises(ValueError):
URL(query=[(u'k',)])
url = URL.fromText("https://valid.example.com/")
with self.assertRaises(TypeError) as raised:
url.child(Unexpected())
assertRaised(raised, defaultExpectation, "path segment")
with self.assertRaises(TypeError) as raised:
url.sibling(Unexpected())
assertRaised(raised, defaultExpectation, "path segment")
with self.assertRaises(TypeError) as raised:
url.click(Unexpected())
assertRaised(raised, defaultExpectation, "relative URL")
def test_technicallyTextIsIterableBut(self):
"""
Technically, L{str} (or L{unicode}, as appropriate) is iterable, but
C{URL(path="foo")} resulting in C{URL.fromText("f/o/o")} is never what
you want.
"""
with self.assertRaises(TypeError) as raised:
URL(path=u'foo')
self.assertEqual(
str(raised.exception),
"expected iterable of text for path, got text itself: {}"
.format(repr(u'foo'))
)
| gpl-3.0 |
alexryndin/ambari | ambari-server/src/test/python/TestYAMLUtils.py | 5 | 3715 | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from unittest import TestCase
from ambari_commons import yaml_utils
class TestYAMLUtils(TestCase):
def setUp(self):
pass
def test_convert_yaml_array(self):
expected_values = []
expected_values.append("c6401.ambari.apache.org")
values = yaml_utils.get_values_from_yaml_array("['c6401.ambari.apache.org']")
self.assertEquals(expected_values, values)
expected_values.append("c6402.ambari.apache.org")
values = yaml_utils.get_values_from_yaml_array("['c6401.ambari.apache.org', 'c6402.ambari.apache.org']")
self.assertEquals(expected_values, values)
values = yaml_utils.get_values_from_yaml_array('["c6401.ambari.apache.org", "c6402.ambari.apache.org"]')
self.assertEquals(expected_values, values)
values = yaml_utils.get_values_from_yaml_array('[\'c6401.ambari.apache.org\', "c6402.ambari.apache.org"]')
self.assertEquals(expected_values, values)
def test_yaml_property_escaping(self):
"""
Tests that YAML values are escaped with quotes properly when needed
"""
self.assertEquals("True", yaml_utils.escape_yaml_property("True"))
self.assertEquals("FALSE", yaml_utils.escape_yaml_property("FALSE"))
self.assertEquals("yes", yaml_utils.escape_yaml_property("yes"))
self.assertEquals("NO", yaml_utils.escape_yaml_property("NO"))
self.assertEquals("28", yaml_utils.escape_yaml_property("28"))
self.assertEquals("28.0", yaml_utils.escape_yaml_property("28.0"))
self.assertEquals("[a,b,c]", yaml_utils.escape_yaml_property("[a,b,c]"))
self.assertEquals("{ foo : bar }", yaml_utils.escape_yaml_property("{ foo : bar }"))
# some strings which should be escaped
self.assertEquals("'5f'", yaml_utils.escape_yaml_property("5f"))
self.assertEquals("'28.O'", yaml_utils.escape_yaml_property("28.O"))
self.assertEquals("'This is a test of a string'", yaml_utils.escape_yaml_property("This is a test of a string"))
# test maps
map = """
storm-cluster:
hosts:
[c6401.ambari.apache.org, c6402.ambari.apache.org, c6403-master.ambari.apache.org]
groups:
[hadoop, hadoop-secure]
"""
escaped_map = yaml_utils.escape_yaml_property(map)
self.assertTrue(escaped_map.startswith("\n"))
self.assertFalse("'" in escaped_map)
# try some weird but valid formatting
map = """
storm-cluster :
hosts :
[c6401.ambari.apache.org, c6402.ambari.apache.org, c6403-master.ambari.apache.org]
groups :
[hadoop!!!, hadoop-secure!!!!-----]
"""
escaped_map = yaml_utils.escape_yaml_property(map)
self.assertTrue(escaped_map.startswith("\n"))
self.assertFalse("'" in escaped_map)
# try some bad formatting - this is not a map
map = """ foo : bar :
[baz]"""
escaped_map = yaml_utils.escape_yaml_property(map)
self.assertFalse(escaped_map.startswith("\n"))
self.assertTrue("'" in escaped_map)
| apache-2.0 |
GiladE/birde | venv/lib/python2.7/site-packages/psycopg2/tests/test_connection.py | 39 | 39512 | #!/usr/bin/env python
# test_connection.py - unit test for connection attributes
#
# Copyright (C) 2008-2011 James Henstridge <[email protected]>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import os
import time
import threading
from operator import attrgetter
import psycopg2
import psycopg2.errorcodes
import psycopg2.extensions
from testutils import unittest, decorate_all_tests, skip_if_no_superuser
from testutils import skip_before_postgres, skip_after_postgres
from testutils import ConnectingTestCase, skip_if_tpc_disabled
from testutils import skip_if_windows
from testconfig import dsn, dbname
class ConnectionTests(ConnectingTestCase):
def test_closed_attribute(self):
conn = self.conn
self.assertEqual(conn.closed, False)
conn.close()
self.assertEqual(conn.closed, True)
def test_close_idempotent(self):
conn = self.conn
conn.close()
conn.close()
self.assert_(conn.closed)
def test_cursor_closed_attribute(self):
conn = self.conn
curs = conn.cursor()
self.assertEqual(curs.closed, False)
curs.close()
self.assertEqual(curs.closed, True)
# Closing the connection closes the cursor:
curs = conn.cursor()
conn.close()
self.assertEqual(curs.closed, True)
@skip_before_postgres(8, 4)
@skip_if_no_superuser
@skip_if_windows
def test_cleanup_on_badconn_close(self):
# ticket #148
conn = self.conn
cur = conn.cursor()
try:
cur.execute("select pg_terminate_backend(pg_backend_pid())")
except psycopg2.OperationalError, e:
if e.pgcode != psycopg2.errorcodes.ADMIN_SHUTDOWN:
raise
except psycopg2.DatabaseError, e:
# curiously when disconnected in green mode we get a DatabaseError
# without pgcode.
if e.pgcode is not None:
raise
self.assertEqual(conn.closed, 2)
conn.close()
self.assertEqual(conn.closed, 1)
def test_reset(self):
conn = self.conn
# switch isolation level, then reset
level = conn.isolation_level
conn.set_isolation_level(0)
self.assertEqual(conn.isolation_level, 0)
conn.reset()
# now the isolation level should be equal to saved one
self.assertEqual(conn.isolation_level, level)
def test_notices(self):
conn = self.conn
cur = conn.cursor()
if self.conn.server_version >= 90300:
cur.execute("set client_min_messages=debug1")
cur.execute("create temp table chatty (id serial primary key);")
self.assertEqual("CREATE TABLE", cur.statusmessage)
self.assert_(conn.notices)
def test_notices_consistent_order(self):
conn = self.conn
cur = conn.cursor()
if self.conn.server_version >= 90300:
cur.execute("set client_min_messages=debug1")
cur.execute("create temp table table1 (id serial); create temp table table2 (id serial);")
cur.execute("create temp table table3 (id serial); create temp table table4 (id serial);")
self.assertEqual(4, len(conn.notices))
self.assert_('table1' in conn.notices[0])
self.assert_('table2' in conn.notices[1])
self.assert_('table3' in conn.notices[2])
self.assert_('table4' in conn.notices[3])
def test_notices_limited(self):
conn = self.conn
cur = conn.cursor()
if self.conn.server_version >= 90300:
cur.execute("set client_min_messages=debug1")
for i in range(0, 100, 10):
sql = " ".join(["create temp table table%d (id serial);" % j for j in range(i, i+10)])
cur.execute(sql)
self.assertEqual(50, len(conn.notices))
self.assert_('table99' in conn.notices[-1], conn.notices[-1])
def test_server_version(self):
self.assert_(self.conn.server_version)
def test_protocol_version(self):
self.assert_(self.conn.protocol_version in (2,3),
self.conn.protocol_version)
def test_tpc_unsupported(self):
cnn = self.conn
if cnn.server_version >= 80100:
return self.skipTest("tpc is supported")
self.assertRaises(psycopg2.NotSupportedError,
cnn.xid, 42, "foo", "bar")
@skip_before_postgres(8, 2)
def test_concurrent_execution(self):
def slave():
cnn = self.connect()
cur = cnn.cursor()
cur.execute("select pg_sleep(4)")
cur.close()
cnn.close()
t1 = threading.Thread(target=slave)
t2 = threading.Thread(target=slave)
t0 = time.time()
t1.start()
t2.start()
t1.join()
t2.join()
self.assert_(time.time() - t0 < 7,
"something broken in concurrency")
def test_encoding_name(self):
self.conn.set_client_encoding("EUC_JP")
# conn.encoding is 'EUCJP' now.
cur = self.conn.cursor()
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE, cur)
cur.execute("select 'foo'::text;")
self.assertEqual(cur.fetchone()[0], u'foo')
def test_connect_nonnormal_envvar(self):
# We must perform encoding normalization at connection time
self.conn.close()
oldenc = os.environ.get('PGCLIENTENCODING')
os.environ['PGCLIENTENCODING'] = 'utf-8' # malformed spelling
try:
self.conn = self.connect()
finally:
if oldenc is not None:
os.environ['PGCLIENTENCODING'] = oldenc
else:
del os.environ['PGCLIENTENCODING']
def test_weakref(self):
from weakref import ref
import gc
conn = psycopg2.connect(dsn)
w = ref(conn)
conn.close()
del conn
gc.collect()
self.assert_(w() is None)
def test_commit_concurrency(self):
# The problem is the one reported in ticket #103. Because of a bad
# status check, we commit even when a commit is already on its way.
# We can detect this condition by the warnings.
conn = self.conn
notices = []
stop = []
def committer():
while not stop:
conn.commit()
while conn.notices:
notices.append((2, conn.notices.pop()))
cur = conn.cursor()
t1 = threading.Thread(target=committer)
t1.start()
i = 1
for i in range(1000):
cur.execute("select %s;",(i,))
conn.commit()
while conn.notices:
notices.append((1, conn.notices.pop()))
# Stop the committer thread
stop.append(True)
self.assert_(not notices, "%d notices raised" % len(notices))
def test_connect_cursor_factory(self):
import psycopg2.extras
conn = self.connect(cursor_factory=psycopg2.extras.DictCursor)
cur = conn.cursor()
cur.execute("select 1 as a")
self.assertEqual(cur.fetchone()['a'], 1)
def test_cursor_factory(self):
self.assertEqual(self.conn.cursor_factory, None)
cur = self.conn.cursor()
cur.execute("select 1 as a")
self.assertRaises(TypeError, (lambda r: r['a']), cur.fetchone())
self.conn.cursor_factory = psycopg2.extras.DictCursor
self.assertEqual(self.conn.cursor_factory, psycopg2.extras.DictCursor)
cur = self.conn.cursor()
cur.execute("select 1 as a")
self.assertEqual(cur.fetchone()['a'], 1)
self.conn.cursor_factory = None
self.assertEqual(self.conn.cursor_factory, None)
cur = self.conn.cursor()
cur.execute("select 1 as a")
self.assertRaises(TypeError, (lambda r: r['a']), cur.fetchone())
def test_cursor_factory_none(self):
# issue #210
conn = self.connect()
cur = conn.cursor(cursor_factory=None)
self.assertEqual(type(cur), psycopg2.extensions.cursor)
conn = self.connect(cursor_factory=psycopg2.extras.DictCursor)
cur = conn.cursor(cursor_factory=None)
self.assertEqual(type(cur), psycopg2.extras.DictCursor)
def test_failed_init_status(self):
class SubConnection(psycopg2.extensions.connection):
def __init__(self, dsn):
try:
super(SubConnection, self).__init__(dsn)
except Exception:
pass
c = SubConnection("dbname=thereisnosuchdatabasemate password=foobar")
self.assert_(c.closed, "connection failed so it must be closed")
self.assert_('foobar' not in c.dsn, "password was not obscured")
class IsolationLevelsTestCase(ConnectingTestCase):
def setUp(self):
ConnectingTestCase.setUp(self)
conn = self.connect()
cur = conn.cursor()
try:
cur.execute("drop table isolevel;")
except psycopg2.ProgrammingError:
conn.rollback()
cur.execute("create table isolevel (id integer);")
conn.commit()
conn.close()
def test_isolation_level(self):
conn = self.connect()
self.assertEqual(
conn.isolation_level,
psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
def test_encoding(self):
conn = self.connect()
self.assert_(conn.encoding in psycopg2.extensions.encodings)
def test_set_isolation_level(self):
conn = self.connect()
curs = conn.cursor()
levels = [
(None, psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT),
('read uncommitted', psycopg2.extensions.ISOLATION_LEVEL_READ_UNCOMMITTED),
('read committed', psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED),
('repeatable read', psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ),
('serializable', psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE),
]
for name, level in levels:
conn.set_isolation_level(level)
# the only values available on prehistoric PG versions
if conn.server_version < 80000:
if level in (
psycopg2.extensions.ISOLATION_LEVEL_READ_UNCOMMITTED,
psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ):
name, level = levels[levels.index((name, level)) + 1]
self.assertEqual(conn.isolation_level, level)
curs.execute('show transaction_isolation;')
got_name = curs.fetchone()[0]
if name is None:
curs.execute('show default_transaction_isolation;')
name = curs.fetchone()[0]
self.assertEqual(name, got_name)
conn.commit()
self.assertRaises(ValueError, conn.set_isolation_level, -1)
self.assertRaises(ValueError, conn.set_isolation_level, 5)
def test_set_isolation_level_abort(self):
conn = self.connect()
cur = conn.cursor()
self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_IDLE,
conn.get_transaction_status())
cur.execute("insert into isolevel values (10);")
self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_INTRANS,
conn.get_transaction_status())
conn.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE)
self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_IDLE,
conn.get_transaction_status())
cur.execute("select count(*) from isolevel;")
self.assertEqual(0, cur.fetchone()[0])
cur.execute("insert into isolevel values (10);")
self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_INTRANS,
conn.get_transaction_status())
conn.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_IDLE,
conn.get_transaction_status())
cur.execute("select count(*) from isolevel;")
self.assertEqual(0, cur.fetchone()[0])
cur.execute("insert into isolevel values (10);")
self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_IDLE,
conn.get_transaction_status())
conn.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_IDLE,
conn.get_transaction_status())
cur.execute("select count(*) from isolevel;")
self.assertEqual(1, cur.fetchone()[0])
def test_isolation_level_autocommit(self):
cnn1 = self.connect()
cnn2 = self.connect()
cnn2.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
cur1 = cnn1.cursor()
cur1.execute("select count(*) from isolevel;")
self.assertEqual(0, cur1.fetchone()[0])
cnn1.commit()
cur2 = cnn2.cursor()
cur2.execute("insert into isolevel values (10);")
cur1.execute("select count(*) from isolevel;")
self.assertEqual(1, cur1.fetchone()[0])
def test_isolation_level_read_committed(self):
cnn1 = self.connect()
cnn2 = self.connect()
cnn2.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
cur1 = cnn1.cursor()
cur1.execute("select count(*) from isolevel;")
self.assertEqual(0, cur1.fetchone()[0])
cnn1.commit()
cur2 = cnn2.cursor()
cur2.execute("insert into isolevel values (10);")
cur1.execute("insert into isolevel values (20);")
cur2.execute("select count(*) from isolevel;")
self.assertEqual(1, cur2.fetchone()[0])
cnn1.commit()
cur2.execute("select count(*) from isolevel;")
self.assertEqual(2, cur2.fetchone()[0])
cur1.execute("select count(*) from isolevel;")
self.assertEqual(1, cur1.fetchone()[0])
cnn2.commit()
cur1.execute("select count(*) from isolevel;")
self.assertEqual(2, cur1.fetchone()[0])
def test_isolation_level_serializable(self):
cnn1 = self.connect()
cnn2 = self.connect()
cnn2.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE)
cur1 = cnn1.cursor()
cur1.execute("select count(*) from isolevel;")
self.assertEqual(0, cur1.fetchone()[0])
cnn1.commit()
cur2 = cnn2.cursor()
cur2.execute("insert into isolevel values (10);")
cur1.execute("insert into isolevel values (20);")
cur2.execute("select count(*) from isolevel;")
self.assertEqual(1, cur2.fetchone()[0])
cnn1.commit()
cur2.execute("select count(*) from isolevel;")
self.assertEqual(1, cur2.fetchone()[0])
cur1.execute("select count(*) from isolevel;")
self.assertEqual(1, cur1.fetchone()[0])
cnn2.commit()
cur1.execute("select count(*) from isolevel;")
self.assertEqual(2, cur1.fetchone()[0])
cur2.execute("select count(*) from isolevel;")
self.assertEqual(2, cur2.fetchone()[0])
def test_isolation_level_closed(self):
cnn = self.connect()
cnn.close()
self.assertRaises(psycopg2.InterfaceError, getattr,
cnn, 'isolation_level')
self.assertRaises(psycopg2.InterfaceError,
cnn.set_isolation_level, 0)
self.assertRaises(psycopg2.InterfaceError,
cnn.set_isolation_level, 1)
class ConnectionTwoPhaseTests(ConnectingTestCase):
def setUp(self):
ConnectingTestCase.setUp(self)
self.make_test_table()
self.clear_test_xacts()
def tearDown(self):
self.clear_test_xacts()
ConnectingTestCase.tearDown(self)
def clear_test_xacts(self):
"""Rollback all the prepared transaction in the testing db."""
cnn = self.connect()
cnn.set_isolation_level(0)
cur = cnn.cursor()
try:
cur.execute(
"select gid from pg_prepared_xacts where database = %s",
(dbname,))
except psycopg2.ProgrammingError:
cnn.rollback()
cnn.close()
return
gids = [ r[0] for r in cur ]
for gid in gids:
cur.execute("rollback prepared %s;", (gid,))
cnn.close()
def make_test_table(self):
cnn = self.connect()
cur = cnn.cursor()
try:
cur.execute("DROP TABLE test_tpc;")
except psycopg2.ProgrammingError:
cnn.rollback()
cur.execute("CREATE TABLE test_tpc (data text);")
cnn.commit()
cnn.close()
def count_xacts(self):
"""Return the number of prepared xacts currently in the test db."""
cnn = self.connect()
cur = cnn.cursor()
cur.execute("""
select count(*) from pg_prepared_xacts
where database = %s;""",
(dbname,))
rv = cur.fetchone()[0]
cnn.close()
return rv
def count_test_records(self):
"""Return the number of records in the test table."""
cnn = self.connect()
cur = cnn.cursor()
cur.execute("select count(*) from test_tpc;")
rv = cur.fetchone()[0]
cnn.close()
return rv
def test_tpc_commit(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_commit');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_prepare()
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_PREPARED)
self.assertEqual(1, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_commit()
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(1, self.count_test_records())
def test_tpc_commit_one_phase(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_commit_1p');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_commit()
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(1, self.count_test_records())
def test_tpc_commit_recovered(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_commit_rec');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_prepare()
cnn.close()
self.assertEqual(1, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
cnn.tpc_commit(xid)
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(1, self.count_test_records())
def test_tpc_rollback(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_rollback');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_prepare()
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_PREPARED)
self.assertEqual(1, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_rollback()
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
def test_tpc_rollback_one_phase(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_rollback_1p');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_rollback()
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
def test_tpc_rollback_recovered(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_commit_rec');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_prepare()
cnn.close()
self.assertEqual(1, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
cnn.tpc_rollback(xid)
self.assertEqual(cnn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
def test_status_after_recover(self):
cnn = self.connect()
self.assertEqual(psycopg2.extensions.STATUS_READY, cnn.status)
xns = cnn.tpc_recover()
self.assertEqual(psycopg2.extensions.STATUS_READY, cnn.status)
cur = cnn.cursor()
cur.execute("select 1")
self.assertEqual(psycopg2.extensions.STATUS_BEGIN, cnn.status)
xns = cnn.tpc_recover()
self.assertEqual(psycopg2.extensions.STATUS_BEGIN, cnn.status)
def test_recovered_xids(self):
# insert a few test xns
cnn = self.connect()
cnn.set_isolation_level(0)
cur = cnn.cursor()
cur.execute("begin; prepare transaction '1-foo';")
cur.execute("begin; prepare transaction '2-bar';")
# read the values to return
cur.execute("""
select gid, prepared, owner, database
from pg_prepared_xacts
where database = %s;""",
(dbname,))
okvals = cur.fetchall()
okvals.sort()
cnn = self.connect()
xids = cnn.tpc_recover()
xids = [ xid for xid in xids if xid.database == dbname ]
xids.sort(key=attrgetter('gtrid'))
# check the values returned
self.assertEqual(len(okvals), len(xids))
for (xid, (gid, prepared, owner, database)) in zip (xids, okvals):
self.assertEqual(xid.gtrid, gid)
self.assertEqual(xid.prepared, prepared)
self.assertEqual(xid.owner, owner)
self.assertEqual(xid.database, database)
def test_xid_encoding(self):
cnn = self.connect()
xid = cnn.xid(42, "gtrid", "bqual")
cnn.tpc_begin(xid)
cnn.tpc_prepare()
cnn = self.connect()
cur = cnn.cursor()
cur.execute("select gid from pg_prepared_xacts where database = %s;",
(dbname,))
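        # The gid generated by psycopg2 is "<format_id>_<b64(gtrid)>_<b64(bqual)>";
        # base64("gtrid") is "Z3RyaWQ=" and base64("bqual") is "YnF1YWw=".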
self.assertEqual('42_Z3RyaWQ=_YnF1YWw=', cur.fetchone()[0])
def test_xid_roundtrip(self):
for fid, gtrid, bqual in [
(0, "", ""),
(42, "gtrid", "bqual"),
(0x7fffffff, "x" * 64, "y" * 64),
]:
cnn = self.connect()
xid = cnn.xid(fid, gtrid, bqual)
cnn.tpc_begin(xid)
cnn.tpc_prepare()
cnn.close()
cnn = self.connect()
xids = [ xid for xid in cnn.tpc_recover()
if xid.database == dbname ]
self.assertEqual(1, len(xids))
xid = xids[0]
self.assertEqual(xid.format_id, fid)
self.assertEqual(xid.gtrid, gtrid)
self.assertEqual(xid.bqual, bqual)
cnn.tpc_rollback(xid)
def test_unparsed_roundtrip(self):
for tid in [
'',
'hello, world!',
'x' * 199, # PostgreSQL's limit in transaction id length
]:
cnn = self.connect()
cnn.tpc_begin(tid)
cnn.tpc_prepare()
cnn.close()
cnn = self.connect()
xids = [ xid for xid in cnn.tpc_recover()
if xid.database == dbname ]
self.assertEqual(1, len(xids))
xid = xids[0]
self.assertEqual(xid.format_id, None)
self.assertEqual(xid.gtrid, tid)
self.assertEqual(xid.bqual, None)
cnn.tpc_rollback(xid)
def test_xid_construction(self):
from psycopg2.extensions import Xid
x1 = Xid(74, 'foo', 'bar')
self.assertEqual(74, x1.format_id)
self.assertEqual('foo', x1.gtrid)
self.assertEqual('bar', x1.bqual)
def test_xid_from_string(self):
from psycopg2.extensions import Xid
x2 = Xid.from_string('42_Z3RyaWQ=_YnF1YWw=')
self.assertEqual(42, x2.format_id)
self.assertEqual('gtrid', x2.gtrid)
self.assertEqual('bqual', x2.bqual)
x3 = Xid.from_string('99_xxx_yyy')
self.assertEqual(None, x3.format_id)
self.assertEqual('99_xxx_yyy', x3.gtrid)
self.assertEqual(None, x3.bqual)
def test_xid_to_string(self):
from psycopg2.extensions import Xid
x1 = Xid.from_string('42_Z3RyaWQ=_YnF1YWw=')
self.assertEqual(str(x1), '42_Z3RyaWQ=_YnF1YWw=')
x2 = Xid.from_string('99_xxx_yyy')
self.assertEqual(str(x2), '99_xxx_yyy')
def test_xid_unicode(self):
cnn = self.connect()
x1 = cnn.xid(10, u'uni', u'code')
cnn.tpc_begin(x1)
cnn.tpc_prepare()
cnn.reset()
xid = [ xid for xid in cnn.tpc_recover()
if xid.database == dbname ][0]
self.assertEqual(10, xid.format_id)
self.assertEqual('uni', xid.gtrid)
self.assertEqual('code', xid.bqual)
def test_xid_unicode_unparsed(self):
# We don't expect people shooting snowmen as transaction ids,
# so if something explodes in an encode error I don't mind.
        # Let's just check that unicode is accepted as a type.
cnn = self.connect()
cnn.set_client_encoding('utf8')
cnn.tpc_begin(u"transaction-id")
cnn.tpc_prepare()
cnn.reset()
xid = [ xid for xid in cnn.tpc_recover()
if xid.database == dbname ][0]
self.assertEqual(None, xid.format_id)
self.assertEqual('transaction-id', xid.gtrid)
self.assertEqual(None, xid.bqual)
def test_cancel_fails_prepared(self):
cnn = self.connect()
cnn.tpc_begin('cancel')
cnn.tpc_prepare()
self.assertRaises(psycopg2.ProgrammingError, cnn.cancel)
def test_tpc_recover_non_dbapi_connection(self):
from psycopg2.extras import RealDictConnection
cnn = self.connect(connection_factory=RealDictConnection)
cnn.tpc_begin('dict-connection')
cnn.tpc_prepare()
cnn.reset()
xids = cnn.tpc_recover()
xid = [ xid for xid in xids if xid.database == dbname ][0]
self.assertEqual(None, xid.format_id)
self.assertEqual('dict-connection', xid.gtrid)
self.assertEqual(None, xid.bqual)
decorate_all_tests(ConnectionTwoPhaseTests, skip_if_tpc_disabled)
class TransactionControlTests(ConnectingTestCase):
def test_closed(self):
self.conn.close()
self.assertRaises(psycopg2.InterfaceError,
self.conn.set_session,
psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE)
def test_not_in_transaction(self):
cur = self.conn.cursor()
cur.execute("select 1")
self.assertRaises(psycopg2.ProgrammingError,
self.conn.set_session,
psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE)
def test_set_isolation_level(self):
cur = self.conn.cursor()
self.conn.set_session(
psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE)
cur.execute("SHOW default_transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'serializable')
self.conn.rollback()
self.conn.set_session(
psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ)
cur.execute("SHOW default_transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(cur.fetchone()[0], 'repeatable read')
else:
self.assertEqual(cur.fetchone()[0], 'serializable')
self.conn.rollback()
self.conn.set_session(
isolation_level=psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
cur.execute("SHOW default_transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'read committed')
self.conn.rollback()
self.conn.set_session(
isolation_level=psycopg2.extensions.ISOLATION_LEVEL_READ_UNCOMMITTED)
cur.execute("SHOW default_transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(cur.fetchone()[0], 'read uncommitted')
else:
self.assertEqual(cur.fetchone()[0], 'read committed')
self.conn.rollback()
def test_set_isolation_level_str(self):
cur = self.conn.cursor()
self.conn.set_session("serializable")
cur.execute("SHOW default_transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'serializable')
self.conn.rollback()
self.conn.set_session("repeatable read")
cur.execute("SHOW default_transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(cur.fetchone()[0], 'repeatable read')
else:
self.assertEqual(cur.fetchone()[0], 'serializable')
self.conn.rollback()
self.conn.set_session("read committed")
cur.execute("SHOW default_transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'read committed')
self.conn.rollback()
self.conn.set_session("read uncommitted")
cur.execute("SHOW default_transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(cur.fetchone()[0], 'read uncommitted')
else:
self.assertEqual(cur.fetchone()[0], 'read committed')
self.conn.rollback()
def test_bad_isolation_level(self):
self.assertRaises(ValueError, self.conn.set_session, 0)
self.assertRaises(ValueError, self.conn.set_session, 5)
self.assertRaises(ValueError, self.conn.set_session, 'whatever')
def test_set_read_only(self):
cur = self.conn.cursor()
self.conn.set_session(readonly=True)
cur.execute("SHOW default_transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
self.conn.rollback()
cur.execute("SHOW default_transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
self.conn.rollback()
cur = self.conn.cursor()
self.conn.set_session(readonly=None)
cur.execute("SHOW default_transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
self.conn.rollback()
self.conn.set_session(readonly=False)
cur.execute("SHOW default_transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'off')
self.conn.rollback()
def test_set_default(self):
cur = self.conn.cursor()
cur.execute("SHOW default_transaction_isolation;")
default_isolevel = cur.fetchone()[0]
cur.execute("SHOW default_transaction_read_only;")
default_readonly = cur.fetchone()[0]
self.conn.rollback()
self.conn.set_session(isolation_level='serializable', readonly=True)
self.conn.set_session(isolation_level='default', readonly='default')
cur.execute("SHOW default_transaction_isolation;")
self.assertEqual(cur.fetchone()[0], default_isolevel)
cur.execute("SHOW default_transaction_read_only;")
self.assertEqual(cur.fetchone()[0], default_readonly)
@skip_before_postgres(9, 1)
def test_set_deferrable(self):
cur = self.conn.cursor()
self.conn.set_session(readonly=True, deferrable=True)
cur.execute("SHOW default_transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
cur.execute("SHOW default_transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], 'on')
self.conn.rollback()
cur.execute("SHOW default_transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], 'on')
self.conn.rollback()
self.conn.set_session(deferrable=False)
cur.execute("SHOW default_transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
cur.execute("SHOW default_transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], 'off')
self.conn.rollback()
@skip_after_postgres(9, 1)
def test_set_deferrable_error(self):
self.assertRaises(psycopg2.ProgrammingError,
self.conn.set_session, readonly=True, deferrable=True)
class AutocommitTests(ConnectingTestCase):
def test_closed(self):
self.conn.close()
self.assertRaises(psycopg2.InterfaceError,
setattr, self.conn, 'autocommit', True)
# The getter doesn't have a guard. We may change this in future
# to make it consistent with other methods; meanwhile let's just check
# it doesn't explode.
try:
self.assert_(self.conn.autocommit in (True, False))
except psycopg2.InterfaceError:
pass
def test_default_no_autocommit(self):
self.assert_(not self.conn.autocommit)
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
cur = self.conn.cursor()
cur.execute('select 1;')
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_BEGIN)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_INTRANS)
self.conn.rollback()
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
def test_set_autocommit(self):
self.conn.autocommit = True
self.assert_(self.conn.autocommit)
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
cur = self.conn.cursor()
cur.execute('select 1;')
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
self.conn.autocommit = False
self.assert_(not self.conn.autocommit)
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
cur.execute('select 1;')
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_BEGIN)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_INTRANS)
def test_set_intrans_error(self):
cur = self.conn.cursor()
cur.execute('select 1;')
self.assertRaises(psycopg2.ProgrammingError,
setattr, self.conn, 'autocommit', True)
def test_set_session_autocommit(self):
self.conn.set_session(autocommit=True)
self.assert_(self.conn.autocommit)
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
cur = self.conn.cursor()
cur.execute('select 1;')
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
self.conn.set_session(autocommit=False)
self.assert_(not self.conn.autocommit)
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
cur.execute('select 1;')
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_BEGIN)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_INTRANS)
self.conn.rollback()
self.conn.set_session('serializable', readonly=True, autocommit=True)
self.assert_(self.conn.autocommit)
cur.execute('select 1;')
self.assertEqual(self.conn.status, psycopg2.extensions.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
psycopg2.extensions.TRANSACTION_STATUS_IDLE)
cur.execute("SHOW default_transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'serializable')
cur.execute("SHOW default_transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
| mit |
martydill/url_shortener | code/venv/lib/python2.7/site-packages/IPython/qt/console/pygments_highlighter.py | 5 | 8696 | # System library imports.
from IPython.external.qt import QtGui
from pygments.formatters.html import HtmlFormatter
from pygments.lexer import RegexLexer, _TokenType, Text, Error
from pygments.lexers import PythonLexer
from pygments.styles import get_style_by_name
# Local imports
from IPython.utils.py3compat import string_types
def get_tokens_unprocessed(self, text, stack=('root',)):
""" Split ``text`` into (tokentype, text) pairs.
Monkeypatched to store the final stack on the object itself.
The `text` parameter this gets passed is only the current line, so to
highlight things like multiline strings correctly, we need to retrieve
the state from the previous line (this is done in PygmentsHighlighter,
below), and use it to continue processing the current line.
"""
pos = 0
tokendefs = self._tokens
if hasattr(self, '_saved_state_stack'):
statestack = list(self._saved_state_stack)
else:
statestack = list(stack)
statetokens = tokendefs[statestack[-1]]
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, pos)
if m:
if action is not None:
if type(action) is _TokenType:
yield pos, action, m.group()
else:
for item in action(self, m):
yield item
pos = m.end()
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
statestack.pop()
elif state == '#push':
statestack.append(statestack[-1])
else:
statestack.append(state)
elif isinstance(new_state, int):
# pop
del statestack[new_state:]
elif new_state == '#push':
statestack.append(statestack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[statestack[-1]]
break
else:
try:
if text[pos] == '\n':
# at EOL, reset state to "root"
pos += 1
statestack = ['root']
statetokens = tokendefs['root']
yield pos, Text, u'\n'
continue
yield pos, Error, text[pos]
pos += 1
except IndexError:
break
self._saved_state_stack = list(statestack)
# Monkeypatch!
RegexLexer.get_tokens_unprocessed = get_tokens_unprocessed
class PygmentsBlockUserData(QtGui.QTextBlockUserData):
""" Storage for the user data associated with each line.
"""
syntax_stack = ('root',)
def __init__(self, **kwds):
for key, value in kwds.items():
setattr(self, key, value)
QtGui.QTextBlockUserData.__init__(self)
def __repr__(self):
attrs = ['syntax_stack']
kwds = ', '.join([ '%s=%r' % (attr, getattr(self, attr))
for attr in attrs ])
return 'PygmentsBlockUserData(%s)' % kwds
class PygmentsHighlighter(QtGui.QSyntaxHighlighter):
""" Syntax highlighter that uses Pygments for parsing. """
#---------------------------------------------------------------------------
# 'QSyntaxHighlighter' interface
#---------------------------------------------------------------------------
def __init__(self, parent, lexer=None):
super(PygmentsHighlighter, self).__init__(parent)
self._document = self.document()
self._formatter = HtmlFormatter(nowrap=True)
self._lexer = lexer if lexer else PythonLexer()
self.set_style('default')
def highlightBlock(self, string):
""" Highlight a block of text.
"""
prev_data = self.currentBlock().previous().userData()
if prev_data is not None:
self._lexer._saved_state_stack = prev_data.syntax_stack
elif hasattr(self._lexer, '_saved_state_stack'):
del self._lexer._saved_state_stack
# Lex the text using Pygments
index = 0
for token, text in self._lexer.get_tokens(string):
length = len(text)
self.setFormat(index, length, self._get_format(token))
index += length
if hasattr(self._lexer, '_saved_state_stack'):
data = PygmentsBlockUserData(
syntax_stack=self._lexer._saved_state_stack)
self.currentBlock().setUserData(data)
# Clean up for the next go-round.
del self._lexer._saved_state_stack
#---------------------------------------------------------------------------
# 'PygmentsHighlighter' interface
#---------------------------------------------------------------------------
def set_style(self, style):
""" Sets the style to the specified Pygments style.
"""
if isinstance(style, string_types):
style = get_style_by_name(style)
self._style = style
self._clear_caches()
def set_style_sheet(self, stylesheet):
""" Sets a CSS stylesheet. The classes in the stylesheet should
correspond to those generated by:
pygmentize -S <style> -f html
Note that 'set_style' and 'set_style_sheet' completely override each
other, i.e. they cannot be used in conjunction.
"""
self._document.setDefaultStyleSheet(stylesheet)
self._style = None
self._clear_caches()
#---------------------------------------------------------------------------
# Protected interface
#---------------------------------------------------------------------------
def _clear_caches(self):
""" Clear caches for brushes and formats.
"""
self._brushes = {}
self._formats = {}
def _get_format(self, token):
""" Returns a QTextCharFormat for token or None.
"""
if token in self._formats:
return self._formats[token]
if self._style is None:
result = self._get_format_from_document(token, self._document)
else:
result = self._get_format_from_style(token, self._style)
self._formats[token] = result
return result
def _get_format_from_document(self, token, document):
""" Returns a QTextCharFormat for token by
"""
code, html = next(self._formatter._format_lines([(token, u'dummy')]))
self._document.setHtml(html)
return QtGui.QTextCursor(self._document).charFormat()
def _get_format_from_style(self, token, style):
""" Returns a QTextCharFormat for token by reading a Pygments style.
"""
result = QtGui.QTextCharFormat()
for key, value in style.style_for_token(token).items():
if value:
if key == 'color':
result.setForeground(self._get_brush(value))
elif key == 'bgcolor':
result.setBackground(self._get_brush(value))
elif key == 'bold':
result.setFontWeight(QtGui.QFont.Bold)
elif key == 'italic':
result.setFontItalic(True)
elif key == 'underline':
result.setUnderlineStyle(
QtGui.QTextCharFormat.SingleUnderline)
elif key == 'sans':
result.setFontStyleHint(QtGui.QFont.SansSerif)
elif key == 'roman':
result.setFontStyleHint(QtGui.QFont.Times)
elif key == 'mono':
result.setFontStyleHint(QtGui.QFont.TypeWriter)
return result
def _get_brush(self, color):
""" Returns a brush for the color.
"""
result = self._brushes.get(color)
if result is None:
qcolor = self._get_color(color)
result = QtGui.QBrush(qcolor)
self._brushes[color] = result
return result
def _get_color(self, color):
""" Returns a QColor built from a Pygments color string.
"""
qcolor = QtGui.QColor()
qcolor.setRgb(int(color[:2], base=16),
int(color[2:4], base=16),
int(color[4:6], base=16))
return qcolor
| mit |
kbrebanov/ansible | lib/ansible/modules/cloud/docker/docker_container.py | 3 | 80183 | #!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: docker_container
short_description: manage docker containers
description:
- Manage the life cycle of docker containers.
- Supports check mode. Run with --check and --diff to view config difference and list of actions to be taken.
version_added: "2.1"
options:
auto_remove:
description:
- enable auto-removal of the container on daemon side when the container's process exits
default: false
version_added: "2.4"
blkio_weight:
description:
- Block IO (relative weight), between 10 and 1000.
default: null
required: false
capabilities:
description:
- List of capabilities to add to the container.
default: null
required: false
cleanup:
description:
- Use with I(detach) to remove the container after successful execution.
default: false
required: false
version_added: "2.2"
command:
description:
- Command to execute when the container starts.
A command may be either a string or a list.
Prior to version 2.4, strings were split on commas.
default: null
required: false
cpu_period:
description:
- Limit CPU CFS (Completely Fair Scheduler) period
default: 0
required: false
cpu_quota:
description:
- Limit CPU CFS (Completely Fair Scheduler) quota
default: 0
required: false
cpuset_cpus:
description:
- CPUs in which to allow execution C(1,3) or C(1-3).
default: null
required: false
cpuset_mems:
description:
- Memory nodes (MEMs) in which to allow execution C(0-3) or C(0,1)
default: null
required: false
cpu_shares:
description:
- CPU shares (relative weight).
default: null
required: false
detach:
description:
- Enable detached mode to leave the container running in background.
If disabled, the task will reflect the status of the container run (failed if the command failed).
default: true
required: false
devices:
description:
- "List of host device bindings to add to the container. Each binding is a mapping expressed
in the format: <path_on_host>:<path_in_container>:<cgroup_permissions>"
default: null
required: false
dns_servers:
description:
- List of custom DNS servers.
default: null
required: false
dns_search_domains:
description:
- List of custom DNS search domains.
default: null
required: false
env:
description:
- Dictionary of key,value pairs.
default: null
required: false
env_file:
version_added: "2.2"
description:
- Path to a file containing environment variables I(FOO=BAR).
- If variable also present in C(env), then C(env) value will override.
- Requires docker-py >= 1.4.0.
default: null
required: false
entrypoint:
description:
- Command that overwrites the default ENTRYPOINT of the image.
default: null
required: false
etc_hosts:
description:
- Dict of host-to-IP mappings, where each host name is a key in the dictionary.
Each host name will be added to the container's /etc/hosts file.
default: null
required: false
exposed_ports:
description:
- List of additional container ports which informs Docker that the container
listens on the specified network ports at runtime.
If the port is already exposed using EXPOSE in a Dockerfile, it does not
need to be exposed again.
default: null
required: false
aliases:
- exposed
force_kill:
description:
- Use the kill command when stopping a running container.
default: false
required: false
groups:
description:
- List of additional group names and/or IDs that the container process will run as.
default: null
required: false
hostname:
description:
- Container hostname.
default: null
required: false
ignore_image:
description:
- When C(state) is I(present) or I(started) the module compares the configuration of an existing
container to requested configuration. The evaluation includes the image version. If
the image version in the registry does not match the container, the container will be
recreated. Stop this behavior by setting C(ignore_image) to I(True).
default: false
required: false
version_added: "2.2"
image:
description:
- Repository path and tag used to create the container. If an image is not found or pull is true, the image
will be pulled from the registry. If no tag is included, 'latest' will be used.
default: null
required: false
interactive:
description:
- Keep stdin open after a container is launched, even if not attached.
default: false
required: false
ipc_mode:
description:
- Set the IPC mode for the container. Can be one of 'container:<name|id>' to reuse another
container's IPC namespace or 'host' to use the host's IPC namespace within the container.
default: null
required: false
keep_volumes:
description:
- Retain volumes associated with a removed container.
default: true
required: false
kill_signal:
description:
- Override default signal used to kill a running container.
default: null
required: false
kernel_memory:
description:
- "Kernel memory limit (format: <number>[<unit>]). Number is a positive integer.
Unit can be one of b, k, m, or g. Minimum is 4M."
default: 0
required: false
labels:
description:
- Dictionary of key value pairs.
default: null
required: false
links:
description:
- List of name aliases for linked containers in the format C(container_name:alias)
default: null
required: false
log_driver:
description:
- Specify the logging driver. Docker uses json-file by default.
choices:
- none
- json-file
- syslog
- journald
- gelf
- fluentd
- awslogs
- splunk
default: null
required: false
log_options:
description:
- Dictionary of options specific to the chosen log_driver. See https://docs.docker.com/engine/admin/logging/overview/
for details.
required: false
default: null
mac_address:
description:
- Container MAC address (e.g. 92:d0:c6:0a:29:33)
default: null
required: false
memory:
description:
- "Memory limit (format: <number>[<unit>]). Number is a positive integer.
Unit can be one of b, k, m, or g"
default: 0
required: false
memory_reservation:
description:
- "Memory soft limit (format: <number>[<unit>]). Number is a positive integer.
Unit can be one of b, k, m, or g"
default: 0
required: false
memory_swap:
description:
- Total memory limit (memory + swap, format:<number>[<unit>]).
Number is a positive integer. Unit can be one of b, k, m, or g.
default: 0
required: false
memory_swappiness:
description:
- Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
default: 0
required: false
name:
description:
- Assign a name to a new container or match an existing container.
- When identifying an existing container name may be a name or a long or short container ID.
required: true
network_mode:
description:
- Connect the container to a network.
choices:
- bridge
- container:<name|id>
- host
- none
default: null
required: false
userns_mode:
description:
- User namespace to use
default: null
required: false
version_added: "2.5"
networks:
description:
- List of networks the container belongs to.
- Each network is a dict with keys C(name), C(ipv4_address), C(ipv6_address), C(links), C(aliases).
- For each network C(name) is required, all other keys are optional.
- If included, C(links) or C(aliases) are lists.
- For examples of the data structure and usage see EXAMPLES below.
- To remove a container from one or more networks, use the C(purge_networks) option.
default: null
required: false
version_added: "2.2"
oom_killer:
description:
- Whether or not to disable OOM Killer for the container.
default: false
required: false
oom_score_adj:
description:
- An integer value containing the score given to the container in order to tune OOM killer preferences.
default: 0
required: false
version_added: "2.2"
paused:
description:
- Use with the started state to pause running processes inside the container.
default: false
required: false
pid_mode:
description:
- Set the PID namespace mode for the container. Currently only supports 'host'.
default: null
required: false
privileged:
description:
- Give extended privileges to the container.
default: false
required: false
published_ports:
description:
- List of ports to publish from the container to the host.
- "Use docker CLI syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000), where 8000 is a
container port, 9000 is a host port, and 0.0.0.0 is a host interface."
- Container ports must be exposed either in the Dockerfile or via the C(expose) option.
- A value of all will publish all exposed container ports to random host ports, ignoring
any other mappings.
- If C(networks) parameter is provided, will inspect each network to see if there exists
a bridge network with optional parameter com.docker.network.bridge.host_binding_ipv4.
If such a network is found, then published ports where no host IP address is specified
will be bound to the host IP pointed to by com.docker.network.bridge.host_binding_ipv4.
Note that the first bridge network with a com.docker.network.bridge.host_binding_ipv4
value encountered in the list of C(networks) is the one that will be used.
aliases:
- ports
required: false
default: null
pull:
description:
- If true, always pull the latest version of an image. Otherwise, will only pull an image when missing.
default: false
required: false
purge_networks:
description:
- Remove the container from ALL networks not included in C(networks) parameter.
- Any default networks such as I(bridge), if not found in C(networks), will be removed as well.
default: false
required: false
version_added: "2.2"
read_only:
description:
- Mount the container's root file system as read-only.
default: false
required: false
recreate:
description:
- Use with present and started states to force the re-creation of an existing container.
default: false
required: false
restart:
description:
- Use with started state to force a matching container to be stopped and restarted.
default: false
required: false
restart_policy:
description:
      - Container restart policy. Place quotes around the I(no) option.
choices:
- always
- no
- on-failure
- unless-stopped
default: on-failure
required: false
restart_retries:
description:
- Use with restart policy to control maximum number of restart attempts.
default: 0
required: false
shm_size:
description:
- Size of `/dev/shm`. The format is `<number><unit>`. `number` must be greater than `0`.
Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes).
- Omitting the unit defaults to bytes. If you omit the size entirely, the system uses `64m`.
default: null
required: false
security_opts:
description:
- List of security options in the form of C("label:user:User")
default: null
required: false
state:
description:
- 'I(absent) - A container matching the specified name will be stopped and removed. Use force_kill to kill the container
rather than stopping it. Use keep_volumes to retain volumes associated with the removed container.'
- 'I(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no
container matches the name, a container will be created. If a container matches the name but the provided configuration
does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created
with the requested config. Image version will be taken into account when comparing configuration. To ignore image
version use the ignore_image option. Use the recreate option to force the re-creation of the matching container. Use
force_kill to kill the container rather than stopping it. Use keep_volumes to retain volumes associated with a removed
container.'
- 'I(started) - Asserts there is a running container matching the name and any provided configuration. If no container
matches the name, a container will be created and started. If a container matching the name is found but the
configuration does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed
and a new container will be created with the requested configuration and started. Image version will be taken into
account when comparing configuration. To ignore image version use the ignore_image option. Use recreate to always
re-create a matching container, even if it is running. Use restart to force a matching container to be stopped and
restarted. Use force_kill to kill a container rather than stopping it. Use keep_volumes to retain volumes associated
with a removed container.'
- 'I(stopped) - Asserts that the container is first I(present), and then if the container is running moves it to a stopped
state. Use force_kill to kill a container rather than stopping it.'
required: false
default: started
choices:
- absent
- present
- stopped
- started
stop_signal:
description:
- Override default signal used to stop the container.
default: null
required: false
stop_timeout:
description:
- Number of seconds to wait for the container to stop before sending SIGKILL.
required: false
default: null
trust_image_content:
description:
- If true, skip image verification.
default: false
required: false
tmpfs:
description:
- Mount a tmpfs directory
default: null
required: false
version_added: 2.4
tty:
description:
- Allocate a pseudo-TTY.
default: false
required: false
ulimits:
description:
- "List of ulimit options. A ulimit is specified as C(nofile:262144:262144)"
default: null
required: false
sysctls:
description:
- Dictionary of key,value pairs.
default: null
required: false
version_added: 2.4
user:
description:
- Sets the username or UID used and optionally the groupname or GID for the specified command.
- "Can be [ user | user:group | uid | uid:gid | user:gid | uid:group ]"
default: null
required: false
uts:
description:
- Set the UTS namespace mode for the container.
default: null
required: false
volumes:
description:
- List of volumes to mount within the container.
- "Use docker CLI-style syntax: C(/host:/container[:mode])"
- You can specify a read mode for the mount with either C(ro) or C(rw).
- SELinux hosts can additionally use C(z) or C(Z) to use a shared or
private label for the volume.
default: null
required: false
volume_driver:
description:
- The container volume driver.
default: none
required: false
volumes_from:
description:
- List of container names or Ids to get volumes from.
default: null
required: false
working_dir:
description:
- Path to the working directory.
default: null
required: false
version_added: "2.4"
extends_documentation_fragment:
- docker
author:
- "Cove Schneider (@cove)"
- "Joshua Conner (@joshuaconner)"
- "Pavel Antonov (@softzilla)"
- "Thomas Steinbach (@ThomasSteinbach)"
- "Philippe Jandot (@zfil)"
- "Daan Oosterveld (@dusdanig)"
- "James Tanner (@jctanner)"
- "Chris Houseknecht (@chouseknecht)"
- "Kassian Sun (@kassiansun)"
requirements:
- "python >= 2.6"
- "docker-py >= 1.7.0"
- "Docker API >= 1.20"
'''
EXAMPLES = '''
- name: Create a data container
docker_container:
name: mydata
image: busybox
volumes:
- /data
- name: Re-create a redis container
docker_container:
name: myredis
image: redis
command: redis-server --appendonly yes
state: present
recreate: yes
exposed_ports:
- 6379
volumes_from:
- mydata
- name: Restart a container
docker_container:
name: myapplication
image: someuser/appimage
state: started
restart: yes
links:
- "myredis:aliasedredis"
devices:
- "/dev/sda:/dev/xvda:rwm"
ports:
- "8080:9000"
- "127.0.0.1:8081:9001/udp"
env:
SECRET_KEY: ssssh
- name: Container present
docker_container:
name: mycontainer
state: present
image: ubuntu:14.04
command: sleep infinity
- name: Stop a container
docker_container:
name: mycontainer
state: stopped
- name: Start 4 load-balanced containers
docker_container:
name: "container{{ item }}"
recreate: yes
image: someuser/anotherappimage
command: sleep 1d
with_sequence: count=4
- name: remove container
docker_container:
name: ohno
state: absent
- name: Syslogging output
docker_container:
name: myservice
image: busybox
log_driver: syslog
log_options:
syslog-address: tcp://my-syslog-server:514
syslog-facility: daemon
      # NOTE: in Docker 1.13+ the "syslog-tag" option was renamed to "tag";
      # for older docker installs, use "syslog-tag" instead
tag: myservice
- name: Create db container and connect to network
docker_container:
name: db_test
image: "postgres:latest"
networks:
- name: "{{ docker_network_name }}"
- name: Start container, connect to network and link
docker_container:
name: sleeper
image: ubuntu:14.04
networks:
- name: TestingNet
ipv4_address: "172.1.1.100"
aliases:
- sleepyzz
links:
- db_test:db
- name: TestingNet2
- name: Start a container with a command
docker_container:
name: sleepy
image: ubuntu:14.04
command: ["sleep", "infinity"]
- name: Add container to networks
docker_container:
name: sleepy
networks:
- name: TestingNet
ipv4_address: 172.1.1.18
links:
- sleeper
- name: TestingNet2
ipv4_address: 172.1.10.20
- name: Update network with aliases
docker_container:
name: sleepy
networks:
- name: TestingNet
aliases:
- sleepyz
- zzzz
- name: Remove container from one network
docker_container:
name: sleepy
networks:
- name: TestingNet2
purge_networks: yes
- name: Remove container from all networks
docker_container:
name: sleepy
purge_networks: yes
'''
RETURN = '''
docker_container:
description:
- Before 2.3 this was 'ansible_docker_container' but was renamed due to conflicts with the connection plugin.
- Facts representing the current state of the container. Matches the docker inspection output.
- Note that facts are not part of registered vars but accessible directly.
- Empty if C(state) is I(absent)
- If detached is I(False), will include Output attribute containing any output from container run.
returned: always
type: dict
sample: '{
"AppArmorProfile": "",
"Args": [],
"Config": {
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": [
"/usr/bin/supervisord"
],
"Domainname": "",
"Entrypoint": null,
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"ExposedPorts": {
"443/tcp": {},
"80/tcp": {}
},
"Hostname": "8e47bf643eb9",
"Image": "lnmp_nginx:v1",
"Labels": {},
"OnBuild": null,
"OpenStdin": false,
"StdinOnce": false,
"Tty": false,
"User": "",
"Volumes": {
"/tmp/lnmp/nginx-sites/logs/": {}
},
...
}'
'''
import os
import re
import shlex
from ansible.module_utils.basic import human_to_bytes
from ansible.module_utils.docker_common import HAS_DOCKER_PY_2, AnsibleDockerClient, DockerBaseClass
from ansible.module_utils.six import string_types
try:
from docker import utils
if HAS_DOCKER_PY_2:
from docker.types import Ulimit, LogConfig
else:
from docker.utils.types import Ulimit, LogConfig
except:
# missing docker-py handled in ansible.module_utils.docker
pass
REQUIRES_CONVERSION_TO_BYTES = [
'memory',
'memory_reservation',
'memory_swap',
'shm_size'
]
VOLUME_PERMISSIONS = ('rw', 'ro', 'z', 'Z')
class TaskParameters(DockerBaseClass):
'''
Access and parse module parameters
'''
def __init__(self, client):
super(TaskParameters, self).__init__()
self.client = client
self.auto_remove = None
self.blkio_weight = None
self.capabilities = None
self.cleanup = None
self.command = None
self.cpu_period = None
self.cpu_quota = None
self.cpuset_cpus = None
self.cpuset_mems = None
self.cpu_shares = None
self.detach = None
self.debug = None
self.devices = None
self.dns_servers = None
self.dns_opts = None
self.dns_search_domains = None
self.env = None
self.env_file = None
self.entrypoint = None
self.etc_hosts = None
self.exposed_ports = None
self.force_kill = None
self.groups = None
self.hostname = None
self.ignore_image = None
self.image = None
self.interactive = None
self.ipc_mode = None
self.keep_volumes = None
self.kernel_memory = None
self.kill_signal = None
self.labels = None
self.links = None
self.log_driver = None
self.log_options = None
self.mac_address = None
self.memory = None
self.memory_reservation = None
self.memory_swap = None
self.memory_swappiness = None
self.name = None
self.network_mode = None
self.userns_mode = None
self.networks = None
self.oom_killer = None
self.oom_score_adj = None
self.paused = None
self.pid_mode = None
self.privileged = None
self.purge_networks = None
self.pull = None
self.read_only = None
self.recreate = None
self.restart = None
self.restart_retries = None
self.restart_policy = None
self.shm_size = None
self.security_opts = None
self.state = None
self.stop_signal = None
self.stop_timeout = None
self.tmpfs = None
self.trust_image_content = None
self.tty = None
self.user = None
self.uts = None
self.volumes = None
self.volume_binds = dict()
self.volumes_from = None
self.volume_driver = None
self.working_dir = None
for key, value in client.module.params.items():
setattr(self, key, value)
for param_name in REQUIRES_CONVERSION_TO_BYTES:
if client.module.params.get(param_name):
try:
setattr(self, param_name, human_to_bytes(client.module.params.get(param_name)))
except ValueError as exc:
self.fail("Failed to convert %s to bytes: %s" % (param_name, exc))
self.publish_all_ports = False
self.published_ports = self._parse_publish_ports()
if self.published_ports in ('all', 'ALL'):
self.publish_all_ports = True
self.published_ports = None
self.ports = self._parse_exposed_ports(self.published_ports)
self.log("expose ports:")
self.log(self.ports, pretty_print=True)
self.links = self._parse_links(self.links)
if self.volumes:
self.volumes = self._expand_host_paths()
self.tmpfs = self._parse_tmpfs()
self.env = self._get_environment()
self.ulimits = self._parse_ulimits()
self.sysctls = self._parse_sysctls()
self.log_config = self._parse_log_config()
self.exp_links = None
self.volume_binds = self._get_volume_binds(self.volumes)
self.log("volumes:")
self.log(self.volumes, pretty_print=True)
self.log("volume binds:")
self.log(self.volume_binds, pretty_print=True)
if self.networks:
for network in self.networks:
if not network.get('name'):
self.fail("Parameter error: network must have a name attribute.")
network['id'] = self._get_network_id(network['name'])
if not network['id']:
self.fail("Parameter error: network named %s could not be found. Does it exist?" % network['name'])
if network.get('links'):
network['links'] = self._parse_links(network['links'])
if self.entrypoint:
# convert from list to str.
self.entrypoint = ' '.join([str(x) for x in self.entrypoint])
if self.command:
# convert from list to str
if isinstance(self.command, list):
self.command = ' '.join([str(x) for x in self.command])
def fail(self, msg):
self.client.module.fail_json(msg=msg)
@property
def update_parameters(self):
'''
Returns parameters used to update a container
'''
update_parameters = dict(
blkio_weight='blkio_weight',
cpu_period='cpu_period',
cpu_quota='cpu_quota',
cpu_shares='cpu_shares',
cpuset_cpus='cpuset_cpus',
mem_limit='memory',
mem_reservation='memory_reservation',
memswap_limit='memory_swap',
kernel_memory='kernel_memory',
)
result = dict()
for key, value in update_parameters.items():
if getattr(self, value, None) is not None:
result[key] = getattr(self, value)
return result
@property
def create_parameters(self):
'''
Returns parameters used to create a container
'''
create_params = dict(
command='command',
hostname='hostname',
user='user',
detach='detach',
stdin_open='interactive',
tty='tty',
ports='ports',
environment='env',
name='name',
entrypoint='entrypoint',
cpu_shares='cpu_shares',
mac_address='mac_address',
labels='labels',
stop_signal='stop_signal',
volume_driver='volume_driver',
working_dir='working_dir',
)
result = dict(
host_config=self._host_config(),
volumes=self._get_mounts(),
)
for key, value in create_params.items():
if getattr(self, value, None) is not None:
result[key] = getattr(self, value)
return result
def _expand_host_paths(self):
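        # Expand relative or ~-prefixed host paths in "host:container[:mode]"
        # volume strings to absolute paths, adding a default ":rw" mode to
        # expanded two-part specs (e.g. "./data:/data" -> "/abs/cwd/data:/data:rw").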
new_vols = []
for vol in self.volumes:
if ':' in vol:
if len(vol.split(':')) == 3:
host, container, mode = vol.split(':')
if re.match(r'[\.~]', host):
host = os.path.abspath(host)
new_vols.append("%s:%s:%s" % (host, container, mode))
continue
elif len(vol.split(':')) == 2:
parts = vol.split(':')
if parts[1] not in VOLUME_PERMISSIONS and re.match(r'[\.~]', parts[0]):
host = os.path.abspath(parts[0])
new_vols.append("%s:%s:rw" % (host, parts[1]))
continue
new_vols.append(vol)
return new_vols
def _get_mounts(self):
'''
Return a list of container mounts.
:return:
'''
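        # Only the container-side paths are collected here, e.g.
        # ["/srv/web:/var/www:ro", "/cache"] -> ["/var/www", "/cache"];
        # host bindings are handled separately by _get_volume_binds().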
result = []
if self.volumes:
for vol in self.volumes:
if ':' in vol:
if len(vol.split(':')) == 3:
host, container, _ = vol.split(':')
result.append(container)
continue
if len(vol.split(':')) == 2:
parts = vol.split(':')
if parts[1] not in VOLUME_PERMISSIONS:
result.append(parts[1])
continue
result.append(vol)
self.log("mounts:")
self.log(result, pretty_print=True)
return result
def _host_config(self):
'''
Returns parameters used to create a HostConfig object
'''
        host_config_params = dict(
port_bindings='published_ports',
publish_all_ports='publish_all_ports',
links='links',
privileged='privileged',
dns='dns_servers',
dns_search='dns_search_domains',
binds='volume_binds',
volumes_from='volumes_from',
network_mode='network_mode',
userns_mode='userns_mode',
cap_add='capabilities',
extra_hosts='etc_hosts',
read_only='read_only',
ipc_mode='ipc_mode',
security_opt='security_opts',
ulimits='ulimits',
sysctls='sysctls',
log_config='log_config',
mem_limit='memory',
memswap_limit='memory_swap',
mem_swappiness='memory_swappiness',
oom_score_adj='oom_score_adj',
oom_kill_disable='oom_killer',
shm_size='shm_size',
group_add='groups',
devices='devices',
pid_mode='pid_mode',
tmpfs='tmpfs'
)
if HAS_DOCKER_PY_2:
# auto_remove is only supported in docker>=2
host_config_params['auto_remove'] = 'auto_remove'
params = dict()
for key, value in host_config_params.items():
if getattr(self, value, None) is not None:
params[key] = getattr(self, value)
if self.restart_policy:
params['restart_policy'] = dict(Name=self.restart_policy,
MaximumRetryCount=self.restart_retries)
return self.client.create_host_config(**params)
@property
def default_host_ip(self):
ip = '0.0.0.0'
if not self.networks:
return ip
for net in self.networks:
if net.get('name'):
network = self.client.inspect_network(net['name'])
if network.get('Driver') == 'bridge' and \
network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'):
ip = network['Options']['com.docker.network.bridge.host_binding_ipv4']
break
return ip
def _parse_publish_ports(self):
'''
Parse ports from docker CLI syntax
'''
if self.published_ports is None:
return None
if 'all' in self.published_ports:
return 'all'
default_ip = self.default_host_ip
binds = {}
for port in self.published_ports:
parts = str(port).split(':')
container_port = parts[-1]
if '/' not in container_port:
container_port = int(parts[-1])
p_len = len(parts)
if p_len == 1:
bind = (default_ip,)
elif p_len == 2:
bind = (default_ip, int(parts[0]))
elif p_len == 3:
bind = (parts[0], int(parts[1])) if parts[1] else (parts[0],)
if container_port in binds:
old_bind = binds[container_port]
if isinstance(old_bind, list):
old_bind.append(bind)
else:
binds[container_port] = [binds[container_port], bind]
else:
binds[container_port] = bind
return binds
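    # Illustrative example (hypothetical values): assuming the default host IP
    # resolves to '0.0.0.0', published_ports=['8080:80', '127.0.0.1:9000:9000/udp']
    # would yield {80: ('0.0.0.0', 8080), '9000/udp': ('127.0.0.1', 9000)}.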
@staticmethod
def _get_volume_binds(volumes):
'''
Extract host bindings, if any, from list of volume mapping strings.
:return: dictionary of bind mappings
'''
result = dict()
if volumes:
for vol in volumes:
host = None
if ':' in vol:
if len(vol.split(':')) == 3:
host, container, mode = vol.split(':')
if len(vol.split(':')) == 2:
parts = vol.split(':')
if parts[1] not in VOLUME_PERMISSIONS:
host, container, mode = (vol.split(':') + ['rw'])
if host is not None:
result[host] = dict(
bind=container,
mode=mode
)
return result
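    # Illustrative example (hypothetical values): for
    #   volumes=['/host/data:/data:ro', 'named_vol:/srv', '/cache']
    # this returns {'/host/data': {'bind': '/data', 'mode': 'ro'},
    #               'named_vol': {'bind': '/srv', 'mode': 'rw'}};
    # the bare '/cache' entry has no host part and is skipped.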
def _parse_exposed_ports(self, published_ports):
'''
Parse exposed ports from docker CLI-style ports syntax.
'''
exposed = []
if self.exposed_ports:
for port in self.exposed_ports:
port = str(port).strip()
protocol = 'tcp'
match = re.search(r'(/.+$)', port)
if match:
protocol = match.group(1).replace('/', '')
port = re.sub(r'/.+$', '', port)
exposed.append((port, protocol))
if published_ports:
# Any published port should also be exposed
for publish_port in published_ports:
match = False
if isinstance(publish_port, string_types) and '/' in publish_port:
port, protocol = publish_port.split('/')
port = int(port)
else:
protocol = 'tcp'
port = int(publish_port)
for exposed_port in exposed:
if isinstance(exposed_port[0], string_types) and '-' in exposed_port[0]:
start_port, end_port = exposed_port[0].split('-')
if int(start_port) <= port <= int(end_port):
match = True
elif exposed_port[0] == port:
match = True
if not match:
exposed.append((port, protocol))
return exposed
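    # Illustrative example (hypothetical values): with
    #   exposed_ports=['80', '443/udp'] and no published ports,
    # this returns [('80', 'tcp'), ('443', 'udp')].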
@staticmethod
def _parse_links(links):
'''
        Turn links into a list of (link, alias) tuples
'''
if links is None:
return None
result = []
for link in links:
parsed_link = link.split(':', 1)
if len(parsed_link) == 2:
result.append((parsed_link[0], parsed_link[1]))
else:
result.append((parsed_link[0], parsed_link[0]))
return result
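    # Illustrative example (hypothetical values):
    #   ['db', 'cache:redis'] -> [('db', 'db'), ('cache', 'redis')]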
def _parse_ulimits(self):
'''
Turn ulimits into an array of Ulimit objects
'''
if self.ulimits is None:
return None
results = []
for limit in self.ulimits:
limits = dict()
pieces = limit.split(':')
if len(pieces) >= 2:
limits['name'] = pieces[0]
limits['soft'] = int(pieces[1])
limits['hard'] = int(pieces[1])
if len(pieces) == 3:
limits['hard'] = int(pieces[2])
try:
results.append(Ulimit(**limits))
except ValueError as exc:
self.fail("Error parsing ulimits value %s - %s" % (limit, exc))
return results
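    # Illustrative example (hypothetical values):
    #   ['nofile:1024:2048', 'nproc:512'] becomes
    #   [Ulimit(name='nofile', soft=1024, hard=2048),
    #    Ulimit(name='nproc', soft=512, hard=512)]
    # (when no hard limit is given, the soft value is reused).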
def _parse_sysctls(self):
'''
        Return the sysctls as a plain dict of key/value pairs (no conversion needed)
'''
return self.sysctls
def _parse_log_config(self):
'''
Create a LogConfig object
'''
if self.log_driver is None:
return None
options = dict(
Type=self.log_driver,
            Config=dict()
)
if self.log_options is not None:
options['Config'] = self.log_options
try:
return LogConfig(**options)
except ValueError as exc:
self.fail('Error parsing logging options - %s' % (exc))
def _parse_tmpfs(self):
'''
        Turn the tmpfs list into a dict mapping container paths to mount options
'''
result = dict()
if self.tmpfs is None:
return result
for tmpfs_spec in self.tmpfs:
split_spec = tmpfs_spec.split(":", 1)
if len(split_spec) > 1:
result[split_spec[0]] = split_spec[1]
else:
result[split_spec[0]] = ""
return result
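    # Illustrative example (hypothetical values):
    #   ['/run:rw,size=64m', '/tmp'] -> {'/run': 'rw,size=64m', '/tmp': ''}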
def _get_environment(self):
"""
If environment file is combined with explicit environment variables, the explicit environment variables
take precedence.
"""
final_env = {}
if self.env_file:
parsed_env_file = utils.parse_env_file(self.env_file)
for name, value in parsed_env_file.items():
final_env[name] = str(value)
if self.env:
for name, value in self.env.items():
final_env[name] = str(value)
return final_env
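    # Illustrative example (hypothetical values): if the env file defines
    # LOG_LEVEL=info and the task also passes env={'LOG_LEVEL': 'debug'},
    # the resulting environment contains LOG_LEVEL=debug, because explicit
    # variables are applied after (and override) the file contents.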
def _get_network_id(self, network_name):
network_id = None
try:
for network in self.client.networks(names=[network_name]):
if network['Name'] == network_name:
network_id = network['Id']
break
except Exception as exc:
self.fail("Error getting network id for %s - %s" % (network_name, str(exc)))
return network_id
class Container(DockerBaseClass):
def __init__(self, container, parameters):
super(Container, self).__init__()
self.raw = container
self.Id = None
self.container = container
if container:
self.Id = container['Id']
self.Image = container['Image']
self.log(self.container, pretty_print=True)
self.parameters = parameters
self.parameters.expected_links = None
self.parameters.expected_ports = None
self.parameters.expected_exposed = None
self.parameters.expected_volumes = None
self.parameters.expected_ulimits = None
self.parameters.expected_sysctls = None
self.parameters.expected_etc_hosts = None
self.parameters.expected_env = None
def fail(self, msg):
self.parameters.client.module.fail_json(msg=msg)
@property
def exists(self):
return True if self.container else False
@property
def running(self):
if self.container and self.container.get('State'):
if self.container['State'].get('Running') and not self.container['State'].get('Ghost', False):
return True
return False
def has_different_configuration(self, image):
'''
        Diff parameters vs existing container config. Returns a tuple: (has_differences, list of differences)
'''
self.log('Starting has_different_configuration')
self.parameters.expected_entrypoint = self._get_expected_entrypoint()
self.parameters.expected_links = self._get_expected_links()
self.parameters.expected_ports = self._get_expected_ports()
self.parameters.expected_exposed = self._get_expected_exposed(image)
self.parameters.expected_volumes = self._get_expected_volumes(image)
self.parameters.expected_binds = self._get_expected_binds(image)
self.parameters.expected_ulimits = self._get_expected_ulimits(self.parameters.ulimits)
self.parameters.expected_sysctls = self._get_expected_sysctls(self.parameters.sysctls)
self.parameters.expected_etc_hosts = self._convert_simple_dict_to_list('etc_hosts')
self.parameters.expected_env = self._get_expected_env(image)
self.parameters.expected_cmd = self._get_expected_cmd()
self.parameters.expected_devices = self._get_expected_devices()
if not self.container.get('HostConfig'):
self.fail("has_config_diff: Error parsing container properties. HostConfig missing.")
if not self.container.get('Config'):
self.fail("has_config_diff: Error parsing container properties. Config missing.")
if not self.container.get('NetworkSettings'):
self.fail("has_config_diff: Error parsing container properties. NetworkSettings missing.")
host_config = self.container['HostConfig']
log_config = host_config.get('LogConfig', dict())
restart_policy = host_config.get('RestartPolicy', dict())
config = self.container['Config']
network = self.container['NetworkSettings']
# The previous version of the docker module ignored the detach state by
# assuming if the container was running, it must have been detached.
detach = not (config.get('AttachStderr') and config.get('AttachStdout'))
# "ExposedPorts": null returns None type & causes AttributeError - PR #5517
if config.get('ExposedPorts') is not None:
expected_exposed = [re.sub(r'/.+$', '', p) for p in config.get('ExposedPorts', dict()).keys()]
else:
expected_exposed = []
# Map parameters to container inspect results
config_mapping = dict(
auto_remove=host_config.get('AutoRemove'),
expected_cmd=config.get('Cmd'),
hostname=config.get('Hostname'),
user=config.get('User'),
detach=detach,
interactive=config.get('OpenStdin'),
capabilities=host_config.get('CapAdd'),
expected_devices=host_config.get('Devices'),
dns_servers=host_config.get('Dns'),
dns_opts=host_config.get('DnsOptions'),
dns_search_domains=host_config.get('DnsSearch'),
expected_env=(config.get('Env') or []),
expected_entrypoint=config.get('Entrypoint'),
expected_etc_hosts=host_config['ExtraHosts'],
expected_exposed=expected_exposed,
groups=host_config.get('GroupAdd'),
ipc_mode=host_config.get("IpcMode"),
labels=config.get('Labels'),
expected_links=host_config.get('Links'),
log_driver=log_config.get('Type'),
log_options=log_config.get('Config'),
mac_address=network.get('MacAddress'),
memory_swappiness=host_config.get('MemorySwappiness'),
network_mode=host_config.get('NetworkMode'),
userns_mode=host_config.get('UsernsMode'),
oom_killer=host_config.get('OomKillDisable'),
oom_score_adj=host_config.get('OomScoreAdj'),
pid_mode=host_config.get('PidMode'),
privileged=host_config.get('Privileged'),
expected_ports=host_config.get('PortBindings'),
read_only=host_config.get('ReadonlyRootfs'),
restart_policy=restart_policy.get('Name'),
restart_retries=restart_policy.get('MaximumRetryCount'),
# Cannot test shm_size, as shm_size is not included in container inspection results.
# shm_size=host_config.get('ShmSize'),
security_opts=host_config.get("SecurityOpt"),
stop_signal=config.get("StopSignal"),
tmpfs=host_config.get('Tmpfs'),
tty=config.get('Tty'),
expected_ulimits=host_config.get('Ulimits'),
expected_sysctls=host_config.get('Sysctls'),
uts=host_config.get('UTSMode'),
expected_volumes=config.get('Volumes'),
expected_binds=host_config.get('Binds'),
volumes_from=host_config.get('VolumesFrom'),
volume_driver=host_config.get('VolumeDriver'),
working_dir=host_config.get('WorkingDir')
)
differences = []
for key, value in config_mapping.items():
self.log('check differences %s %s vs %s' % (key, getattr(self.parameters, key), str(value)))
if getattr(self.parameters, key, None) is not None:
if isinstance(getattr(self.parameters, key), list) and isinstance(value, list):
if len(getattr(self.parameters, key)) > 0 and isinstance(getattr(self.parameters, key)[0], dict):
# compare list of dictionaries
self.log("comparing list of dict: %s" % key)
match = self._compare_dictionary_lists(getattr(self.parameters, key), value)
else:
# compare two lists. Is list_a in list_b?
self.log("comparing lists: %s" % key)
set_a = set(getattr(self.parameters, key))
set_b = set(value)
match = (set_b >= set_a)
elif isinstance(getattr(self.parameters, key), list) and not len(getattr(self.parameters, key)) \
and value is None:
# an empty list and None are ==
continue
elif isinstance(getattr(self.parameters, key), dict) and isinstance(value, dict):
# compare two dicts
self.log("comparing two dicts: %s" % key)
match = self._compare_dicts(getattr(self.parameters, key), value)
elif isinstance(getattr(self.parameters, key), dict) and \
not len(list(getattr(self.parameters, key).keys())) and value is None:
# an empty dict and None are ==
continue
else:
# primitive compare
self.log("primitive compare: %s" % key)
match = (getattr(self.parameters, key) == value)
if not match:
# no match. record the differences
item = dict()
item[key] = dict(
parameter=getattr(self.parameters, key),
container=value
)
differences.append(item)
has_differences = True if len(differences) > 0 else False
return has_differences, differences
def _compare_dictionary_lists(self, list_a, list_b):
'''
        Return True if every dict in list_a has a matching dict in list_b
'''
if not isinstance(list_a, list) or not isinstance(list_b, list):
return False
matches = 0
for dict_a in list_a:
for dict_b in list_b:
if self._compare_dicts(dict_a, dict_b):
matches += 1
break
result = (matches == len(list_a))
return result
def _compare_dicts(self, dict_a, dict_b):
'''
        Return True if dict_a is contained in dict_b (recursive subset check)
'''
if not isinstance(dict_a, dict) or not isinstance(dict_b, dict):
return False
for key, value in dict_a.items():
if isinstance(value, dict):
match = self._compare_dicts(value, dict_b.get(key))
elif isinstance(value, list):
if len(value) > 0 and isinstance(value[0], dict):
match = self._compare_dictionary_lists(value, dict_b.get(key))
else:
set_a = set(value)
set_b = set(dict_b.get(key))
match = (set_a == set_b)
else:
match = (value == dict_b.get(key))
if not match:
return False
return True
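    # Illustrative example (hypothetical values): _compare_dicts({'a': 1},
    # {'a': 1, 'b': 2}) is True because every key of the first dict matches,
    # while _compare_dicts({'a': 1}, {'a': 2}) is False -- the comparison is
    # a subset check, not strict equality.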
def has_different_resource_limits(self):
'''
Diff parameters and container resource limits
'''
if not self.container.get('HostConfig'):
self.fail("limits_differ_from_container: Error parsing container properties. HostConfig missing.")
host_config = self.container['HostConfig']
config_mapping = dict(
cpu_period=host_config.get('CpuPeriod'),
cpu_quota=host_config.get('CpuQuota'),
cpuset_cpus=host_config.get('CpusetCpus'),
cpuset_mems=host_config.get('CpusetMems'),
cpu_shares=host_config.get('CpuShares'),
kernel_memory=host_config.get("KernelMemory"),
memory=host_config.get('Memory'),
memory_reservation=host_config.get('MemoryReservation'),
memory_swap=host_config.get('MemorySwap'),
oom_score_adj=host_config.get('OomScoreAdj'),
oom_killer=host_config.get('OomKillDisable'),
)
differences = []
for key, value in config_mapping.items():
if getattr(self.parameters, key, None) and getattr(self.parameters, key) != value:
# no match. record the differences
item = dict()
item[key] = dict(
parameter=getattr(self.parameters, key),
container=value
)
differences.append(item)
different = (len(differences) > 0)
return different, differences
def has_network_differences(self):
'''
Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6
'''
different = False
differences = []
if not self.parameters.networks:
return different, differences
if not self.container.get('NetworkSettings'):
self.fail("has_missing_networks: Error parsing container properties. NetworkSettings missing.")
connected_networks = self.container['NetworkSettings']['Networks']
for network in self.parameters.networks:
if connected_networks.get(network['name'], None) is None:
different = True
differences.append(dict(
parameter=network,
container=None
))
else:
diff = False
if network.get('ipv4_address') and network['ipv4_address'] != connected_networks[network['name']].get('IPAddress'):
diff = True
if network.get('ipv6_address') and network['ipv6_address'] != connected_networks[network['name']].get('GlobalIPv6Address'):
diff = True
if network.get('aliases') and not connected_networks[network['name']].get('Aliases'):
diff = True
if network.get('aliases') and connected_networks[network['name']].get('Aliases'):
for alias in network.get('aliases'):
if alias not in connected_networks[network['name']].get('Aliases', []):
diff = True
if network.get('links') and not connected_networks[network['name']].get('Links'):
diff = True
if network.get('links') and connected_networks[network['name']].get('Links'):
expected_links = []
for link, alias in network['links']:
expected_links.append("%s:%s" % (link, alias))
for link in expected_links:
if link not in connected_networks[network['name']].get('Links', []):
diff = True
if diff:
different = True
differences.append(dict(
parameter=network,
container=dict(
name=network['name'],
ipv4_address=connected_networks[network['name']].get('IPAddress'),
ipv6_address=connected_networks[network['name']].get('GlobalIPv6Address'),
aliases=connected_networks[network['name']].get('Aliases'),
links=connected_networks[network['name']].get('Links')
)
))
return different, differences
def has_extra_networks(self):
'''
Check if the container is connected to non-requested networks
'''
extra_networks = []
extra = False
if not self.container.get('NetworkSettings'):
self.fail("has_extra_networks: Error parsing container properties. NetworkSettings missing.")
connected_networks = self.container['NetworkSettings'].get('Networks')
if connected_networks:
for network, network_config in connected_networks.items():
keep = False
if self.parameters.networks:
for expected_network in self.parameters.networks:
if expected_network['name'] == network:
keep = True
if not keep:
extra = True
extra_networks.append(dict(name=network, id=network_config['NetworkID']))
return extra, extra_networks
def _get_expected_devices(self):
if not self.parameters.devices:
return None
expected_devices = []
for device in self.parameters.devices:
parts = device.split(':')
if len(parts) == 1:
expected_devices.append(
dict(
CgroupPermissions='rwm',
PathInContainer=parts[0],
PathOnHost=parts[0]
))
elif len(parts) == 2:
parts = device.split(':')
expected_devices.append(
dict(
CgroupPermissions='rwm',
PathInContainer=parts[1],
PathOnHost=parts[0]
)
)
else:
expected_devices.append(
dict(
CgroupPermissions=parts[2],
PathInContainer=parts[1],
PathOnHost=parts[0]
))
return expected_devices
def _get_expected_entrypoint(self):
if not self.parameters.entrypoint:
return None
return shlex.split(self.parameters.entrypoint)
def _get_expected_ports(self):
if not self.parameters.published_ports:
return None
expected_bound_ports = {}
for container_port, config in self.parameters.published_ports.items():
if isinstance(container_port, int):
container_port = "%s/tcp" % container_port
if len(config) == 1:
if isinstance(config[0], int):
expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': config[0]}]
else:
expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': ""}]
elif isinstance(config[0], tuple):
expected_bound_ports[container_port] = []
for host_ip, host_port in config:
expected_bound_ports[container_port].append({'HostIp': host_ip, 'HostPort': str(host_port)})
else:
expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': str(config[1])}]
return expected_bound_ports
def _get_expected_links(self):
if self.parameters.links is None:
return None
self.log('parameter links:')
self.log(self.parameters.links, pretty_print=True)
exp_links = []
for link, alias in self.parameters.links:
exp_links.append("/%s:%s/%s" % (link, ('/' + self.parameters.name), alias))
return exp_links
def _get_expected_binds(self, image):
self.log('_get_expected_binds')
image_vols = []
if image:
image_vols = self._get_image_binds(image['ContainerConfig'].get('Volumes'))
param_vols = []
if self.parameters.volumes:
for vol in self.parameters.volumes:
host = None
if ':' in vol:
if len(vol.split(':')) == 3:
host, container, mode = vol.split(':')
if len(vol.split(':')) == 2:
parts = vol.split(':')
if parts[1] not in VOLUME_PERMISSIONS:
host, container, mode = vol.split(':') + ['rw']
if host:
param_vols.append("%s:%s:%s" % (host, container, mode))
result = list(set(image_vols + param_vols))
self.log("expected_binds:")
self.log(result, pretty_print=True)
return result
def _get_image_binds(self, volumes):
'''
Convert array of binds to array of strings with format host_path:container_path:mode
:param volumes: array of bind dicts
:return: array of strings
'''
results = []
if isinstance(volumes, dict):
results += self._get_bind_from_dict(volumes)
elif isinstance(volumes, list):
for vol in volumes:
results += self._get_bind_from_dict(vol)
return results
@staticmethod
def _get_bind_from_dict(volume_dict):
results = []
if volume_dict:
for host_path, config in volume_dict.items():
if isinstance(config, dict) and config.get('bind'):
container_path = config.get('bind')
mode = config.get('mode', 'rw')
results.append("%s:%s:%s" % (host_path, container_path, mode))
return results
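    # Illustrative example (hypothetical values):
    #   {'/host/data': {'bind': '/data', 'mode': 'ro'}} -> ['/host/data:/data:ro']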
def _get_expected_volumes(self, image):
self.log('_get_expected_volumes')
expected_vols = dict()
if image and image['ContainerConfig'].get('Volumes'):
expected_vols.update(image['ContainerConfig'].get('Volumes'))
if self.parameters.volumes:
for vol in self.parameters.volumes:
container = None
if ':' in vol:
if len(vol.split(':')) == 3:
host, container, mode = vol.split(':')
if len(vol.split(':')) == 2:
parts = vol.split(':')
if parts[1] not in VOLUME_PERMISSIONS:
host, container, mode = vol.split(':') + ['rw']
new_vol = dict()
if container:
new_vol[container] = dict()
else:
new_vol[vol] = dict()
expected_vols.update(new_vol)
if not expected_vols:
expected_vols = None
self.log("expected_volumes:")
self.log(expected_vols, pretty_print=True)
return expected_vols
def _get_expected_env(self, image):
self.log('_get_expected_env')
expected_env = dict()
if image and image['ContainerConfig'].get('Env'):
for env_var in image['ContainerConfig']['Env']:
parts = env_var.split('=', 1)
expected_env[parts[0]] = parts[1]
if self.parameters.env:
expected_env.update(self.parameters.env)
param_env = []
for key, value in expected_env.items():
param_env.append("%s=%s" % (key, value))
return param_env
def _get_expected_exposed(self, image):
self.log('_get_expected_exposed')
image_ports = []
if image:
image_ports = [re.sub(r'/.+$', '', p) for p in (image['ContainerConfig'].get('ExposedPorts') or {}).keys()]
param_ports = []
if self.parameters.ports:
param_ports = [str(p[0]) for p in self.parameters.ports]
result = list(set(image_ports + param_ports))
self.log(result, pretty_print=True)
return result
def _get_expected_ulimits(self, config_ulimits):
self.log('_get_expected_ulimits')
if config_ulimits is None:
return None
results = []
for limit in config_ulimits:
results.append(dict(
Name=limit.name,
Soft=limit.soft,
Hard=limit.hard
))
return results
def _get_expected_sysctls(self, config_sysctls):
self.log('_get_expected_sysctls')
if config_sysctls is None:
return None
result = dict()
for key, value in config_sysctls.items():
result[key] = str(value)
return result
def _get_expected_cmd(self):
self.log('_get_expected_cmd')
if not self.parameters.command:
return None
return shlex.split(self.parameters.command)
def _convert_simple_dict_to_list(self, param_name, join_with=':'):
if getattr(self.parameters, param_name, None) is None:
return None
results = []
for key, value in getattr(self.parameters, param_name).items():
results.append("%s%s%s" % (key, join_with, value))
return results
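    # Illustrative example (hypothetical values): for etc_hosts={'db.local': '10.0.0.5'},
    # _convert_simple_dict_to_list('etc_hosts') returns ['db.local:10.0.0.5'].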
class ContainerManager(DockerBaseClass):
'''
Perform container management tasks
'''
def __init__(self, client):
super(ContainerManager, self).__init__()
self.client = client
self.parameters = TaskParameters(client)
self.check_mode = self.client.check_mode
self.results = {'changed': False, 'actions': []}
self.diff = {}
self.facts = {}
state = self.parameters.state
if state in ('stopped', 'started', 'present'):
self.present(state)
elif state == 'absent':
self.absent()
if not self.check_mode and not self.parameters.debug:
self.results.pop('actions')
if self.client.module._diff or self.parameters.debug:
self.results['diff'] = self.diff
if self.facts:
self.results['ansible_facts'] = {'docker_container': self.facts}
def present(self, state):
container = self._get_container(self.parameters.name)
image = self._get_image()
self.log(image, pretty_print=True)
if not container.exists:
# New container
self.log('No container found')
new_container = self.container_create(self.parameters.image, self.parameters.create_parameters)
if new_container:
container = new_container
else:
# Existing container
different, differences = container.has_different_configuration(image)
image_different = False
if not self.parameters.ignore_image:
image_different = self._image_is_different(image, container)
if image_different or different or self.parameters.recreate:
self.diff['differences'] = differences
if image_different:
self.diff['image_different'] = True
self.log("differences")
self.log(differences, pretty_print=True)
if container.running:
self.container_stop(container.Id)
self.container_remove(container.Id)
new_container = self.container_create(self.parameters.image, self.parameters.create_parameters)
if new_container:
container = new_container
if container and container.exists:
container = self.update_limits(container)
container = self.update_networks(container)
if state == 'started' and not container.running:
container = self.container_start(container.Id)
elif state == 'started' and self.parameters.restart:
self.container_stop(container.Id)
container = self.container_start(container.Id)
elif state == 'stopped' and container.running:
self.container_stop(container.Id)
container = self._get_container(container.Id)
self.facts = container.raw
def absent(self):
container = self._get_container(self.parameters.name)
if container.exists:
if container.running:
self.container_stop(container.Id)
self.container_remove(container.Id)
def fail(self, msg, **kwargs):
self.client.module.fail_json(msg=msg, **kwargs)
def _get_container(self, container):
'''
Expects container ID or Name. Returns a container object
'''
return Container(self.client.get_container(container), self.parameters)
def _get_image(self):
if not self.parameters.image:
self.log('No image specified')
return None
repository, tag = utils.parse_repository_tag(self.parameters.image)
if not tag:
tag = "latest"
image = self.client.find_image(repository, tag)
if not self.check_mode:
if not image or self.parameters.pull:
self.log("Pull the image.")
image, alreadyToLatest = self.client.pull_image(repository, tag)
if alreadyToLatest:
self.results['changed'] = False
else:
self.results['changed'] = True
self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
self.log("image")
self.log(image, pretty_print=True)
return image
def _image_is_different(self, image, container):
if image and image.get('Id'):
if container and container.Image:
if image.get('Id') != container.Image:
return True
return False
def update_limits(self, container):
limits_differ, different_limits = container.has_different_resource_limits()
if limits_differ:
self.log("limit differences:")
self.log(different_limits, pretty_print=True)
if limits_differ and not self.check_mode:
self.container_update(container.Id, self.parameters.update_parameters)
return self._get_container(container.Id)
return container
def update_networks(self, container):
has_network_differences, network_differences = container.has_network_differences()
updated_container = container
if has_network_differences:
if self.diff.get('differences'):
self.diff['differences'].append(dict(network_differences=network_differences))
else:
self.diff['differences'] = [dict(network_differences=network_differences)]
self.results['changed'] = True
updated_container = self._add_networks(container, network_differences)
if self.parameters.purge_networks:
has_extra_networks, extra_networks = container.has_extra_networks()
if has_extra_networks:
if self.diff.get('differences'):
self.diff['differences'].append(dict(purge_networks=extra_networks))
else:
self.diff['differences'] = [dict(purge_networks=extra_networks)]
self.results['changed'] = True
updated_container = self._purge_networks(container, extra_networks)
return updated_container
def _add_networks(self, container, differences):
for diff in differences:
# remove the container from the network, if connected
if diff.get('container'):
self.results['actions'].append(dict(removed_from_network=diff['parameter']['name']))
if not self.check_mode:
try:
self.client.disconnect_container_from_network(container.Id, diff['parameter']['id'])
except Exception as exc:
self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'],
str(exc)))
# connect to the network
params = dict(
ipv4_address=diff['parameter'].get('ipv4_address', None),
ipv6_address=diff['parameter'].get('ipv6_address', None),
links=diff['parameter'].get('links', None),
aliases=diff['parameter'].get('aliases', None)
)
self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=params))
if not self.check_mode:
try:
self.log("Connecting container to network %s" % diff['parameter']['id'])
self.log(params, pretty_print=True)
self.client.connect_container_to_network(container.Id, diff['parameter']['id'], **params)
except Exception as exc:
self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], str(exc)))
return self._get_container(container.Id)
def _purge_networks(self, container, networks):
for network in networks:
self.results['actions'].append(dict(removed_from_network=network['name']))
if not self.check_mode:
try:
self.client.disconnect_container_from_network(container.Id, network['name'])
except Exception as exc:
self.fail("Error disconnecting container from network %s - %s" % (network['name'],
str(exc)))
return self._get_container(container.Id)
def container_create(self, image, create_parameters):
self.log("create container")
self.log("image: %s parameters:" % image)
self.log(create_parameters, pretty_print=True)
self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters))
self.results['changed'] = True
new_container = None
if not self.check_mode:
try:
new_container = self.client.create_container(image, **create_parameters)
except Exception as exc:
self.fail("Error creating container: %s" % str(exc))
return self._get_container(new_container['Id'])
return new_container
def container_start(self, container_id):
self.log("start container %s" % (container_id))
self.results['actions'].append(dict(started=container_id))
self.results['changed'] = True
if not self.check_mode:
try:
self.client.start(container=container_id)
except Exception as exc:
self.fail("Error starting container %s: %s" % (container_id, str(exc)))
if not self.parameters.detach:
status = self.client.wait(container_id)
config = self.client.inspect_container(container_id)
logging_driver = config['HostConfig']['LogConfig']['Type']
if logging_driver == 'json-file' or logging_driver == 'journald':
output = self.client.logs(container_id, stdout=True, stderr=True, stream=False, timestamps=False)
else:
output = "Result logged using `%s` driver" % logging_driver
if status != 0:
self.fail(output, status=status)
if self.parameters.cleanup:
self.container_remove(container_id, force=True)
insp = self._get_container(container_id)
if insp.raw:
insp.raw['Output'] = output
else:
insp.raw = dict(Output=output)
return insp
return self._get_container(container_id)
def container_remove(self, container_id, link=False, force=False):
volume_state = (not self.parameters.keep_volumes)
self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force))
self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force))
self.results['changed'] = True
response = None
if not self.check_mode:
try:
response = self.client.remove_container(container_id, v=volume_state, link=link, force=force)
except Exception as exc:
self.fail("Error removing container %s: %s" % (container_id, str(exc)))
return response
def container_update(self, container_id, update_parameters):
if update_parameters:
self.log("update container %s" % (container_id))
self.log(update_parameters, pretty_print=True)
self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters))
self.results['changed'] = True
if not self.check_mode and callable(getattr(self.client, 'update_container')):
try:
self.client.update_container(container_id, **update_parameters)
except Exception as exc:
self.fail("Error updating container %s: %s" % (container_id, str(exc)))
return self._get_container(container_id)
def container_kill(self, container_id):
self.results['actions'].append(dict(killed=container_id, signal=self.parameters.kill_signal))
self.results['changed'] = True
response = None
if not self.check_mode:
try:
if self.parameters.kill_signal:
response = self.client.kill(container_id, signal=self.parameters.kill_signal)
else:
response = self.client.kill(container_id)
except Exception as exc:
self.fail("Error killing container %s: %s" % (container_id, exc))
return response
def container_stop(self, container_id):
if self.parameters.force_kill:
self.container_kill(container_id)
return
self.results['actions'].append(dict(stopped=container_id, timeout=self.parameters.stop_timeout))
self.results['changed'] = True
response = None
if not self.check_mode:
try:
if self.parameters.stop_timeout:
response = self.client.stop(container_id, timeout=self.parameters.stop_timeout)
else:
response = self.client.stop(container_id)
except Exception as exc:
self.fail("Error stopping container %s: %s" % (container_id, str(exc)))
return response
def main():
argument_spec = dict(
auto_remove=dict(type='bool', default=False),
blkio_weight=dict(type='int'),
capabilities=dict(type='list'),
cleanup=dict(type='bool', default=False),
command=dict(type='raw'),
cpu_period=dict(type='int'),
cpu_quota=dict(type='int'),
cpuset_cpus=dict(type='str'),
cpuset_mems=dict(type='str'),
cpu_shares=dict(type='int'),
detach=dict(type='bool', default=True),
devices=dict(type='list'),
dns_servers=dict(type='list'),
dns_opts=dict(type='list'),
dns_search_domains=dict(type='list'),
env=dict(type='dict'),
env_file=dict(type='path'),
entrypoint=dict(type='list'),
etc_hosts=dict(type='dict'),
exposed_ports=dict(type='list', aliases=['exposed', 'expose']),
force_kill=dict(type='bool', default=False, aliases=['forcekill']),
groups=dict(type='list'),
hostname=dict(type='str'),
ignore_image=dict(type='bool', default=False),
image=dict(type='str'),
interactive=dict(type='bool', default=False),
ipc_mode=dict(type='str'),
keep_volumes=dict(type='bool', default=True),
kernel_memory=dict(type='str'),
kill_signal=dict(type='str'),
labels=dict(type='dict'),
links=dict(type='list'),
log_driver=dict(type='str',
choices=['none', 'json-file', 'syslog', 'journald', 'gelf', 'fluentd', 'awslogs', 'splunk'],
default=None),
log_options=dict(type='dict', aliases=['log_opt']),
mac_address=dict(type='str'),
memory=dict(type='str', default='0'),
memory_reservation=dict(type='str'),
memory_swap=dict(type='str'),
memory_swappiness=dict(type='int'),
name=dict(type='str', required=True),
network_mode=dict(type='str'),
userns_mode=dict(type='str'),
networks=dict(type='list'),
oom_killer=dict(type='bool'),
oom_score_adj=dict(type='int'),
paused=dict(type='bool', default=False),
pid_mode=dict(type='str'),
privileged=dict(type='bool', default=False),
published_ports=dict(type='list', aliases=['ports']),
pull=dict(type='bool', default=False),
purge_networks=dict(type='bool', default=False),
read_only=dict(type='bool', default=False),
recreate=dict(type='bool', default=False),
restart=dict(type='bool', default=False),
restart_policy=dict(type='str', choices=['no', 'on-failure', 'always', 'unless-stopped']),
restart_retries=dict(type='int', default=None),
shm_size=dict(type='str'),
security_opts=dict(type='list'),
state=dict(type='str', choices=['absent', 'present', 'started', 'stopped'], default='started'),
stop_signal=dict(type='str'),
stop_timeout=dict(type='int'),
tmpfs=dict(type='list'),
trust_image_content=dict(type='bool', default=False),
tty=dict(type='bool', default=False),
ulimits=dict(type='list'),
sysctls=dict(type='dict'),
user=dict(type='str'),
uts=dict(type='str'),
volumes=dict(type='list'),
volumes_from=dict(type='list'),
volume_driver=dict(type='str'),
working_dir=dict(type='str'),
)
required_if = [
('state', 'present', ['image'])
]
client = AnsibleDockerClient(
argument_spec=argument_spec,
required_if=required_if,
supports_check_mode=True
)
if not HAS_DOCKER_PY_2 and client.module.params.get('auto_remove'):
client.module.fail_json(msg="'auto_remove' is not compatible with docker-py, and requires the docker python module")
cm = ContainerManager(client)
client.module.exit_json(**cm.results)
if __name__ == '__main__':
main()
| gpl-3.0 |
DavidAndreev/indico | indico/modules/events/surveys/forms.py | 1 | 6843 | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from datetime import time
from markupsafe import escape
from wtforms.fields import StringField, TextAreaField, BooleanField
from wtforms.fields.html5 import IntegerField
from wtforms.validators import DataRequired, Optional, NumberRange
from indico.core.db import db
from indico.modules.events.surveys.models.surveys import Survey
from indico.web.forms.base import IndicoForm
from indico.web.forms.fields import IndicoDateTimeField, EmailListField, FileField
from indico.web.forms.widgets import SwitchWidget
from indico.web.forms.validators import HiddenUnless, ValidationError, DateTimeRange, LinkedDateTime, UsedIf
from indico.util.i18n import _
class SurveyForm(IndicoForm):
_notification_fields = ('notifications_enabled', 'notify_participants', 'start_notification_emails',
'new_submission_emails')
title = StringField(_("Title"), [DataRequired()], description=_("The title of the survey"))
introduction = TextAreaField(_("Introduction"), description=_("An introduction to be displayed before the survey"))
anonymous = BooleanField(_("Anonymous submissions"), widget=SwitchWidget(),
description=_("User information will not be attached to submissions"))
require_user = BooleanField(_("Only logged-in users"), [HiddenUnless('anonymous')], widget=SwitchWidget(),
description=_("Still require users to be logged in for submitting the survey"))
limit_submissions = BooleanField(_("Limit submissions"), widget=SwitchWidget(),
description=_("Whether there is a submission cap"))
submission_limit = IntegerField(_("Capacity"),
[HiddenUnless('limit_submissions'), DataRequired(), NumberRange(min=1)],
description=_("Maximum number of submissions accepted"))
notifications_enabled = BooleanField(_('Enabled'), widget=SwitchWidget(),
description=_('Send email notifications for specific events related to the '
'survey.'))
notify_participants = BooleanField(_('Participants'), [HiddenUnless('notifications_enabled', preserve_data=True)],
widget=SwitchWidget(),
description=_('Notify participants of the event when this survey starts.'))
start_notification_emails = EmailListField(_('Start notification recipients'),
[HiddenUnless('notifications_enabled', preserve_data=True)],
description=_('Email addresses to notify about the start of the survey'))
new_submission_emails = EmailListField(_('New submission notification recipients'),
[HiddenUnless('notifications_enabled', preserve_data=True)],
description=_('Email addresses to notify when a new submission is made'))
def __init__(self, *args, **kwargs):
self.event = kwargs.pop('event', None)
super(IndicoForm, self).__init__(*args, **kwargs)
def validate_title(self, field):
query = Survey.find(Survey.event_id == self.event.id,
db.func.lower(Survey.title) == field.data.lower(),
Survey.title != field.object_data,
~Survey.is_deleted)
if query.count():
            raise ValidationError(_("There is already a survey named \"{}\" on this event".format(escape(field.data))))
def post_validate(self):
if not self.anonymous.data:
self.require_user.data = True
class ScheduleSurveyForm(IndicoForm):
start_dt = IndicoDateTimeField(_("Start"), [UsedIf(lambda form, field: form.allow_reschedule_start), Optional(),
DateTimeRange(earliest='now')],
default_time=time(0, 0),
description=_("Moment when the survey will open for submissions"))
end_dt = IndicoDateTimeField(_("End"), [Optional(), LinkedDateTime('start_dt')],
default_time=time(23, 59),
description=_("Moment when the survey will close"))
resend_start_notification = BooleanField(_('Resend start notification'), widget=SwitchWidget(),
description=_("Resend the survey start notification."))
def __init__(self, *args, **kwargs):
survey = kwargs.pop('survey')
self.allow_reschedule_start = kwargs.pop('allow_reschedule_start')
self.timezone = survey.event.getTimezone()
super(IndicoForm, self).__init__(*args, **kwargs)
if not survey.start_notification_sent or not self.allow_reschedule_start:
del self.resend_start_notification
class SectionForm(IndicoForm):
display_as_section = BooleanField(_("Display as section"), widget=SwitchWidget(), default=True,
description=_("Whether this is going to be displayed as a section or standalone"))
title = StringField(_('Title'), [HiddenUnless('display_as_section', preserve_data=True), DataRequired()],
description=_("The title of the section."))
description = TextAreaField(_('Description'), [HiddenUnless('display_as_section', preserve_data=True)],
description=_("The description text of the section."))
class TextForm(IndicoForm):
description = TextAreaField(_('Text'),
description=_("The text that should be displayed."))
class ImportQuestionnaireForm(IndicoForm):
json_file = FileField(_('File'), accepted_file_types="application/json,.json",
description=_("Choose a previously exported survey content to import. "
"Existing sections will be preserved."))
| gpl-3.0 |
tbarbette/clickwatcher | npf_compare.py | 1 | 7662 | #!/usr/bin/env python3
"""
NPF program to compare multiple software packages against the same testie.
A dedicated script is needed because tags may change the testie depending on
the repository, so the variables common to all series must be identified with
some extra work. For this reason the comparator supports only a single testie.
"""
import argparse
from npf import npf
from npf.regression import *
from pathlib import Path
from npf.testie import Testie
from npf.statistics import Statistics
class Comparator():
def __init__(self, repo_list: List[Repository]):
self.repo_list = repo_list
self.graphs_series = []
self.kind_graphs_series = []
def build_list(self, on_finish, testie, build, data_datasets, kind_datasets):
on_finish(self.graphs_series + [(testie,build,data_datasets[0])], self.kind_graphs_series + [(testie,build,kind_datasets[0])])
def run(self, testie_name, options, tags, on_finish=None):
for irepo,repo in enumerate(self.repo_list):
regressor = Regression(repo)
testies = Testie.expand_folder(testie_name, options=options, tags=repo.tags + tags)
testies = npf.override(options, testies)
for itestie,testie in enumerate(testies):
build, data_dataset, kind_dataset = regressor.regress_all_testies(testies=[testie], options=options, on_finish=lambda b,dd,td: self.build_list(on_finish,testie,b,dd,td) if on_finish else None,iserie=irepo,nseries=len(self.repo_list) )
if len(testies) > 0 and not build is None:
build._pretty_name = repo.name
self.graphs_series.append((testie, build, data_dataset[0]))
self.kind_graphs_series.append((testie, build, kind_dataset[0]))
if len(self.graphs_series) == 0:
print("No valid tags/testie/repo combination.")
return None, None
return self.graphs_series, self.kind_graphs_series
def do_graph(filename,args,series,kind_series,options):
if series is None:
return
#Group repo if asked to do so
if options.group_repo:
repo_series=OrderedDict()
for i, (testie, build, dataset) in enumerate(series):
repo_series.setdefault(build.repo.reponame,(testie,build,OrderedDict()))
for run, run_results in dataset.items():
run.variables['SERIE'] = build.pretty_name()
repo_series[build.repo.reponame][2][run] = run_results
series = []
for reponame, (testie, build, dataset) in repo_series.items():
build._pretty_name = reponame
build.version = reponame
series.append((testie, build, dataset))
# Merge series with common name
merged_series = OrderedDict()
for testie, build, dataset in series:
        # Group series by series name
merged_series.setdefault(build.pretty_name(), []).append((testie, build, dataset))
series = []
for sname,slist in merged_series.items():
if len(slist) == 1:
series.append(slist[0])
else:
all_r = {}
for results in [l[2] for l in slist]:
all_r.update(results)
series.append((slist[0][0], slist[0][1], all_r))
    # We must find the variables common to all series, and change the dataset to reflect only those
all_variables = []
for testie, build, dataset in series:
v_list = set()
for name, variable in testie.variables.vlist.items():
v_list.add(name)
all_variables.append(v_list)
if args.statistics:
Statistics.run(build,dataset, testie, max_depth=args.statistics_maxdepth, filename=args.statistics_filename if args.statistics_filename else npf.build_output_filename(args, [build.repo for t,build,d in series]))
common_variables = set.intersection(*map(set, all_variables))
    # Remove variables that are totally defined by the series, that is
    # variables that only have one value inside each series
    # but have different values across series
    useful_variables = []
for variable in common_variables:
all_values = set()
all_alone=True
for i, (testie, build, dataset) in enumerate(series):
serie_values = set()
for run, result_types in dataset.items():
if variable in run.variables:
val = run.variables[variable]
serie_values.add(val)
if len(serie_values) > 1:
all_alone = False
break
if all_alone:
pass
else:
useful_variables.append(variable)
if options.group_repo:
useful_variables.append('SERIE')
for v in series[0][0].config.get_list("graph_hide_variables"):
if v in useful_variables:
useful_variables.remove(v)
    # Keep only the variables in Run that are useful as defined above
for i, (testie, build, dataset) in enumerate(series):
ndataset = OrderedDict()
for run, results in dataset.items():
ndataset[run.intersect(useful_variables)] = results
series[i] = (testie, build, ndataset)
    # Keep only the variables in Time Run that are useful as defined above
if options.do_time:
n_kind_series=OrderedDict()
for i, (testie, build, kind_dataset) in enumerate(kind_series):
for kind, dataset in kind_dataset.items():
ndataset = OrderedDict()
n_kind_series.setdefault(kind,[])
for run, results in dataset.items():
ndataset[run.intersect(useful_variables + [kind])] = results
if ndataset:
n_kind_series[kind].append((testie, build, ndataset))
grapher = Grapher()
print("Generating graphs...")
g = grapher.graph(series=series,
filename=filename,
options=args,
title=args.graph_title)
if options.do_time:
for kind,series in n_kind_series.items():
print("Generating graph %s..." % kind)
g = grapher.graph(series=series,
filename=filename,
fileprefix=kind,
options=args,
title=args.graph_title)
def main():
parser = argparse.ArgumentParser(description='NPF cross-repository comparator')
npf.add_verbosity_options(parser)
parser.add_argument('repos', metavar='repo', type=str, nargs='+', help='names of the repositories to watch')
parser.add_argument('--graph-title', type=str, nargs='?', help='Graph title')
b = npf.add_building_options(parser)
t = npf.add_testing_options(parser)
g = npf.add_graph_options(parser)
args = parser.parse_args()
npf.parse_nodes(args)
# Parsing repo list and getting last_build
repo_list = []
for repo_name in args.repos:
repo = Repository.get_instance(repo_name, args)
repo.last_build = None
repo_list.append(repo)
comparator = Comparator(repo_list)
filename = npf.build_output_filename(args, repo_list)
savedir = Path(os.path.dirname(filename))
if not savedir.exists():
os.makedirs(savedir.as_posix())
if not os.path.isabs(filename):
filename = os.getcwd() + os.sep + filename
series, time_series = comparator.run(testie_name=args.testie, tags=args.tags, options=args, on_finish=lambda series,time_series:do_graph(filename,args,series,time_series,options=args) if args.iterative else None)
do_graph(filename,args,series, time_series, options=args)
if __name__ == "__main__":
main()
| gpl-3.0 |
SEL-Columbia/commcare-hq | custom/m4change/reports/mcct_project_review.py | 1 | 23053 | from datetime import date, datetime
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from jsonobject import DateTimeProperty
from corehq.apps.locations.models import Location
from corehq.apps.reports.cache import request_cache
from corehq.apps.reports.filters.fixtures import AsyncLocationFilter
from corehq.elastic import ES_URLS
from corehq.apps.reports.standard import CustomProjectReport
from corehq.apps.reports.standard import ProjectReport, ProjectReportParametersMixin, DatespanMixin
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.dont_use.fields import StrongFilterUsersField
from corehq.apps.reports.generic import ElasticProjectInspectionReport
from corehq.apps.reports.standard.monitoring import MultiFormDrilldownMixin
from corehq.elastic import es_query
from custom.m4change.constants import REJECTION_REASON_DISPLAY_NAMES, MCCT_SERVICE_TYPES
from custom.m4change.filters import ServiceTypeFilter
from custom.m4change.models import McctStatus
from custom.m4change.reports import get_location_hierarchy_by_id
from custom.m4change.utils import get_case_by_id, get_property, get_form_ids_by_status
from custom.m4change.constants import EMPTY_FIELD
from corehq.apps.reports.tasks import export_all_rows_task
def _get_date_range(range):
if range is not None:
dates = str(range).split(_(" to "))
return (dates[0], dates[1])
return None
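# Illustrative example (hypothetical values):
#   _get_date_range("2014-01-01 to 2014-01-31") -> ("2014-01-01", "2014-01-31")
# The range string is presumably produced by the report's DateRangeField filter.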
def _get_relevant_xmlnss_for_service_type(service_type_filter):
relevant_form_types = \
MCCT_SERVICE_TYPES[service_type_filter] if service_type_filter else MCCT_SERVICE_TYPES["all"]
return filter(None, [form for form in relevant_form_types])
def _get_report_query(start_date, end_date, filtered_case_ids, location_ids):
return {
"query": {
"bool": {
"must": [
{"range": {"form.meta.timeEnd": {"from": start_date, "to": end_date, "include_upper": True}}},
{"term": {"doc_type": "xforminstance"}}
]
}
},
"filter": {
"and": [
{"not": {"missing": {"field": "form.case.@case_id"}}},
{"terms": {"form.case.@case_id": filtered_case_ids}},
{"script":{
"script": """
if (_source.form.?service_type != "" && _source.form.?location_id != "" && (location_ids contains _source.form.?location_id)) {
return true;
}
return false;
""",
"params": {
"location_ids": location_ids
}
}}
]
}
}
def calculate_form_data(self, form):
try:
case_id = form["form"]["case"]["@case_id"]
case = get_case_by_id(case_id)
except KeyError:
case = EMPTY_FIELD
amount_due = EMPTY_FIELD
if form["form"].get("registration_amount", None) is not None:
amount_due = form["form"].get("registration_amount", None)
elif form["form"].get("immunization_amount", None) is not None:
amount_due = form["form"].get("immunization_amount", None)
service_type = form["form"].get("service_type", EMPTY_FIELD)
form_id = form["_id"]
location_name = EMPTY_FIELD
location_parent_name = EMPTY_FIELD
location_id = form["form"].get("location_id", None)
if location_id is not None:
location = Location.get(location_id)
location_name = location.name
location_parent = location.parent
if location_parent is not None and location_parent.location_type != 'state':
while location_parent is not None and location_parent.location_type not in ('district', 'lga'):
location_parent = location_parent.parent
location_parent_name = location_parent.name if location_parent is not None else EMPTY_FIELD
return {'case': case, 'service_type': service_type, 'location_name': location_name,
'location_parent_name': location_parent_name, 'amount_due': amount_due, 'form_id': form_id}
class BaseReport(CustomProjectReport, ElasticProjectInspectionReport, ProjectReport,
ProjectReportParametersMixin, MultiFormDrilldownMixin, DatespanMixin):
emailable = False
exportable = True
exportable_all = True
asynchronous = True
ajax_pagination = True
include_inactive = True
fields = [
AsyncLocationFilter,
'custom.m4change.fields.DateRangeField',
'custom.m4change.fields.CaseSearchField',
ServiceTypeFilter
]
base_template = 'm4change/report.html'
report_template_path = 'm4change/selectTemplate.html'
filter_users_field_class = StrongFilterUsersField
@property
def es_results(self):
if not getattr(self, 'es_response', None):
self.es_query(paginated=True)
return self.es_response
@property
def es_all_results(self):
if not getattr(self, 'es_response', None):
self.es_query(paginated=False)
return self.es_response
def _get_filtered_cases(self, start_date, end_date):
query = {
"query": {
"bool": {
"must_not": [
{"range": {"modified_on.date": {"lt": start_date}}},
{"range": {"opened_on.date": {"gt": end_date}}}
]
}
}
}
case_search = self.request.GET.get("case_search", "")
if len(case_search) > 0:
query["filter"] = {
"and": [
{"regexp": {"name.exact": ".*?%s.*?" % case_search}}
]
}
es_response = es_query(params={"domain.exact": self.domain}, q=query, es_url=ES_URLS.get('cases'))
return [res['_source']['_id'] for res in es_response.get('hits', {}).get('hits', [])]
@property
def total_records(self):
return int(self.es_results['hits']['total'])
def _make_link(self, url, label):
return '<a href="%s" target="_blank">%s</a>' % (url, label)
def _get_case_name_html(self, case, add_link):
case_name = get_property(case, "full_name", EMPTY_FIELD)
return self._make_link(
reverse('corehq.apps.reports.views.case_details', args=[self.domain, case._id]), case_name
) if add_link else case_name
def _get_service_type_html(self, form, service_type, add_link):
return self._make_link(
reverse('corehq.apps.reports.views.form_data', args=[self.domain, form['_id']]), service_type
) if add_link else service_type
class McctProjectReview(BaseReport):
name = 'mCCT Beneficiary list view'
slug = 'mcct_project_review_page'
report_template_path = 'm4change/reviewStatus.html'
display_status = 'eligible'
@property
def headers(self):
headers = DataTablesHeader(
DataTablesColumn(_("Date of service"), prop_name="form.meta.timeEnd"),
DataTablesColumn(_("Beneficiary Name"), sortable=False),
DataTablesColumn(_("Service Type"), sortable=False),
DataTablesColumn(_("Health Facility"), sortable=False),
DataTablesColumn(_("Card No."), sortable=False),
DataTablesColumn(_("LGA"), sortable=False),
DataTablesColumn(_("Phone No."), sortable=False),
DataTablesColumn(_("Amount"), sortable=False),
DataTablesColumn(_("Visits"), sortable=False),
DataTablesColumn(mark_safe('Status/Action <a href="#" class="select-all btn btn-mini btn-inverse">all</a> '
'<a href="#" class="select-none btn btn-mini btn-warning">none</a>'),
sortable=False, span=3))
return headers
def es_query(self, paginated):
if not getattr(self, 'es_response', None):
range = self.request_params.get('range', None)
start_date = None
end_date = None
if range is not None:
dates = str(range).split(_(" to "))
start_date = dates[0]
end_date = dates[1]
filtered_case_ids = self._get_filtered_cases(start_date, end_date)
exclude_form_ids = [mcct_status.form_id for mcct_status in McctStatus.objects.filter(
domain=self.domain, received_on__range=(start_date, end_date))
if (mcct_status.status != "eligible" or
(mcct_status.immunized == False and
(date.today() - mcct_status.registration_date).days < 272 and
mcct_status.is_booking == False))]
location_ids = get_location_hierarchy_by_id(self.request_params.get("location_id", None), self.domain,
CCT_only=True)
q = _get_report_query(start_date, end_date, filtered_case_ids, location_ids)
if len(exclude_form_ids) > 0:
q["filter"]["and"].append({"not": {"ids": {"values": exclude_form_ids}}})
xmlnss = _get_relevant_xmlnss_for_service_type(self.request.GET.get("service_type_filter"))
if xmlnss:
q["filter"]["and"].append({"terms": {"xmlns.exact": xmlnss}})
modify_close = filter(None, [u'Modify/Close Client'])
q["filter"]["and"].append({"not": {"terms": {"form.@name": modify_close}}})
q["sort"] = self.get_sorting_block() \
if self.get_sorting_block() else [{"form.meta.timeEnd" : {"order": "desc"}}]
if paginated:
self.es_response = es_query(params={"domain.exact": self.domain}, q=q, es_url=ES_URLS.get('forms'),
start_at=self.pagination.start, size=self.pagination.count)
else:
self.es_response = es_query(params={"domain.exact": self.domain}, q=q, es_url=ES_URLS.get('forms'))
return self.es_response
@property
def rows(self):
return self.make_rows(self.es_results, with_checkbox=True)
@property
def get_all_rows(self):
return self.make_rows(self.es_all_results, with_checkbox=False)
def make_rows(self, es_results, with_checkbox):
        # use the result set passed in (paged es_results vs. es_all_results for export)
        submissions = [res['_source'] for res in es_results.get('hits', {}).get('hits', [])]
for form in submissions:
data = calculate_form_data(self, form)
row = [
DateTimeProperty().wrap(form["form"]["meta"]["timeEnd"]).strftime("%Y-%m-%d"),
self._get_case_name_html(data.get('case'), with_checkbox),
self._get_service_type_html(form, data.get('service_type'), with_checkbox),
data.get('location_name'),
get_property(data.get('case'), "card_number", EMPTY_FIELD),
data.get('location_parent_name'),
get_property(data.get('case'), "phone_number", EMPTY_FIELD),
data.get('amount_due'),
get_property(data.get('case'), "visits", EMPTY_FIELD)
]
if with_checkbox:
checkbox = mark_safe('<input type="checkbox" class="selected-element" '
'data-formid="%(form_id)s" '
'data-caseid="%(case_id)s" data-servicetype="%(service_type)s"/>')
row.append(checkbox % dict(form_id=data.get('form_id'), case_id=data.get('case_id'),
service_type=data.get('service_type')))
else:
row.append(self.display_status)
yield row
@property
def export_table(self):
headers = self.headers
headers.header.pop()
headers.header.append(DataTablesColumn(_("Status"), sortable=False))
table = headers.as_export_table
export_rows = self.get_all_rows
table.extend(export_rows)
return [[self.export_sheet_name, table]]
@property
@request_cache("export")
def export_response(self):
self.request.datespan = None
export_all_rows_task.delay(self.__class__, self.__getstate__())
return HttpResponse()
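# Editor's note: illustrative sketch only -- a rough shape of the ES body that
# McctProjectReview.es_query() assembles above. The literal ids and xmlns below
# are hypothetical placeholders, not values from any real domain.
_EXAMPLE_MCCT_QUERY_BODY = {
    "filter": {
        "and": [
            # base clauses come from _get_report_query(start_date, end_date,
            # filtered_case_ids, location_ids)
            {"not": {"ids": {"values": ["form-id-1", "form-id-2"]}}},       # exclude_form_ids
            {"terms": {"xmlns.exact": ["http://example.org/xmlns/anc"]}},   # service type filter
            {"not": {"terms": {"form.@name": [u"Modify/Close Client"]}}},
        ]
    },
    "sort": [{"form.meta.timeEnd": {"order": "desc"}}],
}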
class McctClientApprovalPage(McctProjectReview):
name = 'mCCT Beneficiary Approval Page'
slug = 'mcct_client_approval_page'
report_template_path = 'm4change/approveStatus.html'
display_status = 'reviewed'
def es_query(self, paginated):
reviewed_form_ids = get_form_ids_by_status(self.domain, getattr(self, 'display_status', None))
if len(reviewed_form_ids) > 0:
if not getattr(self, 'es_response', None):
date_tuple = _get_date_range(self.request_params.get('range', None))
filtered_case_ids = self._get_filtered_cases(date_tuple[0], date_tuple[1])
location_ids = get_location_hierarchy_by_id(self.request_params.get("location_id", None), self.domain,
CCT_only=True)
q = _get_report_query(date_tuple[0], date_tuple[1], filtered_case_ids, location_ids)
if len(reviewed_form_ids) > 0:
q["filter"]["and"].append({"ids": {"values": reviewed_form_ids}})
q["sort"] = self.get_sorting_block() if self.get_sorting_block() else [{"form.meta.timeEnd" : {"order": "desc"}}]
if paginated:
self.es_response = es_query(params={"domain.exact": self.domain}, q=q, es_url=ES_URLS.get('forms'),
start_at=self.pagination.start, size=self.pagination.count)
else:
self.es_response = es_query(params={"domain.exact": self.domain}, q=q, es_url=ES_URLS.get('forms'))
else:
self.es_response = {'hits': {'total': 0}}
return self.es_response
class McctClientPaymentPage(McctClientApprovalPage):
name = 'mCCT Beneficiary Payment Page'
slug = 'mcct_client_payment_page'
report_template_path = 'm4change/paidStatus.html'
display_status = 'approved'
class McctRejectedClientPage(McctClientApprovalPage):
name = 'mCCT Rejected Beneficiary Page'
slug = 'mcct_rejected_clients_page'
report_template_path = 'm4change/activateStatus.html'
display_status = 'rejected'
@property
def headers(self):
headers = DataTablesHeader(
DataTablesColumn(_("Date of service"), prop_name="form.meta.timeEnd"),
DataTablesColumn(_("Beneficiary Name"), sortable=False),
DataTablesColumn(_("Service Type"), sortable=False),
DataTablesColumn(_("Health Facility"), sortable=False),
DataTablesColumn(_("Card No."), sortable=False),
DataTablesColumn(_("LGA"), sortable=False),
DataTablesColumn(_("Phone No."), sortable=False),
DataTablesColumn(_("Amount"), sortable=False),
DataTablesColumn(_("Comment"), sortable=False),
DataTablesColumn(_("User"), sortable=False),
DataTablesColumn(mark_safe('Status/Action <a href="#" class="select-all btn btn-mini btn-inverse">all</a> '
'<a href="#" class="select-none btn btn-mini btn-warning">none</a>'),
sortable=False, span=3))
return headers
def make_rows(self, es_results, with_checkbox):
        submissions = [res['_source'] for res in es_results.get('hits', {}).get('hits', [])]
for form in submissions:
data = calculate_form_data(self, form)
try:
status_data = McctStatus.objects.get(domain=self.domain, form_id=data.get('form_id'))
reason = status_data.reason
except McctStatus.DoesNotExist:
reason = None
row = [
DateTimeProperty().wrap(form["form"]["meta"]["timeEnd"]).strftime("%Y-%m-%d %H:%M"),
self._get_case_name_html(data.get('case'), with_checkbox),
self._get_service_type_html(form, data.get('service_type'), with_checkbox),
data.get('location_name'),
get_property(data.get('case'), "card_number", EMPTY_FIELD),
data.get('location_parent_name'),
get_property(data.get('case'), "phone_number", EMPTY_FIELD),
data.get('amount_due'),
REJECTION_REASON_DISPLAY_NAMES[reason] if reason is not None else '',
form["form"]["meta"]["username"]
]
if with_checkbox:
checkbox = mark_safe('<input type="checkbox" class="selected-element" '
'data-formid="%(form_id)s" '
'data-caseid="%(case_id)s" data-servicetype="%(service_type)s"/>')
row.append(checkbox % dict(form_id=data.get('form_id'), case_id=data.get('case_id'),
service_type=data.get('service_type')))
else:
row.insert(8, self.display_status)
yield row
@property
def export_table(self):
headers = self.headers
headers.header.insert(8, DataTablesColumn("Status", sortable=False))
headers.header.pop()
table = headers.as_export_table
export_rows = self.get_all_rows
table.extend(export_rows)
return [[self.export_sheet_name, table]]
class McctClientLogPage(McctProjectReview):
name = 'mCCT Beneficiary Log Page'
slug = 'mcct_client_log_page'
report_template_path = 'm4change/report_content.html'
@property
def headers(self):
headers = DataTablesHeader(
DataTablesColumn(_("Date of action"), sortable=False),
DataTablesColumn(_("Beneficiary Name"), sortable=False),
DataTablesColumn(_("Service Type"), sortable=False),
DataTablesColumn(_("Health Facility"), sortable=False),
DataTablesColumn(_("Card No."), sortable=False),
DataTablesColumn(_("LGA"), sortable=False),
DataTablesColumn(_("Phone No."), sortable=False),
DataTablesColumn(_("Amount"), sortable=False),
DataTablesColumn(_("Status"), sortable=False),
DataTablesColumn(_("Comment"), sortable=False),
DataTablesColumn(_("User"), sortable=False))
return headers
def es_query(self, paginated):
if not getattr(self, 'es_response', None):
date_tuple = _get_date_range(self.request_params.get('range', None))
filtered_case_ids = self._get_filtered_cases(date_tuple[0], date_tuple[1])
location_ids = get_location_hierarchy_by_id(self.request_params.get("location_id", None), self.domain,
CCT_only=True)
q = _get_report_query(date_tuple[0], date_tuple[1], filtered_case_ids, location_ids)
xmlnss = _get_relevant_xmlnss_for_service_type(self.request.GET.get("service_type_filter"))
if xmlnss:
q["filter"]["and"].append({"terms": {"xmlns.exact": xmlnss}})
modify_close = filter(None, [u'Modify/Close Client'])
q["filter"]["and"].append({"not": {"terms": {"form.@name": modify_close}}})
q["sort"] = self.get_sorting_block() \
if self.get_sorting_block() else [{"form.meta.timeEnd": {"order": "desc"}}]
if paginated:
self.es_response = es_query(params={"domain.exact": self.domain}, q=q, es_url=ES_URLS.get('forms'),
start_at=self.pagination.start, size=self.pagination.count)
else:
self.es_response = es_query(params={"domain.exact": self.domain}, q=q, es_url=ES_URLS.get('forms'))
return self.es_response
def make_rows(self, es_results, with_checkbox):
        submissions = [res['_source'] for res in es_results.get('hits', {}).get('hits', [])]
for form in submissions:
data = calculate_form_data(self, form)
try:
status_data = McctStatus.objects.get(domain=self.domain, form_id=data.get('form_id'))
status, reason, status_date, username = (status_data.status, status_data.reason,
status_data.modified_on, status_data.user)
            except McctStatus.DoesNotExist:
status, reason, status_date, username = ('eligible', None, None, None)
row = [
status_date.strftime("%Y-%m-%d %H:%M") if status_date is not None else EMPTY_FIELD,
self._get_case_name_html(data.get('case'), with_checkbox),
self._get_service_type_html(form, data.get('service_type'), with_checkbox),
data.get('location_name'),
get_property(data.get('case'), "card_number", EMPTY_FIELD),
data.get('location_parent_name'),
get_property(data.get('case'), "phone_number", EMPTY_FIELD),
data.get('amount_due'),
status,
REJECTION_REASON_DISPLAY_NAMES[reason] if reason is not None else '',
username if username else form["form"]["meta"]["username"]
]
yield row
@property
def export_table(self):
table = self.headers.as_export_table
export_rows = self.get_all_rows
table.extend(export_rows)
return [[self.export_sheet_name, table]]
class McctPaidClientsPage(McctClientApprovalPage):
name = 'mCCT Paid Beneficiary Page'
slug = 'mcct_paid_clients_page'
report_template_path = 'm4change/report_content.html'
display_status = 'paid'
@property
def rows(self):
return self.make_rows(self.es_results, with_checkbox=False)
@property
def headers(self):
headers = DataTablesHeader(
DataTablesColumn(_("Date of service"), prop_name="form.meta.timeEnd"),
DataTablesColumn(_("Beneficiary Name"), sortable=False),
DataTablesColumn(_("Service Type"), sortable=False),
DataTablesColumn(_("Health Facility"), sortable=False),
DataTablesColumn(_("Card No."), sortable=False),
DataTablesColumn(_("LGA"), sortable=False),
DataTablesColumn(_("Phone No."), sortable=False),
DataTablesColumn(_("Amount"), sortable=False),
DataTablesColumn(_("Status"), sortable=False))
return headers | bsd-3-clause |
cmtm/networkx | networkx/readwrite/tests/test_shp.py | 9 | 7027 | """Unit tests for shp.
"""
import os
import tempfile
from nose import SkipTest
from nose.tools import assert_equal
import networkx as nx
class TestShp(object):
@classmethod
def setupClass(cls):
global ogr
try:
from osgeo import ogr
except ImportError:
raise SkipTest('ogr not available.')
def deletetmp(self, drv, *paths):
for p in paths:
if os.path.exists(p):
drv.DeleteDataSource(p)
def setUp(self):
def createlayer(driver, layerType=ogr.wkbLineString):
lyr = driver.CreateLayer("edges", None, layerType)
namedef = ogr.FieldDefn("Name", ogr.OFTString)
namedef.SetWidth(32)
lyr.CreateField(namedef)
return lyr
drv = ogr.GetDriverByName("ESRI Shapefile")
testdir = os.path.join(tempfile.gettempdir(), 'shpdir')
shppath = os.path.join(tempfile.gettempdir(), 'tmpshp.shp')
multi_shppath = os.path.join(tempfile.gettempdir(), 'tmp_mshp.shp')
self.deletetmp(drv, testdir, shppath, multi_shppath)
os.mkdir(testdir)
self.names = ['a', 'b', 'c', 'c'] # edgenames
self.paths = ([(1.0, 1.0), (2.0, 2.0)],
[(2.0, 2.0), (3.0, 3.0)],
[(0.9, 0.9), (4.0, 0.9), (4.0, 2.0)])
self.simplified_names = ['a', 'b', 'c'] # edgenames
self.simplified_paths = ([(1.0, 1.0), (2.0, 2.0)],
[(2.0, 2.0), (3.0, 3.0)],
[(0.9, 0.9), (4.0, 2.0)])
self.multi_names = ['a', 'a', 'a', 'a'] # edgenames
shp = drv.CreateDataSource(shppath)
lyr = createlayer(shp)
for path, name in zip(self.paths, self.names):
feat = ogr.Feature(lyr.GetLayerDefn())
g = ogr.Geometry(ogr.wkbLineString)
for p in path:
g.AddPoint_2D(*p)
feat.SetGeometry(g)
feat.SetField("Name", name)
lyr.CreateFeature(feat)
# create single record multiline shapefile for testing
multi_shp = drv.CreateDataSource(multi_shppath)
multi_lyr = createlayer(multi_shp, ogr.wkbMultiLineString)
multi_g = ogr.Geometry(ogr.wkbMultiLineString)
for path in self.paths:
g = ogr.Geometry(ogr.wkbLineString)
for p in path:
g.AddPoint_2D(*p)
multi_g.AddGeometry(g)
multi_feat = ogr.Feature(multi_lyr.GetLayerDefn())
multi_feat.SetGeometry(multi_g)
multi_feat.SetField("Name", 'a')
multi_lyr.CreateFeature(multi_feat)
self.shppath = shppath
self.multi_shppath = multi_shppath
self.testdir = testdir
self.drv = drv
def testload(self):
def compare_graph_paths_names(g, paths, names):
expected = nx.DiGraph()
for p in paths:
nx.add_path(expected, p)
assert_equal(sorted(expected.node), sorted(g.node))
assert_equal(sorted(expected.edges()), sorted(g.edges()))
g_names = [g.get_edge_data(s, e)['Name'] for s, e in g.edges()]
assert_equal(names, sorted(g_names))
# simplified
G = nx.read_shp(self.shppath)
compare_graph_paths_names(G, self.simplified_paths, \
self.simplified_names)
# unsimplified
G = nx.read_shp(self.shppath, simplify=False)
compare_graph_paths_names(G, self.paths, self.names)
# multiline unsimplified
G = nx.read_shp(self.multi_shppath, simplify=False)
compare_graph_paths_names(G, self.paths, self.multi_names)
def checkgeom(self, lyr, expected):
feature = lyr.GetNextFeature()
actualwkt = []
while feature:
actualwkt.append(feature.GetGeometryRef().ExportToWkt())
feature = lyr.GetNextFeature()
assert_equal(sorted(expected), sorted(actualwkt))
def test_geometryexport(self):
expectedpoints_simple = (
"POINT (1 1)",
"POINT (2 2)",
"POINT (3 3)",
"POINT (0.9 0.9)",
"POINT (4 2)"
)
expectedlines_simple = (
"LINESTRING (1 1,2 2)",
"LINESTRING (2 2,3 3)",
"LINESTRING (0.9 0.9,4.0 0.9,4 2)"
)
expectedpoints = (
"POINT (1 1)",
"POINT (2 2)",
"POINT (3 3)",
"POINT (0.9 0.9)",
"POINT (4.0 0.9)",
"POINT (4 2)"
)
expectedlines = (
"LINESTRING (1 1,2 2)",
"LINESTRING (2 2,3 3)",
"LINESTRING (0.9 0.9,4.0 0.9)",
"LINESTRING (4.0 0.9,4 2)"
)
tpath = os.path.join(tempfile.gettempdir(), 'shpdir')
G = nx.read_shp(self.shppath)
nx.write_shp(G, tpath)
shpdir = ogr.Open(tpath)
self.checkgeom(shpdir.GetLayerByName("nodes"), expectedpoints_simple)
self.checkgeom(shpdir.GetLayerByName("edges"), expectedlines_simple)
# Test unsimplified
# Nodes should have additional point,
# edges should be 'flattened'
G = nx.read_shp(self.shppath, simplify=False)
nx.write_shp(G, tpath)
shpdir = ogr.Open(tpath)
self.checkgeom(shpdir.GetLayerByName("nodes"), expectedpoints)
self.checkgeom(shpdir.GetLayerByName("edges"), expectedlines)
def test_attributeexport(self):
def testattributes(lyr, graph):
feature = lyr.GetNextFeature()
while feature:
coords = []
ref = feature.GetGeometryRef()
last = ref.GetPointCount() - 1
edge_nodes = (ref.GetPoint_2D(0), ref.GetPoint_2D(last))
name = feature.GetFieldAsString('Name')
assert_equal(graph.get_edge_data(*edge_nodes)['Name'], name)
feature = lyr.GetNextFeature()
tpath = os.path.join(tempfile.gettempdir(), 'shpdir')
G = nx.read_shp(self.shppath)
nx.write_shp(G, tpath)
shpdir = ogr.Open(tpath)
edges = shpdir.GetLayerByName("edges")
testattributes(edges, G)
def test_wkt_export(self):
G = nx.DiGraph()
tpath = os.path.join(tempfile.gettempdir(), 'shpdir')
points = (
"POINT (0.9 0.9)",
"POINT (4 2)"
)
line = (
"LINESTRING (0.9 0.9,4 2)",
)
G.add_node(1, Wkt=points[0])
G.add_node(2, Wkt=points[1])
G.add_edge(1, 2, Wkt=line[0])
try:
nx.write_shp(G, tpath)
except Exception as e:
assert False, e
shpdir = ogr.Open(tpath)
self.checkgeom(shpdir.GetLayerByName("nodes"), points)
self.checkgeom(shpdir.GetLayerByName("edges"), line)
def tearDown(self):
self.deletetmp(self.drv, self.testdir, self.shppath)
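def _example_read_write_shp(shp_path, out_dir):
    """Editor's note: illustrative sketch only, not part of the test suite.
    Mirrors the round trip exercised above; assumes OGR is installed and
    ``shp_path`` points at a line shapefile (both are assumptions)."""
    graph = nx.read_shp(shp_path, simplify=False)  # DiGraph keyed by coordinate tuples
    nx.write_shp(graph, out_dir)                   # writes nodes.shp and edges.shp into out_dir
    return graph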
| bsd-3-clause |
ebt-hpc/cca | cca/scripts/metrics_queries_cpp.py | 1 | 23376 | #!/usr/bin/env python3
'''
Source code metrics for C/C++ programs
Copyright 2013-2018 RIKEN
Copyright 2018-2020 Chiba Institute of Technology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
__author__ = 'Masatomo Hashimoto <[email protected]>'
from ns import NS_TBL
Q_LOOP_LOOP_C = '''DEFINE input:inference "ont.cpi"
PREFIX cpp: <%(cpp_ns)s>
PREFIX ver: <%(ver_ns)s>
PREFIX src: <%(src_ns)s>
SELECT DISTINCT ?ver ?loc ?fd ?fn ?loop ?child_loop ?loop_d ?child_loop_d
WHERE {
GRAPH <%%(proj)s> {
{
SELECT DISTINCT ?ver ?loc ?loop ?loop_d
WHERE {
?loop a cpp:IterationStatement ;
src:treeDigest ?loop_d ;
cpp:inTranslationUnit ?tu .
?tu a cpp:TranslationUnit ;
src:inFile/src:location ?loc ;
ver:version ?ver .
} GROUP BY ?ver ?loc ?loop ?loop_d
}
OPTIONAL {
?child_loop a cpp:IterationStatement ;
src:treeDigest ?child_loop_d ;
cpp:inIterationStatement ?loop .
FILTER (?child_loop != ?loop)
}
OPTIONAL {
?loop cpp:inFunctionDefinition ?fd .
?fd a cpp:FunctionDefinition ;
cpp:provides/(cpp:name|cpp:regexp) ?fn .
FILTER NOT EXISTS {
?loop cpp:inFunctionDefinition ?fd0 .
?fd0 cpp:inFunctionDefinition ?fd .
FILTER (?fd != ?fd0)
}
}
}
}
''' % NS_TBL
Q_LOOP_FD_C = '''DEFINE input:inference "ont.cpi"
PREFIX cpp: <%(cpp_ns)s>
PREFIX ver: <%(ver_ns)s>
PREFIX src: <%(src_ns)s>
SELECT DISTINCT ?ver ?loc ?loop ?callee ?callee_loc ?loop_d
WHERE {
GRAPH <%%(proj)s> {
{
SELECT DISTINCT ?tu ?loop ?callee ?loc ?loop_d
WHERE {
?call a ?call_cat OPTION (INFERENCE NONE) ;
cpp:inIterationStatement ?loop ;
cpp:mayCall ?callee .
FILTER (?call_cat IN (cpp:PostfixExpressionFunCall,
cpp:PostfixExpressionFunCallGuarded))
?loop a cpp:IterationStatement ;
src:treeDigest ?loop_d ;
cpp:inTranslationUnit ?tu .
?tu a cpp:TranslationUnit ;
src:inFile/src:location ?loc .
} GROUP BY ?tu ?loop ?callee ?loc ?loop_d
}
?callee a cpp:FunctionDefinition ;
cpp:inTranslationUnit/src:inFile ?callee_file .
?callee_file a src:File ;
src:location ?callee_loc ;
ver:version ?ver .
FILTER EXISTS {
?tu ver:version ?ver .
}
}
}
''' % NS_TBL
Q_FD_FD_C = '''DEFINE input:inference "ont.cpi"
PREFIX cpp: <%(cpp_ns)s>
PREFIX ver: <%(ver_ns)s>
PREFIX src: <%(src_ns)s>
SELECT DISTINCT ?ver ?loc ?fd ?callee ?callee_loc
WHERE {
GRAPH <%%(proj)s> {
{
SELECT DISTINCT ?ver ?loc ?fd ?callee
WHERE {
?call a ?call_cat OPTION (INFERENCE NONE) ;
cpp:inFunctionDefinition ?fd ;
cpp:mayCall ?callee .
FILTER (?call_cat IN (cpp:PostfixExpressionFunCall,
cpp:PostfixExpressionFunCallGuarded))
?fd a cpp:FunctionDefinition ;
cpp:inTranslationUnit/src:inFile ?file .
FILTER NOT EXISTS {
?call cpp:inFunctionDefinition ?fd0 .
?fd0 cpp:inFunctionDefinition ?fd .
FILTER (?fd != ?fd0)
}
?file a src:File ;
src:location ?loc ;
ver:version ?ver .
} GROUP BY ?ver ?loc ?fd ?callee
}
?callee a cpp:FunctionDefinition ;
cpp:inTranslationUnit*/src:inFile ?callee_file .
?callee_file a src:File ;
src:location ?callee_loc ;
ver:version ?ver .
}
}
''' % NS_TBL
Q_ARRAYS_C = '''DEFINE input:inference "ont.cpi"
PREFIX cpp: <%(cpp_ns)s>
PREFIX ver: <%(ver_ns)s>
PREFIX src: <%(src_ns)s>
SELECT DISTINCT ?ver ?loc ?fn ?loop ?aname ?rank ?dtor ?tyc ?loop_d
WHERE {
GRAPH <%%(proj)s> {
{
SELECT DISTINCT ?tu ?loop ?a ?aname ?loop_d ?aa ?acat
WHERE {
?aa a cpp:PostfixExpressionSubscr ;
src:child0* ?a ;
cpp:inIterationStatement ?loop .
FILTER NOT EXISTS {
?aa src:parent+ ?aa0 .
?aa0 a cpp:PostfixExpressionSubscr .
}
?a a ?acat OPTION (INFERENCE NONE) ;
cpp:name ?aname .
FILTER NOT EXISTS {
?a0 src:parent+ ?a ;
cpp:name [] .
}
?loop a cpp:IterationStatement ;
src:treeDigest ?loop_d ;
cpp:inTranslationUnit ?tu .
} GROUP BY ?tu ?loop ?a ?aname ?loop_d ?aa ?acat
}
?tu a cpp:TranslationUnit ;
src:inFile/src:location ?loc ;
ver:version ?ver .
{
SELECT DISTINCT ?aa (COUNT(DISTINCT ?aa0) AS ?rank)
WHERE {
?aa0 a cpp:PostfixExpressionSubscr ;
src:parent* ?aa .
} GROUP BY ?aa
}
{
SELECT DISTINCT ?a ?dtor ?dcat ?tyc ?ty
WHERE {
?a cpp:declarator ?dtor .
?dtor a ?dcat OPTION (INFERENCE NONE) ;
cpp:type ?ty ;
cpp:declarationTypeSpec ?tspec .
?tspec a ?tyc OPTION (INFERENCE NONE) .
} GROUP BY ?a ?dtor ?dcat ?tyc ?ty
}
OPTIONAL {
?loop cpp:inFunctionDefinition ?fd .
?fd a cpp:FunctionDefinition ;
cpp:provides/(cpp:name|cpp:regexp) ?fn .
FILTER NOT EXISTS {
?loop cpp:inFunctionDefinition ?fd0 .
?fd0 cpp:inFunctionDefinition ?fd .
FILTER (?fd != ?fd0)
}
}
}
}
''' % NS_TBL
Q_FFR_IN_LOOP_C = '''DEFINE input:inference "ont.cpi"
PREFIX cpp: <%(cpp_ns)s>
PREFIX ver: <%(ver_ns)s>
PREFIX src: <%(src_ns)s>
SELECT DISTINCT ?ver ?loc ?fd ?fn ?loop ?fref ?fname ?nargs ?h ?loop_d
WHERE {
GRAPH <%%(proj)s> {
{
SELECT DISTINCT ?ver ?loc ?loop ?loop_d
WHERE {
?loop a cpp:IterationStatement ;
src:treeDigest ?loop_d ;
cpp:inTranslationUnit ?tu .
?tu a cpp:TranslationUnit ;
src:inFile/src:location ?loc ;
ver:version ?ver .
} GROUP BY ?ver ?loc ?loop ?loop_d
}
OPTIONAL {
?loop cpp:inFunctionDefinition ?fd .
?fd a cpp:FunctionDefinition ;
cpp:provides/(cpp:name|cpp:regexp) ?fn .
FILTER NOT EXISTS {
?loop cpp:inFunctionDefinition ?fd0 .
        ?fd0 cpp:inFunctionDefinition ?fd .
FILTER (?fd != ?fd0)
}
}
{
SELECT DISTINCT ?loop ?fref ?h ?fname (COUNT(DISTINCT ?arg) AS ?nargs) ?p
WHERE {
?call a ?call_cat OPTION (INFERENCE NONE) ;
src:treeDigest ?h ;
src:child0 ?fref ;
src:child1 ?arg ;
src:child1 ?farg ;
cpp:inIterationStatement ?loop .
FILTER (?call_cat IN (cpp:PostfixExpressionFunCall,
cpp:PostfixExpressionFunCallGuarded))
?fref cpp:requires/cpp:name ?fname .
?farg a ?facat OPTION (INFERENCE NONE) .
FILTER (EXISTS { ?farg a cpp:FloatingLiteral } ||
EXISTS {
?farg cpp:declarator ?dtor .
?dtor a ?dcat OPTION (INFERENCE NONE) ;
cpp:declarationTypeSpec ?tspec .
?tspec a ?tcat OPTION (INFERENCE NONE) .
FILTER (?tcat IN (cpp:Double, cpp:Float))
}
)
} GROUP BY ?loop ?fref ?h ?fname ?p
}
}
}
''' % NS_TBL
Q_DFR_IN_LOOP_C = '''DEFINE input:inference "ont.cpi"
PREFIX cpp: <%(cpp_ns)s>
PREFIX ver: <%(ver_ns)s>
PREFIX src: <%(src_ns)s>
SELECT DISTINCT ?ver ?loc ?fd ?fn ?loop ?fname ?h ?loop_d
WHERE {
GRAPH <%%(proj)s> {
{
SELECT DISTINCT ?ver ?loc ?loop ?loop_d
WHERE {
?loop a cpp:IterationStatement ;
src:treeDigest ?loop_d ;
cpp:inTranslationUnit ?tu .
?tu a cpp:TranslationUnit ;
src:inFile/src:location ?loc ;
ver:version ?ver .
} GROUP BY ?ver ?loc ?loop ?loop_d
}
OPTIONAL {
?loop cpp:inFunctionDefinition ?fd .
?fd a cpp:FunctionDefinition ;
cpp:provides/(cpp:name|cpp:regexp) ?fn .
FILTER NOT EXISTS {
?loop cpp:inFunctionDefinition ?fd0 .
        ?fd0 cpp:inFunctionDefinition ?fd .
FILTER (?fd != ?fd0)
}
}
{
SELECT DISTINCT ?loop ?fref ?h ?fname (COUNT(DISTINCT ?arg) AS ?nargs) ?p
WHERE {
?call a ?call_cat OPTION (INFERENCE NONE) ;
src:treeDigest ?h ;
src:child0 ?fref ;
src:child1 ?arg ;
src:child1 ?farg ;
cpp:inIterationStatement ?loop .
FILTER (?call_cat IN (cpp:PostfixExpressionFunCall,
cpp:PostfixExpressionFunCallGuarded))
?fref cpp:requires/cpp:name ?fname .
?farg a ?facat OPTION (INFERENCE NONE) .
FILTER (EXISTS { ?farg a cpp:FloatingLiteral } ||
EXISTS {
?farg cpp:declarator ?dtor .
?dtor a ?dcat OPTION (INFERENCE NONE) ;
cpp:declarationTypeSpec ?tspec .
?tspec a ?tcat OPTION (INFERENCE NONE) .
FILTER (?tcat IN (cpp:Double))
}
)
} GROUP BY ?loop ?fref ?h ?fname ?p
}
}
}
''' % NS_TBL
Q_FOP_IN_LOOP_C = '''DEFINE input:inference "ont.cpi"
PREFIX cpp: <%(cpp_ns)s>
PREFIX ver: <%(ver_ns)s>
PREFIX src: <%(src_ns)s>
SELECT DISTINCT ?ver ?loc ?fd ?fn ?loop ?nfop ?loop_d
WHERE {
GRAPH <%%(proj)s> {
{
SELECT DISTINCT ?ver ?loc ?loop ?loop_d
WHERE {
?loop a cpp:IterationStatement ;
src:treeDigest ?loop_d ;
cpp:inTranslationUnit ?tu .
?tu a cpp:TranslationUnit ;
src:inFile/src:location ?loc ;
ver:version ?ver .
} GROUP BY ?ver ?loc ?loop ?loop_d
}
OPTIONAL {
?loop cpp:inFunctionDefinition ?fd .
?fd a cpp:FunctionDefinition ;
cpp:provides/(cpp:name|cpp:regexp) ?fn .
FILTER NOT EXISTS {
?loop cpp:inFunctionDefinition ?fd0 .
        ?fd0 cpp:inFunctionDefinition ?fd .
FILTER (?fd != ?fd0)
}
}
OPTIONAL {
SELECT DISTINCT ?loop (COUNT(DISTINCT ?h) AS ?nfop)
WHERE {
?fop a ?fop_cat OPTION (INFERENCE NONE) ;
src:treeDigest ?h ;
cpp:inIterationStatement ?loop .
FILTER (?fop_cat IN (cpp:MultiplicativeExpressionMult,
cpp:MultiplicativeExpressionDiv,
cpp:MultiplicativeExpressionMod,
cpp:AdditiveExpressionAdd,
cpp:AdditiveExpressionSubt
))
?opr a cpp:Expression ;
src:parent+ ?fop .
FILTER (EXISTS {
?opr a cpp:FloatingLiteral .
} || EXISTS {
?opr cpp:declarator/cpp:declarationTypeSpec ?tspec .
?tspec a ?tcat OPTION (INFERENCE NONE) .
FILTER (?tcat IN (cpp:Double,cpp:Float))
})
} GROUP BY ?loop
}
}
}
''' % NS_TBL
Q_IN_LOOP_C = '''DEFINE input:inference "ont.cpi"
PREFIX cpp: <%(cpp_ns)s>
PREFIX ver: <%(ver_ns)s>
PREFIX src: <%(src_ns)s>
SELECT DISTINCT ?ver ?loc ?fd ?fn ?loop ?vname ?loop_d ?nbr ?nes ?nop ?nc
WHERE {
GRAPH <%%(proj)s> {
{
SELECT DISTINCT ?ver ?loc ?loop ?fd ?fn ?loop_d
WHERE {
?loop a cpp:IterationStatement ;
src:treeDigest ?loop_d ;
cpp:inTranslationUnit ?tu .
?tu a cpp:TranslationUnit ;
src:inFile/src:location ?loc ;
ver:version ?ver .
OPTIONAL {
?loop cpp:inFunctionDefinition ?fd .
?fd a cpp:FunctionDefinition ;
cpp:provides/(cpp:name|cpp:regexp) ?fn .
FILTER NOT EXISTS {
?loop cpp:inFunctionDefinition ?fd0 .
        ?fd0 cpp:inFunctionDefinition ?fd .
FILTER (?fd != ?fd0)
}
}
} GROUP BY ?ver ?loc ?loop ?fd ?fn ?loop_d
}
OPTIONAL {
SELECT DISTINCT ?loop (COUNT(DISTINCT ?h) AS ?nop)
WHERE {
?op a cpp:Expression ;
src:treeDigest ?h ;
cpp:inIterationStatement ?loop .
} GROUP BY ?loop
}
OPTIONAL {
SELECT DISTINCT ?loop (COUNT(DISTINCT ?br) AS ?nbr)
WHERE {
?br a cpp:SelectionStatement ;
a ?br_cat OPTION (INFERENCE NONE) ;
cpp:inIterationStatement ?loop .
} GROUP BY ?loop
}
OPTIONAL {
SELECT DISTINCT ?loop (COUNT(DISTINCT ?stmt) AS ?nes)
WHERE {
?stmt a cpp:Statement ;
a ?stmt_cat OPTION (INFERENCE NONE) ;
cpp:inIterationStatement ?loop .
} GROUP BY ?loop
}
OPTIONAL {
SELECT DISTINCT ?loop (COUNT(DISTINCT ?call) AS ?nc)
WHERE {
?call a ?call_cat OPTION (INFERENCE NONE) ;
cpp:inIterationStatement ?loop .
FILTER (?call_cat IN (cpp:PostfixExpressionFunCall,
cpp:PostfixExpressionFunCallGuarded))
} GROUP by ?loop
}
}
}
''' % NS_TBL
Q_AREF0_AA_IN_LOOP_C = '''DEFINE input:inference "ont.cpi"
PREFIX cpp: <%(cpp_ns)s>
PREFIX ver: <%(ver_ns)s>
PREFIX src: <%(src_ns)s>
SELECT DISTINCT ?ver ?loc ?fd ?fn ?loop ?loop_d ?sig
WHERE {
GRAPH <%%(proj)s> {
{
SELECT DISTINCT ?ver ?loc ?loop ?fd ?fn ?loop_d
WHERE {
?loop a cpp:IterationStatement ;
src:treeDigest ?loop_d ;
cpp:inTranslationUnit ?tu .
?tu a cpp:TranslationUnit ;
src:inFile/src:location ?loc ;
ver:version ?ver .
OPTIONAL {
?loop cpp:inFunctionDefinition ?fd .
?fd a cpp:FunctionDefinition ;
cpp:provides/(cpp:name|cpp:regexp) ?fn .
FILTER NOT EXISTS {
?loop cpp:inFunctionDefinition ?fd0 .
        ?fd0 cpp:inFunctionDefinition ?fd .
FILTER (?fd != ?fd0)
}
}
} GROUP BY ?ver ?loc ?loop ?fd ?fn ?loop_d
}
{
SELECT DISTINCT ?loop ?sig
WHERE {
?aa a cpp:PostfixExpressionSubscr ;
src:child0 ?a ;
cpp:arrayRefSig0 ?asig0 ;
cpp:inIterationStatement ?loop .
OPTIONAL {
?assign a cpp:AssignmentOperatorExpression ;
src:child0 ?aa .
}
BIND(IF(BOUND(?assign), CONCAT(",", ?asig0), ?asig0) AS ?sig)
FILTER EXISTS {
?a cpp:declarator/cpp:declarationTypeSpec ?tspec .
?tspec a cpp:NumericType .
}
} GROUP BY ?loop ?sig
}
}
}
''' % NS_TBL
Q_AREF0_IAA_IN_LOOP_C = '''DEFINE input:inference "ont.cpi"
PREFIX cpp: <%(cpp_ns)s>
PREFIX ver: <%(ver_ns)s>
PREFIX src: <%(src_ns)s>
SELECT DISTINCT ?ver ?loc ?fd ?fn ?loop ?loop_d ?sig
WHERE {
GRAPH <%%(proj)s> {
{
SELECT DISTINCT ?ver ?loc ?loop ?fd ?fn ?loop_d
WHERE {
?loop a cpp:IterationStatement ;
src:treeDigest ?loop_d ;
cpp:inTranslationUnit ?tu .
?tu a cpp:TranslationUnit ;
src:inFile/src:location ?loc ;
ver:version ?ver .
OPTIONAL {
?loop cpp:inFunctionDefinition ?fd .
?fd a cpp:FunctionDefinition ;
cpp:provides/(cpp:name|cpp:regexp) ?fn .
FILTER NOT EXISTS {
?loop cpp:inFunctionDefinition ?fd0 .
        ?fd0 cpp:inFunctionDefinition ?fd .
FILTER (?fd != ?fd0)
}
}
} GROUP BY ?ver ?loc ?loop ?fd ?fn ?loop_d
}
{
SELECT DISTINCT ?loop ?sig
WHERE {
?aa a cpp:PostfixExpressionSubscr ;
src:child0 ?a ;
src:child1 ?idx ;
cpp:arrayRefSig0 ?asig0 ;
cpp:inIterationStatement ?loop .
OPTIONAL {
?assign a cpp:AssignmentOperatorExpression ;
src:child0 ?aa .
}
BIND(IF(BOUND(?assign), CONCAT(",", ?asig0), ?asig0) AS ?sig)
FILTER EXISTS {
?a cpp:declarator/cpp:declarationTypeSpec ?tspec .
?tspec a cpp:NumericType .
}
FILTER (EXISTS {
?x a cpp:Expression ;
src:children [] ;
src:parent+ ?idx0 .
?aa0 a cpp:PostfixExpressionSubscr ;
src:child1 ?idx0 ;
src:parent+ ?aa .
FILTER (?x != ?aa)
} || EXISTS {
?x a cpp:Expression ;
src:children [] ;
src:parent+ ?idx .
})
} GROUP BY ?loop ?sig
}
}
}
''' % NS_TBL
Q_AREF0_DAA_IN_LOOP_C = '''DEFINE input:inference "ont.cpi"
PREFIX cpp: <%(cpp_ns)s>
PREFIX ver: <%(ver_ns)s>
PREFIX src: <%(src_ns)s>
SELECT DISTINCT ?ver ?loc ?fd ?fn ?loop ?loop_d ?sig
WHERE {
GRAPH <%%(proj)s> {
{
SELECT DISTINCT ?ver ?loc ?loop ?fd ?fn ?loop_d
WHERE {
?loop a cpp:IterationStatement ;
src:treeDigest ?loop_d ;
cpp:inTranslationUnit ?tu .
?tu a cpp:TranslationUnit ;
src:inFile/src:location ?loc ;
ver:version ?ver .
OPTIONAL {
?loop cpp:inFunctionDefinition ?fd .
?fd a cpp:FunctionDefinition ;
cpp:provides/(cpp:name|cpp:regexp) ?fn .
FILTER NOT EXISTS {
?loop cpp:inFunctionDefinition ?fd0 .
        ?fd0 cpp:inFunctionDefinition ?fd .
FILTER (?fd != ?fd0)
}
}
} GROUP BY ?ver ?loc ?loop ?fd ?fn ?loop_d
}
{
SELECT DISTINCT ?loop ?sig
WHERE {
?aa a cpp:PostfixExpressionSubscr ;
src:child0 ?a ;
src:child1 ?idx ;
cpp:arrayRefSig0 ?asig0 ;
cpp:inIterationStatement ?loop .
OPTIONAL {
?assign a cpp:AssignmentOperatorExpression ;
src:child0 ?aa .
}
BIND(IF(BOUND(?assign), CONCAT(",", ?asig0), ?asig0) AS ?sig)
FILTER EXISTS {
?a cpp:declarator/cpp:declarationTypeSpec ?tspec .
?tspec a cpp:Double .
}
} GROUP BY ?loop ?sig
}
}
}
''' % NS_TBL
Q_AREF12_AA_IN_LOOP_C = '''DEFINE input:inference "ont.cpi"
PREFIX cpp: <%(cpp_ns)s>
PREFIX ver: <%(ver_ns)s>
PREFIX src: <%(src_ns)s>
SELECT DISTINCT ?ver ?loc ?fd ?fn ?loop ?loop_d ?sig
WHERE {
GRAPH <%%(proj)s> {
{
SELECT DISTINCT ?ver ?loc ?loop ?fd ?fn ?loop_d
WHERE {
?loop a cpp:IterationStatement ;
src:treeDigest ?loop_d ;
cpp:inTranslationUnit ?tu .
?tu a cpp:TranslationUnit ;
src:inFile/src:location ?loc ;
ver:version ?ver .
OPTIONAL {
?loop cpp:inFunctionDefinition ?fd .
?fd a cpp:FunctionDefinition ;
cpp:provides/(cpp:name|cpp:regexp) ?fn .
FILTER NOT EXISTS {
?loop cpp:inFunctionDefinition ?fd0 .
        ?fd0 cpp:inFunctionDefinition ?fd .
FILTER (?fd != ?fd0)
}
}
} GROUP BY ?ver ?loc ?loop ?fd ?fn ?loop_d
}
{
SELECT DISTINCT ?loop ?sig
WHERE {
?aa a cpp:PostfixExpressionSubscr ;
src:child0 ?a ;
src:child1 ?idx ;
cpp:inIterationStatement ?loop .
OPTIONAL {
?aa cpp:arrayRefSig%%(level)d ?asig .
}
OPTIONAL {
?assign a cpp:AssignmentOperatorExpression ;
src:child0 ?aa .
}
BIND(COALESCE(?asig, "") AS ?sig0)
BIND(IF(BOUND(?assign) && ?sig0 != "", CONCAT(",", ?sig0), ?sig0) AS ?sig)
FILTER EXISTS {
?a cpp:declarator/cpp:declarationTypeSpec ?tspec .
?tspec a cpp:NumericType .
}
} GROUP BY ?loop ?sig
}
}
}
''' % NS_TBL
Q_AREF12_IAA_IN_LOOP_C = '''DEFINE input:inference "ont.cpi"
PREFIX cpp: <%(cpp_ns)s>
PREFIX ver: <%(ver_ns)s>
PREFIX src: <%(src_ns)s>
SELECT DISTINCT ?ver ?loc ?fd ?fn ?loop ?loop_d ?sig
WHERE {
GRAPH <%%(proj)s> {
{
SELECT DISTINCT ?ver ?loc ?loop ?fd ?fn ?loop_d
WHERE {
?loop a cpp:IterationStatement ;
src:treeDigest ?loop_d ;
cpp:inTranslationUnit ?tu .
?tu a cpp:TranslationUnit ;
src:inFile/src:location ?loc ;
ver:version ?ver .
OPTIONAL {
?loop cpp:inFunctionDefinition ?fd .
?fd a cpp:FunctionDefinition ;
cpp:provides/(cpp:name|cpp:regexp) ?fn .
FILTER NOT EXISTS {
?loop cpp:inFunctionDefinition ?fd0 .
        ?fd0 cpp:inFunctionDefinition ?fd .
FILTER (?fd != ?fd0)
}
}
} GROUP BY ?ver ?loc ?loop ?fd ?fn ?loop_d
}
{
SELECT DISTINCT ?loop ?sig
WHERE {
?aa a cpp:PostfixExpressionSubscr ;
src:child0 ?a ;
src:child1 ?idx ;
cpp:inIterationStatement ?loop .
OPTIONAL {
?aa cpp:arrayRefSig%%(level)d ?asig .
}
OPTIONAL {
?assign a cpp:AssignmentOperatorExpression ;
src:child0 ?aa .
}
BIND(COALESCE(?asig, "") AS ?sig0)
BIND(IF(BOUND(?assign) && ?sig0 != "", CONCAT(",", ?sig0), ?sig0) AS ?sig)
FILTER EXISTS {
?a cpp:declarator/cpp:declarationTypeSpec ?tspec .
?tspec a cpp:NumericType .
}
FILTER (EXISTS {
?x a cpp:Expression ;
src:children [] ;
src:parent+ ?idx0 .
?aa0 a cpp:PostfixExpressionSubscr ;
src:child1 ?idx0 ;
src:parent+ ?aa .
FILTER (?x != ?aa)
} || EXISTS {
?x a cpp:Expression ;
src:children [] ;
src:parent+ ?idx .
})
} GROUP BY ?loop ?sig
}
}
}
''' % NS_TBL
Q_AREF12_DAA_IN_LOOP_C = '''DEFINE input:inference "ont.cpi"
PREFIX cpp: <%(cpp_ns)s>
PREFIX ver: <%(ver_ns)s>
PREFIX src: <%(src_ns)s>
SELECT DISTINCT ?ver ?loc ?fd ?fn ?loop ?loop_d ?sig
WHERE {
GRAPH <%%(proj)s> {
{
SELECT DISTINCT ?ver ?loc ?loop ?fd ?fn ?loop_d
WHERE {
?loop a cpp:IterationStatement ;
src:treeDigest ?loop_d ;
cpp:inTranslationUnit ?tu .
?tu a cpp:TranslationUnit ;
src:inFile/src:location ?loc ;
ver:version ?ver .
OPTIONAL {
?loop cpp:inFunctionDefinition ?fd .
?fd a cpp:FunctionDefinition ;
cpp:provides/(cpp:name|cpp:regexp) ?fn .
FILTER NOT EXISTS {
?loop cpp:inFunctionDefinition ?fd0 .
        ?fd0 cpp:inFunctionDefinition ?fd .
FILTER (?fd != ?fd0)
}
}
} GROUP BY ?ver ?loc ?loop ?fd ?fn ?loop_d
}
{
SELECT DISTINCT ?loop ?sig
WHERE {
?aa a cpp:PostfixExpressionSubscr ;
src:child0 ?a ;
src:child1 ?idx ;
cpp:inIterationStatement ?loop .
OPTIONAL {
?aa cpp:arrayRefSig%%(level)d ?asig .
}
OPTIONAL {
?assign a cpp:AssignmentOperatorExpression ;
src:child0 ?aa .
}
BIND(COALESCE(?asig, "") AS ?sig0)
BIND(IF(BOUND(?assign) && ?sig0 != "", CONCAT(",", ?sig0), ?sig0) AS ?sig)
FILTER EXISTS {
?a cpp:declarator/cpp:declarationTypeSpec ?tspec .
?tspec a cpp:Double .
}
} GROUP BY ?loop ?sig
}
}
}
''' % NS_TBL
QUERY_TBL = {
'loop_loop' : Q_LOOP_LOOP_C,
'arrays' : Q_ARRAYS_C,
'ffr_in_loop' : Q_FFR_IN_LOOP_C,
'dfr_in_loop' : Q_DFR_IN_LOOP_C,
'fop_in_loop' : Q_FOP_IN_LOOP_C,
'in_loop' : Q_IN_LOOP_C,
'aref0_in_loop' : { 'aa' : Q_AREF0_AA_IN_LOOP_C,
'iaa': Q_AREF0_IAA_IN_LOOP_C,
'daa': Q_AREF0_DAA_IN_LOOP_C,
},
'aref12_in_loop' : { 'aa' : Q_AREF12_AA_IN_LOOP_C,
'iaa': Q_AREF12_IAA_IN_LOOP_C,
'daa': Q_AREF12_DAA_IN_LOOP_C,
},
'loop_fd' : Q_LOOP_FD_C,
'fd_fd' : Q_FD_FD_C,
}
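def _example_instantiate_query(proj_graph_uri, level=1):
    """Editor's note: illustrative sketch only. NS_TBL has already been applied
    with '%' above, so the remaining placeholders are '%(proj)s' (and
    '%(level)d' for the arrayRefSig queries); callers fill them in like this."""
    loop_query = QUERY_TBL['in_loop'] % {'proj': proj_graph_uri}
    aref_query = QUERY_TBL['aref12_in_loop']['aa'] % {'proj': proj_graph_uri,
                                                      'level': level}
    return loop_query, aref_query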
| apache-2.0 |
safwanrahman/kitsune | kitsune/sumo/paginator.py | 23 | 3063 | from django.core.paginator import (Paginator as DjPaginator, EmptyPage,
InvalidPage, Page, PageNotAnInteger)
__all__ = ['Paginator', 'EmptyPage', 'InvalidPage']
class Paginator(DjPaginator):
"""Allows you to pass in a `count` kwarg to avoid running an
expensive, uncacheable `SELECT COUNT` query."""
def __init__(self, object_list, per_page,
orphans=0, allow_empty_first_page=True, count=None):
super(Paginator, self).__init__(
object_list, per_page, orphans=orphans,
allow_empty_first_page=allow_empty_first_page)
if count:
self._count = count
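def _example_cached_count(queryset, cached_total):
    """Editor's note: illustrative sketch only (not used elsewhere in the
    codebase). Passing a precomputed ``count`` means Django never issues the
    SELECT COUNT(*) query for this paginator."""
    return Paginator(queryset, 20, count=cached_total).page(1)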
class SimplePaginator(DjPaginator):
"""Paginator for basic Next/Previous pagination.
The big win is that it doesn't require any COUNT queries.
"""
def validate_number(self, number):
"""Validates the given 1-based page number.
        Override to stop checking if we have gone too far since that requires
knowing the total number of pages.
"""
try:
number = int(number)
except ValueError:
raise PageNotAnInteger('That page number is not an integer')
if number < 1:
raise EmptyPage('That page number is less than 1')
return number
def page(self, number):
"""Returns a SimplePage object for the given 1-based page number."""
number = self.validate_number(number)
# Calculate the bottom (the first result).
bottom = (number - 1) * self.per_page
# Calculate the top, adding one so we know if there is a next page.
top_plus_one = bottom + self.per_page + 1
# Get the items.
page_items = self.object_list[bottom:top_plus_one]
# Check moved from validate_number
if not page_items:
if number == 1 and self.allow_empty_first_page:
pass
else:
raise EmptyPage('That page contains no results')
# Check if there is a next page.
has_next = len(page_items) > self.per_page
# If so, remove the extra item.
if has_next:
page_items = list(page_items)[:-1]
return SimplePage(page_items, number, self, has_next)
@property
def _get_count(self):
raise NotImplementedError
@property
def _get_num_pages(self):
raise NotImplementedError
@property
def _get_page_range(self):
raise NotImplementedError
class SimplePage(Page):
"""A page for the SimplePaginator."""
def __init__(self, object_list, number, paginator, has_next):
self.object_list = object_list
self.number = number
self.paginator = paginator
self._has_next = has_next
def has_next(self):
"""Is there a next page?"""
return self._has_next
def end_index(self):
"""Returns the 1-based index of the last object on this page."""
return ((self.number - 1) * self.paginator.per_page +
len(self.object_list))
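def _example_simple_pagination(queryset, page_number):
    """Editor's note: illustrative sketch only. SimplePaginator fetches
    per_page + 1 rows and uses the extra row to decide has_next(), so no
    COUNT query is ever issued."""
    page = SimplePaginator(queryset, 20).page(page_number)
    return page.object_list, page.has_next()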
| bsd-3-clause |
HLFH/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/viki.py | 35 | 3332 | from __future__ import unicode_literals
import re
from ..utils import (
ExtractorError,
unescapeHTML,
unified_strdate,
US_RATINGS,
)
from .subtitles import SubtitlesInfoExtractor
class VikiIE(SubtitlesInfoExtractor):
IE_NAME = 'viki'
_VALID_URL = r'^https?://(?:www\.)?viki\.com/videos/(?P<id>[0-9]+v)'
_TEST = {
'url': 'http://www.viki.com/videos/1023585v-heirs-episode-14',
'md5': 'a21454021c2646f5433514177e2caa5f',
'info_dict': {
'id': '1023585v',
'ext': 'mp4',
'title': 'Heirs Episode 14',
'uploader': 'SBS',
'description': 'md5:c4b17b9626dd4b143dcc4d855ba3474e',
'upload_date': '20131121',
'age_limit': 13,
},
'skip': 'Blocked in the US',
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group(1)
webpage = self._download_webpage(url, video_id)
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
uploader_m = re.search(
r'<strong>Broadcast Network: </strong>\s*([^<]*)<', webpage)
if uploader_m is None:
uploader = None
else:
uploader = uploader_m.group(1).strip()
rating_str = self._html_search_regex(
r'<strong>Rating: </strong>\s*([^<]*)<', webpage,
'rating information', default='').strip()
age_limit = US_RATINGS.get(rating_str)
info_url = 'http://www.viki.com/player5_fragment/%s?action=show&controller=videos' % video_id
info_webpage = self._download_webpage(
info_url, video_id, note='Downloading info page')
if re.match(r'\s*<div\s+class="video-error', info_webpage):
raise ExtractorError(
'Video %s is blocked from your location.' % video_id,
expected=True)
video_url = self._html_search_regex(
r'<source[^>]+src="([^"]+)"', info_webpage, 'video URL')
upload_date_str = self._html_search_regex(
r'"created_at":"([^"]+)"', info_webpage, 'upload date')
upload_date = (
unified_strdate(upload_date_str)
if upload_date_str is not None
else None
)
# subtitles
video_subtitles = self.extract_subtitles(video_id, info_webpage)
if self._downloader.params.get('listsubtitles', False):
self._list_available_subtitles(video_id, info_webpage)
return
return {
'id': video_id,
'title': title,
'url': video_url,
'description': description,
'thumbnail': thumbnail,
'age_limit': age_limit,
'uploader': uploader,
'subtitles': video_subtitles,
'upload_date': upload_date,
}
def _get_available_subtitles(self, video_id, info_webpage):
res = {}
for sturl_html in re.findall(r'<track src="([^"]+)"/>', info_webpage):
sturl = unescapeHTML(sturl_html)
m = re.search(r'/(?P<lang>[a-z]+)\.vtt', sturl)
if not m:
continue
res[m.group('lang')] = sturl
return res
| gpl-3.0 |
denny820909/builder | lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/names/dns.py | 5 | 55177 | # -*- test-case-name: twisted.names.test.test_dns -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
DNS protocol implementation.
Future Plans:
- Get rid of some toplevels, maybe.
@author: Moshe Zadka
@author: Jean-Paul Calderone
"""
__all__ = [
'IEncodable', 'IRecord',
'A', 'A6', 'AAAA', 'AFSDB', 'CNAME', 'DNAME', 'HINFO',
'MAILA', 'MAILB', 'MB', 'MD', 'MF', 'MG', 'MINFO', 'MR', 'MX',
'NAPTR', 'NS', 'NULL', 'PTR', 'RP', 'SOA', 'SPF', 'SRV', 'TXT', 'WKS',
'ANY', 'CH', 'CS', 'HS', 'IN',
'ALL_RECORDS', 'AXFR', 'IXFR',
'EFORMAT', 'ENAME', 'ENOTIMP', 'EREFUSED', 'ESERVER',
'Record_A', 'Record_A6', 'Record_AAAA', 'Record_AFSDB', 'Record_CNAME',
'Record_DNAME', 'Record_HINFO', 'Record_MB', 'Record_MD', 'Record_MF',
'Record_MG', 'Record_MINFO', 'Record_MR', 'Record_MX', 'Record_NAPTR',
'Record_NS', 'Record_NULL', 'Record_PTR', 'Record_RP', 'Record_SOA',
'Record_SPF', 'Record_SRV', 'Record_TXT', 'Record_WKS', 'UnknownRecord',
'QUERY_CLASSES', 'QUERY_TYPES', 'REV_CLASSES', 'REV_TYPES', 'EXT_QUERIES',
'Charstr', 'Message', 'Name', 'Query', 'RRHeader', 'SimpleRecord',
'DNSDatagramProtocol', 'DNSMixin', 'DNSProtocol',
'OK', 'OP_INVERSE', 'OP_NOTIFY', 'OP_QUERY', 'OP_STATUS', 'OP_UPDATE',
'PORT',
'AuthoritativeDomainError', 'DNSQueryTimeoutError', 'DomainError',
]
# System imports
import warnings
import struct, random, types, socket
import cStringIO as StringIO
AF_INET6 = socket.AF_INET6
from zope.interface import implements, Interface, Attribute
# Twisted imports
from twisted.internet import protocol, defer
from twisted.internet.error import CannotListenError
from twisted.python import log, failure
from twisted.python import util as tputil
from twisted.python import randbytes
def randomSource():
"""
Wrapper around L{randbytes.secureRandom} to return 2 random chars.
"""
return struct.unpack('H', randbytes.secureRandom(2, fallback=True))[0]
PORT = 53
(A, NS, MD, MF, CNAME, SOA, MB, MG, MR, NULL, WKS, PTR, HINFO, MINFO, MX, TXT,
RP, AFSDB) = range(1, 19)
AAAA = 28
SRV = 33
NAPTR = 35
A6 = 38
DNAME = 39
SPF = 99
QUERY_TYPES = {
A: 'A',
NS: 'NS',
MD: 'MD',
MF: 'MF',
CNAME: 'CNAME',
SOA: 'SOA',
MB: 'MB',
MG: 'MG',
MR: 'MR',
NULL: 'NULL',
WKS: 'WKS',
PTR: 'PTR',
HINFO: 'HINFO',
MINFO: 'MINFO',
MX: 'MX',
TXT: 'TXT',
RP: 'RP',
AFSDB: 'AFSDB',
# 19 through 27? Eh, I'll get to 'em.
AAAA: 'AAAA',
SRV: 'SRV',
NAPTR: 'NAPTR',
A6: 'A6',
DNAME: 'DNAME',
SPF: 'SPF'
}
IXFR, AXFR, MAILB, MAILA, ALL_RECORDS = range(251, 256)
# "Extended" queries (Hey, half of these are deprecated, good job)
EXT_QUERIES = {
IXFR: 'IXFR',
AXFR: 'AXFR',
MAILB: 'MAILB',
MAILA: 'MAILA',
ALL_RECORDS: 'ALL_RECORDS'
}
REV_TYPES = dict([
(v, k) for (k, v) in QUERY_TYPES.items() + EXT_QUERIES.items()
])
IN, CS, CH, HS = range(1, 5)
ANY = 255
QUERY_CLASSES = {
IN: 'IN',
CS: 'CS',
CH: 'CH',
HS: 'HS',
ANY: 'ANY'
}
REV_CLASSES = dict([
(v, k) for (k, v) in QUERY_CLASSES.items()
])
# Opcodes
OP_QUERY, OP_INVERSE, OP_STATUS = range(3)
OP_NOTIFY = 4 # RFC 1996
OP_UPDATE = 5 # RFC 2136
# Response Codes
OK, EFORMAT, ESERVER, ENAME, ENOTIMP, EREFUSED = range(6)
class IRecord(Interface):
"""
An single entry in a zone of authority.
"""
TYPE = Attribute("An indicator of what kind of record this is.")
# Backwards compatibility aliases - these should be deprecated or something I
# suppose. -exarkun
from twisted.names.error import DomainError, AuthoritativeDomainError
from twisted.names.error import DNSQueryTimeoutError
def str2time(s):
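    """
    Parse a time interval given either as a number of seconds or as a string
    with a one-letter suffix: S(econds), M(inutes), H(ours), D(ays), W(eeks)
    or Y(ears); for example C{str2time('1H')} returns C{3600}. Plain integers
    are returned unchanged.
    """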
suffixes = (
('S', 1), ('M', 60), ('H', 60 * 60), ('D', 60 * 60 * 24),
('W', 60 * 60 * 24 * 7), ('Y', 60 * 60 * 24 * 365)
)
if isinstance(s, types.StringType):
s = s.upper().strip()
for (suff, mult) in suffixes:
if s.endswith(suff):
return int(float(s[:-1]) * mult)
try:
s = int(s)
except ValueError:
raise ValueError, "Invalid time interval specifier: " + s
return s
def readPrecisely(file, l):
buff = file.read(l)
if len(buff) < l:
raise EOFError
return buff
class IEncodable(Interface):
"""
Interface for something which can be encoded to and decoded
from a file object.
"""
def encode(strio, compDict = None):
"""
Write a representation of this object to the given
file object.
@type strio: File-like object
@param strio: The stream to which to write bytes
@type compDict: C{dict} or C{None}
        @param compDict: A dictionary of backreference addresses that
have already been written to this stream and that may be used for
compression.
"""
def decode(strio, length = None):
"""
Reconstruct an object from data read from the given
file object.
@type strio: File-like object
@param strio: The stream from which bytes may be read
@type length: C{int} or C{None}
@param length: The number of bytes in this RDATA field. Most
implementations can ignore this value. Only in the case of
records similar to TXT where the total length is in no way
encoded in the data is it necessary.
"""
class Charstr(object):
implements(IEncodable)
def __init__(self, string=''):
if not isinstance(string, str):
raise ValueError("%r is not a string" % (string,))
self.string = string
def encode(self, strio, compDict=None):
"""
Encode this Character string into the appropriate byte format.
@type strio: file
@param strio: The byte representation of this Charstr will be written
to this file.
"""
string = self.string
ind = len(string)
strio.write(chr(ind))
strio.write(string)
def decode(self, strio, length=None):
"""
Decode a byte string into this Name.
@type strio: file
@param strio: Bytes will be read from this file until the full string
is decoded.
@raise EOFError: Raised when there are not enough bytes available from
C{strio}.
"""
self.string = ''
l = ord(readPrecisely(strio, 1))
self.string = readPrecisely(strio, l)
def __eq__(self, other):
if isinstance(other, Charstr):
return self.string == other.string
return False
def __hash__(self):
return hash(self.string)
def __str__(self):
return self.string
class Name:
implements(IEncodable)
def __init__(self, name=''):
assert isinstance(name, types.StringTypes), "%r is not a string" % (name,)
self.name = name
def encode(self, strio, compDict=None):
"""
Encode this Name into the appropriate byte format.
@type strio: file
@param strio: The byte representation of this Name will be written to
this file.
@type compDict: dict
@param compDict: dictionary of Names that have already been encoded
and whose addresses may be backreferenced by this Name (for the purpose
of reducing the message size).
"""
name = self.name
while name:
if compDict is not None:
if name in compDict:
strio.write(
struct.pack("!H", 0xc000 | compDict[name]))
return
else:
compDict[name] = strio.tell() + Message.headerSize
ind = name.find('.')
if ind > 0:
label, name = name[:ind], name[ind + 1:]
else:
label, name = name, ''
ind = len(label)
strio.write(chr(ind))
strio.write(label)
strio.write(chr(0))
def decode(self, strio, length=None):
"""
Decode a byte string into this Name.
@type strio: file
@param strio: Bytes will be read from this file until the full Name
is decoded.
@raise EOFError: Raised when there are not enough bytes available
from C{strio}.
@raise ValueError: Raised when the name cannot be decoded (for example,
because it contains a loop).
"""
visited = set()
self.name = ''
off = 0
while 1:
l = ord(readPrecisely(strio, 1))
if l == 0:
if off > 0:
strio.seek(off)
return
if (l >> 6) == 3:
new_off = ((l&63) << 8
| ord(readPrecisely(strio, 1)))
if new_off in visited:
raise ValueError("Compression loop in encoded name")
visited.add(new_off)
if off == 0:
off = strio.tell()
strio.seek(new_off)
continue
label = readPrecisely(strio, l)
if self.name == '':
self.name = label
else:
self.name = self.name + '.' + label
def __eq__(self, other):
if isinstance(other, Name):
return str(self) == str(other)
return 0
def __hash__(self):
return hash(str(self))
def __str__(self):
return self.name
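def _exampleEncodeName(name='www.example.com'):
    """Editor's note: illustrative sketch only, not part of Twisted's API.
    Shows the wire format produced by Name.encode(): length-prefixed labels
    terminated by a zero octet, here '\\x03www\\x07example\\x03com\\x00'."""
    strio = StringIO.StringIO()
    Name(name).encode(strio)
    return strio.getvalue()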
class Query:
"""
Represent a single DNS query.
@ivar name: The name about which this query is requesting information.
@ivar type: The query type.
@ivar cls: The query class.
"""
implements(IEncodable)
name = None
type = None
cls = None
def __init__(self, name='', type=A, cls=IN):
"""
@type name: C{str}
@param name: The name about which to request information.
@type type: C{int}
@param type: The query type.
@type cls: C{int}
@param cls: The query class.
"""
self.name = Name(name)
self.type = type
self.cls = cls
def encode(self, strio, compDict=None):
self.name.encode(strio, compDict)
strio.write(struct.pack("!HH", self.type, self.cls))
def decode(self, strio, length = None):
self.name.decode(strio)
buff = readPrecisely(strio, 4)
self.type, self.cls = struct.unpack("!HH", buff)
def __hash__(self):
return hash((str(self.name).lower(), self.type, self.cls))
def __cmp__(self, other):
return isinstance(other, Query) and cmp(
(str(self.name).lower(), self.type, self.cls),
(str(other.name).lower(), other.type, other.cls)
) or cmp(self.__class__, other.__class__)
def __str__(self):
t = QUERY_TYPES.get(self.type, EXT_QUERIES.get(self.type, 'UNKNOWN (%d)' % self.type))
c = QUERY_CLASSES.get(self.cls, 'UNKNOWN (%d)' % self.cls)
return '<Query %s %s %s>' % (self.name, t, c)
def __repr__(self):
return 'Query(%r, %r, %r)' % (str(self.name), self.type, self.cls)
class RRHeader(tputil.FancyEqMixin):
"""
A resource record header.
@cvar fmt: C{str} specifying the byte format of an RR.
@ivar name: The name about which this reply contains information.
@ivar type: The query type of the original request.
@ivar cls: The query class of the original request.
@ivar ttl: The time-to-live for this record.
@ivar payload: An object that implements the IEncodable interface
@ivar auth: A C{bool} indicating whether this C{RRHeader} was parsed from an
authoritative message.
"""
implements(IEncodable)
compareAttributes = ('name', 'type', 'cls', 'ttl', 'payload', 'auth')
fmt = "!HHIH"
name = None
type = None
cls = None
ttl = None
payload = None
rdlength = None
cachedResponse = None
def __init__(self, name='', type=A, cls=IN, ttl=0, payload=None, auth=False):
"""
@type name: C{str}
@param name: The name about which this reply contains information.
@type type: C{int}
@param type: The query type.
@type cls: C{int}
@param cls: The query class.
@type ttl: C{int}
@param ttl: Time to live for this record.
@type payload: An object implementing C{IEncodable}
@param payload: A Query Type specific data object.
@raises ValueError: if the ttl is negative.
"""
assert (payload is None) or isinstance(payload, UnknownRecord) or (payload.TYPE == type)
if ttl < 0:
raise ValueError("TTL cannot be negative")
self.name = Name(name)
self.type = type
self.cls = cls
self.ttl = ttl
self.payload = payload
self.auth = auth
def encode(self, strio, compDict=None):
self.name.encode(strio, compDict)
strio.write(struct.pack(self.fmt, self.type, self.cls, self.ttl, 0))
if self.payload:
prefix = strio.tell()
self.payload.encode(strio, compDict)
aft = strio.tell()
strio.seek(prefix - 2, 0)
strio.write(struct.pack('!H', aft - prefix))
strio.seek(aft, 0)
def decode(self, strio, length = None):
self.name.decode(strio)
l = struct.calcsize(self.fmt)
buff = readPrecisely(strio, l)
r = struct.unpack(self.fmt, buff)
self.type, self.cls, self.ttl, self.rdlength = r
def isAuthoritative(self):
return self.auth
def __str__(self):
t = QUERY_TYPES.get(self.type, EXT_QUERIES.get(self.type, 'UNKNOWN (%d)' % self.type))
c = QUERY_CLASSES.get(self.cls, 'UNKNOWN (%d)' % self.cls)
return '<RR name=%s type=%s class=%s ttl=%ds auth=%s>' % (self.name, t, c, self.ttl, self.auth and 'True' or 'False')
__repr__ = __str__
class SimpleRecord(tputil.FancyStrMixin, tputil.FancyEqMixin):
"""
A Resource Record which consists of a single RFC 1035 domain-name.
@type name: L{Name}
@ivar name: The name associated with this record.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be
cached.
"""
implements(IEncodable, IRecord)
showAttributes = (('name', 'name', '%s'), 'ttl')
compareAttributes = ('name', 'ttl')
TYPE = None
name = None
def __init__(self, name='', ttl=None):
self.name = Name(name)
self.ttl = str2time(ttl)
def encode(self, strio, compDict = None):
self.name.encode(strio, compDict)
def decode(self, strio, length = None):
self.name = Name()
self.name.decode(strio)
def __hash__(self):
return hash(self.name)
# Kinds of RRs - oh my!
class Record_NS(SimpleRecord):
"""
An authoritative nameserver.
"""
TYPE = NS
fancybasename = 'NS'
class Record_MD(SimpleRecord):
"""
A mail destination.
This record type is obsolete.
@see: L{Record_MX}
"""
TYPE = MD
fancybasename = 'MD'
class Record_MF(SimpleRecord):
"""
A mail forwarder.
This record type is obsolete.
@see: L{Record_MX}
"""
TYPE = MF
fancybasename = 'MF'
class Record_CNAME(SimpleRecord):
"""
The canonical name for an alias.
"""
TYPE = CNAME
fancybasename = 'CNAME'
class Record_MB(SimpleRecord):
"""
A mailbox domain name.
This is an experimental record type.
"""
TYPE = MB
fancybasename = 'MB'
class Record_MG(SimpleRecord):
"""
A mail group member.
This is an experimental record type.
"""
TYPE = MG
fancybasename = 'MG'
class Record_MR(SimpleRecord):
"""
A mail rename domain name.
This is an experimental record type.
"""
TYPE = MR
fancybasename = 'MR'
class Record_PTR(SimpleRecord):
"""
A domain name pointer.
"""
TYPE = PTR
fancybasename = 'PTR'
class Record_DNAME(SimpleRecord):
"""
A non-terminal DNS name redirection.
This record type provides the capability to map an entire subtree of the
DNS name space to another domain. It differs from the CNAME record which
maps a single node of the name space.
@see: U{http://www.faqs.org/rfcs/rfc2672.html}
@see: U{http://www.faqs.org/rfcs/rfc3363.html}
"""
TYPE = DNAME
fancybasename = 'DNAME'
class Record_A(tputil.FancyEqMixin):
"""
An IPv4 host address.
@type address: C{str}
@ivar address: The packed network-order representation of the IPv4 address
associated with this record.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be
cached.
"""
implements(IEncodable, IRecord)
compareAttributes = ('address', 'ttl')
TYPE = A
address = None
def __init__(self, address='0.0.0.0', ttl=None):
address = socket.inet_aton(address)
self.address = address
self.ttl = str2time(ttl)
def encode(self, strio, compDict = None):
strio.write(self.address)
def decode(self, strio, length = None):
self.address = readPrecisely(strio, 4)
def __hash__(self):
return hash(self.address)
def __str__(self):
return '<A address=%s ttl=%s>' % (self.dottedQuad(), self.ttl)
__repr__ = __str__
def dottedQuad(self):
return socket.inet_ntoa(self.address)
class Record_SOA(tputil.FancyEqMixin, tputil.FancyStrMixin):
"""
Marks the start of a zone of authority.
This record describes parameters which are shared by all records within a
particular zone.
@type mname: L{Name}
@ivar mname: The domain-name of the name server that was the original or
primary source of data for this zone.
@type rname: L{Name}
@ivar rname: A domain-name which specifies the mailbox of the person
responsible for this zone.
@type serial: C{int}
@ivar serial: The unsigned 32 bit version number of the original copy of
the zone. Zone transfers preserve this value. This value wraps and
should be compared using sequence space arithmetic.
@type refresh: C{int}
@ivar refresh: A 32 bit time interval before the zone should be refreshed.
@type minimum: C{int}
@ivar minimum: The unsigned 32 bit minimum TTL field that should be
exported with any RR from this zone.
@type expire: C{int}
@ivar expire: A 32 bit time value that specifies the upper limit on the
time interval that can elapse before the zone is no longer
authoritative.
@type retry: C{int}
@ivar retry: A 32 bit time interval that should elapse before a failed
refresh should be retried.
@type ttl: C{int}
@ivar ttl: The default TTL to use for records served from this zone.
"""
implements(IEncodable, IRecord)
fancybasename = 'SOA'
compareAttributes = ('serial', 'mname', 'rname', 'refresh', 'expire', 'retry', 'minimum', 'ttl')
showAttributes = (('mname', 'mname', '%s'), ('rname', 'rname', '%s'), 'serial', 'refresh', 'retry', 'expire', 'minimum', 'ttl')
TYPE = SOA
def __init__(self, mname='', rname='', serial=0, refresh=0, retry=0, expire=0, minimum=0, ttl=None):
self.mname, self.rname = Name(mname), Name(rname)
self.serial, self.refresh = str2time(serial), str2time(refresh)
self.minimum, self.expire = str2time(minimum), str2time(expire)
self.retry = str2time(retry)
self.ttl = str2time(ttl)
def encode(self, strio, compDict = None):
self.mname.encode(strio, compDict)
self.rname.encode(strio, compDict)
strio.write(
struct.pack(
'!LlllL',
self.serial, self.refresh, self.retry, self.expire,
self.minimum
)
)
def decode(self, strio, length = None):
self.mname, self.rname = Name(), Name()
self.mname.decode(strio)
self.rname.decode(strio)
r = struct.unpack('!LlllL', readPrecisely(strio, 20))
self.serial, self.refresh, self.retry, self.expire, self.minimum = r
def __hash__(self):
return hash((
self.serial, self.mname, self.rname,
self.refresh, self.expire, self.retry
))
class Record_NULL(tputil.FancyStrMixin, tputil.FancyEqMixin):
"""
A null record.
This is an experimental record type.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be
cached.
"""
implements(IEncodable, IRecord)
fancybasename = 'NULL'
showAttributes = compareAttributes = ('payload', 'ttl')
TYPE = NULL
def __init__(self, payload=None, ttl=None):
self.payload = payload
self.ttl = str2time(ttl)
def encode(self, strio, compDict = None):
strio.write(self.payload)
def decode(self, strio, length = None):
self.payload = readPrecisely(strio, length)
def __hash__(self):
return hash(self.payload)
class Record_WKS(tputil.FancyEqMixin, tputil.FancyStrMixin):
"""
A well known service description.
This record type is obsolete. See L{Record_SRV}.
@type address: C{str}
@ivar address: The packed network-order representation of the IPv4 address
associated with this record.
@type protocol: C{int}
@ivar protocol: The 8 bit IP protocol number for which this service map is
relevant.
@type map: C{str}
@ivar map: A bitvector indicating the services available at the specified
address.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be
cached.
"""
implements(IEncodable, IRecord)
fancybasename = "WKS"
compareAttributes = ('address', 'protocol', 'map', 'ttl')
showAttributes = [('_address', 'address', '%s'), 'protocol', 'ttl']
TYPE = WKS
_address = property(lambda self: socket.inet_ntoa(self.address))
def __init__(self, address='0.0.0.0', protocol=0, map='', ttl=None):
self.address = socket.inet_aton(address)
self.protocol, self.map = protocol, map
self.ttl = str2time(ttl)
def encode(self, strio, compDict = None):
strio.write(self.address)
strio.write(struct.pack('!B', self.protocol))
strio.write(self.map)
def decode(self, strio, length = None):
self.address = readPrecisely(strio, 4)
self.protocol = struct.unpack('!B', readPrecisely(strio, 1))[0]
self.map = readPrecisely(strio, length - 5)
def __hash__(self):
return hash((self.address, self.protocol, self.map))
class Record_AAAA(tputil.FancyEqMixin, tputil.FancyStrMixin):
"""
An IPv6 host address.
@type address: C{str}
@ivar address: The packed network-order representation of the IPv6 address
associated with this record.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be
cached.
@see: U{http://www.faqs.org/rfcs/rfc1886.html}
"""
implements(IEncodable, IRecord)
TYPE = AAAA
fancybasename = 'AAAA'
showAttributes = (('_address', 'address', '%s'), 'ttl')
compareAttributes = ('address', 'ttl')
_address = property(lambda self: socket.inet_ntop(AF_INET6, self.address))
def __init__(self, address = '::', ttl=None):
self.address = socket.inet_pton(AF_INET6, address)
self.ttl = str2time(ttl)
def encode(self, strio, compDict = None):
strio.write(self.address)
def decode(self, strio, length = None):
self.address = readPrecisely(strio, 16)
def __hash__(self):
return hash(self.address)
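# Editor's note (illustrative sketch, not part of the original module): the
# AAAA record keeps its address packed in network order, so construction and
# display go through inet_pton/inet_ntop. Assumes the platform socket module
# supports AF_INET6; the address is from the RFC 3849 documentation prefix.
#
#     aaaa = Record_AAAA('2001:db8::1', ttl=300)
#     aaaa._address          # -> '2001:db8::1'
#     len(aaaa.address)      # -> 16, the bytes written by encode()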
class Record_A6(tputil.FancyStrMixin, tputil.FancyEqMixin):
"""
An IPv6 address.
This is an experimental record type.
@type prefixLen: C{int}
@ivar prefixLen: The length of the suffix.
@type suffix: C{str}
@ivar suffix: An IPv6 address suffix in network order.
@type prefix: L{Name}
@ivar prefix: If specified, a name which will be used as a prefix for other
A6 records.
@type bytes: C{int}
@ivar bytes: The length of the prefix.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be
cached.
@see: U{http://www.faqs.org/rfcs/rfc2874.html}
@see: U{http://www.faqs.org/rfcs/rfc3363.html}
@see: U{http://www.faqs.org/rfcs/rfc3364.html}
"""
implements(IEncodable, IRecord)
TYPE = A6
fancybasename = 'A6'
showAttributes = (('_suffix', 'suffix', '%s'), ('prefix', 'prefix', '%s'), 'ttl')
compareAttributes = ('prefixLen', 'prefix', 'suffix', 'ttl')
_suffix = property(lambda self: socket.inet_ntop(AF_INET6, self.suffix))
def __init__(self, prefixLen=0, suffix='::', prefix='', ttl=None):
self.prefixLen = prefixLen
self.suffix = socket.inet_pton(AF_INET6, suffix)
self.prefix = Name(prefix)
self.bytes = int((128 - self.prefixLen) / 8.0)
self.ttl = str2time(ttl)
def encode(self, strio, compDict = None):
strio.write(struct.pack('!B', self.prefixLen))
if self.bytes:
strio.write(self.suffix[-self.bytes:])
if self.prefixLen:
# This may not be compressed
self.prefix.encode(strio, None)
def decode(self, strio, length = None):
self.prefixLen = struct.unpack('!B', readPrecisely(strio, 1))[0]
self.bytes = int((128 - self.prefixLen) / 8.0)
if self.bytes:
self.suffix = '\x00' * (16 - self.bytes) + readPrecisely(strio, self.bytes)
if self.prefixLen:
self.prefix.decode(strio)
def __eq__(self, other):
if isinstance(other, Record_A6):
return (self.prefixLen == other.prefixLen and
self.suffix[-self.bytes:] == other.suffix[-self.bytes:] and
self.prefix == other.prefix and
self.ttl == other.ttl)
return NotImplemented
def __hash__(self):
return hash((self.prefixLen, self.suffix[-self.bytes:], self.prefix))
def __str__(self):
return '<A6 %s %s (%d) ttl=%s>' % (
self.prefix,
socket.inet_ntop(AF_INET6, self.suffix),
self.prefixLen, self.ttl
)
class Record_SRV(tputil.FancyEqMixin, tputil.FancyStrMixin):
"""
The location of the server(s) for a specific protocol and domain.
This is an experimental record type.
@type priority: C{int}
@ivar priority: The priority of this target host. A client MUST attempt to
contact the target host with the lowest-numbered priority it can reach;
target hosts with the same priority SHOULD be tried in an order defined
by the weight field.
@type weight: C{int}
@ivar weight: Specifies a relative weight for entries with the same
priority. Larger weights SHOULD be given a proportionately higher
probability of being selected.
@type port: C{int}
@ivar port: The port on this target host of this service.
@type target: L{Name}
@ivar target: The domain name of the target host. There MUST be one or
more address records for this name, the name MUST NOT be an alias (in
the sense of RFC 1034 or RFC 2181). Implementors are urged, but not
required, to return the address record(s) in the Additional Data
section. Unless and until permitted by future standards action, name
compression is not to be used for this field.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be
cached.
@see: U{http://www.faqs.org/rfcs/rfc2782.html}
"""
implements(IEncodable, IRecord)
TYPE = SRV
fancybasename = 'SRV'
compareAttributes = ('priority', 'weight', 'target', 'port', 'ttl')
showAttributes = ('priority', 'weight', ('target', 'target', '%s'), 'port', 'ttl')
def __init__(self, priority=0, weight=0, port=0, target='', ttl=None):
self.priority = int(priority)
self.weight = int(weight)
self.port = int(port)
self.target = Name(target)
self.ttl = str2time(ttl)
def encode(self, strio, compDict = None):
strio.write(struct.pack('!HHH', self.priority, self.weight, self.port))
# This can't be compressed
self.target.encode(strio, None)
def decode(self, strio, length = None):
r = struct.unpack('!HHH', readPrecisely(strio, struct.calcsize('!HHH')))
self.priority, self.weight, self.port = r
self.target = Name()
self.target.decode(strio)
def __hash__(self):
return hash((self.priority, self.weight, self.port, self.target))
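# Editor's note (illustrative sketch, not part of the original module): an SRV
# record combines priority/weight/port with a target name, and encode() never
# compresses the target, per RFC 2782. The values below are placeholders.
#
#     srv = Record_SRV(priority=10, weight=60, port=5060,
#                      target='sip1.example.com', ttl=3600)
#     out = StringIO.StringIO()
#     srv.encode(out)        # three 16-bit fields followed by the target name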
class Record_NAPTR(tputil.FancyEqMixin, tputil.FancyStrMixin):
"""
The location of the server(s) for a specific protocol and domain.
@type order: C{int}
@ivar order: An integer specifying the order in which the NAPTR records
MUST be processed to ensure the correct ordering of rules. Low numbers
are processed before high numbers.
@type preference: C{int}
@ivar preference: An integer that specifies the order in which NAPTR
records with equal "order" values SHOULD be processed, low numbers
being processed before high numbers.
@type flag: L{Charstr}
@ivar flag: A <character-string> containing flags to control aspects of the
rewriting and interpretation of the fields in the record. Flags
        are single characters from the set [A-Z0-9]. The case of the alphabetic
characters is not significant.
At this time only four flags, "S", "A", "U", and "P", are defined.
@type service: L{Charstr}
@ivar service: Specifies the service(s) available down this rewrite path.
It may also specify the particular protocol that is used to talk with a
service. A protocol MUST be specified if the flags field states that
the NAPTR is terminal.
@type regexp: L{Charstr}
@ivar regexp: A STRING containing a substitution expression that is applied
to the original string held by the client in order to construct the
next domain name to lookup.
@type replacement: L{Name}
@ivar replacement: The next NAME to query for NAPTR, SRV, or address
records depending on the value of the flags field. This MUST be a
fully qualified domain-name.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be
cached.
@see: U{http://www.faqs.org/rfcs/rfc2915.html}
"""
implements(IEncodable, IRecord)
TYPE = NAPTR
compareAttributes = ('order', 'preference', 'flags', 'service', 'regexp',
'replacement')
fancybasename = 'NAPTR'
showAttributes = ('order', 'preference', ('flags', 'flags', '%s'),
('service', 'service', '%s'), ('regexp', 'regexp', '%s'),
('replacement', 'replacement', '%s'), 'ttl')
def __init__(self, order=0, preference=0, flags='', service='', regexp='',
replacement='', ttl=None):
self.order = int(order)
self.preference = int(preference)
self.flags = Charstr(flags)
self.service = Charstr(service)
self.regexp = Charstr(regexp)
self.replacement = Name(replacement)
self.ttl = str2time(ttl)
def encode(self, strio, compDict=None):
strio.write(struct.pack('!HH', self.order, self.preference))
# This can't be compressed
self.flags.encode(strio, None)
self.service.encode(strio, None)
self.regexp.encode(strio, None)
self.replacement.encode(strio, None)
def decode(self, strio, length=None):
r = struct.unpack('!HH', readPrecisely(strio, struct.calcsize('!HH')))
self.order, self.preference = r
self.flags = Charstr()
self.service = Charstr()
self.regexp = Charstr()
self.replacement = Name()
self.flags.decode(strio)
self.service.decode(strio)
self.regexp.decode(strio)
self.replacement.decode(strio)
def __hash__(self):
return hash((
self.order, self.preference, self.flags,
self.service, self.regexp, self.replacement))
class Record_AFSDB(tputil.FancyStrMixin, tputil.FancyEqMixin):
"""
Map from a domain name to the name of an AFS cell database server.
@type subtype: C{int}
@ivar subtype: In the case of subtype 1, the host has an AFS version 3.0
Volume Location Server for the named AFS cell. In the case of subtype
2, the host has an authenticated name server holding the cell-root
directory node for the named DCE/NCA cell.
@type hostname: L{Name}
@ivar hostname: The domain name of a host that has a server for the cell
named by this record.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be
cached.
@see: U{http://www.faqs.org/rfcs/rfc1183.html}
"""
implements(IEncodable, IRecord)
TYPE = AFSDB
fancybasename = 'AFSDB'
compareAttributes = ('subtype', 'hostname', 'ttl')
showAttributes = ('subtype', ('hostname', 'hostname', '%s'), 'ttl')
def __init__(self, subtype=0, hostname='', ttl=None):
self.subtype = int(subtype)
self.hostname = Name(hostname)
self.ttl = str2time(ttl)
def encode(self, strio, compDict = None):
strio.write(struct.pack('!H', self.subtype))
self.hostname.encode(strio, compDict)
def decode(self, strio, length = None):
r = struct.unpack('!H', readPrecisely(strio, struct.calcsize('!H')))
self.subtype, = r
self.hostname.decode(strio)
def __hash__(self):
return hash((self.subtype, self.hostname))
class Record_RP(tputil.FancyEqMixin, tputil.FancyStrMixin):
"""
The responsible person for a domain.
@type mbox: L{Name}
@ivar mbox: A domain name that specifies the mailbox for the responsible
person.
@type txt: L{Name}
@ivar txt: A domain name for which TXT RR's exist (indirection through
which allows information sharing about the contents of this RP record).
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be
cached.
@see: U{http://www.faqs.org/rfcs/rfc1183.html}
"""
implements(IEncodable, IRecord)
TYPE = RP
fancybasename = 'RP'
compareAttributes = ('mbox', 'txt', 'ttl')
showAttributes = (('mbox', 'mbox', '%s'), ('txt', 'txt', '%s'), 'ttl')
def __init__(self, mbox='', txt='', ttl=None):
self.mbox = Name(mbox)
self.txt = Name(txt)
self.ttl = str2time(ttl)
def encode(self, strio, compDict = None):
self.mbox.encode(strio, compDict)
self.txt.encode(strio, compDict)
def decode(self, strio, length = None):
self.mbox = Name()
self.txt = Name()
self.mbox.decode(strio)
self.txt.decode(strio)
def __hash__(self):
return hash((self.mbox, self.txt))
class Record_HINFO(tputil.FancyStrMixin, tputil.FancyEqMixin):
"""
Host information.
@type cpu: C{str}
@ivar cpu: Specifies the CPU type.
@type os: C{str}
@ivar os: Specifies the OS.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be
cached.
"""
implements(IEncodable, IRecord)
TYPE = HINFO
fancybasename = 'HINFO'
showAttributes = compareAttributes = ('cpu', 'os', 'ttl')
def __init__(self, cpu='', os='', ttl=None):
self.cpu, self.os = cpu, os
self.ttl = str2time(ttl)
def encode(self, strio, compDict = None):
strio.write(struct.pack('!B', len(self.cpu)) + self.cpu)
strio.write(struct.pack('!B', len(self.os)) + self.os)
def decode(self, strio, length = None):
cpu = struct.unpack('!B', readPrecisely(strio, 1))[0]
self.cpu = readPrecisely(strio, cpu)
os = struct.unpack('!B', readPrecisely(strio, 1))[0]
self.os = readPrecisely(strio, os)
def __eq__(self, other):
if isinstance(other, Record_HINFO):
return (self.os.lower() == other.os.lower() and
self.cpu.lower() == other.cpu.lower() and
self.ttl == other.ttl)
return NotImplemented
def __hash__(self):
return hash((self.os.lower(), self.cpu.lower()))
class Record_MINFO(tputil.FancyEqMixin, tputil.FancyStrMixin):
"""
Mailbox or mail list information.
This is an experimental record type.
@type rmailbx: L{Name}
@ivar rmailbx: A domain-name which specifies a mailbox which is responsible
for the mailing list or mailbox. If this domain name names the root,
the owner of the MINFO RR is responsible for itself.
@type emailbx: L{Name}
@ivar emailbx: A domain-name which specifies a mailbox which is to receive
error messages related to the mailing list or mailbox specified by the
owner of the MINFO record. If this domain name names the root, errors
should be returned to the sender of the message.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be
cached.
"""
implements(IEncodable, IRecord)
TYPE = MINFO
rmailbx = None
emailbx = None
fancybasename = 'MINFO'
compareAttributes = ('rmailbx', 'emailbx', 'ttl')
showAttributes = (('rmailbx', 'responsibility', '%s'),
('emailbx', 'errors', '%s'),
'ttl')
def __init__(self, rmailbx='', emailbx='', ttl=None):
self.rmailbx, self.emailbx = Name(rmailbx), Name(emailbx)
self.ttl = str2time(ttl)
def encode(self, strio, compDict = None):
self.rmailbx.encode(strio, compDict)
self.emailbx.encode(strio, compDict)
def decode(self, strio, length = None):
self.rmailbx, self.emailbx = Name(), Name()
self.rmailbx.decode(strio)
self.emailbx.decode(strio)
def __hash__(self):
return hash((self.rmailbx, self.emailbx))
class Record_MX(tputil.FancyStrMixin, tputil.FancyEqMixin):
"""
Mail exchange.
@type preference: C{int}
@ivar preference: Specifies the preference given to this RR among others at
the same owner. Lower values are preferred.
@type name: L{Name}
@ivar name: A domain-name which specifies a host willing to act as a mail
exchange.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be
cached.
"""
implements(IEncodable, IRecord)
TYPE = MX
fancybasename = 'MX'
compareAttributes = ('preference', 'name', 'ttl')
showAttributes = ('preference', ('name', 'name', '%s'), 'ttl')
def __init__(self, preference=0, name='', ttl=None, **kwargs):
self.preference, self.name = int(preference), Name(kwargs.get('exchange', name))
self.ttl = str2time(ttl)
def encode(self, strio, compDict = None):
strio.write(struct.pack('!H', self.preference))
self.name.encode(strio, compDict)
def decode(self, strio, length = None):
self.preference = struct.unpack('!H', readPrecisely(strio, 2))[0]
self.name = Name()
self.name.decode(strio)
def __hash__(self):
return hash((self.preference, self.name))
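# Editor's note (illustrative sketch, not part of the original module): lower
# MX preference values are preferred, and __init__ above accepts 'exchange' as
# an alias for 'name'. Host names are placeholders.
#
#     primary = Record_MX(preference=10, exchange='mx1.example.com')
#     backup = Record_MX(preference=20, exchange='mx2.example.com')
#     sorted([backup, primary], key=lambda r: r.preference)[0] is primary  # True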
# Oh god, Record_TXT how I hate thee.
class Record_TXT(tputil.FancyEqMixin, tputil.FancyStrMixin):
"""
Freeform text.
@type data: C{list} of C{str}
@ivar data: Freeform text which makes up this record.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be cached.
"""
implements(IEncodable, IRecord)
TYPE = TXT
fancybasename = 'TXT'
showAttributes = compareAttributes = ('data', 'ttl')
def __init__(self, *data, **kw):
self.data = list(data)
# arg man python sucks so bad
self.ttl = str2time(kw.get('ttl', None))
def encode(self, strio, compDict = None):
for d in self.data:
strio.write(struct.pack('!B', len(d)) + d)
def decode(self, strio, length = None):
soFar = 0
self.data = []
while soFar < length:
L = struct.unpack('!B', readPrecisely(strio, 1))[0]
self.data.append(readPrecisely(strio, L))
soFar += L + 1
if soFar != length:
log.msg(
"Decoded %d bytes in %s record, but rdlength is %d" % (
soFar, self.fancybasename, length
)
)
def __hash__(self):
return hash(tuple(self.data))
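# Editor's note (illustrative sketch, not part of the original module): TXT
# data is a list of character-strings, each written as a one-byte length prefix
# followed by the bytes, which is why decode() walks the payload in hops.
#
#     txt = Record_TXT('v=spf1 -all', 'second string')
#     out = StringIO.StringIO()
#     txt.encode(out)
#     parsed = Record_TXT()
#     parsed.decode(StringIO.StringIO(out.getvalue()), length=len(out.getvalue()))
#     parsed.data == ['v=spf1 -all', 'second string']   # True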
# This is a fallback record
class UnknownRecord(tputil.FancyEqMixin, tputil.FancyStrMixin, object):
"""
    Encapsulate the wire data for unknown record types so that they can
pass through the system unchanged.
@type data: C{str}
@ivar data: Wire data which makes up this record.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be cached.
@since: 11.1
"""
implements(IEncodable, IRecord)
fancybasename = 'UNKNOWN'
compareAttributes = ('data', 'ttl')
showAttributes = ('data', 'ttl')
def __init__(self, data='', ttl=None):
self.data = data
self.ttl = str2time(ttl)
def encode(self, strio, compDict=None):
"""
Write the raw bytes corresponding to this record's payload to the
stream.
"""
strio.write(self.data)
def decode(self, strio, length=None):
"""
Load the bytes which are part of this record from the stream and store
them unparsed and unmodified.
"""
if length is None:
raise Exception('must know length for unknown record types')
self.data = readPrecisely(strio, length)
def __hash__(self):
return hash((self.data, self.ttl))
class Record_SPF(Record_TXT):
"""
Structurally, freeform text. Semantically, a policy definition, formatted
as defined in U{rfc 4408<http://www.faqs.org/rfcs/rfc4408.html>}.
@type data: C{list} of C{str}
@ivar data: Freeform text which makes up this record.
@type ttl: C{int}
@ivar ttl: The maximum number of seconds which this record should be cached.
"""
TYPE = SPF
fancybasename = 'SPF'
class Message:
"""
L{Message} contains all the information represented by a single
DNS request or response.
"""
headerFmt = "!H2B4H"
headerSize = struct.calcsize(headerFmt)
# Question, answer, additional, and nameserver lists
queries = answers = add = ns = None
def __init__(self, id=0, answer=0, opCode=0, recDes=0, recAv=0,
auth=0, rCode=OK, trunc=0, maxSize=512):
self.maxSize = maxSize
self.id = id
self.answer = answer
self.opCode = opCode
self.auth = auth
self.trunc = trunc
self.recDes = recDes
self.recAv = recAv
self.rCode = rCode
self.queries = []
self.answers = []
self.authority = []
self.additional = []
def addQuery(self, name, type=ALL_RECORDS, cls=IN):
"""
Add another query to this Message.
@type name: C{str}
@param name: The name to query.
@type type: C{int}
@param type: Query type
@type cls: C{int}
@param cls: Query class
"""
self.queries.append(Query(name, type, cls))
def encode(self, strio):
compDict = {}
body_tmp = StringIO.StringIO()
for q in self.queries:
q.encode(body_tmp, compDict)
for q in self.answers:
q.encode(body_tmp, compDict)
for q in self.authority:
q.encode(body_tmp, compDict)
for q in self.additional:
q.encode(body_tmp, compDict)
body = body_tmp.getvalue()
size = len(body) + self.headerSize
if self.maxSize and size > self.maxSize:
self.trunc = 1
body = body[:self.maxSize - self.headerSize]
byte3 = (( ( self.answer & 1 ) << 7 )
| ((self.opCode & 0xf ) << 3 )
| ((self.auth & 1 ) << 2 )
| ((self.trunc & 1 ) << 1 )
| ( self.recDes & 1 ) )
byte4 = ( ( (self.recAv & 1 ) << 7 )
| (self.rCode & 0xf ) )
strio.write(struct.pack(self.headerFmt, self.id, byte3, byte4,
len(self.queries), len(self.answers),
len(self.authority), len(self.additional)))
strio.write(body)
def decode(self, strio, length=None):
self.maxSize = 0
header = readPrecisely(strio, self.headerSize)
r = struct.unpack(self.headerFmt, header)
self.id, byte3, byte4, nqueries, nans, nns, nadd = r
self.answer = ( byte3 >> 7 ) & 1
self.opCode = ( byte3 >> 3 ) & 0xf
self.auth = ( byte3 >> 2 ) & 1
self.trunc = ( byte3 >> 1 ) & 1
self.recDes = byte3 & 1
self.recAv = ( byte4 >> 7 ) & 1
self.rCode = byte4 & 0xf
self.queries = []
for i in range(nqueries):
q = Query()
try:
q.decode(strio)
except EOFError:
return
self.queries.append(q)
items = ((self.answers, nans), (self.authority, nns), (self.additional, nadd))
for (l, n) in items:
self.parseRecords(l, n, strio)
def parseRecords(self, list, num, strio):
for i in range(num):
header = RRHeader(auth=self.auth)
try:
header.decode(strio)
except EOFError:
return
t = self.lookupRecordType(header.type)
if not t:
continue
header.payload = t(ttl=header.ttl)
try:
header.payload.decode(strio, header.rdlength)
except EOFError:
return
list.append(header)
# Create a mapping from record types to their corresponding Record_*
# classes. This relies on the global state which has been created so
# far in initializing this module (so don't define Record classes after
# this).
_recordTypes = {}
for name in globals():
if name.startswith('Record_'):
_recordTypes[globals()[name].TYPE] = globals()[name]
# Clear the iteration variable out of the class namespace so it
# doesn't become an attribute.
del name
def lookupRecordType(self, type):
"""
Retrieve the L{IRecord} implementation for the given record type.
@param type: A record type, such as L{A} or L{NS}.
@type type: C{int}
@return: An object which implements L{IRecord} or C{None} if none
can be found for the given type.
@rtype: L{types.ClassType}
"""
return self._recordTypes.get(type, UnknownRecord)
def toStr(self):
strio = StringIO.StringIO()
self.encode(strio)
return strio.getvalue()
def fromStr(self, str):
strio = StringIO.StringIO(str)
self.decode(strio)
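# Editor's note (illustrative sketch, not part of the original module): a DNS
# query is a Message with recursion desired and one entry in .queries; toStr()
# yields the wire form and fromStr() parses it back. The name is a placeholder.
#
#     request = Message(id=1234, recDes=1)
#     request.addQuery('www.example.com', type=A, cls=IN)
#     wire = request.toStr()
#     echoed = Message()
#     echoed.fromStr(wire)
#     echoed.queries[0].name   # -> Name('www.example.com')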
class DNSMixin(object):
"""
DNS protocol mixin shared by UDP and TCP implementations.
@ivar _reactor: A L{IReactorTime} and L{IReactorUDP} provider which will
be used to issue DNS queries and manage request timeouts.
"""
id = None
liveMessages = None
def __init__(self, controller, reactor=None):
self.controller = controller
self.id = random.randrange(2 ** 10, 2 ** 15)
if reactor is None:
from twisted.internet import reactor
self._reactor = reactor
def pickID(self):
"""
Return a unique ID for queries.
"""
while True:
id = randomSource()
if id not in self.liveMessages:
return id
def callLater(self, period, func, *args):
"""
Wrapper around reactor.callLater, mainly for test purpose.
"""
return self._reactor.callLater(period, func, *args)
def _query(self, queries, timeout, id, writeMessage):
"""
Send out a message with the given queries.
@type queries: C{list} of C{Query} instances
@param queries: The queries to transmit
@type timeout: C{int} or C{float}
@param timeout: How long to wait before giving up
@type id: C{int}
@param id: Unique key for this request
@type writeMessage: C{callable}
@param writeMessage: One-parameter callback which writes the message
@rtype: C{Deferred}
@return: a C{Deferred} which will be fired with the result of the
query, or errbacked with any errors that could happen (exceptions
during writing of the query, timeout errors, ...).
"""
m = Message(id, recDes=1)
m.queries = queries
try:
writeMessage(m)
except:
return defer.fail()
resultDeferred = defer.Deferred()
cancelCall = self.callLater(timeout, self._clearFailed, resultDeferred, id)
self.liveMessages[id] = (resultDeferred, cancelCall)
return resultDeferred
def _clearFailed(self, deferred, id):
"""
Clean the Deferred after a timeout.
"""
try:
del self.liveMessages[id]
except KeyError:
pass
deferred.errback(failure.Failure(DNSQueryTimeoutError(id)))
class DNSDatagramProtocol(DNSMixin, protocol.DatagramProtocol):
"""
DNS protocol over UDP.
"""
resends = None
def stopProtocol(self):
"""
Stop protocol: reset state variables.
"""
self.liveMessages = {}
self.resends = {}
self.transport = None
def startProtocol(self):
"""
Upon start, reset internal state.
"""
self.liveMessages = {}
self.resends = {}
def writeMessage(self, message, address):
"""
Send a message holding DNS queries.
@type message: L{Message}
"""
self.transport.write(message.toStr(), address)
def startListening(self):
self._reactor.listenUDP(0, self, maxPacketSize=512)
def datagramReceived(self, data, addr):
"""
Read a datagram, extract the message in it and trigger the associated
Deferred.
"""
m = Message()
try:
m.fromStr(data)
except EOFError:
log.msg("Truncated packet (%d bytes) from %s" % (len(data), addr))
return
except:
# Nothing should trigger this, but since we're potentially
# invoking a lot of different decoding methods, we might as well
# be extra cautious. Anything that triggers this is itself
# buggy.
log.err(failure.Failure(), "Unexpected decoding error")
return
if m.id in self.liveMessages:
d, canceller = self.liveMessages[m.id]
del self.liveMessages[m.id]
canceller.cancel()
# XXX we shouldn't need this hack of catching exception on callback()
try:
d.callback(m)
except:
log.err()
else:
if m.id not in self.resends:
self.controller.messageReceived(m, self, addr)
def removeResend(self, id):
"""
Mark message ID as no longer having duplication suppression.
"""
try:
del self.resends[id]
except KeyError:
pass
def query(self, address, queries, timeout=10, id=None):
"""
Send out a message with the given queries.
@type address: C{tuple} of C{str} and C{int}
@param address: The address to which to send the query
@type queries: C{list} of C{Query} instances
@param queries: The queries to transmit
@rtype: C{Deferred}
"""
if not self.transport:
# XXX transport might not get created automatically, use callLater?
try:
self.startListening()
except CannotListenError:
return defer.fail()
if id is None:
id = self.pickID()
else:
self.resends[id] = 1
def writeMessage(m):
self.writeMessage(m, address)
return self._query(queries, timeout, id, writeMessage)
class DNSProtocol(DNSMixin, protocol.Protocol):
"""
DNS protocol over TCP.
"""
length = None
buffer = ''
def writeMessage(self, message):
"""
Send a message holding DNS queries.
@type message: L{Message}
"""
s = message.toStr()
self.transport.write(struct.pack('!H', len(s)) + s)
def connectionMade(self):
"""
Connection is made: reset internal state, and notify the controller.
"""
self.liveMessages = {}
self.controller.connectionMade(self)
def connectionLost(self, reason):
"""
Notify the controller that this protocol is no longer
connected.
"""
self.controller.connectionLost(self)
def dataReceived(self, data):
self.buffer += data
while self.buffer:
if self.length is None and len(self.buffer) >= 2:
self.length = struct.unpack('!H', self.buffer[:2])[0]
self.buffer = self.buffer[2:]
if len(self.buffer) >= self.length:
myChunk = self.buffer[:self.length]
m = Message()
m.fromStr(myChunk)
try:
d, canceller = self.liveMessages[m.id]
except KeyError:
self.controller.messageReceived(m, self)
else:
del self.liveMessages[m.id]
canceller.cancel()
# XXX we shouldn't need this hack
try:
d.callback(m)
except:
log.err()
self.buffer = self.buffer[self.length:]
self.length = None
else:
break
def query(self, queries, timeout=60):
"""
Send out a message with the given queries.
@type queries: C{list} of C{Query} instances
@param queries: The queries to transmit
@rtype: C{Deferred}
"""
id = self.pickID()
return self._query(queries, timeout, id, self.writeMessage)
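# Editor's note: a minimal, self-contained round trip through the encoder and
# parser defined above, guarded so that importing this module stays free of
# side effects. The query name is a placeholder; nothing touches the network.
if __name__ == '__main__':
    _demo = Message(id=42, recDes=1)
    _demo.addQuery('host.example.com', type=MX, cls=IN)
    _wire = _demo.toStr()
    _parsed = Message()
    _parsed.fromStr(_wire)
    print('query id=%d name=%s type=%d' % (
        _parsed.id, _parsed.queries[0].name, _parsed.queries[0].type))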
| mit |
frappe/frappe | frappe/commands/scheduler.py | 1 | 5626 | import click
import sys
import frappe
from frappe.utils import cint
from frappe.commands import pass_context, get_site
from frappe.exceptions import SiteNotSpecifiedError
def _is_scheduler_enabled():
enable_scheduler = False
try:
frappe.connect()
enable_scheduler = cint(frappe.db.get_single_value("System Settings", "enable_scheduler")) and True or False
except:
pass
finally:
frappe.db.close()
return enable_scheduler
@click.command("trigger-scheduler-event", help="Trigger a scheduler event")
@click.argument("event")
@pass_context
def trigger_scheduler_event(context, event):
import frappe.utils.scheduler
exit_code = 0
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
try:
frappe.get_doc("Scheduled Job Type", {"method": event}).execute()
except frappe.DoesNotExistError:
click.secho(f"Event {event} does not exist!", fg="red")
exit_code = 1
finally:
frappe.destroy()
if not context.sites:
raise SiteNotSpecifiedError
sys.exit(exit_code)
@click.command('enable-scheduler')
@pass_context
def enable_scheduler(context):
"Enable scheduler"
import frappe.utils.scheduler
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.utils.scheduler.enable_scheduler()
frappe.db.commit()
print("Enabled for", site)
finally:
frappe.destroy()
if not context.sites:
raise SiteNotSpecifiedError
@click.command('disable-scheduler')
@pass_context
def disable_scheduler(context):
"Disable scheduler"
import frappe.utils.scheduler
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
frappe.utils.scheduler.disable_scheduler()
frappe.db.commit()
print("Disabled for", site)
finally:
frappe.destroy()
if not context.sites:
raise SiteNotSpecifiedError
@click.command('scheduler')
@click.option('--site', help='site name')
@click.argument('state', type=click.Choice(['pause', 'resume', 'disable', 'enable']))
@pass_context
def scheduler(context, state, site=None):
from frappe.installer import update_site_config
import frappe.utils.scheduler
if not site:
site = get_site(context)
try:
frappe.init(site=site)
if state == 'pause':
update_site_config('pause_scheduler', 1)
elif state == 'resume':
update_site_config('pause_scheduler', 0)
elif state == 'disable':
frappe.connect()
frappe.utils.scheduler.disable_scheduler()
frappe.db.commit()
elif state == 'enable':
frappe.connect()
frappe.utils.scheduler.enable_scheduler()
frappe.db.commit()
print('Scheduler {0}d for site {1}'.format(state, site))
finally:
frappe.destroy()
@click.command('set-maintenance-mode')
@click.option('--site', help='site name')
@click.argument('state', type=click.Choice(['on', 'off']))
@pass_context
def set_maintenance_mode(context, state, site=None):
from frappe.installer import update_site_config
if not site:
site = get_site(context)
try:
frappe.init(site=site)
update_site_config('maintenance_mode', 1 if (state == 'on') else 0)
finally:
frappe.destroy()
@click.command('doctor')  # Passing context always gets a site, and it breaks if no site is in use
@click.option('--site', help='site name')
@pass_context
def doctor(context, site=None):
"Get diagnostic info about background workers"
from frappe.utils.doctor import doctor as _doctor
if not site:
site = get_site(context, raise_err=False)
return _doctor(site=site)
@click.command('show-pending-jobs')
@click.option('--site', help='site name')
@pass_context
def show_pending_jobs(context, site=None):
"Get diagnostic info about background jobs"
from frappe.utils.doctor import pending_jobs as _pending_jobs
if not site:
site = get_site(context)
with frappe.init_site(site):
pending_jobs = _pending_jobs(site=site)
return pending_jobs
@click.command('purge-jobs')
@click.option('--site', help='site name')
@click.option('--queue', default=None, help='one of "low", "default", "high"')
@click.option('--event', default=None, help='one of "all", "weekly", "monthly", "hourly", "daily", "weekly_long", "daily_long"')
def purge_jobs(site=None, queue=None, event=None):
"Purge any pending periodic tasks, if event option is not given, it will purge everything for the site"
from frappe.utils.doctor import purge_pending_jobs
frappe.init(site or '')
count = purge_pending_jobs(event=event, site=site, queue=queue)
print("Purged {} jobs".format(count))
@click.command('schedule')
def start_scheduler():
from frappe.utils.scheduler import start_scheduler
start_scheduler()
@click.command('worker')
@click.option('--queue', type=str)
@click.option('--quiet', is_flag = True, default = False, help = 'Hide Log Outputs')
def start_worker(queue, quiet = False):
from frappe.utils.background_jobs import start_worker
start_worker(queue, quiet = quiet)
@click.command('ready-for-migration')
@click.option('--site', help='site name')
@pass_context
def ready_for_migration(context, site=None):
from frappe.utils.doctor import get_pending_jobs
if not site:
site = get_site(context)
try:
frappe.init(site=site)
pending_jobs = get_pending_jobs(site=site)
if pending_jobs:
print('NOT READY for migration: site {0} has pending background jobs'.format(site))
sys.exit(1)
else:
print('READY for migration: site {0} does not have any background jobs'.format(site))
return 0
finally:
frappe.destroy()
commands = [
disable_scheduler,
doctor,
enable_scheduler,
purge_jobs,
ready_for_migration,
scheduler,
set_maintenance_mode,
show_pending_jobs,
start_scheduler,
start_worker,
trigger_scheduler_event,
]
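# Editor's note (illustrative, not part of the original module): these click
# commands are normally dispatched through the bench CLI rather than imported
# directly. Hypothetical invocations, with a placeholder site name:
#
#     bench --site mysite.local enable-scheduler
#     bench --site mysite.local scheduler pause
#     bench --site mysite.local doctor
#     bench --site mysite.local purge-jobs --event all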
| mit |
UKPLab/sentence-transformers | examples/applications/clustering/kmeans.py | 1 | 1393 | """
This is a simple application for sentence embeddings: clustering
Sentences are mapped to sentence embeddings and then k-mean clustering is applied.
"""
from sentence_transformers import SentenceTransformer
from sklearn.cluster import KMeans
embedder = SentenceTransformer('paraphrase-MiniLM-L6-v2')
# Corpus with example sentences
corpus = ['A man is eating food.',
'A man is eating a piece of bread.',
'A man is eating pasta.',
'The girl is carrying a baby.',
'The baby is carried by the woman',
'A man is riding a horse.',
'A man is riding a white horse on an enclosed ground.',
'A monkey is playing drums.',
'Someone in a gorilla costume is playing a set of drums.',
'A cheetah is running behind its prey.',
'A cheetah chases prey on across a field.'
]
corpus_embeddings = embedder.encode(corpus)
# Perform kmean clustering
num_clusters = 5
clustering_model = KMeans(n_clusters=num_clusters)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
clustered_sentences = [[] for i in range(num_clusters)]
for sentence_id, cluster_id in enumerate(cluster_assignment):
clustered_sentences[cluster_id].append(corpus[sentence_id])
for i, cluster in enumerate(clustered_sentences):
print("Cluster ", i+1)
print(cluster)
print("")
| apache-2.0 |
deathping1994/sendmail-api | venv/lib/python2.7/site-packages/pip/_vendor/colorama/ansi.py | 442 | 2304 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
'''
This module generates ANSI character codes to printing colors to terminals.
See: http://en.wikipedia.org/wiki/ANSI_escape_code
'''
CSI = '\033['
OSC = '\033]'
BEL = '\007'
def code_to_chars(code):
return CSI + str(code) + 'm'
class AnsiCodes(object):
def __init__(self, codes):
for name in dir(codes):
if not name.startswith('_'):
value = getattr(codes, name)
setattr(self, name, code_to_chars(value))
class AnsiCursor(object):
def UP(self, n=1):
return CSI + str(n) + "A"
def DOWN(self, n=1):
return CSI + str(n) + "B"
def FORWARD(self, n=1):
return CSI + str(n) + "C"
def BACK(self, n=1):
return CSI + str(n) + "D"
def POS(self, x=1, y=1):
return CSI + str(y) + ";" + str(x) + "H"
def set_title(title):
return OSC + "2;" + title + BEL
def clear_screen(mode=2):
return CSI + str(mode) + "J"
def clear_line(mode=2):
return CSI + str(mode) + "K"
class AnsiFore:
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
WHITE = 37
RESET = 39
# These are fairly well supported, but not part of the standard.
LIGHTBLACK_EX = 90
LIGHTRED_EX = 91
LIGHTGREEN_EX = 92
LIGHTYELLOW_EX = 93
LIGHTBLUE_EX = 94
LIGHTMAGENTA_EX = 95
LIGHTCYAN_EX = 96
LIGHTWHITE_EX = 97
class AnsiBack:
BLACK = 40
RED = 41
GREEN = 42
YELLOW = 43
BLUE = 44
MAGENTA = 45
CYAN = 46
WHITE = 47
RESET = 49
# These are fairly well supported, but not part of the standard.
LIGHTBLACK_EX = 100
LIGHTRED_EX = 101
LIGHTGREEN_EX = 102
LIGHTYELLOW_EX = 103
LIGHTBLUE_EX = 104
LIGHTMAGENTA_EX = 105
LIGHTCYAN_EX = 106
LIGHTWHITE_EX = 107
class AnsiStyle:
BRIGHT = 1
DIM = 2
NORMAL = 22
RESET_ALL = 0
Fore = AnsiCodes( AnsiFore )
Back = AnsiCodes( AnsiBack )
Style = AnsiCodes( AnsiStyle )
Cursor = AnsiCursor()
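# Editor's note: a small, self-contained demo of the objects defined above,
# guarded so importing the module has no side effects. On Windows these escape
# codes only render after colorama.init() wraps stdout, which happens outside
# this module.
if __name__ == '__main__':
    print(Fore.RED + 'error text' + Fore.RESET)
    print(Back.GREEN + Fore.BLACK + 'highlighted' + Style.RESET_ALL)
    print(Style.BRIGHT + 'bright text' + Style.RESET_ALL)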
| apache-2.0 |
ChenJunor/hue | desktop/core/ext-py/boto-2.38.0/boto/gs/resumable_upload_handler.py | 153 | 31419 | # Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import errno
import httplib
import os
import random
import re
import socket
import time
import urlparse
from hashlib import md5
from boto import config, UserAgent
from boto.connection import AWSAuthConnection
from boto.exception import InvalidUriError
from boto.exception import ResumableTransferDisposition
from boto.exception import ResumableUploadException
from boto.s3.keyfile import KeyFile
"""
Handler for Google Cloud Storage resumable uploads. See
http://code.google.com/apis/storage/docs/developer-guide.html#resumable
for details.
Resumable uploads will retry failed uploads, resuming at the byte
count completed by the last upload attempt. If too many retries happen with
no progress (per configurable num_retries param), the upload will be
aborted in the current process.
The caller can optionally specify a tracker_file_name param in the
ResumableUploadHandler constructor. If you do this, that file will
save the state needed to allow retrying later, in a separate process
(e.g., in a later run of gsutil).
"""
class ResumableUploadHandler(object):
BUFFER_SIZE = 8192
RETRYABLE_EXCEPTIONS = (httplib.HTTPException, IOError, socket.error,
socket.gaierror)
# (start, end) response indicating server has nothing (upload protocol uses
# inclusive numbering).
SERVER_HAS_NOTHING = (0, -1)
def __init__(self, tracker_file_name=None, num_retries=None):
"""
Constructor. Instantiate once for each uploaded file.
:type tracker_file_name: string
:param tracker_file_name: optional file name to save tracker URI.
If supplied and the current process fails the upload, it can be
retried in a new process. If called with an existing file containing
a valid tracker URI, we'll resume the upload from this URI; else
we'll start a new resumable upload (and write the URI to this
tracker file).
:type num_retries: int
:param num_retries: the number of times we'll re-try a resumable upload
making no progress. (Count resets every time we get progress, so
upload can span many more than this number of retries.)
"""
self.tracker_file_name = tracker_file_name
self.num_retries = num_retries
self.server_has_bytes = 0 # Byte count at last server check.
self.tracker_uri = None
if tracker_file_name:
self._load_tracker_uri_from_file()
# Save upload_start_point in instance state so caller can find how
# much was transferred by this ResumableUploadHandler (across retries).
self.upload_start_point = None
def _load_tracker_uri_from_file(self):
f = None
try:
f = open(self.tracker_file_name, 'r')
uri = f.readline().strip()
self._set_tracker_uri(uri)
except IOError as e:
# Ignore non-existent file (happens first time an upload
# is attempted on a file), but warn user for other errors.
if e.errno != errno.ENOENT:
# Will restart because self.tracker_uri is None.
print('Couldn\'t read URI tracker file (%s): %s. Restarting '
'upload from scratch.' %
(self.tracker_file_name, e.strerror))
except InvalidUriError as e:
# Warn user, but proceed (will restart because
# self.tracker_uri is None).
print('Invalid tracker URI (%s) found in URI tracker file '
'(%s). Restarting upload from scratch.' %
(uri, self.tracker_file_name))
finally:
if f:
f.close()
def _save_tracker_uri_to_file(self):
"""
Saves URI to tracker file if one was passed to constructor.
"""
if not self.tracker_file_name:
return
f = None
try:
with os.fdopen(os.open(self.tracker_file_name,
os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f:
f.write(self.tracker_uri)
except IOError as e:
raise ResumableUploadException(
                'Couldn\'t write URI tracker file (%s): %s.\nThis can happen '
'if you\'re using an incorrectly configured upload tool\n'
'(e.g., gsutil configured to save tracker files to an '
'unwritable directory)' %
(self.tracker_file_name, e.strerror),
ResumableTransferDisposition.ABORT)
def _set_tracker_uri(self, uri):
"""
Called when we start a new resumable upload or get a new tracker
URI for the upload. Saves URI and resets upload state.
Raises InvalidUriError if URI is syntactically invalid.
"""
parse_result = urlparse.urlparse(uri)
if (parse_result.scheme.lower() not in ['http', 'https'] or
not parse_result.netloc):
raise InvalidUriError('Invalid tracker URI (%s)' % uri)
self.tracker_uri = uri
self.tracker_uri_host = parse_result.netloc
self.tracker_uri_path = '%s?%s' % (
parse_result.path, parse_result.query)
self.server_has_bytes = 0
def get_tracker_uri(self):
"""
Returns upload tracker URI, or None if the upload has not yet started.
"""
return self.tracker_uri
def get_upload_id(self):
"""
Returns the upload ID for the resumable upload, or None if the upload
has not yet started.
"""
# We extract the upload_id from the tracker uri. We could retrieve the
# upload_id from the headers in the response but this only works for
# the case where we get the tracker uri from the service. In the case
# where we get the tracker from the tracking file we need to do this
# logic anyway.
delim = '?upload_id='
if self.tracker_uri and delim in self.tracker_uri:
return self.tracker_uri[self.tracker_uri.index(delim) + len(delim):]
else:
return None
def _remove_tracker_file(self):
if (self.tracker_file_name and
os.path.exists(self.tracker_file_name)):
os.unlink(self.tracker_file_name)
def _build_content_range_header(self, range_spec='*', length_spec='*'):
return 'bytes %s/%s' % (range_spec, length_spec)
def _query_server_state(self, conn, file_length):
"""
Queries server to find out state of given upload.
Note that this method really just makes special case use of the
fact that the upload server always returns the current start/end
state whenever a PUT doesn't complete.
Returns HTTP response from sending request.
Raises ResumableUploadException if problem querying server.
"""
# Send an empty PUT so that server replies with this resumable
# transfer's state.
put_headers = {}
put_headers['Content-Range'] = (
self._build_content_range_header('*', file_length))
put_headers['Content-Length'] = '0'
return AWSAuthConnection.make_request(conn, 'PUT',
path=self.tracker_uri_path,
auth_path=self.tracker_uri_path,
headers=put_headers,
host=self.tracker_uri_host)
def _query_server_pos(self, conn, file_length):
"""
Queries server to find out what bytes it currently has.
Returns (server_start, server_end), where the values are inclusive.
For example, (0, 2) would mean that the server has bytes 0, 1, *and* 2.
Raises ResumableUploadException if problem querying server.
"""
resp = self._query_server_state(conn, file_length)
if resp.status == 200:
# To handle the boundary condition where the server has the complete
# file, we return (server_start, file_length-1). That way the
# calling code can always simply read up through server_end. (If we
# didn't handle this boundary condition here, the caller would have
# to check whether server_end == file_length and read one fewer byte
# in that case.)
return (0, file_length - 1) # Completed upload.
if resp.status != 308:
# This means the server didn't have any state for the given
# upload ID, which can happen (for example) if the caller saved
# the tracker URI to a file and then tried to restart the transfer
# after that upload ID has gone stale. In that case we need to
# start a new transfer (and the caller will then save the new
# tracker URI to the tracker file).
raise ResumableUploadException(
'Got non-308 response (%s) from server state query' %
resp.status, ResumableTransferDisposition.START_OVER)
got_valid_response = False
range_spec = resp.getheader('range')
if range_spec:
# Parse 'bytes=<from>-<to>' range_spec.
m = re.search('bytes=(\d+)-(\d+)', range_spec)
if m:
server_start = long(m.group(1))
server_end = long(m.group(2))
got_valid_response = True
else:
# No Range header, which means the server does not yet have
# any bytes. Note that the Range header uses inclusive 'from'
# and 'to' values. Since Range 0-0 would mean that the server
# has byte 0, omitting the Range header is used to indicate that
# the server doesn't have any bytes.
return self.SERVER_HAS_NOTHING
if not got_valid_response:
raise ResumableUploadException(
'Couldn\'t parse upload server state query response (%s)' %
str(resp.getheaders()), ResumableTransferDisposition.START_OVER)
if conn.debug >= 1:
print('Server has: Range: %d - %d.' % (server_start, server_end))
return (server_start, server_end)
def _start_new_resumable_upload(self, key, headers=None):
"""
Starts a new resumable upload.
Raises ResumableUploadException if any errors occur.
"""
conn = key.bucket.connection
if conn.debug >= 1:
print('Starting new resumable upload.')
self.server_has_bytes = 0
# Start a new resumable upload by sending a POST request with an
# empty body and the "X-Goog-Resumable: start" header. Include any
# caller-provided headers (e.g., Content-Type) EXCEPT Content-Length
# (and raise an exception if they tried to pass one, since it's
# a semantic error to specify it at this point, and if we were to
# include one now it would cause the server to expect that many
# bytes; the POST doesn't include the actual file bytes We set
# the Content-Length in the subsequent PUT, based on the uploaded
# file size.
post_headers = {}
for k in headers:
if k.lower() == 'content-length':
raise ResumableUploadException(
'Attempt to specify Content-Length header (disallowed)',
ResumableTransferDisposition.ABORT)
post_headers[k] = headers[k]
post_headers[conn.provider.resumable_upload_header] = 'start'
resp = conn.make_request(
'POST', key.bucket.name, key.name, post_headers)
# Get tracker URI from response 'Location' header.
body = resp.read()
# Check for various status conditions.
if resp.status in [500, 503]:
# Retry status 500 and 503 errors after a delay.
raise ResumableUploadException(
'Got status %d from attempt to start resumable upload. '
'Will wait/retry' % resp.status,
ResumableTransferDisposition.WAIT_BEFORE_RETRY)
elif resp.status != 200 and resp.status != 201:
raise ResumableUploadException(
'Got status %d from attempt to start resumable upload. '
'Aborting' % resp.status,
ResumableTransferDisposition.ABORT)
# Else we got 200 or 201 response code, indicating the resumable
# upload was created.
tracker_uri = resp.getheader('Location')
if not tracker_uri:
raise ResumableUploadException(
'No resumable tracker URI found in resumable initiation '
'POST response (%s)' % body,
ResumableTransferDisposition.WAIT_BEFORE_RETRY)
self._set_tracker_uri(tracker_uri)
self._save_tracker_uri_to_file()
def _upload_file_bytes(self, conn, http_conn, fp, file_length,
total_bytes_uploaded, cb, num_cb, headers):
"""
Makes one attempt to upload file bytes, using an existing resumable
upload connection.
Returns (etag, generation, metageneration) from server upon success.
Raises ResumableUploadException if any problems occur.
"""
buf = fp.read(self.BUFFER_SIZE)
if cb:
# The cb_count represents the number of full buffers to send between
# cb executions.
if num_cb > 2:
cb_count = file_length / self.BUFFER_SIZE / (num_cb-2)
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(total_bytes_uploaded, file_length)
# Build resumable upload headers for the transfer. Don't send a
# Content-Range header if the file is 0 bytes long, because the
# resumable upload protocol uses an *inclusive* end-range (so, sending
# 'bytes 0-0/1' would actually mean you're sending a 1-byte file).
if not headers:
put_headers = {}
else:
put_headers = headers.copy()
if file_length:
if total_bytes_uploaded == file_length:
range_header = self._build_content_range_header(
'*', file_length)
else:
range_header = self._build_content_range_header(
'%d-%d' % (total_bytes_uploaded, file_length - 1),
file_length)
put_headers['Content-Range'] = range_header
# Set Content-Length to the total bytes we'll send with this PUT.
put_headers['Content-Length'] = str(file_length - total_bytes_uploaded)
http_request = AWSAuthConnection.build_base_http_request(
conn, 'PUT', path=self.tracker_uri_path, auth_path=None,
headers=put_headers, host=self.tracker_uri_host)
http_conn.putrequest('PUT', http_request.path)
for k in put_headers:
http_conn.putheader(k, put_headers[k])
http_conn.endheaders()
# Turn off debug on http connection so upload content isn't included
# in debug stream.
http_conn.set_debuglevel(0)
while buf:
http_conn.send(buf)
for alg in self.digesters:
self.digesters[alg].update(buf)
total_bytes_uploaded += len(buf)
if cb:
i += 1
if i == cb_count or cb_count == -1:
cb(total_bytes_uploaded, file_length)
i = 0
buf = fp.read(self.BUFFER_SIZE)
http_conn.set_debuglevel(conn.debug)
if cb:
cb(total_bytes_uploaded, file_length)
if total_bytes_uploaded != file_length:
# Abort (and delete the tracker file) so if the user retries
# they'll start a new resumable upload rather than potentially
# attempting to pick back up later where we left off.
raise ResumableUploadException(
'File changed during upload: EOF at %d bytes of %d byte file.' %
(total_bytes_uploaded, file_length),
ResumableTransferDisposition.ABORT)
resp = http_conn.getresponse()
# Restore http connection debug level.
http_conn.set_debuglevel(conn.debug)
if resp.status == 200:
# Success.
return (resp.getheader('etag'),
resp.getheader('x-goog-generation'),
resp.getheader('x-goog-metageneration'))
# Retry timeout (408) and status 500 and 503 errors after a delay.
elif resp.status in [408, 500, 503]:
disposition = ResumableTransferDisposition.WAIT_BEFORE_RETRY
else:
# Catch all for any other error codes.
disposition = ResumableTransferDisposition.ABORT
raise ResumableUploadException('Got response code %d while attempting '
'upload (%s)' %
(resp.status, resp.reason), disposition)
def _attempt_resumable_upload(self, key, fp, file_length, headers, cb,
num_cb):
"""
Attempts a resumable upload.
Returns (etag, generation, metageneration) from server upon success.
Raises ResumableUploadException if any problems occur.
"""
(server_start, server_end) = self.SERVER_HAS_NOTHING
conn = key.bucket.connection
if self.tracker_uri:
# Try to resume existing resumable upload.
try:
(server_start, server_end) = (
self._query_server_pos(conn, file_length))
self.server_has_bytes = server_start
if server_end:
# If the server already has some of the content, we need to
# update the digesters with the bytes that have already been
# uploaded to ensure we get a complete hash in the end.
print('Catching up hash digest(s) for resumed upload')
fp.seek(0)
# Read local file's bytes through position server has. For
# example, if server has (0, 3) we want to read 3-0+1=4 bytes.
bytes_to_go = server_end + 1
while bytes_to_go:
chunk = fp.read(min(key.BufferSize, bytes_to_go))
if not chunk:
raise ResumableUploadException(
'Hit end of file during resumable upload hash '
'catchup. This should not happen under\n'
'normal circumstances, as it indicates the '
'server has more bytes of this transfer\nthan'
' the current file size. Restarting upload.',
ResumableTransferDisposition.START_OVER)
for alg in self.digesters:
self.digesters[alg].update(chunk)
bytes_to_go -= len(chunk)
if conn.debug >= 1:
print('Resuming transfer.')
except ResumableUploadException as e:
if conn.debug >= 1:
print('Unable to resume transfer (%s).' % e.message)
self._start_new_resumable_upload(key, headers)
else:
self._start_new_resumable_upload(key, headers)
# upload_start_point allows the code that instantiated the
# ResumableUploadHandler to find out the point from which it started
# uploading (e.g., so it can correctly compute throughput).
if self.upload_start_point is None:
self.upload_start_point = server_end
total_bytes_uploaded = server_end + 1
# Corner case: Don't attempt to seek if we've already uploaded the
# entire file, because if the file is a stream (e.g., the KeyFile
# wrapper around input key when copying between providers), attempting
# to seek to the end of file would result in an InvalidRange error.
if file_length < total_bytes_uploaded:
fp.seek(total_bytes_uploaded)
conn = key.bucket.connection
# Get a new HTTP connection (vs conn.get_http_connection(), which reuses
# pool connections) because httplib requires a new HTTP connection per
# transaction. (Without this, calling http_conn.getresponse() would get
# "ResponseNotReady".)
http_conn = conn.new_http_connection(self.tracker_uri_host, conn.port,
conn.is_secure)
http_conn.set_debuglevel(conn.debug)
# Make sure to close http_conn at end so if a local file read
# failure occurs partway through server will terminate current upload
# and can report that progress on next attempt.
try:
return self._upload_file_bytes(conn, http_conn, fp, file_length,
total_bytes_uploaded, cb, num_cb,
headers)
except (ResumableUploadException, socket.error):
resp = self._query_server_state(conn, file_length)
if resp.status == 400:
raise ResumableUploadException('Got 400 response from server '
'state query after failed resumable upload attempt. This '
'can happen for various reasons, including specifying an '
'invalid request (e.g., an invalid canned ACL) or if the '
'file size changed between upload attempts',
ResumableTransferDisposition.ABORT)
else:
raise
finally:
http_conn.close()
def _check_final_md5(self, key, etag):
"""
Checks that etag from server agrees with md5 computed before upload.
This is important, since the upload could have spanned a number of
hours and multiple processes (e.g., gsutil runs), and the user could
change some of the file and not realize they have inconsistent data.
"""
if key.bucket.connection.debug >= 1:
print('Checking md5 against etag.')
if key.md5 != etag.strip('"\''):
# Call key.open_read() before attempting to delete the
# (incorrect-content) key, so we perform that request on a
            # different HTTP connection. This is needed because httplib
# will return a "Response not ready" error if you try to perform
# a second transaction on the connection.
key.open_read()
key.close()
key.delete()
raise ResumableUploadException(
'File changed during upload: md5 signature doesn\'t match etag '
'(incorrect uploaded object deleted)',
ResumableTransferDisposition.ABORT)
def handle_resumable_upload_exception(self, e, debug):
if (e.disposition == ResumableTransferDisposition.ABORT_CUR_PROCESS):
if debug >= 1:
print('Caught non-retryable ResumableUploadException (%s); '
'aborting but retaining tracker file' % e.message)
raise
elif (e.disposition == ResumableTransferDisposition.ABORT):
if debug >= 1:
print('Caught non-retryable ResumableUploadException (%s); '
'aborting and removing tracker file' % e.message)
self._remove_tracker_file()
raise
else:
if debug >= 1:
print('Caught ResumableUploadException (%s) - will retry' %
e.message)
def track_progress_less_iterations(self, server_had_bytes_before_attempt,
roll_back_md5=True, debug=0):
# At this point we had a re-tryable failure; see if made progress.
if self.server_has_bytes > server_had_bytes_before_attempt:
self.progress_less_iterations = 0 # If progress, reset counter.
else:
self.progress_less_iterations += 1
if roll_back_md5:
# Rollback any potential hash updates, as we did not
# make any progress in this iteration.
self.digesters = self.digesters_before_attempt
if self.progress_less_iterations > self.num_retries:
# Don't retry any longer in the current process.
raise ResumableUploadException(
'Too many resumable upload attempts failed without '
'progress. You might try this upload again later',
ResumableTransferDisposition.ABORT_CUR_PROCESS)
# Use binary exponential backoff to desynchronize client requests.
sleep_time_secs = random.random() * (2**self.progress_less_iterations)
if debug >= 1:
print('Got retryable failure (%d progress-less in a row).\n'
'Sleeping %3.1f seconds before re-trying' %
(self.progress_less_iterations, sleep_time_secs))
time.sleep(sleep_time_secs)
def send_file(self, key, fp, headers, cb=None, num_cb=10, hash_algs=None):
"""
Upload a file to a key into a bucket on GS, using GS resumable upload
protocol.
:type key: :class:`boto.s3.key.Key` or subclass
:param key: The Key object to which data is to be uploaded
:type fp: file-like object
:param fp: The file pointer to upload
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type cb: function
:param cb: a callback function that will be called to report progress on
the upload. The callback should accept two integer parameters, the
first representing the number of bytes that have been successfully
transmitted to GS, and the second representing the total number of
bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter, this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be called
during the file transfer. Providing a negative integer will cause
your callback to be called with each buffer read.
:type hash_algs: dictionary
:param hash_algs: (optional) Dictionary mapping hash algorithm
descriptions to corresponding state-ful hashing objects that
implement update(), digest(), and copy() (e.g. hashlib.md5()).
Defaults to {'md5': md5()}.
Raises ResumableUploadException if a problem occurs during the transfer.
"""
if not headers:
headers = {}
# If Content-Type header is present and set to None, remove it.
# This is gsutil's way of asking boto to refrain from auto-generating
# that header.
CT = 'Content-Type'
if CT in headers and headers[CT] is None:
del headers[CT]
headers['User-Agent'] = UserAgent
# Determine file size different ways for case where fp is actually a
# wrapper around a Key vs an actual file.
if isinstance(fp, KeyFile):
file_length = fp.getkey().size
else:
fp.seek(0, os.SEEK_END)
file_length = fp.tell()
fp.seek(0)
debug = key.bucket.connection.debug
# Compute the MD5 checksum on the fly.
if hash_algs is None:
hash_algs = {'md5': md5}
self.digesters = dict(
(alg, hash_algs[alg]()) for alg in hash_algs or {})
# Use num-retries from constructor if one was provided; else check
        # for a value specified in the boto config file; else default to 6.
if self.num_retries is None:
self.num_retries = config.getint('Boto', 'num_retries', 6)
self.progress_less_iterations = 0
while True: # Retry as long as we're making progress.
server_had_bytes_before_attempt = self.server_has_bytes
self.digesters_before_attempt = dict(
(alg, self.digesters[alg].copy())
for alg in self.digesters)
try:
# Save generation and metageneration in class state so caller
# can find these values, for use in preconditions of future
# operations on the uploaded object.
(etag, self.generation, self.metageneration) = (
self._attempt_resumable_upload(key, fp, file_length,
headers, cb, num_cb))
# Get the final digests for the uploaded content.
for alg in self.digesters:
key.local_hashes[alg] = self.digesters[alg].digest()
                # Upload succeeded, so remove the tracker file (if we have one).
self._remove_tracker_file()
self._check_final_md5(key, etag)
key.generation = self.generation
if debug >= 1:
print('Resumable upload complete.')
return
except self.RETRYABLE_EXCEPTIONS as e:
if debug >= 1:
print('Caught exception (%s)' % e.__repr__())
if isinstance(e, IOError) and e.errno == errno.EPIPE:
# Broken pipe error causes httplib to immediately
# close the socket (http://bugs.python.org/issue5542),
# so we need to close the connection before we resume
# the upload (which will cause a new connection to be
# opened the next time an HTTP request is sent).
key.bucket.connection.connection.close()
except ResumableUploadException as e:
self.handle_resumable_upload_exception(e, debug)
self.track_progress_less_iterations(server_had_bytes_before_attempt,
True, debug)
| apache-2.0 |
kustodian/ansible | lib/ansible/modules/cloud/google/gcp_compute_subnetwork.py | 9 | 20957 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_subnetwork
description:
- A VPC network is a virtual version of the traditional physical networks that exist
within and between physical data centers. A VPC network provides connectivity for
your Compute Engine virtual machine (VM) instances, Container Engine containers,
App Engine Flex services, and other network-related resources.
- Each GCP project contains one or more VPC networks. Each VPC network is a global
entity spanning all GCP regions. This global VPC network allows VM instances and
other resources to communicate with each other via internal, private IP addresses.
- Each VPC network is subdivided into subnets, and each subnet is contained within
a single region. You can have more than one subnet in a region for a given VPC network.
Each subnet has a contiguous private RFC1918 IP space. You create instances, containers,
and the like in these subnets.
- When you create an instance, you must create it in a subnet, and the instance draws
its internal IP address from that subnet.
- Virtual machine (VM) instances in a VPC network can communicate with instances in
all other subnets of the same VPC network, regardless of region, using their RFC1918
private IP addresses. You can isolate portions of the network, even entire subnets,
using firewall rules.
short_description: Creates a GCP Subnetwork
version_added: '2.6'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource. This field can be set only at resource creation time.
required: false
type: str
ip_cidr_range:
description:
- The range of internal addresses that are owned by this subnetwork.
- Provide this property when you create the subnetwork. For example, 10.0.0.0/8
or 192.168.0.0/16. Ranges must be unique and non-overlapping within a network.
Only IPv4 is supported.
required: true
type: str
name:
description:
- The name of the resource, provided by the client when initially creating the
resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically,
the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
required: true
type: str
network:
description:
- The network this subnet belongs to.
- Only networks that are in the distributed mode can have subnetworks.
- 'This field represents a link to a Network resource in GCP. It can be specified
in two ways. First, you can place a dictionary with key ''selfLink'' and value
      of your resource''s selfLink. Alternatively, you can add `register: name-of-resource`
to a gcp_compute_network task and then set this network field to "{{ name-of-resource
}}"'
required: true
type: dict
secondary_ip_ranges:
description:
- An array of configurations for secondary IP ranges for VM instances contained
in this subnetwork. The primary IP of such VM must belong to the primary ipCidrRange
of the subnetwork. The alias IPs may belong to either primary or secondary ranges.
required: false
type: list
version_added: '2.8'
suboptions:
range_name:
description:
- The name associated with this subnetwork secondary range, used when adding
an alias IP range to a VM instance. The name must be 1-63 characters long,
and comply with RFC1035. The name must be unique within the subnetwork.
required: true
type: str
ip_cidr_range:
description:
- The range of IP addresses belonging to this subnetwork secondary range.
Provide this property when you create the subnetwork.
- Ranges must be unique and non-overlapping with all primary and secondary
IP ranges within a network. Only IPv4 is supported.
required: true
type: str
private_ip_google_access:
description:
- When enabled, VMs in this subnetwork without external IP addresses can access
Google APIs and services by using Private Google Access.
required: false
type: bool
region:
description:
- URL of the GCP region for this subnetwork.
required: true
type: str
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/beta/subnetworks)'
- 'Private Google Access: U(https://cloud.google.com/vpc/docs/configure-private-google-access)'
- 'Cloud Networking: U(https://cloud.google.com/vpc/docs/using-vpc)'
- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
  env variable.
- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
  env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: create a network
gcp_compute_network:
name: network-subnetwork
auto_create_subnetworks: 'true'
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: network
- name: create a subnetwork
gcp_compute_subnetwork:
name: ansiblenet
region: us-west1
network: "{{ network }}"
ip_cidr_range: 172.16.0.0/16
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource. This field can be set only at resource creation time.
returned: success
type: str
gatewayAddress:
description:
- The gateway address for default routes to reach destination addresses outside
this subnetwork.
returned: success
type: str
id:
description:
- The unique identifier for the resource.
returned: success
type: int
ipCidrRange:
description:
- The range of internal addresses that are owned by this subnetwork.
- Provide this property when you create the subnetwork. For example, 10.0.0.0/8
or 192.168.0.0/16. Ranges must be unique and non-overlapping within a network.
Only IPv4 is supported.
returned: success
type: str
name:
description:
- The name of the resource, provided by the client when initially creating the resource.
The name must be 1-63 characters long, and comply with RFC1035. Specifically,
the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
network:
description:
- The network this subnet belongs to.
- Only networks that are in the distributed mode can have subnetworks.
returned: success
type: dict
secondaryIpRanges:
description:
- An array of configurations for secondary IP ranges for VM instances contained
in this subnetwork. The primary IP of such VM must belong to the primary ipCidrRange
of the subnetwork. The alias IPs may belong to either primary or secondary ranges.
returned: success
type: complex
contains:
rangeName:
description:
- The name associated with this subnetwork secondary range, used when adding
an alias IP range to a VM instance. The name must be 1-63 characters long,
and comply with RFC1035. The name must be unique within the subnetwork.
returned: success
type: str
ipCidrRange:
description:
- The range of IP addresses belonging to this subnetwork secondary range. Provide
this property when you create the subnetwork.
- Ranges must be unique and non-overlapping with all primary and secondary IP
ranges within a network. Only IPv4 is supported.
returned: success
type: str
privateIpGoogleAccess:
description:
- When enabled, VMs in this subnetwork without external IP addresses can access
Google APIs and services by using Private Google Access.
returned: success
type: bool
region:
description:
- URL of the GCP region for this subnetwork.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
description=dict(type='str'),
ip_cidr_range=dict(required=True, type='str'),
name=dict(required=True, type='str'),
network=dict(required=True, type='dict'),
secondary_ip_ranges=dict(
type='list', elements='dict', options=dict(range_name=dict(required=True, type='str'), ip_cidr_range=dict(required=True, type='str'))
),
private_ip_google_access=dict(type='bool'),
region=dict(required=True, type='str'),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
state = module.params['state']
kind = 'compute#subnetwork'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind, fetch)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind, fetch):
update_fields(module, resource_to_request(module), response_to_hash(module, fetch))
return fetch_resource(module, self_link(module), kind)
def update_fields(module, request, response):
if response.get('ipCidrRange') != request.get('ipCidrRange'):
ip_cidr_range_update(module, request, response)
if response.get('secondaryIpRanges') != request.get('secondaryIpRanges'):
secondary_ip_ranges_update(module, request, response)
if response.get('privateIpGoogleAccess') != request.get('privateIpGoogleAccess'):
private_ip_google_access_update(module, request, response)
def ip_cidr_range_update(module, request, response):
auth = GcpSession(module, 'compute')
auth.post(
''.join(["https://www.googleapis.com/compute/v1/", "projects/{project}/regions/{region}/subnetworks/{name}/expandIpCidrRange"]).format(**module.params),
{u'ipCidrRange': module.params.get('ip_cidr_range')},
)
def secondary_ip_ranges_update(module, request, response):
auth = GcpSession(module, 'compute')
auth.patch(
''.join(["https://www.googleapis.com/compute/v1/", "projects/{project}/regions/{region}/subnetworks/{name}"]).format(**module.params),
{u'secondaryIpRanges': SubnetworkSecondaryiprangesArray(module.params.get('secondary_ip_ranges', []), module).to_request()},
)
def private_ip_google_access_update(module, request, response):
auth = GcpSession(module, 'compute')
auth.post(
''.join(["https://www.googleapis.com/compute/v1/", "projects/{project}/regions/{region}/subnetworks/{name}/setPrivateIpGoogleAccess"]).format(
**module.params
),
{u'privateIpGoogleAccess': module.params.get('private_ip_google_access')},
)
def delete(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'kind': 'compute#subnetwork',
u'description': module.params.get('description'),
u'ipCidrRange': module.params.get('ip_cidr_range'),
u'name': module.params.get('name'),
u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'),
u'secondaryIpRanges': SubnetworkSecondaryiprangesArray(module.params.get('secondary_ip_ranges', []), module).to_request(),
u'privateIpGoogleAccess': module.params.get('private_ip_google_access'),
u'region': module.params.get('region'),
}
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
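# Editorial illustration (assumption, not generated by Magic Modules): for the
# EXAMPLES playbook above, resource_to_request() would yield roughly
#   {'kind': 'compute#subnetwork', 'ipCidrRange': '172.16.0.0/16',
#    'name': 'ansiblenet', 'network': '<selfLink of the created network>',
#    'region': 'us-west1'}
# because the loop above keeps only truthy values and an explicit False.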
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'compute')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/subnetworks/{name}".format(**module.params)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/subnetworks".format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError):
module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'creationTimestamp': response.get(u'creationTimestamp'),
u'description': response.get(u'description'),
u'gatewayAddress': response.get(u'gatewayAddress'),
u'id': response.get(u'id'),
u'ipCidrRange': response.get(u'ipCidrRange'),
u'name': response.get(u'name'),
u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'),
u'secondaryIpRanges': SubnetworkSecondaryiprangesArray(response.get(u'secondaryIpRanges', []), module).from_response(),
u'privateIpGoogleAccess': response.get(u'privateIpGoogleAccess'),
u'region': module.params.get('region'),
}
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
def wait_for_operation(module, response):
op_result = return_if_object(module, response, 'compute#operation')
if op_result is None:
return {}
status = navigate_hash(op_result, ['status'])
wait_done = wait_for_completion(status, op_result, module)
return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#subnetwork')
def wait_for_completion(status, op_result, module):
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
while status != 'DONE':
raise_if_errors(op_result, ['error', 'errors'], module)
time.sleep(1.0)
op_result = fetch_resource(module, op_uri, 'compute#operation', False)
status = navigate_hash(op_result, ['status'])
return op_result
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
class SubnetworkSecondaryiprangesArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict({u'rangeName': item.get('range_name'), u'ipCidrRange': item.get('ip_cidr_range')})
def _response_from_item(self, item):
return remove_nones_from_dict({u'rangeName': item.get(u'rangeName'), u'ipCidrRange': item.get(u'ipCidrRange')})
if __name__ == '__main__':
main()
| gpl-3.0 |
RaresO/test | node_modules/node-gyp/gyp/pylib/gyp/win_tool.py | 1417 | 12751 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions for Windows builds.
These functions are executed via gyp-win-tool when using the ninja generator.
"""
import os
import re
import shutil
import subprocess
import stat
import string
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# A regex matching an argument corresponding to the output filename passed to
# link.exe.
_LINK_EXE_OUT_ARG = re.compile('/OUT:(?P<out>.+)$', re.IGNORECASE)
def main(args):
executor = WinTool()
exit_code = executor.Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class WinTool(object):
"""This class performs all the Windows tooling steps. The methods can either
be executed directly, or dispatched from an argument list."""
def _UseSeparateMspdbsrv(self, env, args):
"""Allows to use a unique instance of mspdbsrv.exe per linker instead of a
shared one."""
if len(args) < 1:
raise Exception("Not enough arguments")
if args[0] != 'link.exe':
return
# Use the output filename passed to the linker to generate an endpoint name
# for mspdbsrv.exe.
endpoint_name = None
for arg in args:
m = _LINK_EXE_OUT_ARG.match(arg)
if m:
endpoint_name = re.sub(r'\W+', '',
'%s_%d' % (m.group('out'), os.getpid()))
break
if endpoint_name is None:
return
# Adds the appropriate environment variable. This will be read by link.exe
# to know which instance of mspdbsrv.exe it should connect to (if it's
# not set then the default endpoint is used).
env['_MSPDBSRV_ENDPOINT_'] = endpoint_name
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like recursive-mirror to RecursiveMirror."""
return name_string.title().replace('-', '')
def _GetEnv(self, arch):
"""Gets the saved environment from a file for a given architecture."""
# The environment is saved as an "environment block" (see CreateProcess
# and msvs_emulation for details). We convert to a dict here.
# Drop last 2 NULs, one for list terminator, one for trailing vs. separator.
pairs = open(arch).read()[:-2].split('\0')
kvs = [item.split('=', 1) for item in pairs]
return dict(kvs)
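  # Editorial illustration (not part of the original tool): the file read above
  # holds a raw CreateProcess-style environment block, roughly
  #   b'PATH=C:\\tools;...\x00TMP=C:\\temp\x00\x00'
  # i.e. NUL-separated KEY=VALUE pairs with two trailing NULs, which is why the
  # code strips the last two bytes before splitting on '\0'.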
def ExecStamp(self, path):
"""Simple stamp command."""
open(path, 'w').close()
def ExecRecursiveMirror(self, source, dest):
"""Emulation of rm -rf out && cp -af in out."""
if os.path.exists(dest):
if os.path.isdir(dest):
def _on_error(fn, path, excinfo):
# The operation failed, possibly because the file is set to
# read-only. If that's why, make it writable and try the op again.
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWRITE)
fn(path)
shutil.rmtree(dest, onerror=_on_error)
else:
if not os.access(dest, os.W_OK):
# Attempt to make the file writable before deleting it.
os.chmod(dest, stat.S_IWRITE)
os.unlink(dest)
if os.path.isdir(source):
shutil.copytree(source, dest)
else:
shutil.copy2(source, dest)
def ExecLinkWrapper(self, arch, use_separate_mspdbsrv, *args):
"""Filter diagnostic output from link that looks like:
' Creating library ui.dll.lib and object ui.dll.exp'
This happens when there are exports from the dll or exe.
"""
env = self._GetEnv(arch)
if use_separate_mspdbsrv == 'True':
self._UseSeparateMspdbsrv(env, args)
link = subprocess.Popen([args[0].replace('/', '\\')] + list(args[1:]),
shell=True,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, _ = link.communicate()
for line in out.splitlines():
if (not line.startswith(' Creating library ') and
not line.startswith('Generating code') and
not line.startswith('Finished generating code')):
print line
return link.returncode
def ExecLinkWithManifests(self, arch, embed_manifest, out, ldcmd, resname,
mt, rc, intermediate_manifest, *manifests):
"""A wrapper for handling creating a manifest resource and then executing
a link command."""
# The 'normal' way to do manifests is to have link generate a manifest
# based on gathering dependencies from the object files, then merge that
# manifest with other manifests supplied as sources, convert the merged
# manifest to a resource, and then *relink*, including the compiled
# version of the manifest resource. This breaks incremental linking, and
# is generally overly complicated. Instead, we merge all the manifests
# provided (along with one that includes what would normally be in the
# linker-generated one, see msvs_emulation.py), and include that into the
# first and only link. We still tell link to generate a manifest, but we
# only use that to assert that our simpler process did not miss anything.
variables = {
'python': sys.executable,
'arch': arch,
'out': out,
'ldcmd': ldcmd,
'resname': resname,
'mt': mt,
'rc': rc,
'intermediate_manifest': intermediate_manifest,
'manifests': ' '.join(manifests),
}
add_to_ld = ''
if manifests:
subprocess.check_call(
'%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
'-manifest %(manifests)s -out:%(out)s.manifest' % variables)
if embed_manifest == 'True':
subprocess.check_call(
'%(python)s gyp-win-tool manifest-to-rc %(arch)s %(out)s.manifest'
' %(out)s.manifest.rc %(resname)s' % variables)
subprocess.check_call(
'%(python)s gyp-win-tool rc-wrapper %(arch)s %(rc)s '
'%(out)s.manifest.rc' % variables)
add_to_ld = ' %(out)s.manifest.res' % variables
subprocess.check_call(ldcmd + add_to_ld)
# Run mt.exe on the theoretically complete manifest we generated, merging
# it with the one the linker generated to confirm that the linker
# generated one does not add anything. This is strictly unnecessary for
# correctness, it's only to verify that e.g. /MANIFESTDEPENDENCY was not
# used in a #pragma comment.
if manifests:
# Merge the intermediate one with ours to .assert.manifest, then check
# that .assert.manifest is identical to ours.
subprocess.check_call(
'%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
'-manifest %(out)s.manifest %(intermediate_manifest)s '
'-out:%(out)s.assert.manifest' % variables)
assert_manifest = '%(out)s.assert.manifest' % variables
our_manifest = '%(out)s.manifest' % variables
# Load and normalize the manifests. mt.exe sometimes removes whitespace,
# and sometimes doesn't unfortunately.
with open(our_manifest, 'rb') as our_f:
with open(assert_manifest, 'rb') as assert_f:
our_data = our_f.read().translate(None, string.whitespace)
assert_data = assert_f.read().translate(None, string.whitespace)
if our_data != assert_data:
os.unlink(out)
def dump(filename):
sys.stderr.write('%s\n-----\n' % filename)
with open(filename, 'rb') as f:
sys.stderr.write(f.read() + '\n-----\n')
dump(intermediate_manifest)
dump(our_manifest)
dump(assert_manifest)
sys.stderr.write(
'Linker generated manifest "%s" added to final manifest "%s" '
'(result in "%s"). '
'Were /MANIFEST switches used in #pragma statements? ' % (
intermediate_manifest, our_manifest, assert_manifest))
return 1
def ExecManifestWrapper(self, arch, *args):
"""Run manifest tool with environment set. Strip out undesirable warning
(some XML blocks are recognized by the OS loader, but not the manifest
tool)."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if line and 'manifest authoring warning 81010002' not in line:
print line
return popen.returncode
def ExecManifestToRc(self, arch, *args):
"""Creates a resource file pointing a SxS assembly manifest.
|args| is tuple containing path to resource file, path to manifest file
and resource name which can be "1" (for executables) or "2" (for DLLs)."""
manifest_path, resource_path, resource_name = args
with open(resource_path, 'wb') as output:
output.write('#include <windows.h>\n%s RT_MANIFEST "%s"' % (
resource_name,
os.path.abspath(manifest_path).replace('\\', '/')))
def ExecMidlWrapper(self, arch, outdir, tlb, h, dlldata, iid, proxy, idl,
*flags):
"""Filter noisy filenames output from MIDL compile step that isn't
quietable via command line flags.
"""
args = ['midl', '/nologo'] + list(flags) + [
'/out', outdir,
'/tlb', tlb,
'/h', h,
'/dlldata', dlldata,
'/iid', iid,
'/proxy', proxy,
idl]
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
# Filter junk out of stdout, and write filtered versions. Output we want
# to filter is pairs of lines that look like this:
# Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl
# objidl.idl
lines = out.splitlines()
prefixes = ('Processing ', '64 bit Processing ')
processing = set(os.path.basename(x)
for x in lines if x.startswith(prefixes))
for line in lines:
if not line.startswith(prefixes) and line not in processing:
print line
return popen.returncode
def ExecAsmWrapper(self, arch, *args):
"""Filter logo banner from invocations of asm.exe."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if (not line.startswith('Copyright (C) Microsoft Corporation') and
not line.startswith('Microsoft (R) Macro Assembler') and
not line.startswith(' Assembling: ') and
line):
print line
return popen.returncode
def ExecRcWrapper(self, arch, *args):
"""Filter logo banner from invocations of rc.exe. Older versions of RC
don't support the /nologo flag."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if (not line.startswith('Microsoft (R) Windows (R) Resource Compiler') and
not line.startswith('Copyright (C) Microsoft Corporation') and
line):
print line
return popen.returncode
def ExecActionWrapper(self, arch, rspfile, *dir):
"""Runs an action command line from a response file using the environment
for |arch|. If |dir| is supplied, use that as the working directory."""
env = self._GetEnv(arch)
# TODO(scottmg): This is a temporary hack to get some specific variables
# through to actions that are set after gyp-time. http://crbug.com/333738.
for k, v in os.environ.iteritems():
if k not in env:
env[k] = v
args = open(rspfile).read()
dir = dir[0] if dir else None
return subprocess.call(args, shell=True, env=env, cwd=dir)
def ExecClCompile(self, project_dir, selected_files):
"""Executed by msvs-ninja projects when the 'ClCompile' target is used to
build selected C/C++ files."""
project_dir = os.path.relpath(project_dir, BASE_DIR)
selected_files = selected_files.split(';')
ninja_targets = [os.path.join(project_dir, filename) + '^^'
for filename in selected_files]
cmd = ['ninja.exe']
cmd.extend(ninja_targets)
return subprocess.call(cmd, shell=True, cwd=BASE_DIR)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| mit |
openmv/micropython | tests/wipy/time.py | 14 | 3272 | import time
DAYS_PER_MONTH = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
def is_leap(year):
return (year % 4) == 0
def test():
seconds = 0
wday = 5 # Jan 1, 2000 was a Saturday
for year in range(2000, 2049):
print("Testing %d" % year)
yday = 1
for month in range(1, 13):
if month == 2 and is_leap(year):
DAYS_PER_MONTH[2] = 29
else:
DAYS_PER_MONTH[2] = 28
for day in range(1, DAYS_PER_MONTH[month] + 1):
secs = time.mktime((year, month, day, 0, 0, 0, 0, 0))
if secs != seconds:
print(
"mktime failed for %d-%02d-%02d got %d expected %d"
% (year, month, day, secs, seconds)
)
tuple = time.localtime(seconds)
secs = time.mktime(tuple)
if secs != seconds:
print(
"localtime failed for %d-%02d-%02d got %d expected %d"
% (year, month, day, secs, seconds)
)
return
seconds += 86400
if yday != tuple[7]:
print(
"locatime for %d-%02d-%02d got yday %d, expecting %d"
% (year, month, day, tuple[7], yday)
)
return
if wday != tuple[6]:
print(
"locatime for %d-%02d-%02d got wday %d, expecting %d"
% (year, month, day, tuple[6], wday)
)
return
yday += 1
wday = (wday + 1) % 7
def spot_test(seconds, expected_time):
actual_time = time.localtime(seconds)
for i in range(len(actual_time)):
if actual_time[i] != expected_time[i]:
print(
"time.localtime(", seconds, ") returned", actual_time, "expecting", expected_time
)
return
print("time.localtime(", seconds, ") returned", actual_time, "(pass)")
test()
# fmt: off
spot_test( 0, (2000, 1, 1, 0, 0, 0, 5, 1))
spot_test( 1, (2000, 1, 1, 0, 0, 1, 5, 1))
spot_test( 59, (2000, 1, 1, 0, 0, 59, 5, 1))
spot_test( 60, (2000, 1, 1, 0, 1, 0, 5, 1))
spot_test( 3599, (2000, 1, 1, 0, 59, 59, 5, 1))
spot_test( 3600, (2000, 1, 1, 1, 0, 0, 5, 1))
spot_test( -1, (1999, 12, 31, 23, 59, 59, 4, 365))
spot_test( 447549467, (2014, 3, 7, 23, 17, 47, 4, 66))
spot_test( -940984933, (1970, 3, 7, 23, 17, 47, 5, 66))
spot_test(-1072915199, (1966, 1, 1, 0, 0, 1, 5, 1))
spot_test(-1072915200, (1966, 1, 1, 0, 0, 0, 5, 1))
spot_test(-1072915201, (1965, 12, 31, 23, 59, 59, 4, 365))
# fmt: on
t1 = time.time()
time.sleep(2)
t2 = time.time()
print(abs(time.ticks_diff(t1, t2) - 2) <= 1)
t1 = time.ticks_ms()
time.sleep_ms(50)
t2 = time.ticks_ms()
print(abs(time.ticks_diff(t1, t2) - 50) <= 1)
t1 = time.ticks_us()
time.sleep_us(1000)
t2 = time.ticks_us()
print(time.ticks_diff(t1, t2) < 1500)
print(time.ticks_diff(time.ticks_cpu(), time.ticks_cpu()) < 16384)
| mit |
cbrewster/servo | tests/wpt/web-platform-tests/xhr/resources/authentication.py | 10 | 1115 | def main(request, response):
session_user = request.auth.username
session_pass = request.auth.password
expected_user_name = request.headers.get("X-User", None)
token = expected_user_name
if session_user is None and session_pass is None:
if token is not None and request.server.stash.take(token) is not None:
return 'FAIL (did not authorize)'
else:
if token is not None:
request.server.stash.put(token, "1")
status = (401, 'Unauthorized')
headers = [('WWW-Authenticate', 'Basic realm="test"'),
('XHR-USER', expected_user_name),
('SES-USER', session_user)]
return status, headers, 'FAIL (should be transparent)'
else:
if request.server.stash.take(token) == "1":
challenge = "DID"
else:
challenge = "DID-NOT"
headers = [('XHR-USER', expected_user_name),
('SES-USER', session_user),
("X-challenge", challenge)]
return headers, session_user + "\n" + session_pass
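# Editorial illustration (not part of the original handler): a client is
# expected to hit this resource twice, e.g.
#   1) GET with "X-User: alice" and no credentials -> 401 plus WWW-Authenticate
#   2) GET with "X-User: alice" and "Authorization: Basic ..." -> "alice\n<pass>"
# The stash entry keyed by X-User records whether the 401 challenge was issued.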
| mpl-2.0 |
MQQiang/kbengine | kbe/src/lib/python/Lib/test/test_marshal.py | 72 | 15097 | from test import support
import array
import io
import marshal
import sys
import unittest
import os
import types
class HelperMixin:
def helper(self, sample, *extra):
new = marshal.loads(marshal.dumps(sample, *extra))
self.assertEqual(sample, new)
try:
with open(support.TESTFN, "wb") as f:
marshal.dump(sample, f, *extra)
with open(support.TESTFN, "rb") as f:
new = marshal.load(f)
self.assertEqual(sample, new)
finally:
support.unlink(support.TESTFN)
class IntTestCase(unittest.TestCase, HelperMixin):
def test_ints(self):
# Test a range of Python ints larger than the machine word size.
n = sys.maxsize ** 2
while n:
for expected in (-n, n):
self.helper(expected)
n = n >> 1
def test_bool(self):
for b in (True, False):
self.helper(b)
class FloatTestCase(unittest.TestCase, HelperMixin):
def test_floats(self):
# Test a few floats
small = 1e-25
n = sys.maxsize * 3.7e250
while n > small:
for expected in (-n, n):
self.helper(float(expected))
n /= 123.4567
f = 0.0
s = marshal.dumps(f, 2)
got = marshal.loads(s)
self.assertEqual(f, got)
# and with version <= 1 (floats marshalled differently then)
s = marshal.dumps(f, 1)
got = marshal.loads(s)
self.assertEqual(f, got)
n = sys.maxsize * 3.7e-250
while n < small:
for expected in (-n, n):
f = float(expected)
self.helper(f)
self.helper(f, 1)
n *= 123.4567
class StringTestCase(unittest.TestCase, HelperMixin):
def test_unicode(self):
for s in ["", "Andr\xe8 Previn", "abc", " "*10000]:
self.helper(marshal.loads(marshal.dumps(s)))
def test_string(self):
for s in ["", "Andr\xe8 Previn", "abc", " "*10000]:
self.helper(s)
def test_bytes(self):
for s in [b"", b"Andr\xe8 Previn", b"abc", b" "*10000]:
self.helper(s)
class ExceptionTestCase(unittest.TestCase):
def test_exceptions(self):
new = marshal.loads(marshal.dumps(StopIteration))
self.assertEqual(StopIteration, new)
class CodeTestCase(unittest.TestCase):
def test_code(self):
co = ExceptionTestCase.test_exceptions.__code__
new = marshal.loads(marshal.dumps(co))
self.assertEqual(co, new)
def test_many_codeobjects(self):
# Issue2957: bad recursion count on code objects
count = 5000 # more than MAX_MARSHAL_STACK_DEPTH
codes = (ExceptionTestCase.test_exceptions.__code__,) * count
marshal.loads(marshal.dumps(codes))
def test_different_filenames(self):
co1 = compile("x", "f1", "exec")
co2 = compile("y", "f2", "exec")
co1, co2 = marshal.loads(marshal.dumps((co1, co2)))
self.assertEqual(co1.co_filename, "f1")
self.assertEqual(co2.co_filename, "f2")
@support.cpython_only
def test_same_filename_used(self):
s = """def f(): pass\ndef g(): pass"""
co = compile(s, "myfile", "exec")
co = marshal.loads(marshal.dumps(co))
for obj in co.co_consts:
if isinstance(obj, types.CodeType):
self.assertIs(co.co_filename, obj.co_filename)
class ContainerTestCase(unittest.TestCase, HelperMixin):
d = {'astring': '[email protected]',
'afloat': 7283.43,
'anint': 2**20,
'ashortlong': 2,
'alist': ['.zyx.41'],
'atuple': ('.zyx.41',)*10,
'aboolean': False,
'aunicode': "Andr\xe8 Previn"
}
def test_dict(self):
self.helper(self.d)
def test_list(self):
self.helper(list(self.d.items()))
def test_tuple(self):
self.helper(tuple(self.d.keys()))
def test_sets(self):
for constructor in (set, frozenset):
self.helper(constructor(self.d.keys()))
class BufferTestCase(unittest.TestCase, HelperMixin):
def test_bytearray(self):
b = bytearray(b"abc")
self.helper(b)
new = marshal.loads(marshal.dumps(b))
self.assertEqual(type(new), bytes)
def test_memoryview(self):
b = memoryview(b"abc")
self.helper(b)
new = marshal.loads(marshal.dumps(b))
self.assertEqual(type(new), bytes)
def test_array(self):
a = array.array('B', b"abc")
new = marshal.loads(marshal.dumps(a))
self.assertEqual(new, b"abc")
class BugsTestCase(unittest.TestCase):
def test_bug_5888452(self):
# Simple-minded check for SF 588452: Debug build crashes
marshal.dumps([128] * 1000)
def test_patch_873224(self):
self.assertRaises(Exception, marshal.loads, '0')
self.assertRaises(Exception, marshal.loads, 'f')
self.assertRaises(Exception, marshal.loads, marshal.dumps(2**65)[:-1])
def test_version_argument(self):
# Python 2.4.0 crashes for any call to marshal.dumps(x, y)
self.assertEqual(marshal.loads(marshal.dumps(5, 0)), 5)
self.assertEqual(marshal.loads(marshal.dumps(5, 1)), 5)
def test_fuzz(self):
# simple test that it's at least not *totally* trivial to
# crash from bad marshal data
for c in [chr(i) for i in range(256)]:
try:
marshal.loads(c)
except Exception:
pass
def test_loads_2x_code(self):
s = b'c' + (b'X' * 4*4) + b'{' * 2**20
self.assertRaises(ValueError, marshal.loads, s)
def test_loads_recursion(self):
s = b'c' + (b'X' * 4*5) + b'{' * 2**20
self.assertRaises(ValueError, marshal.loads, s)
def test_recursion_limit(self):
# Create a deeply nested structure.
head = last = []
# The max stack depth should match the value in Python/marshal.c.
if os.name == 'nt' and hasattr(sys, 'gettotalrefcount'):
MAX_MARSHAL_STACK_DEPTH = 1500
else:
MAX_MARSHAL_STACK_DEPTH = 2000
for i in range(MAX_MARSHAL_STACK_DEPTH - 2):
last.append([0])
last = last[-1]
# Verify we don't blow out the stack with dumps/load.
data = marshal.dumps(head)
new_head = marshal.loads(data)
# Don't use == to compare objects, it can exceed the recursion limit.
self.assertEqual(len(new_head), len(head))
self.assertEqual(len(new_head[0]), len(head[0]))
self.assertEqual(len(new_head[-1]), len(head[-1]))
last.append([0])
self.assertRaises(ValueError, marshal.dumps, head)
def test_exact_type_match(self):
# Former bug:
# >>> class Int(int): pass
# >>> type(loads(dumps(Int())))
# <type 'int'>
for typ in (int, float, complex, tuple, list, dict, set, frozenset):
# Note: str subclasses are not tested because they get handled
# by marshal's routines for objects supporting the buffer API.
subtyp = type('subtyp', (typ,), {})
self.assertRaises(ValueError, marshal.dumps, subtyp())
# Issue #1792 introduced a change in how marshal increases the size of its
# internal buffer; this test ensures that the new code is exercised.
def test_large_marshal(self):
size = int(1e6)
testString = 'abc' * size
marshal.dumps(testString)
def test_invalid_longs(self):
# Issue #7019: marshal.loads shouldn't produce unnormalized PyLongs
invalid_string = b'l\x02\x00\x00\x00\x00\x00\x00\x00'
self.assertRaises(ValueError, marshal.loads, invalid_string)
def test_multiple_dumps_and_loads(self):
# Issue 12291: marshal.load() should be callable multiple times
# with interleaved data written by non-marshal code
# Adapted from a patch by Engelbert Gruber.
data = (1, 'abc', b'def', 1.0, (2, 'a', ['b', b'c']))
for interleaved in (b'', b'0123'):
ilen = len(interleaved)
positions = []
try:
with open(support.TESTFN, 'wb') as f:
for d in data:
marshal.dump(d, f)
if ilen:
f.write(interleaved)
positions.append(f.tell())
with open(support.TESTFN, 'rb') as f:
for i, d in enumerate(data):
self.assertEqual(d, marshal.load(f))
if ilen:
f.read(ilen)
self.assertEqual(positions[i], f.tell())
finally:
support.unlink(support.TESTFN)
def test_loads_reject_unicode_strings(self):
# Issue #14177: marshal.loads() should not accept unicode strings
unicode_string = 'T'
self.assertRaises(TypeError, marshal.loads, unicode_string)
def test_bad_reader(self):
class BadReader(io.BytesIO):
def readinto(self, buf):
n = super().readinto(buf)
if n is not None and n > 4:
n += 10**6
return n
for value in (1.0, 1j, b'0123456789', '0123456789'):
self.assertRaises(ValueError, marshal.load,
BadReader(marshal.dumps(value)))
def _test_eof(self):
data = marshal.dumps(("hello", "dolly", None))
for i in range(len(data)):
self.assertRaises(EOFError, marshal.loads, data[0: i])
LARGE_SIZE = 2**31
pointer_size = 8 if sys.maxsize > 0xFFFFFFFF else 4
class NullWriter:
def write(self, s):
pass
@unittest.skipIf(LARGE_SIZE > sys.maxsize, "test cannot run on 32-bit systems")
class LargeValuesTestCase(unittest.TestCase):
def check_unmarshallable(self, data):
self.assertRaises(ValueError, marshal.dump, data, NullWriter())
@support.bigmemtest(size=LARGE_SIZE, memuse=2, dry_run=False)
def test_bytes(self, size):
self.check_unmarshallable(b'x' * size)
@support.bigmemtest(size=LARGE_SIZE, memuse=2, dry_run=False)
def test_str(self, size):
self.check_unmarshallable('x' * size)
@support.bigmemtest(size=LARGE_SIZE, memuse=pointer_size + 1, dry_run=False)
def test_tuple(self, size):
self.check_unmarshallable((None,) * size)
@support.bigmemtest(size=LARGE_SIZE, memuse=pointer_size + 1, dry_run=False)
def test_list(self, size):
self.check_unmarshallable([None] * size)
@support.bigmemtest(size=LARGE_SIZE,
memuse=pointer_size*12 + sys.getsizeof(LARGE_SIZE-1),
dry_run=False)
def test_set(self, size):
self.check_unmarshallable(set(range(size)))
@support.bigmemtest(size=LARGE_SIZE,
memuse=pointer_size*12 + sys.getsizeof(LARGE_SIZE-1),
dry_run=False)
def test_frozenset(self, size):
self.check_unmarshallable(frozenset(range(size)))
@support.bigmemtest(size=LARGE_SIZE, memuse=2, dry_run=False)
def test_bytearray(self, size):
self.check_unmarshallable(bytearray(size))
def CollectObjectIDs(ids, obj):
"""Collect object ids seen in a structure"""
if id(obj) in ids:
return
ids.add(id(obj))
if isinstance(obj, (list, tuple, set, frozenset)):
for e in obj:
CollectObjectIDs(ids, e)
elif isinstance(obj, dict):
for k, v in obj.items():
CollectObjectIDs(ids, k)
CollectObjectIDs(ids, v)
return len(ids)
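# Editorial illustration (not part of the original test): because ids is a set
# of object identities, shared sub-objects are only counted once, e.g.
#   shared = [1, 2]
#   CollectObjectIDs(set(), (shared, shared))  # -> 4 (the tuple, the list, 1, 2)
# which is what lets helper3 below detect whether marshal preserved sharing.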
class InstancingTestCase(unittest.TestCase, HelperMixin):
intobj = 123321
floatobj = 1.2345
strobj = "abcde"*3
dictobj = {"hello":floatobj, "goodbye":floatobj, floatobj:"hello"}
def helper3(self, rsample, recursive=False, simple=False):
#we have two instances
sample = (rsample, rsample)
n0 = CollectObjectIDs(set(), sample)
s3 = marshal.dumps(sample, 3)
n3 = CollectObjectIDs(set(), marshal.loads(s3))
#same number of instances generated
self.assertEqual(n3, n0)
if not recursive:
#can compare with version 2
s2 = marshal.dumps(sample, 2)
n2 = CollectObjectIDs(set(), marshal.loads(s2))
#old format generated more instances
self.assertGreater(n2, n0)
#if complex objects are in there, old format is larger
if not simple:
self.assertGreater(len(s2), len(s3))
else:
self.assertGreaterEqual(len(s2), len(s3))
def testInt(self):
self.helper(self.intobj)
self.helper3(self.intobj, simple=True)
def testFloat(self):
self.helper(self.floatobj)
self.helper3(self.floatobj)
def testStr(self):
self.helper(self.strobj)
self.helper3(self.strobj)
def testDict(self):
self.helper(self.dictobj)
self.helper3(self.dictobj)
def testModule(self):
with open(__file__, "rb") as f:
code = f.read()
if __file__.endswith(".py"):
code = compile(code, __file__, "exec")
self.helper(code)
self.helper3(code)
def testRecursion(self):
d = dict(self.dictobj)
d["self"] = d
self.helper3(d, recursive=True)
l = [self.dictobj]
l.append(l)
self.helper3(l, recursive=True)
class CompatibilityTestCase(unittest.TestCase):
def _test(self, version):
with open(__file__, "rb") as f:
code = f.read()
if __file__.endswith(".py"):
code = compile(code, __file__, "exec")
data = marshal.dumps(code, version)
marshal.loads(data)
def test0To3(self):
self._test(0)
def test1To3(self):
self._test(1)
def test2To3(self):
self._test(2)
def test3To3(self):
self._test(3)
class InterningTestCase(unittest.TestCase, HelperMixin):
strobj = "this is an interned string"
strobj = sys.intern(strobj)
def testIntern(self):
s = marshal.loads(marshal.dumps(self.strobj))
self.assertEqual(s, self.strobj)
self.assertEqual(id(s), id(self.strobj))
s2 = sys.intern(s)
self.assertEqual(id(s2), id(s))
def testNoIntern(self):
s = marshal.loads(marshal.dumps(self.strobj, 2))
self.assertEqual(s, self.strobj)
self.assertNotEqual(id(s), id(self.strobj))
s2 = sys.intern(s)
self.assertNotEqual(id(s2), id(s))
def test_main():
support.run_unittest(IntTestCase,
FloatTestCase,
StringTestCase,
CodeTestCase,
ContainerTestCase,
ExceptionTestCase,
BufferTestCase,
BugsTestCase,
LargeValuesTestCase,
)
if __name__ == "__main__":
test_main()
| lgpl-3.0 |
minhphung171093/GreenERP_V8 | openerp/addons/edi/edi_service.py | 377 | 2517 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import openerp
_logger = logging.getLogger(__name__)
# TODO this is not needed anymore:
# - the exposed new service just forward to the model service
# - the service is called by the web controller, which can
# now directly call into openerp as the web server is always
# embedded in openerp.
def _edi_dispatch(db_name, method_name, *method_args):
try:
registry = openerp.modules.registry.RegistryManager.get(db_name)
assert registry, 'Unknown database %s' % db_name
with registry.cursor() as cr:
edi = registry['edi.edi']
return getattr(edi, method_name)(cr, *method_args)
except Exception, e:
_logger.exception('Failed to execute EDI method %s with args %r.',
method_name, method_args)
raise
def exp_import_edi_document(db_name, uid, passwd, edi_document, context=None):
return _edi_dispatch(db_name, 'import_edi', uid, edi_document, None)
def exp_import_edi_url(db_name, uid, passwd, edi_url, context=None):
return _edi_dispatch(db_name, 'import_edi', uid, None, edi_url)
def dispatch(method, params):
if method in ['import_edi_document', 'import_edi_url']:
(db, uid, passwd) = params[0:3]
openerp.service.security.check(db, uid, passwd)
else:
raise KeyError("Method not found: %s." % method)
fn = globals()['exp_' + method]
return fn(*params)
openerp.service.wsgi_server.register_rpc_endpoint('edi', dispatch)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ChenJunor/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/xlwt3/UnicodeUtils.py | 46 | 3408 | '''
From BIFF8 on, strings are always stored using UTF-16LE text encoding. The
character array is a sequence of 16-bit values. Additionally, it is
possible to use a compressed format, which omits the high bytes of all
characters, if they are all zero.
The following tables describe the standard format of the entire string, but
in many records the strings differ from this format. This will be mentioned
separately. It is possible (but not required) to store Rich-Text formatting
information and Asian phonetic information inside a Unicode string. This
results in four different ways to store a string. The character array
is not zero-terminated.
The string consists of the character count (as usual an 8-bit value or
a 16-bit value), option flags, the character array and optional formatting
information. If the string is empty, sometimes the option flags field will
not occur. This is mentioned at the respective place.
Offset Size Contents
0 1 or 2 Length of the string (character count, ln)
1 or 2 1 Option flags:
Bit Mask Contents
0 01H Character compression (ccompr):
0 = Compressed (8-bit characters)
1 = Uncompressed (16-bit characters)
2 04H Asian phonetic settings (phonetic):
0 = Does not contain Asian phonetic settings
1 = Contains Asian phonetic settings
3 08H Rich-Text settings (richtext):
0 = Does not contain Rich-Text settings
1 = Contains Rich-Text settings
[2 or 3] 2 (optional, only if richtext=1) Number of Rich-Text formatting runs (rt)
[var.] 4 (optional, only if phonetic=1) Size of Asian phonetic settings block (in bytes, sz)
var. ln or
2·ln Character array (8-bit characters or 16-bit characters, dependent on ccompr)
[var.] 4·rt (optional, only if richtext=1) List of rt formatting runs
[var.] sz (optional, only if phonetic=1) Asian Phonetic Settings Block
'''
from struct import pack
def upack2(s, encoding='ascii'):
# If not unicode, make it so.
if isinstance(s, str):
us = s
else:
us = str(s, encoding)
# Limit is based on number of content characters
# (not on number of bytes in packed result)
len_us = len(us)
if len_us > 65535:
raise Exception('String longer than 65535 characters')
try:
encs = us.encode('latin1')
# Success here means all chars are in U+0000 to U+00FF
# inclusive, meaning that we can use "compressed format".
flag = 0
except UnicodeEncodeError:
encs = us.encode('utf_16_le')
flag = 1
return pack('<HB', len_us, flag) + encs
def upack1(s, encoding='ascii'):
# Same as upack2(), but with a one-byte length field.
if isinstance(s, str):
us = s
else:
us = str(s, encoding)
len_us = len(us)
if len_us > 255:
raise Exception('String longer than 255 characters')
try:
encs = us.encode('latin1')
flag = 0
except UnicodeEncodeError:
encs = us.encode('utf_16_le')
flag = 1
return pack('<BB', len_us, flag) + encs
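# Editorial illustration (not part of the original module): how the packers
# above map onto the BIFF8 layout described in the module docstring.
#   upack2('abc')     -> b'\x03\x00\x00abc'        (16-bit length, flag 0, 8-bit chars)
#   upack2('\u20ac')  -> b'\x01\x00\x01\xac\x20'   (flag 1, UTF-16LE character data)
#   upack1('abc')     -> b'\x03\x00abc'            (8-bit length field instead)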
| apache-2.0 |
mohamed--abdel-maksoud/chromium.src | build/extract_from_cab.py | 164 | 2059 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Extracts a single file from a CAB archive."""
import os
import shutil
import subprocess
import sys
import tempfile
def run_quiet(*args):
"""Run 'expand' suppressing noisy output. Returns returncode from process."""
popen = subprocess.Popen(args, stdout=subprocess.PIPE)
out, _ = popen.communicate()
if popen.returncode:
# expand emits errors to stdout, so if we fail, then print that out.
print out
return popen.returncode
def main():
if len(sys.argv) != 4:
print 'Usage: extract_from_cab.py cab_path archived_file output_dir'
return 1
[cab_path, archived_file, output_dir] = sys.argv[1:]
# Expand.exe does its work in a fixed-named temporary directory created within
# the given output directory. This is a problem for concurrent extractions, so
# create a unique temp dir within the desired output directory to work around
# this limitation.
temp_dir = tempfile.mkdtemp(dir=output_dir)
try:
# Invoke the Windows expand utility to extract the file.
level = run_quiet('expand', cab_path, '-F:' + archived_file, temp_dir)
if level == 0:
# Move the output file into place, preserving expand.exe's behavior of
# paving over any preexisting file.
output_file = os.path.join(output_dir, archived_file)
try:
os.remove(output_file)
except OSError:
pass
os.rename(os.path.join(temp_dir, archived_file), output_file)
finally:
shutil.rmtree(temp_dir, True)
if level != 0:
return level
# The expand utility preserves the modification date and time of the archived
# file. Touch the extracted file. This helps build systems that compare the
# modification times of input and output files to determine whether to do an
# action.
os.utime(os.path.join(output_dir, archived_file), None)
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
tsnoam/Flexget | flexget/plugins/modify/headers.py | 8 | 2491 | from __future__ import unicode_literals, division, absolute_import
import logging
import urllib2
from flexget import plugin
from flexget.event import event
log = logging.getLogger('headers')
class HTTPHeadersProcessor(urllib2.BaseHandler):
# run first
handler_order = urllib2.HTTPHandler.handler_order - 10
def __init__(self, headers=None):
if headers:
self.headers = headers
else:
self.headers = {}
def http_request(self, request):
for name, value in self.headers.iteritems():
if not request.has_header(name):
log.debug('Adding %s: %s' % (name, value))
request.add_unredirected_header(name.capitalize(), value.strip())
else:
log.debug('Header "%s" exists with value "%s"' % (name, request.get_header(name)))
return request
def http_response(self, request, response):
return response
https_request = http_request
https_response = http_response
class PluginHeaders(object):
"""Allow setting up any headers in all requests (which use urllib2)
Example:
headers:
cookie: uid=<YOUR UID>; pass=<YOUR PASS>
"""
schema = {'type': 'object', 'additionalProperties': {'type': 'string'}}
@plugin.priority(130)
def on_task_start(self, task, config):
"""Task starting"""
# Set the headers for this task's request session
log.debug('headers to add: %s' % config)
if task.requests.headers:
task.requests.headers.update(config)
else:
task.requests.headers = config
# Set the headers in urllib2 for backwards compatibility
if urllib2._opener:
log.debug('Adding HTTPHeadersProcessor to default opener')
urllib2._opener.add_handler(HTTPHeadersProcessor(config))
else:
log.debug('Creating new opener and installing it')
opener = urllib2.build_opener(HTTPHeadersProcessor(config))
urllib2.install_opener(opener)
def on_task_exit(self, task, config):
"""Task exiting, remove additions"""
if urllib2._opener:
log.debug('Removing urllib2 default opener')
# TODO: this uninstalls all other handlers as well, but does it matter?
urllib2.install_opener(None)
on_task_abort = on_task_exit
@event('plugin.register')
def register_plugin():
plugin.register(PluginHeaders, 'headers', api_ver=2)
| mit |
rahulunair/nova | nova/debugger.py | 13 | 2100 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(markmc): this is imported before monkey patching in nova.cmd
# so we avoid extra imports here
import sys
def enabled():
return ('--remote_debug-host' in sys.argv and
'--remote_debug-port' in sys.argv)
def init():
import nova.conf
CONF = nova.conf.CONF
# NOTE(markmc): gracefully handle the CLI options not being registered
if 'remote_debug' not in CONF:
return
if not (CONF.remote_debug.host and CONF.remote_debug.port):
return
from nova.i18n import _LW
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
LOG.debug('Listening on %(host)s:%(port)s for debug connection',
{'host': CONF.remote_debug.host,
'port': CONF.remote_debug.port})
try:
from pydev import pydevd
except ImportError:
import pydevd
pydevd.settrace(host=CONF.remote_debug.host,
port=CONF.remote_debug.port,
stdoutToServer=False,
stderrToServer=False)
LOG.warning(_LW('WARNING: Using the remote debug option changes how '
'Nova uses the eventlet library to support async IO. This '
'could result in failures that do not occur under normal '
'operation. Use at your own risk.'))
| apache-2.0 |
bmya/tkobr-addons | tko_point_of_sale_discount_cards/__init__.py | 3 | 1096 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# Thinkopen Brasil
# Copyright (C) Thinkopen Solutions Brasil (<http://www.tkobr.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import point_of_sale | agpl-3.0 |
ghislainp/iris | docs/iris/example_code/General/custom_file_loading.py | 4 | 11862 | """
Loading a cube from a custom file format
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This example shows how a custom text file can be loaded using the standard Iris load mechanism.
The first stage in the process is to define an Iris :class:`FormatSpecification <iris.io.format_picker.FormatSpecification>` for the file format.
To create a format specification we need to define the following:
* format_name - Some text that describes the format specification we are creating
* file_element - FileElement object describing the element which identifies
this FormatSpecification.
Possible values are:
``iris.io.format_picker.MagicNumber(n, o)`` - The n bytes from the file \
at offset o.
``iris.io.format_picker.FileExtension()`` - The file's extension.
``iris.io.format_picker.LeadingLine()`` - The first line of the file.
* file_element_value - The value that the file_element should take if a file matches this FormatSpecification
* handler (optional) - A generator function that will be called when the file specification has been identified. This function is
provided by the user and provides the means to parse the whole file. If no handler function is provided, then identification
is still possible without any handling.
The handler function must define the following arguments:
* list of filenames to process
* callback function - An optional function to filter/alter the Iris cubes returned
The handler function must be defined as a generator which yields each cube as it is produced.
* priority (optional) - Integer giving a priority for considering this specification where higher priority means sooner consideration
In the following example, the function :func:`load_NAME_III` has been defined to handle the loading of the raw data from the custom file format.
This function is called from :func:`NAME_to_cube` which uses this data to create and yield Iris cubes.
In the ``main()`` function the filenames are loaded via the ``iris.load_cube`` function which automatically
invokes the ``FormatSpecification`` we defined. The cube returned from the load function is then used to produce a plot.
"""
import datetime
import matplotlib.pyplot as plt
import numpy as np
import iris
import iris.coords as icoords
import iris.coord_systems as icoord_systems
import iris.fileformats
import iris.io.format_picker as format_picker
import iris.plot as iplt
UTC_format = '%H%M%Z %d/%m/%Y'
FLOAT_HEADERS = ['X grid origin', 'Y grid origin',
'X grid resolution', 'Y grid resolution']
INT_HEADERS = ['X grid size', 'Y grid size', 'Number of fields']
DATE_HEADERS = ['Run time', 'Start of release', 'End of release']
COLUMN_NAMES = ['species_category', 'species', 'cell_measure', 'quantity',
'unit', 'z_level', 'time']
def load_NAME_III(filename):
"""
Loads the Met Office's NAME III grid output files returning headers, column definitions and data arrays as 3 separate lists.
"""
# Loading a file gives a generator of lines which can be progressed using
# the next() function. This will come in handy as we wish to progress
# through the file line by line.
with open(filename) as file_handle:
# Define a dictionary which can hold the header metadata for this file.
headers = {}
# Skip the NAME header of the file which looks something like
# 'NAME III (version X.X.X)'.
next(file_handle)
# Read the next 16 lines of header information, putting the form
# "header name: header value" into a dictionary.
for _ in range(16):
header_name, header_value = next(file_handle).split(':')
# Strip off any spurious space characters in the header name and
# value.
header_name = header_name.strip()
header_value = header_value.strip()
# Cast some headers into floats or integers if they match a given
# header name.
if header_name in FLOAT_HEADERS:
header_value = float(header_value)
elif header_name in INT_HEADERS:
header_value = int(header_value)
elif header_name in DATE_HEADERS:
# convert the time to python datetimes
header_value = datetime.datetime.strptime(header_value,
UTC_format)
headers[header_name] = header_value
# Skip the next blank line in the file.
next(file_handle)
# Read the next 7 lines of column definitions.
column_headings = {}
for column_header_name in COLUMN_NAMES:
column_headings[column_header_name] = [
col.strip() for col in next(file_handle).split(',')
][:-1]
# Convert the time to python datetimes.
new_time_column_header = []
for i, t in enumerate(column_headings['time']):
# The first 4 columns aren't time at all, so don't convert them to
# datetimes.
if i >= 4:
t = datetime.datetime.strptime(t, UTC_format)
new_time_column_header.append(t)
column_headings['time'] = new_time_column_header
# Skip the blank line after the column headers.
next(file_handle)
# Make a list of data arrays to hold the data for each column.
data_shape = (headers['Y grid size'], headers['X grid size'])
data_arrays = [np.zeros(data_shape, dtype=np.float32)
for i in range(headers['Number of fields'])]
# Iterate over the remaining lines which represent the data in a column
# form.
for line in file_handle:
# Split the line by comma, removing the last empty column caused by
# the trailing comma.
vals = line.split(',')[:-1]
# Cast the x and y grid positions to floats and convert them to
# zero based indices (the numbers are 1 based grid positions where
# 0.5 represents half a grid point.)
x = float(vals[0]) - 1.5
y = float(vals[1]) - 1.5
# Populate the data arrays (i.e. all columns but the leading 4).
for i, data_array in enumerate(data_arrays):
data_array[y, x] = float(vals[i + 4])
return headers, column_headings, data_arrays
def NAME_to_cube(filenames, callback):
"""Returns a generator of cubes given a list of filenames and a callback."""
for filename in filenames:
header, column_headings, data_arrays = load_NAME_III(filename)
for i, data_array in enumerate(data_arrays):
            # Turn the dictionary of column headings (each mapping to a list of
            # per-field values) into a dictionary of headings for just this
            # field. Ignore the first 4 columns of grid position (the data was
            # located with the data array).
field_headings = dict((k, v[i + 4])
for k, v in column_headings.items())
            # make a cube
cube = iris.cube.Cube(data_array)
# define the name and unit
name = ('%s %s' % (field_headings['species'], field_headings['quantity'])).upper().replace(' ', '_')
cube.rename(name)
            # Some units are badly encoded in the file; fix this by putting a
            # space in between ('gs' -> 'g s'). If 'gs' is not found, the
            # string is returned unchanged.
cube.units = field_headings['unit'].replace('gs', 'g s')
# define and add the singular coordinates of the field (flight level, time etc.)
cube.add_aux_coord(icoords.AuxCoord(field_headings['z_level'], long_name='flight_level', units='1'))
# define the time unit and use it to serialise the datetime for the time coordinate
time_unit = iris.unit.Unit('hours since epoch', calendar=iris.unit.CALENDAR_GREGORIAN)
time_coord = icoords.AuxCoord(time_unit.date2num(field_headings['time']), standard_name='time', units=time_unit)
cube.add_aux_coord(time_coord)
# build a coordinate system which can be referenced by latitude and longitude coordinates
lat_lon_coord_system = icoord_systems.GeogCS(6371229)
# build regular latitude and longitude coordinates which have bounds
start = header['X grid origin'] + header['X grid resolution']
step = header['X grid resolution']
count = header['X grid size']
pts = start + np.arange(count, dtype=np.float32) * step
lon_coord = icoords.DimCoord(pts, standard_name='longitude', units='degrees', coord_system=lat_lon_coord_system)
lon_coord.guess_bounds()
start = header['Y grid origin'] + header['Y grid resolution']
step = header['Y grid resolution']
count = header['Y grid size']
pts = start + np.arange(count, dtype=np.float32) * step
lat_coord = icoords.DimCoord(pts, standard_name='latitude', units='degrees', coord_system=lat_lon_coord_system)
lat_coord.guess_bounds()
# add the latitude and longitude coordinates to the cube, with mappings to data dimensions
cube.add_dim_coord(lat_coord, 0)
cube.add_dim_coord(lon_coord, 1)
# implement standard iris callback capability. Although callbacks are not used in this example, the standard
# mechanism for a custom loader to implement a callback is shown:
cube = iris.io.run_callback(callback, cube, [header, field_headings, data_array], filename)
# yield the cube created (the loop will continue when the next() element is requested)
yield cube
# Create a format_picker specification of the NAME file format giving it a
# priority greater than the built in NAME loader.
_NAME_III_spec = format_picker.FormatSpecification(
'Name III',
format_picker.LeadingLine(),
lambda line: line.startswith(b"NAME III"),
NAME_to_cube,
priority=6)
# Register the NAME loader with iris
iris.fileformats.FORMAT_AGENT.add_spec(_NAME_III_spec)
# ---------------------------------------------
# | Using the new loader |
# ---------------------------------------------
def main():
fname = iris.sample_data_path('NAME_output.txt')
boundary_volc_ash_constraint = iris.Constraint('VOLCANIC_ASH_AIR_CONCENTRATION', flight_level='From FL000 - FL200')
# Callback shown as None to illustrate where a cube-level callback function would be used if required
cube = iris.load_cube(fname, boundary_volc_ash_constraint, callback=None)
# draw contour levels for the data (the top level is just a catch-all)
levels = (0.0002, 0.002, 0.004, 1e10)
cs = iplt.contourf(cube, levels=levels,
colors=('#80ffff', '#939598', '#e00404'),
)
# draw a black outline at the lowest contour to highlight affected areas
iplt.contour(cube, levels=(levels[0], 100),
colors='black')
# set an extent and a background image for the map
ax = plt.gca()
ax.set_extent((-90, 20, 20, 75))
ax.stock_img('ne_shaded')
# make a legend, with custom labels, for the coloured contour set
artists, _ = cs.legend_elements()
labels = [
r'$%s < x \leq %s$' % (levels[0], levels[1]),
r'$%s < x \leq %s$' % (levels[1], levels[2]),
r'$x > %s$' % levels[2]]
ax.legend(artists, labels, title='Ash concentration / g m-3', loc='upper left')
time = cube.coord('time')
time_date = time.units.num2date(time.points[0]).strftime(UTC_format)
plt.title('Volcanic ash concentration forecast\nvalid at %s' % time_date)
iplt.show()
if __name__ == '__main__':
main()
| gpl-3.0 |
uclouvain/osis_louvain | base/forms/utils/uppercase.py | 1 | 1402 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
def convert_to_uppercase(string_value):
if string_value:
return string_value.upper()
return string_value
| agpl-3.0 |
Smarsh/django | django/core/management/commands/testserver.py | 19 | 1661 | from django.core.management.base import BaseCommand
from optparse import make_option
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
make_option('--addrport', action='store', dest='addrport',
type='string', default='',
help='port number or ipaddr:port to run the server on'),
)
help = 'Runs a development server with data from the given fixture(s).'
args = '[fixture ...]'
requires_model_validation = False
def handle(self, *fixture_labels, **options):
from django.core.management import call_command
from django.db import connection
verbosity = int(options.get('verbosity', 1))
interactive = options.get('interactive', True)
addrport = options.get('addrport')
# Create a test database.
db_name = connection.creation.create_test_db(verbosity=verbosity, autoclobber=not interactive)
# Import the fixture data into the test database.
call_command('loaddata', *fixture_labels, **{'verbosity': verbosity})
# Run the development server. Turn off auto-reloading because it causes
# a strange error -- it causes this handle() method to be called
# multiple times.
shutdown_message = '\nServer stopped.\nNote that the test database, %r, has not been deleted. You can explore it on your own.' % db_name
call_command('runserver', addrport=addrport, shutdown_message=shutdown_message, use_reloader=False)
| bsd-3-clause |
COOLMASON/ThinkStats2 | code/analytic.py | 69 | 6265 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import math
import numpy as np
import pandas
import nsfg
import thinkplot
import thinkstats2
def ParetoMedian(xmin, alpha):
"""Computes the median of a Pareto distribution."""
return xmin * pow(2, 1/alpha)
def MakeExpoCdf():
"""Generates a plot of the exponential CDF."""
thinkplot.PrePlot(3)
for lam in [2.0, 1, 0.5]:
xs, ps = thinkstats2.RenderExpoCdf(lam, 0, 3.0, 50)
label = r'$\lambda=%g$' % lam
thinkplot.Plot(xs, ps, label=label)
thinkplot.Save(root='analytic_expo_cdf',
title='Exponential CDF',
xlabel='x',
ylabel='CDF')
def ReadBabyBoom(filename='babyboom.dat'):
"""Reads the babyboom data.
filename: string
returns: DataFrame
"""
var_info = [
('time', 1, 8, int),
('sex', 9, 16, int),
('weight_g', 17, 24, int),
('minutes', 25, 32, int),
]
columns = ['name', 'start', 'end', 'type']
variables = pandas.DataFrame(var_info, columns=columns)
variables.end += 1
dct = thinkstats2.FixedWidthVariables(variables, index_base=1)
df = dct.ReadFixedWidth(filename, skiprows=59)
return df
def MakeBabyBoom():
"""Plot CDF of interarrival time on log and linear scales.
"""
# compute the interarrival times
df = ReadBabyBoom()
diffs = df.minutes.diff()
cdf = thinkstats2.Cdf(diffs, label='actual')
thinkplot.PrePlot(cols=2)
thinkplot.Cdf(cdf)
thinkplot.Config(xlabel='minutes',
ylabel='CDF',
legend=False)
thinkplot.SubPlot(2)
thinkplot.Cdf(cdf, complement=True)
thinkplot.Config(xlabel='minutes',
ylabel='CCDF',
yscale='log',
legend=False)
thinkplot.Save(root='analytic_interarrivals',
legend=False)
def MakeParetoCdf():
"""Generates a plot of the Pareto CDF."""
xmin = 0.5
thinkplot.PrePlot(3)
for alpha in [2.0, 1.0, 0.5]:
xs, ps = thinkstats2.RenderParetoCdf(xmin, alpha, 0, 10.0, n=100)
thinkplot.Plot(xs, ps, label=r'$\alpha=%g$' % alpha)
thinkplot.Save(root='analytic_pareto_cdf',
title='Pareto CDF',
xlabel='x',
ylabel='CDF')
def MakeParetoCdf2():
"""Generates a plot of the CDF of height in Pareto World."""
xmin = 100
alpha = 1.7
xs, ps = thinkstats2.RenderParetoCdf(xmin, alpha, 0, 1000.0, n=100)
thinkplot.Plot(xs, ps)
thinkplot.Save(root='analytic_pareto_height',
title='Pareto CDF',
xlabel='height (cm)',
ylabel='CDF',
legend=False)
def MakeNormalCdf():
"""Generates a plot of the normal CDF."""
thinkplot.PrePlot(3)
mus = [1.0, 2.0, 3.0]
sigmas = [0.5, 0.4, 0.3]
for mu, sigma in zip(mus, sigmas):
xs, ps = thinkstats2.RenderNormalCdf(mu=mu, sigma=sigma,
low=-1.0, high=4.0)
label = r'$\mu=%g$, $\sigma=%g$' % (mu, sigma)
thinkplot.Plot(xs, ps, label=label)
thinkplot.Save(root='analytic_normal_cdf',
title='Normal CDF',
xlabel='x',
ylabel='CDF',
loc=2)
def MakeNormalModel(weights):
"""Plot the CDF of birthweights with a normal model."""
# estimate parameters: trimming outliers yields a better fit
mu, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
print('Mean, Var', mu, var)
# plot the model
sigma = math.sqrt(var)
print('Sigma', sigma)
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=12.5)
thinkplot.Plot(xs, ps, label='model', color='0.8')
# plot the data
cdf = thinkstats2.Cdf(weights, label='data')
thinkplot.PrePlot(1)
thinkplot.Cdf(cdf)
thinkplot.Save(root='analytic_birthwgt_model',
title='Birth weights',
xlabel='birth weight (lbs)',
ylabel='CDF')
def MakeExampleNormalPlot():
"""Generates a sample normal probability plot.
"""
n = 1000
thinkplot.PrePlot(3)
mus = [0, 1, 5]
sigmas = [1, 1, 2]
for mu, sigma in zip(mus, sigmas):
sample = np.random.normal(mu, sigma, n)
xs, ys = thinkstats2.NormalProbability(sample)
        label = r'$\mu=%d$, $\sigma=%d$' % (mu, sigma)
thinkplot.Plot(xs, ys, label=label)
thinkplot.Save(root='analytic_normal_prob_example',
title='Normal probability plot',
xlabel='standard normal sample',
ylabel='sample values')
def MakeNormalPlot(weights, term_weights):
"""Generates a normal probability plot of birth weights."""
mean, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
std = math.sqrt(var)
xs = [-4, 4]
fxs, fys = thinkstats2.FitLine(xs, mean, std)
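    # On a normal probability plot a sample from N(mean, std) should fall near
    # the straight line y = mean + std * x, so this line serves as the model to
    # compare the data against.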
thinkplot.Plot(fxs, fys, linewidth=4, color='0.8')
thinkplot.PrePlot(2)
xs, ys = thinkstats2.NormalProbability(weights)
thinkplot.Plot(xs, ys, label='all live')
xs, ys = thinkstats2.NormalProbability(term_weights)
thinkplot.Plot(xs, ys, label='full term')
thinkplot.Save(root='analytic_birthwgt_normal',
title='Normal probability plot',
xlabel='Standard deviations from mean',
ylabel='Birth weight (lbs)')
def main():
thinkstats2.RandomSeed(18)
MakeExampleNormalPlot()
# make the analytic CDFs
MakeExpoCdf()
MakeBabyBoom()
MakeParetoCdf()
MakeParetoCdf2()
MakeNormalCdf()
# test the distribution of birth weights for normality
preg = nsfg.ReadFemPreg()
full_term = preg[preg.prglngth >= 37]
weights = preg.totalwgt_lb.dropna()
term_weights = full_term.totalwgt_lb.dropna()
MakeNormalModel(weights)
MakeNormalPlot(weights, term_weights)
if __name__ == "__main__":
main()
| gpl-3.0 |
horance-liu/tensorflow | tensorflow/contrib/distributions/python/ops/vector_laplace_diag.py | 60 | 8326 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distribution of a vectorized Laplace, with uncorrelated components."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import vector_laplace_linear_operator as vector_laplace_linop
from tensorflow.python.framework import ops
__all__ = [
"VectorLaplaceDiag",
]
class VectorLaplaceDiag(
vector_laplace_linop.VectorLaplaceLinearOperator):
"""The vectorization of the Laplace distribution on `R^k`.
The vector laplace distribution is defined over `R^k`, and parameterized by
a (batch of) length-`k` `loc` vector (the means) and a (batch of) `k x k`
`scale` matrix: `covariance = 2 * scale @ scale.T`, where `@` denotes
matrix-multiplication.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; loc, scale) = exp(-||y||_1) / Z,
y = inv(scale) @ (x - loc),
Z = 2**k |det(scale)|,
```
where:
* `loc` is a vector in `R^k`,
* `scale` is a linear operator in `R^{k x k}`, `cov = scale @ scale.T`,
* `Z` denotes the normalization constant, and,
  * `||y||_1` denotes the `l1` norm of `y`, `sum_i |y_i|`.
A (non-batch) `scale` matrix is:
```none
scale = diag(scale_diag + scale_identity_multiplier * ones(k))
```
where:
* `scale_diag.shape = [k]`, and,
* `scale_identity_multiplier.shape = []`.
Additional leading dimensions (if any) will index batches.
If both `scale_diag` and `scale_identity_multiplier` are `None`, then
`scale` is the Identity matrix.
The VectorLaplace distribution is a member of the [location-scale
family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X = (X_1, ..., X_k), each X_i ~ Laplace(loc=0, scale=1)
Y = (Y_1, ...,Y_k) = scale @ X + loc
```
#### About `VectorLaplace` and `Vector` distributions in TensorFlow.
The `VectorLaplace` is a non-standard distribution that has useful properties.
The marginals `Y_1, ..., Y_k` are *not* Laplace random variables, due to
the fact that the sum of Laplace random variables is not Laplace.
Instead, `Y` is a vector whose components are linear combinations of Laplace
random variables. Thus, `Y` lives in the vector space generated by `vectors`
of Laplace distributions. This allows the user to decide the mean and
covariance (by setting `loc` and `scale`), while preserving some properties of
the Laplace distribution. In particular, the tails of `Y_i` will be (up to
polynomial factors) exponentially decaying.
To see this last statement, note that the pdf of `Y_i` is the convolution of
the pdf of `k` independent Laplace random variables. One can then show by
induction that distributions with exponential (up to polynomial factors) tails
are closed under convolution.
#### Examples
```python
ds = tf.contrib.distributions
# Initialize a single 2-variate VectorLaplace.
vla = ds.VectorLaplaceDiag(
loc=[1., -1],
scale_diag=[1, 2.])
vla.mean().eval()
# ==> [1., -1]
vla.stddev().eval()
# ==> [1., 2] * sqrt(2)
# Evaluate this on an observation in `R^2`, returning a scalar.
vla.prob([-1., 0]).eval() # shape: []
# Initialize a 3-batch, 2-variate scaled-identity VectorLaplace.
vla = ds.VectorLaplaceDiag(
loc=[1., -1],
scale_identity_multiplier=[1, 2., 3])
vla.mean().eval() # shape: [3, 2]
# ==> [[1., -1]
# [1, -1],
# [1, -1]]
vla.stddev().eval() # shape: [3, 2]
# ==> sqrt(2) * [[1., 1],
# [2, 2],
# [3, 3]]
# Evaluate this on an observation in `R^2`, returning a length-3 vector.
vla.prob([-1., 0]).eval() # shape: [3]
# Initialize a 2-batch of 3-variate VectorLaplace's.
vla = ds.VectorLaplaceDiag(
loc=[[1., 2, 3],
[11, 22, 33]] # shape: [2, 3]
scale_diag=[[1., 2, 3],
[0.5, 1, 1.5]]) # shape: [2, 3]
# Evaluate this on a two observations, each in `R^3`, returning a length-2
# vector.
x = [[-1., 0, 1],
[-11, 0, 11.]] # shape: [2, 3].
vla.prob(x).eval() # shape: [2]
```
"""
def __init__(self,
loc=None,
scale_diag=None,
scale_identity_multiplier=None,
validate_args=False,
allow_nan_stats=True,
name="VectorLaplaceDiag"):
"""Construct Vector Laplace distribution on `R^k`.
The `batch_shape` is the broadcast shape between `loc` and `scale`
arguments.
The `event_shape` is given by last dimension of the matrix implied by
`scale`. The last dimension of `loc` (if provided) must broadcast with this.
Recall that `covariance = 2 * scale @ scale.T`.
```none
scale = diag(scale_diag + scale_identity_multiplier * ones(k))
```
where:
* `scale_diag.shape = [k]`, and,
* `scale_identity_multiplier.shape = []`.
Additional leading dimensions (if any) will index batches.
If both `scale_diag` and `scale_identity_multiplier` are `None`, then
`scale` is the Identity matrix.
Args:
loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
`b >= 0` and `k` is the event size.
scale_diag: Non-zero, floating-point `Tensor` representing a diagonal
matrix added to `scale`. May have shape `[B1, ..., Bb, k]`, `b >= 0`,
and characterizes `b`-batches of `k x k` diagonal matrices added to
`scale`. When both `scale_identity_multiplier` and `scale_diag` are
`None` then `scale` is the `Identity`.
scale_identity_multiplier: Non-zero, floating-point `Tensor` representing
a scaled-identity-matrix added to `scale`. May have shape
`[B1, ..., Bb]`, `b >= 0`, and characterizes `b`-batches of scaled
`k x k` identity matrices added to `scale`. When both
`scale_identity_multiplier` and `scale_diag` are `None` then `scale` is
the `Identity`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: if at most `scale_identity_multiplier` is specified.
"""
parameters = locals()
with ops.name_scope(name):
with ops.name_scope("init", values=[
loc, scale_diag, scale_identity_multiplier]):
# No need to validate_args while making diag_scale. The returned
# LinearOperatorDiag has an assert_non_singular method that is called by
# the Bijector.
scale = distribution_util.make_diag_scale(
loc=loc,
scale_diag=scale_diag,
scale_identity_multiplier=scale_identity_multiplier,
validate_args=False,
assert_positive=False)
super(VectorLaplaceDiag, self).__init__(
loc=loc,
scale=scale,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
| apache-2.0 |
zenefits/sentry | src/sentry/south_migrations/0064_index_checksum.py | 36 | 19092 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.create_index('sentry_groupedmessage', ['project_id', 'checksum'])
def backwards(self, orm):
db.delete_index('sentry_groupedmessage', ['project_id', 'checksum'])
models = {
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filterkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'FilterKey'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.view': {
'Meta': {'object_name': 'View'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'verbose_name_plural': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
}
}
complete_apps = ['sentry']
| bsd-3-clause |
genos/Programming | workbench/nn.py | 1 | 5367 | #!/usr/bin/env python3
# coding: utf-8
"""nn.py
A simple feed-forward neural network, inspired by:
- iamtrask.github.io/2015/07/12/basic-python-network/
- rolisz.ro/2013/04/18/neural-networks-in-python
- mattmazur.com/2015/03/17/a-step-by-step-backpropagation-example/
"""
from dataclasses import dataclass, field
from typing import List
import numpy as np
def sigma(x):
"""Logistic activation function."""
return 1 / (1 + np.exp(-x))
def d_sigma(y):
"""Derivative of logistic function; assumes that y is σ(x) for some x, so
this is σ(x) * (1 - σ(x))
"""
return y * (1 - y)
@dataclass
class NeuralNetwork:
"""A simple neural network implementation.
Attributes:
shape (list of integers, required): the shape of our network
learning rate (positive float): constant multiplier of our gradient step
iterations (positive integer): number of rounds in training
seed (integer): seed for random state
weights (list of arrays, set in `__post_init__`): weights of our network
"""
shape: List[int]
learning_rate: float = 1.0
iterations: int = int(1e4)
seed: int = 1729
weights: List[np.ndarray] = field(init=False)
def __post_init__(self):
"""Validate attributes, then initialize `weights`"""
# error checking
assert isinstance(
self.shape, (tuple, list, np.ndarray)
), f"{self.shape} is not array-like"
assert np.ndim(self.shape) == 1, f"{self.shape} is not one dimensional"
assert len(self.shape) > 2, f"{self.shape} too short; length should be > 2"
assert all(
isinstance(s, int) and s > 0 for s in self.shape
), f"{self.shape} should contain only positive integers"
assert (
isinstance(self.learning_rate, float) and self.learning_rate > 0
), f"{self.learning_rate} is not a positive real number"
assert (
isinstance(self.iterations, int) and self.iterations > 0
), f"{self.iterations} is not a positive integer"
assert isinstance(self.seed, int), f"{self.seed} is not an integer"
# initialize weights
rng = np.random.default_rng(self.seed)
self.weights = [
rng.uniform(low=-1, high=1, size=(row, col))
for row, col in zip(self.shape, self.shape[1:])
]
def __repr__(self):
"""Pretty printing"""
w_str = "\n".join(str(w) for w in self.weights)
return f"""NeuralNetwork
shape: {self.shape}
learning rate: {self.learning_rate}
iterations: {self.iterations}
weights:
{w_str}"""
def fit(self, X, y):
"""Use X and y to train our neural network.
Args:
X (array-like, two dimensional): training input values
y (array-like, two dimensional): training output values
Notes:
Shape requirements:
X.shape[1] == self.shape[0]
y.shape[1] == self.shape[-1]
X.shape[0] == y.shape[0]
"""
# conversion, if necessary
X = np.asarray(X)
y = np.asarray(y)
# error checking
assert np.ndim(X) == 2, "input should be two dimensional"
assert np.ndim(y) == 2, "output should be two dimensional"
assert X.shape[1] == self.shape[0], "input shape doesn't match"
assert y.shape[1] == self.shape[-1], "output shape doesn't match"
assert X.shape[0] == y.shape[0], "input and output shapes don't match"
# result of feeding data through each layer
output = [np.zeros((X.shape[0], s)) for s in self.shape]
output[0] = X
# deltas for updating weights
delta = [np.zeros_like(w) for w in self.weights]
for _ in range(self.iterations):
# feed forward
for i, w in enumerate(self.weights):
output[i + 1] = sigma(output[i] @ w)
# backpropagate
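            # The output-layer delta is the prediction error scaled by the
            # activation derivative; each earlier delta is the downstream delta
            # propagated back through the connecting weight matrix (chain
            # rule), again scaled by d_sigma of that layer's output. Each
            # weight matrix is then nudged by the outer product of its layer
            # input and its delta, times the learning rate.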
delta[-1] = (y - output[-1]) * d_sigma(output[-1])
for i in range(len(self.shape) - 3, -1, -1):
delta[i] = delta[i + 1] @ self.weights[i + 1].T * d_sigma(output[i + 1])
for i, (o, d) in enumerate(zip(output, delta)):
self.weights[i] += self.learning_rate * o.T @ d
def predict(self, X):
"""Predict output given new input X.
Args:
X (numpy array): new input values
Returns:
numpy array: predicted y_hat for given X
Notes:
Shape requirements:
ndim(X) <= 2
if len(X.shape) == 1:
X.shape[0] == self.shape[0]
else:
X.shape[1] == self.shape[0]
"""
# conversion, if necessary
X = np.asarray(X)
# error checking
assert np.ndim(X) <= 2, "input should be at most two dimensional"
assert (
X.shape[0 if np.ndim(X) == 1 else 1] == self.shape[0]
), "input shape doesn't match"
# feed forward
y_hat = X
for w in self.weights:
y_hat = sigma(y_hat @ w)
return y_hat
if __name__ == "__main__":
NN = NeuralNetwork([2, 7, 4, 5, 1])
X = [[0, 0], [0, 1], [1, 0], [1, 1]]
y = [[0], [1], [1], [0]]
NN.fit(X, y)
for x in X:
print(f"{x}: {NN.predict(x)}")
| mit |
JaguarSecurity/SMG | security_monkey/exceptions.py | 2 | 3893 | # Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: exceptions
:synopsis: Defines all security_monkey specific exceptions
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <[email protected]> @monkeysecurity
"""
from security_monkey import app
class SecurityMonkeyException(Exception):
"""Base class for all security monkey exceptions."""
pass
class InvalidARN(SecurityMonkeyException):
"""Found an indecipherable ARN"""
def __init__(self, bad_arn):
self.bad_arn = bad_arn
app.logger.info(self)
def __str__(self):
return repr("Given an invalid ARN: {}".format(self.bad_arn))
class InvalidSourceOwner(SecurityMonkeyException):
"""Source Owners should be an integer representing an AWS account owner."""
def __init__(self, bad_source_owner):
self.bad_source_owner = bad_source_owner
app.logger.info(self)
def __str__(self):
return repr("Given an invalid SourceOwner: {}".format(self.bad_source_owner))
class InvalidAWSJSON(SecurityMonkeyException):
"""The JSON returned from AWS is not valid."""
def __init__(self, bad_json):
self.bad_json = bad_json
app.logger.info(self)
def __str__(self):
return repr("Could not parse invalid JSON from AWS:\n {}".format(self.bad_json))
class BotoConnectionIssue(SecurityMonkeyException):
"""Boto could not connect. This could be a permissions issue."""
def __init__(self, connection_message, tech, account, region):
self.connection_message = connection_message
self.tech = tech
self.account = account
self.region = region
app.logger.info(self)
def __str__(self):
return repr("Problem Connecting to {}/{}/{}:\n{}".format(
self.tech, self.account, self.region, self.connection_message))
class S3PermissionsIssue(SecurityMonkeyException):
"""Boto could not read metadata about an S3 bucket. Check permissions."""
def __init__(self, bucket_name):
self.bucket_name = bucket_name
app.logger.info(self)
def __str__(self):
return repr("AWS returned an exception while attempting "+
"to obtain information on a bucket I should "+
"have access to. Bucket Name: {}".format(self.bucket_name))
class S3ACLReturnedNoneDisplayName(SecurityMonkeyException):
"""The XML representation of an S3 ACL is not providing a proper DisplayName."""
def __init__(self, bucket_name):
self.bucket_name = bucket_name
app.logger.info(self)
def __str__(self):
return repr("AWS returned <DisplayName>None</DisplayName>"+
" in the output of bhandle.get_acl().to_xml()."+
" Bucket Name:{}".format(self.bucket_name))
class AWSRateLimitReached(SecurityMonkeyException):
"""Security Monkey is being throttled by AWS."""
def __init__(self, connection_message, tech, account, region):
self.connection_message = connection_message
self.tech = tech
self.account = account
self.region = region
app.logger.info(self)
def __str__(self):
return repr("Likely reached the AWS rate limit. {}/{}/{}:\n{}".format(
self.tech, self.account, self.region, self.connection_message))
| apache-2.0 |
cyrustabatab/mptcp | examples/wireless/wifi-ap.py | 108 | 5883 | # -*- Mode: Python; -*-
# /*
# * Copyright (c) 2005,2006,2007 INRIA
# * Copyright (c) 2009 INESC Porto
# *
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation;
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# *
# * Authors: Mathieu Lacage <[email protected]>
# * Gustavo Carneiro <[email protected]>
# */
import sys
import ns.applications
import ns.core
import ns.internet
import ns.mobility
import ns.network
import ns.point_to_point
import ns.wifi
# void
# DevTxTrace (std::string context, Ptr<const Packet> p, Mac48Address address)
# {
# std::cout << " TX to=" << address << " p: " << *p << std::endl;
# }
# void
# DevRxTrace(std::string context, Ptr<const Packet> p, Mac48Address address)
# {
# std::cout << " RX from=" << address << " p: " << *p << std::endl;
# }
# void
# PhyRxOkTrace(std::string context, Ptr<const Packet> packet, double snr, WifiMode mode, enum WifiPreamble preamble)
# {
# std::cout << "PHYRXOK mode=" << mode << " snr=" << snr << " " << *packet << std::endl;
# }
# void
# PhyRxErrorTrace(std::string context, Ptr<const Packet> packet, double snr)
# {
# std::cout << "PHYRXERROR snr=" << snr << " " << *packet << std::endl;
# }
# void
# PhyTxTrace(std::string context, Ptr<const Packet> packet, WifiMode mode, WifiPreamble preamble, uint8_t txPower)
# {
# std::cout << "PHYTX mode=" << mode << " " << *packet << std::endl;
# }
# void
# PhyStateTrace(std::string context, Time start, Time duration, enum WifiPhy::State state)
# {
# std::cout << " state=";
# switch(state) {
# case WifiPhy::TX:
# std::cout << "tx ";
# break;
# case WifiPhy::SYNC:
# std::cout << "sync ";
# break;
# case WifiPhy::CCA_BUSY:
# std::cout << "cca-busy";
# break;
# case WifiPhy::IDLE:
# std::cout << "idle ";
# break;
# }
# std::cout << " start="<<start<<" duration="<<duration<<std::endl;
# }
def SetPosition(node, position):
mobility = node.GetObject(ns.mobility.MobilityModel.GetTypeId())
mobility.SetPosition(position)
def GetPosition(node):
mobility = node.GetObject(ns.mobility.MobilityModel.GetTypeId())
return mobility.GetPosition()
def AdvancePosition(node):
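    # Move the node 5 m along x every simulated second; stop rescheduling once x reaches 210 m.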
pos = GetPosition(node);
pos.x += 5.0
if pos.x >= 210.0:
return
SetPosition(node, pos)
ns.core.Simulator.Schedule(ns.core.Seconds(1.0), AdvancePosition, node)
def main(argv):
ns.core.CommandLine().Parse(argv)
ns.network.Packet.EnablePrinting();
# enable rts cts all the time.
ns.core.Config.SetDefault("ns3::WifiRemoteStationManager::RtsCtsThreshold", ns.core.StringValue("0"))
# disable fragmentation
ns.core.Config.SetDefault("ns3::WifiRemoteStationManager::FragmentationThreshold", ns.core.StringValue("2200"))
wifi = ns.wifi.WifiHelper.Default()
mobility = ns.mobility.MobilityHelper()
stas = ns.network.NodeContainer()
ap = ns.network.NodeContainer()
#NetDeviceContainer staDevs;
packetSocket = ns.network.PacketSocketHelper()
stas.Create(2)
ap.Create(1)
# give packet socket powers to nodes.
packetSocket.Install(stas)
packetSocket.Install(ap)
wifiPhy = ns.wifi.YansWifiPhyHelper.Default()
wifiChannel = ns.wifi.YansWifiChannelHelper.Default()
wifiPhy.SetChannel(wifiChannel.Create())
ssid = ns.wifi.Ssid("wifi-default")
wifi.SetRemoteStationManager("ns3::ArfWifiManager")
wifiMac = ns.wifi.NqosWifiMacHelper.Default()
# setup stas.
wifiMac.SetType("ns3::StaWifiMac",
"Ssid", ns.wifi.SsidValue(ssid),
"ActiveProbing", ns.core.BooleanValue(False))
staDevs = wifi.Install(wifiPhy, wifiMac, stas)
# setup ap.
wifiMac.SetType("ns3::ApWifiMac",
"Ssid", ns.wifi.SsidValue(ssid),
"BeaconGeneration", ns.core.BooleanValue(True),
"BeaconInterval", ns.core.TimeValue(ns.core.Seconds(2.5)))
wifi.Install(wifiPhy, wifiMac, ap)
# mobility.
mobility.Install(stas)
mobility.Install(ap)
ns.core.Simulator.Schedule(ns.core.Seconds(1.0), AdvancePosition, ap.Get(0))
socket = ns.network.PacketSocketAddress()
socket.SetSingleDevice(staDevs.Get(0).GetIfIndex())
socket.SetPhysicalAddress(staDevs.Get(1).GetAddress())
socket.SetProtocol(1)
onoff = ns.applications.OnOffHelper("ns3::PacketSocketFactory", ns.network.Address(socket))
onoff.SetConstantRate (ns.network.DataRate ("500kb/s"))
apps = onoff.Install(ns.network.NodeContainer(stas.Get(0)))
apps.Start(ns.core.Seconds(0.5))
apps.Stop(ns.core.Seconds(43.0))
ns.core.Simulator.Stop(ns.core.Seconds(44.0))
# Config::Connect("/NodeList/*/DeviceList/*/Tx", MakeCallback(&DevTxTrace));
# Config::Connect("/NodeList/*/DeviceList/*/Rx", MakeCallback(&DevRxTrace));
# Config::Connect("/NodeList/*/DeviceList/*/Phy/RxOk", MakeCallback(&PhyRxOkTrace));
# Config::Connect("/NodeList/*/DeviceList/*/Phy/RxError", MakeCallback(&PhyRxErrorTrace));
# Config::Connect("/NodeList/*/DeviceList/*/Phy/Tx", MakeCallback(&PhyTxTrace));
# Config::Connect("/NodeList/*/DeviceList/*/Phy/State", MakeCallback(&PhyStateTrace));
ns.core.Simulator.Run()
ns.core.Simulator.Destroy()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| gpl-2.0 |
isandlaTech/cohorte-runtime | python/cohorte/config/finder.py | 2 | 10351 | #!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
COHORTE file finder
**TODO:**
* Review API
:author: Thomas Calmant
:license: Apache Software License 2.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Python standard library
import fnmatch
import glob
import logging
import os
import cohorte.version
from pelix.ipopo.decorators import ComponentFactory, Instantiate, Provides, \
Validate, Invalidate
# iPOPO decorators
# COHORTE constants
# ------------------------------------------------------------------------------
# Bundle version
__version__ = cohorte.version.__version__
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
class FileFinderAbs(object):
"""
Simple file finder : tries to find the given file in the platform main
directories.
"""
def __init__(self):
"""
Sets up the finder
"""
# Search roots
self._roots = []
self._custom_roots = set()
def _get_context(self):
"""
return the bundle context
"""
pass
def _extract_platform_path(self, path):
"""
Tries to remove a platform root prefix from the given path.
Non-None result indicates that the given path is a root sub-path.
:param path: Path to be transformed
:return: The root-path if any, else None
"""
if not path:
return None
# COHORTE root directories
for root_dir in self._gen_roots():
if path.startswith(root_dir):
return os.path.relpath(path, root_dir)
# No match found
return None
def _gen_roots(self):
"""
Generator to have the Cohorte roots (base then home) and the custom
roots.
"""
realpath = os.path.realpath
for root_list in (self._roots, self._custom_roots):
if root_list:
for root in root_list:
# given root
yield root
# real path
real_root = realpath(root)
if real_root != root:
yield real_root
def _internal_find(self, filename):
"""
        A generator to find the files in the platform directories that match the given filename.
:param filename: Name of the file to find
"""
# Look into root directories
for root_dir in self._gen_roots():
_logger.debug("_internal_find : root_dir=[{0}]".format(root_dir))
path = os.path.realpath(os.path.join(root_dir, filename))
_logger.debug("_internal_find : path=[{0}]".format(path))
paths = glob.iglob(path)
for real_path in paths:
yield real_path
# Test the absolute file name
path = os.path.realpath(filename)
if os.path.exists(path):
yield path
# call from java only
def _set_roots(self, roots):
self._roots = roots
def find_rel(self, filename, base_file=None):
"""
A generator to find the given files in the platform folders
:param filename: The file to look for (tries its absolute path then its
name)
:param base_file: Base file reference (filename can be relative to it)
:return: The matching files
"""
        # Avoid yielding the same file twice
handled = set()
if base_file:
abspath = os.path.abspath
file_exists = os.path.exists
# Multiple possibilities...
base_dirs = set()
if os.path.isdir(base_file):
# The given base is a directory (absolute or relative)
base_dirs.add(abspath(base_file))
elif file_exists(base_file):
# The given base exists: get its directory
base_dirs.add(abspath(os.path.dirname(base_file)))
if not os.path.isabs(base_file):
# Keep relative paths, as they can be platform-relative
base_dirs.add(base_file)
# Remove the platform parts (home or base)
filtered_dirs = set()
for base_dir in base_dirs:
local_dir = self._extract_platform_path(base_dir)
if local_dir is not None:
filtered_dirs.add(local_dir)
else:
filtered_dirs.add(base_dir)
for base_dir in filtered_dirs:
# Try the base directory directly (as a relative directory)
path = os.path.join(base_dir, filename)
                for found_file in self._internal_find(path):
                    if found_file in handled:
                        # Already known
                        continue
                    handled.add(found_file)
                    yield found_file
# Try without the platform prefix, if any
platform_subdir = self._extract_platform_path(base_dir)
if platform_subdir:
path = os.path.join(platform_subdir, filename)
                    for found_file in self._internal_find(path):
                        if found_file in handled:
                            # Already known
                            continue
                        handled.add(found_file)
                        yield found_file
else:
# Find files, the standard way
            for found_file in self._internal_find(filename):
                if found_file in handled:
                    # Already known
                    continue
                handled.add(found_file)
                yield found_file
def find_gen(self, pattern, base_dir=None, recursive=True):
"""
Generator to find the files matching the given pattern looking
recursively in the given directory in the roots (base, home and
customs)
:param pattern: A file pattern
:param base_dir: The name of a sub-directory of "home" or "base"
:param recursive: If True, searches recursively for the file
:return: The matching files
"""
        if base_dir and base_dir[0] == os.path.sep:
# os.path.join won't work if the name starts with a path separator
base_dir = base_dir[len(os.path.sep):]
for root in self._gen_roots():
# Prepare the base directory
if base_dir is not None:
base_path = os.path.join(root, base_dir)
else:
base_path = root
# Walk the directory
for sub_root, _, filenames in os.walk(base_path, followlinks=True):
for filename in fnmatch.filter(filenames, pattern):
# Return the real path of the matching file
yield os.path.realpath(os.path.join(sub_root, filename))
if not recursive:
# Stop on first directory
return
def add_custom_root(self, root):
"""
Adds a custom search root (not ordered)
:param root: The custom root to add
"""
if root:
self._custom_roots.add(root)
def remove_custom_root(self, root):
"""
Removes a custom search root
:param root: The custom root to remove
"""
self._custom_roots.discard(root)
def update_roots(self):
"""
Updates the platform roots, according to framework properties
"""
del self._roots[:]
# Search in Base, then Home
for name in (cohorte.PROP_BASE, cohorte.PROP_HOME):
value = self._get_context().get_property(name)
if value and value not in self._roots:
self._roots.append(value)
def validate(self):
"""
        Treatment to perform on validation. Should be called by a component.
"""
try:
# Prepare the sets
del self._roots[:]
self._custom_roots.clear()
# Update the roots list
self.update_roots()
except Exception as e:
_logger.error("failed to init roots path {0}".format(e))
def invalidate(self):
"""
        Treatment to perform on invalidation. Should be called by a component.
"""
# Store the framework access
del self._roots[:]
self._custom_roots.clear()
@ComponentFactory('cohorte-file-finder-factory')
@Provides(cohorte.SERVICE_FILE_FINDER)
@Instantiate('cohorte-file-finder')
class FileFinder(FileFinderAbs):
"""
Simple file finder : tries to find the given file in the platform main
directories.
"""
def __init__(self):
"""
Sets up the finder
"""
super(FileFinder, self).__init__()
# override
def _get_context(self):
return self._context
@Validate
def validate(self, context):
"""
Component validated
:param context: The bundle context
"""
# Store the framework access
self._context = context
super(FileFinder, self).validate()
@Invalidate
def invalidate(self, context):
"""
Component invalidated
:param context: The bundle context
"""
super(FileFinder, self).invalidate()
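# Usage sketch (assumption, not part of the original bundle): consumers normally obtain
# the finder through the cohorte.SERVICE_FILE_FINDER service registered by iPOPO; the
# service-lookup calls and the "some_conf.js" file name below are illustrative only.
#
#     svc_ref = context.get_service_reference(cohorte.SERVICE_FILE_FINDER)
#     finder = context.get_service(svc_ref)
#     for path in finder.find_rel("some_conf.js", base_file="conf"):
#         print(path)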
| apache-2.0 |
bitmazk/django-generic-positions | generic_positions/models.py | 1 | 1493 | """Models for the ``generic_positions`` app."""
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import fields
from django.db import models
from django.utils.translation import ugettext_lazy as _
class ObjectPosition(models.Model):
"""
Model to add a position field to any kind of object.
:content_object: Object, which now has a position field.
:position: Current position integer of the object.
"""
# Generic Foreign Key Bundle
content_type = models.ForeignKey(
ContentType,
on_delete=models.CASCADE,
)
object_id = models.PositiveIntegerField()
content_object = fields.GenericForeignKey()
# Other fields
position = models.PositiveIntegerField(
verbose_name=_('Position'),
null=True, blank=True,
)
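# Example (sketch, not part of the app): attaching a position to an arbitrary model
# instance; ``news_entry`` is a hypothetical object from another app.
#
#     ObjectPosition.objects.create(content_object=news_entry, position=1)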
def save_positions(post_data, queryset=None):
"""
Function to update a queryset of position objects with a post data dict.
:post_data: Typical post data dictionary like ``request.POST``, which
contains the keys of the position inputs.
:queryset: Queryset of the model ``ObjectPosition``.
"""
if not queryset:
queryset = ObjectPosition.objects.all()
for key in post_data:
if key.startswith('position-'):
try:
obj_id = int(key.replace('position-', ''))
except ValueError:
continue
queryset.filter(pk=obj_id).update(position=post_data[key])
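# Example (sketch): in a view handling the re-ordering form, only the positions belonging
# to one model could be updated by passing a filtered queryset; ``request`` and ``MyModel``
# are hypothetical names.
#
#     ctype = ContentType.objects.get_for_model(MyModel)
#     save_positions(request.POST,
#                    queryset=ObjectPosition.objects.filter(content_type=ctype))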
| mit |
Yong-Lee/liyong | admin.py | 4 | 17722 | # -*- coding: utf-8 -*-
import logging
import re
try:
import json
except:
import simplejson as json
from hashlib import md5
from time import time
from datetime import datetime,timedelta
from urllib import urlencode
from common import BaseHandler, authorized, safe_encode, cnnow, clear_cache_by_pathlist, quoted_string, clear_all_cache
from setting import *
from model import Article, Comment, Link, Category, Tag, User, MyData
if not debug:
import sae.mail
from sae.taskqueue import add_task
from sae.storage import Bucket
######
def put_obj2storage(file_name = '', data = '', expires='365', type=None, encoding= None, domain_name = STORAGE_DOMAIN_NAME):
bucket = Bucket(domain_name)
bucket.put_object(file_name, data, content_type=type, content_encoding= encoding)
return bucket.generate_url(file_name)
def upload_qiniu(savename="test.txt", filedata=None):
if QN_BUCKET and savename and filedata:
import qiniu.conf
qiniu.conf.ACCESS_KEY = QN_AK
qiniu.conf.SECRET_KEY = QN_SK
import qiniu.rs
policy = qiniu.rs.PutPolicy(QN_BUCKET)
uptoken = policy.token()
import qiniu.io
key = savename
if key[0] == "/":
key = key[1:]
ret, err = qiniu.io.put(uptoken, key, filedata)
if err is not None:
return False
        ### NOTE: the URL returned below may differ per bucket (some are xxxx.u.qiniudn.com); change it to your own domain
return "http://%s.qiniudn.com/%s" % (QN_BUCKET, key)
else:
return False
######
class HomePage(BaseHandler):
@authorized()
def get(self):
output = self.render('admin_index.html', {
'title': "%s - %s"%(SITE_TITLE,SITE_SUB_TITLE),
'keywords':KEYWORDS,
'description':SITE_DECR,
'test': '',
},layout='_layout_admin.html')
self.write(output)
return output
class Login(BaseHandler):
def get(self):
self.echo('admin_login.html', {
'title': "管理员登录",
'has_user': User.check_has_user()
},layout='_layout_admin.html')
def post(self):
try:
name = self.get_argument("name")
password = self.get_argument("password")
except:
self.redirect('%s/admin/login'% BASE_URL)
return
if name and password:
has_user = User.check_has_user()
if has_user:
#check user
password = md5(password.encode('utf-8')).hexdigest()
user = User.check_user( name, password)
if user:
#logging.error('user ok')
self.set_cookie('username', name, path="/", expires_days = 365 )
self.set_cookie('userpw', password, path="/", expires_days = 365 )
self.redirect('%s/admin/'% BASE_URL)
return
else:
#logging.error('user not ok')
self.redirect('%s/admin/login'% BASE_URL)
return
else:
#add new user
newuser = User.add_new_user( name, password)
if newuser:
self.set_cookie('username', name, path="/", expires_days = 365 )
self.set_cookie('userpw', md5(password.encode('utf-8')).hexdigest(), path="/", expires_days = 365 )
self.redirect('%s/admin/'% BASE_URL)
return
else:
self.redirect('%s/admin/login'% BASE_URL)
return
else:
self.redirect('%s/admin/login'% BASE_URL)
class Logout(BaseHandler):
def get(self):
self.clear_all_cookies()
self.redirect('%s/admin/login'% BASE_URL)
class AddUser(BaseHandler):
@authorized()
def get(self):
pass
class Forbidden(BaseHandler):
def get(self):
self.write('Forbidden page')
class FileUpload(BaseHandler):
@authorized()
def post(self):
self.set_header('Content-Type','text/html')
rspd = {'status': 201, 'msg':'ok'}
        filetoupload = self.request.files.get('filetoupload')
if filetoupload:
myfile = filetoupload[0]
try:
file_type = myfile['filename'].split('.')[-1].lower()
new_file_name = "%s.%s"% (str(int(time())), file_type)
except:
file_type = ''
new_file_name = str(int(time()))
##
mime_type = myfile['content_type']
encoding = None
###
try:
if STORAGE_DOMAIN_NAME:
attachment_url = put_obj2storage(file_name = str(new_file_name), data = myfile['body'], expires='365', type= mime_type, encoding= encoding)
elif QN_AK and QN_SK and QN_BUCKET:
attachment_url = upload_qiniu(str(new_file_name), myfile['body'])
except:
attachment_url = ''
if attachment_url:
rspd['status'] = 200
rspd['filename'] = myfile['filename']
rspd['msg'] = attachment_url
else:
rspd['status'] = 500
rspd['msg'] = 'put_obj2storage erro, try it again.'
else:
rspd['msg'] = 'No file uploaded'
self.write(json.dumps(rspd))
return
class AddPost(BaseHandler):
@authorized()
def get(self):
self.echo('admin_addpost.html', {
'title': "添加文章",
'cats': Category.get_all_cat_name(),
'tags': Tag.get_all_tag_name(),
},layout='_layout_admin.html')
@authorized()
def post(self):
self.set_header('Content-Type','application/json')
rspd = {'status': 201, 'msg':'ok'}
try:
tf = {'true':1,'false':0}
timestamp = int(time())
post_dic = {
'category': self.get_argument("cat"),
'title': self.get_argument("tit"),
'content': self.get_argument("con"),
'tags': self.get_argument("tag",'').replace(u',',','),
'closecomment': self.get_argument("clo",'0'),
'password': self.get_argument("password",''),
'add_time': timestamp,
'edit_time': timestamp,
}
if post_dic['tags']:
tagslist = set([x.strip() for x in post_dic['tags'].split(',')])
try:
tagslist.remove('')
except:
pass
if tagslist:
post_dic['tags'] = ','.join(tagslist)
post_dic['closecomment'] = tf[post_dic['closecomment'].lower()]
except:
rspd['status'] = 500
rspd['msg'] = '错误: 注意必填的三项'
self.write(json.dumps(rspd))
return
postid = Article.add_new_article(post_dic)
if postid:
Category.add_postid_to_cat(post_dic['category'], str(postid))
if post_dic['tags']:
Tag.add_postid_to_tags(post_dic['tags'].split(','), str(postid))
rspd['status'] = 200
rspd['msg'] = '完成: 你已经成功添加了一篇文章 <a href="/t/%s" target="_blank">查看</a>' % str(postid)
clear_cache_by_pathlist(['/', 'cat:%s' % quoted_string(post_dic['category'])])
if not debug:
add_task('default', '/task/pingrpctask')
self.write(json.dumps(rspd))
return
else:
rspd['status'] = 500
rspd['msg'] = '错误: 未知错误,请尝试重新提交'
self.write(json.dumps(rspd))
return
class EditPost(BaseHandler):
@authorized()
def get(self, id = ''):
obj = None
if id:
obj = Article.get_article_by_id_edit(id)
self.echo('admin_editpost.html', {
'title': "编辑文章",
'cats': Category.get_all_cat_name(),
'tags': Tag.get_all_tag_name(),
'obj': obj
},layout='_layout_admin.html')
@authorized()
def post(self, id = ''):
act = self.get_argument("act",'')
if act == 'findid':
eid = self.get_argument("id",'')
self.redirect('%s/admin/edit_post/%s'% (BASE_URL, eid))
return
self.set_header('Content-Type','application/json')
rspd = {'status': 201, 'msg':'ok'}
oldobj = Article.get_article_by_id_edit(id)
try:
tf = {'true':1,'false':0}
timestamp = int(time())
post_dic = {
'category': self.get_argument("cat"),
'title': self.get_argument("tit"),
'content': self.get_argument("con"),
'tags': self.get_argument("tag",'').replace(u',',','),
'closecomment': self.get_argument("clo",'0'),
'password': self.get_argument("password",''),
'edit_time': timestamp,
'id': id
}
if post_dic['tags']:
tagslist = set([x.strip() for x in post_dic['tags'].split(',')])
try:
tagslist.remove('')
except:
pass
if tagslist:
post_dic['tags'] = ','.join(tagslist)
post_dic['closecomment'] = tf[post_dic['closecomment'].lower()]
except:
rspd['status'] = 500
rspd['msg'] = '错误: 注意必填的三项'
self.write(json.dumps(rspd))
return
postid = Article.update_post_edit(post_dic)
if postid:
cache_key_list = ['/', 'post:%s'% id, 'cat:%s' % quoted_string(oldobj.category)]
if oldobj.category != post_dic['category']:
#cat changed
Category.add_postid_to_cat(post_dic['category'], str(postid))
                Category.remove_postid_from_cat(oldobj.category, str(postid))
cache_key_list.append('cat:%s' % quoted_string(post_dic['category']))
if oldobj.tags != post_dic['tags']:
#tag changed
old_tags = set(oldobj.tags.split(','))
new_tags = set(post_dic['tags'].split(','))
removed_tags = old_tags - new_tags
added_tags = new_tags - old_tags
if added_tags:
Tag.add_postid_to_tags(added_tags, str(postid))
if removed_tags:
Tag.remove_postid_from_tags(removed_tags, str(postid))
clear_cache_by_pathlist(cache_key_list)
rspd['status'] = 200
rspd['msg'] = '完成: 你已经成功编辑了一篇文章 <a href="/t/%s" target="_blank">查看编辑后的文章</a>' % str(postid)
self.write(json.dumps(rspd))
return
else:
rspd['status'] = 500
rspd['msg'] = '错误: 未知错误,请尝试重新提交'
self.write(json.dumps(rspd))
return
class DelPost(BaseHandler):
@authorized()
def get(self, id = ''):
Article.del_post_by_id(id)
clear_cache_by_pathlist(['post:%s'%id])
self.redirect('%s/admin/edit_post/'% (BASE_URL))
class EditComment(BaseHandler):
@authorized()
def get(self, id = ''):
obj = None
if id:
obj = Comment.get_comment_by_id(id)
if obj:
act = self.get_argument("act",'')
if act == 'del':
Comment.del_comment_by_id(id)
clear_cache_by_pathlist(['post:%d'%obj.postid])
self.redirect('%s/admin/comment/'% (BASE_URL))
return
self.echo('admin_comment.html', {
'title': "管理评论",
'cats': Category.get_all_cat_name(),
'tags': Tag.get_all_tag_name(),
'obj': obj,
'comments': Comment.get_recent_comments(),
},layout='_layout_admin.html')
@authorized()
def post(self, id = ''):
act = self.get_argument("act",'')
if act == 'findid':
eid = self.get_argument("id",'')
self.redirect('%s/admin/comment/%s'% (BASE_URL, eid))
return
tf = {'true':1,'false':0}
post_dic = {
'author': self.get_argument("author"),
'email': self.get_argument("email"),
'content': safe_encode(self.get_argument("content").replace('\r','\n')),
'url': self.get_argument("url",''),
'visible': self.get_argument("visible",'false'),
'id': id
}
post_dic['visible'] = tf[post_dic['visible'].lower()]
Comment.update_comment_edit(post_dic)
clear_cache_by_pathlist(['post:%s'%id])
self.redirect('%s/admin/comment/%s'% (BASE_URL, id))
return
class LinkBroll(BaseHandler):
@authorized()
def get(self):
act = self.get_argument("act",'')
id = self.get_argument("id",'')
obj = None
if act == 'del':
if id:
Link.del_link_by_id(id)
clear_cache_by_pathlist(['/'])
self.redirect('%s/admin/links'% (BASE_URL))
return
elif act == 'edit':
if id:
obj = Link.get_link_by_id(id)
clear_cache_by_pathlist(['/'])
self.echo('admin_link.html', {
'title': "管理友情链接",
'objs': Link.get_all_links(),
'obj': obj,
},layout='_layout_admin.html')
@authorized()
def post(self):
act = self.get_argument("act",'')
id = self.get_argument("id",'')
name = self.get_argument("name",'')
sort = self.get_argument("sort",'0')
url = self.get_argument("url",'')
if name and url:
params = {'id': id, 'name': name, 'url': url, 'displayorder': sort}
if act == 'add':
Link.add_new_link(params)
if act == 'edit':
Link.update_link_edit(params)
clear_cache_by_pathlist(['/'])
self.redirect('%s/admin/links'% (BASE_URL))
return
class FlushData(BaseHandler):
@authorized()
def get(self):
act = self.get_argument("act",'')
if act == 'flush':
MyData.flush_all_data()
clear_all_cache()
self.redirect('/admin/flushdata')
return
elif act == 'flushcache':
clear_all_cache()
self.redirect('/admin/flushdata')
return
self.echo('admin_flushdata.html', {
'title': "清空缓存/数据",
},layout='_layout_admin.html')
class PingRPCTask(BaseHandler):
def get(self):
for n in range(len(XML_RPC_ENDPOINTS)):
add_task('default', '%s/task/pingrpc/%d' % (BASE_URL, n))
self.write(str(time()))
post = get
class PingRPC(BaseHandler):
def get(self, n = 0):
import urllib2
pingstr = self.render('rpc.xml', {'article_id':Article.get_max_id()})
headers = {
'User-Agent':'request',
'Content-Type' : 'text/xml',
'Content-length' : str(len(pingstr))
}
req = urllib2.Request(
url = XML_RPC_ENDPOINTS[int(n)],
headers = headers,
data = pingstr,
)
try:
content = urllib2.urlopen(req).read()
tip = 'Ping ok' + content
except:
tip = 'ping erro'
self.write(str(time()) + ": " + tip)
#add_task('default', '%s/task/sendmail'%BASE_URL, urlencode({'subject': tip, 'content': tip + " " + str(n)}))
post = get
class SendMail(BaseHandler):
def post(self):
subject = self.get_argument("subject",'')
content = self.get_argument("content",'')
if subject and content:
sae.mail.send_mail(NOTICE_MAIL, subject, content,(MAIL_SMTP, int(MAIL_PORT), MAIL_FROM, MAIL_PASSWORD, True))
class Install(BaseHandler):
def get(self):
try:
self.write('如果出现错误请尝试刷新本页。')
has_user = User.check_has_user()
if has_user:
self.write('博客已经成功安装了,你可以直接 <a href="/admin/flushdata">清空网站数据</a>')
else:
self.write('博客数据库已经建立,现在就去 <a href="/admin/">设置一个管理员帐号</a>')
except:
MyData.creat_table()
self.write('博客已经成功安装了,现在就去 <a href="/admin/">设置一个管理员帐号</a>')
class NotFoundPage(BaseHandler):
def get(self):
self.set_status(404)
self.echo('error.html', {
'page': '404',
'title': "Can't find out this URL",
'h2': 'Oh, my god!',
'msg': 'Something seems to be lost...'
})
#####
urls = [
(r"/admin/", HomePage),
(r"/admin/login", Login),
(r"/admin/logout", Logout),
(r"/admin/403", Forbidden),
(r"/admin/add_post", AddPost),
(r"/admin/edit_post/(\d*)", EditPost),
(r"/admin/del_post/(\d+)", DelPost),
(r"/admin/comment/(\d*)", EditComment),
(r"/admin/flushdata", FlushData),
(r"/task/pingrpctask", PingRPCTask),
(r"/task/pingrpc/(\d+)", PingRPC),
(r"/task/sendmail", SendMail),
(r"/install", Install),
(r"/admin/fileupload", FileUpload),
(r"/admin/links", LinkBroll),
(r".*", NotFoundPage)
]
| mit |
pckiller2008/pyfpdf | tutorial/tuto3.py | 21 | 1565 | from fpdf import FPDF
title='20000 Leagues Under the Seas'
class PDF(FPDF):
def header(self):
#Arial bold 15
self.set_font('Arial','B',15)
#Calculate width of title and position
w=self.get_string_width(title)+6
self.set_x((210-w)/2)
#Colors of frame, background and text
self.set_draw_color(0,80,180)
self.set_fill_color(230,230,0)
self.set_text_color(220,50,50)
#Thickness of frame (1 mm)
self.set_line_width(1)
#Title
self.cell(w,9,title,1,1,'C',1)
#Line break
self.ln(10)
def footer(self):
#Position at 1.5 cm from bottom
self.set_y(-15)
#Arial italic 8
self.set_font('Arial','I',8)
#Text color in gray
self.set_text_color(128)
#Page number
self.cell(0,10,'Page '+str(self.page_no()),0,0,'C')
def chapter_title(self,num,label):
#Arial 12
self.set_font('Arial','',12)
#Background color
self.set_fill_color(200,220,255)
#Title
self.cell(0,6,"Chapter %d : %s"%(num,label),0,1,'L',1)
#Line break
self.ln(4)
def chapter_body(self,name):
#Read text file
txt=file(name).read()
#Times 12
self.set_font('Times','',12)
#Output justified text
self.multi_cell(0,5,txt)
#Line break
self.ln()
#Mention in italics
self.set_font('','I')
self.cell(0,5,'(end of excerpt)')
def print_chapter(self,num,title,name):
self.add_page()
self.chapter_title(num,title)
self.chapter_body(name)
pdf=PDF()
pdf.set_title(title)
pdf.set_author('Jules Verne')
pdf.print_chapter(1,'A RUNAWAY REEF','20k_c1.txt')
pdf.print_chapter(2,'THE PROS AND CONS','20k_c2.txt')
pdf.output('tuto3.pdf','F')
| lgpl-3.0 |
stackforge/python-solumclient | solumclient/tests/common/test_auth.py | 1 | 3915 | # Copyright 2013 - Noorul Islam K M
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient.v2_0 import client as ksclient
import mock
import testtools
from solumclient.common import auth
from solumclient.common import client
from solumclient.tests import base
@mock.patch.object(ksclient, 'Client')
class KeystoneAuthPluginTest(base.TestCase):
def setUp(self):
super(KeystoneAuthPluginTest, self).setUp()
plugin = auth.KeystoneAuthPlugin(
username="fake-username",
password="fake-password",
tenant_name="fake-tenant-name",
project_domain_name="default",
user_domain_name="default",
auth_url="http://auth")
self.cs = client.HTTPClient(auth_plugin=plugin)
@testtools.skip("Skip it when found solution")
def test_authenticate(self, mock_ksclient):
self.cs.authenticate()
mock_ksclient.assert_called_with(
username="fake-username",
password="fake-password",
tenant_name="fake-tenant-name",
project_domain_name="default",
user_domain_name="default",
auth_url="http://auth")
def test_token_and_endpoint(self, mock_ksclient):
plugin = auth.KeystoneAuthPlugin(
endpoint="http://solum",
token="test_token")
self.cs = client.HTTPClient(auth_plugin=plugin)
self.cs.authenticate()
(token, endpoint) = self.cs.auth_plugin.token_and_endpoint(
"fake-endpoint-type", "fake-service-type")
self.assertEqual(token, "test_token")
self.assertEqual("http://solum", endpoint)
def test_token_and_endpoint_before_auth(self, mock_ksclient):
(token, endpoint) = self.cs.auth_plugin.token_and_endpoint(
"fake-endpoint-type", "fake-service-type")
self.assertIsNone(token, None)
self.assertIsNone(endpoint, None)
@testtools.skip("Skip it when found solution")
def test_endpoint_with_no_token(self, mock_ksclient):
plugin = auth.KeystoneAuthPlugin(
username="fake-username",
password="fake-password",
tenant_name="fake-tenant-name",
project_domain_name="default",
user_domain_name="default",
auth_url="http://auth",
endpoint="http://solum")
self.cs = client.HTTPClient(auth_plugin=plugin)
self.cs.authenticate()
mock_ksclient.assert_called_with(
username="fake-username",
password="fake-password",
tenant_name="fake-tenant-name",
auth_url="http://auth")
(token, endpoint) = self.cs.auth_plugin.token_and_endpoint(
"fake-endpoint-type", "fake-service-type")
self.assertIsInstance(token, mock.MagicMock)
self.assertEqual("http://solum", endpoint)
@mock.patch.object(ksclient, 'Client')
class KeystoneAuthPluginTokenTest(base.TestCase):
def test_token_and_endpoint(self, mock_ksclient):
plugin = auth.KeystoneAuthPlugin(
token="fake-token",
endpoint="http://solum")
cs = client.HTTPClient(auth_plugin=plugin)
cs.authenticate()
(token, endpoint) = cs.auth_plugin.token_and_endpoint(
"fake-endpoint-type", "fake-service-type")
self.assertEqual('fake-token', token)
self.assertEqual('http://solum', endpoint)
| apache-2.0 |
victor-gonzalez/AliPhysics | PWGJE/EMCALJetTasks/macros/JetQA/plotPWGJEQA.py | 27 | 87685 | #! /usr/bin/env python
# Macro to plot PWGJE QA histograms, using AliAnalysisTaskPWGJEQA.
#
# It automatically detects what to plot, based on the content of your analysis output file:
# whether to do track/calo/jet/event QA, as well as MC vs. data, PbPb vs. pp, Run1 vs. Run2, Phos vs. no Phos.
#
# To run:
# python plotPWGJEQA.py -f "/my/dir/AnalysisResults.root" -o "/my/dir/outputQA/" -i ".png"
#
# (or, run without options: defaults are "AnalysisResults.root" and "./outputQA/" and ".pdf")
#
# If not using standard AOD collections, you need to set the list names in the config below.
# You may need to set some of the displayed ranges on the plots.
#
# Note: It is possible you will have to change the scaling on a couple plots, to give them reasonable ranges.
#
# Note: AliAnalysisTaskPWGJEQA uses variable binning for centrality, track pT, track pT-res, and cluster E.
# Relevant histograms are plotted below using "width" scaling option to divide by bin width, when applicable.
#
# Note: Changing the binning in the analysis task may break some functionality here.
#
# Author: James Mulligan ([email protected])
# Track plotting based in part on code from plotJETrackQA.C
# General
import os
import sys
import argparse
import itertools
import math
# ROOT
import ROOT
# Prevent ROOT from stealing focus when plotting
ROOT.gROOT.SetBatch(True)
def plotPWGJEQA(inputFile, outputDir, referenceFile, fileFormat):
# Open input file and get relevant lists
f = ROOT.TFile(inputFile)
# Set directory for QA output
if not outputDir.endswith("/"):
outputDir = outputDir + "/"
if not os.path.exists(outputDir):
os.makedirs(outputDir)
# Detect whether this is a Pt-hard production (only returns true if the histos have been scaled, with scalePtHardHistos.py)
isPtHard = False
for key in f.GetListOfKeys():
if "Scaled" in key.GetName():
isPtHard = True
print("Is Pt-hard: %s" % isPtHard)
# Configure the plotting macro
qaTaskBaseName = "AliAnalysisTaskPWGJEQA"
# Input names
tracksListName = "tracks"
generatorTrackThnName = "tracks_PhysPrim"
matchedTrackThnName = "tracks_Matched"
# Handles older QA task
if "EmcalTrackingQA" in qaTaskBaseName:
tracksListName = "fTracks"
generatorTrackThnName = "fParticlesPhysPrim"
matchedTrackThnName = "fParticlesMatched"
# Get the output list of AliAnalysisTaskPWGJEQA
qaTaskName = determineQATaskName(qaTaskBaseName, f, isPtHard)
print("Found qaTaskName \"{0}\"".format(qaTaskName))
qaList = f.Get(qaTaskName)
# If not a Pt-hard production (since it is done already), we need to set Sumw2 since we will scale and divide histograms
if not isPtHard:
print("Setting Sumw2 on histograms.")
for obj in qaList:
SetSumw2(obj)
# Get the lists for tracks, cells, clusters, full jets, charged jets, and event QA
trackTHnSparse = qaList.FindObject(tracksListName)
cellQAList = qaList.FindObject("emcalCells")
clusterQAList = qaList.FindObject("caloClusters")
chargedJetList = qaList.FindObject("Jet_AKTChargedR020_tracks_pT0150_pt_scheme")
fullJetList = qaList.FindObject("Jet_AKTFullR020_tracks_pT0150_caloClusters_E0300_pt_scheme")
nEventsRef = 0
# If reference file provided, get its analysis lists
qaListRef = ""
trackTHnSparseRef = ""
clusterQAListRef = ""
cellQAListRef = ""
chargedJetListRef = ""
fullJetListRef = ""
if referenceFile:
fRef = ROOT.TFile(referenceFile)
qaListRef = fRef.Get(qaTaskName)
if not isPtHard:
print("Setting Sumw2 on reference histograms.")
for obj in qaListRef:
SetSumw2(obj)
trackTHnSparseRef = qaListRef.FindObject(tracksListName)
trackTHnSparseRef.SetName("trackRef")
clusterQAListRef = qaListRef.FindObject("caloClusters")
clusterQAListRef.SetName("caloClustersRef")
cellQAListRef = qaListRef.FindObject("emcalCells")
cellQAListRef.SetName("emcalCellsRef")
chargedJetListRef = qaListRef.FindObject("Jet_AKTChargedR020_tracks_pT0150_pt_scheme")
chargedJetListRef.SetName("chargedJetListRef")
fullJetListRef = qaListRef.FindObject("Jet_AKTFullR020_tracks_pT0150_caloClusters_E0300_pt_scheme")
fullJetListRef.SetName("fullJetListRef")
histNEventRef = qaListRef.FindObject("fHistEventCount")
nEventsRef = histNEventRef.GetBinContent(1)
print("N events ref: %d" % nEventsRef)
# Get number of events
histNEvent = qaList.FindObject("fHistEventCount")
nEvents = histNEvent.GetBinContent(1)
print("N events: %d" % nEvents)
# Set config: ispp, isMC, isRun2, includePhos
if qaList.FindObject("fHistCentrality"):
ispp = False
else:
ispp = True
print("Is pp: %s" % ispp)
if qaList.FindObject(generatorTrackThnName):
isMC = True
else:
isMC = False
print("Is MC: %s" % isMC)
if clusterQAList:
clusterTHnSparse = clusterQAList.FindObject("clusterObservables")
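        # Cluster-type axis of the THnSparse: index 3 in pp, index 4 in Pb-Pb (extra centrality axis)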
if ispp:
hClusterType = clusterTHnSparse.Projection(3)
else:
hClusterType = clusterTHnSparse.Projection(4)
isRun2 = hClusterType.GetBinContent(2) > 0
includePhos = hClusterType.GetBinContent(3) > 0
print("Is Run 2: %s" % isRun2)
print("Include Phos: %s" % includePhos)
else:
isRun2 = False
includePhos = False
# Plotting options
ROOT.gStyle.SetOptStat(0)
ROOT.gStyle.SetOptTitle(0)
# Plot QA
print("Plotting QA...")
if trackTHnSparse:
plotTrackQA(ispp, isMC, trackTHnSparse, generatorTrackThnName, matchedTrackThnName, qaList, nEvents, outputDir, qaListRef, trackTHnSparseRef, nEventsRef, fileFormat)
if clusterQAList:
plotCaloQA(ispp, isRun2, includePhos, clusterQAList, cellQAList, nEvents, outputDir, clusterQAListRef, cellQAListRef, nEventsRef, fileFormat)
if chargedJetList:
plotChargedJetQA(ispp, isPtHard, chargedJetList, outputDir, chargedJetListRef, nEvents, nEventsRef, fileFormat)
if fullJetList:
plotFullJetQA(ispp, isPtHard, isRun2, includePhos, fullJetList, outputDir, fullJetListRef, nEvents, nEventsRef, fileFormat)
if qaList.FindObject("eventQA"):
plotEventQA(ispp, isRun2, includePhos, qaList, outputDir, fileFormat)
if isPtHard:
        plotPtHard(f, qaList, nEvents, qaListRef, nEventsRef, outputDir, fileFormat)
def determineQATaskName(qaTaskBaseName, f, isPtHard):
""" Determine the task name based on a wide variety of possible names.
Since the task name varies depending on what input objects are included,
we need to guess the name.
Args:
qaTaskBaseName (str): Base name of the QA task without any of the input object names
f (TFile): Root file containing the QA task
"""
# Get all task names stored in the input file
possibleTaskNames = [key.GetName() for key in f.GetListOfKeys()]
# Possible input object names
tracksName = "tracks"
mcTracksName = "mcparticles"
cellsName = "emcalCells"
clustersName = "caloClusters"
# Compile into a list for easy processing
possibleNames = [tracksName, mcTracksName, cellsName, clustersName]
suffix = "histos"
if isPtHard:
suffix = "histosScaled"
for length in range(0, len(possibleNames)+1):
for elements in itertools.permutations(possibleNames, length):
joined = "_".join(elements)
testTaskName = qaTaskBaseName
if joined:
testTaskName += "_" + joined
# Also Try ESD
testTaskNameESD = testTaskName.replace("emcalCells", "EMCALCells").replace("caloClusters", "CaloClusters").replace("tracks", "Tracks").replace("mcparticles", "MCParticles")
for taskName in [testTaskName, testTaskNameESD]:
taskName = "{0}_{1}".format(taskName, suffix)
if taskName in possibleTaskNames:
return taskName
print("Could not determine QA task name! Please check your spelling!")
exit(1)
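# Example (illustrative; the exact order depends on which permutation matches): with tracks,
# cells and clusters attached, the search above would find a name like
# "AliAnalysisTaskPWGJEQA_tracks_caloClusters_emcalCells_histos"
# (or the "_histosScaled" variant for a scaled Pt-hard production).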
########################################################################################################
# Plot track histograms ##############################################################################
########################################################################################################
def plotTrackQA(ispp, isMC, trackTHnSparse, generatorTrackThnName, matchedTrackThnName, qaList, nEvents, outputDir, qaListRef, trackTHnSparseRef, nEventsRef, fileFormat):
# Create subdirectory for Tracks
outputDirTracks = outputDir + "Tracks/"
if not os.path.exists(outputDirTracks):
os.makedirs(outputDirTracks)
# trackTHnSparse consists of (Centrality, Pt, Eta, Phi, Track type, sigma(pT)/pT)
if isMC:
generatorTHnSparse = qaList.FindObject(generatorTrackThnName) # (Centrality, Pt, Eta, Phi, findable)
matchedTHnSparse = qaList.FindObject(matchedTrackThnName) # (Pt-gen, Eta-gen, Phi-gen, Pt-det, Eta-det, Phi-det, (pT-gen - pT-det)/pT-det, Track type)
#---------------------------------------------------------------------------------------------------
# phi distribution of hybrid tracks
#---------------------------------------------------------------------------------------------------
c1 = ROOT.TCanvas("c1","c1: Phi",600,450)
c1.cd()
# Project to (Phi, Track type)
if ispp:
hPhiTracktype = trackTHnSparse.Projection(2,3)
else:
hPhiTracktype = trackTHnSparse.Projection(3,4)
hPhiGlobal = hPhiTracktype.ProjectionY("PhiGlobal", 1, 1)
hPhiComplementary = hPhiTracktype.ProjectionY("PhiComplementary", 2, 2)
hPhiGlobal.SetLineColor(2)
hPhiGlobal.SetLineWidth(3)
hPhiGlobal.SetLineStyle(1)
hPhiComplementary.SetLineStyle(1)
hPhiComplementary.SetLineColor(4)
hPhiComplementary.SetLineWidth(3)
hPhiSum = hPhiGlobal.Clone()
hPhiSum.Add(hPhiComplementary)
hPhiSum.SetTitle("hPhiSum")
hPhiSum.SetName("hPhiSum")
hPhiSum.SetLineColor(1)
hPhiSum.SetMarkerColor(1)
hPhiSum.SetLineStyle(1)
hPhiGlobal.Scale(1./nEvents)
hPhiComplementary.Scale(1./nEvents)
hPhiSum.Scale(1./nEvents)
hPhiGlobal.SetTitle("#phi Distribution of Hybrid Tracks")
hPhiGlobal.GetYaxis().SetTitle("#frac{1}{N_{evts}} #frac{dN}{d#phi}")
hPhiGlobal.GetYaxis().SetTitleSize(0.06)
hPhiGlobal.GetXaxis().SetTitleSize(0.06)
hPhiGlobal.GetXaxis().SetTitleOffset(0.5)
hPhiGlobal.GetYaxis().SetRangeUser(0,15.)
if ispp:
hPhiGlobal.GetYaxis().SetRangeUser(0,0.2)
if isMC:
hPhiGlobal.GetYaxis().SetRangeUser(0,0.25)
ROOT.gPad.SetLeftMargin(0.15)
ROOT.gPad.SetRightMargin(0.05)
ROOT.gPad.SetBottomMargin(0.13)
ROOT.gPad.SetTopMargin(0.05)
hPhiGlobal.Draw("hist")
hPhiComplementary.Draw("hist same")
hPhiSum.Draw("hist same")
leg1 = ROOT.TLegend(0.17,0.7,0.83,0.93,"Hybrid tracks")
leg1.SetFillColor(10)
leg1.SetBorderSize(0)
leg1.SetFillStyle(0)
leg1.SetTextSize(0.04)
leg1.AddEntry(hPhiGlobal, "w/ SPD & ITSrefit", "l")
leg1.AddEntry(hPhiComplementary, "w/o SPD & w/ ITSrefit", "l")
leg1.AddEntry(hPhiSum, "sum", "l")
leg1.Draw("same")
textNEvents = ROOT.TLatex()
textNEvents.SetNDC()
c1.cd()
textNEvents.DrawLatex(0.52,0.68,"#it{N}_{events} = %d" % nEvents)
outputFilename = os.path.join(outputDirTracks, "hTrackPhi" + fileFormat)
c1.SaveAs(outputFilename)
# Also plot the TH2 phi vs. pT -- make sure that phi is uniform at all pT
# Project to (Pt, Phi)
if ispp:
hPhiPtSum = trackTHnSparse.Projection(2,0)
else:
hPhiPtSum = trackTHnSparse.Projection(3,1)
hPhiPtSum.Scale(1.,"width")
hPhiPtSum.GetZaxis().SetRangeUser(1e-7, 3e5)
outputFilename = os.path.join(outputDirTracks, "hTrackPhiPt" + fileFormat)
plotHist(hPhiPtSum, outputFilename, "colz", False, True)
#---------------------------------------------------------------------------------------------------
# pT distribution of hybrid tracks
#---------------------------------------------------------------------------------------------------
# Project to (Pt, Track type)
if ispp:
hPtTracktype = trackTHnSparse.Projection(0,3)
else:
hPtTracktype = trackTHnSparse.Projection(1,4)
hPtGlobal = hPtTracktype.ProjectionY("PtGlobal", 1, 1)
hPtComplementary = hPtTracktype.ProjectionY("PtComplementary", 2, 2)
hPtSum = hPtGlobal.Clone()
hPtSum.Add(hPtComplementary)
# If reference distribution supplied, project to (Pt, Track type)
hPtSumRef = ""
if trackTHnSparseRef and qaListRef:
if ispp:
hPtTracktypeRef = trackTHnSparseRef.Projection(0,3)
else:
hPtTracktypeRef = trackTHnSparseRef.Projection(1,4)
hPtGlobalRef = hPtTracktypeRef.ProjectionY("PtGlobalRef", 1, 1)
hPtComplementaryRef = hPtTracktypeRef.ProjectionY("PtComplementaryRef", 2, 2)
hPtSumRef = hPtGlobalRef.Clone()
hPtSumRef.Add(hPtComplementaryRef)
outputFilename = os.path.join(outputDirTracks, "hTrackPt" + fileFormat)
xRangeMax = 100
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dp_{T}} [GeV^{-1}]"
legendTitle = "Track selection"
legendRunLabel = "Hybrid tracks"
legendRefLabel = "Hybrid tracks, all runs"
ratioYAxisTitle = "Ratio: run / all runs"
plotSpectra(hPtSum, hPtSumRef, hPtGlobal, hPtComplementary, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "width", "w/ SPD & ITSrefit", "w/o SPD & w/ ITSrefit")
# Plot also ratio of central track spectrum to peripheral track spectrum
trackTHnSparse.GetAxis(0).SetRangeUser(0,10)
hPt010 = trackTHnSparse.Projection(1)
hPt010.SetName("hPt010")
trackTHnSparse.GetAxis(0).SetRangeUser(50,90)
hPt5090 = trackTHnSparse.Projection(1)
hPt5090.SetName("hPt5090")
outputFilename = os.path.join(outputDirTracks, "hTrackPtRatio" + fileFormat)
xRangeMax = 75
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dE_{T}} [GeV^{-1}]"
legendTitle = "Tracks"
h1legendLabel = "50-90 %"
h2legendLabel = "0-10 %"
ratioYAxisTitle = "Central / Peripheral"
yRatioMax = 12
plotSpectraCent(hPt5090, hPt010, "", nEvents, ispp, outputFilename, xRangeMax, yAxisTitle, ratioYAxisTitle, legendTitle, h1legendLabel, h2legendLabel, "", "width", yRatioMax)
trackTHnSparse.GetAxis(0).SetRangeUser(0,90)
#---------------------------------------------------------------------------------------------------
# pT resolution of hybrid tracks -- from track fitting
#---------------------------------------------------------------------------------------------------
c5 = ROOT.TCanvas("c5","c5: pT resolution",600,450)
c5.cd()
# Project to (Pt, Track type, pT resolution)
if ispp:
hPtTracktypePtSigma1Pt = trackTHnSparse.Projection(0,3,4)
else:
hPtTracktypePtSigma1Pt = trackTHnSparse.Projection(1,4,5)
# Project to global tracks and take profile, to get the pT resolution as a function of pT (Profile of pT vs pT*sigma(1/pT), i.e. pT vs sigma(pT)/pT)
# Note: No need to scale by bin width (despite pt-res having variable binning), since we take a profile (rather than e.g. plot a spectrum).
hPtTracktypePtSigma1Pt.GetYaxis().SetRange(1,1)
hPtPtSigma1PtGlobal = hPtTracktypePtSigma1Pt.Project3D("zx")
hPtPtSigma1PtGlobal.SetName("hPtPtSigma1PtGlobal")
profPtPtSigma1PtGlobal = hPtPtSigma1PtGlobal.ProfileX()
profPtPtSigma1PtGlobal.SetName("profPtPtSigma1PtGlobal")
profPtPtSigma1PtGlobal.SetLineColor(2)
profPtPtSigma1PtGlobal.SetLineWidth(3)
profPtPtSigma1PtGlobal.SetMarkerStyle(21)
profPtPtSigma1PtGlobal.SetMarkerColor(2)
profPtPtSigma1PtGlobal.SetMaximum(0.3)
#profPtPtSigma1PtGlobal.GetYaxis().SetTitle("#it{p}_{T} #times #sigma(1/#it{p}_{T})")
profPtPtSigma1PtGlobal.GetYaxis().SetTitle(" #sigma(#it{p}_{T}) / #it{p}_{T}")
profPtPtSigma1PtGlobal.GetXaxis().SetTitleSize(0.06)
profPtPtSigma1PtGlobal.GetYaxis().SetTitleSize(0.06)
profPtPtSigma1PtGlobal.GetYaxis().SetRangeUser(0,0.15)
ROOT.gPad.SetLeftMargin(0.15)
ROOT.gPad.SetRightMargin(0.05)
ROOT.gPad.SetBottomMargin(0.14)
ROOT.gPad.SetTopMargin(0.05)
#profPtPtSigma1PtGlobal.GetXaxis().SetRangeUser(0, 100)
profPtPtSigma1PtGlobal.Draw()
# Project to complementary tracks and take profile
hPtTracktypePtSigma1Pt.GetYaxis().SetRange(2,2)
hPtPtSigma1PtComplementary = hPtTracktypePtSigma1Pt.Project3D("zx")
hPtPtSigma1PtComplementary.SetName("hPtPtSigma1PtComplementary")
profPtPtSigma1PtComplementary = hPtPtSigma1PtComplementary.ProfileX()
profPtPtSigma1PtComplementary.SetName("profPtPtSigma1PtComplementary")
profPtPtSigma1PtComplementary.SetLineColor(4)
profPtPtSigma1PtComplementary.SetLineWidth(3)
profPtPtSigma1PtComplementary.SetMarkerStyle(24)
profPtPtSigma1PtComplementary.SetMarkerColor(4)
profPtPtSigma1PtComplementary.Draw("same")
leg3 = ROOT.TLegend(0.21,0.6,0.88,0.88,"Hybrid tracks")
leg3.SetFillColor(10)
leg3.SetBorderSize(0)
leg3.SetFillStyle(0)
leg3.SetTextSize(0.04)
leg3.AddEntry(profPtPtSigma1PtGlobal, "w/ SPD & ITSrefit", "lp")
leg3.AddEntry(profPtPtSigma1PtComplementary, "w/o SPD & w/ ITSrefit", "lp")
leg3.Draw("same")
outputFilename = os.path.join(outputDirTracks, "profTrackPtResolution" + fileFormat)
c5.SaveAs(outputFilename)
#---------------------------------------------------------------------------------------------------
# pT resolution of hybrid tracks -- from MC
#---------------------------------------------------------------------------------------------------
# (the error bars on this histogram, which denote the resolution, are not working at present...)
if isMC:
# Plot distribution (pT-gen - pT-det)/pT-det
c25 = ROOT.TCanvas("c25","c25: pT Res Dist MC",600,450)
c25.cd()
c25.SetLogy()
if ispp:
hPtRes = matchedTHnSparse.Projection(6)
else:
hPtRes = matchedTHnSparse.Projection(7)
hPtRes.GetYaxis().SetTitle("counts")
hPtRes.Draw("hist E")
outputFilename = os.path.join(outputDirTracks, "hTrackPtResolutionMC" + fileFormat)
c25.SaveAs(outputFilename)
# Plot mean of the distribution as a function of pT, with error bars as the standard deviation of the distribution
c24 = ROOT.TCanvas("c24","c24: pT Resolution MC",600,450)
c24.cd()
# Project to (Pt, pT resolution, Track type)
if ispp:
hPtTracktypePtRes = matchedTHnSparse.Projection(3,7,6)
else:
hPtTracktypePtRes = matchedTHnSparse.Projection(4,8,7)
# Project to global tracks and take profile, to get the pT resolution as a function of pT
hPtTracktypePtRes.GetYaxis().SetRange(1,1)
hPtPtResGlobal = hPtTracktypePtRes.Project3D("zx")
hPtPtResGlobal.SetName("hPtPtResGlobal")
profPtPtResGlobal = hPtPtResGlobal.ProfileX("prof",1,-1,"s") # set errors to standard deviation (rather than standard error on mean)
profPtPtResGlobal.SetName("profPtPtResGlobal")
profPtPtResGlobal.SetLineColor(2)
profPtPtResGlobal.SetLineWidth(3)
profPtPtResGlobal.SetMarkerStyle(21)
profPtPtResGlobal.SetMarkerColor(2)
profPtPtResGlobal.SetMaximum(0.3)
profPtPtResGlobal.GetYaxis().SetTitle("(#it{p}_{T}^{gen} - #it{p}_{T}^{det}) / #it{p}_{T}^{det}")
profPtPtResGlobal.GetXaxis().SetTitleSize(0.06)
profPtPtResGlobal.GetYaxis().SetTitleSize(0.06)
ROOT.gPad.SetLeftMargin(0.15)
ROOT.gPad.SetRightMargin(0.05)
ROOT.gPad.SetBottomMargin(0.14)
ROOT.gPad.SetTopMargin(0.05)
profPtPtResGlobal.GetYaxis().SetRangeUser(-0.5, 1)
profPtPtResGlobal.GetXaxis().SetRangeUser(0,100)
profPtPtResGlobal.Draw("E")
# Project to complementary tracks and take profile
hPtTracktypePtRes.GetYaxis().SetRange(2,2)
hPtPtResComplementary = hPtTracktypePtRes.Project3D("zx")
hPtPtResComplementary.SetName("hPtPtResComplementary")
profPtPtResComplementary = hPtPtResComplementary.ProfileX()
profPtPtResComplementary.SetName("profPtPtResComplementary")
profPtPtResComplementary.SetLineColor(4)
profPtPtResComplementary.SetLineWidth(3)
profPtPtResComplementary.SetMarkerStyle(24)
profPtPtResComplementary.SetMarkerColor(4)
profPtPtResComplementary.Draw("same E")
leg3 = ROOT.TLegend(0.21,0.6,0.88,0.88,"Hybrid tracks")
leg3.SetFillColor(10)
leg3.SetBorderSize(0)
leg3.SetFillStyle(0)
leg3.SetTextSize(0.04)
leg3.AddEntry(profPtPtResGlobal, "w/ SPD & ITSrefit", "lp")
leg3.AddEntry(profPtPtResComplementary, "w/o SPD & w/ ITSrefit", "lp")
leg3.Draw("hist same")
textPtRes = ROOT.TLatex()
textPtRes.SetNDC()
textPtRes.DrawLatex(0.45,0.9,"Data points: mean value")
textPtRes.DrawLatex(0.45, 0.8,"Error bars: stdev (resolution)")
outputFilename = os.path.join(outputDirTracks, "profTrackPtResolutionMC" + fileFormat)
c24.SaveAs(outputFilename)
#---------------------------------------------------------------------------------------------------
# Tracking efficiency
#---------------------------------------------------------------------------------------------------
if isMC:
# Plot ratio of pT-gen-matched to pT-gen
c26 = ROOT.TCanvas("c26","c26: TrackingEfficiency",600,450)
c26.cd()
for dim in ["1D", "2D"]:
if dim == "1D":
# 1D case
if ispp:
hPtGenMatched = matchedTHnSparse.Projection(0)
hPtGen1D = generatorTHnSparse.Projection(0, 3)
else:
hPtGenMatched = matchedTHnSparse.Projection(1)
hPtGen1D = generatorTHnSparse.Projection(1, 4)
hPtGenFindable = hPtGen1D.ProjectionY("trackEff", 2, 2)
elif dim == "2D":
# 2D case
if ispp:
hPtGenMatched = matchedTHnSparse.Projection(1, 0)
hPtGen2D = generatorTHnSparse.Projection(0, 1, 3)
else:
hPtGenMatched = matchedTHnSparse.Projection(2, 1)
hPtGen2D = generatorTHnSparse.Projection(1, 2, 4)
hPtGen2D.GetZaxis().SetRange(2, 2)
hPtGenFindable = hPtGen2D.Project3D("yx")
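            # Efficiency = matched detector-level tracks / findable generator-level tracks;
            # the "B" option in Divide() below computes binomial errors for the ratio.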
hTrackingEfficiency = hPtGenMatched.Clone()
hTrackingEfficiency.Divide(hPtGenMatched, hPtGenFindable, 1., 1., "B")
hTrackingEfficiency.SetMarkerStyle(21)
hTrackingEfficiency.SetMarkerColor(2)
if hTrackingEfficiency.InheritsFrom(ROOT.TH2.Class()):
hTrackingEfficiency.Draw("colz")
else:
hTrackingEfficiency.GetYaxis().SetTitle("Tracking Efficiency")
hTrackingEfficiency.GetYaxis().SetRangeUser(0.6,1)
hTrackingEfficiency.GetXaxis().SetRangeUser(0,50)
hTrackingEfficiency.Draw("P")
outputFilename = os.path.join(outputDirTracks, "hTrackingEfficiency{0}".format(dim) + fileFormat)
c26.SaveAs(outputFilename)
#---------------------------------------------------------------------------------------------------
# eta distribution of hybrid tracks
#---------------------------------------------------------------------------------------------------
c6 = ROOT.TCanvas("c6","c6: Eta",600,450)
c6.cd()
# Project to (Eta, Track type)
if ispp:
hEtaTracktype = trackTHnSparse.Projection(1,3)
else:
hEtaTracktype = trackTHnSparse.Projection(2,4)
hEtaGlobal = hEtaTracktype.ProjectionY("EtaGlobal", 1, 1)
hEtaComplementary = hEtaTracktype.ProjectionY("EtaComplementary", 2, 2)
hEtaGlobal.SetLineColor(2)
hEtaGlobal.SetLineWidth(3)
hEtaGlobal.SetLineStyle(1)
hEtaComplementary.SetLineStyle(1)
hEtaComplementary.SetLineColor(4)
hEtaComplementary.SetLineWidth(3)
hEtaSum = hEtaGlobal.Clone()
hEtaSum.Add(hEtaComplementary)
hEtaSum.SetTitle("hEtaSum")
hEtaSum.SetName("hEtaSum")
hEtaSum.SetLineColor(1)
hEtaSum.SetMarkerColor(1)
hEtaSum.SetLineStyle(1)
hEtaGlobal.Scale(1./nEvents)
hEtaComplementary.Scale(1./nEvents)
hEtaSum.Scale(1./nEvents)
hEtaGlobal.SetTitle("#eta Distribution of Hybrid Tracks")
hEtaGlobal.GetYaxis().SetTitle("#frac{1}{N_{evts}} #frac{dN}{d#eta}")
hEtaGlobal.GetYaxis().SetTitleSize(0.06)
hEtaGlobal.GetXaxis().SetTitleSize(0.06)
hEtaGlobal.GetXaxis().SetTitleOffset(0.7)
hEtaGlobal.GetYaxis().SetRangeUser(0,20.)
if ispp:
hEtaGlobal.GetYaxis().SetRangeUser(0,0.2)
if isMC:
hEtaGlobal.GetYaxis().SetRangeUser(0,0.3)
ROOT.gPad.SetLeftMargin(0.15)
ROOT.gPad.SetRightMargin(0.05)
ROOT.gPad.SetBottomMargin(0.13)
ROOT.gPad.SetTopMargin(0.05)
hEtaGlobal.Draw("hist")
hEtaComplementary.Draw("hist same")
hEtaSum.Draw("hist same")
leg1 = ROOT.TLegend(0.17,0.7,0.83,0.93,"Hybrid tracks")
leg1.SetFillColor(10)
leg1.SetBorderSize(0)
leg1.SetFillStyle(0)
leg1.SetTextSize(0.04)
leg1.AddEntry(hEtaGlobal, "w/ SPD & ITSrefit", "l")
leg1.AddEntry(hEtaComplementary, "w/o SPD & w/ ITSrefit", "l")
leg1.AddEntry(hEtaSum, "sum", "l")
leg1.Draw("same")
textNEvents = ROOT.TLatex()
textNEvents.SetNDC()
textNEvents.DrawLatex(0.65,0.87,"#it{N}_{events} = %d" % nEvents)
outputFilename = os.path.join(outputDirTracks, "hTrackEta" + fileFormat)
c6.SaveAs(outputFilename)
# Also plot the TH2 eta vs. pT -- make sure that eta is uniform at all pT
# Project to (Pt, Eta)
if ispp:
hEtaPtSum = trackTHnSparse.Projection(1,0)
else:
hEtaPtSum = trackTHnSparse.Projection(2,1)
hEtaPtSum.Scale(1.,"width")
hEtaPtSum.GetZaxis().SetRangeUser(1e-7, 3e5)
outputFilename = os.path.join(outputDirTracks, "hTrackEtaPt" + fileFormat)
plotHist(hEtaPtSum, outputFilename, "colz", False, True)
#---------------------------------------------------------------------------------------------------
# eta-phi distribution of hybrid tracks
#---------------------------------------------------------------------------------------------------
# Project to (Eta, Phi)
if ispp:
hEtaPhiSum = trackTHnSparse.Projection(1,2)
else:
hEtaPhiSum = trackTHnSparse.Projection(2,3)
hEtaPhiSum.SetName("hEtaPhiSum")
outputFilename = os.path.join(outputDirTracks, "hTrackEtaPhi" + fileFormat)
plotHist(hEtaPhiSum, outputFilename, "colz")
# And plot the eta-phi distribution for high-pT tracks
ROOT.gStyle.SetOptTitle(1)
if ispp:
trackTHnSparse.GetAxis(0).SetRangeUser(10,150)
hTrackEtaPhiHighPt = trackTHnSparse.Projection(1,2)
else:
trackTHnSparse.GetAxis(1).SetRangeUser(10,150)
hTrackEtaPhiHighPt = trackTHnSparse.Projection(2,3)
hTrackEtaPhiHighPt.SetTitle("Track Occupancy, p_{T} > 10 GeV")
outputFilename = os.path.join(outputDirTracks, "hTrackEtaPhiHighPt" + fileFormat)
plotHist(hTrackEtaPhiHighPt, outputFilename, "colz")
if ispp:
trackTHnSparse.GetAxis(0).SetRangeUser(0,150)
else:
trackTHnSparse.GetAxis(1).SetRangeUser(0,150)
ROOT.gStyle.SetOptTitle(0)
########################################################################################################
# Plot cluster histograms ##############################################################################
########################################################################################################
def plotCaloQA(ispp, isRun2, includePhos, clusterQAList, cellQAList, nEvents, outputDir, clusterQAListRef, cellQAListRef, nEventsRef, fileFormat):
# Create subdirectory for Cells, Clusters
outputDirCells = outputDir + "Cells/"
if not os.path.exists(outputDirCells):
os.makedirs(outputDirCells)
outputDirClusters = outputDir + "Clusters/"
if not os.path.exists(outputDirClusters):
os.makedirs(outputDirClusters)
clusterTHnSparse = clusterQAList.FindObject("clusterObservables")
# (Centrality, E_clus, eta, phi, clusterType)
if clusterQAListRef:
clusterTHnSparseRef = clusterQAListRef.FindObject("clusterObservables")
# Plot Eta-Phi of ALL CLUSTERS -----------------------------------------------------
# Project to (Eta, Phi)
if ispp:
hClusPhiEta = clusterTHnSparse.Projection(2,1)
else:
hClusPhiEta = clusterTHnSparse.Projection(3,2)
hClusPhiEta.SetName("clusterEMCalObservables_proj_eta_phi")
outputFilename = os.path.join(outputDirClusters, "hClusPhiEta" + fileFormat)
hClusPhiEta.GetXaxis().SetRangeUser(-1.5,0.8)#ELIANE -0.8,0.8
hClusPhiEta.GetYaxis().SetRangeUser(1.2,5.8)
plotHist(hClusPhiEta, outputFilename, "colz")
# Plot ratio to reference run, if supplied
if clusterQAListRef:
if ispp:
hClusPhiEtaRef = clusterTHnSparseRef.Projection(2,1)
else:
hClusPhiEtaRef = clusterTHnSparseRef.Projection(3,2)
hClusPhiEta.Scale(1./nEvents)
hClusPhiEtaRef.Scale(1./nEventsRef)
hClusPhiEtaRatio = hClusPhiEta.Clone()
hClusPhiEtaRatio.Divide(hClusPhiEtaRef)
ROOT.gStyle.SetOptTitle(1)
hClusPhiEtaRatio.SetTitle("Cluster Occupancy (per event): Current Run / All Runs")
outputFilename = os.path.join(outputDirClusters, "hClusPhiEtaRatio" + fileFormat)
plotHist(hClusPhiEtaRatio, outputFilename, "colz", False, True)
ROOT.gStyle.SetOptTitle(0)
# Plot EMCAL CLUSTERS --------------------------------------------------------------
# Project to (Energy, Eta, Phi, EMCal Cluster type)
if ispp:
clusterTHnSparse.GetAxis(3).SetRange(1,1)
hClusEMCalEta = clusterTHnSparse.Projection(1)
hClusEMCalPhi = clusterTHnSparse.Projection(2)
hClusEMCalEnergy = clusterTHnSparse.Projection(0)
else:
clusterTHnSparse.GetAxis(4).SetRange(1,1)
hClusEMCalEta = clusterTHnSparse.Projection(2)
hClusEMCalPhi = clusterTHnSparse.Projection(3)
hClusEMCalEnergy = clusterTHnSparse.Projection(1)
hClusEMCalEta.SetName("ClusEtaEmcal")
hClusEMCalPhi.SetName("ClusPhiEmcal")
hClusEMCalEnergy.SetName("ClusEnergyEmcal")
# Plot phi distribution
outputFilename = os.path.join(outputDirClusters, "hClusEMCalPhi" + fileFormat)
plotHist(hClusEMCalPhi, outputFilename, "hist E")
# Plot eta distribution
outputFilename = os.path.join(outputDirClusters, "hClusEMCalEta" + fileFormat)
plotHist(hClusEMCalEta, outputFilename, "hist E")
# Plot energy distribution
hClusEMCalEnergy.SetName("hClusEMCalEnergy")
hClusEMCalEnergyRef = ""
if clusterQAListRef:
if ispp:
clusterTHnSparseRef.GetAxis(3).SetRange(1,1)
hClusEMCalEnergyRef = clusterTHnSparseRef.Projection(0)
else:
clusterTHnSparseRef.GetAxis(4).SetRange(1,1)
hClusEMCalEnergyRef = clusterTHnSparseRef.Projection(1)
hClusEMCalEnergyRef.SetName("clusterEMCalObservablesRef_proj_energy")
outputFilename = os.path.join(outputDirClusters, "hClusEMCalEnergy" + fileFormat)
xRangeMax = 100
if ispp:
xRangeMax = 80
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dE_{T}} [GeV^{-1}]"
legendTitle = "EMCal Clusters"
legendRunLabel = "Current run"
legendRefLabel = "All runs"
ratioYAxisTitle = "Ratio: run / all runs"
plotSpectra(hClusEMCalEnergy, hClusEMCalEnergyRef, 0, 0, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "width")
# Plot DCAL CLUSTERS (if isRun2) ----------------------------------------------------
if isRun2:
# Project to (Energy, Eta, Phi, DCal Cluster type)
if ispp:
clusterTHnSparse.GetAxis(3).SetRange(2,2)
hClusDCalEta = clusterTHnSparse.Projection(1)
hClusDCalPhi = clusterTHnSparse.Projection(2)
hClusDCalEnergy = clusterTHnSparse.Projection(0)
else:
clusterTHnSparse.GetAxis(4).SetRange(2,2)
hClusDCalEta = clusterTHnSparse.Projection(2)
hClusDCalPhi = clusterTHnSparse.Projection(3)
hClusDCalEnergy = clusterTHnSparse.Projection(1)
hClusDCalEta.SetName("ClusEtaDcal")
hClusDCalPhi.SetName("ClusPhiDcal")
hClusDCalEnergy.SetName("ClusEnergyDcal")
# Plot phi distribution
outputFilename = os.path.join(outputDirClusters, "hClusDCalPhi" + fileFormat)
plotHist(hClusDCalPhi, outputFilename, "hist E")
# Plot eta distribution
outputFilename = os.path.join(outputDirClusters, "hClusDCalEta" + fileFormat)
plotHist(hClusDCalEta, outputFilename, "hist E")
# Plot energy distribution
hClusDCalEnergy.SetName("hClusDCalEnergy")
hClusDCalEnergyRef = ""
if clusterQAListRef:
if ispp:
clusterTHnSparseRef.GetAxis(3).SetRange(2,2)
hClusDCalEnergyRef = clusterTHnSparseRef.Projection(0)
else:
clusterTHnSparseRef.GetAxis(4).SetRange(2,2)
hClusDCalEnergyRef = clusterTHnSparseRef.Projection(1)
hClusDCalEnergyRef.SetName("clusterDCalObservablesRef_proj_energy")
outputFilename = os.path.join(outputDirClusters, "hClusDCalEnergy" + fileFormat)
xRangeMax = 100
if ispp:
xRangeMax = 50
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dE_{T}} [GeV^{-1}]"
legendTitle = "DCal Clusters"
legendRunLabel = "Current run"
legendRefLabel = "All runs"
ratioYAxisTitle = "Ratio: run / all runs"
plotSpectra(hClusDCalEnergy, hClusDCalEnergyRef, 0, 0, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "width")
# Plot PHOS CLUSTERS (if includePhos) -----------------------------------------------
if includePhos:
# Project to (Energy, Eta, Phi, PHOS Cluster type)
if ispp:
clusterTHnSparse.GetAxis(3).SetRange(3,3)
hClusPHOSEta = clusterTHnSparse.Projection(1)
hClusPHOSPhi = clusterTHnSparse.Projection(2)
hClusPHOSEnergy = clusterTHnSparse.Projection(0)
else:
clusterTHnSparse.GetAxis(4).SetRange(3,3)
hClusPHOSEta = clusterTHnSparse.Projection(2)
hClusPHOSPhi = clusterTHnSparse.Projection(3)
hClusPHOSEnergy = clusterTHnSparse.Projection(1)
hClusPHOSEta.SetName("ClusEtaPHOS")
hClusPHOSPhi.SetName("ClusPhiPHOS")
hClusPHOSEnergy.SetName("ClusEnergyPHOS")
# Plot phi distribution
outputFilename = os.path.join(outputDirClusters, "hClusPHOSPhi" + fileFormat)
plotHist(hClusPHOSPhi, outputFilename, "hist E")
# Plot eta distribution
outputFilename = os.path.join(outputDirClusters, "hClusPHOSEta" + fileFormat)
plotHist(hClusPHOSEta, outputFilename, "hist E")
# Plot energy distribution
hClusPHOSEnergy.SetName("hClusPHOSEnergy")
hClusPHOSEnergyRef = ""
if clusterQAListRef:
if ispp:
clusterTHnSparseRef.GetAxis(3).SetRange(3,3)
hClusPHOSEnergyRef = clusterTHnSparseRef.Projection(0)
else:
clusterTHnSparseRef.GetAxis(4).SetRange(3,3)
hClusPHOSEnergyRef = clusterTHnSparseRef.Projection(1)
hClusPHOSEnergyRef.SetName("clusterPHOSObservablesRef_proj_energy")
outputFilename = os.path.join(outputDirClusters, "hClusPHOSEnergy" + fileFormat)
xRangeMax = 100
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dE_{T}} [GeV^{-1}]"
legendTitle = "PHOS Clusters"
legendRunLabel = "Current run"
legendRefLabel = "All runs"
ratioYAxisTitle = "Ratio: run / all runs"
plotSpectra(hClusPHOSEnergy, hClusPHOSEnergyRef, 0, 0, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "width")
# Plot the ratio of cluster spectra in EMCal/DCal/PHOS
if isRun2 and includePhos:
outputFilename = os.path.join(outputDirClusters, "hClusEnergyRatio" + fileFormat)
xRangeMax = 250
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dE_{T}} [GeV^{-1}]"
legendTitle = "Calo clusters"
legendRunLabel = "EMCal clusters"
legendRefLabel = "PHOS clusters"
ratioYAxisTitle = "Ratio to PHOS"
h2LegendLabel = "DCal clusters"
# Note: the spectra already have been scaled by nEvents, bin width
plotSpectra(hClusEMCalEnergy, hClusPHOSEnergy, hClusDCalEnergy, "", 1., 1., ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "", h2LegendLabel)
# Plot also the ratio of DCal to EMCal
if isRun2:
outputFilename = os.path.join(outputDirClusters, "hClusEnergyRatioEMC" + fileFormat)
xRangeMax = 250
if ispp:
xRangeMax = 80
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dE_{T}} [GeV^{-1}]"
legendTitle = "Calo clusters"
legendRunLabel = "DCal clusters"
legendRefLabel = "EMCal clusters"
ratioYAxisTitle = "DCal / EMCal"
# Note: the spectra already have been scaled by nEvents, bin width
plotSpectra(hClusDCalEnergy, hClusEMCalEnergy, "", "", 1., 1., ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename)
# Plot some PHOS QA plots
if includePhos:
# Plot also PHOS SM spectra
SMlist = clusterQAList.FindObject("BySM")
c2 = ROOT.TCanvas("c2","c2: hist",600,450)
c2.cd()
c2.SetLogy()
leg = ROOT.TLegend(0.3,0.6,0.88,0.83,"PHOS SM")
leg.SetFillColor(10)
leg.SetBorderSize(0)
leg.SetFillStyle(0)
leg.SetTextSize(0.04)
for sm in range(1,5):
hSM = SMlist.FindObject("hPhosClusEnergy_SM" + str(sm))
hSM.SetLineColor(sm)
hSM.SetLineStyle(1)
hSM.GetXaxis().SetRangeUser(0,100)
if sm == 1:
hSM.Draw("hist E")
else:
hSM.Draw("hist E same")
leg.AddEntry(hSM, "SM " + str(sm), "l")
leg.Draw("same")
outputFilename = os.path.join(outputDirClusters, "hClusPHOSEnergyBySM" + fileFormat)
c2.SaveAs(outputFilename)
c2.Close()
# Plot some PHOS QA plots
if includePhos:
hPhosNCellsVsEnergy = clusterQAList.FindObject("hPhosNCellsVsEnergy")
outputFilename = os.path.join(outputDirClusters, "hClusPHOSNCellsVsEnergy" + fileFormat)
plotHist(hPhosNCellsVsEnergy, outputFilename, "colz", True, True)
hPhosM02VsEnergy = clusterQAList.FindObject("hPhosM02VsEnergy")
outputFilename = os.path.join(outputDirClusters, "hClusPHOSM02VsEnergy" + fileFormat)
plotHist(hPhosM02VsEnergy, outputFilename, "colz", True, True)
hPhosCellIdVsEnergy = clusterQAList.FindObject("hPhosCellIdVsEnergy")
outputFilename = os.path.join(outputDirClusters, "hClusPHOSCellIdVsEnergy" + fileFormat)
plotHist(hPhosCellIdVsEnergy, outputFilename, "colz", True, True)
# Plot EMCAL CELLS --------------------------------------------------------------
hCellEnergy = cellQAList.FindObject("fHistCellEnergy")
outputFilename = os.path.join(outputDirCells, "hCellEnergy" + fileFormat)
plotHist(hCellEnergy, outputFilename, "hist E", True)
profCellAbsIdEnergy = cellQAList.FindObject("fProfCellAbsIdEnergy")
outputFilename = os.path.join(outputDirCells, "profCellAbsIdEnergy" + fileFormat)
plotHist(profCellAbsIdEnergy, outputFilename)
hCellTime = cellQAList.FindObject("fHistCellTime")
outputFilename = os.path.join(outputDirCells, "hCellTime" + fileFormat)
plotHist(hCellTime, outputFilename, "hist E")
profCellAbsIdTime = cellQAList.FindObject("fProfCellAbsIdTime")
outputFilename = os.path.join(outputDirCells, "profCellAbsIdTime" + fileFormat)
profCellAbsIdTime.GetYaxis().SetRangeUser(-0.2e-6,0.2e-6)
plotHist(profCellAbsIdTime, outputFilename)
# Plot the CELL energy spectrum with and without timing cuts
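# The timing cut is applied by restricting the time (x) axis of the 2D E-vs-time
# histogram before projecting onto the energy (y) axis: "Tall" is the full sample,
# "Tsel" keeps only cells with |t_cell| < 50 ns.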
hCellEnergyTall = cellQAList.FindObject("fHistCellEvsTime")
hCellEnergyTall = hCellEnergyTall.ProjectionY()
hCellEnergyTall.SetName("cell_Allproj_energy")
hCellEnergyTall.GetXaxis().SetTitle("E_{Cell} [GeV]")
outputFilename = os.path.join(outputDirCells, "hCellEnergyTall" + fileFormat)
plotHist(hCellEnergyTall, outputFilename, "hist E", True)
hCellEnergyTsel = cellQAList.FindObject("fHistCellEvsTime")
hCellEnergyTsel.GetXaxis().SetRangeUser(-50e-9,50e-9) # recommended time cut
hCellEnergyTsel = hCellEnergyTsel.ProjectionY()
hCellEnergyTsel.SetName("cell_Selproj_energy")
hCellEnergyTsel.GetXaxis().SetTitle("E_{Cell} |t_{cell}|<50ns [GeV]")
outputFilename = os.path.join(outputDirCells, "hCellEnergyTsel" + fileFormat)
plotHist(hCellEnergyTsel, outputFilename, "hist E", True)
# reference histograms
if cellQAListRef:
hCellEnergyTallRef = cellQAListRef.FindObject("fHistCellEvsTime")
hCellEnergyTallRef = hCellEnergyTallRef.ProjectionY()
hCellEnergyTallRef.SetName("cellRef_Allproj_energy")
hCellEnergyTselRef = cellQAListRef.FindObject("fHistCellEvsTime")
hCellEnergyTselRef.GetXaxis().SetRangeUser(-50e-9,50e-9)
hCellEnergyTselRef = hCellEnergyTselRef.ProjectionY()
hCellEnergyTselRef.SetName("cellRef_Selproj_energy")
xRangeMax = 100
if ispp:
xRangeMax = 80
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dE_{Cell}} [GeV^{-1}]"
legendTitle = "EMCal Cells"
legendRunLabel = "Current run"
legendRefLabel = "All runs"
ratioYAxisTitle = "Ratio: run / all runs"
outputFilename = os.path.join(outputDirCells, "hCellEnergyTallRatio" + fileFormat)
plotSpectra(hCellEnergyTall, hCellEnergyTallRef, 0, 0, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "width")
outputFilename = os.path.join(outputDirCells, "hCellEnergyTselRatio" + fileFormat)
plotSpectra(hCellEnergyTsel, hCellEnergyTselRef, 0, 0, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "width")
########################################################################################################
# Plot charged jet histograms #######################################################################
########################################################################################################
def plotChargedJetQA(ispp, isPtHard, chargedJetList, outputDir, chargedJetListRef, nEvents, nEventsRef, fileFormat):
# Create subdirectory for Jets
outputDirJets = outputDir + "Jets/"
if not os.path.exists(outputDirJets):
os.makedirs(outputDirJets)
chargedJetTHnSparse = chargedJetList.FindObject("fHistJetObservables")
# (Centrality, eta, phi, pT, pTcorr, pT leading particle)
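# In pp the centrality and pTcorr axes are absent, so the jet THnSparse axes are
# (eta, phi, pT, pT leading particle); the if ispp branches below account for this shift.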
if chargedJetListRef:
chargedJetTHnSparseRef = chargedJetListRef.FindObject("fHistJetObservables")
ROOT.gStyle.SetOptTitle(1)
if not ispp:
# Plot charged jet rho vs. centrality
hChargedJetRhoVsCent = chargedJetList.FindObject("fHistRhoVsCent")
hChargedJetRhoVsCent.SetTitle("Rho vs. Centrality, Charged Jets")
outputFilename = os.path.join(outputDirJets, "hChargedJetRhoVsCent" + fileFormat)
plotHist(hChargedJetRhoVsCent, outputFilename, "colz", False, True)
# Plot charged jet eta-phi, for jet pT > threshold
# there are ceil(250/3)=84 jet pt bins
# (5,84) means (~12 GeV < jet pT < 250 GeV)
# (11,84) means (~30 GeV < jet pT < 250 GeV)
minJetPtBin = 5
maxJetPtBin = 84
if ispp:
chargedJetTHnSparse.GetAxis(2).SetRange(minJetPtBin, maxJetPtBin)
else:
minJetPtBin = 11
chargedJetTHnSparse.GetAxis(3).SetRange(minJetPtBin, maxJetPtBin)
if ispp:
hChargedJetEtaPhi = chargedJetTHnSparse.Projection(1,0)
else:
hChargedJetEtaPhi = chargedJetTHnSparse.Projection(2,1)
hChargedJetEtaPhi.SetName("ChargedJetEtaPhi")
hChargedJetEtaPhi.SetTitle("Charged Jet Occupancy, p_{T,jet} > " + str((minJetPtBin-1)*3) + " GeV")
hChargedJetEtaPhi.GetXaxis().SetRangeUser(-0.8,0.8)
outputFilename = os.path.join(outputDirJets, "hChargedJetEtaPhi" + fileFormat)
plotHist(hChargedJetEtaPhi, outputFilename, "colz", False)
# Plot ratio to reference run, if supplied
if chargedJetListRef:
if ispp:
chargedJetTHnSparseRef.GetAxis(2).SetRange(minJetPtBin, maxJetPtBin)
hChargedJetEtaPhiRef = chargedJetTHnSparseRef.Projection(1,0)
else:
chargedJetTHnSparseRef.GetAxis(3).SetRange(minJetPtBin, maxJetPtBin)
hChargedJetEtaPhiRef = chargedJetTHnSparseRef.Projection(2,1)
hChargedJetEtaPhiRef.SetName("ChargedJetEtaPhiRef")
hChargedJetEtaPhi.Scale(1./nEvents)
hChargedJetEtaPhiRef.Scale(1./nEventsRef)
hChargedJetEtaPhiRatio = hChargedJetEtaPhi.Clone()
hChargedJetEtaPhiRatio.Divide(hChargedJetEtaPhiRef)
hChargedJetEtaPhiRatio.SetTitle("Charged Jet p_{T,jet} > " + str((minJetPtBin-1)*3) + " GeV Occupancy (per event): Current Run / All Runs")
outputFilename = os.path.join(outputDirJets, "hChargedJetEtaPhiRatio" + fileFormat)
plotHist(hChargedJetEtaPhiRatio, outputFilename, "colz", False, True)
if ispp:
chargedJetTHnSparseRef.GetAxis(2).SetRange(1, maxJetPtBin)
else:
chargedJetTHnSparseRef.GetAxis(3).SetRange(1, maxJetPtBin)
if ispp:
chargedJetTHnSparse.GetAxis(2).SetRange(1, maxJetPtBin)
else:
chargedJetTHnSparse.GetAxis(3).SetRange(1, maxJetPtBin)
# Plot charged jet pT
if ispp:
hChargedJetPt = chargedJetTHnSparse.Projection(2)
else:
hChargedJetPt = chargedJetTHnSparse.Projection(3)
hChargedJetPt.SetName("hChargedJetPt")
hChargedJetPtRef = ""
if chargedJetListRef:
if ispp:
hChargedJetPtRef = chargedJetTHnSparseRef.Projection(2)
else:
hChargedJetPtRef = chargedJetTHnSparseRef.Projection(3)
hChargedJetPtRef.SetName("hChargedJetPt")
outputFilename = os.path.join(outputDirJets, "hChargedJetPt" + fileFormat)
xRangeMax = 250
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dp_{T}} [GeV^{-1}]"
legendTitle = "Charged jets"
legendRunLabel = "Current run"
legendRefLabel = "All runs"
ratioYAxisTitle = "Ratio: run / all runs"
plotSpectra(hChargedJetPt, hChargedJetPtRef, 0, 0, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename)
# Plot charged jet pT leading particle vs. jet pT
if ispp:
hChargedJetPtLeadjetPt = chargedJetTHnSparse.Projection(3,2)
else:
hChargedJetPtLeadjetPt = chargedJetTHnSparse.Projection(5,3)
hChargedJetPtLeadjetPt.SetName("fHistChJetObservables_proj_pt_leadpt")
hChargedJetPtLeadjetPt.SetTitle("Leading pT vs. Jet pT, Charged Jets")
outputFilename = os.path.join(outputDirJets, "hChargedJetPtLeadjetPt" + fileFormat)
if isPtHard:
yMin = hChargedJetPt.GetBinContent(hChargedJetPt.FindBin(200)) # entry in the bin at 200 GeV sets the lower edge of the y-axis scale
yMax = hChargedJetPt.GetBinContent(hChargedJetPt.GetMaximumBin()) # content of the maximum bin sets the upper edge
hChargedJetPt.GetYaxis().SetRangeUser(yMin,yMax*1.1)
plotHist(hChargedJetPtLeadjetPt, outputFilename, "colz", "", True)
else:
plotHist(hChargedJetPtLeadjetPt, outputFilename, "colz", "", True)
ROOT.gStyle.SetOptTitle(0)
# Plot charged jet pT, background-subtracted
if not ispp:
hChargedJetPtCorr = chargedJetTHnSparse.Projection(4)
hChargedJetPtCorr.SetName("hChargedJetPtCorr")
hChargedJetPtCorrRef = ""
if chargedJetListRef:
hChargedJetPtCorrRef = chargedJetTHnSparseRef.Projection(4)
hChargedJetPtCorrRef.SetName("hChargedJetPtCorr")
outputFilename = os.path.join(outputDirJets, "hChargedJetPtCorr" + fileFormat)
xRangeMax = 150
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dp_{T}} [GeV^{-1}]"
legendTitle = "Charged jets, background subtracted"
legendRunLabel = "Current run"
legendRefLabel = "All runs"
ratioYAxisTitle = "Ratio: run / all runs"
plotSpectra(hChargedJetPtCorr, hChargedJetPtCorrRef, 0, 0, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename)
# Plot charged jet pT, background-subtracted, by centrality
chargedJetTHnSparse.GetAxis(0).SetRange(1, 1)
hChargedJetPtCorr010 = chargedJetTHnSparse.Projection(4)
hChargedJetPtCorr010.SetName("hChargedJetPtCorr010")
chargedJetTHnSparse.GetAxis(0).SetRange(2, 2)
hChargedJetPtCorr1030 = chargedJetTHnSparse.Projection(4)
hChargedJetPtCorr1030.SetName("hChargedJetPtCorr1030")
chargedJetTHnSparse.GetAxis(0).SetRange(3, 3)
hChargedJetPtCorr3050 = chargedJetTHnSparse.Projection(4)
hChargedJetPtCorr3050.SetName("hChargedJetPtCorr3050")
chargedJetTHnSparse.GetAxis(0).SetRange(4, 4)
hChargedJetPtCorr5090 = chargedJetTHnSparse.Projection(4)
hChargedJetPtCorr5090.SetName("hChargedJetPtCorr5090")
outputFilename = os.path.join(outputDirJets, "hChargedJetPtCorrCentral" + fileFormat)
xRangeMax = 150
yAxisTitle = "#frac{1}{N_{evts}N_{coll}}#frac{dN}{dp_{T}} [GeV^{-1}]"
legendTitle = "Charged jets, background subtracted"
legendRunLabel = "0-10%"
legendRefLabel = "50-90%"
DCalLegendLabel = "10-30%"
PHOSLegendLabel = "30-50%"
ratioYAxisTitle = "R_{CP}"
# Scale by Ncoll, to compare different centralities
# Values taken from https://twiki.cern.ch/twiki/bin/view/ALICE/CentralityCodeSnippets
Ncoll010 = 1636.
Ncoll1030 = 801.
Ncoll3050 = 264.
Ncoll5090 = 38.1
Ncoll090 = 435.3
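# R_CP construction: each class is scaled by the relative number of events in its class
# (0-10% is 1/4 the width of 50-90%; 10-30% and 30-50% are 1/2) and by Ncoll(0-10%)/Ncoll(class),
# so that after plotSpectra divides everything by the same total nEvents, the ratio panel
# gives the per-event, per-Ncoll yield relative to 50-90% (i.e. R_CP).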
hChargedJetPtCorr010.Scale(4.) # Scale by number of events in 0-10% relative to 50-90%
hChargedJetPtCorr1030.Scale(Ncoll010/Ncoll1030 * 2.)
hChargedJetPtCorr3050.Scale(Ncoll010/Ncoll3050 * 2.)
hChargedJetPtCorr5090.Scale(Ncoll010/Ncoll5090)
plotSpectra(hChargedJetPtCorr010, hChargedJetPtCorr5090, 0, 0, nEvents, nEvents, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "", DCalLegendLabel, PHOSLegendLabel)
chargedJetTHnSparse.GetAxis(0).SetRange(1,4)
########################################################################################################
# Plot full jet histograms ##############################################################################
########################################################################################################
def plotFullJetQA(ispp, isPtHard, isRun2, includePhos, fullJetList, outputDir, fullJetListRef, nEvents, nEventsRef, fileFormat):
# Create subdirectory for Jets
outputDirJets = outputDir + "Jets/"
if not os.path.exists(outputDirJets):
os.makedirs(outputDirJets)
fullJetTHnSparse = fullJetList.FindObject("fHistJetObservables")
# (Centrality, eta, phi, pT, pTcorr, pT leading particle)
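# As for charged jets, in pp the centrality and pTcorr axes are absent:
# (eta, phi, pT, pT leading particle).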
if fullJetListRef:
fullJetTHnSparseRef = fullJetListRef.FindObject("fHistJetObservables")
ROOT.gStyle.SetOptTitle(1)
if not ispp:
# Plot full jet rho vs. centrality
hFullJetRhoVsCent = fullJetList.FindObject("fHistRhoVsCent")
hFullJetRhoVsCent.SetTitle("Rho vs. Centrality, Full Jets")
outputFilename = os.path.join(outputDirJets, "hFullJetRhoVsCent" + fileFormat)
plotHist(hFullJetRhoVsCent, outputFilename, "colz", False, True)
# Plot Neutral Energy Fraction
hFullJetNEF = fullJetList.FindObject("hNEFVsPtEMC")
if not ispp:
if hFullJetNEF:
hFullJetNEF = hFullJetNEF.Project3D("zy") # assumes that in PbPb this list object is the TH3 of NEF vs. pT,corr vs. centrality
else:
print("hFullJetNEF not saved for PbPb in this version")
hFullJetNEF = hFullJetNEF.ProjectionY()
hFullJetNEFDCal = fullJetList.FindObject("hNEFVsPtDCal")
hFullJetNEFDCal = hFullJetNEFDCal.ProjectionY()
hFullJetNEF.SetTitle("NEF vs. p_{T,jet}, Full Jets")
outputFilename = os.path.join(outputDirJets, "hFullJetNEF" + fileFormat)
# plotHist(hFullJetNEF, outputFilename, "colz", True, False)
plotNEFSpectra(hFullJetNEF,hFullJetNEFDCal, 0,nEvents, ispp, 1, "1/N_{Evt} dN/dNEF", "EMCal", outputFilename,"", "DCal")
if ispp:
# Plot Delta HadCorr vs pT
hFullJetDeltaHcorr = fullJetList.FindObject("hDeltaEHadCorr")
hFullJetDeltaHcorr.GetXaxis().SetRangeUser(0., 150.)
hFullJetDeltaHcorr.SetTitle("#Delta E vs. p_{T,jet}, Full Jets")
#outputFilename = os.path.join(outputDirJets, "hFullJetDeltaHcorr" + fileFormat)
#plotHist(hFullJetDeltaHcorr, outputFilename, "colz", False, True)
hFullJetDeltaHcorr.SetTitle("<#DeltaE> vs. p_{T,jet}, Full Jets")
hDeltaEHadCorrProf = hFullJetDeltaHcorr.ProfileX()
hDeltaEHadCorrProf.GetYaxis().SetRangeUser(0.08, 15.)
hDeltaEHadCorrProf.SetLineColor(1)
hDeltaEHadCorrProf.SetMarkerStyle(20)
hDeltaEHadCorrProf.GetYaxis().SetTitleOffset(1.2)
hDeltaEHadCorrProf.GetYaxis().SetTitle("< #sum#it{E}_{nonlincorr} - #it{E}_{hadcorr} >")
outputFilename = os.path.join(outputDirJets, "hDeltaEHadCorrProf" + fileFormat)
plotHist(hDeltaEHadCorrProf, outputFilename, "E", True, False)
else:
print("hFullJetDeltaHcorr not saved for PbPb yet") #need to project the TH3 down to 2D
# Plot full jet eta-phi, for jet pT > threshold
# there are ceil(250/3)=84 jet pt bins
# (5,84) means (~12 GeV < jet pT < 250 GeV)
# (11,84) means (~30 GeV < jet pT < 250 GeV)
minJetPtBin = 5
maxJetPtBin = 84
if ispp:
fullJetTHnSparse.GetAxis(2).SetRange(minJetPtBin, maxJetPtBin)
else:
minJetPtBin = 11
fullJetTHnSparse.GetAxis(3).SetRange(minJetPtBin, maxJetPtBin)
# Plot full jet eta-phi
if ispp:
hFullJetEtaPhi = fullJetTHnSparse.Projection(1,0)
else:
hFullJetEtaPhi = fullJetTHnSparse.Projection(2,1)
hFullJetEtaPhi.SetName("FullJetEtaPhi")
hFullJetEtaPhi.SetTitle("Full Jet Occupancy, p_{T,jet} > " + str((minJetPtBin-1)*3) + " GeV")
outputFilename = os.path.join(outputDirJets, "hFullJetEtaPhi" + fileFormat)
hFullJetEtaPhi.GetXaxis().SetRangeUser(-0.8,0.8)
hFullJetEtaPhi.GetYaxis().SetRangeUser(1.2,5.8)
plotHist(hFullJetEtaPhi, outputFilename, "colz", False)
# Plot ratio to reference run, if supplied
if fullJetListRef:
if ispp:
fullJetTHnSparseRef.GetAxis(2).SetRange(minJetPtBin, maxJetPtBin)
hFullJetEtaPhiRef = fullJetTHnSparseRef.Projection(1,0)
else:
fullJetTHnSparseRef.GetAxis(3).SetRange(minJetPtBin, maxJetPtBin)
hFullJetEtaPhiRef = fullJetTHnSparseRef.Projection(2,1)
hFullJetEtaPhiRef.SetName("FullJetEtaPhiRef")
hFullJetEtaPhi.Scale(1./nEvents)
hFullJetEtaPhiRef.Scale(1./nEventsRef)
hFullJetEtaPhiRatio = hFullJetEtaPhi.Clone()
hFullJetEtaPhiRatio.Divide(hFullJetEtaPhiRef)
hFullJetEtaPhiRatio.SetTitle("Full Jet p_{T,jet} > " + str((minJetPtBin-1)*3) + " GeV Occupancy (per event): Current Run / All Runs")
outputFilename = os.path.join(outputDirJets, "hFullJetEtaPhiRatio" + fileFormat)
plotHist(hFullJetEtaPhiRatio, outputFilename, "colz", False)
if ispp:
fullJetTHnSparseRef.GetAxis(2).SetRange(1, maxJetPtBin)
else:
fullJetTHnSparseRef.GetAxis(3).SetRange(1, maxJetPtBin)
if ispp:
fullJetTHnSparse.GetAxis(2).SetRange(1, maxJetPtBin)
else:
fullJetTHnSparse.GetAxis(3).SetRange(1, maxJetPtBin)
ROOT.gStyle.SetOptTitle(0)
# Plot full jet pT
if ispp:
hFullJetPt = fullJetTHnSparse.Projection(2)
else:
hFullJetPt = fullJetTHnSparse.Projection(3)
hFullJetPt.SetName("hFullJetPt")
hFullJetPtRef = ""
if fullJetListRef:
if ispp:
hFullJetPtRef = fullJetTHnSparseRef.Projection(2)
else:
hFullJetPtRef = fullJetTHnSparseRef.Projection(3)
hFullJetPtRef.SetName("hFullJetPt")
outputFilename = os.path.join(outputDirJets, "hFullJetPt" + fileFormat)
xRangeMax = 250
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dp_{T}} [GeV^{-1}]"
legendTitle = "Full jets"
legendRunLabel = "Current run"
legendRefLabel = "All runs"
ratioYAxisTitle = "Ratio: run / all runs"
plotSpectra(hFullJetPt, hFullJetPtRef, 0, 0, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename)
# Plot full jet pT leading particle vs. jet pT
if ispp:
hFullJetPtLeadjetPt = fullJetTHnSparse.Projection(3,2)
else:
hFullJetPtLeadjetPt = fullJetTHnSparse.Projection(5,3)
hFullJetPtLeadjetPt.SetName("fHistFuJetObservables_proj_pt_leadpt")
hFullJetPtLeadjetPt.SetTitle("Leading pT vs. Jet pT, Full Jets")
outputFilename = os.path.join(outputDirJets, "hFullJetPtLeadjetPt" + fileFormat)
if ispp:
hFullJetPtLeadjetPt.GetXaxis().SetRangeUser(0,200)
hFullJetPtLeadjetPt.GetYaxis().SetRangeUser(0,100)
if isPtHard:
yMin = hFullJetPt.GetBinContent(hFullJetPt.FindBin(200)) # entry in the bin at 200 GeV sets the lower edge of the y-axis scale
maxBin = hFullJetPt.GetBinContent(hFullJetPt.GetMaximumBin()) # content of the maximum bin sets the upper edge
hFullJetPt.SetMinimum(yMin)
hFullJetPt.SetMaximum(maxBin*1.1)
plotHist(hFullJetPtLeadjetPt, outputFilename, "colz", "", True)
else:
plotHist(hFullJetPtLeadjetPt, outputFilename, "colz", "", True)
# Plot full jet pT, background subtracted
if not ispp:
hFullJetPtCorr = fullJetTHnSparse.Projection(4)
hFullJetPtCorr.SetName("hFullJetPtCorr")
hFullJetPtCorrRef = ""
if fullJetListRef:
hFullJetPtCorrRef = fullJetTHnSparseRef.Projection(4)
hFullJetPtCorrRef.SetName("hFullJetPtCorrRef")
outputFilename = os.path.join(outputDirJets, "hFullJetPtCorr" + fileFormat)
xRangeMax = 150
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dp_{T}} [GeV^{-1}]"
legendTitle = "Full jets, background subtracted"
legendRunLabel = "Current run"
legendRefLabel = "All runs"
ratioYAxisTitle = "Ratio: run / all runs"
plotSpectra(hFullJetPtCorr, hFullJetPtCorrRef, 0, 0, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename)
# Plot full jet pT, background-subtracted, by centrality
fullJetTHnSparse.GetAxis(0).SetRange(1, 1)
hFullJetPtCorr010 = fullJetTHnSparse.Projection(4)
hFullJetPtCorr010.SetName("hFullJetPtCorr010")
fullJetTHnSparse.GetAxis(0).SetRange(2, 2)
hFullJetPtCorr1030 = fullJetTHnSparse.Projection(4)
hFullJetPtCorr1030.SetName("hFullJetPtCorr1030")
fullJetTHnSparse.GetAxis(0).SetRange(3, 3)
hFullJetPtCorr3050 = fullJetTHnSparse.Projection(4)
hFullJetPtCorr3050.SetName("hFullJetPtCorr3050")
fullJetTHnSparse.GetAxis(0).SetRange(4, 4)
hFullJetPtCorr5090 = fullJetTHnSparse.Projection(4)
hFullJetPtCorr5090.SetName("hFullJetPtCorr5090")
outputFilename = os.path.join(outputDirJets, "hFullJetPtCorrCentral" + fileFormat)
xRangeMax = 150
yAxisTitle = "#propto#frac{1}{N_{evts}N_{coll}}#frac{dN}{dp_{T}} [GeV^{-1}]"
legendTitle = "Full jets, background subtracted"
legendRunLabel = "0-10%"
legendRefLabel = "50-90%"
DCalLegendLabel = "10-30%"
PHOSLegendLabel = "30-50%"
ratioYAxisTitle = "R_{CP}"
# Scale by Ncoll, to compare different centralities
# Values taken from https://twiki.cern.ch/twiki/bin/view/ALICE/CentralityCodeSnippets
Ncoll010 = 1636.
Ncoll1030 = 801.
Ncoll3050 = 264.
Ncoll5090 = 38.1
Ncoll090 = 435.3
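# Same R_CP construction as for the charged jets above: relative-event-count factors
# times Ncoll(0-10%)/Ncoll(class), with the common 1/nEvents applied inside plotSpectra.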
hFullJetPtCorr010.Scale(4.) # Scale by number of events in 0-10% relative to 50-90%
hFullJetPtCorr1030.Scale(Ncoll010/Ncoll1030 * 2.)
hFullJetPtCorr3050.Scale(Ncoll010/Ncoll3050 * 2.)
hFullJetPtCorr5090.Scale(Ncoll010/Ncoll5090)
plotSpectra(hFullJetPtCorr010, hFullJetPtCorr5090, 0, 0, nEvents, nEvents, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "", DCalLegendLabel, PHOSLegendLabel)
fullJetTHnSparse.GetAxis(0).SetRange(1,4)
# Plot full jet pT spectra separately for EMCal, DCal, PHOS jets
if isRun2 and includePhos:
#EMCal jets -- divide from DCal/PHOS by phi cut
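# Acceptance split used below (approximate, based on the bin edges chosen here):
# phi < ~4 rad -> EMCal; phi > ~4 rad and |eta| > 0.22 -> DCal;
# phi > ~4 rad and |eta| < 0.13 -> PHOS; the band 0.13 < |eta| < 0.22 is kept as "Gap" jets.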
if ispp:
phiDivideBin = fullJetTHnSparse.GetAxis(1).FindBin(4.)
fullJetTHnSparse.GetAxis(1).SetRange(0, phiDivideBin)
hFullJetEMCalEtaPhiPt = fullJetTHnSparse.Projection(0,1,2, "o") # "o" keep the original axis range
else:
phiDivideBin = fullJetTHnSparse.GetAxis(2).FindBin(4.)
fullJetTHnSparse.GetAxis(2).SetRange(0, phiDivideBin)
hFullJetEMCalEtaPhiPt = fullJetTHnSparse.Projection(1,2,3, "o")
hFullJetEMCalEtaPhiPtCorr = fullJetTHnSparse.Projection(1,2,4, "o")
hFullJetEMCalEtaPhiPtCorr.SetName("FullJetEMCalEtaPhiPtCorr")
hFullJetEMCalEtaPhiPt.SetName("FullJetEMCalEtaPhiPt")
hFullJetEMCalEtaPhi = hFullJetEMCalEtaPhiPt.Project3D("yx")
outputFilename = os.path.join(outputDirJets, "hFullJetEtaPhiEMCal" + fileFormat)
plotHist(hFullJetEMCalEtaPhi, outputFilename, "colz")
hFullJetEMCalPt = hFullJetEMCalEtaPhiPt.Project3D("z")
if not ispp:
hFullJetEMCalPtCorr = hFullJetEMCalEtaPhiPtCorr.Project3D("z")
# DCal jets -- divide from EMCal by phi cut, and divide from PHOS by |eta| > 0.22 (no fiducial cut on inner eta)
if ispp:
etaMinDCalBinNeg = fullJetTHnSparse.GetAxis(0).FindBin(-0.22)
etaMinDCalBinPos = fullJetTHnSparse.GetAxis(0).FindBin(0.22)
fullJetTHnSparse.GetAxis(1).SetRange(phiDivideBin, 101)
fullJetTHnSparse.GetAxis(0).SetRange(1, etaMinDCalBinNeg)
hFullJetDCalEtaPhiPtNeg = fullJetTHnSparse.Projection(0,1,2, "o")
else:
etaMinDCalBinNeg = fullJetTHnSparse.GetAxis(1).FindBin(-0.22)
etaMinDCalBinPos = fullJetTHnSparse.GetAxis(1).FindBin(0.22)
fullJetTHnSparse.GetAxis(2).SetRange(phiDivideBin, 101)
fullJetTHnSparse.GetAxis(1).SetRange(1, etaMinDCalBinNeg)
hFullJetDCalEtaPhiPtNeg = fullJetTHnSparse.Projection(1,2,3, "o")
hFullJetDCalEtaPhiPtCorrNeg = fullJetTHnSparse.Projection(1,2,4, "o")
hFullJetDCalEtaPhiPtCorrNeg.SetName("FullJetDCalEtaPhiPtCorrNeg")
hFullJetDCalEtaPhiPtNeg.SetName("FullJetDCalEtaPhiPtNeg")
if ispp:
fullJetTHnSparse.GetAxis(1).SetRange(phiDivideBin, 101)
fullJetTHnSparse.GetAxis(0).SetRange(etaMinDCalBinPos, 70)
hFullJetDCalEtaPhiPtPos = fullJetTHnSparse.Projection(0,1,2, "o")
else:
fullJetTHnSparse.GetAxis(2).SetRange(phiDivideBin, 101)
fullJetTHnSparse.GetAxis(1).SetRange(etaMinDCalBinPos, 70)
hFullJetDCalEtaPhiPtPos = fullJetTHnSparse.Projection(1,2,3, "o")
hFullJetDCalEtaPhiPtCorrPos = fullJetTHnSparse.Projection(1,2,4, "o")
hFullJetDCalEtaPhiPtCorrPos.SetName("FullJetDCalEtaPhiPtCorrPos")
hFullJetDCalEtaPhiPtPos.SetName("FullJetDCalEtaPhiPtPos")
# Add the TH3s
hFullJetDCalEtaPhiPt = hFullJetDCalEtaPhiPtNeg.Clone()
hFullJetDCalEtaPhiPt.Add(hFullJetDCalEtaPhiPtPos)
if not ispp:
hFullJetDCalEtaPhiPtCorr = hFullJetDCalEtaPhiPtCorrNeg.Clone()
hFullJetDCalEtaPhiPtCorr.Add(hFullJetDCalEtaPhiPtCorrPos)
# Project to TH2 for eta-phi, and TH1 of pT
hFullJetDCalEtaPhi = hFullJetDCalEtaPhiPt.Project3D("yx")
outputFilename = os.path.join(outputDirJets, "hFullJetEtaPhiDCal" + fileFormat)
plotHist(hFullJetDCalEtaPhi, outputFilename, "colz")
hFullJetDCalPt = hFullJetDCalEtaPhiPt.Project3D("z")
if not ispp:
hFullJetDCalPtCorr = hFullJetDCalEtaPhiPtCorr.Project3D("z")
# Gap jets -- divide from EMCal by phi cut, and divide from PHOS by |eta| > 0.13 and DCal by |eta| < 0.22
if ispp:
etaMinPHOSBin = fullJetTHnSparse.GetAxis(0).FindBin(-0.13)
etaMaxPHOSBin = fullJetTHnSparse.GetAxis(0).FindBin(0.13)
fullJetTHnSparse.GetAxis(1).SetRange(phiDivideBin, 101)
fullJetTHnSparse.GetAxis(0).SetRange(etaMinDCalBinNeg, etaMinPHOSBin)
hFullJetGapEtaPhiPtNeg = fullJetTHnSparse.Projection(0,1,2, "o")
else:
etaMinPHOSBin = fullJetTHnSparse.GetAxis(1).FindBin(-0.13)
etaMaxPHOSBin = fullJetTHnSparse.GetAxis(1).FindBin(0.13)
fullJetTHnSparse.GetAxis(2).SetRange(phiDivideBin, 101)
fullJetTHnSparse.GetAxis(1).SetRange(etaMinDCalBinNeg, etaMinPHOSBin)
hFullJetGapEtaPhiPtNeg = fullJetTHnSparse.Projection(1,2,3, "o")
hFullJetGapEtaPhiPtCorrNeg = fullJetTHnSparse.Projection(1,2,4, "o")
hFullJetGapEtaPhiPtCorrNeg.SetName("FullJetGapEtaPhiPtCorrNeg")
hFullJetGapEtaPhiPtNeg.SetName("FullJetGapEtaPhiPtNeg")
if ispp:
fullJetTHnSparse.GetAxis(1).SetRange(phiDivideBin, 101)
fullJetTHnSparse.GetAxis(0).SetRange(etaMaxPHOSBin, etaMinDCalBinPos)
hFullJetGapEtaPhiPtPos = fullJetTHnSparse.Projection(0,1,2, "o")
else:
fullJetTHnSparse.GetAxis(2).SetRange(phiDivideBin, 101)
fullJetTHnSparse.GetAxis(1).SetRange(etaMaxPHOSBin, etaMinDCalBinPos)
hFullJetGapEtaPhiPtPos = fullJetTHnSparse.Projection(1,2,3, "o")
hFullJetGapEtaPhiPtCorrPos = fullJetTHnSparse.Projection(1,2,4, "o")
hFullJetGapEtaPhiPtCorrPos.SetName("FullJetGapEtaPhiPtCorrPos")
hFullJetGapEtaPhiPtPos.SetName("FullJetGapEtaPhiPtPos")
# Add the TH3s
hFullJetGapEtaPhiPt = hFullJetGapEtaPhiPtNeg.Clone()
hFullJetGapEtaPhiPt.Add(hFullJetGapEtaPhiPtPos)
if not ispp:
hFullJetGapEtaPhiPtCorr = hFullJetGapEtaPhiPtCorrNeg.Clone()
hFullJetGapEtaPhiPtCorr.Add(hFullJetGapEtaPhiPtCorrPos)
# Project to TH2 for eta-phi, and TH1 of pT
hFullJetGapEtaPhi = hFullJetGapEtaPhiPt.Project3D("yx")
outputFilename = os.path.join(outputDirJets, "hFullJetEtaPhiGap" + fileFormat)
plotHist(hFullJetGapEtaPhi, outputFilename, "colz")
hFullJetGapPt = hFullJetGapEtaPhiPt.Project3D("z")
if not ispp:
hFullJetGapPtCorr = hFullJetGapEtaPhiPtCorr.Project3D("z")
# PHOS jets -- divide from EMCal by phi cut, and divide from DCal by eta < 0.13 (no fiducial cut on inner eta)
# fiducial cut on DCal (kDCALfid) ensures that remaining region is only PHOS
if ispp:
fullJetTHnSparse.GetAxis(1).SetRange(phiDivideBin, 101)
fullJetTHnSparse.GetAxis(0).SetRange(etaMinPHOSBin, etaMaxPHOSBin)
hFullJetPHOSEtaPhiPt = fullJetTHnSparse.Projection(0,1,2, "o")
else:
fullJetTHnSparse.GetAxis(2).SetRange(phiDivideBin, 101)
fullJetTHnSparse.GetAxis(1).SetRange(etaMinPHOSBin, etaMaxPHOSBin)
hFullJetPHOSEtaPhiPt = fullJetTHnSparse.Projection(1,2,3, "o")
hFullJetPHOSEtaPhiPtCorr = fullJetTHnSparse.Projection(1,2,4, "o")
hFullJetPHOSEtaPhiPtCorr.SetName("FullJetPHOSEtaPhiPtCorr")
hFullJetPHOSEtaPhiPt.SetName("FullJetPHOSEtaPhiPt")
hFullJetPHOSEtaPhi = hFullJetPHOSEtaPhiPt.Project3D("yx")
outputFilename = os.path.join(outputDirJets, "hFullJetEtaPhiPHOS" + fileFormat)
plotHist(hFullJetPHOSEtaPhi, outputFilename, "colz")
hFullJetPtRef = ""
if fullJetListRef:
if ispp:
hFullJetPtRef = fullJetTHnSparseRef.Projection(2)
else:
hFullJetPtRef = fullJetTHnSparseRef.Projection(3)
hFullJetPtCorrRef = fullJetTHnSparseRef.Projection(4)
hFullJetPtCorrRef.SetName("hFullJetPtCorr")
hFullJetPtRef.SetName("hFullJetPt")
hFullJetPHOSPt = hFullJetPHOSEtaPhiPt.Project3D("z")
if not ispp:
hFullJetPHOSPtCorr = hFullJetPHOSEtaPhiPtCorr.Project3D("z")
# Now plot the EMCal/DCal/PHOS jet pT spectra and their ratio to the reference
outputFilename = os.path.join(outputDirJets, "hFullJetPtCalo" + fileFormat)
xRangeMax = 250
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dp_{T}} [GeV^{-1}]"
legendTitle = "Full jets"
legendRunLabel = "EMCal jets"
if fullJetListRef:
legendRefLabel = "All full jets"
ratioYAxisTitle = "Ratio to all"
DCalLegendLabel = "DCal jets"
PHOSLegendLabel = "PHOS jets"
plotSpectra(hFullJetEMCalPt, hFullJetPtRef, hFullJetDCalPt, hFullJetPHOSPt, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "", DCalLegendLabel, PHOSLegendLabel)
else:
legendRefLabel = "PHOS jets"
ratioYAxisTitle = "Ratio to PHOS"
h2LegendLabel = "DCal jets"
plotSpectra(hFullJetEMCalPt, hFullJetPHOSPt, hFullJetDCalPt, "", nEvents, nEvents, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "", h2LegendLabel)
# And plot the background subtracted EMCal/DCal/PHOS jet pT spectra and their ratio to the reference
if not ispp:
outputFilename = os.path.join(outputDirJets, "hFullJetPtCorrCalo" + fileFormat)
xRangeMax = 250
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dp_{T}} [GeV^{-1}]"
legendTitle = "Full jets, background subtracted"
legendRunLabel = "EMCal jets"
if fullJetListRef:
legendRefLabel = "All full jets"
ratioYAxisTitle = "Ratio to all"
DCalLegendLabel = "DCal jets"
PHOSLegendLabel = "PHOS jets"
plotSpectra(hFullJetEMCalPtCorr, hFullJetPtCorrRef, hFullJetDCalPtCorr, hFullJetPHOSPtCorr, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "", DCalLegendLabel, PHOSLegendLabel)
else:
legendRefLabel = "PHOS jets"
ratioYAxisTitle = "Ratio to PHOS"
h2LegendLabel = "DCal jets"
plotSpectra(hFullJetEMCalPtCorr, hFullJetPHOSPtCorr, hFullJetDCalPtCorr, "", nEvents, nEvents, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "", h2LegendLabel)
########################################################################################################
# Plot event histograms ##############################################################################
########################################################################################################
def plotEventQA(ispp, isRun2, includePhos, qaList, outputDir, fileFormat):
histNEvent = qaList.FindObject("fHistEventCount")
nEvents = histNEvent.GetBinContent(1)
#print("N events: %d" % nEvents)
# Create subdirectory for EventQA
outputDirEventQA = outputDir + "EventQA/"
if not os.path.exists(outputDirEventQA):
os.makedirs(outputDirEventQA)
eventQATHnSparse = qaList.FindObject("eventQA")
# (Centrality, N tracks, pT leading track, N clusters, leading cluster E)
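# In pp the centrality axis is absent: (N tracks, pT leading track, N clusters, leading cluster E).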
if ispp:
# N tracks
hEventNtracks = eventQATHnSparse.Projection(0)
outputFilename = os.path.join(outputDirEventQA, "hEventNtracks" + fileFormat)
plotHist(hEventNtracks, outputFilename, "hist E")
# N clusters
hEventNclusters = eventQATHnSparse.Projection(2)
outputFilename = os.path.join(outputDirEventQA, "hEventNclusters" + fileFormat)
plotHist(hEventNclusters, outputFilename, "hist E")
else:
# N tracks vs. Centrality
hEventNtracksCentrality = eventQATHnSparse.Projection(1,0)
outputFilename = os.path.join(outputDirEventQA, "hEventNtracksCentrality" + fileFormat)
plotHist(hEventNtracksCentrality, outputFilename, "colz", False, True)
# N clusters vs. Centrality
hEventNclustersCentrality = eventQATHnSparse.Projection(3,0)
outputFilename = os.path.join(outputDirEventQA, "hEventNclustersCentrality" + fileFormat)
plotHist(hEventNclustersCentrality, outputFilename, "colz", False, True)
if ispp:
# Plot leading cluster energy
hEventEmcalLeadClusE = eventQATHnSparse.Projection(3)
outputFilename = os.path.join(outputDirEventQA, "hEventLeadClusE" + fileFormat)
plotHist(hEventEmcalLeadClusE, outputFilename, "hist E", True)
else:
# Plot leading cluster energy vs. Centrality
hEventLeadClusECentrality = eventQATHnSparse.Projection(4,0)
outputFilename = os.path.join(outputDirEventQA, "hEventLeadClusECentrality" + fileFormat)
plotHist(hEventLeadClusECentrality, outputFilename, "colz", False, True)
# Event rejection reasons
EventCutList = qaList.FindObject("EventCutOutput")
hEventReject = EventCutList.FindObject("fCutStats")
hEventReject.GetYaxis().SetTitle("N events accepted")
outputFilename = os.path.join(outputDirEventQA, "hEventReject" + fileFormat)
textNEvents = ROOT.TLatex()
textNEvents.SetNDC()
textNEvents.DrawLatex(0.65,0.87,"#it{N}_{events} = %d" % nEvents)
plotHist(hEventReject, outputFilename, "hist", False)
########################################################################################################
# Plot Pt-hard histograms ##############################################################################
########################################################################################################
def plotPtHard(f, qaList, nEvents, qaListRef, nEventsRef, outputDir, fileFormat):
# Note: errors have not been propagated correctly for Pt-hard histos, so we do not plot them.
# Create subdirectory for PtHard
outputDirPtHard = outputDir + "PtHard/"
if not os.path.exists(outputDirPtHard):
os.makedirs(outputDirPtHard)
ROOT.gStyle.SetOptTitle(1)
hNEvents = f.Get("hNEventsAcc")
outputFilename = os.path.join(outputDirPtHard, "hPtHardNEvents" + fileFormat)
plotHist(hNEvents, outputFilename, "hist")
hXSecPerEvent = f.Get("hXSecPerEvent")
if hXSecPerEvent:
outputFilename = os.path.join(outputDirPtHard, "hPtHardXSecPerEvent" + fileFormat)
plotHist(hXSecPerEvent, outputFilename, "hist", True)
hNTrialsPerEvent = f.Get("hNTrialsPerEvent")
if hNTrialsPerEvent:
outputFilename = os.path.join(outputDirPtHard, "hPtHardNTrialsPerEvent" + fileFormat)
plotHist(hNTrialsPerEvent, outputFilename, "hist")
hScaleFactor = f.Get("hScaleFactor")
if hScaleFactor:
outputFilename = os.path.join(outputDirPtHard, "hPtHardScaleFactor" + fileFormat)
plotHist(hScaleFactor, outputFilename, "hist", True)
hPtHard = qaList.FindObject("hPtHard")
outputFilename = os.path.join(outputDirPtHard, "hPtHard" + fileFormat)
plotHist(hPtHard, outputFilename, "hist", True)
#if a reference is provided
if qaListRef:
hPtHardRef = qaListRef.FindObject("hPtHard")
outputFilename = os.path.join(outputDirPtHard, "hPtHard_Ratio" + fileFormat)
xRangeMax = 100
yAxisTitle = "#frac{1}{N_{evts}}#frac{dN}{dp_{T}} [GeV^{-1}]"
legendTitle = "pT Hard production"
legendRunLabel = "hPtHard this run"
legendRefLabel = "hPtHard all runs"
ratioYAxisTitle = "Ratio: run / all runs"
hPtHardRef.SetLineColor(1)
ispp=1
if nEventsRef!=0:
plotSpectra(hPtHard, hPtHardRef,0x0, 0x0, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, "1", "2", "3")
########################################################################################################
# Plot basic histogram ##############################################################################
########################################################################################################
def plotHist(h, outputFilename, drawOptions = "", setLogy = False, setLogz = False):
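# Draw a single histogram on its own canvas and save it to outputFilename.
# drawOptions is passed directly to Draw(); setLogy/setLogz switch the canvas to log scale.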
c = ROOT.TCanvas("c","c: hist",600,450)
c.cd()
if setLogy:
c.SetLogy()
if setLogz:
c.SetLogz()
h.Draw(drawOptions)
c.SaveAs(outputFilename)
c.Close()
########################################################################################################
# Plot spectra (and ratio, if reference file supplied) ################################################
########################################################################################################
def plotSpectra(h, hRef, h2, h3, nEvents, nEventsRef, ispp, xRangeMax, yAxisTitle, legendTitle, legendRunLabel, legendRefLabel, ratioYAxisTitle, outputFilename, scalingOptions = "", h2LegendLabel = "", h3LegendLabel = ""):
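# Scale h (and optionally h2, h3) by 1/nEvents with the given scalingOptions and draw them
# on a log-y canvas. If hRef is supplied, it is scaled by 1/nEventsRef and a lower pad is
# added showing the ratios h/hRef (and h2/hRef, h3/hRef).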
h.SetLineColor(1)
h.SetLineWidth(2)
h.SetLineStyle(1)
h.Scale(1./nEvents, scalingOptions)
h.GetYaxis().SetTitle(yAxisTitle)
h.GetYaxis().SetTitleSize(0.06)
h.GetXaxis().SetRangeUser(0,xRangeMax)
h.GetYaxis().SetRangeUser(2e-9,2e3)
if ispp:
h.GetYaxis().SetRangeUser(2e-11,20)
h.GetYaxis().SetLabelFont(43)
h.GetYaxis().SetLabelSize(20)
if h2:
h2.SetLineColor(2)
h2.SetLineWidth(2)
h2.SetLineStyle(1)
h2.Scale(1./nEvents, scalingOptions)
h2.GetYaxis().SetTitle(yAxisTitle)
h2.GetYaxis().SetTitleSize(0.06)
h2.GetXaxis().SetRangeUser(0,xRangeMax)
h2.GetYaxis().SetRangeUser(2e-9,2e3)
if ispp:
h2.GetYaxis().SetRangeUser(2e-11,20)
h2.GetYaxis().SetLabelFont(43)
h2.GetYaxis().SetLabelSize(20)
h2.GetXaxis().SetTitleOffset(1.4)
if h3:
h3.SetLineStyle(1)
h3.SetLineColor(4)
h3.SetLineWidth(2)
h3.Scale(1./nEvents, scalingOptions)
if not hRef:
c = ROOT.TCanvas("c","c: pT",600,450)
c.cd()
ROOT.gPad.SetLeftMargin(0.16)
ROOT.gPad.SetRightMargin(0.05)
ROOT.gPad.SetBottomMargin(0.14)
ROOT.gPad.SetTopMargin(0.05)
ROOT.gPad.SetLogy()
if h2 and h3:
h2.Draw("hist E")
h3.Draw("hist E same")
h.Draw("hist E same")
elif h2:
h2.Draw("hist E")
h.Draw("hist E same")
else:
h.Draw("hist E")
if hRef:
c = ROOT.TCanvas("c","c: pT",800,850)
c.cd()
pad1 = ROOT.TPad("pad1", "pad1", 0, 0.3, 1, 1.0)
pad1.SetBottomMargin(0)
pad1.SetLeftMargin(0.15)
pad1.SetRightMargin(0.05)
pad1.SetTopMargin(0.05)
pad1.SetLogy()
pad1.Draw()
pad1.cd()
if h2 and h3:
h2.Draw("hist")
h3.Draw("hist same")
h.Draw("hist same")
elif h2:
h2.Draw("hist E")
h.Draw("hist E same")
else:
h.Draw("hist E")
hRef.SetLineColor(8)
if h2 and not h3: # hack to keep color scheme consistent in cluster spectra ratio
hRef.SetLineColor(4)
hRef.SetMarkerColor(1)
hRef.SetLineStyle(1)
hRef.Scale(1./nEventsRef, scalingOptions)
hRef.Draw("hist E same")
c.cd()
pad2 = ROOT.TPad("pad2", "pad2", 0, 0.05, 1, 0.3)
pad2.SetTopMargin(0)
pad2.SetBottomMargin(0.35)
pad2.SetLeftMargin(0.15)
pad2.SetRightMargin(0.05)
pad2.Draw()
pad2.cd()
hRatio = h.Clone()
hRatio.Divide(hRef)
hRatio.SetMarkerStyle(20)
hRatio.SetMarkerSize(0.5)
hRatio.SetMarkerColor(1)
if h2:
hRatio2 = h2.Clone()
hRatio2.Divide(hRef)
hRatio2.SetMarkerStyle(21)
hRatio2.SetMarkerColor(2)
hRatio2.GetYaxis().SetTitle(ratioYAxisTitle)
hRatio2.GetYaxis().SetTitleSize(20)
hRatio2.GetYaxis().SetTitleFont(43)
hRatio2.GetYaxis().SetTitleOffset(2.2)
hRatio2.GetYaxis().SetLabelFont(43)
hRatio2.GetYaxis().SetLabelSize(20)
hRatio2.GetYaxis().SetNdivisions(505)
hRatio2.GetYaxis().SetRangeUser(0,2.2)
if ratioYAxisTitle == "Ratio to all":
hRatio2.GetYaxis().SetRangeUser(0,1.2)
hRatio2.GetXaxis().SetRangeUser(0,xRangeMax)
hRatio2.GetXaxis().SetTitleSize(30)
hRatio2.GetXaxis().SetTitleFont(43)
hRatio2.GetXaxis().SetTitleOffset(4.)
hRatio2.GetXaxis().SetLabelFont(43)
hRatio2.GetXaxis().SetLabelSize(20)
if h3:
hRatio3 = h3.Clone()
hRatio3.Divide(hRef)
hRatio3.SetMarkerStyle(21)
hRatio3.SetMarkerColor(4)
if h2 and h3:
hRatio2.Draw("P E")
hRatio3.Draw("P E same")
hRatio.Draw("P E same")
elif h2:
hRatio2.GetYaxis().SetRangeUser(0,25)
hRatio2.Draw("P E")
hRatio.Draw("P E same")
if not h2 and not h3:
hRatio.GetYaxis().SetTitle(ratioYAxisTitle)
hRatio.GetYaxis().SetTitleSize(20)
hRatio.GetYaxis().SetTitleFont(43)
hRatio.GetYaxis().SetTitleOffset(2.2)
hRatio.GetYaxis().SetLabelFont(43)
hRatio.GetYaxis().SetLabelSize(20)
hRatio.GetYaxis().SetNdivisions(505)
hRatio.GetYaxis().SetRangeUser(0,2.2)
hRatio.GetXaxis().SetRangeUser(0,xRangeMax)
hRatio.GetXaxis().SetTitleSize(30)
hRatio.GetXaxis().SetTitleFont(43)
hRatio.GetXaxis().SetTitleOffset(4.)
hRatio.GetXaxis().SetLabelFont(43)
hRatio.GetXaxis().SetLabelSize(20)
hRatio.Draw("P E")
pad1.cd()
if nEvents > 1:
textNEvents = ROOT.TLatex()
textNEvents.SetNDC()
textNEvents.DrawLatex(0.55,0.6,"#it{N}_{events} = %d" % nEvents)
leg2 = ROOT.TLegend(0.3,0.7,0.88,0.93,legendTitle)
leg2.SetFillColor(10)
leg2.SetBorderSize(0)
leg2.SetFillStyle(0)
leg2.SetTextSize(0.04)
leg2.AddEntry(h, legendRunLabel, "l")
if h2:
leg2.AddEntry(h2, h2LegendLabel, "l")
if h3:
leg2.AddEntry(h3, h3LegendLabel, "l")
if hRef:
leg2.AddEntry(hRef, legendRefLabel, "l")
leg2.Draw("same")
c.SaveAs(outputFilename)
c.Close()
########################################################################################################
# Plot spectra and ratio (h2,h3 will be divided by h) ################################################
########################################################################################################
def plotSpectraCent(h, h2, h3, nEvents, ispp, outputFilename, xRangeMax, yAxisTitle, ratioYAxisTitle, legendTitle, h1legendLabel, h2legendLabel, h3legendLabel = "", scalingOptions = "", yRatioMax = 32):
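# All input histograms are scaled by the same 1/nEvents; the lower pad shows h2/h
# (and h3/h, if h3 is given), with the ratio-axis maximum set by yRatioMax.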
h.SetLineColor(4)
if not h3:
h.SetLineColor(2)
h.SetLineWidth(2)
h.SetLineStyle(1)
h.Scale(1./nEvents, scalingOptions)
h.GetYaxis().SetTitle(yAxisTitle)
h.GetYaxis().SetTitleSize(0.06)
h.GetXaxis().SetRangeUser(0,xRangeMax)
h.GetYaxis().SetRangeUser(2e-9,2e3)
if ispp:
h.GetYaxis().SetRangeUser(2e-11,20)
h.GetYaxis().SetLabelFont(43)
h.GetYaxis().SetLabelSize(20)
h2.SetLineColor(1)
h2.SetLineWidth(2)
h2.SetLineStyle(1)
h2.Scale(1./nEvents, scalingOptions)
h2.GetYaxis().SetTitle(yAxisTitle)
h2.GetYaxis().SetTitleSize(0.06)
h2.GetXaxis().SetRangeUser(0,xRangeMax)
h2.GetYaxis().SetRangeUser(2e-9,2e3)
if ispp:
h2.GetYaxis().SetRangeUser(2e-11,20)
h2.GetYaxis().SetLabelFont(43)
h2.GetYaxis().SetLabelSize(20)
if h3:
h3.SetLineStyle(1)
h3.SetLineColor(2)
h3.SetLineWidth(2)
h3.Scale(1./nEvents, scalingOptions)
c = ROOT.TCanvas("c","c: pT",800,850)
c.cd()
pad1 = ROOT.TPad("pad1", "pad1", 0, 0.3, 1, 1.0)
pad1.SetBottomMargin(0)
pad1.SetLeftMargin(0.15)
pad1.SetRightMargin(0.05)
pad1.SetTopMargin(0.05)
pad1.SetLogy()
pad1.Draw()
pad1.cd()
if h3:
h2.Draw("hist E")
h3.Draw("hist E same")
h.Draw("hist E same")
else:
h2.Draw("hist E")
h.Draw("hist E same")
c.cd()
pad2 = ROOT.TPad("pad2", "pad2", 0, 0.05, 1, 0.3)
pad2.SetTopMargin(0)
pad2.SetBottomMargin(0.35)
pad2.SetLeftMargin(0.15)
pad2.SetRightMargin(0.05)
pad2.Draw()
pad2.cd()
hRatio = h2.Clone()
hRatio.Divide(h)
hRatio.SetMarkerStyle(21)
hRatio.SetMarkerColor(1)
if h3:
hRatio2 = h3.Clone()
hRatio2.Divide(h)
hRatio2.SetMarkerStyle(21)
hRatio2.SetMarkerColor(2)
hRatio2.GetYaxis().SetTitle(ratioYAxisTitle)
hRatio2.GetYaxis().SetTitleSize(20)
hRatio2.GetYaxis().SetTitleFont(43)
hRatio2.GetYaxis().SetTitleOffset(2.2)
hRatio2.GetYaxis().SetLabelFont(43)
hRatio2.GetYaxis().SetLabelSize(20)
hRatio2.GetYaxis().SetNdivisions(505)
hRatio2.GetYaxis().SetRangeUser(0,yRatioMax)
hRatio2.GetXaxis().SetRangeUser(0,xRangeMax)
hRatio2.GetXaxis().SetTitleSize(30)
hRatio2.GetXaxis().SetTitleFont(43)
hRatio2.GetXaxis().SetTitleOffset(4.)
hRatio2.GetXaxis().SetLabelFont(43)
hRatio2.GetXaxis().SetLabelSize(20)
hRatio2.Draw("P E")
hRatio.Draw("P E same")
else:
hRatio.GetYaxis().SetTitle(ratioYAxisTitle)
hRatio.GetYaxis().SetTitleSize(20)
hRatio.GetYaxis().SetTitleFont(43)
hRatio.GetYaxis().SetTitleOffset(2.2)
hRatio.GetYaxis().SetLabelFont(43)
hRatio.GetYaxis().SetLabelSize(20)
hRatio.GetYaxis().SetNdivisions(505)
hRatio.GetYaxis().SetRangeUser(0,yRatioMax)
hRatio.GetXaxis().SetRangeUser(0,xRangeMax)
hRatio.GetXaxis().SetTitleSize(30)
hRatio.GetXaxis().SetTitleFont(43)
hRatio.GetXaxis().SetTitleOffset(4.)
hRatio.GetXaxis().SetLabelFont(43)
hRatio.GetXaxis().SetLabelSize(20)
hRatio.Draw("P E")
pad1.cd()
leg = ROOT.TLegend(0.3,0.7,0.88,0.93,legendTitle)
leg.SetFillColor(10)
leg.SetBorderSize(0)
leg.SetFillStyle(0)
leg.SetTextSize(0.04)
leg.AddEntry(h2, h2legendLabel, "l")
if h3:
leg.AddEntry(h3, h3legendLabel, "l")
leg.AddEntry(h, h1legendLabel, "l")
leg.Draw("same")
c.SaveAs(outputFilename)
c.Close()
########################################################################################################
# Plot NEF spectra (no ratio panel) ###################################################################
########################################################################################################
def plotNEFSpectra(h, h2, h3, nEvents, ispp, xRangeMax, yAxisTitle, h1legendLabel, outputFilename, scalingOptions = "", h2legendLabel = "", h3legendLabel = ""):
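# Draw the NEF distributions on a single log-y canvas (no ratio pad); h and h2 are scaled
# by 1/nEvents, while h3, if supplied, is drawn as passed in.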
h.SetLineColor(1)
h.SetLineWidth(2)
h.SetLineStyle(1)
h.Scale(1./nEvents, scalingOptions)
if ispp:
h.GetYaxis().SetRangeUser(0.0000005, 0.05)
h.GetYaxis().SetTitle(yAxisTitle)
h.GetYaxis().SetTitleSize(0.06)
h.GetXaxis().SetRangeUser(0,xRangeMax)
h.GetYaxis().SetLabelFont(43)
h.GetYaxis().SetLabelSize(20)
if h2:
h2.SetLineColor(2)
h2.SetLineWidth(2)
h2.SetLineStyle(1)
h2.Scale(1./nEvents, scalingOptions)
h2.GetYaxis().SetTitle(yAxisTitle)
h2.GetYaxis().SetTitleSize(0.06)
h2.GetXaxis().SetRangeUser(0,xRangeMax)
#h2.GetYaxis().SetRangeUser(2e-9,2e3)
if ispp:
h2.GetYaxis().SetRangeUser(5e-7,0.05)
h2.GetYaxis().SetLabelFont(43)
h2.GetYaxis().SetLabelSize(20)
h2.GetXaxis().SetTitleOffset(1.4)
c = ROOT.TCanvas("c","c: hist",600,450)
c.cd().SetLogy()
ROOT.gPad.SetLeftMargin(0.16)
ROOT.gPad.SetRightMargin(0.05)
ROOT.gPad.SetBottomMargin(0.14)
ROOT.gPad.SetTopMargin(0.05)
if h3:
h2.Draw("hist E")
h3.Draw("hist E same")
h.Draw("hist E same")
else:
h2.Draw("hist E")
h.Draw("hist E same")
leg = ROOT.TLegend(0.3,0.7,0.88,0.93)
leg.SetFillColor(10)
leg.SetBorderSize(0)
leg.SetFillStyle(0)
leg.SetTextSize(0.04)
leg.AddEntry(h, h1legendLabel, "l")
if h3:
leg.AddEntry(h3, h3legendLabel, "l")
leg.AddEntry(h2, h2legendLabel, "l")
leg.Draw("same")
c.SaveAs(outputFilename)
c.Close()
#########################################################################################
# Function to iterate recursively through an object to set Sumw2 on all TH1/TH2/THnSparse
#########################################################################################
def SetSumw2(obj):
if obj.InheritsFrom(ROOT.TProfile.Class()):
pass
#print("Sumw2 not called for TProfile %s" % obj.GetName())
elif obj.InheritsFrom(ROOT.TH2.Class()):
obj.Sumw2()
#print("Sumw2 called on TH2 %s" % obj.GetName())
elif obj.InheritsFrom(ROOT.TH1.Class()):
obj.Sumw2()
#print("Sumw2 called on TH1 %s" % obj.GetName())
elif obj.InheritsFrom(ROOT.THnSparse.Class()):
obj.Sumw2()
#print("Sumw2 called on THnSparse %s" % obj.GetName())
else:
#print("Not a histogram!")
#print obj.GetName()
for subobj in obj:
SetSumw2(subobj)
#---------------------------------------------------------------------------------------------------
if __name__ == '__main__':
# Define arguments
parser = argparse.ArgumentParser(description="Plot PWGJE QA histograms, optionally comparing to a reference file")
parser.add_argument("-f", "--inputFile", action="store",
type=str, metavar="inputFile",
default="AnalysisResults.root",
help="Path of AnalysisResults.root file")
parser.add_argument("-o", "--outputDir", action="store",
type=str, metavar="outputDir",
default="./outputQA/",
help="Output directory for QA plots to be written to")
parser.add_argument("-r", "--referenceFile", action="store",
type=str, metavar="referenceFile",
default="",
help="Reference root file for the inputFile histos to be compared to (when doing run-by-run QA)")
parser.add_argument("-i", "--imageFormat", action="store",
type=str, metavar="imageFormat",
default=".pdf",
help="Image format to save plots in, e.g. \".pdf\" or \".png\"")
# Parse the arguments
args = parser.parse_args()
print("Configuring...")
print("inputFile: \"{0}\"".format(args.inputFile))
print("ouputDir: \"{0}\"".format(args.outputDir))
print("referenceFile: \"{0}\"".format(args.referenceFile))
print("imageFormat: \"{0}\"".format(args.imageFormat))
# If invalid inputFile is given, exit
if not os.path.exists(args.inputFile):
print("File \"{0}\" does not exist! Exiting!".format(args.inputFile))
sys.exit(1)
plotPWGJEQA(inputFile = args.inputFile, outputDir = args.outputDir, referenceFile = args.referenceFile, fileFormat = args.imageFormat)
| bsd-3-clause |
eduNEXT/edx-platform | openedx/core/djangoapps/credit/migrations/0001_initial.py | 4 | 12583 | import django.core.validators
import django.db.models.deletion
import django.utils.timezone
import jsonfield.fields
import model_utils.fields
from django.conf import settings
from django.db import migrations, models
from opaque_keys.edx.django.models import CourseKeyField
import openedx.core.djangoapps.credit.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='CreditCourse',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('course_key', CourseKeyField(unique=True, max_length=255, db_index=True)),
('enabled', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='CreditEligibility',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('username', models.CharField(max_length=255, db_index=True)),
('deadline', models.DateTimeField(default=openedx.core.djangoapps.credit.models.default_deadline_for_credit_eligibility, help_text='Deadline for purchasing and requesting credit.')),
('course', models.ForeignKey(related_name='eligibilities', to='credit.CreditCourse', on_delete=models.CASCADE)),
],
options={
'verbose_name_plural': 'Credit eligibilities',
},
),
migrations.CreateModel(
name='CreditProvider',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('provider_id', models.CharField(help_text='Unique identifier for this credit provider. Only alphanumeric characters and hyphens (-) are allowed. The identifier is case-sensitive.', unique=True, max_length=255, validators=[django.core.validators.RegexValidator(regex='[a-z,A-Z,0-9,\\-]+', message='Only alphanumeric characters and hyphens (-) are allowed', code='invalid_provider_id')])),
('active', models.BooleanField(default=True, help_text='Whether the credit provider is currently enabled.')),
('display_name', models.CharField(help_text='Name of the credit provider displayed to users', max_length=255)),
('enable_integration', models.BooleanField(default=False, help_text='When true, automatically notify the credit provider when a user requests credit. In order for this to work, a shared secret key MUST be configured for the credit provider in secure auth settings.')),
('provider_url', models.URLField(default='', help_text='URL of the credit provider. If automatic integration is enabled, this will the the end-point that we POST to to notify the provider of a credit request. Otherwise, the user will be shown a link to this URL, so the user can request credit from the provider directly.')),
('provider_status_url', models.URLField(default='', help_text='URL from the credit provider where the user can check the status of his or her request for credit. This is displayed to students *after* they have requested credit.')),
('provider_description', models.TextField(default='', help_text='Description for the credit provider displayed to users.')),
('fulfillment_instructions', models.TextField(help_text='Plain text or html content for displaying further steps on receipt page *after* paying for the credit to get credit for a credit course against a credit provider.', null=True, blank=True)),
('eligibility_email_message', models.TextField(default='', help_text='Plain text or html content for displaying custom message inside credit eligibility email content which is sent when user has met all credit eligibility requirements.')),
('receipt_email_message', models.TextField(default='', help_text='Plain text or html content for displaying custom message inside credit receipt email content which is sent *after* paying to get credit for a credit course.')),
('thumbnail_url', models.URLField(default='', help_text='Thumbnail image url of the credit provider.', max_length=255)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CreditRequest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('uuid', models.CharField(unique=True, max_length=32, db_index=True)),
('username', models.CharField(max_length=255, db_index=True)),
('parameters', jsonfield.fields.JSONField()),
('status', models.CharField(default='pending', max_length=255, choices=[('pending', 'Pending'), ('approved', 'Approved'), ('rejected', 'Rejected')])),
('course', models.ForeignKey(related_name='credit_requests', to='credit.CreditCourse', on_delete=models.CASCADE)),
('provider', models.ForeignKey(related_name='credit_requests', to='credit.CreditProvider', on_delete=models.CASCADE)),
],
options={
'get_latest_by': 'created',
},
),
migrations.CreateModel(
name='CreditRequirement',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('namespace', models.CharField(max_length=255)),
('name', models.CharField(max_length=255)),
('display_name', models.CharField(default='', max_length=255)),
('order', models.PositiveIntegerField(default=0)),
('criteria', jsonfield.fields.JSONField()),
('active', models.BooleanField(default=True)),
('course', models.ForeignKey(related_name='credit_requirements', to='credit.CreditCourse', on_delete=models.CASCADE)),
],
options={
'ordering': ['order'],
},
),
migrations.CreateModel(
name='CreditRequirementStatus',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('username', models.CharField(max_length=255, db_index=True)),
('status', models.CharField(max_length=32, choices=[('satisfied', 'satisfied'), ('failed', 'failed'), ('declined', 'declined')])),
('reason', jsonfield.fields.JSONField(default={})),
('requirement', models.ForeignKey(related_name='statuses', to='credit.CreditRequirement', on_delete=models.CASCADE)),
],
),
migrations.CreateModel(
name='HistoricalCreditRequest',
fields=[
('id', models.IntegerField(verbose_name='ID', db_index=True, auto_created=True, blank=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('uuid', models.CharField(max_length=32, db_index=True)),
('username', models.CharField(max_length=255, db_index=True)),
('parameters', jsonfield.fields.JSONField()),
('status', models.CharField(default='pending', max_length=255, choices=[('pending', 'Pending'), ('approved', 'Approved'), ('rejected', 'Rejected')])),
('history_id', models.AutoField(serialize=False, primary_key=True)),
('history_date', models.DateTimeField()),
('history_type', models.CharField(max_length=1, choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')])),
('course', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to='credit.CreditCourse', null=True)),
('history_user', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),
('provider', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to='credit.CreditProvider', null=True)),
],
options={
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
'verbose_name': 'historical credit request',
},
),
migrations.CreateModel(
name='HistoricalCreditRequirementStatus',
fields=[
('id', models.IntegerField(verbose_name='ID', db_index=True, auto_created=True, blank=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('username', models.CharField(max_length=255, db_index=True)),
('status', models.CharField(max_length=32, choices=[('satisfied', 'satisfied'), ('failed', 'failed'), ('declined', 'declined')])),
('reason', jsonfield.fields.JSONField(default={})),
('history_id', models.AutoField(serialize=False, primary_key=True)),
('history_date', models.DateTimeField()),
('history_type', models.CharField(max_length=1, choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')])),
('history_user', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),
('requirement', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to='credit.CreditRequirement', null=True)),
],
options={
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
'verbose_name': 'historical credit requirement status',
},
),
migrations.AlterUniqueTogether(
name='creditrequirementstatus',
unique_together={('username', 'requirement')},
),
migrations.AlterUniqueTogether(
name='creditrequirement',
unique_together={('namespace', 'name', 'course')},
),
migrations.AlterUniqueTogether(
name='creditrequest',
unique_together={('username', 'course', 'provider')},
),
migrations.AlterUniqueTogether(
name='crediteligibility',
unique_together={('username', 'course')},
),
]
| agpl-3.0 |
hyperized/ansible | lib/ansible/plugins/inventory/constructed.py | 42 | 5477 | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: constructed
plugin_type: inventory
version_added: "2.4"
short_description: Uses Jinja2 to construct vars and groups based on existing inventory.
description:
        - Uses a YAML configuration file with a valid YAML or C(.config) extension to define var expressions and group conditionals.
        - Jinja2 conditionals determine whether a host qualifies for membership in a constructed group.
        - Jinja2 expressions are evaluated and the results are assigned to host variables.
- Only variables already available from previous inventories or the fact cache can be used for templating.
- When I(strict) is False, failed expressions will be ignored (assumes vars were missing).
options:
plugin:
description: token that ensures this is a source file for the 'constructed' plugin.
required: True
choices: ['constructed']
extends_documentation_fragment:
- constructed
'''
EXAMPLES = r'''
# inventory.config file in YAML format
plugin: constructed
strict: False
compose:
var_sum: var1 + var2
# this variable will only be set if I have a persistent fact cache enabled (and have non expired facts)
# `strict: False` will skip this instead of producing an error if it is missing facts.
server_type: "ansible_hostname | regex_replace ('(.{6})(.{2}).*', '\\2')"
groups:
# simple name matching
webservers: inventory_hostname.startswith('web')
# using ec2 'tags' (assumes aws inventory)
development: "'devel' in (ec2_tags|list)"
# using other host properties populated in inventory
private_only: not (public_dns_name is defined or ip_address is defined)
# complex group membership
multi_group: (group_names|intersection(['alpha', 'beta', 'omega']))|length >= 2
keyed_groups:
# this creates a group per distro (distro_CentOS, distro_Debian) and assigns the hosts that have matching values to it,
# using the default separator "_"
- prefix: distro
key: ansible_distribution
# the following examples assume the first inventory is from contrib/inventory/ec2.py
# this creates a group per ec2 architecture and assign hosts to the matching ones (arch_x86_64, arch_sparc, etc)
- prefix: arch
key: ec2_architecture
# this creates a group per ec2 region like "us_west_1"
- prefix: ""
separator: ""
key: ec2_region
# this creates a common parent group for all ec2 availability zones
- key: ec2_placement
parent_group: all_ec2_zones
'''
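# Illustrative invocation (an assumption, not part of the plugin itself): with
# the YAML above saved as inventory.config next to a static inventory file
# named "hosts" (both file names are examples only), the constructed groups can
# be inspected with the stock ansible-inventory CLI:
#
#   ansible-inventory -i hosts -i inventory.config --graph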
import os
from ansible import constants as C
from ansible.errors import AnsibleParserError
from ansible.inventory.helpers import get_group_vars
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible.module_utils._text import to_native
from ansible.utils.vars import combine_vars
from ansible.vars.fact_cache import FactCache
class InventoryModule(BaseInventoryPlugin, Constructable):
""" constructs groups and vars using Jinja2 template expressions """
NAME = 'constructed'
def __init__(self):
super(InventoryModule, self).__init__()
self._cache = FactCache()
def verify_file(self, path):
valid = False
if super(InventoryModule, self).verify_file(path):
file_name, ext = os.path.splitext(path)
if not ext or ext in ['.config'] + C.YAML_FILENAME_EXTENSIONS:
valid = True
return valid
def parse(self, inventory, loader, path, cache=False):
''' parses the inventory file '''
super(InventoryModule, self).parse(inventory, loader, path, cache=cache)
self._read_config_data(path)
strict = self.get_option('strict')
fact_cache = FactCache()
try:
# Go over hosts (less var copies)
for host in inventory.hosts:
# get available variables to templar
hostvars = combine_vars(get_group_vars(inventory.hosts[host].get_groups()), inventory.hosts[host].get_vars())
if host in fact_cache: # adds facts if cache is active
hostvars = combine_vars(hostvars, fact_cache[host])
# create composite vars
self._set_composite_vars(self.get_option('compose'), hostvars, host, strict=strict)
# refetch host vars in case new ones have been created above
hostvars = combine_vars(get_group_vars(inventory.hosts[host].get_groups()), inventory.hosts[host].get_vars())
if host in self._cache: # adds facts if cache is active
hostvars = combine_vars(hostvars, self._cache[host])
# constructed groups based on conditionals
self._add_host_to_composed_groups(self.get_option('groups'), hostvars, host, strict=strict)
# constructed groups based variable values
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars, host, strict=strict)
except Exception as e:
raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e)))
| gpl-3.0 |
xsm110/Apache-Beam | sdks/python/apache_beam/io/gcp/datastore/v1/datastoreio_test.py | 6 | 10278 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from mock import MagicMock, call, patch
from apache_beam.io.gcp.datastore.v1 import fake_datastore
from apache_beam.io.gcp.datastore.v1 import helper
from apache_beam.io.gcp.datastore.v1 import query_splitter
from apache_beam.io.gcp.datastore.v1.datastoreio import _Mutate
from apache_beam.io.gcp.datastore.v1.datastoreio import ReadFromDatastore
from apache_beam.io.gcp.datastore.v1.datastoreio import WriteToDatastore
# Protect against environments where datastore library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from google.cloud.proto.datastore.v1 import datastore_pb2
from google.cloud.proto.datastore.v1 import query_pb2
from google.protobuf import timestamp_pb2
from googledatastore import helper as datastore_helper
except ImportError:
datastore_pb2 = None
# pylint: enable=wrong-import-order, wrong-import-position
@unittest.skipIf(datastore_pb2 is None, 'GCP dependencies are not installed')
class DatastoreioTest(unittest.TestCase):
_PROJECT = 'project'
_KIND = 'kind'
_NAMESPACE = 'namespace'
def setUp(self):
self._mock_datastore = MagicMock()
self._query = query_pb2.Query()
self._query.kind.add().name = self._KIND
def test_get_estimated_size_bytes_without_namespace(self):
entity_bytes = 100
timestamp = timestamp_pb2.Timestamp(seconds=1234)
self.check_estimated_size_bytes(entity_bytes, timestamp)
def test_get_estimated_size_bytes_with_namespace(self):
entity_bytes = 100
timestamp = timestamp_pb2.Timestamp(seconds=1234)
self.check_estimated_size_bytes(entity_bytes, timestamp, self._NAMESPACE)
def test_SplitQueryFn_with_num_splits(self):
with patch.object(helper, 'get_datastore',
return_value=self._mock_datastore):
num_splits = 23
def fake_get_splits(datastore, query, num_splits, partition=None):
return self.split_query(query, num_splits)
with patch.object(query_splitter, 'get_splits',
side_effect=fake_get_splits):
split_query_fn = ReadFromDatastore.SplitQueryFn(
self._PROJECT, self._query, None, num_splits)
split_query_fn.start_bundle()
returned_split_queries = []
for split_query in split_query_fn.process(self._query):
returned_split_queries.append(split_query)
self.assertEqual(len(returned_split_queries), num_splits)
self.assertEqual(0, len(self._mock_datastore.run_query.call_args_list))
self.verify_unique_keys(returned_split_queries)
def test_SplitQueryFn_without_num_splits(self):
with patch.object(helper, 'get_datastore',
return_value=self._mock_datastore):
# Force SplitQueryFn to compute the number of query splits
num_splits = 0
expected_num_splits = 23
entity_bytes = (expected_num_splits *
ReadFromDatastore._DEFAULT_BUNDLE_SIZE_BYTES)
with patch.object(ReadFromDatastore, 'get_estimated_size_bytes',
return_value=entity_bytes):
def fake_get_splits(datastore, query, num_splits, partition=None):
return self.split_query(query, num_splits)
with patch.object(query_splitter, 'get_splits',
side_effect=fake_get_splits):
split_query_fn = ReadFromDatastore.SplitQueryFn(
self._PROJECT, self._query, None, num_splits)
split_query_fn.start_bundle()
returned_split_queries = []
for split_query in split_query_fn.process(self._query):
returned_split_queries.append(split_query)
self.assertEqual(len(returned_split_queries), expected_num_splits)
self.assertEqual(0,
len(self._mock_datastore.run_query.call_args_list))
self.verify_unique_keys(returned_split_queries)
def test_SplitQueryFn_with_query_limit(self):
"""A test that verifies no split is performed when the query has a limit."""
with patch.object(helper, 'get_datastore',
return_value=self._mock_datastore):
self._query.limit.value = 3
split_query_fn = ReadFromDatastore.SplitQueryFn(
self._PROJECT, self._query, None, 4)
split_query_fn.start_bundle()
returned_split_queries = []
for split_query in split_query_fn.process(self._query):
returned_split_queries.append(split_query)
self.assertEqual(1, len(returned_split_queries))
self.assertEqual(0, len(self._mock_datastore.method_calls))
def test_SplitQueryFn_with_exception(self):
"""A test that verifies that no split is performed when failures occur."""
with patch.object(helper, 'get_datastore',
return_value=self._mock_datastore):
# Force SplitQueryFn to compute the number of query splits
num_splits = 0
expected_num_splits = 1
entity_bytes = (expected_num_splits *
ReadFromDatastore._DEFAULT_BUNDLE_SIZE_BYTES)
with patch.object(ReadFromDatastore, 'get_estimated_size_bytes',
return_value=entity_bytes):
with patch.object(query_splitter, 'get_splits',
side_effect=ValueError("Testing query split error")):
split_query_fn = ReadFromDatastore.SplitQueryFn(
self._PROJECT, self._query, None, num_splits)
split_query_fn.start_bundle()
returned_split_queries = []
for split_query in split_query_fn.process(self._query):
returned_split_queries.append(split_query)
self.assertEqual(len(returned_split_queries), expected_num_splits)
self.assertEqual(returned_split_queries[0][1], self._query)
self.assertEqual(0,
len(self._mock_datastore.run_query.call_args_list))
self.verify_unique_keys(returned_split_queries)
def test_DatastoreWriteFn_with_emtpy_batch(self):
self.check_DatastoreWriteFn(0)
def test_DatastoreWriteFn_with_one_batch(self):
num_entities_to_write = _Mutate._WRITE_BATCH_SIZE * 1 - 50
self.check_DatastoreWriteFn(num_entities_to_write)
def test_DatastoreWriteFn_with_multiple_batches(self):
num_entities_to_write = _Mutate._WRITE_BATCH_SIZE * 3 + 50
self.check_DatastoreWriteFn(num_entities_to_write)
def test_DatastoreWriteFn_with_batch_size_exact_multiple(self):
num_entities_to_write = _Mutate._WRITE_BATCH_SIZE * 2
self.check_DatastoreWriteFn(num_entities_to_write)
def check_DatastoreWriteFn(self, num_entities):
"""A helper function to test DatastoreWriteFn."""
with patch.object(helper, 'get_datastore',
return_value=self._mock_datastore):
entities = [e.entity for e in
fake_datastore.create_entities(num_entities)]
expected_mutations = map(WriteToDatastore.to_upsert_mutation, entities)
actual_mutations = []
self._mock_datastore.commit.side_effect = (
fake_datastore.create_commit(actual_mutations))
datastore_write_fn = _Mutate.DatastoreWriteFn(self._PROJECT)
datastore_write_fn.start_bundle()
for mutation in expected_mutations:
datastore_write_fn.process(mutation)
datastore_write_fn.finish_bundle()
self.assertEqual(actual_mutations, expected_mutations)
self.assertEqual((num_entities - 1) / _Mutate._WRITE_BATCH_SIZE + 1,
self._mock_datastore.commit.call_count)
def verify_unique_keys(self, queries):
"""A helper function that verifies if all the queries have unique keys."""
keys, _ = zip(*queries)
keys = set(keys)
self.assertEqual(len(keys), len(queries))
def check_estimated_size_bytes(self, entity_bytes, timestamp, namespace=None):
"""A helper method to test get_estimated_size_bytes"""
timestamp_req = helper.make_request(
self._PROJECT, namespace, helper.make_latest_timestamp_query(namespace))
timestamp_resp = self.make_stats_response(
{'timestamp': datastore_helper.from_timestamp(timestamp)})
kind_stat_req = helper.make_request(
self._PROJECT, namespace, helper.make_kind_stats_query(
namespace, self._query.kind[0].name,
datastore_helper.micros_from_timestamp(timestamp)))
kind_stat_resp = self.make_stats_response(
{'entity_bytes': entity_bytes})
def fake_run_query(req):
if req == timestamp_req:
return timestamp_resp
elif req == kind_stat_req:
return kind_stat_resp
else:
print kind_stat_req
raise ValueError("Unknown req: %s" % req)
self._mock_datastore.run_query.side_effect = fake_run_query
self.assertEqual(entity_bytes, ReadFromDatastore.get_estimated_size_bytes(
self._PROJECT, namespace, self._query, self._mock_datastore))
self.assertEqual(self._mock_datastore.run_query.call_args_list,
[call(timestamp_req), call(kind_stat_req)])
def make_stats_response(self, property_map):
resp = datastore_pb2.RunQueryResponse()
entity_result = resp.batch.entity_results.add()
datastore_helper.add_properties(entity_result.entity, property_map)
return resp
def split_query(self, query, num_splits):
"""Generate dummy query splits."""
split_queries = []
for _ in range(0, num_splits):
q = query_pb2.Query()
q.CopyFrom(query)
split_queries.append(q)
return split_queries
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
baberthal/CouchPotatoServer | libs/suds/transport/__init__.py | 209 | 3895 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Contains transport interface (classes).
"""
class TransportError(Exception):
def __init__(self, reason, httpcode, fp=None):
Exception.__init__(self, reason)
self.httpcode = httpcode
self.fp = fp
class Request:
"""
A transport request
@ivar url: The url for the request.
@type url: str
@ivar message: The message to be sent in a POST request.
@type message: str
@ivar headers: The http headers to be used for the request.
@type headers: dict
"""
def __init__(self, url, message=None):
"""
@param url: The url for the request.
@type url: str
@param message: The (optional) message to be send in the request.
@type message: str
"""
self.url = url
self.headers = {}
self.message = message
def __str__(self):
s = []
s.append('URL:%s' % self.url)
s.append('HEADERS: %s' % self.headers)
s.append('MESSAGE:')
s.append(self.message)
return '\n'.join(s)
class Reply:
"""
A transport reply
@ivar code: The http code returned.
@type code: int
@ivar message: The message to be sent in a POST request.
@type message: str
@ivar headers: The http headers to be used for the request.
@type headers: dict
"""
def __init__(self, code, headers, message):
"""
@param code: The http code returned.
@type code: int
@param headers: The http returned headers.
@type headers: dict
@param message: The (optional) reply message received.
@type message: str
"""
self.code = code
self.headers = headers
self.message = message
def __str__(self):
s = []
s.append('CODE: %s' % self.code)
s.append('HEADERS: %s' % self.headers)
s.append('MESSAGE:')
s.append(self.message)
return '\n'.join(s)
class Transport:
"""
The transport I{interface}.
"""
def __init__(self):
"""
Constructor.
"""
from suds.transport.options import Options
self.options = Options()
del Options
def open(self, request):
"""
Open the url in the specified request.
@param request: A transport request.
@type request: L{Request}
@return: An input stream.
@rtype: stream
@raise TransportError: On all transport errors.
"""
raise Exception('not-implemented')
def send(self, request):
"""
Send soap message. Implementations are expected to handle:
- proxies
- I{http} headers
- cookies
- sending message
- brokering exceptions into L{TransportError}
@param request: A transport request.
@type request: L{Request}
@return: The reply
@rtype: L{Reply}
@raise TransportError: On all transport errors.
"""
raise Exception('not-implemented')
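# The class below is a minimal sketch (not part of suds itself) of how the
# interface above can be satisfied: open() returns a readable stream and
# send() returns a Reply. The name _EchoTransport and its echoing behaviour
# are illustrative assumptions; real transports also handle proxies, http
# headers and cookies as described in send().
class _EchoTransport(Transport):
    """
    Illustrative transport that answers every request with its own message.
    """
    def open(self, request):
        from StringIO import StringIO
        return StringIO(request.message or '')
    def send(self, request):
        return Reply(200, dict(request.headers), request.message)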
| gpl-3.0 |
maheshcn/memory-usage-from-ldfile | openpyxl/xml/tests/test_tags.py | 1 | 1093 | # Copyright (c) 2010-2015 openpyxl
from io import BytesIO
import pytest
from openpyxl.xml.functions import start_tag, end_tag, tag, XMLGenerator
@pytest.fixture
def doc():
return BytesIO()
@pytest.fixture
def root(doc):
return XMLGenerator(doc)
class TestSimpleTag:
def test_start_tag(self, doc, root):
start_tag(root, "start")
assert doc.getvalue() == b"<start>"
def test_end_tag(self, doc, root):
""""""
end_tag(root, "blah")
assert doc.getvalue() == b"</blah>"
class TestTagBody:
def test_start_tag(self, doc, root):
start_tag(root, "start", body="just words")
assert doc.getvalue() == b"<start>just words"
def test_end_tag(self, doc, root):
end_tag(root, "end")
assert doc.getvalue() == b"</end>"
def test_start_tag_attrs(doc, root):
start_tag(root, "start", {'width':"10"})
assert doc.getvalue() == b"""<start width="10">"""
def test_tag(doc, root):
tag(root, "start", {'height':"10"}, "words")
assert doc.getvalue() == b"""<start height="10">words</start>"""
| gpl-2.0 |
stack-of-tasks/rbdlpy | tutorial/lib/python2.7/site-packages/OpenGL/GL/EXT/shared_texture_palette.py | 9 | 1264 | '''OpenGL extension EXT.shared_texture_palette
This module customises the behaviour of the
OpenGL.raw.GL.EXT.shared_texture_palette to provide a more
Python-friendly API
Overview (from the spec)
EXT_shared_texture_palette defines a shared texture palette which may be
used in place of the texture object palettes provided by
EXT_paletted_texture. This is useful for rapidly changing a palette
common to many textures, rather than having to reload the new palette
for each texture. The extension acts as a switch, causing all lookups
that would normally be done on the texture's palette to instead use the
shared palette.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/shared_texture_palette.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.shared_texture_palette import *
from OpenGL.raw.GL.EXT.shared_texture_palette import _EXTENSION_NAME
def glInitSharedTexturePaletteEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
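# Illustrative usage sketch (an assumption, not part of the generated wrapper):
# callers typically test for the extension before enabling the shared palette,
# e.g.
#
#   if glInitSharedTexturePaletteEXT():
#       glEnable(GL_SHARED_TEXTURE_PALETTE_EXT)
#
# where GL_SHARED_TEXTURE_PALETTE_EXT comes from the raw module imported above
# and glEnable from OpenGL.GL.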
### END AUTOGENERATED SECTION | lgpl-3.0 |
michellemorales/OpenMM | models/inception/inception/data/process_bounding_boxes.py | 19 | 8931 | #!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Process the ImageNet Challenge bounding boxes for TensorFlow model training.
This script is called as
process_bounding_boxes.py <dir> [synsets-file]
Where <dir> is a directory containing the downloaded and unpacked bounding box
data. If [synsets-file] is supplied, then only the bounding boxes whose
synsets are contained within this file are returned. Note that the
[synsets-file] file contains synset ids, one per line.
The script dumps out a CSV text file in which each line contains an entry.
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
The entry can be read as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
The bounding box for <JPEG file name> contains two points (xmin, ymin) and
(xmax, ymax) specifying the lower-left corner and upper-right corner of a
bounding box in *relative* coordinates.
The user supplies a directory where the XML files reside. The directory
structure in the directory <dir> is assumed to look like this:
<dir>/nXXXXXXXX/nXXXXXXXX_YYYY.xml
Each XML file contains a bounding box annotation. The script:
(1) Parses the XML file and extracts the filename, label and bounding box info.
(2) The bounding box is specified in the XML files as integer (xmin, ymin) and
(xmax, ymax) *relative* to image size displayed to the human annotator. The
size of the image displayed to the human annotator is stored in the XML file
as integer (height, width).
Note that the displayed size will differ from the actual size of the image
downloaded from image-net.org. To make the bounding box annotation usable,
we convert the bounding box coordinates to floating point numbers relative to
the displayed height and width of the image.
Note that each XML file might contain N bounding box annotations.
Note that the points are all clamped at a range of [0.0, 1.0] because some
human annotations extend outside the range of the supplied image.
See details here: http://image-net.org/download-bboxes
(3) By default, the script outputs all valid bounding boxes. If a
[synsets-file] is supplied, only the subset of bounding boxes associated
with those synsets are outputted. Importantly, one can supply a list of
synsets in the ImageNet Challenge and output the list of bounding boxes
associated with the training images of the ILSVRC.
We use these bounding boxes to inform the random distortion of images
supplied to the network.
If you run this script successfully, you will see the following output
to stderr:
> Finished processing 544546 XML files.
> Skipped 0 XML files not in ImageNet Challenge.
> Skipped 0 bounding boxes not in ImageNet Challenge.
> Wrote 615299 bounding boxes from 544546 annotated images.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os.path
import sys
import xml.etree.ElementTree as ET
class BoundingBox(object):
pass
def GetItem(name, root, index=0):
count = 0
for item in root.iter(name):
if count == index:
return item.text
count += 1
# Failed to find "index" occurrence of item.
return -1
def GetInt(name, root, index=0):
# In some XML annotation files, the point values are not integers, but floats.
# So we add a float function to avoid ValueError.
return int(float(GetItem(name, root, index)))
def FindNumberBoundingBoxes(root):
index = 0
while True:
if GetInt('xmin', root, index) == -1:
break
index += 1
return index
def ProcessXMLAnnotation(xml_file):
"""Process a single XML file containing a bounding box."""
# pylint: disable=broad-except
try:
tree = ET.parse(xml_file)
except Exception:
print('Failed to parse: ' + xml_file, file=sys.stderr)
return None
# pylint: enable=broad-except
root = tree.getroot()
num_boxes = FindNumberBoundingBoxes(root)
boxes = []
for index in range(num_boxes):
box = BoundingBox()
# Grab the 'index' annotation.
box.xmin = GetInt('xmin', root, index)
box.ymin = GetInt('ymin', root, index)
box.xmax = GetInt('xmax', root, index)
box.ymax = GetInt('ymax', root, index)
box.width = GetInt('width', root)
box.height = GetInt('height', root)
box.filename = GetItem('filename', root) + '.JPEG'
box.label = GetItem('name', root)
xmin = float(box.xmin) / float(box.width)
xmax = float(box.xmax) / float(box.width)
ymin = float(box.ymin) / float(box.height)
ymax = float(box.ymax) / float(box.height)
# Some images contain bounding box annotations that
# extend outside of the supplied image. See, e.g.
# n03127925/n03127925_147.xml
# Additionally, for some bounding boxes, the min > max
# or the box is entirely outside of the image.
min_x = min(xmin, xmax)
max_x = max(xmin, xmax)
box.xmin_scaled = min(max(min_x, 0.0), 1.0)
box.xmax_scaled = min(max(max_x, 0.0), 1.0)
min_y = min(ymin, ymax)
max_y = max(ymin, ymax)
box.ymin_scaled = min(max(min_y, 0.0), 1.0)
box.ymax_scaled = min(max(max_y, 0.0), 1.0)
boxes.append(box)
return boxes
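# The helper below is an illustrative sketch (not used by this script) of the
# conversion described in the module docstring: a coordinate such as xmin=12 on
# an image displayed 500 pixels wide becomes 12/500 = 0.024, and values that
# fall outside the displayed image are clamped to the [0.0, 1.0] range.
def _scale_and_clamp(value, extent):
  """Scale an absolute pixel coordinate by the displayed extent and clamp it."""
  return min(max(float(value) / float(extent), 0.0), 1.0)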
if __name__ == '__main__':
if len(sys.argv) < 2 or len(sys.argv) > 3:
print('Invalid usage\n'
'usage: process_bounding_boxes.py <dir> [synsets-file]',
file=sys.stderr)
sys.exit(-1)
xml_files = glob.glob(sys.argv[1] + '/*/*.xml')
print('Identified %d XML files in %s' % (len(xml_files), sys.argv[1]),
file=sys.stderr)
if len(sys.argv) == 3:
labels = set([l.strip() for l in open(sys.argv[2]).readlines()])
print('Identified %d synset IDs in %s' % (len(labels), sys.argv[2]),
file=sys.stderr)
else:
labels = None
skipped_boxes = 0
skipped_files = 0
saved_boxes = 0
saved_files = 0
for file_index, one_file in enumerate(xml_files):
# Example: <...>/n06470073/n00141669_6790.xml
label = os.path.basename(os.path.dirname(one_file))
# Determine if the annotation is from an ImageNet Challenge label.
if labels is not None and label not in labels:
skipped_files += 1
continue
bboxes = ProcessXMLAnnotation(one_file)
assert bboxes is not None, 'No bounding boxes found in ' + one_file
found_box = False
for bbox in bboxes:
if labels is not None:
if bbox.label != label:
# Note: There is a slight bug in the bounding box annotation data.
# Many of the dog labels have the human label 'Scottish_deerhound'
# instead of the synset ID 'n02092002' in the bbox.label field. As a
# simple hack to overcome this issue, we only exclude bbox labels
# *which are synset ID's* that do not match original synset label for
# the XML file.
if bbox.label in labels:
skipped_boxes += 1
continue
# Guard against improperly specified boxes.
if (bbox.xmin_scaled >= bbox.xmax_scaled or
bbox.ymin_scaled >= bbox.ymax_scaled):
skipped_boxes += 1
continue
# Note bbox.filename occasionally contains '%s' in the name. This is
# data set noise that is fixed by just using the basename of the XML file.
image_filename = os.path.splitext(os.path.basename(one_file))[0]
print('%s.JPEG,%.4f,%.4f,%.4f,%.4f' %
(image_filename,
bbox.xmin_scaled, bbox.ymin_scaled,
bbox.xmax_scaled, bbox.ymax_scaled))
saved_boxes += 1
found_box = True
if found_box:
saved_files += 1
else:
skipped_files += 1
if not file_index % 5000:
print('--> processed %d of %d XML files.' %
(file_index + 1, len(xml_files)),
file=sys.stderr)
print('--> skipped %d boxes and %d XML files.' %
(skipped_boxes, skipped_files), file=sys.stderr)
print('Finished processing %d XML files.' % len(xml_files), file=sys.stderr)
print('Skipped %d XML files not in ImageNet Challenge.' % skipped_files,
file=sys.stderr)
print('Skipped %d bounding boxes not in ImageNet Challenge.' % skipped_boxes,
file=sys.stderr)
print('Wrote %d bounding boxes from %d annotated images.' %
(saved_boxes, saved_files),
file=sys.stderr)
print('Finished.', file=sys.stderr)
| gpl-2.0 |
collects/VTK | Charts/Core/Testing/Python/TestScatterPlotColors.py | 26 | 3217 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import vtk
import vtk.test.Testing
import math
class TestScatterPlotColors(vtk.test.Testing.vtkTest):
def testLinePlot(self):
"Test if colored scatter plots can be built with python"
# Set up a 2D scene, add an XY chart to it
view = vtk.vtkContextView()
view.GetRenderer().SetBackground(1.0, 1.0, 1.0)
view.GetRenderWindow().SetSize(400, 300)
chart = vtk.vtkChartXY()
chart.SetShowLegend(True)
view.GetScene().AddItem(chart)
# Create a table with some points in it
arrX = vtk.vtkFloatArray()
arrX.SetName("XAxis")
arrC = vtk.vtkFloatArray()
arrC.SetName("Cosine")
arrS = vtk.vtkFloatArray()
arrS.SetName("Sine")
arrS2 = vtk.vtkFloatArray()
arrS2.SetName("Tan")
numPoints = 40
inc = 7.5 / (numPoints-1)
for i in range(numPoints):
arrX.InsertNextValue(i * inc)
arrC.InsertNextValue(math.cos(i * inc) + 0.0)
arrS.InsertNextValue(math.sin(i * inc) + 0.0)
arrS2.InsertNextValue(math.tan(i * inc) + 0.5)
table = vtk.vtkTable()
table.AddColumn(arrX)
table.AddColumn(arrC)
table.AddColumn(arrS)
table.AddColumn(arrS2)
# Generate a black-to-red lookup table with fixed alpha
lut = vtk.vtkLookupTable()
lut.SetValueRange(0.2, 1.0)
lut.SetSaturationRange(1, 1)
lut.SetHueRange(0,0)
lut.SetRampToLinear()
lut.SetRange(-1,1)
lut.SetAlpha(0.75)
lut.Build()
# Generate a black-to-blue lookup table with alpha range
lut2 = vtk.vtkLookupTable()
lut2.SetValueRange(0.2, 1.0)
lut2.SetSaturationRange(1, 1)
lut2.SetHueRange(0.6667, 0.6667)
lut2.SetAlphaRange(0.4, 0.8)
lut2.SetRampToLinear()
lut2.SetRange(-1,1)
lut2.Build()
# Add multiple line plots, setting the colors etc
points0 = chart.AddPlot(vtk.vtkChart.POINTS)
points0.SetInputData(table, 0, 1)
points0.SetColor(0, 0, 0, 255)
points0.SetWidth(1.0)
points0.SetMarkerStyle(vtk.vtkPlotPoints.CROSS)
points1 = chart.AddPlot(vtk.vtkChart.POINTS)
points1.SetInputData(table, 0, 2)
points1.SetColor(0, 0, 0, 255)
points1.SetMarkerStyle(vtk.vtkPlotPoints.DIAMOND)
points1.SetScalarVisibility(1)
points1.SetLookupTable(lut)
points1.SelectColorArray(1)
points2 = chart.AddPlot(vtk.vtkChart.POINTS)
points2.SetInputData(table, 0, 3)
points2.SetColor(0, 0, 0, 255)
points2.ScalarVisibilityOn()
points2.SetLookupTable(lut2)
points2.SelectColorArray("Cosine")
points2.SetWidth(4.0)
view.GetRenderWindow().SetMultiSamples(0)
view.GetRenderWindow().Render()
img_file = "TestScatterPlotColors.png"
vtk.test.Testing.compareImage(view.GetRenderWindow(),vtk.test.Testing.getAbsImagePath(img_file),threshold=25)
vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(TestScatterPlotColors, 'test')])
| bsd-3-clause |
liucode/tempest-master | tempest/services/compute/json/migrations_client.py | 8 | 1246 | # Copyright 2014 NEC Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
from tempest.api_schema.response.compute.v2_1 import migrations as schema
from tempest.common import service_client
class MigrationsClient(service_client.ServiceClient):
def list_migrations(self, **params):
"""Lists all migrations."""
url = 'os-migrations'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
self.validate_response(schema.list_migrations, resp, body)
return service_client.ResponseBody(resp, body)
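    # Illustrative call (an assumption, not part of tempest): filters accepted
    # by the os-migrations API are passed straight through as query string
    # parameters, e.g.
    #
    #   migrations_client.list_migrations(status='finished', host='compute-1')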
| apache-2.0 |
punithagk/mpm | profiles/uberdrupal/modules/fckeditor/fckeditor/editor/filemanager/connectors/py/config.py | 126 | 7095 | #!/usr/bin/env python
"""
* FCKeditor - The text editor for Internet - http://www.fckeditor.net
* Copyright (C) 2003-2010 Frederico Caldeira Knabben
*
* == BEGIN LICENSE ==
*
* Licensed under the terms of any of the following licenses at your
* choice:
*
* - GNU General Public License Version 2 or later (the "GPL")
* http://www.gnu.org/licenses/gpl.html
*
* - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
* http://www.gnu.org/licenses/lgpl.html
*
* - Mozilla Public License Version 1.1 or later (the "MPL")
* http://www.mozilla.org/MPL/MPL-1.1.html
*
* == END LICENSE ==
*
* Configuration file for the File Manager Connector for Python
"""
# INSTALLATION NOTE: You must set up your server environment accordingly to run
# python scripts. This connector requires Python 2.4 or greater.
#
# Supported operation modes:
# * WSGI (recommended): You'll need apache + mod_python + modpython_gateway
# or any web server capable of the WSGI python standard
# * Plain Old CGI: Any server capable of running standard python scripts
# (although mod_python is recommended for performance)
# This was the previous connector version operation mode
#
# If you're using Apache web server, replace the htaccess.txt to to .htaccess,
# and set the proper options and paths.
# For WSGI and mod_python, you may need to download modpython_gateway from:
# http://projects.amor.org/misc/svn/modpython_gateway.py and copy it in this
# directory.
# SECURITY: You must explicitly enable this "connector". (Set it to "True").
# WARNING: don't just set "ConfigIsEnabled = True", you must be sure that only
# authenticated users can access this file or use some kind of session checking.
Enabled = False
# Path to user files relative to the document root.
UserFilesPath = '/userfiles/'
# Fill the following value it you prefer to specify the absolute path for the
# user files directory. Useful if you are using a virtual directory, symbolic
# link or alias. Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'UserFilesPath' must point to the same directory.
# WARNING: GetRootPath may not work in virtual or mod_python configurations, and
# may not be thread safe. Use this configuration parameter instead.
UserFilesAbsolutePath = ''
# Due to security issues with Apache modules, it is recommended to leave the
# following setting enabled.
ForceSingleExtension = True
# What the user can do with this connector
ConfigAllowedCommands = [ 'QuickUpload', 'FileUpload', 'GetFolders', 'GetFoldersAndFiles', 'CreateFolder' ]
# Allowed Resource Types
ConfigAllowedTypes = ['File', 'Image', 'Flash', 'Media']
# After file is uploaded, sometimes it is required to change its permissions
# so that it was possible to access it at the later time.
# If possible, it is recommended to set more restrictive permissions, like 0755.
# Set to 0 to disable this feature.
# Note: not needed on Windows-based servers.
ChmodOnUpload = 0755
# See comments above.
# Used when creating folders that does not exist.
ChmodOnFolderCreate = 0755
# Do not touch this 3 lines, see "Configuration settings for each Resource Type"
AllowedExtensions = {}; DeniedExtensions = {};
FileTypesPath = {}; FileTypesAbsolutePath = {};
QuickUploadPath = {}; QuickUploadAbsolutePath = {};
# Configuration settings for each Resource Type
#
# - AllowedExtensions: the possible extensions that can be allowed.
# If it is empty then any file type can be uploaded.
# - DeniedExtensions: The extensions that won't be allowed.
# If it is empty then no restrictions are done here.
#
# For a file to be uploaded it has to fulfill both the AllowedExtensions
# and DeniedExtensions (that's it: not being denied) conditions.
#
# - FileTypesPath: the virtual folder relative to the document root where
# these resources will be located.
# Attention: It must start and end with a slash: '/'
#
# - FileTypesAbsolutePath: the physical path to the above folder. It must be
# an absolute path.
# If it's an empty string then it will be autocalculated.
# Useful if you are using a virtual directory, symbolic link or alias.
# Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'FileTypesPath' must point to the same directory.
# Attention: It must end with a slash: '/'
#
#
# - QuickUploadPath: the virtual folder relative to the document root where
# these resources will be uploaded using the Upload tab in the resources
# dialogs.
# Attention: It must start and end with a slash: '/'
#
# - QuickUploadAbsolutePath: the physical path to the above folder. It must be
# an absolute path.
# If it's an empty string then it will be autocalculated.
# Useful if you are using a virtual directory, symbolic link or alias.
# Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'QuickUploadPath' must point to the same directory.
# Attention: It must end with a slash: '/'
AllowedExtensions['File'] = ['7z','aiff','asf','avi','bmp','csv','doc','fla','flv','gif','gz','gzip','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','ods','odt','pdf','png','ppt','pxd','qt','ram','rar','rm','rmi','rmvb','rtf','sdc','sitd','swf','sxc','sxw','tar','tgz','tif','tiff','txt','vsd','wav','wma','wmv','xls','xml','zip']
DeniedExtensions['File'] = []
FileTypesPath['File'] = UserFilesPath + 'file/'
FileTypesAbsolutePath['File'] = (not UserFilesAbsolutePath == '') and (UserFilesAbsolutePath + 'file/') or ''
QuickUploadPath['File'] = FileTypesPath['File']
QuickUploadAbsolutePath['File'] = FileTypesAbsolutePath['File']
AllowedExtensions['Image'] = ['bmp','gif','jpeg','jpg','png']
DeniedExtensions['Image'] = []
FileTypesPath['Image'] = UserFilesPath + 'image/'
FileTypesAbsolutePath['Image'] = (not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'image/' or ''
QuickUploadPath['Image'] = FileTypesPath['Image']
QuickUploadAbsolutePath['Image']= FileTypesAbsolutePath['Image']
AllowedExtensions['Flash'] = ['swf','flv']
DeniedExtensions['Flash'] = []
FileTypesPath['Flash'] = UserFilesPath + 'flash/'
FileTypesAbsolutePath['Flash'] = ( not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'flash/' or ''
QuickUploadPath['Flash'] = FileTypesPath['Flash']
QuickUploadAbsolutePath['Flash']= FileTypesAbsolutePath['Flash']
AllowedExtensions['Media'] = ['aiff','asf','avi','bmp','fla', 'flv','gif','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','png','qt','ram','rm','rmi','rmvb','swf','tif','tiff','wav','wma','wmv']
DeniedExtensions['Media'] = []
FileTypesPath['Media'] = UserFilesPath + 'media/'
FileTypesAbsolutePath['Media'] = ( not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'media/' or ''
QuickUploadPath['Media'] = FileTypesPath['Media']
QuickUploadAbsolutePath['Media']= FileTypesAbsolutePath['Media']
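# The function below is an illustrative sketch (not used by the connector) of
# the rule documented above: a file is accepted only when its extension appears
# in AllowedExtensions for the resource type (or that list is empty) and does
# not appear in DeniedExtensions. The function name is an assumption made for
# this example.
def _example_is_extension_allowed(resource_type, extension):
    ext = extension.lower()
    allowed = [e.lower() for e in AllowedExtensions[resource_type]]
    denied = [e.lower() for e in DeniedExtensions[resource_type]]
    if allowed and ext not in allowed:
        return False
    return ext not in denied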
| gpl-2.0 |
carlgao/lenga | images/lenny64-peon/usr/share/python-support/mercurial-common/mercurial/archival.py | 1 | 7571 | # archival.py - revision archival for mercurial
#
# Copyright 2006 Vadim Gelfer <[email protected]>
#
# This software may be used and distributed according to the terms of
# the GNU General Public License, incorporated herein by reference.
from i18n import _
from node import hex
import cStringIO, os, stat, tarfile, time, util, zipfile
import zlib, gzip
def tidyprefix(dest, prefix, suffixes):
'''choose prefix to use for names in archive. make sure prefix is
safe for consumers.'''
if prefix:
prefix = util.normpath(prefix)
else:
if not isinstance(dest, str):
raise ValueError('dest must be string if no prefix')
prefix = os.path.basename(dest)
lower = prefix.lower()
for sfx in suffixes:
if lower.endswith(sfx):
prefix = prefix[:-len(sfx)]
break
lpfx = os.path.normpath(util.localpath(prefix))
prefix = util.pconvert(lpfx)
if not prefix.endswith('/'):
prefix += '/'
if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
raise util.Abort(_('archive prefix contains illegal components'))
return prefix
class tarit:
'''write archive to tar file or stream. can write uncompressed,
or compress with gzip or bzip2.'''
class GzipFileWithTime(gzip.GzipFile):
def __init__(self, *args, **kw):
timestamp = None
if 'timestamp' in kw:
timestamp = kw.pop('timestamp')
if timestamp == None:
self.timestamp = time.time()
else:
self.timestamp = timestamp
gzip.GzipFile.__init__(self, *args, **kw)
def _write_gzip_header(self):
self.fileobj.write('\037\213') # magic header
self.fileobj.write('\010') # compression method
fname = self.filename[:-3]
flags = 0
if fname:
flags = gzip.FNAME
self.fileobj.write(chr(flags))
gzip.write32u(self.fileobj, long(self.timestamp))
self.fileobj.write('\002')
self.fileobj.write('\377')
if fname:
self.fileobj.write(fname + '\000')
def __init__(self, dest, prefix, mtime, kind=''):
self.prefix = tidyprefix(dest, prefix, ['.tar', '.tar.bz2', '.tar.gz',
'.tgz', '.tbz2'])
self.mtime = mtime
def taropen(name, mode, fileobj=None):
if kind == 'gz':
mode = mode[0]
if not fileobj:
fileobj = open(name, mode + 'b')
gzfileobj = self.GzipFileWithTime(name, mode + 'b',
zlib.Z_BEST_COMPRESSION,
fileobj, timestamp=mtime)
return tarfile.TarFile.taropen(name, mode, gzfileobj)
else:
return tarfile.open(name, mode + kind, fileobj)
if isinstance(dest, str):
self.z = taropen(dest, mode='w:')
else:
# Python 2.5-2.5.1 have a regression that requires a name arg
self.z = taropen(name='', mode='w|', fileobj=dest)
def addfile(self, name, mode, islink, data):
i = tarfile.TarInfo(self.prefix + name)
i.mtime = self.mtime
i.size = len(data)
if islink:
i.type = tarfile.SYMTYPE
i.mode = 0777
i.linkname = data
data = None
else:
i.mode = mode
data = cStringIO.StringIO(data)
self.z.addfile(i, data)
def done(self):
self.z.close()
class tellable:
'''provide tell method for zipfile.ZipFile when writing to http
response file object.'''
def __init__(self, fp):
self.fp = fp
self.offset = 0
def __getattr__(self, key):
return getattr(self.fp, key)
def write(self, s):
self.fp.write(s)
self.offset += len(s)
def tell(self):
return self.offset
class zipit:
'''write archive to zip file or stream. can write uncompressed,
or compressed with deflate.'''
def __init__(self, dest, prefix, mtime, compress=True):
self.prefix = tidyprefix(dest, prefix, ('.zip',))
if not isinstance(dest, str):
try:
dest.tell()
except (AttributeError, IOError):
dest = tellable(dest)
self.z = zipfile.ZipFile(dest, 'w',
compress and zipfile.ZIP_DEFLATED or
zipfile.ZIP_STORED)
self.date_time = time.gmtime(mtime)[:6]
def addfile(self, name, mode, islink, data):
i = zipfile.ZipInfo(self.prefix + name, self.date_time)
i.compress_type = self.z.compression
# unzip will not honor unix file modes unless file creator is
# set to unix (id 3).
i.create_system = 3
ftype = stat.S_IFREG
if islink:
mode = 0777
ftype = stat.S_IFLNK
i.external_attr = (mode | ftype) << 16L
self.z.writestr(i, data)
def done(self):
self.z.close()
class fileit:
'''write archive as files in directory.'''
def __init__(self, name, prefix, mtime):
if prefix:
raise util.Abort(_('cannot give prefix when archiving to files'))
self.basedir = name
self.opener = util.opener(self.basedir)
def addfile(self, name, mode, islink, data):
if islink:
self.opener.symlink(data, name)
return
f = self.opener(name, "w", atomictemp=True)
f.write(data)
f.rename()
destfile = os.path.join(self.basedir, name)
os.chmod(destfile, mode)
def done(self):
pass
archivers = {
'files': fileit,
'tar': tarit,
'tbz2': lambda name, prefix, mtime: tarit(name, prefix, mtime, 'bz2'),
'tgz': lambda name, prefix, mtime: tarit(name, prefix, mtime, 'gz'),
'uzip': lambda name, prefix, mtime: zipit(name, prefix, mtime, False),
'zip': zipit,
}
def archive(repo, dest, node, kind, decode=True, matchfn=None,
prefix=None, mtime=None):
'''create archive of repo as it was at node.
dest can be name of directory, name of archive file, or file
object to write archive to.
kind is type of archive to create.
decode tells whether to put files through decode filters from
hgrc.
matchfn is function to filter names of files to write to archive.
prefix is name of path to put before every archive member.'''
def write(name, mode, islink, getdata):
if matchfn and not matchfn(name): return
data = getdata()
if decode:
data = repo.wwritedata(name, data)
archiver.addfile(name, mode, islink, data)
ctx = repo.changectx(node)
if kind not in archivers:
raise util.Abort(_("unknown archive type '%s'" % kind))
archiver = archivers[kind](dest, prefix, mtime or ctx.date()[0])
m = ctx.manifest()
items = m.items()
items.sort()
if repo.ui.configbool("ui", "archivemeta", True):
write('.hg_archival.txt', 0644, False,
lambda: 'repo: %s\nnode: %s\n' % (
hex(repo.changelog.node(0)), hex(node)))
for filename, filenode in items:
write(filename, m.execf(filename) and 0755 or 0644, m.linkf(filename),
lambda: repo.file(filename).read(filenode))
archiver.done()
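# A minimal usage sketch of archive() (not part of Mercurial itself), assuming
# "repo" is a localrepository and "node" a changeset id; the helper name and
# the output filename below are illustrative assumptions.
def _example_archive_tgz(repo, node):
    '''archive the given changeset as a gzipped tarball under snapshot/'''
    archive(repo, 'snapshot.tar.gz', node, 'tgz', prefix='snapshot')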
| mit |
Microsoft/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/encodings/mac_centeuro.py | 257 | 14102 | """ Python Character Mapping Codec mac_centeuro generated from 'MAPPINGS/VENDORS/APPLE/CENTEURO.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-centeuro',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
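### Usage note (illustrative, not part of the generated codec): once the
### standard "encodings" package has registered this module, round-tripping
### works through the regular codec machinery, e.g.
###
### 'Kraków'.encode('mac-centeuro').decode('mac-centeuro') == 'Kraków'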
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> CONTROL CHARACTER
'\x01' # 0x01 -> CONTROL CHARACTER
'\x02' # 0x02 -> CONTROL CHARACTER
'\x03' # 0x03 -> CONTROL CHARACTER
'\x04' # 0x04 -> CONTROL CHARACTER
'\x05' # 0x05 -> CONTROL CHARACTER
'\x06' # 0x06 -> CONTROL CHARACTER
'\x07' # 0x07 -> CONTROL CHARACTER
'\x08' # 0x08 -> CONTROL CHARACTER
'\t' # 0x09 -> CONTROL CHARACTER
'\n' # 0x0A -> CONTROL CHARACTER
'\x0b' # 0x0B -> CONTROL CHARACTER
'\x0c' # 0x0C -> CONTROL CHARACTER
'\r' # 0x0D -> CONTROL CHARACTER
'\x0e' # 0x0E -> CONTROL CHARACTER
'\x0f' # 0x0F -> CONTROL CHARACTER
'\x10' # 0x10 -> CONTROL CHARACTER
'\x11' # 0x11 -> CONTROL CHARACTER
'\x12' # 0x12 -> CONTROL CHARACTER
'\x13' # 0x13 -> CONTROL CHARACTER
'\x14' # 0x14 -> CONTROL CHARACTER
'\x15' # 0x15 -> CONTROL CHARACTER
'\x16' # 0x16 -> CONTROL CHARACTER
'\x17' # 0x17 -> CONTROL CHARACTER
'\x18' # 0x18 -> CONTROL CHARACTER
'\x19' # 0x19 -> CONTROL CHARACTER
'\x1a' # 0x1A -> CONTROL CHARACTER
'\x1b' # 0x1B -> CONTROL CHARACTER
'\x1c' # 0x1C -> CONTROL CHARACTER
'\x1d' # 0x1D -> CONTROL CHARACTER
'\x1e' # 0x1E -> CONTROL CHARACTER
'\x1f' # 0x1F -> CONTROL CHARACTER
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> CONTROL CHARACTER
'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\u0100' # 0x81 -> LATIN CAPITAL LETTER A WITH MACRON
'\u0101' # 0x82 -> LATIN SMALL LETTER A WITH MACRON
'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0104' # 0x84 -> LATIN CAPITAL LETTER A WITH OGONEK
'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
'\u0105' # 0x88 -> LATIN SMALL LETTER A WITH OGONEK
'\u010c' # 0x89 -> LATIN CAPITAL LETTER C WITH CARON
'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
'\u010d' # 0x8B -> LATIN SMALL LETTER C WITH CARON
'\u0106' # 0x8C -> LATIN CAPITAL LETTER C WITH ACUTE
'\u0107' # 0x8D -> LATIN SMALL LETTER C WITH ACUTE
'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
'\u0179' # 0x8F -> LATIN CAPITAL LETTER Z WITH ACUTE
'\u017a' # 0x90 -> LATIN SMALL LETTER Z WITH ACUTE
'\u010e' # 0x91 -> LATIN CAPITAL LETTER D WITH CARON
'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
'\u010f' # 0x93 -> LATIN SMALL LETTER D WITH CARON
'\u0112' # 0x94 -> LATIN CAPITAL LETTER E WITH MACRON
'\u0113' # 0x95 -> LATIN SMALL LETTER E WITH MACRON
'\u0116' # 0x96 -> LATIN CAPITAL LETTER E WITH DOT ABOVE
'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
'\u0117' # 0x98 -> LATIN SMALL LETTER E WITH DOT ABOVE
'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
'\u011a' # 0x9D -> LATIN CAPITAL LETTER E WITH CARON
'\u011b' # 0x9E -> LATIN SMALL LETTER E WITH CARON
'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
'\u2020' # 0xA0 -> DAGGER
'\xb0' # 0xA1 -> DEGREE SIGN
'\u0118' # 0xA2 -> LATIN CAPITAL LETTER E WITH OGONEK
'\xa3' # 0xA3 -> POUND SIGN
'\xa7' # 0xA4 -> SECTION SIGN
'\u2022' # 0xA5 -> BULLET
'\xb6' # 0xA6 -> PILCROW SIGN
'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
'\xae' # 0xA8 -> REGISTERED SIGN
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u2122' # 0xAA -> TRADE MARK SIGN
'\u0119' # 0xAB -> LATIN SMALL LETTER E WITH OGONEK
'\xa8' # 0xAC -> DIAERESIS
'\u2260' # 0xAD -> NOT EQUAL TO
'\u0123' # 0xAE -> LATIN SMALL LETTER G WITH CEDILLA
'\u012e' # 0xAF -> LATIN CAPITAL LETTER I WITH OGONEK
'\u012f' # 0xB0 -> LATIN SMALL LETTER I WITH OGONEK
'\u012a' # 0xB1 -> LATIN CAPITAL LETTER I WITH MACRON
'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
'\u0136' # 0xB5 -> LATIN CAPITAL LETTER K WITH CEDILLA
'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
'\u2211' # 0xB7 -> N-ARY SUMMATION
'\u0142' # 0xB8 -> LATIN SMALL LETTER L WITH STROKE
'\u013b' # 0xB9 -> LATIN CAPITAL LETTER L WITH CEDILLA
'\u013c' # 0xBA -> LATIN SMALL LETTER L WITH CEDILLA
'\u013d' # 0xBB -> LATIN CAPITAL LETTER L WITH CARON
'\u013e' # 0xBC -> LATIN SMALL LETTER L WITH CARON
'\u0139' # 0xBD -> LATIN CAPITAL LETTER L WITH ACUTE
'\u013a' # 0xBE -> LATIN SMALL LETTER L WITH ACUTE
'\u0145' # 0xBF -> LATIN CAPITAL LETTER N WITH CEDILLA
'\u0146' # 0xC0 -> LATIN SMALL LETTER N WITH CEDILLA
'\u0143' # 0xC1 -> LATIN CAPITAL LETTER N WITH ACUTE
'\xac' # 0xC2 -> NOT SIGN
'\u221a' # 0xC3 -> SQUARE ROOT
'\u0144' # 0xC4 -> LATIN SMALL LETTER N WITH ACUTE
'\u0147' # 0xC5 -> LATIN CAPITAL LETTER N WITH CARON
'\u2206' # 0xC6 -> INCREMENT
'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
'\xa0' # 0xCA -> NO-BREAK SPACE
'\u0148' # 0xCB -> LATIN SMALL LETTER N WITH CARON
'\u0150' # 0xCC -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
'\u0151' # 0xCE -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
'\u014c' # 0xCF -> LATIN CAPITAL LETTER O WITH MACRON
'\u2013' # 0xD0 -> EN DASH
'\u2014' # 0xD1 -> EM DASH
'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
'\xf7' # 0xD6 -> DIVISION SIGN
'\u25ca' # 0xD7 -> LOZENGE
'\u014d' # 0xD8 -> LATIN SMALL LETTER O WITH MACRON
'\u0154' # 0xD9 -> LATIN CAPITAL LETTER R WITH ACUTE
'\u0155' # 0xDA -> LATIN SMALL LETTER R WITH ACUTE
'\u0158' # 0xDB -> LATIN CAPITAL LETTER R WITH CARON
'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\u0159' # 0xDE -> LATIN SMALL LETTER R WITH CARON
'\u0156' # 0xDF -> LATIN CAPITAL LETTER R WITH CEDILLA
'\u0157' # 0xE0 -> LATIN SMALL LETTER R WITH CEDILLA
'\u0160' # 0xE1 -> LATIN CAPITAL LETTER S WITH CARON
'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
'\u0161' # 0xE4 -> LATIN SMALL LETTER S WITH CARON
'\u015a' # 0xE5 -> LATIN CAPITAL LETTER S WITH ACUTE
'\u015b' # 0xE6 -> LATIN SMALL LETTER S WITH ACUTE
'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
'\u0164' # 0xE8 -> LATIN CAPITAL LETTER T WITH CARON
'\u0165' # 0xE9 -> LATIN SMALL LETTER T WITH CARON
'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
'\u017d' # 0xEB -> LATIN CAPITAL LETTER Z WITH CARON
'\u017e' # 0xEC -> LATIN SMALL LETTER Z WITH CARON
'\u016a' # 0xED -> LATIN CAPITAL LETTER U WITH MACRON
'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\u016b' # 0xF0 -> LATIN SMALL LETTER U WITH MACRON
'\u016e' # 0xF1 -> LATIN CAPITAL LETTER U WITH RING ABOVE
'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
'\u016f' # 0xF3 -> LATIN SMALL LETTER U WITH RING ABOVE
'\u0170' # 0xF4 -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
'\u0171' # 0xF5 -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
'\u0172' # 0xF6 -> LATIN CAPITAL LETTER U WITH OGONEK
'\u0173' # 0xF7 -> LATIN SMALL LETTER U WITH OGONEK
'\xdd' # 0xF8 -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xfd' # 0xF9 -> LATIN SMALL LETTER Y WITH ACUTE
'\u0137' # 0xFA -> LATIN SMALL LETTER K WITH CEDILLA
'\u017b' # 0xFB -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\u0141' # 0xFC -> LATIN CAPITAL LETTER L WITH STROKE
'\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\u0122' # 0xFE -> LATIN CAPITAL LETTER G WITH CEDILLA
'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
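# --- Editor's illustrative sketch (not part of the original codec module) ---
# The decoding table above maps every byte 0x00-0xFF to exactly one character,
# and codecs.charmap_build() inverts it into the table used for encoding. A
# minimal round-trip through the two tables, assuming the mappings above,
# could look like this:
def _example_charmap_roundtrip():
    text, _ = codecs.charmap_decode(b'\x80', 'strict', decoding_table)
    data, _ = codecs.charmap_encode(text, 'strict', encoding_table)
    return text, data  # expected: ('\xc4', b'\x80'), i.e. LATIN CAPITAL LETTER A WITH DIAERESIS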
| apache-2.0 |
iulian787/spack | var/spack/repos/builtin/packages/py-onnx/package.py | 5 | 1572 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyOnnx(PythonPackage):
"""Open Neural Network Exchange (ONNX) is an open ecosystem that
empowers AI developers to choose the right tools as their
project evolves. ONNX provides an open source format for AI
models, both deep learning and traditional ML. It defines an
extensible computation graph model, as well as definitions of
built-in operators and standard data types. Currently we focus
on the capabilities needed for inferencing (scoring)."""
homepage = "https://github.com/onnx/onnx"
url = "https://pypi.io/packages/source/O/Onnx/onnx-1.6.0.tar.gz"
version('1.6.0', sha256='3b88c3fe521151651a0403c4d131cb2e0311bd28b753ef692020a432a81ce345')
version('1.5.0', sha256='1a584a4ef62a6db178c257fffb06a9d8e61b41c0a80bfd8bcd8a253d72c4b0b4')
depends_on('py-setuptools', type='build')
depends_on('protobuf')
depends_on('py-protobuf+cpp', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-six', type=('build', 'run'))
depends_on('[email protected]:', when='^python@:3.4.999', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type='build')
# 'python_out' does not recognize dllexport_decl.
patch('remove_dllexport_decl.patch', when='@:1.6.0')
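# Editor's note (illustrative, not part of the original recipe): with this
# recipe available in a Spack instance, the package would typically be
# installed from the command line, e.g.
#
#   spack install [email protected]
#
# which pulls in the protobuf, numpy and six dependencies declared above as
# needed.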
| lgpl-2.1 |
Axelio/pruebas_camelot | virtualenv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/poolmanager.py | 550 | 8977 | # urllib3/poolmanager.py
# Copyright 2008-2014 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
try: # Python 3
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import port_by_scheme
from .request import RequestMethods
from .util import parse_url
__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
pool_classes_by_scheme = {
'http': HTTPConnectionPool,
'https': HTTPSConnectionPool,
}
log = logging.getLogger(__name__)
SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
'ssl_version')
class PoolManager(RequestMethods):
"""
Allows for arbitrary requests while transparently keeping track of
necessary connection pools for you.
:param num_pools:
Number of connection pools to cache before discarding the least
recently used pool.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param \**connection_pool_kw:
Additional parameters are used to create fresh
:class:`urllib3.connectionpool.ConnectionPool` instances.
Example: ::
>>> manager = PoolManager(num_pools=2)
>>> r = manager.request('GET', 'http://google.com/')
>>> r = manager.request('GET', 'http://google.com/mail')
>>> r = manager.request('GET', 'http://yahoo.com/')
>>> len(manager.pools)
2
"""
proxy = None
def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
RequestMethods.__init__(self, headers)
self.connection_pool_kw = connection_pool_kw
self.pools = RecentlyUsedContainer(num_pools,
dispose_func=lambda p: p.close())
def _new_pool(self, scheme, host, port):
"""
Create a new :class:`ConnectionPool` based on host, port and scheme.
This method is used to actually create the connection pools handed out
by :meth:`connection_from_url` and companion methods. It is intended
to be overridden for customization.
"""
pool_cls = pool_classes_by_scheme[scheme]
kwargs = self.connection_pool_kw
if scheme == 'http':
kwargs = self.connection_pool_kw.copy()
for kw in SSL_KEYWORDS:
kwargs.pop(kw, None)
return pool_cls(host, port, **kwargs)
def clear(self):
"""
Empty our store of pools and direct them all to close.
This will not affect in-flight connections, but they will not be
re-used after completion.
"""
self.pools.clear()
def connection_from_host(self, host, port=None, scheme='http'):
"""
Get a :class:`ConnectionPool` based on the host, port, and scheme.
If ``port`` isn't given, it will be derived from the ``scheme`` using
``urllib3.connectionpool.port_by_scheme``.
"""
scheme = scheme or 'http'
port = port or port_by_scheme.get(scheme, 80)
pool_key = (scheme, host, port)
with self.pools.lock:
# If the scheme, host, or port doesn't match existing open
# connections, open a new ConnectionPool.
pool = self.pools.get(pool_key)
if pool:
return pool
# Make a fresh ConnectionPool of the desired type
pool = self._new_pool(scheme, host, port)
self.pools[pool_key] = pool
return pool
def connection_from_url(self, url):
"""
Similar to :func:`urllib3.connectionpool.connection_from_url` but
doesn't pass any additional parameters to the
:class:`urllib3.connectionpool.ConnectionPool` constructor.
Additional parameters are taken from the :class:`.PoolManager`
constructor.
"""
u = parse_url(url)
return self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
def urlopen(self, method, url, redirect=True, **kw):
"""
Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
"""
u = parse_url(url)
conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
kw['assert_same_host'] = False
kw['redirect'] = False
if 'headers' not in kw:
kw['headers'] = self.headers
if self.proxy is not None and u.scheme == "http":
response = conn.urlopen(method, url, **kw)
else:
response = conn.urlopen(method, u.request_uri, **kw)
redirect_location = redirect and response.get_redirect_location()
if not redirect_location:
return response
# Support relative URLs for redirecting.
redirect_location = urljoin(url, redirect_location)
# RFC 2616, Section 10.3.4
if response.status == 303:
method = 'GET'
log.info("Redirecting %s -> %s" % (url, redirect_location))
kw['retries'] = kw.get('retries', 3) - 1 # Persist retries countdown
kw['redirect'] = redirect
return self.urlopen(method, redirect_location, **kw)
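# --- Editor's illustrative sketch (not part of urllib3) ---
# Repeated requests to the same (scheme, host, port) triple reuse a single
# ConnectionPool, as described in connection_from_host() above. A minimal
# demonstration (it performs real network requests when called):
def _example_pool_reuse():
    manager = PoolManager(num_pools=2)
    manager.request('GET', 'http://example.com/')
    manager.request('GET', 'http://example.com/other')
    return len(manager.pools)  # expected: 1, both requests shared one pool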
class ProxyManager(PoolManager):
"""
Behaves just like :class:`PoolManager`, but sends all requests through
the defined proxy, using the CONNECT method for HTTPS URLs.
:param proxy_url:
The URL of the proxy to be used.
:param proxy_headers:
        A dictionary containing headers that will be sent to the proxy. In the
        HTTP case they are sent with every request, while in the
HTTPS/CONNECT case they are sent only once. Could be used for proxy
authentication.
Example:
>>> proxy = urllib3.ProxyManager('http://localhost:3128/')
>>> r1 = proxy.request('GET', 'http://google.com/')
>>> r2 = proxy.request('GET', 'http://httpbin.org/')
>>> len(proxy.pools)
1
>>> r3 = proxy.request('GET', 'https://httpbin.org/')
>>> r4 = proxy.request('GET', 'https://twitter.com/')
>>> len(proxy.pools)
3
"""
def __init__(self, proxy_url, num_pools=10, headers=None,
proxy_headers=None, **connection_pool_kw):
if isinstance(proxy_url, HTTPConnectionPool):
proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
proxy_url.port)
proxy = parse_url(proxy_url)
if not proxy.port:
port = port_by_scheme.get(proxy.scheme, 80)
proxy = proxy._replace(port=port)
self.proxy = proxy
self.proxy_headers = proxy_headers or {}
assert self.proxy.scheme in ("http", "https"), \
'Not supported proxy scheme %s' % self.proxy.scheme
connection_pool_kw['_proxy'] = self.proxy
connection_pool_kw['_proxy_headers'] = self.proxy_headers
super(ProxyManager, self).__init__(
num_pools, headers, **connection_pool_kw)
def connection_from_host(self, host, port=None, scheme='http'):
if scheme == "https":
return super(ProxyManager, self).connection_from_host(
host, port, scheme)
return super(ProxyManager, self).connection_from_host(
self.proxy.host, self.proxy.port, self.proxy.scheme)
def _set_proxy_headers(self, url, headers=None):
"""
Sets headers needed by proxies: specifically, the Accept and Host
headers. Only sets headers not provided by the user.
"""
headers_ = {'Accept': '*/*'}
netloc = parse_url(url).netloc
if netloc:
headers_['Host'] = netloc
if headers:
headers_.update(headers)
return headers_
def urlopen(self, method, url, redirect=True, **kw):
"Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
u = parse_url(url)
if u.scheme == "http":
# For proxied HTTPS requests, httplib sets the necessary headers
# on the CONNECT to the proxy. For HTTP, we'll definitely
# need to set 'Host' at the very least.
kw['headers'] = self._set_proxy_headers(url, kw.get('headers',
self.headers))
return super(ProxyManager, self).urlopen(method, url, redirect, **kw)
def proxy_from_url(url, **kw):
return ProxyManager(proxy_url=url, **kw)
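# --- Editor's illustrative sketch (not part of urllib3) ---
# proxy_from_url() is only a convenience wrapper; keyword arguments are
# forwarded to ProxyManager unchanged, so the two spellings below are
# equivalent (the proxy URL is an assumed example value):
def _example_proxy_usage():
    proxy = proxy_from_url('http://localhost:3128/', num_pools=4)
    # ...is the same as: ProxyManager('http://localhost:3128/', num_pools=4)
    return proxy.request('GET', 'http://example.com/')  # sent via the proxy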
| gpl-3.0 |
jcpowermac/ansible-modules-extras | cloud/vmware/vmware_vmkernel_ip_config.py | 75 | 3851 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vmware_vmkernel_ip_config
short_description: Configure the VMkernel IP Address
description:
- Configure the VMkernel IP Address
version_added: 2.0
author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
vmk_name:
description:
- VMkernel interface name
required: True
ip_address:
description:
- IP address to assign to VMkernel interface
required: True
subnet_mask:
description:
- Subnet Mask to assign to VMkernel interface
required: True
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example command from Ansible Playbook
- name: Configure IP address on ESX host
local_action:
module: vmware_vmkernel_ip_config
hostname: esxi_hostname
username: esxi_username
password: esxi_password
vmk_name: vmk0
ip_address: 10.0.0.10
subnet_mask: 255.255.255.0
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
def configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask):
host_config_manager = host_system.configManager
host_network_system = host_config_manager.networkSystem
for vnic in host_network_system.networkConfig.vnic:
if vnic.device == vmk_name:
spec = vnic.spec
if spec.ip.ipAddress != ip_address:
spec.ip.dhcp = False
spec.ip.ipAddress = ip_address
spec.ip.subnetMask = subnet_mask
host_network_system.UpdateVirtualNic(vmk_name, spec)
return True
return False
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(vmk_name=dict(required=True, type='str'),
ip_address=dict(required=True, type='str'),
subnet_mask=dict(required=True, type='str')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
vmk_name = module.params['vmk_name']
ip_address = module.params['ip_address']
subnet_mask = module.params['subnet_mask']
try:
content = connect_to_api(module, False)
host = get_all_objs(content, [vim.HostSystem])
if not host:
module.fail_json(msg="Unable to locate Physical Host.")
host_system = host.keys()[0]
changed = configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask)
module.exit_json(changed=changed)
except vmodl.RuntimeFault as runtime_fault:
module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
module.fail_json(msg=method_fault.msg)
except Exception as e:
module.fail_json(msg=str(e))
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
WSDC-NITWarangal/django | django/template/loaders/filesystem.py | 418 | 2158 | """
Wrapper for loading templates from the filesystem.
"""
import errno
import io
import warnings
from django.core.exceptions import SuspiciousFileOperation
from django.template import Origin, TemplateDoesNotExist
from django.utils._os import safe_join
from django.utils.deprecation import RemovedInDjango20Warning
from .base import Loader as BaseLoader
class Loader(BaseLoader):
def get_dirs(self):
return self.engine.dirs
def get_contents(self, origin):
try:
with io.open(origin.name, encoding=self.engine.file_charset) as fp:
return fp.read()
except IOError as e:
if e.errno == errno.ENOENT:
raise TemplateDoesNotExist(origin)
raise
def get_template_sources(self, template_name, template_dirs=None):
"""
Return an Origin object pointing to an absolute path in each directory
in template_dirs. For security reasons, if a path doesn't lie inside
one of the template_dirs it is excluded from the result set.
"""
if not template_dirs:
template_dirs = self.get_dirs()
for template_dir in template_dirs:
try:
name = safe_join(template_dir, template_name)
except SuspiciousFileOperation:
# The joined path was located outside of this template_dir
# (it might be inside another one, so this isn't fatal).
continue
yield Origin(
name=name,
template_name=template_name,
loader=self,
)
def load_template_source(self, template_name, template_dirs=None):
warnings.warn(
            'The load_template_source() method is deprecated. Use '
'get_template() or get_contents() instead.',
RemovedInDjango20Warning,
)
for origin in self.get_template_sources(template_name, template_dirs):
try:
return self.get_contents(origin), origin.name
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist(template_name)
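# --- Editor's illustrative sketch (not part of Django) ---
# The security filtering in get_template_sources() above relies on safe_join()
# raising SuspiciousFileOperation for joined paths that escape the template
# directory; such directories are silently skipped. Roughly:
def _example_safe_join_filtering(template_dir, template_name):
    try:
        return safe_join(template_dir, template_name)
    except SuspiciousFileOperation:
        # e.g. template_name '../../etc/passwd' escapes template_dir,
        # so no Origin is yielded for this directory
        return None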
| bsd-3-clause |
loretoparisi/nupic | examples/opf/tools/testDiagnostics.py | 58 | 1606 | import numpy as np
def printMatrix(inputs, spOutput):
  ''' The (i,j)th cell of the diff matrix holds the number of input pairs whose
  encodings differ by i bits and whose activated output cells differ at j places.
Parameters:
--------------------------------------------------------------------
inputs: the input encodings
spOutput: the coincidences activated in response to each input
'''
from pylab import matplotlib as mat
w=len(np.nonzero(inputs[0])[0])
numActive=len(np.nonzero(spOutput[0])[0])
matrix = np.zeros([2*w+1,2*numActive+1])
for x in xrange(len(inputs)):
i = [_hammingDistance(inputs[x], z) for z in inputs[x:]]
j = [_hammingDistance(spOutput[x], a) for a in spOutput[x:]]
for p, q in zip(i,j):
matrix[p,q]+=1
for y in xrange(len(matrix)) :
matrix[y]=[max(10*x, 100) if (x<100 and x>0) else x for x in matrix[y]]
cdict = {'red':((0.0,0.0,0.0),(0.01,0.7,0.5),(0.3,1.0,0.7),(1.0,1.0,1.0)),\
'green': ((0.0,0.0,0.0),(0.01,0.7,0.5),(0.3,1.0,0.0),(1.0,1.0,1.0)),\
'blue': ((0.0,0.0,0.0),(0.01,0.7,0.5),(0.3,1.0,0.0),(1.0,0.5,1.0))}
my_cmap = mat.colors.LinearSegmentedColormap('my_colormap',cdict,256)
pyl=mat.pyplot
pyl.matshow(matrix, cmap = my_cmap)
pyl.colorbar()
pyl.ylabel('Number of bits by which the inputs differ')
pyl.xlabel('Number of cells by which input and output differ')
pyl.title('The difference matrix')
pyl.show()
def _hammingDistance(s1, s2):
"""Hamming distance between two numpy arrays s1 and s2"""
return sum(abs(s1-s2))
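# --- Editor's illustrative sketch (not part of the original module) ---
# For binary SDR-style vectors the Hamming distance is simply the number of
# positions at which the two patterns differ, which is what the diff matrix
# above bins on both axes:
def _exampleHammingDistance():
  s1 = np.array([1, 0, 1, 1, 0])
  s2 = np.array([1, 1, 1, 0, 0])
  return _hammingDistance(s1, s2)  # -> 2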
| agpl-3.0 |
rvraghav93/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 11 | 25443 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.neighbors import BallTree
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _joint_probabilities_nn
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _kl_divergence_bh
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold import _barnes_hut_tsne
from sklearn.manifold._utils import _binary_search_perplexity
from sklearn.datasets import make_blobs
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from sklearn.metrics.pairwise import pairwise_distances
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, None, desired_perplexity,
verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
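def _example_perplexity_of_uniform_distribution():
    # Editor's illustrative sketch (not part of the test suite): the perplexity
    # targeted above is exp(Shannon entropy), so for a uniform distribution
    # over k neighbors it equals k exactly. This is why the binary search over
    # sigma can be read as targeting a number of "effective" neighbors.
    k = 25
    p = np.full(k, 1.0 / k)
    return np.exp(-np.sum(p * np.log(p)))  # ~= 25.0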
def test_binary_search_neighbors():
# Binary perplexity search approximation.
# Should be approximately equal to the slow method when we use
# all points as neighbors.
n_samples = 500
desired_perplexity = 25.0
random_state = check_random_state(0)
distances = random_state.randn(n_samples, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
P1 = _binary_search_perplexity(distances, None, desired_perplexity,
verbose=0)
# Test that when we use all the neighbors the results are identical
k = n_samples
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
P2 = _binary_search_perplexity(distances, neighbors_nn,
desired_perplexity, verbose=0)
assert_array_almost_equal(P1, P2, decimal=4)
# Test that the highest P_ij are the same when few neighbors are used
for k in np.linspace(80, n_samples, 10):
k = int(k)
topn = k * 10 # check the top 10 *k entries out of k * k entries
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
P2k = _binary_search_perplexity(distances, neighbors_nn,
desired_perplexity, verbose=0)
idx = np.argsort(P1.ravel())[::-1]
P1top = P1.ravel()[idx][:topn]
P2top = P2k.ravel()[idx][:topn]
assert_array_almost_equal(P1top, P2top, decimal=2)
def test_binary_perplexity_stability():
# Binary perplexity search should be stable.
# The binary_search_perplexity had a bug wherein the P array
# was uninitialized, leading to sporadically failing tests.
k = 10
n_samples = 100
random_state = check_random_state(0)
distances = random_state.randn(n_samples, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
last_P = None
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
for _ in range(100):
P = _binary_search_perplexity(distances.copy(), neighbors_nn.copy(),
3, verbose=0)
P1 = _joint_probabilities_nn(distances, neighbors_nn, 3, verbose=0)
if last_P is None:
last_P = P
last_P1 = P1
else:
assert_array_almost_equal(P, last_P, decimal=4)
assert_array_almost_equal(P1, last_P1, decimal=4)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features).astype(np.float32)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
def fun(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[0]
def grad(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
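def _example_trustworthiness_usage():
    # Editor's illustrative sketch (not part of the test suite): outside the
    # test harness trustworthiness() is called the same way; a score of 1.0
    # means every sample keeps its original nearest neighbors in the embedding.
    rng = check_random_state(0)
    X = rng.randn(50, 3)
    X_embedded = X[:, :2]  # drop one coordinate as a toy "embedding"
    return trustworthiness(X, X_embedded, n_neighbors=5)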
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
# The Barnes-Hut approximation uses a different method to estimate
# P_ij using only a number of nearest neighbors instead of all
# points (so that k = 3 * perplexity). As a result we set the
# perplexity=5, so that the number of neighbors is 5%.
n_components = 2
methods = ['exact', 'barnes_hut']
X = random_state.randn(100, n_components).astype(np.float32)
for init in ('random', 'pca'):
for method in methods:
tsne = TSNE(n_components=n_components, perplexity=50,
learning_rate=100.0, init=init, random_state=0,
method=method)
X_embedded = tsne.fit_transform(X)
T = trustworthiness(X, X_embedded, n_neighbors=1)
assert_almost_equal(T, 1.0, decimal=1)
def test_optimization_minimizes_kl_divergence():
"""t-SNE should give a lower KL divergence with more iterations."""
random_state = check_random_state(0)
X, _ = make_blobs(n_features=3, random_state=random_state)
kl_divergences = []
for n_iter in [200, 250, 300]:
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
n_iter=n_iter, random_state=0)
tsne.fit_transform(X)
kl_divergences.append(tsne.kl_divergence_)
assert_less_equal(kl_divergences[1], kl_divergences[0])
assert_less_equal(kl_divergences[2], kl_divergences[1])
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0, method='exact')
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
metric="precomputed", random_state=0, verbose=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca', 'random', or numpy array.
m = "'init' must be 'pca', 'random', or a numpy array"
assert_raises_regexp(ValueError, m, TSNE, init="not available")
def test_init_ndarray():
# Initialize TSNE with ndarray and test fit
tsne = TSNE(init=np.zeros((100, 2)))
X_embedded = tsne.fit_transform(np.ones((100, 5)))
assert_array_equal(np.zeros((100, 2)), X_embedded)
def test_init_ndarray_precomputed():
# Initialize TSNE with ndarray and metric 'precomputed'
# Make sure no FutureWarning is thrown from _fit
tsne = TSNE(init=np.zeros((100, 2)), metric="precomputed")
tsne.fit(np.zeros((100, 100)))
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_answer_gradient_two_points():
# Test the tree with only a single set of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0]])
pos_output = np.array([[-4.961291e-05, -1.072243e-04],
[9.259460e-05, 2.702024e-04]])
neighbors = np.array([[1],
[0]])
grad_output = np.array([[-2.37012478e-05, -6.29044398e-05],
[2.37012478e-05, 6.29044398e-05]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_answer_gradient_four_points():
# Four points tests the tree with multiple levels of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[5.81128448e-05, -7.78033454e-06],
[-5.81526851e-05, 7.80976444e-06],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_skip_num_points_gradient():
# Test the kwargs option skip_num_points.
#
# Skip num points should make it such that the Barnes_hut gradient
# is not calculated for indices below skip_num_point.
# Aside from skip_num_points=2 and the first two gradient rows
# being set to zero, these data points are the same as in
# test_answer_gradient_four_points()
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[0.0, 0.0],
[0.0, 0.0],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output,
False, 0.1, 2)
def _run_answer_test(pos_input, pos_output, neighbors, grad_output,
verbose=False, perplexity=0.1, skip_num_points=0):
distances = pairwise_distances(pos_input).astype(np.float32)
args = distances, perplexity, verbose
pos_output = pos_output.astype(np.float32)
neighbors = neighbors.astype(np.int64)
pij_input = _joint_probabilities(*args)
pij_input = squareform(pij_input).astype(np.float32)
grad_bh = np.zeros(pos_output.shape, dtype=np.float32)
_barnes_hut_tsne.gradient(pij_input, pos_output, neighbors,
grad_bh, 0.5, 2, 1, skip_num_points=0)
assert_array_almost_equal(grad_bh, grad_output, decimal=4)
def test_verbose():
# Verbose options write to stdout.
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
def test_no_sparse_on_barnes_hut():
# No sparse matrices allowed on Barnes-Hut.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_iter=199, method='barnes_hut')
assert_raises_regexp(TypeError, "A sparse matrix was.*",
tsne.fit_transform, X_csr)
def test_64bit():
# Ensure 64bit arrays are handled correctly.
random_state = check_random_state(0)
methods = ['barnes_hut', 'exact']
for method in methods:
for dt in [np.float32, np.float64]:
X = random_state.randn(100, 2).astype(dt)
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
random_state=0, method=method)
tsne.fit_transform(X)
def test_barnes_hut_angle():
# When Barnes-Hut's angle=0 this corresponds to the exact method.
angle = 0.0
perplexity = 10
n_samples = 100
for n_components in [2, 3]:
n_features = 5
degrees_of_freedom = float(n_components - 1.0)
random_state = check_random_state(0)
distances = random_state.randn(n_samples, n_features)
distances = distances.astype(np.float32)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
params = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, perplexity, False)
kl, gradex = _kl_divergence(params, P, degrees_of_freedom, n_samples,
n_components)
k = n_samples - 1
bt = BallTree(distances)
distances_nn, neighbors_nn = bt.query(distances, k=k + 1)
neighbors_nn = neighbors_nn[:, 1:]
Pbh = _joint_probabilities_nn(distances, neighbors_nn,
perplexity, False)
kl, gradbh = _kl_divergence_bh(params, Pbh, neighbors_nn,
degrees_of_freedom, n_samples,
n_components, angle=angle,
skip_num_points=0, verbose=False)
assert_array_almost_equal(Pbh, P, decimal=5)
assert_array_almost_equal(gradex, gradbh, decimal=5)
def test_quadtree_similar_point():
# Introduce a point into a quad tree where a similar point already exists.
# Test will hang if it doesn't complete.
Xs = []
# check the case where points are actually different
Xs.append(np.array([[1, 2], [3, 4]], dtype=np.float32))
# check the case where points are the same on X axis
Xs.append(np.array([[1.0, 2.0], [1.0, 3.0]], dtype=np.float32))
# check the case where points are arbitrarily close on X axis
Xs.append(np.array([[1.00001, 2.0], [1.00002, 3.0]], dtype=np.float32))
# check the case where points are the same on Y axis
Xs.append(np.array([[1.0, 2.0], [3.0, 2.0]], dtype=np.float32))
# check the case where points are arbitrarily close on Y axis
Xs.append(np.array([[1.0, 2.00001], [3.0, 2.00002]], dtype=np.float32))
# check the case where points are arbitrarily close on both axes
Xs.append(np.array([[1.00001, 2.00001], [1.00002, 2.00002]],
dtype=np.float32))
# check the case where points are arbitrarily close on both axes
# close to machine epsilon - x axis
Xs.append(np.array([[1, 0.0003817754041], [2, 0.0003817753750]],
dtype=np.float32))
# check the case where points are arbitrarily close on both axes
# close to machine epsilon - y axis
Xs.append(np.array([[0.0003817754041, 1.0], [0.0003817753750, 2.0]],
dtype=np.float32))
for X in Xs:
counts = np.zeros(3, dtype='int64')
_barnes_hut_tsne.check_quadtree(X, counts)
m = "Tree consistency failed: unexpected number of points at root node"
assert_equal(counts[0], counts[1], m)
m = "Tree consistency failed: unexpected number of points on the tree"
assert_equal(counts[0], counts[2], m)
def test_index_offset():
# Make sure translating between 1D and N-D indices are preserved
assert_equal(_barnes_hut_tsne.test_index2offset(), 1)
assert_equal(_barnes_hut_tsne.test_index_offset(), 1)
@skip_if_32bit
def test_n_iter_without_progress():
# Use a dummy negative n_iter_without_progress and check output on stdout
random_state = check_random_state(0)
X = random_state.randn(100, 2)
tsne = TSNE(n_iter_without_progress=-1, verbose=2,
random_state=1, method='exact')
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
# The output needs to contain the value of n_iter_without_progress
assert_in("did not make any progress during the "
"last -1 episodes. Finished.", out)
def test_min_grad_norm():
# Make sure that the parameter min_grad_norm is used correctly
random_state = check_random_state(0)
X = random_state.randn(100, 2)
min_grad_norm = 0.002
tsne = TSNE(min_grad_norm=min_grad_norm, verbose=2,
random_state=0, method='exact')
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
lines_out = out.split('\n')
# extract the gradient norm from the verbose output
gradient_norm_values = []
for line in lines_out:
# When the computation is Finished just an old gradient norm value
# is repeated that we do not need to store
if 'Finished' in line:
break
start_grad_norm = line.find('gradient norm')
if start_grad_norm >= 0:
line = line[start_grad_norm:]
line = line.replace('gradient norm = ', '')
gradient_norm_values.append(float(line))
# Compute how often the gradient norm is smaller than min_grad_norm
gradient_norm_values = np.array(gradient_norm_values)
n_smaller_gradient_norms = \
len(gradient_norm_values[gradient_norm_values <= min_grad_norm])
# The gradient norm can be smaller than min_grad_norm at most once,
# because in the moment it becomes smaller the optimization stops
assert_less_equal(n_smaller_gradient_norms, 1)
def test_accessible_kl_divergence():
# Ensures that the accessible kl_divergence matches the computed value
random_state = check_random_state(0)
X = random_state.randn(100, 2)
tsne = TSNE(n_iter_without_progress=2, verbose=2,
random_state=0, method='exact')
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
# The output needs to contain the accessible kl_divergence as the error at
# the last iteration
for line in out.split('\n')[::-1]:
if 'Iteration' in line:
_, _, error = line.partition('error = ')
if error:
error, _, _ = error.partition(',')
break
assert_almost_equal(tsne.kl_divergence_, float(error), decimal=5)
| bsd-3-clause |
dims/nova | nova/tests/unit/cmd/test_baseproxy.py | 5 | 3573 | # Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
from nova.cmd import baseproxy
from nova import config
from nova.console import websocketproxy
from nova import test
from nova import version
@mock.patch.object(config, 'parse_args', new=lambda *args, **kwargs: None)
class BaseProxyTestCase(test.NoDBTestCase):
@mock.patch('os.path.exists', return_value=False)
# NOTE(mriedem): sys.exit raises TestingException so we can actually exit
# the test normally.
@mock.patch('sys.exit', side_effect=test.TestingException)
def test_proxy_ssl_without_cert(self, mock_exit, mock_exists):
self.flags(ssl_only=True)
self.assertRaises(test.TestingException, baseproxy.proxy,
'0.0.0.0', '6080')
mock_exit.assert_called_once_with(-1)
@mock.patch('os.path.exists', return_value=False)
@mock.patch('sys.exit', side_effect=test.TestingException)
def test_proxy_web_dir_does_not_exist(self, mock_exit, mock_exists):
self.flags(web='/my/fake/webserver/')
self.assertRaises(test.TestingException, baseproxy.proxy,
'0.0.0.0', '6080')
mock_exit.assert_called_once_with(-1)
@mock.patch('os.path.exists', return_value=True)
@mock.patch.object(logging, 'setup')
@mock.patch.object(gmr.TextGuruMeditation, 'setup_autorun')
@mock.patch('nova.console.websocketproxy.NovaWebSocketProxy.__init__',
return_value=None)
@mock.patch('nova.console.websocketproxy.NovaWebSocketProxy.start_server')
def test_proxy(self, mock_start, mock_init, mock_gmr, mock_log,
mock_exists):
# Force verbose=False so something else testing nova.cmd.baseproxy
# doesn't impact the call to mocked NovaWebSocketProxy.__init__.
self.flags(verbose=False)
baseproxy.proxy('0.0.0.0', '6080')
mock_log.assert_called_once_with(baseproxy.CONF, 'nova')
        mock_gmr.assert_called_once_with(version)
mock_init.assert_called_once_with(
listen_host='0.0.0.0', listen_port='6080', source_is_ipv6=False,
verbose=False, cert='self.pem', key=None, ssl_only=False,
daemon=False, record=False, traffic=False,
web='/usr/share/spice-html5', file_only=True,
RequestHandlerClass=websocketproxy.NovaProxyRequestHandler)
mock_start.assert_called_once_with()
@mock.patch('sys.stderr.write')
@mock.patch('os.path.exists', return_value=False)
@mock.patch('sys.exit', side_effect=test.TestingException)
def test_proxy_exit_with_error(self, mock_exit, mock_exists, mock_stderr):
self.flags(ssl_only=True)
self.assertRaises(test.TestingException, baseproxy.proxy,
'0.0.0.0', '6080')
mock_stderr.assert_called_once_with(
'SSL only and self.pem not found\n')
mock_exit.assert_called_once_with(-1)
| apache-2.0 |
joomel1/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py | 118 | 23190 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.checkout.baselineoptimizer import BaselineOptimizer
from webkitpy.common.net.buildbot.buildbot_mock import MockBuilder
from webkitpy.common.system.executive_mock import MockExecutive2
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.commands.rebaseline import *
from webkitpy.tool.mocktool import MockTool, MockOptions
class _BaseTestCase(unittest.TestCase):
MOCK_WEB_RESULT = 'MOCK Web result, convert 404 to None=True'
WEB_PREFIX = 'http://example.com/f/builders/Apple Lion Release WK1 (Tests)/results/layout-test-results'
command_constructor = None
def setUp(self):
self.tool = MockTool()
self.command = self.command_constructor() # lint warns that command_constructor might not be set, but this is intentional; pylint: disable=E1102
self.command.bind_to_tool(self.tool)
self.lion_port = self.tool.port_factory.get_from_builder_name("Apple Lion Release WK1 (Tests)")
self.lion_expectations_path = self.lion_port.path_to_test_expectations_file()
# FIXME: we should override builders._exact_matches here to point to a set
# of test ports and restore the value in tearDown(), and that way the
# individual tests wouldn't have to worry about it.
def _expand(self, path):
if self.tool.filesystem.isabs(path):
return path
return self.tool.filesystem.join(self.lion_port.layout_tests_dir(), path)
def _read(self, path):
return self.tool.filesystem.read_text_file(self._expand(path))
def _write(self, path, contents):
self.tool.filesystem.write_text_file(self._expand(path), contents)
def _zero_out_test_expectations(self):
for port_name in self.tool.port_factory.all_port_names():
port = self.tool.port_factory.get(port_name)
for path in port.expectations_files():
self._write(path, '')
self.tool.filesystem.written_files = {}
class TestRebaselineTest(_BaseTestCase):
command_constructor = RebaselineTest # AKA webkit-patch rebaseline-test-internal
def setUp(self):
super(TestRebaselineTest, self).setUp()
self.options = MockOptions(builder="Apple Lion Release WK1 (Tests)", test="userscripts/another-test.html", suffixes="txt",
move_overwritten_baselines_to=None, results_directory=None)
def test_baseline_directory(self):
command = self.command
self.assertMultiLineEqual(command._baseline_directory("Apple Win XP Debug (Tests)"), "/mock-checkout/LayoutTests/platform/win-xp")
self.assertMultiLineEqual(command._baseline_directory("Apple Win 7 Release (Tests)"), "/mock-checkout/LayoutTests/platform/win")
self.assertMultiLineEqual(command._baseline_directory("Apple Lion Release WK1 (Tests)"), "/mock-checkout/LayoutTests/platform/mac-lion")
self.assertMultiLineEqual(command._baseline_directory("Apple Lion Release WK2 (Tests)"), "/mock-checkout/LayoutTests/platform/mac-wk2")
self.assertMultiLineEqual(command._baseline_directory("Apple MountainLion Release WK1 (Tests)"), "/mock-checkout/LayoutTests/platform/mac")
self.assertMultiLineEqual(command._baseline_directory("Apple MountainLion Release WK2 (Tests)"), "/mock-checkout/LayoutTests/platform/mac")
self.assertMultiLineEqual(command._baseline_directory("GTK Linux 64-bit Debug"), "/mock-checkout/LayoutTests/platform/gtk-wk1")
self.assertMultiLineEqual(command._baseline_directory("GTK Linux 64-bit Release WK2 (Tests)"), "/mock-checkout/LayoutTests/platform/gtk-wk2")
self.assertMultiLineEqual(command._baseline_directory("EFL Linux 64-bit Release WK2"), "/mock-checkout/LayoutTests/platform/efl-wk2")
self.assertMultiLineEqual(command._baseline_directory("Qt Linux Release"), "/mock-checkout/LayoutTests/platform/qt")
def test_rebaseline_updates_expectations_file_noop(self):
self._zero_out_test_expectations()
self._write(self.lion_expectations_path, """Bug(B) [ Mac Linux XP Debug ] fast/dom/Window/window-postmessage-clone-really-deep-array.html [ Pass ]
Bug(A) [ Debug ] : fast/css/large-list-of-rules-crash.html [ Failure ]
""")
self._write("fast/dom/Window/window-postmessage-clone-really-deep-array.html", "Dummy test contents")
self._write("fast/css/large-list-of-rules-crash.html", "Dummy test contents")
self._write("userscripts/another-test.html", "Dummy test contents")
self.options.suffixes = "png,wav,txt"
self.command._rebaseline_test_and_update_expectations(self.options)
self.assertItemsEqual(self.tool.web.urls_fetched,
[self.WEB_PREFIX + '/userscripts/another-test-actual.png',
self.WEB_PREFIX + '/userscripts/another-test-actual.wav',
self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
new_expectations = self._read(self.lion_expectations_path)
self.assertMultiLineEqual(new_expectations, """Bug(B) [ Mac Linux XP Debug ] fast/dom/Window/window-postmessage-clone-really-deep-array.html [ Pass ]
Bug(A) [ Debug ] : fast/css/large-list-of-rules-crash.html [ Failure ]
""")
def test_rebaseline_updates_expectations_file(self):
self._write(self.lion_expectations_path, "Bug(x) [ Mac ] userscripts/another-test.html [ ImageOnlyFailure ]\nbug(z) [ Linux ] userscripts/another-test.html [ ImageOnlyFailure ]\n")
self._write("userscripts/another-test.html", "Dummy test contents")
self.options.suffixes = 'png,wav,txt'
self.command._rebaseline_test_and_update_expectations(self.options)
self.assertItemsEqual(self.tool.web.urls_fetched,
[self.WEB_PREFIX + '/userscripts/another-test-actual.png',
self.WEB_PREFIX + '/userscripts/another-test-actual.wav',
self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
new_expectations = self._read(self.lion_expectations_path)
self.assertMultiLineEqual(new_expectations, "Bug(x) [ Mac ] userscripts/another-test.html [ ImageOnlyFailure ]\nbug(z) [ Linux ] userscripts/another-test.html [ ImageOnlyFailure ]\n")
def test_rebaseline_does_not_include_overrides(self):
self._write(self.lion_expectations_path, "Bug(x) [ Mac ] userscripts/another-test.html [ ImageOnlyFailure ]\nBug(z) [ Linux ] userscripts/another-test.html [ ImageOnlyFailure ]\n")
self._write("userscripts/another-test.html", "Dummy test contents")
self.options.suffixes = 'png,wav,txt'
self.command._rebaseline_test_and_update_expectations(self.options)
self.assertItemsEqual(self.tool.web.urls_fetched,
[self.WEB_PREFIX + '/userscripts/another-test-actual.png',
self.WEB_PREFIX + '/userscripts/another-test-actual.wav',
self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
new_expectations = self._read(self.lion_expectations_path)
self.assertMultiLineEqual(new_expectations, "Bug(x) [ Mac ] userscripts/another-test.html [ ImageOnlyFailure ]\nBug(z) [ Linux ] userscripts/another-test.html [ ImageOnlyFailure ]\n")
def test_rebaseline_test(self):
self.command._rebaseline_test("Apple Lion Release WK1 (Tests)", "userscripts/another-test.html", None, "txt", self.WEB_PREFIX)
self.assertItemsEqual(self.tool.web.urls_fetched, [self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
def test_rebaseline_test_with_results_directory(self):
self._write(self.lion_expectations_path, "Bug(x) [ Mac ] userscripts/another-test.html [ ImageOnlyFailure ]\nbug(z) [ Linux ] userscripts/another-test.html [ ImageOnlyFailure ]\n")
self.options.results_directory = '/tmp'
self.command._rebaseline_test_and_update_expectations(self.options)
self.assertItemsEqual(self.tool.web.urls_fetched, ['file:///tmp/userscripts/another-test-actual.txt'])
def test_rebaseline_test_and_print_scm_changes(self):
self.command._print_scm_changes = True
self.command._scm_changes = {'add': [], 'delete': []}
self.tool._scm.exists = lambda x: False
self.command._rebaseline_test("Apple Lion Release WK1 (Tests)", "userscripts/another-test.html", None, "txt", None)
self.assertDictEqual(self.command._scm_changes, {'add': ['/mock-checkout/LayoutTests/platform/mac-lion/userscripts/another-test-expected.txt'], 'delete': []})
def test_rebaseline_and_copy_test(self):
self._write("userscripts/another-test-expected.txt", "generic result")
self.command._rebaseline_test("Apple Lion Release WK1 (Tests)", "userscripts/another-test.html", ["mac-lion-wk2"], "txt", None)
self.assertMultiLineEqual(self._read('platform/mac-lion/userscripts/another-test-expected.txt'), self.MOCK_WEB_RESULT)
self.assertMultiLineEqual(self._read('platform/mac-wk2/userscripts/another-test-expected.txt'), 'generic result')
def test_rebaseline_and_copy_test_no_existing_result(self):
self.command._rebaseline_test("Apple Lion Release WK1 (Tests)", "userscripts/another-test.html", ["mac-lion-wk2"], "txt", None)
self.assertMultiLineEqual(self._read('platform/mac-lion/userscripts/another-test-expected.txt'), self.MOCK_WEB_RESULT)
self.assertFalse(self.tool.filesystem.exists(self._expand('platform/mac-lion-wk2/userscripts/another-test-expected.txt')))
def test_rebaseline_and_copy_test_with_lion_result(self):
self._write("platform/mac-lion/userscripts/another-test-expected.txt", "original lion result")
self.command._rebaseline_test("Apple Lion Release WK1 (Tests)", "userscripts/another-test.html", ["mac-lion-wk2"], "txt", self.WEB_PREFIX)
self.assertItemsEqual(self.tool.web.urls_fetched, [self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
self.assertMultiLineEqual(self._read("platform/mac-wk2/userscripts/another-test-expected.txt"), "original lion result")
self.assertMultiLineEqual(self._read("platform/mac-lion/userscripts/another-test-expected.txt"), self.MOCK_WEB_RESULT)
def test_rebaseline_and_copy_no_overwrite_test(self):
self._write("platform/mac-lion/userscripts/another-test-expected.txt", "original lion result")
self._write("platform/mac-lion-wk2/userscripts/another-test-expected.txt", "original lion wk2 result")
self.command._rebaseline_test("Apple Lion Release WK1 (Tests)", "userscripts/another-test.html", ["mac-lion-wk2"], "txt", None)
self.assertMultiLineEqual(self._read("platform/mac-lion-wk2/userscripts/another-test-expected.txt"), "original lion wk2 result")
self.assertMultiLineEqual(self._read("platform/mac-lion/userscripts/another-test-expected.txt"), self.MOCK_WEB_RESULT)
def test_rebaseline_test_internal_with_move_overwritten_baselines_to(self):
self.tool.executive = MockExecutive2()
# FIXME: it's confusing that this is the test- port, and not the regular lion port. Really all of the tests should be using the test ports.
port = self.tool.port_factory.get('test-mac-snowleopard')
self._write(port._filesystem.join(port.layout_tests_dir(), 'platform/test-mac-snowleopard/failures/expected/image-expected.txt'), 'original snowleopard result')
old_exact_matches = builders._exact_matches
oc = OutputCapture()
try:
builders._exact_matches = {
"MOCK Leopard": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
"MOCK SnowLeopard": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"])},
}
options = MockOptions(optimize=True, builder="MOCK SnowLeopard", suffixes="txt",
move_overwritten_baselines_to=["test-mac-leopard"], verbose=True, test="failures/expected/image.html",
results_directory=None)
oc.capture_output()
self.command.execute(options, [], self.tool)
finally:
out, _, _ = oc.restore_output()
builders._exact_matches = old_exact_matches
self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-mac-leopard/failures/expected/image-expected.txt')), 'original snowleopard result')
self.assertMultiLineEqual(out, '{"add": []}\n')
class TestRebaselineJson(_BaseTestCase):
command_constructor = RebaselineJson
def setUp(self):
super(TestRebaselineJson, self).setUp()
self.tool.executive = MockExecutive2()
self.old_exact_matches = builders._exact_matches
builders._exact_matches = {
"MOCK builder": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier"]),
"move_overwritten_baselines_to": ["test-mac-leopard"]},
"MOCK builder (Debug)": {"port_name": "test-mac-snowleopard", "specifiers": set(["mock-specifier", "debug"])},
}
def tearDown(self):
builders._exact_matches = self.old_exact_matches
super(TestRebaselineJson, self).tearDown()
def test_rebaseline_all(self):
options = MockOptions(optimize=True, verbose=True, move_overwritten_baselines=False, results_directory=None)
self.command._rebaseline(options, {"user-scripts/another-test.html": {"MOCK builder": ["txt", "png"]}})
# Note that we have one run_in_parallel() call followed by a run_command()
self.assertEqual(self.tool.executive.calls,
[[['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'user-scripts/another-test.html', '--verbose']],
['echo', '--verbose', 'optimize-baselines', '--suffixes', 'txt,png', 'user-scripts/another-test.html']])
def test_rebaseline_debug(self):
options = MockOptions(optimize=True, verbose=True, move_overwritten_baselines=False, results_directory=None)
self.command._rebaseline(options, {"user-scripts/another-test.html": {"MOCK builder (Debug)": ["txt", "png"]}})
# Note that we have one run_in_parallel() call followed by a run_command()
self.assertEqual(self.tool.executive.calls,
[[['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'user-scripts/another-test.html', '--verbose']],
['echo', '--verbose', 'optimize-baselines', '--suffixes', 'txt,png', 'user-scripts/another-test.html']])
def test_move_overwritten(self):
options = MockOptions(optimize=True, verbose=True, move_overwritten_baselines=True, results_directory=None)
self.command._rebaseline(options, {"user-scripts/another-test.html": {"MOCK builder": ["txt", "png"]}})
# Note that we have one run_in_parallel() call followed by a run_command()
self.assertEqual(self.tool.executive.calls,
[[['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'user-scripts/another-test.html', '--move-overwritten-baselines-to', 'test-mac-leopard', '--verbose']],
['echo', '--verbose', 'optimize-baselines', '--suffixes', 'txt,png', 'user-scripts/another-test.html']])
def test_no_optimize(self):
options = MockOptions(optimize=False, verbose=True, move_overwritten_baselines=False, results_directory=None)
self.command._rebaseline(options, {"user-scripts/another-test.html": {"MOCK builder (Debug)": ["txt", "png"]}})
# Note that we have only one run_in_parallel() call
self.assertEqual(self.tool.executive.calls,
[[['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder (Debug)', '--test', 'user-scripts/another-test.html', '--verbose']]])
def test_results_directory(self):
options = MockOptions(optimize=False, verbose=True, move_overwritten_baselines=False, results_directory='/tmp')
self.command._rebaseline(options, {"user-scripts/another-test.html": {"MOCK builder": ["txt", "png"]}})
# Note that we have only one run_in_parallel() call
self.assertEqual(self.tool.executive.calls,
[[['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'user-scripts/another-test.html', '--results-directory', '/tmp', '--verbose']]])
class TestRebaseline(_BaseTestCase):
# This command shares most of its logic with RebaselineJson, so these tests just test what is different.
command_constructor = Rebaseline # AKA webkit-patch rebaseline
def test_tests_to_update(self):
build = Mock()
OutputCapture().assert_outputs(self, self.command._tests_to_update, [build])
def test_rebaseline(self):
self.command._builders_to_pull_from = lambda: [MockBuilder('MOCK builder')]
self.command._tests_to_update = lambda builder: ['mock/path/to/test.html']
self._zero_out_test_expectations()
old_exact_matches = builders._exact_matches
oc = OutputCapture()
try:
builders._exact_matches = {
"MOCK builder": {"port_name": "test-mac-leopard", "specifiers": set(["mock-specifier"])},
}
oc.capture_output()
self.command.execute(MockOptions(optimize=False, builders=None, suffixes="txt,png", verbose=True, move_overwritten_baselines=False), [], self.tool)
finally:
oc.restore_output()
builders._exact_matches = old_exact_matches
calls = filter(lambda x: x != ['qmake', '-v'] and x[0] != 'perl', self.tool.executive.calls)
self.assertEqual(calls,
[[['echo', 'rebaseline-test-internal', '--suffixes', 'txt,png', '--builder', 'MOCK builder', '--test', 'mock/path/to/test.html', '--verbose']]])
class TestRebaselineExpectations(_BaseTestCase):
command_constructor = RebaselineExpectations
def setUp(self):
super(TestRebaselineExpectations, self).setUp()
self.options = MockOptions(optimize=False, builders=None, suffixes=['txt'], verbose=False, platform=None,
move_overwritten_baselines=False, results_directory=None)
def test_rebaseline_expectations(self):
self._zero_out_test_expectations()
self.tool.executive = MockExecutive2()
self.command._tests_to_rebaseline = lambda port: {'userscripts/another-test.html': set(['txt']), 'userscripts/images.svg': set(['png'])}
self.command.execute(self.options, [], self.tool)
# FIXME: change this to use the test- ports.
calls = filter(lambda x: x != ['qmake', '-v'], self.tool.executive.calls)
self.assertEqual(len(calls), 1)
self.assertEqual(len(calls[0]), 22)
def test_rebaseline_expectations_noop(self):
self._zero_out_test_expectations()
oc = OutputCapture()
try:
oc.capture_output()
self.command.execute(self.options, [], self.tool)
finally:
_, _, logs = oc.restore_output()
self.assertEqual(self.tool.filesystem.written_files, {})
self.assertEqual(logs, 'Did not find any tests marked Rebaseline.\n')
def disabled_test_overrides_are_included_correctly(self):
        # This tests that any tests marked as REBASELINE in the overrides are found, but
# that the overrides do not get written into the main file.
self._zero_out_test_expectations()
self._write(self.lion_expectations_path, '')
self.lion_port.expectations_dict = lambda: {
self.lion_expectations_path: '',
'overrides': ('Bug(x) userscripts/another-test.html [ Failure Rebaseline ]\n'
'Bug(y) userscripts/test.html [ Crash ]\n')}
self._write('/userscripts/another-test.html', '')
self.assertDictEqual(self.command._tests_to_rebaseline(self.lion_port), {'userscripts/another-test.html': set(['png', 'txt', 'wav'])})
self.assertEqual(self._read(self.lion_expectations_path), '')
class _FakeOptimizer(BaselineOptimizer):
def read_results_by_directory(self, baseline_name):
if baseline_name.endswith('txt'):
return {'LayoutTests/passes/text.html': '123456',
'LayoutTests/platform/test-mac-leopard/passes/text.html': 'abcdef'}
return {}
class TestAnalyzeBaselines(_BaseTestCase):
command_constructor = AnalyzeBaselines
def setUp(self):
super(TestAnalyzeBaselines, self).setUp()
self.port = self.tool.port_factory.get('test')
self.tool.port_factory.get = (lambda port_name=None, options=None: self.port)
self.lines = []
self.command._optimizer_class = _FakeOptimizer
self.command._write = (lambda msg: self.lines.append(msg)) # pylint bug warning about unnecessary lambda? pylint: disable=W0108
def test_default(self):
self.command.execute(MockOptions(suffixes='txt', missing=False, platform=None), ['passes/text.html'], self.tool)
self.assertEqual(self.lines,
['passes/text-expected.txt:',
' (generic): 123456',
' test-mac-leopard: abcdef'])
def test_missing_baselines(self):
self.command.execute(MockOptions(suffixes='png,txt', missing=True, platform=None), ['passes/text.html'], self.tool)
self.assertEqual(self.lines,
['passes/text-expected.png: (no baselines found)',
'passes/text-expected.txt:',
' (generic): 123456',
' test-mac-leopard: abcdef'])
| bsd-3-clause |
mlperf/training_results_v0.6 | Google/benchmarks/mask/implementations/tpu-v3-256-mask/mask_rcnn/object_detection/tf_example_decoder.py | 11 | 10004 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow Example proto decoder for object detection.
A decoder to decode string tensors containing serialized tensorflow.Example
protos for object detection.
"""
import tensorflow as tf
slim_example_decoder = tf.contrib.slim.tfexample_decoder
class TfExampleDecoder(object):
"""Tensorflow Example proto decoder."""
def __init__(self, use_instance_mask=False):
"""Constructor sets keys_to_features and items_to_handlers."""
self.keys_to_features = {
'image/encoded':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/format':
tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/filename':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/key/sha256':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/source_id':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/height':
tf.FixedLenFeature((), tf.int64, 1),
'image/width':
tf.FixedLenFeature((), tf.int64, 1),
# Object boxes and classes.
'image/object/bbox/xmin':
tf.VarLenFeature(tf.float32),
'image/object/bbox/xmax':
tf.VarLenFeature(tf.float32),
'image/object/bbox/ymin':
tf.VarLenFeature(tf.float32),
'image/object/bbox/ymax':
tf.VarLenFeature(tf.float32),
'image/object/class/label':
tf.VarLenFeature(tf.int64),
'image/object/class/text':
tf.VarLenFeature(tf.string),
'image/object/area':
tf.VarLenFeature(tf.float32),
'image/object/is_crowd':
tf.VarLenFeature(tf.int64),
'image/object/difficult':
tf.VarLenFeature(tf.int64),
'image/object/group_of':
tf.VarLenFeature(tf.int64),
'image/object/weight':
tf.VarLenFeature(tf.float32),
'image/segmentation/object':
tf.VarLenFeature(tf.int64),
'image/segmentation/object/class':
tf.VarLenFeature(tf.int64),
'image/object/mask':
tf.VarLenFeature(tf.string),
}
self.items_to_handlers = {
'image': slim_example_decoder.Image(
image_key='image/encoded', format_key='image/format', channels=3),
'source_id': (
slim_example_decoder.Tensor('image/source_id')),
'key': (
slim_example_decoder.Tensor('image/key/sha256')),
'filename': (
slim_example_decoder.Tensor('image/filename')),
# Object boxes and classes.
'groundtruth_boxes': (
slim_example_decoder.BoundingBox(
['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/')),
'groundtruth_area': slim_example_decoder.Tensor(
'image/object/area'),
'groundtruth_is_crowd': (
slim_example_decoder.Tensor('image/object/is_crowd')),
'groundtruth_difficult': (
slim_example_decoder.Tensor('image/object/difficult')),
'groundtruth_group_of': (
slim_example_decoder.Tensor('image/object/group_of')),
'groundtruth_weights': (
slim_example_decoder.Tensor('image/object/weight')),
'groundtruth_classes': (
slim_example_decoder.Tensor('image/object/class/label')),
}
if use_instance_mask:
mask_decoder = slim_example_decoder.ItemHandlerCallback(
['image/object/mask', 'image/height', 'image/width'],
self._decode_png_instance_masks)
self.items_to_handlers.update(
{
'groundtruth_instance_masks': mask_decoder,
'groundtruth_instance_class':
slim_example_decoder.Tensor(
'image/segmentation/object/class'),
}
)
def _decode_png_instance_masks(self, keys_to_tensors):
"""Decode PNG instance segmentation masks and stack into dense tensor.
The instance segmentation masks are reshaped to [num_instances, height,
width].
Args:
keys_to_tensors: a dictionary from keys to tensors.
Returns:
A 3-D float tensor of shape [num_instances, height, width] with values
in {0, 1}.
"""
def decode_png_mask(image_buffer):
image = tf.squeeze(
tf.image.decode_image(image_buffer, channels=1), axis=2)
image.set_shape([None, None])
image = tf.to_float(tf.greater(image, 0))
return image
png_masks = keys_to_tensors['image/object/mask']
height = keys_to_tensors['image/height']
width = keys_to_tensors['image/width']
if isinstance(png_masks, tf.SparseTensor):
png_masks = tf.sparse_tensor_to_dense(png_masks, default_value='')
return tf.cond(
tf.greater(tf.size(png_masks), 0),
lambda: tf.map_fn(decode_png_mask, png_masks, dtype=tf.float32),
lambda: tf.zeros(tf.to_int32(tf.stack([0, height, width]))))
def decode(self, tf_example_string_tensor):
"""Decodes serialized tensorflow example and returns a tensor dictionary.
Args:
tf_example_string_tensor: a string tensor holding a serialized tensorflow
example proto.
Returns:
A dictionary of the following tensors.
image - 3D uint8 tensor of shape [None, None, 3]
containing image.
source_id - string tensor containing original
image id.
key - string tensor with unique sha256 hash key.
filename - string tensor with original dataset
filename.
groundtruth_boxes - 2D float32 tensor of shape
[None, 4] containing box corners.
      groundtruth_classes - 1D int64 tensor of shape
        [None] containing classes for the boxes.
      groundtruth_weights - 1D float32 tensor of
        shape [None] indicating the weights of groundtruth boxes.
      groundtruth_area - 1D float32 tensor of shape
        [None] containing object mask area in pixel squared.
groundtruth_is_crowd - 1D bool tensor of shape
[None] indicating if the boxes enclose a crowd.
Optional:
groundtruth_difficult - 1D bool tensor of shape
[None] indicating if the boxes represent `difficult` instances.
groundtruth_group_of - 1D bool tensor of shape
[None] indicating if the boxes represent `group_of` instances.
groundtruth_instance_masks - 3D float32 tensor of
shape [None, None, None] containing instance masks.
"""
serialized_example = tf.reshape(tf_example_string_tensor, shape=[])
decoder = slim_example_decoder.TFExampleDecoder(self.keys_to_features,
self.items_to_handlers)
keys = sorted(decoder.list_items())
tensors = decoder.decode(serialized_example, items=keys)
tensor_dict = dict(zip(keys, tensors))
is_crowd = 'groundtruth_is_crowd'
tensor_dict[is_crowd] = tf.cast(tensor_dict[is_crowd], dtype=tf.bool)
tensor_dict['image'].set_shape([None, None, 3])
if 'groundtruth_instance_masks' not in tensor_dict:
tensor_dict['groundtruth_instance_masks'] = None
return tensor_dict
class TfExampleSegmentationDecoder(object):
"""Tensorflow Example proto decoder."""
def __init__(self):
"""Constructor sets keys_to_features and items_to_handlers."""
self.keys_to_features = {
'image/encoded':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/filename':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/format':
tf.FixedLenFeature((), tf.string, default_value='jpeg'),
'image/height':
tf.FixedLenFeature((), tf.int64, default_value=0),
'image/width':
tf.FixedLenFeature((), tf.int64, default_value=0),
'image/segmentation/class/encoded':
tf.FixedLenFeature((), tf.string, default_value=''),
'image/segmentation/class/format':
tf.FixedLenFeature((), tf.string, default_value='png'),
}
self.items_to_handlers = {
'image': slim_example_decoder.Image(
image_key='image/encoded', format_key='image/format', channels=3),
'labels_class': slim_example_decoder.Image(
image_key='image/segmentation/class/encoded',
format_key='image/segmentation/class/format',
channels=1)
}
def decode(self, tf_example_string_tensor):
"""Decodes serialized tensorflow example and returns a tensor dictionary.
Args:
tf_example_string_tensor: a string tensor holding a serialized tensorflow
example proto.
Returns:
A dictionary of the following tensors.
image - 3D uint8 tensor of shape [None, None, 3] containing image.
labels_class - 2D unit8 tensor of shape [None, None] containing
pixel-wise class labels.
"""
serialized_example = tf.reshape(tf_example_string_tensor, shape=[])
decoder = slim_example_decoder.TFExampleDecoder(self.keys_to_features,
self.items_to_handlers)
keys = sorted(decoder.list_items())
keys = ['image', 'labels_class']
tensors = decoder.decode(serialized_example, items=keys)
tensor_dict = dict(zip(keys, tensors))
tensor_dict['image'].set_shape([None, None, 3])
return tensor_dict
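# Editor's note: a minimal, hedged usage sketch, not part of the original
# module.  It only shows how the decoder is typically wired into an input
# pipeline; the TFRecord path below is a made-up placeholder and a TF 1.x
# runtime with tf.contrib.slim is assumed (as the import at the top implies).
if __name__ == '__main__':
  decoder = TfExampleDecoder(use_instance_mask=True)
  def _parse_fn(serialized_example):
    # Returns the tensor dictionary documented in TfExampleDecoder.decode().
    return decoder.decode(serialized_example)
  # dataset = tf.data.TFRecordDataset('/path/to/examples.tfrecord').map(_parse_fn)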
| apache-2.0 |
MagicStack/MagicPython | test/docstrings/escaping1.py | 1 | 4218 | '''Module docstring
{{ %d simple \\ string \
foo \' \" \a \b \c \f \n \r \t \v \5 \55 \555 \05 \005
multiline "unicode" string \
\xf1 \u1234aaaa \U1234aaaa
\N{BLACK SPADE SUIT}
'''
''' : punctuation.definition.string.begin.python, source.python, string.quoted.docstring.multi.python
Module docstring : source.python, string.quoted.docstring.multi.python
: source.python, string.quoted.docstring.multi.python
{{ %d simple : source.python, string.quoted.docstring.multi.python
\\ : constant.character.escape.python, source.python, string.quoted.docstring.multi.python
string : source.python, string.quoted.docstring.multi.python
\ : constant.language.python, source.python, string.quoted.docstring.multi.python
foo : source.python, string.quoted.docstring.multi.python
\' : constant.character.escape.python, source.python, string.quoted.docstring.multi.python
: source.python, string.quoted.docstring.multi.python
\" : constant.character.escape.python, source.python, string.quoted.docstring.multi.python
: source.python, string.quoted.docstring.multi.python
\a : constant.character.escape.python, source.python, string.quoted.docstring.multi.python
: source.python, string.quoted.docstring.multi.python
\b : constant.character.escape.python, source.python, string.quoted.docstring.multi.python
\c : source.python, string.quoted.docstring.multi.python
\f : constant.character.escape.python, source.python, string.quoted.docstring.multi.python
: source.python, string.quoted.docstring.multi.python
\n : constant.character.escape.python, source.python, string.quoted.docstring.multi.python
: source.python, string.quoted.docstring.multi.python
\r : constant.character.escape.python, source.python, string.quoted.docstring.multi.python
: source.python, string.quoted.docstring.multi.python
\t : constant.character.escape.python, source.python, string.quoted.docstring.multi.python
: source.python, string.quoted.docstring.multi.python
\v : constant.character.escape.python, source.python, string.quoted.docstring.multi.python
: source.python, string.quoted.docstring.multi.python
\5 : constant.character.escape.python, source.python, string.quoted.docstring.multi.python
: source.python, string.quoted.docstring.multi.python
\55 : constant.character.escape.python, source.python, string.quoted.docstring.multi.python
: source.python, string.quoted.docstring.multi.python
\555 : constant.character.escape.python, source.python, string.quoted.docstring.multi.python
: source.python, string.quoted.docstring.multi.python
\05 : constant.character.escape.python, source.python, string.quoted.docstring.multi.python
: source.python, string.quoted.docstring.multi.python
\005 : constant.character.escape.python, source.python, string.quoted.docstring.multi.python
: source.python, string.quoted.docstring.multi.python
multiline "unicode" string : source.python, string.quoted.docstring.multi.python
\ : constant.language.python, source.python, string.quoted.docstring.multi.python
: source.python, string.quoted.docstring.multi.python
\xf1 : constant.character.escape.python, source.python, string.quoted.docstring.multi.python
: source.python, string.quoted.docstring.multi.python
\u1234 : constant.character.escape.python, source.python, string.quoted.docstring.multi.python
aaaa : source.python, string.quoted.docstring.multi.python
\U1234aaaa : constant.character.escape.python, source.python, string.quoted.docstring.multi.python
: source.python, string.quoted.docstring.multi.python
\N{BLACK SPADE SUIT} : constant.character.escape.python, source.python, string.quoted.docstring.multi.python
''' : punctuation.definition.string.end.python, source.python, string.quoted.docstring.multi.python
| mit |
xgds/xgds_planner2 | xgds_planner2/choosePlanExporter.py | 1 | 1880 | #__BEGIN_LICENSE__
# Copyright (c) 2015, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The xGDS platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#__END_LICENSE__
from django.conf import settings
from geocamUtil import loader
class ExporterInfo(object):
def __init__(self, formatCode, extension, exporterClass, customLabel=None):
self.formatCode = formatCode
self.extension = extension
self.exporterClass = exporterClass
if customLabel:
self.label = customLabel
else:
self.label = exporterClass.label
self.url = None
PLAN_EXPORTERS = []
PLAN_EXPORTERS_BY_FORMAT = {}
for exporterInfo in settings.XGDS_PLANNER_PLAN_EXPORTERS:
# _formatCode, _extension, _exporterClassName, _customLabel
_formatCode = exporterInfo[0]
_extension = exporterInfo[1]
_exporterClassName = exporterInfo[2]
_customLabel = None
if len(exporterInfo) > 3:
_customLabel = exporterInfo[3]
_exporterInfo = ExporterInfo(_formatCode,
_extension,
loader.getClassByName(_exporterClassName),
_customLabel)
PLAN_EXPORTERS.append(_exporterInfo)
PLAN_EXPORTERS_BY_FORMAT[_formatCode] = _exporterInfo
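# Editor's note: a hedged illustration, not part of the original module, of the
# settings shape the loop above consumes.  The format code, extension and class
# path below are invented placeholders, not real xGDS configuration:
#
#   XGDS_PLANNER_PLAN_EXPORTERS = (
#       # (formatCode, extension, exporterClassName[, customLabel])
#       ('xpjson', '.json', 'xgds_planner2.planExporter.XpjsonPlanExporter', 'xpJSON'),
#   )
#
# After the loop runs, PLAN_EXPORTERS_BY_FORMAT['xpjson'].extension is '.json'
# and its label is the custom label 'xpJSON' instead of the exporter class label.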
| apache-2.0 |
maliciamrg/xbmc-addon-tvtumbler | resources/lib/tvdb_api/tvdb_exceptions.py | 2 | 1233 | #!/usr/bin/env python
#encoding:utf-8
#author:dbr/Ben
#project:tvdb_api
#repository:http://github.com/dbr/tvdb_api
#license:unlicense (http://unlicense.org/)
"""Custom exceptions used or raised by tvdb_api
"""
__author__ = "dbr/Ben"
__version__ = "1.8.2"
__all__ = ["tvdb_error", "tvdb_userabort", "tvdb_shownotfound",
"tvdb_seasonnotfound", "tvdb_episodenotfound", "tvdb_attributenotfound"]
class tvdb_exception(Exception):
"""Any exception generated by tvdb_api
"""
pass
class tvdb_error(tvdb_exception):
"""An error with thetvdb.com (Cannot connect, for example)
"""
pass
class tvdb_userabort(tvdb_exception):
"""User aborted the interactive selection (via
the q command, ^c etc)
"""
pass
class tvdb_shownotfound(tvdb_exception):
"""Show cannot be found on thetvdb.com (non-existant show)
"""
pass
class tvdb_seasonnotfound(tvdb_exception):
"""Season cannot be found on thetvdb.com
"""
pass
class tvdb_episodenotfound(tvdb_exception):
"""Episode cannot be found on thetvdb.com
"""
pass
class tvdb_attributenotfound(tvdb_exception):
"""Raised if an episode does not have the requested
    attribute (such as an episode name)
"""
pass
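# Editor's note: a minimal, hedged usage sketch, not part of the original
# module.  It only demonstrates that the specific errors share the
# tvdb_exception base class, so callers can catch them together; the message
# text is invented.
if __name__ == '__main__':
    try:
        raise tvdb_shownotfound("no matching show on thetvdb.com")
    except tvdb_exception as e:
        print("lookup failed: %s" % e)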
| gpl-3.0 |
rgeleta/odoo | addons/stock/report/stock_graph.py | 326 | 4514 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from pychart import *
import pychart.legend
import time
from openerp.report.misc import choice_colors
from openerp import tools
#
# Draw a graph for stocks
#
class stock_graph(object):
def __init__(self, io):
self._datas = {}
self._canvas = canvas.init(fname=io, format='pdf')
self._canvas.set_author("Odoo")
self._canvas.set_title("Stock Level Forecast")
self._names = {}
self.val_min = ''
self.val_max = ''
def add(self, product_id, product_name, datas):
if hasattr(product_name, 'replace'):
product_name=product_name.replace('/', '//')
if product_id not in self._datas:
self._datas[product_id] = {}
self._names[product_id] = tools.ustr(product_name)
for (dt,stock) in datas:
if not dt in self._datas[product_id]:
self._datas[product_id][dt]=0
self._datas[product_id][dt]+=stock
if self.val_min:
self.val_min = min(self.val_min,dt)
else:
self.val_min = dt
self.val_max = max(self.val_max,dt)
def draw(self):
colors = choice_colors(len(self._datas.keys()))
user_color = {}
for user in self._datas.keys():
user_color[user] = colors.pop()
val_min = int(time.mktime(time.strptime(self.val_min,'%Y-%m-%d')))
val_max = int(time.mktime(time.strptime(self.val_max,'%Y-%m-%d')))
plots = []
for product_id in self._datas:
f = fill_style.Plain()
            f.bgcolor = user_color[product_id]
datas = self._datas[product_id].items()
datas = map(lambda x: (int(time.mktime(time.strptime(x[0],'%Y-%m-%d'))),x[1]), datas)
datas.sort()
datas2 = []
val = 0
for d in datas:
val+=d[1]
if len(datas2):
d2 = d[0]-60*61*24
if datas2[-1][0]<d2-1000:
datas2.append((d2,datas2[-1][1]))
datas2.append((d[0],val))
if len(datas2) and datas2[-1][0]<val_max-100:
datas2.append((val_max, datas2[-1][1]))
if len(datas2)==1:
datas2.append( (datas2[0][0]+100, datas2[0][1]) )
st = line_style.T()
st.color = user_color[product_id]
st.width = 1
st.cap_style=1
st.join_style=1
plot = line_plot.T(label=self._names[product_id], data=datas2, line_style=st)
plots.append(plot)
interval = max((val_max-val_min)/15, 86400)
x_axis = axis.X(format=lambda x:'/a60{}'+time.strftime('%Y-%m-%d',time.gmtime(x)), tic_interval=interval, label=None)
        # Add the report header at the top of the report.
tb = text_box.T(loc=(300, 500), text="/hL/15/bStock Level Forecast", line_style=None)
tb.draw()
ar = area.T(size = (620,435), x_range=(val_min,val_max+1), y_axis = axis.Y(format="%d", label="Virtual Stock (Unit)"), x_axis=x_axis)
for plot in plots:
ar.add_plot(plot)
ar.draw(self._canvas)
def close(self):
self._canvas.close()
if __name__ == '__main__':
gt = stock_graph('test.pdf')
gt.add(1, 'Pomme', [('2005-07-29', 6), ('2005-07-30', -2), ('2005-07-31', 4)])
gt.add(2, 'Cailloux', [('2005-07-29', 9), ('2005-07-30', -4), ('2005-07-31', 2)])
gt.draw()
gt.close()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
def-/commandergenius | project/jni/python/src/Demo/turtle/tdemo_wikipedia.py | 42 | 1347 | """ turtle-example-suite:
tdemo_wikipedia3.py
This example is
inspired by the Wikipedia article on turtle
graphics. (See example wikipedia1 for URLs)
First we create (ne-1) (i.e. 35 in this
example) copies of our first turtle p.
Then we let them perform their steps in
parallel.
Followed by a complete undo().
"""
from turtle import Screen, Turtle, mainloop
from time import clock, sleep
def mn_eck(p, ne,sz):
turtlelist = [p]
#create ne-1 additional turtles
for i in range(1,ne):
q = p.clone()
q.rt(360.0/ne)
turtlelist.append(q)
p = q
for i in range(ne):
c = abs(ne/2.0-i)/(ne*.7)
# let those ne turtles make a step
# in parallel:
for t in turtlelist:
t.rt(360./ne)
t.pencolor(1-c,0,c)
t.fd(sz)
def main():
s = Screen()
s.bgcolor("black")
p=Turtle()
p.speed(0)
p.hideturtle()
p.pencolor("red")
p.pensize(3)
s.tracer(36,0)
at = clock()
mn_eck(p, 36, 19)
et = clock()
z1 = et-at
sleep(1)
at = clock()
while any([t.undobufferentries() for t in s.turtles()]):
for t in s.turtles():
t.undo()
et = clock()
return "Laufzeit: %.3f sec" % (z1+et-at)
if __name__ == '__main__':
msg = main()
print msg
mainloop()
| lgpl-2.1 |
atlassian/boto | boto/sdb/db/manager/xmlmanager.py | 153 | 18612 | # Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
from boto.utils import find_class, Password
from boto.sdb.db.key import Key
from boto.sdb.db.model import Model
from boto.compat import six, encodebytes
from datetime import datetime
from xml.dom.minidom import getDOMImplementation, parse, parseString, Node
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
class XMLConverter(object):
"""
    Responsible for converting base Python types to a format compatible with the underlying
database. For SimpleDB, that means everything needs to be converted to a string
when stored in SimpleDB and from a string when retrieved.
To convert a value, pass it to the encode or decode method. The encode method
will take a Python native value and convert to DB format. The decode method will
take a DB format value and convert it to Python native format. To find the appropriate
method to call, the generic encode/decode methods will look for the type-specific
method by searching for a method called "encode_<type name>" or "decode_<type name>".
"""
def __init__(self, manager):
self.manager = manager
self.type_map = { bool : (self.encode_bool, self.decode_bool),
int : (self.encode_int, self.decode_int),
Model : (self.encode_reference, self.decode_reference),
Key : (self.encode_reference, self.decode_reference),
Password : (self.encode_password, self.decode_password),
datetime : (self.encode_datetime, self.decode_datetime)}
if six.PY2:
self.type_map[long] = (self.encode_long, self.decode_long)
def get_text_value(self, parent_node):
value = ''
for node in parent_node.childNodes:
if node.nodeType == node.TEXT_NODE:
value += node.data
return value
def encode(self, item_type, value):
if item_type in self.type_map:
encode = self.type_map[item_type][0]
return encode(value)
return value
def decode(self, item_type, value):
if item_type in self.type_map:
decode = self.type_map[item_type][1]
return decode(value)
else:
value = self.get_text_value(value)
return value
def encode_prop(self, prop, value):
if isinstance(value, list):
if hasattr(prop, 'item_type'):
new_value = []
for v in value:
item_type = getattr(prop, "item_type")
if Model in item_type.mro():
item_type = Model
new_value.append(self.encode(item_type, v))
return new_value
else:
return value
else:
return self.encode(prop.data_type, value)
def decode_prop(self, prop, value):
if prop.data_type == list:
if hasattr(prop, 'item_type'):
item_type = getattr(prop, "item_type")
if Model in item_type.mro():
item_type = Model
values = []
for item_node in value.getElementsByTagName('item'):
value = self.decode(item_type, item_node)
values.append(value)
return values
else:
return self.get_text_value(value)
else:
return self.decode(prop.data_type, value)
def encode_int(self, value):
value = int(value)
return '%d' % value
def decode_int(self, value):
value = self.get_text_value(value)
if value:
value = int(value)
else:
value = None
return value
def encode_long(self, value):
value = long(value)
return '%d' % value
def decode_long(self, value):
value = self.get_text_value(value)
return long(value)
def encode_bool(self, value):
if value == True:
return 'true'
else:
return 'false'
def decode_bool(self, value):
value = self.get_text_value(value)
if value.lower() == 'true':
return True
else:
return False
def encode_datetime(self, value):
return value.strftime(ISO8601)
def decode_datetime(self, value):
value = self.get_text_value(value)
try:
return datetime.strptime(value, ISO8601)
except:
return None
def encode_reference(self, value):
if isinstance(value, six.string_types):
return value
if value is None:
return ''
else:
val_node = self.manager.doc.createElement("object")
val_node.setAttribute('id', value.id)
val_node.setAttribute('class', '%s.%s' % (value.__class__.__module__, value.__class__.__name__))
return val_node
def decode_reference(self, value):
if not value:
return None
try:
value = value.childNodes[0]
class_name = value.getAttribute("class")
id = value.getAttribute("id")
cls = find_class(class_name)
return cls.get_by_ids(id)
except:
return None
def encode_password(self, value):
if value and len(value) > 0:
return str(value)
else:
return None
def decode_password(self, value):
value = self.get_text_value(value)
return Password(value)
class XMLManager(object):
def __init__(self, cls, db_name, db_user, db_passwd,
db_host, db_port, db_table, ddl_dir, enable_ssl):
self.cls = cls
if not db_name:
db_name = cls.__name__.lower()
self.db_name = db_name
self.db_user = db_user
self.db_passwd = db_passwd
self.db_host = db_host
self.db_port = db_port
self.db_table = db_table
self.ddl_dir = ddl_dir
self.s3 = None
self.converter = XMLConverter(self)
self.impl = getDOMImplementation()
self.doc = self.impl.createDocument(None, 'objects', None)
self.connection = None
self.enable_ssl = enable_ssl
self.auth_header = None
if self.db_user:
base64string = encodebytes('%s:%s' % (self.db_user, self.db_passwd))[:-1]
authheader = "Basic %s" % base64string
self.auth_header = authheader
def _connect(self):
if self.db_host:
if self.enable_ssl:
from httplib import HTTPSConnection as Connection
else:
from httplib import HTTPConnection as Connection
self.connection = Connection(self.db_host, self.db_port)
def _make_request(self, method, url, post_data=None, body=None):
"""
Make a request on this connection
"""
if not self.connection:
self._connect()
try:
self.connection.close()
except:
pass
self.connection.connect()
headers = {}
if self.auth_header:
headers["Authorization"] = self.auth_header
self.connection.request(method, url, body, headers)
resp = self.connection.getresponse()
return resp
def new_doc(self):
return self.impl.createDocument(None, 'objects', None)
def _object_lister(self, cls, doc):
for obj_node in doc.getElementsByTagName('object'):
if not cls:
class_name = obj_node.getAttribute('class')
cls = find_class(class_name)
id = obj_node.getAttribute('id')
obj = cls(id)
for prop_node in obj_node.getElementsByTagName('property'):
prop_name = prop_node.getAttribute('name')
prop = obj.find_property(prop_name)
if prop:
if hasattr(prop, 'item_type'):
value = self.get_list(prop_node, prop.item_type)
else:
value = self.decode_value(prop, prop_node)
value = prop.make_value_from_datastore(value)
setattr(obj, prop.name, value)
yield obj
def reset(self):
self._connect()
def get_doc(self):
return self.doc
def encode_value(self, prop, value):
return self.converter.encode_prop(prop, value)
def decode_value(self, prop, value):
return self.converter.decode_prop(prop, value)
def get_s3_connection(self):
if not self.s3:
self.s3 = boto.connect_s3(self.aws_access_key_id, self.aws_secret_access_key)
return self.s3
def get_list(self, prop_node, item_type):
values = []
try:
items_node = prop_node.getElementsByTagName('items')[0]
except:
return []
for item_node in items_node.getElementsByTagName('item'):
value = self.converter.decode(item_type, item_node)
values.append(value)
return values
def get_object_from_doc(self, cls, id, doc):
obj_node = doc.getElementsByTagName('object')[0]
if not cls:
class_name = obj_node.getAttribute('class')
cls = find_class(class_name)
if not id:
id = obj_node.getAttribute('id')
obj = cls(id)
for prop_node in obj_node.getElementsByTagName('property'):
prop_name = prop_node.getAttribute('name')
prop = obj.find_property(prop_name)
value = self.decode_value(prop, prop_node)
value = prop.make_value_from_datastore(value)
if value is not None:
try:
setattr(obj, prop.name, value)
except:
pass
return obj
def get_props_from_doc(self, cls, id, doc):
"""
Pull out the properties from this document
Returns the class, the properties in a hash, and the id if provided as a tuple
:return: (cls, props, id)
"""
obj_node = doc.getElementsByTagName('object')[0]
if not cls:
class_name = obj_node.getAttribute('class')
cls = find_class(class_name)
if not id:
id = obj_node.getAttribute('id')
props = {}
for prop_node in obj_node.getElementsByTagName('property'):
prop_name = prop_node.getAttribute('name')
prop = cls.find_property(prop_name)
value = self.decode_value(prop, prop_node)
value = prop.make_value_from_datastore(value)
if value is not None:
props[prop.name] = value
return (cls, props, id)
def get_object(self, cls, id):
if not self.connection:
self._connect()
if not self.connection:
raise NotImplementedError("Can't query without a database connection")
url = "/%s/%s" % (self.db_name, id)
resp = self._make_request('GET', url)
if resp.status == 200:
doc = parse(resp)
else:
raise Exception("Error: %s" % resp.status)
return self.get_object_from_doc(cls, id, doc)
def query(self, cls, filters, limit=None, order_by=None):
if not self.connection:
self._connect()
if not self.connection:
raise NotImplementedError("Can't query without a database connection")
from urllib import urlencode
query = str(self._build_query(cls, filters, limit, order_by))
if query:
url = "/%s?%s" % (self.db_name, urlencode({"query": query}))
else:
url = "/%s" % self.db_name
resp = self._make_request('GET', url)
if resp.status == 200:
doc = parse(resp)
else:
raise Exception("Error: %s" % resp.status)
return self._object_lister(cls, doc)
def _build_query(self, cls, filters, limit, order_by):
import types
if len(filters) > 4:
raise Exception('Too many filters, max is 4')
parts = []
properties = cls.properties(hidden=False)
for filter, value in filters:
name, op = filter.strip().split()
found = False
for property in properties:
if property.name == name:
found = True
if types.TypeType(value) == list:
filter_parts = []
for val in value:
val = self.encode_value(property, val)
filter_parts.append("'%s' %s '%s'" % (name, op, val))
parts.append("[%s]" % " OR ".join(filter_parts))
else:
value = self.encode_value(property, value)
parts.append("['%s' %s '%s']" % (name, op, value))
if not found:
raise Exception('%s is not a valid field' % name)
if order_by:
if order_by.startswith("-"):
key = order_by[1:]
type = "desc"
else:
key = order_by
type = "asc"
parts.append("['%s' starts-with ''] sort '%s' %s" % (key, key, type))
return ' intersection '.join(parts)
def query_gql(self, query_string, *args, **kwds):
raise NotImplementedError("GQL queries not supported in XML")
def save_list(self, doc, items, prop_node):
items_node = doc.createElement('items')
prop_node.appendChild(items_node)
for item in items:
item_node = doc.createElement('item')
items_node.appendChild(item_node)
if isinstance(item, Node):
item_node.appendChild(item)
else:
text_node = doc.createTextNode(item)
item_node.appendChild(text_node)
def save_object(self, obj, expected_value=None):
"""
Marshal the object and do a PUT
"""
doc = self.marshal_object(obj)
if obj.id:
url = "/%s/%s" % (self.db_name, obj.id)
else:
url = "/%s" % (self.db_name)
resp = self._make_request("PUT", url, body=doc.toxml())
new_obj = self.get_object_from_doc(obj.__class__, None, parse(resp))
obj.id = new_obj.id
for prop in obj.properties():
try:
propname = prop.name
except AttributeError:
propname = None
if propname:
value = getattr(new_obj, prop.name)
if value:
setattr(obj, prop.name, value)
return obj
def marshal_object(self, obj, doc=None):
if not doc:
doc = self.new_doc()
if not doc:
doc = self.doc
obj_node = doc.createElement('object')
if obj.id:
obj_node.setAttribute('id', obj.id)
obj_node.setAttribute('class', '%s.%s' % (obj.__class__.__module__,
obj.__class__.__name__))
root = doc.documentElement
root.appendChild(obj_node)
for property in obj.properties(hidden=False):
prop_node = doc.createElement('property')
prop_node.setAttribute('name', property.name)
prop_node.setAttribute('type', property.type_name)
value = property.get_value_for_datastore(obj)
if value is not None:
value = self.encode_value(property, value)
if isinstance(value, list):
self.save_list(doc, value, prop_node)
elif isinstance(value, Node):
prop_node.appendChild(value)
else:
text_node = doc.createTextNode(six.text_type(value).encode("ascii", "ignore"))
prop_node.appendChild(text_node)
obj_node.appendChild(prop_node)
return doc
def unmarshal_object(self, fp, cls=None, id=None):
if isinstance(fp, six.string_types):
doc = parseString(fp)
else:
doc = parse(fp)
return self.get_object_from_doc(cls, id, doc)
def unmarshal_props(self, fp, cls=None, id=None):
"""
Same as unmarshalling an object, except it returns
from "get_props_from_doc"
"""
if isinstance(fp, six.string_types):
doc = parseString(fp)
else:
doc = parse(fp)
return self.get_props_from_doc(cls, id, doc)
def delete_object(self, obj):
url = "/%s/%s" % (self.db_name, obj.id)
return self._make_request("DELETE", url)
def set_key_value(self, obj, name, value):
self.domain.put_attributes(obj.id, {name: value}, replace=True)
def delete_key_value(self, obj, name):
self.domain.delete_attributes(obj.id, name)
def get_key_value(self, obj, name):
a = self.domain.get_attributes(obj.id, name)
if name in a:
return a[name]
else:
return None
def get_raw_item(self, obj):
return self.domain.get_item(obj.id)
def set_property(self, prop, obj, name, value):
pass
def get_property(self, prop, obj, name):
pass
def load_object(self, obj):
if not obj._loaded:
obj = obj.get_by_id(obj.id)
obj._loaded = True
return obj
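# Editor's note: a minimal, hedged usage sketch, not part of the original
# module.  It only exercises the XMLConverter scalar codecs defined above,
# which need no database connection; the sample values are invented.
if __name__ == '__main__':
    converter = XMLConverter(manager=None)  # manager is only used for reference types
    assert converter.encode(int, 42) == '42'
    assert converter.encode(bool, False) == 'false'
    assert converter.encode(datetime, datetime(1970, 1, 1)) == '1970-01-01T00:00:00Z'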
| mit |
rvalyi/geraldo | site/newsite/django_1_0/tests/regressiontests/forms/forms.py | 10 | 83944 | # -*- coding: utf-8 -*-
tests = r"""
>>> from django.forms import *
>>> from django.core.files.uploadedfile import SimpleUploadedFile
>>> import datetime
>>> import time
>>> import re
>>> try:
... from decimal import Decimal
... except ImportError:
... from django.utils._decimal import Decimal
#########
# Forms #
#########
A Form is a collection of Fields. It knows how to validate a set of data and it
knows how to render itself in a couple of default ways (e.g., an HTML table).
You can pass it data in __init__(), as a dictionary.
# Form ########################################################################
>>> class Person(Form):
... first_name = CharField()
... last_name = CharField()
... birthday = DateField()
Pass a dictionary to a Form's __init__().
>>> p = Person({'first_name': u'John', 'last_name': u'Lennon', 'birthday': u'1940-10-9'})
>>> p.is_bound
True
>>> p.errors
{}
>>> p.is_valid()
True
>>> p.errors.as_ul()
u''
>>> p.errors.as_text()
u''
>>> p.cleaned_data["first_name"], p.cleaned_data["last_name"], p.cleaned_data["birthday"]
(u'John', u'Lennon', datetime.date(1940, 10, 9))
>>> print p['first_name']
<input type="text" name="first_name" value="John" id="id_first_name" />
>>> print p['last_name']
<input type="text" name="last_name" value="Lennon" id="id_last_name" />
>>> print p['birthday']
<input type="text" name="birthday" value="1940-10-9" id="id_birthday" />
>>> print p['nonexistentfield']
Traceback (most recent call last):
...
KeyError: "Key 'nonexistentfield' not found in Form"
>>> for boundfield in p:
... print boundfield
<input type="text" name="first_name" value="John" id="id_first_name" />
<input type="text" name="last_name" value="Lennon" id="id_last_name" />
<input type="text" name="birthday" value="1940-10-9" id="id_birthday" />
>>> for boundfield in p:
... print boundfield.label, boundfield.data
First name John
Last name Lennon
Birthday 1940-10-9
>>> print p
<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" value="Lennon" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></td></tr>
Empty dictionaries are valid, too.
>>> p = Person({})
>>> p.is_bound
True
>>> p.errors['first_name']
[u'This field is required.']
>>> p.errors['last_name']
[u'This field is required.']
>>> p.errors['birthday']
[u'This field is required.']
>>> p.is_valid()
False
>>> p.cleaned_data
Traceback (most recent call last):
...
AttributeError: 'Person' object has no attribute 'cleaned_data'
>>> print p
<tr><th><label for="id_first_name">First name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="birthday" id="id_birthday" /></td></tr>
>>> print p.as_table()
<tr><th><label for="id_first_name">First name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="birthday" id="id_birthday" /></td></tr>
>>> print p.as_ul()
<li><ul class="errorlist"><li>This field is required.</li></ul><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></li>
>>> print p.as_p()
<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></p>
If you don't pass any values to the Form's __init__(), or if you pass None,
the Form will be considered unbound and won't do any validation. Form.errors
will be an empty dictionary *but* Form.is_valid() will return False.
>>> p = Person()
>>> p.is_bound
False
>>> p.errors
{}
>>> p.is_valid()
False
>>> p.cleaned_data
Traceback (most recent call last):
...
AttributeError: 'Person' object has no attribute 'cleaned_data'
>>> print p
<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" id="id_birthday" /></td></tr>
>>> print p.as_table()
<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" id="id_birthday" /></td></tr>
>>> print p.as_ul()
<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></li>
<li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></li>
<li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></li>
>>> print p.as_p()
<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></p>
<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></p>
<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></p>
Unicode values are handled properly.
>>> p = Person({'first_name': u'John', 'last_name': u'\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111', 'birthday': '1940-10-9'})
>>> p.as_table()
u'<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>\n<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" /></td></tr>\n<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></td></tr>'
>>> p.as_ul()
u'<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" value="John" id="id_first_name" /></li>\n<li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" /></li>\n<li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></li>'
>>> p.as_p()
u'<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" value="John" id="id_first_name" /></p>\n<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" /></p>\n<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></p>'
>>> p = Person({'last_name': u'Lennon'})
>>> p.errors['first_name']
[u'This field is required.']
>>> p.errors['birthday']
[u'This field is required.']
>>> p.is_valid()
False
>>> p.errors.as_ul()
u'<ul class="errorlist"><li>first_name<ul class="errorlist"><li>This field is required.</li></ul></li><li>birthday<ul class="errorlist"><li>This field is required.</li></ul></li></ul>'
>>> print p.errors.as_text()
* first_name
* This field is required.
* birthday
* This field is required.
>>> p.cleaned_data
Traceback (most recent call last):
...
AttributeError: 'Person' object has no attribute 'cleaned_data'
>>> p['first_name'].errors
[u'This field is required.']
>>> p['first_name'].errors.as_ul()
u'<ul class="errorlist"><li>This field is required.</li></ul>'
>>> p['first_name'].errors.as_text()
u'* This field is required.'
>>> p = Person()
>>> print p['first_name']
<input type="text" name="first_name" id="id_first_name" />
>>> print p['last_name']
<input type="text" name="last_name" id="id_last_name" />
>>> print p['birthday']
<input type="text" name="birthday" id="id_birthday" />
cleaned_data will always *only* contain a key for fields defined in the
Form, even if you pass extra data when you instantiate the Form. In this
example, we pass a bunch of extra fields to the form constructor,
but cleaned_data contains only the form's fields.
>>> data = {'first_name': u'John', 'last_name': u'Lennon', 'birthday': u'1940-10-9', 'extra1': 'hello', 'extra2': 'hello'}
>>> p = Person(data)
>>> p.is_valid()
True
>>> p.cleaned_data['first_name']
u'John'
>>> p.cleaned_data['last_name']
u'Lennon'
>>> p.cleaned_data['birthday']
datetime.date(1940, 10, 9)
cleaned_data will include a key and value for *all* fields defined in the Form,
even if the Form's data didn't include a value for fields that are not
required. In this example, the data dictionary doesn't include a value for the
"nick_name" field, but cleaned_data includes it. For CharFields, it's set to the
empty string.
>>> class OptionalPersonForm(Form):
... first_name = CharField()
... last_name = CharField()
... nick_name = CharField(required=False)
>>> data = {'first_name': u'John', 'last_name': u'Lennon'}
>>> f = OptionalPersonForm(data)
>>> f.is_valid()
True
>>> f.cleaned_data['nick_name']
u''
>>> f.cleaned_data['first_name']
u'John'
>>> f.cleaned_data['last_name']
u'Lennon'
For DateFields, it's set to None.
>>> class OptionalPersonForm(Form):
... first_name = CharField()
... last_name = CharField()
... birth_date = DateField(required=False)
>>> data = {'first_name': u'John', 'last_name': u'Lennon'}
>>> f = OptionalPersonForm(data)
>>> f.is_valid()
True
>>> print f.cleaned_data['birth_date']
None
>>> f.cleaned_data['first_name']
u'John'
>>> f.cleaned_data['last_name']
u'Lennon'
"auto_id" tells the Form to add an "id" attribute to each form element.
If it's a string that contains '%s', Django will use that as a format string
into which the field's name will be inserted. It will also put a <label> around
the human-readable labels for a field.
>>> p = Person(auto_id='%s_id')
>>> print p.as_table()
<tr><th><label for="first_name_id">First name:</label></th><td><input type="text" name="first_name" id="first_name_id" /></td></tr>
<tr><th><label for="last_name_id">Last name:</label></th><td><input type="text" name="last_name" id="last_name_id" /></td></tr>
<tr><th><label for="birthday_id">Birthday:</label></th><td><input type="text" name="birthday" id="birthday_id" /></td></tr>
>>> print p.as_ul()
<li><label for="first_name_id">First name:</label> <input type="text" name="first_name" id="first_name_id" /></li>
<li><label for="last_name_id">Last name:</label> <input type="text" name="last_name" id="last_name_id" /></li>
<li><label for="birthday_id">Birthday:</label> <input type="text" name="birthday" id="birthday_id" /></li>
>>> print p.as_p()
<p><label for="first_name_id">First name:</label> <input type="text" name="first_name" id="first_name_id" /></p>
<p><label for="last_name_id">Last name:</label> <input type="text" name="last_name" id="last_name_id" /></p>
<p><label for="birthday_id">Birthday:</label> <input type="text" name="birthday" id="birthday_id" /></p>
If auto_id is any True value whose str() does not contain '%s', the "id"
attribute will be the name of the field.
>>> p = Person(auto_id=True)
>>> print p.as_ul()
<li><label for="first_name">First name:</label> <input type="text" name="first_name" id="first_name" /></li>
<li><label for="last_name">Last name:</label> <input type="text" name="last_name" id="last_name" /></li>
<li><label for="birthday">Birthday:</label> <input type="text" name="birthday" id="birthday" /></li>
If auto_id is any False value, an "id" attribute won't be output unless it
was manually entered.
>>> p = Person(auto_id=False)
>>> print p.as_ul()
<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>
In this example, auto_id is False, but the "id" attribute for the "first_name"
field is given explicitly via the widget's attrs. Note that this field gets a
<label>, while the others don't.
>>> class PersonNew(Form):
... first_name = CharField(widget=TextInput(attrs={'id': 'first_name_id'}))
... last_name = CharField()
... birthday = DateField()
>>> p = PersonNew(auto_id=False)
>>> print p.as_ul()
<li><label for="first_name_id">First name:</label> <input type="text" id="first_name_id" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>
If the "id" attribute is specified in the Form and auto_id is True, the "id"
attribute in the Form gets precedence.
>>> p = PersonNew(auto_id=True)
>>> print p.as_ul()
<li><label for="first_name_id">First name:</label> <input type="text" id="first_name_id" name="first_name" /></li>
<li><label for="last_name">Last name:</label> <input type="text" name="last_name" id="last_name" /></li>
<li><label for="birthday">Birthday:</label> <input type="text" name="birthday" id="birthday" /></li>
>>> class SignupForm(Form):
... email = EmailField()
... get_spam = BooleanField()
>>> f = SignupForm(auto_id=False)
>>> print f['email']
<input type="text" name="email" />
>>> print f['get_spam']
<input type="checkbox" name="get_spam" />
>>> f = SignupForm({'email': '[email protected]', 'get_spam': True}, auto_id=False)
>>> print f['email']
<input type="text" name="email" value="[email protected]" />
>>> print f['get_spam']
<input checked="checked" type="checkbox" name="get_spam" />
Any Field can have a Widget class passed to its constructor:
>>> class ContactForm(Form):
... subject = CharField()
... message = CharField(widget=Textarea)
>>> f = ContactForm(auto_id=False)
>>> print f['subject']
<input type="text" name="subject" />
>>> print f['message']
<textarea rows="10" cols="40" name="message"></textarea>
as_textarea(), as_text() and as_hidden() are shortcuts for changing the output
widget type:
>>> f['subject'].as_textarea()
u'<textarea rows="10" cols="40" name="subject"></textarea>'
>>> f['message'].as_text()
u'<input type="text" name="message" />'
>>> f['message'].as_hidden()
u'<input type="hidden" name="message" />'
The 'widget' parameter to a Field can also be an instance:
>>> class ContactForm(Form):
... subject = CharField()
... message = CharField(widget=Textarea(attrs={'rows': 80, 'cols': 20}))
>>> f = ContactForm(auto_id=False)
>>> print f['message']
<textarea rows="80" cols="20" name="message"></textarea>
Instance-level attrs are *not* carried over to as_textarea(), as_text() and
as_hidden():
>>> f['message'].as_text()
u'<input type="text" name="message" />'
>>> f = ContactForm({'subject': 'Hello', 'message': 'I love you.'}, auto_id=False)
>>> f['subject'].as_textarea()
u'<textarea rows="10" cols="40" name="subject">Hello</textarea>'
>>> f['message'].as_text()
u'<input type="text" name="message" value="I love you." />'
>>> f['message'].as_hidden()
u'<input type="hidden" name="message" value="I love you." />'
For a form with a <select>, use ChoiceField:
>>> class FrameworkForm(Form):
... name = CharField()
... language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')])
>>> f = FrameworkForm(auto_id=False)
>>> print f['language']
<select name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>
>>> f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
>>> print f['language']
<select name="language">
<option value="P" selected="selected">Python</option>
<option value="J">Java</option>
</select>
A subtlety: If one of the choices' values is the empty string and the form is
unbound, then the <option> for the empty-string choice will get selected="selected".
>>> class FrameworkForm(Form):
... name = CharField()
... language = ChoiceField(choices=[('', '------'), ('P', 'Python'), ('J', 'Java')])
>>> f = FrameworkForm(auto_id=False)
>>> print f['language']
<select name="language">
<option value="" selected="selected">------</option>
<option value="P">Python</option>
<option value="J">Java</option>
</select>
You can specify widget attributes in the Widget constructor.
>>> class FrameworkForm(Form):
... name = CharField()
... language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=Select(attrs={'class': 'foo'}))
>>> f = FrameworkForm(auto_id=False)
>>> print f['language']
<select class="foo" name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>
>>> f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
>>> print f['language']
<select class="foo" name="language">
<option value="P" selected="selected">Python</option>
<option value="J">Java</option>
</select>
When passing a custom widget instance to ChoiceField, note that setting
'choices' on the widget is meaningless. The widget will use the choices
defined on the Field, not the ones defined on the Widget.
>>> class FrameworkForm(Form):
... name = CharField()
... language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=Select(choices=[('R', 'Ruby'), ('P', 'Perl')], attrs={'class': 'foo'}))
>>> f = FrameworkForm(auto_id=False)
>>> print f['language']
<select class="foo" name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>
>>> f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
>>> print f['language']
<select class="foo" name="language">
<option value="P" selected="selected">Python</option>
<option value="J">Java</option>
</select>
You can set a ChoiceField's choices after the fact.
>>> class FrameworkForm(Form):
... name = CharField()
... language = ChoiceField()
>>> f = FrameworkForm(auto_id=False)
>>> print f['language']
<select name="language">
</select>
>>> f.fields['language'].choices = [('P', 'Python'), ('J', 'Java')]
>>> print f['language']
<select name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>
Add widget=RadioSelect to use that widget with a ChoiceField.
>>> class FrameworkForm(Form):
... name = CharField()
... language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=RadioSelect)
>>> f = FrameworkForm(auto_id=False)
>>> print f['language']
<ul>
<li><label><input type="radio" name="language" value="P" /> Python</label></li>
<li><label><input type="radio" name="language" value="J" /> Java</label></li>
</ul>
>>> print f
<tr><th>Name:</th><td><input type="text" name="name" /></td></tr>
<tr><th>Language:</th><td><ul>
<li><label><input type="radio" name="language" value="P" /> Python</label></li>
<li><label><input type="radio" name="language" value="J" /> Java</label></li>
</ul></td></tr>
>>> print f.as_ul()
<li>Name: <input type="text" name="name" /></li>
<li>Language: <ul>
<li><label><input type="radio" name="language" value="P" /> Python</label></li>
<li><label><input type="radio" name="language" value="J" /> Java</label></li>
</ul></li>
Regarding auto_id and <label>, RadioSelect is a special case. Each radio button
gets a distinct ID, formed by appending an underscore plus the button's
zero-based index.
>>> f = FrameworkForm(auto_id='id_%s')
>>> print f['language']
<ul>
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul>
When RadioSelect is used with auto_id, and the whole form is printed using
either as_table() or as_ul(), the label for the RadioSelect will point to the
ID of the *first* radio button.
>>> print f
<tr><th><label for="id_name">Name:</label></th><td><input type="text" name="name" id="id_name" /></td></tr>
<tr><th><label for="id_language_0">Language:</label></th><td><ul>
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul></td></tr>
>>> print f.as_ul()
<li><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></li>
<li><label for="id_language_0">Language:</label> <ul>
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul></li>
>>> print f.as_p()
<p><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></p>
<p><label for="id_language_0">Language:</label> <ul>
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul></p>
MultipleChoiceField is a special case, as its data is required to be a list:
>>> class SongForm(Form):
... name = CharField()
... composers = MultipleChoiceField()
>>> f = SongForm(auto_id=False)
>>> print f['composers']
<select multiple="multiple" name="composers">
</select>
>>> class SongForm(Form):
... name = CharField()
... composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')])
>>> f = SongForm(auto_id=False)
>>> print f['composers']
<select multiple="multiple" name="composers">
<option value="J">John Lennon</option>
<option value="P">Paul McCartney</option>
</select>
>>> f = SongForm({'name': 'Yesterday', 'composers': ['P']}, auto_id=False)
>>> print f['name']
<input type="text" name="name" value="Yesterday" />
>>> print f['composers']
<select multiple="multiple" name="composers">
<option value="J">John Lennon</option>
<option value="P" selected="selected">Paul McCartney</option>
</select>
MultipleChoiceField rendered as_hidden() is a special case. Because it can
have multiple values, its as_hidden() renders multiple <input type="hidden">
tags.
>>> f = SongForm({'name': 'Yesterday', 'composers': ['P']}, auto_id=False)
>>> print f['composers'].as_hidden()
<input type="hidden" name="composers" value="P" />
>>> f = SongForm({'name': 'From Me To You', 'composers': ['P', 'J']}, auto_id=False)
>>> print f['composers'].as_hidden()
<input type="hidden" name="composers" value="P" />
<input type="hidden" name="composers" value="J" />
MultipleChoiceField can also be used with the CheckboxSelectMultiple widget.
>>> class SongForm(Form):
... name = CharField()
... composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple)
>>> f = SongForm(auto_id=False)
>>> print f['composers']
<ul>
<li><label><input type="checkbox" name="composers" value="J" /> John Lennon</label></li>
<li><label><input type="checkbox" name="composers" value="P" /> Paul McCartney</label></li>
</ul>
>>> f = SongForm({'composers': ['J']}, auto_id=False)
>>> print f['composers']
<ul>
<li><label><input checked="checked" type="checkbox" name="composers" value="J" /> John Lennon</label></li>
<li><label><input type="checkbox" name="composers" value="P" /> Paul McCartney</label></li>
</ul>
>>> f = SongForm({'composers': ['J', 'P']}, auto_id=False)
>>> print f['composers']
<ul>
<li><label><input checked="checked" type="checkbox" name="composers" value="J" /> John Lennon</label></li>
<li><label><input checked="checked" type="checkbox" name="composers" value="P" /> Paul McCartney</label></li>
</ul>
Regarding auto_id, CheckboxSelectMultiple is a special case. Each checkbox
gets a distinct ID, formed by appending an underscore plus the checkbox's
zero-based index.
>>> f = SongForm(auto_id='%s_id')
>>> print f['composers']
<ul>
<li><label for="composers_id_0"><input type="checkbox" name="composers" value="J" id="composers_id_0" /> John Lennon</label></li>
<li><label for="composers_id_1"><input type="checkbox" name="composers" value="P" id="composers_id_1" /> Paul McCartney</label></li>
</ul>
Data for a MultipleChoiceField should be a list. QueryDict and MultiValueDict
conveniently work with this.
>>> data = {'name': 'Yesterday', 'composers': ['J', 'P']}
>>> f = SongForm(data)
>>> f.errors
{}
>>> from django.http import QueryDict
>>> data = QueryDict('name=Yesterday&composers=J&composers=P')
>>> f = SongForm(data)
>>> f.errors
{}
>>> from django.utils.datastructures import MultiValueDict
>>> data = MultiValueDict(dict(name=['Yesterday'], composers=['J', 'P']))
>>> f = SongForm(data)
>>> f.errors
{}
The MultipleHiddenInput widget renders multiple values as hidden fields.
>>> class SongFormHidden(Form):
... name = CharField()
... composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=MultipleHiddenInput)
>>> f = SongFormHidden(MultiValueDict(dict(name=['Yesterday'], composers=['J', 'P'])), auto_id=False)
>>> print f.as_ul()
<li>Name: <input type="text" name="name" value="Yesterday" /><input type="hidden" name="composers" value="J" />
<input type="hidden" name="composers" value="P" /></li>
When using CheckboxSelectMultiple, the framework expects the submitted data to
be a list of values, and the cleaned data is returned as a list.
>>> f = SongForm({'name': 'Yesterday'}, auto_id=False)
>>> f.errors['composers']
[u'This field is required.']
>>> f = SongForm({'name': 'Yesterday', 'composers': ['J']}, auto_id=False)
>>> f.errors
{}
>>> f.cleaned_data['composers']
[u'J']
>>> f.cleaned_data['name']
u'Yesterday'
>>> f = SongForm({'name': 'Yesterday', 'composers': ['J', 'P']}, auto_id=False)
>>> f.errors
{}
>>> f.cleaned_data['composers']
[u'J', u'P']
>>> f.cleaned_data['name']
u'Yesterday'
Validation errors are HTML-escaped when output as HTML.
>>> class EscapingForm(Form):
... special_name = CharField()
... def clean_special_name(self):
... raise ValidationError("Something's wrong with '%s'" % self.cleaned_data['special_name'])
>>> f = EscapingForm({'special_name': "Nothing to escape"}, auto_id=False)
>>> print f
<tr><th>Special name:</th><td><ul class="errorlist"><li>Something's wrong with 'Nothing to escape'</li></ul><input type="text" name="special_name" value="Nothing to escape" /></td></tr>
>>> f = EscapingForm({'special_name': "Should escape < & > and <script>alert('xss')</script>"}, auto_id=False)
>>> print f
<tr><th>Special name:</th><td><ul class="errorlist"><li>Something's wrong with 'Should escape &lt; &amp; &gt; and &lt;script&gt;alert('xss')&lt;/script&gt;'</li></ul><input type="text" name="special_name" value="Should escape &lt; &amp; &gt; and &lt;script&gt;alert('xss')&lt;/script&gt;" /></td></tr>
""" + \
r""" # [This concatenation is to keep the string below the jython's 32K limit].
# Validating multiple fields in relation to another ###########################
There are a couple of ways to do multiple-field validation. If you want the
validation message to be associated with a particular field, implement the
clean_XXX() method on the Form, where XXX is the field name. As in
Field.clean(), the clean_XXX() method should return the cleaned value. In the
clean_XXX() method, you have access to self.cleaned_data, which is a dictionary
of all the data that has been cleaned *so far*, in the order the fields were
defined, up to and including the current field (e.g., the field XXX if you're
in clean_XXX()).
>>> class UserRegistration(Form):
... username = CharField(max_length=10)
... password1 = CharField(widget=PasswordInput)
... password2 = CharField(widget=PasswordInput)
... def clean_password2(self):
... if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
... raise ValidationError(u'Please make sure your passwords match.')
... return self.cleaned_data['password2']
>>> f = UserRegistration(auto_id=False)
>>> f.errors
{}
>>> f = UserRegistration({}, auto_id=False)
>>> f.errors['username']
[u'This field is required.']
>>> f.errors['password1']
[u'This field is required.']
>>> f.errors['password2']
[u'This field is required.']
>>> f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)
>>> f.errors['password2']
[u'Please make sure your passwords match.']
>>> f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'foo'}, auto_id=False)
>>> f.errors
{}
>>> f.cleaned_data['username']
u'adrian'
>>> f.cleaned_data['password1']
u'foo'
>>> f.cleaned_data['password2']
u'foo'
Another way of doing multiple-field validation is by implementing the
Form's clean() method. If you do this, any ValidationError raised by that
method will not be associated with a particular field; it will have a
special-case association with the field named '__all__'.
Note that in Form.clean(), you have access to self.cleaned_data, a dictionary of
all the fields/values that have *not* raised a ValidationError. Also note
Form.clean() is required to return a dictionary of all clean data.
>>> class UserRegistration(Form):
... username = CharField(max_length=10)
... password1 = CharField(widget=PasswordInput)
... password2 = CharField(widget=PasswordInput)
... def clean(self):
... if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
... raise ValidationError(u'Please make sure your passwords match.')
... return self.cleaned_data
>>> f = UserRegistration(auto_id=False)
>>> f.errors
{}
>>> f = UserRegistration({}, auto_id=False)
>>> print f.as_table()
<tr><th>Username:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="username" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="password" name="password2" /></td></tr>
>>> f.errors['username']
[u'This field is required.']
>>> f.errors['password1']
[u'This field is required.']
>>> f.errors['password2']
[u'This field is required.']
>>> f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)
>>> f.errors['__all__']
[u'Please make sure your passwords match.']
>>> print f.as_table()
<tr><td colspan="2"><ul class="errorlist"><li>Please make sure your passwords match.</li></ul></td></tr>
<tr><th>Username:</th><td><input type="text" name="username" value="adrian" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" value="foo" /></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" value="bar" /></td></tr>
>>> print f.as_ul()
<li><ul class="errorlist"><li>Please make sure your passwords match.</li></ul></li>
<li>Username: <input type="text" name="username" value="adrian" maxlength="10" /></li>
<li>Password1: <input type="password" name="password1" value="foo" /></li>
<li>Password2: <input type="password" name="password2" value="bar" /></li>
>>> f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'foo'}, auto_id=False)
>>> f.errors
{}
>>> f.cleaned_data['username']
u'adrian'
>>> f.cleaned_data['password1']
u'foo'
>>> f.cleaned_data['password2']
u'foo'
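As a side note (a sketch only, not exercised by this doctest), errors raised from
Form.clean() and stored under '__all__' are conventionally read through the form's
non_field_errors() method -- assuming this version exposes that helper -- which is
what templates typically use:
    # Illustrative sketch, not executed by the doctest runner.
    f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)
    if not f.is_valid():
        non_field = f.non_field_errors()          # holds the passwords-don't-match error
        username_errors = f['username'].errors    # per-field errors (empty here)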
# Dynamic construction ########################################################
It's possible to construct a Form dynamically by adding to the self.fields
dictionary in __init__(). Don't forget to call Form.__init__() within the
subclass' __init__().
>>> class Person(Form):
... first_name = CharField()
... last_name = CharField()
... def __init__(self, *args, **kwargs):
... super(Person, self).__init__(*args, **kwargs)
... self.fields['birthday'] = DateField()
>>> p = Person(auto_id=False)
>>> print p
<tr><th>First name:</th><td><input type="text" name="first_name" /></td></tr>
<tr><th>Last name:</th><td><input type="text" name="last_name" /></td></tr>
<tr><th>Birthday:</th><td><input type="text" name="birthday" /></td></tr>
Instances of a dynamic Form do not persist fields from one Form instance to
the next.
>>> class MyForm(Form):
... def __init__(self, data=None, auto_id=False, field_list=[]):
... Form.__init__(self, data, auto_id=auto_id)
... for field in field_list:
... self.fields[field[0]] = field[1]
>>> field_list = [('field1', CharField()), ('field2', CharField())]
>>> my_form = MyForm(field_list=field_list)
>>> print my_form
<tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr>
<tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr>
>>> field_list = [('field3', CharField()), ('field4', CharField())]
>>> my_form = MyForm(field_list=field_list)
>>> print my_form
<tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr>
<tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr>
>>> class MyForm(Form):
... default_field_1 = CharField()
... default_field_2 = CharField()
... def __init__(self, data=None, auto_id=False, field_list=[]):
... Form.__init__(self, data, auto_id=auto_id)
... for field in field_list:
... self.fields[field[0]] = field[1]
>>> field_list = [('field1', CharField()), ('field2', CharField())]
>>> my_form = MyForm(field_list=field_list)
>>> print my_form
<tr><th>Default field 1:</th><td><input type="text" name="default_field_1" /></td></tr>
<tr><th>Default field 2:</th><td><input type="text" name="default_field_2" /></td></tr>
<tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr>
<tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr>
>>> field_list = [('field3', CharField()), ('field4', CharField())]
>>> my_form = MyForm(field_list=field_list)
>>> print my_form
<tr><th>Default field 1:</th><td><input type="text" name="default_field_1" /></td></tr>
<tr><th>Default field 2:</th><td><input type="text" name="default_field_2" /></td></tr>
<tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr>
<tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr>
Similarly, changes to field attributes do not persist from one Form instance
to the next.
>>> class Person(Form):
... first_name = CharField(required=False)
... last_name = CharField(required=False)
... def __init__(self, names_required=False, *args, **kwargs):
... super(Person, self).__init__(*args, **kwargs)
... if names_required:
... self.fields['first_name'].required = True
... self.fields['first_name'].widget.attrs['class'] = 'required'
... self.fields['last_name'].required = True
... self.fields['last_name'].widget.attrs['class'] = 'required'
>>> f = Person(names_required=False)
>>> f['first_name'].field.required, f['last_name'].field.required
(False, False)
>>> f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs
({}, {})
>>> f = Person(names_required=True)
>>> f['first_name'].field.required, f['last_name'].field.required
(True, True)
>>> f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs
({'class': 'required'}, {'class': 'required'})
>>> f = Person(names_required=False)
>>> f['first_name'].field.required, f['last_name'].field.required
(False, False)
>>> f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs
({}, {})
>>> class Person(Form):
... first_name = CharField(max_length=30)
... last_name = CharField(max_length=30)
... def __init__(self, name_max_length=None, *args, **kwargs):
... super(Person, self).__init__(*args, **kwargs)
... if name_max_length:
... self.fields['first_name'].max_length = name_max_length
... self.fields['last_name'].max_length = name_max_length
>>> f = Person(name_max_length=None)
>>> f['first_name'].field.max_length, f['last_name'].field.max_length
(30, 30)
>>> f = Person(name_max_length=20)
>>> f['first_name'].field.max_length, f['last_name'].field.max_length
(20, 20)
>>> f = Person(name_max_length=None)
>>> f['first_name'].field.max_length, f['last_name'].field.max_length
(30, 30)
HiddenInput widgets are displayed differently in the as_table(), as_ul()
and as_p() output of a Form -- their verbose names are not displayed, and a
separate row is not displayed. They're displayed in the last row of the
form, directly after that row's form element.
>>> class Person(Form):
... first_name = CharField()
... last_name = CharField()
... hidden_text = CharField(widget=HiddenInput)
... birthday = DateField()
>>> p = Person(auto_id=False)
>>> print p
<tr><th>First name:</th><td><input type="text" name="first_name" /></td></tr>
<tr><th>Last name:</th><td><input type="text" name="last_name" /></td></tr>
<tr><th>Birthday:</th><td><input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></td></tr>
>>> print p.as_ul()
<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></li>
>>> print p.as_p()
<p>First name: <input type="text" name="first_name" /></p>
<p>Last name: <input type="text" name="last_name" /></p>
<p>Birthday: <input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></p>
With auto_id set, a HiddenInput still gets an ID, but it doesn't get a label.
>>> p = Person(auto_id='id_%s')
>>> print p
<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" id="id_birthday" /><input type="hidden" name="hidden_text" id="id_hidden_text" /></td></tr>
>>> print p.as_ul()
<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></li>
<li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></li>
<li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /><input type="hidden" name="hidden_text" id="id_hidden_text" /></li>
>>> print p.as_p()
<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></p>
<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></p>
<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /><input type="hidden" name="hidden_text" id="id_hidden_text" /></p>
If a field with a HiddenInput has errors, the as_table() and as_ul() output
will include the error message(s) with the text "(Hidden field [fieldname]) "
prepended. This message is displayed at the top of the output, regardless of
its field's order in the form.
>>> p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'}, auto_id=False)
>>> print p
<tr><td colspan="2"><ul class="errorlist"><li>(Hidden field hidden_text) This field is required.</li></ul></td></tr>
<tr><th>First name:</th><td><input type="text" name="first_name" value="John" /></td></tr>
<tr><th>Last name:</th><td><input type="text" name="last_name" value="Lennon" /></td></tr>
<tr><th>Birthday:</th><td><input type="text" name="birthday" value="1940-10-9" /><input type="hidden" name="hidden_text" /></td></tr>
>>> print p.as_ul()
<li><ul class="errorlist"><li>(Hidden field hidden_text) This field is required.</li></ul></li>
<li>First name: <input type="text" name="first_name" value="John" /></li>
<li>Last name: <input type="text" name="last_name" value="Lennon" /></li>
<li>Birthday: <input type="text" name="birthday" value="1940-10-9" /><input type="hidden" name="hidden_text" /></li>
>>> print p.as_p()
<ul class="errorlist"><li>(Hidden field hidden_text) This field is required.</li></ul>
<p>First name: <input type="text" name="first_name" value="John" /></p>
<p>Last name: <input type="text" name="last_name" value="Lennon" /></p>
<p>Birthday: <input type="text" name="birthday" value="1940-10-9" /><input type="hidden" name="hidden_text" /></p>
A corner case: It's possible for a form to have only HiddenInputs.
>>> class TestForm(Form):
... foo = CharField(widget=HiddenInput)
... bar = CharField(widget=HiddenInput)
>>> p = TestForm(auto_id=False)
>>> print p.as_table()
<input type="hidden" name="foo" /><input type="hidden" name="bar" />
>>> print p.as_ul()
<input type="hidden" name="foo" /><input type="hidden" name="bar" />
>>> print p.as_p()
<input type="hidden" name="foo" /><input type="hidden" name="bar" />
A Form's fields are displayed in the same order in which they were defined.
>>> class TestForm(Form):
... field1 = CharField()
... field2 = CharField()
... field3 = CharField()
... field4 = CharField()
... field5 = CharField()
... field6 = CharField()
... field7 = CharField()
... field8 = CharField()
... field9 = CharField()
... field10 = CharField()
... field11 = CharField()
... field12 = CharField()
... field13 = CharField()
... field14 = CharField()
>>> p = TestForm(auto_id=False)
>>> print p
<tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr>
<tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr>
<tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr>
<tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr>
<tr><th>Field5:</th><td><input type="text" name="field5" /></td></tr>
<tr><th>Field6:</th><td><input type="text" name="field6" /></td></tr>
<tr><th>Field7:</th><td><input type="text" name="field7" /></td></tr>
<tr><th>Field8:</th><td><input type="text" name="field8" /></td></tr>
<tr><th>Field9:</th><td><input type="text" name="field9" /></td></tr>
<tr><th>Field10:</th><td><input type="text" name="field10" /></td></tr>
<tr><th>Field11:</th><td><input type="text" name="field11" /></td></tr>
<tr><th>Field12:</th><td><input type="text" name="field12" /></td></tr>
<tr><th>Field13:</th><td><input type="text" name="field13" /></td></tr>
<tr><th>Field14:</th><td><input type="text" name="field14" /></td></tr>
Some Field classes have an effect on the HTML attributes of their associated
Widget. If you set max_length in a CharField and its associated widget is
either a TextInput or PasswordInput, then the widget's rendered HTML will
include the "maxlength" attribute.
>>> class UserRegistration(Form):
... username = CharField(max_length=10) # uses TextInput by default
... password = CharField(max_length=10, widget=PasswordInput)
... realname = CharField(max_length=10, widget=TextInput) # redundantly define widget, just to test
... address = CharField() # no max_length defined here
>>> p = UserRegistration(auto_id=False)
>>> print p.as_ul()
<li>Username: <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" maxlength="10" /></li>
<li>Realname: <input type="text" name="realname" maxlength="10" /></li>
<li>Address: <input type="text" name="address" /></li>
If you specify a custom "attrs" that includes the "maxlength" attribute,
the Field's max_length attribute will override whatever "maxlength" you specify
in "attrs".
>>> class UserRegistration(Form):
... username = CharField(max_length=10, widget=TextInput(attrs={'maxlength': 20}))
... password = CharField(max_length=10, widget=PasswordInput)
>>> p = UserRegistration(auto_id=False)
>>> print p.as_ul()
<li>Username: <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" maxlength="10" /></li>
# Specifying labels ###########################################################
You can specify the label for a field by using the 'label' argument to a Field
class. If you don't specify 'label', Django will use the field name with
underscores converted to spaces, and the initial letter capitalized.
>>> class UserRegistration(Form):
... username = CharField(max_length=10, label='Your username')
... password1 = CharField(widget=PasswordInput)
... password2 = CharField(widget=PasswordInput, label='Password (again)')
>>> p = UserRegistration(auto_id=False)
>>> print p.as_ul()
<li>Your username: <input type="text" name="username" maxlength="10" /></li>
<li>Password1: <input type="password" name="password1" /></li>
<li>Password (again): <input type="password" name="password2" /></li>
Labels for as_* methods will only end in a colon if they don't end in other
punctuation already.
>>> class Questions(Form):
... q1 = CharField(label='The first question')
... q2 = CharField(label='What is your name?')
... q3 = CharField(label='The answer to life is:')
... q4 = CharField(label='Answer this question!')
... q5 = CharField(label='The last question. Period.')
>>> print Questions(auto_id=False).as_p()
<p>The first question: <input type="text" name="q1" /></p>
<p>What is your name? <input type="text" name="q2" /></p>
<p>The answer to life is: <input type="text" name="q3" /></p>
<p>Answer this question! <input type="text" name="q4" /></p>
<p>The last question. Period. <input type="text" name="q5" /></p>
>>> print Questions().as_p()
<p><label for="id_q1">The first question:</label> <input type="text" name="q1" id="id_q1" /></p>
<p><label for="id_q2">What is your name?</label> <input type="text" name="q2" id="id_q2" /></p>
<p><label for="id_q3">The answer to life is:</label> <input type="text" name="q3" id="id_q3" /></p>
<p><label for="id_q4">Answer this question!</label> <input type="text" name="q4" id="id_q4" /></p>
<p><label for="id_q5">The last question. Period.</label> <input type="text" name="q5" id="id_q5" /></p>
A label can be a Unicode object or a bytestring with special characters.
>>> class UserRegistration(Form):
... username = CharField(max_length=10, label='ŠĐĆŽćžšđ')
... password = CharField(widget=PasswordInput, label=u'\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111')
>>> p = UserRegistration(auto_id=False)
>>> p.as_ul()
u'<li>\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111: <input type="text" name="username" maxlength="10" /></li>\n<li>\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111: <input type="password" name="password" /></li>'
If a label is set to the empty string for a field, that field won't get a label.
>>> class UserRegistration(Form):
... username = CharField(max_length=10, label='')
... password = CharField(widget=PasswordInput)
>>> p = UserRegistration(auto_id=False)
>>> print p.as_ul()
<li> <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
>>> p = UserRegistration(auto_id='id_%s')
>>> print p.as_ul()
<li> <input id="id_username" type="text" name="username" maxlength="10" /></li>
<li><label for="id_password">Password:</label> <input type="password" name="password" id="id_password" /></li>
If label is None, Django will auto-create the label from the field name. This
is the default behavior.
>>> class UserRegistration(Form):
... username = CharField(max_length=10, label=None)
... password = CharField(widget=PasswordInput)
>>> p = UserRegistration(auto_id=False)
>>> print p.as_ul()
<li>Username: <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
>>> p = UserRegistration(auto_id='id_%s')
>>> print p.as_ul()
<li><label for="id_username">Username:</label> <input id="id_username" type="text" name="username" maxlength="10" /></li>
<li><label for="id_password">Password:</label> <input type="password" name="password" id="id_password" /></li>
# Label Suffix ################################################################
You can specify the 'label_suffix' argument to a Form class to modify the
punctuation symbol used at the end of a label. By default, the colon (:) is
used, and is only appended to the label if the label doesn't already end with a
punctuation symbol: ., !, ? or :. If you specify a different suffix, it will
be appended regardless of the last character of the label.
>>> class FavoriteForm(Form):
... color = CharField(label='Favorite color?')
... animal = CharField(label='Favorite animal')
...
>>> f = FavoriteForm(auto_id=False)
>>> print f.as_ul()
<li>Favorite color? <input type="text" name="color" /></li>
<li>Favorite animal: <input type="text" name="animal" /></li>
>>> f = FavoriteForm(auto_id=False, label_suffix='?')
>>> print f.as_ul()
<li>Favorite color? <input type="text" name="color" /></li>
<li>Favorite animal? <input type="text" name="animal" /></li>
>>> f = FavoriteForm(auto_id=False, label_suffix='')
>>> print f.as_ul()
<li>Favorite color? <input type="text" name="color" /></li>
<li>Favorite animal <input type="text" name="animal" /></li>
>>> f = FavoriteForm(auto_id=False, label_suffix=u'\u2192')
>>> f.as_ul()
u'<li>Favorite color? <input type="text" name="color" /></li>\n<li>Favorite animal\u2192 <input type="text" name="animal" /></li>'
""" + \
r""" # [This concatenation is to keep the string below the jython's 32K limit].
# Initial data ################################################################
You can specify initial data for a field by using the 'initial' argument to a
Field class. This initial data is displayed when a Form is rendered with *no*
data. It is not displayed when a Form is rendered with any data (including an
empty dictionary). Also, the initial value is *not* used if data for a
particular required field isn't provided.
>>> class UserRegistration(Form):
... username = CharField(max_length=10, initial='django')
... password = CharField(widget=PasswordInput)
Here, we're not submitting any data, so the initial value will be displayed.
>>> p = UserRegistration(auto_id=False)
>>> print p.as_ul()
<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
Here, we're submitting data, so the initial value will *not* be displayed.
>>> p = UserRegistration({}, auto_id=False)
>>> print p.as_ul()
<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
>>> p = UserRegistration({'username': u''}, auto_id=False)
>>> print p.as_ul()
<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
>>> p = UserRegistration({'username': u'foo'}, auto_id=False)
>>> print p.as_ul()
<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
An 'initial' value is *not* used as a fallback if data is not provided. In this
example, we don't provide a value for 'username', and the form raises a
validation error rather than using the initial value for 'username'.
>>> p = UserRegistration({'password': 'secret'})
>>> p.errors['username']
[u'This field is required.']
>>> p.is_valid()
False
# Dynamic initial data ########################################################
The previous technique dealt with "hard-coded" initial data, but it's also
possible to specify initial data after you've already created the Form class
(i.e., at runtime). Use the 'initial' parameter to the Form constructor. This
should be a dictionary containing initial values for one or more fields in the
form, keyed by field name.
>>> class UserRegistration(Form):
... username = CharField(max_length=10)
... password = CharField(widget=PasswordInput)
Here, we're not submitting any data, so the initial value will be displayed.
>>> p = UserRegistration(initial={'username': 'django'}, auto_id=False)
>>> print p.as_ul()
<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
>>> p = UserRegistration(initial={'username': 'stephane'}, auto_id=False)
>>> print p.as_ul()
<li>Username: <input type="text" name="username" value="stephane" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
The 'initial' parameter is meaningless if you pass data.
>>> p = UserRegistration({}, initial={'username': 'django'}, auto_id=False)
>>> print p.as_ul()
<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
>>> p = UserRegistration({'username': u''}, initial={'username': 'django'}, auto_id=False)
>>> print p.as_ul()
<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
>>> p = UserRegistration({'username': u'foo'}, initial={'username': 'django'}, auto_id=False)
>>> print p.as_ul()
<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
A dynamic 'initial' value is *not* used as a fallback if data is not provided.
In this example, we don't provide a value for 'username', and the form raises a
validation error rather than using the initial value for 'username'.
>>> p = UserRegistration({'password': 'secret'}, initial={'username': 'django'})
>>> p.errors['username']
[u'This field is required.']
>>> p.is_valid()
False
If a Form defines 'initial' *and* 'initial' is passed as a parameter to Form(),
then the latter will get precedence.
>>> class UserRegistration(Form):
... username = CharField(max_length=10, initial='django')
... password = CharField(widget=PasswordInput)
>>> p = UserRegistration(initial={'username': 'babik'}, auto_id=False)
>>> print p.as_ul()
<li>Username: <input type="text" name="username" value="babik" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
# Callable initial data ########################################################
The previous technique dealt with raw values as initial data, but it's also
possible to specify callable data.
>>> class UserRegistration(Form):
... username = CharField(max_length=10)
... password = CharField(widget=PasswordInput)
First, we define a couple of functions to be used as callable initial values.
>>> def initial_django():
... return 'django'
>>> def initial_stephane():
... return 'stephane'
Here, we're not submitting any data, so the initial value will be displayed.
>>> p = UserRegistration(initial={'username': initial_django}, auto_id=False)
>>> print p.as_ul()
<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
The 'initial' parameter is meaningless if you pass data.
>>> p = UserRegistration({}, initial={'username': initial_django}, auto_id=False)
>>> print p.as_ul()
<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
>>> p = UserRegistration({'username': u''}, initial={'username': initial_django}, auto_id=False)
>>> print p.as_ul()
<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
>>> p = UserRegistration({'username': u'foo'}, initial={'username': initial_django}, auto_id=False)
>>> print p.as_ul()
<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
A callable 'initial' value is *not* used as a fallback if data is not provided.
In this example, we don't provide a value for 'username', and the form raises a
validation error rather than using the initial value for 'username'.
>>> p = UserRegistration({'password': 'secret'}, initial={'username': initial_django})
>>> p.errors['username']
[u'This field is required.']
>>> p.is_valid()
False
If a Form defines 'initial' *and* 'initial' is passed as a parameter to Form(),
then the latter will get precedence.
>>> class UserRegistration(Form):
... username = CharField(max_length=10, initial=initial_django)
... password = CharField(widget=PasswordInput)
>>> p = UserRegistration(auto_id=False)
>>> print p.as_ul()
<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
>>> p = UserRegistration(initial={'username': initial_stephane}, auto_id=False)
>>> print p.as_ul()
<li>Username: <input type="text" name="username" value="stephane" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
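A common use of a callable initial value (sketched here only; the form and field
names are made up) is to compute the value at rendering time rather than at
class-definition time:
    # Illustrative sketch, not executed by the doctest runner.
    import datetime
    class ProfileForm(Form):
        username = CharField(max_length=10)
        # The callable is evaluated when the unbound form is rendered, so each new
        # instance shows the current date, not the date the module was imported.
        joined = DateField(initial=datetime.date.today)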
# Help text ###################################################################
You can specify descriptive text for a field by using the 'help_text' argument
to a Field class. This help text is displayed when a Form is rendered.
>>> class UserRegistration(Form):
... username = CharField(max_length=10, help_text='e.g., [email protected]')
... password = CharField(widget=PasswordInput, help_text='Choose wisely.')
>>> p = UserRegistration(auto_id=False)
>>> print p.as_ul()
<li>Username: <input type="text" name="username" maxlength="10" /> e.g., [email protected]</li>
<li>Password: <input type="password" name="password" /> Choose wisely.</li>
>>> print p.as_p()
<p>Username: <input type="text" name="username" maxlength="10" /> e.g., [email protected]</p>
<p>Password: <input type="password" name="password" /> Choose wisely.</p>
>>> print p.as_table()
<tr><th>Username:</th><td><input type="text" name="username" maxlength="10" /><br />e.g., [email protected]</td></tr>
<tr><th>Password:</th><td><input type="password" name="password" /><br />Choose wisely.</td></tr>
The help text is displayed whether or not data is provided for the form.
>>> p = UserRegistration({'username': u'foo'}, auto_id=False)
>>> print p.as_ul()
<li>Username: <input type="text" name="username" value="foo" maxlength="10" /> e.g., [email protected]</li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /> Choose wisely.</li>
help_text is not displayed for hidden fields. It can be used for documentation
purposes, though.
>>> class UserRegistration(Form):
... username = CharField(max_length=10, help_text='e.g., [email protected]')
... password = CharField(widget=PasswordInput)
... next = CharField(widget=HiddenInput, initial='/', help_text='Redirect destination')
>>> p = UserRegistration(auto_id=False)
>>> print p.as_ul()
<li>Username: <input type="text" name="username" maxlength="10" /> e.g., [email protected]</li>
<li>Password: <input type="password" name="password" /><input type="hidden" name="next" value="/" /></li>
Help text can include arbitrary Unicode characters.
>>> class UserRegistration(Form):
... username = CharField(max_length=10, help_text='ŠĐĆŽćžšđ')
>>> p = UserRegistration(auto_id=False)
>>> p.as_ul()
u'<li>Username: <input type="text" name="username" maxlength="10" /> \u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111</li>'
# Subclassing forms ###########################################################
You can subclass a Form to add fields. The resulting form subclass will have
all of the fields of the parent Form, plus whichever fields you define in the
subclass.
>>> class Person(Form):
... first_name = CharField()
... last_name = CharField()
... birthday = DateField()
>>> class Musician(Person):
... instrument = CharField()
>>> p = Person(auto_id=False)
>>> print p.as_ul()
<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>
>>> m = Musician(auto_id=False)
>>> print m.as_ul()
<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>
<li>Instrument: <input type="text" name="instrument" /></li>
Yes, you can subclass multiple forms. The fields are added in the order in
which the parent classes are listed.
>>> class Person(Form):
... first_name = CharField()
... last_name = CharField()
... birthday = DateField()
>>> class Instrument(Form):
... instrument = CharField()
>>> class Beatle(Person, Instrument):
... haircut_type = CharField()
>>> b = Beatle(auto_id=False)
>>> print b.as_ul()
<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>
<li>Instrument: <input type="text" name="instrument" /></li>
<li>Haircut type: <input type="text" name="haircut_type" /></li>
# Forms with prefixes #########################################################
Sometimes it's necessary to have multiple forms display on the same HTML page,
or multiple copies of the same form. We can accomplish this with form prefixes.
Pass the keyword argument 'prefix' to the Form constructor to use this feature.
This value will be prepended to each HTML form field name. One way to think
about this is "namespaces for HTML forms". Notice that in the data argument,
each field's key has the prefix, in this case 'person1', prepended to the
actual field name.
>>> class Person(Form):
... first_name = CharField()
... last_name = CharField()
... birthday = DateField()
>>> data = {
... 'person1-first_name': u'John',
... 'person1-last_name': u'Lennon',
... 'person1-birthday': u'1940-10-9'
... }
>>> p = Person(data, prefix='person1')
>>> print p.as_ul()
<li><label for="id_person1-first_name">First name:</label> <input type="text" name="person1-first_name" value="John" id="id_person1-first_name" /></li>
<li><label for="id_person1-last_name">Last name:</label> <input type="text" name="person1-last_name" value="Lennon" id="id_person1-last_name" /></li>
<li><label for="id_person1-birthday">Birthday:</label> <input type="text" name="person1-birthday" value="1940-10-9" id="id_person1-birthday" /></li>
>>> print p['first_name']
<input type="text" name="person1-first_name" value="John" id="id_person1-first_name" />
>>> print p['last_name']
<input type="text" name="person1-last_name" value="Lennon" id="id_person1-last_name" />
>>> print p['birthday']
<input type="text" name="person1-birthday" value="1940-10-9" id="id_person1-birthday" />
>>> p.errors
{}
>>> p.is_valid()
True
>>> p.cleaned_data['first_name']
u'John'
>>> p.cleaned_data['last_name']
u'Lennon'
>>> p.cleaned_data['birthday']
datetime.date(1940, 10, 9)
Let's try submitting some bad data to make sure form.errors and field.errors
work as expected.
>>> data = {
... 'person1-first_name': u'',
... 'person1-last_name': u'',
... 'person1-birthday': u''
... }
>>> p = Person(data, prefix='person1')
>>> p.errors['first_name']
[u'This field is required.']
>>> p.errors['last_name']
[u'This field is required.']
>>> p.errors['birthday']
[u'This field is required.']
>>> p['first_name'].errors
[u'This field is required.']
>>> p['person1-first_name'].errors
Traceback (most recent call last):
...
KeyError: "Key 'person1-first_name' not found in Form"
In this example, the data doesn't have a prefix, but the form requires it, so
the form doesn't "see" the fields.
>>> data = {
... 'first_name': u'John',
... 'last_name': u'Lennon',
... 'birthday': u'1940-10-9'
... }
>>> p = Person(data, prefix='person1')
>>> p.errors['first_name']
[u'This field is required.']
>>> p.errors['last_name']
[u'This field is required.']
>>> p.errors['birthday']
[u'This field is required.']
With prefixes, a single data dictionary can hold data for multiple instances
of the same form.
>>> data = {
... 'person1-first_name': u'John',
... 'person1-last_name': u'Lennon',
... 'person1-birthday': u'1940-10-9',
... 'person2-first_name': u'Jim',
... 'person2-last_name': u'Morrison',
... 'person2-birthday': u'1943-12-8'
... }
>>> p1 = Person(data, prefix='person1')
>>> p1.is_valid()
True
>>> p1.cleaned_data['first_name']
u'John'
>>> p1.cleaned_data['last_name']
u'Lennon'
>>> p1.cleaned_data['birthday']
datetime.date(1940, 10, 9)
>>> p2 = Person(data, prefix='person2')
>>> p2.is_valid()
True
>>> p2.cleaned_data['first_name']
u'Jim'
>>> p2.cleaned_data['last_name']
u'Morrison'
>>> p2.cleaned_data['birthday']
datetime.date(1943, 12, 8)
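In a view, the two prefixed forms are typically bound to the same POST data and
validated together; a minimal sketch using the Person form above (the view name
is made up):
    # Illustrative sketch, not executed by the doctest runner.
    def edit_people(request):
        p1 = Person(request.POST or None, prefix='person1')
        p2 = Person(request.POST or None, prefix='person2')
        if p1.is_valid() and p2.is_valid():
            pass  # both p1.cleaned_data and p2.cleaned_data are now available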
By default, forms append a hyphen between the prefix and the field name, but a
form can alter that behavior by implementing the add_prefix() method. This
method takes a field name and returns the prefixed field name, based on
self.prefix.
>>> class Person(Form):
... first_name = CharField()
... last_name = CharField()
... birthday = DateField()
... def add_prefix(self, field_name):
... return self.prefix and '%s-prefix-%s' % (self.prefix, field_name) or field_name
>>> p = Person(prefix='foo')
>>> print p.as_ul()
<li><label for="id_foo-prefix-first_name">First name:</label> <input type="text" name="foo-prefix-first_name" id="id_foo-prefix-first_name" /></li>
<li><label for="id_foo-prefix-last_name">Last name:</label> <input type="text" name="foo-prefix-last_name" id="id_foo-prefix-last_name" /></li>
<li><label for="id_foo-prefix-birthday">Birthday:</label> <input type="text" name="foo-prefix-birthday" id="id_foo-prefix-birthday" /></li>
>>> data = {
... 'foo-prefix-first_name': u'John',
... 'foo-prefix-last_name': u'Lennon',
... 'foo-prefix-birthday': u'1940-10-9'
... }
>>> p = Person(data, prefix='foo')
>>> p.is_valid()
True
>>> p.cleaned_data['first_name']
u'John'
>>> p.cleaned_data['last_name']
u'Lennon'
>>> p.cleaned_data['birthday']
datetime.date(1940, 10, 9)
# Forms with NullBooleanFields ################################################
NullBooleanField is a bit of a special case because its presentation (widget)
differs from its data. This is handled transparently, though.
>>> class Person(Form):
... name = CharField()
... is_cool = NullBooleanField()
>>> p = Person({'name': u'Joe'}, auto_id=False)
>>> print p['is_cool']
<select name="is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select>
>>> p = Person({'name': u'Joe', 'is_cool': u'1'}, auto_id=False)
>>> print p['is_cool']
<select name="is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select>
>>> p = Person({'name': u'Joe', 'is_cool': u'2'}, auto_id=False)
>>> print p['is_cool']
<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>
>>> p = Person({'name': u'Joe', 'is_cool': u'3'}, auto_id=False)
>>> print p['is_cool']
<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>
>>> p = Person({'name': u'Joe', 'is_cool': True}, auto_id=False)
>>> print p['is_cool']
<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>
>>> p = Person({'name': u'Joe', 'is_cool': False}, auto_id=False)
>>> print p['is_cool']
<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>
# Forms with FileFields ################################################
FileFields are a special case because they take their data from the request.FILES,
not request.POST.
>>> class FileForm(Form):
... file1 = FileField()
>>> f = FileForm(auto_id=False)
>>> print f
<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>
>>> f = FileForm(data={}, files={}, auto_id=False)
>>> print f
<tr><th>File1:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="file" name="file1" /></td></tr>
>>> f = FileForm(data={}, files={'file1': SimpleUploadedFile('name', '')}, auto_id=False)
>>> print f
<tr><th>File1:</th><td><ul class="errorlist"><li>The submitted file is empty.</li></ul><input type="file" name="file1" /></td></tr>
>>> f = FileForm(data={}, files={'file1': 'something that is not a file'}, auto_id=False)
>>> print f
<tr><th>File1:</th><td><ul class="errorlist"><li>No file was submitted. Check the encoding type on the form.</li></ul><input type="file" name="file1" /></td></tr>
>>> f = FileForm(data={}, files={'file1': SimpleUploadedFile('name', 'some content')}, auto_id=False)
>>> print f
<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>
>>> f.is_valid()
True
>>> f = FileForm(data={}, files={'file1': SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह')}, auto_id=False)
>>> print f
<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>
# Basic form processing in a view #############################################
>>> from django.template import Template, Context
>>> class UserRegistration(Form):
... username = CharField(max_length=10)
... password1 = CharField(widget=PasswordInput)
... password2 = CharField(widget=PasswordInput)
... def clean(self):
... if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
... raise ValidationError(u'Please make sure your passwords match.')
... return self.cleaned_data
>>> def my_function(method, post_data):
... if method == 'POST':
... form = UserRegistration(post_data, auto_id=False)
... else:
... form = UserRegistration(auto_id=False)
... if form.is_valid():
... return 'VALID: %r' % form.cleaned_data
... t = Template('<form action="" method="post">\n<table>\n{{ form }}\n</table>\n<input type="submit" />\n</form>')
... return t.render(Context({'form': form}))
Case 1: GET (an empty form, with no errors).
>>> print my_function('GET', {})
<form action="" method="post">
<table>
<tr><th>Username:</th><td><input type="text" name="username" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" /></td></tr>
</table>
<input type="submit" />
</form>
Case 2: POST with erroneous data (a redisplayed form, with errors).
>>> print my_function('POST', {'username': 'this-is-a-long-username', 'password1': 'foo', 'password2': 'bar'})
<form action="" method="post">
<table>
<tr><td colspan="2"><ul class="errorlist"><li>Please make sure your passwords match.</li></ul></td></tr>
<tr><th>Username:</th><td><ul class="errorlist"><li>Ensure this value has at most 10 characters (it has 23).</li></ul><input type="text" name="username" value="this-is-a-long-username" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" value="foo" /></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" value="bar" /></td></tr>
</table>
<input type="submit" />
</form>
Case 3: POST with valid data (the success message).
>>> print my_function('POST', {'username': 'adrian', 'password1': 'secret', 'password2': 'secret'})
VALID: {'username': u'adrian', 'password1': u'secret', 'password2': u'secret'}
# Some ideas for using templates with forms ###################################
>>> class UserRegistration(Form):
... username = CharField(max_length=10, help_text="Good luck picking a username that doesn't already exist.")
... password1 = CharField(widget=PasswordInput)
... password2 = CharField(widget=PasswordInput)
... def clean(self):
... if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
... raise ValidationError(u'Please make sure your passwords match.')
... return self.cleaned_data
You have full flexibility in displaying form fields in a template. Just pass a
Form instance to the template, and use "dot" access to refer to individual
fields. Note, however, that this flexibility comes with the responsibility of
displaying all the errors, including any that might not be associated with a
particular field.
>>> t = Template('''<form action="">
... {{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
... {{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
... {{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
... <input type="submit" />
... </form>''')
>>> print t.render(Context({'form': UserRegistration(auto_id=False)}))
<form action="">
<p><label>Your username: <input type="text" name="username" maxlength="10" /></label></p>
<p><label>Password: <input type="password" name="password1" /></label></p>
<p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>
>>> print t.render(Context({'form': UserRegistration({'username': 'django'}, auto_id=False)}))
<form action="">
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p>
<ul class="errorlist"><li>This field is required.</li></ul><p><label>Password: <input type="password" name="password1" /></label></p>
<ul class="errorlist"><li>This field is required.</li></ul><p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>
Use form.[field].label to output a field's label. You can specify the label for
a field by using the 'label' argument to a Field class. If you don't specify
'label', Django will use the field name with underscores converted to spaces,
and the initial letter capitalized.
>>> t = Template('''<form action="">
... <p><label>{{ form.username.label }}: {{ form.username }}</label></p>
... <p><label>{{ form.password1.label }}: {{ form.password1 }}</label></p>
... <p><label>{{ form.password2.label }}: {{ form.password2 }}</label></p>
... <input type="submit" />
... </form>''')
>>> print t.render(Context({'form': UserRegistration(auto_id=False)}))
<form action="">
<p><label>Username: <input type="text" name="username" maxlength="10" /></label></p>
<p><label>Password1: <input type="password" name="password1" /></label></p>
<p><label>Password2: <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>
Use form.[field].label_tag to output a field's label with a <label> tag
wrapped around it, but *only* if the given field has an "id" attribute.
Recall from above that passing the "auto_id" argument to a Form gives each
field an "id" attribute.
>>> t = Template('''<form action="">
... <p>{{ form.username.label_tag }}: {{ form.username }}</p>
... <p>{{ form.password1.label_tag }}: {{ form.password1 }}</p>
... <p>{{ form.password2.label_tag }}: {{ form.password2 }}</p>
... <input type="submit" />
... </form>''')
>>> print t.render(Context({'form': UserRegistration(auto_id=False)}))
<form action="">
<p>Username: <input type="text" name="username" maxlength="10" /></p>
<p>Password1: <input type="password" name="password1" /></p>
<p>Password2: <input type="password" name="password2" /></p>
<input type="submit" />
</form>
>>> print t.render(Context({'form': UserRegistration(auto_id='id_%s')}))
<form action="">
<p><label for="id_username">Username</label>: <input id="id_username" type="text" name="username" maxlength="10" /></p>
<p><label for="id_password1">Password1</label>: <input type="password" name="password1" id="id_password1" /></p>
<p><label for="id_password2">Password2</label>: <input type="password" name="password2" id="id_password2" /></p>
<input type="submit" />
</form>
Use form.[field].help_text to output a field's help text. If the given field
does not have help text, nothing will be output.
>>> t = Template('''<form action="">
... <p>{{ form.username.label_tag }}: {{ form.username }}<br />{{ form.username.help_text }}</p>
... <p>{{ form.password1.label_tag }}: {{ form.password1 }}</p>
... <p>{{ form.password2.label_tag }}: {{ form.password2 }}</p>
... <input type="submit" />
... </form>''')
>>> print t.render(Context({'form': UserRegistration(auto_id=False)}))
<form action="">
<p>Username: <input type="text" name="username" maxlength="10" /><br />Good luck picking a username that doesn't already exist.</p>
<p>Password1: <input type="password" name="password1" /></p>
<p>Password2: <input type="password" name="password2" /></p>
<input type="submit" />
</form>
>>> Template('{{ form.password1.help_text }}').render(Context({'form': UserRegistration(auto_id=False)}))
u''
The label_tag() method takes an optional attrs argument: a dictionary of HTML
attributes to add to the <label> tag.
>>> f = UserRegistration(auto_id='id_%s')
>>> for bf in f:
... print bf.label_tag(attrs={'class': 'pretty'})
<label for="id_username" class="pretty">Username</label>
<label for="id_password1" class="pretty">Password1</label>
<label for="id_password2" class="pretty">Password2</label>
To display the errors that aren't associated with a particular field -- e.g.,
the errors caused by Form.clean() -- use {{ form.non_field_errors }} in the
template. If used on its own, it is displayed as a <ul> (or an empty string, if
the list of errors is empty). You can also use it in {% if %} statements.
>>> t = Template('''<form action="">
... {{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
... {{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
... {{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
... <input type="submit" />
... </form>''')
>>> print t.render(Context({'form': UserRegistration({'username': 'django', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)}))
<form action="">
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p>
<p><label>Password: <input type="password" name="password1" value="foo" /></label></p>
<p><label>Password (again): <input type="password" name="password2" value="bar" /></label></p>
<input type="submit" />
</form>
>>> t = Template('''<form action="">
... {{ form.non_field_errors }}
... {{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
... {{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
... {{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
... <input type="submit" />
... </form>''')
>>> print t.render(Context({'form': UserRegistration({'username': 'django', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)}))
<form action="">
<ul class="errorlist"><li>Please make sure your passwords match.</li></ul>
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p>
<p><label>Password: <input type="password" name="password1" value="foo" /></label></p>
<p><label>Password (again): <input type="password" name="password2" value="bar" /></label></p>
<input type="submit" />
</form>
# The empty_permitted attribute ##############################################
Sometimes (pretty much in formsets) we want to allow a form to pass validation
if it is completely empty. We can accomplish this by using the empty_permitted
argument to a form constructor.
>>> class SongForm(Form):
... artist = CharField()
... name = CharField()
First let's show what happens if empty_permitted=False (the default):
>>> data = {'artist': '', 'song': ''}
>>> form = SongForm(data, empty_permitted=False)
>>> form.is_valid()
False
>>> form.errors
{'name': [u'This field is required.'], 'artist': [u'This field is required.']}
>>> form.cleaned_data
Traceback (most recent call last):
...
AttributeError: 'SongForm' object has no attribute 'cleaned_data'
Now let's show what happens when empty_permitted=True and the form is empty.
>>> form = SongForm(data, empty_permitted=True)
>>> form.is_valid()
True
>>> form.errors
{}
>>> form.cleaned_data
{}
But if we fill in data for one of the fields, the form is no longer empty and
the whole thing must pass validation.
>>> data = {'artist': 'The Doors', 'song': ''}
>>> form = SongForm(data, empty_permitted=False)
>>> form.is_valid()
False
>>> form.errors
{'name': [u'This field is required.']}
>>> form.cleaned_data
Traceback (most recent call last):
...
AttributeError: 'SongForm' object has no attribute 'cleaned_data'
If a field is not given in the data then None is returned for its data. Let's
make sure that None is treated accordingly when checking for empty_permitted.
>>> data = {'artist': None, 'song': ''}
>>> form = SongForm(data, empty_permitted=True)
>>> form.is_valid()
True
However, we *really* need to be sure we are checking for None as any data in
initial that returns False on a boolean call needs to be treated literally.
>>> class PriceForm(Form):
... amount = FloatField()
... qty = IntegerField()
>>> data = {'amount': '0.0', 'qty': ''}
>>> form = PriceForm(data, initial={'amount': 0.0}, empty_permitted=True)
>>> form.is_valid()
True
"""
| lgpl-3.0 |
mohammed52/something.pk | node_modules/node-gyp/gyp/pylib/gyp/MSVSVersion.py | 1509 | 17165 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
import gyp
import glob
class VisualStudioVersion(object):
"""Information regarding a version of Visual Studio."""
def __init__(self, short_name, description,
solution_version, project_version, flat_sln, uses_vcxproj,
path, sdk_based, default_toolset=None):
self.short_name = short_name
self.description = description
self.solution_version = solution_version
self.project_version = project_version
self.flat_sln = flat_sln
self.uses_vcxproj = uses_vcxproj
self.path = path
self.sdk_based = sdk_based
self.default_toolset = default_toolset
def ShortName(self):
return self.short_name
def Description(self):
"""Get the full description of the version."""
return self.description
def SolutionVersion(self):
"""Get the version number of the sln files."""
return self.solution_version
def ProjectVersion(self):
"""Get the version number of the vcproj or vcxproj files."""
return self.project_version
def FlatSolution(self):
return self.flat_sln
def UsesVcxproj(self):
"""Returns true if this version uses a vcxproj file."""
return self.uses_vcxproj
def ProjectExtension(self):
"""Returns the file extension for the project."""
return self.uses_vcxproj and '.vcxproj' or '.vcproj'
def Path(self):
"""Returns the path to Visual Studio installation."""
return self.path
def ToolPath(self, tool):
"""Returns the path to a given compiler tool. """
return os.path.normpath(os.path.join(self.path, "VC/bin", tool))
def DefaultToolset(self):
"""Returns the msbuild toolset version that will be used in the absence
of a user override."""
return self.default_toolset
def SetupScript(self, target_arch):
"""Returns a command (with arguments) to be used to set up the
environment."""
# Check if we are running in the SDK command line environment and use
# the setup script from the SDK if so. |target_arch| should be either
# 'x86' or 'x64'.
assert target_arch in ('x86', 'x64')
sdk_dir = os.environ.get('WindowsSDKDir')
if self.sdk_based and sdk_dir:
return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
'/' + target_arch]
else:
# We don't use VC/vcvarsall.bat for x86 because vcvarsall calls
# vcvars32, which it can only find if VS??COMNTOOLS is set, which it
# isn't always.
if target_arch == 'x86':
if self.short_name >= '2013' and self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
# VS2013 and later, non-Express have a x64-x86 cross that we want
# to prefer.
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), 'amd64_x86']
# Otherwise, the standard x86 compiler.
return [os.path.normpath(
os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]
else:
assert target_arch == 'x64'
arg = 'x86_amd64'
# Use the 64-on-64 compiler if we're not using an express
# edition and we're running on a 64bit OS.
if self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
arg = 'amd64'
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
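  # Illustrative sketch (not part of the original source; the install path is
  # hypothetical): for a non-Express VS2013 install at C:\VS12 on a 64-bit
  # host, SetupScript('x86') would return
  # [r'C:\VS12\VC\vcvarsall.bat', 'amd64_x86'] and SetupScript('x64') would
  # return [r'C:\VS12\VC\vcvarsall.bat', 'amd64'].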
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
While ideally we might use the win32 module, we would like gyp to be
python neutral, so for instance cygwin python lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Skip if not on Windows or Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
r"""Use reg.exe to read a particular key through _RegistryQueryBase.
First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
that fails, it falls back to System32. Sysnative is available on Vista and
up and available on Windows Server 2003 and XP through KB patch 942589. Note
that Sysnative will always fail if using 64-bit python due to it being a
virtual directory and System32 will work correctly in the first place.
KB 942589 - http://support.microsoft.com/kb/942589/en-us.
Arguments:
key: The registry key.
value: The particular registry value to read (optional).
Return:
stdout from reg.exe, or None for failure.
"""
text = None
try:
text = _RegistryQueryBase('Sysnative', key, value)
except OSError, e:
if e.errno == errno.ENOENT:
text = _RegistryQueryBase('System32', key, value)
else:
raise
return text
def _RegistryGetValueUsingWinReg(key, value):
"""Use the _winreg module to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure. Throws
ImportError if _winreg is unavailable.
"""
import _winreg
try:
root, subkey = key.split('\\', 1)
assert root == 'HKLM' # Only need HKLM for now.
with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey) as hkey:
return _winreg.QueryValueEx(hkey, value)[0]
except WindowsError:
return None
def _RegistryGetValue(key, value):
"""Use _winreg or reg.exe to obtain the value of a registry key.
Using _winreg is preferable because it solves an issue on some corporate
environments where access to reg.exe is locked down. However, we still need
  to fall back to reg.exe for the case where the _winreg module is not available
(for example in cygwin python).
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure.
"""
try:
return _RegistryGetValueUsingWinReg(key, value)
except ImportError:
pass
# Fallback to reg.exe if we fail to import _winreg.
text = _RegistryQuery(key, value)
if not text:
return None
# Extract value.
match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
if not match:
return None
return match.group(1)
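# Illustrative usage (a sketch; the key below is just an example of the kind
# of key queried later in this file):
#   _RegistryGetValue(r'HKLM\Software\Microsoft\VisualStudio\12.0', 'InstallDir')
# returns the install directory string on success, or None if the key or the
# value is missing.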
def _CreateVersion(name, path, sdk_based=False):
"""Sets up MSVS project generation.
Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
  passed in that doesn't match a value in versions, Python will throw an error.
"""
if path:
path = os.path.normpath(path)
versions = {
'2015': VisualStudioVersion('2015',
'Visual Studio 2015',
solution_version='12.00',
project_version='14.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v140'),
'2013': VisualStudioVersion('2013',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2013e': VisualStudioVersion('2013e',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2012': VisualStudioVersion('2012',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2012e': VisualStudioVersion('2012e',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2010': VisualStudioVersion('2010',
'Visual Studio 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2010e': VisualStudioVersion('2010e',
'Visual C++ Express 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2008': VisualStudioVersion('2008',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2008e': VisualStudioVersion('2008e',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005': VisualStudioVersion('2005',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005e': VisualStudioVersion('2005e',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
}
return versions[str(name)]
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
"""Collect the list of installed visual studio versions.
Returns:
A list of visual studio versions installed in descending order of
usage preference.
Base this on the registry and a quick check if devenv.exe exists.
    Versions 8 through 14 (2005 through 2015) are considered.
Possibilities are:
2005(e) - Visual Studio 2005 (8)
2008(e) - Visual Studio 2008 (9)
2010(e) - Visual Studio 2010 (10)
2012(e) - Visual Studio 2012 (11)
2013(e) - Visual Studio 2013 (12)
2015 - Visual Studio 2015 (14)
Where (e) is e for express editions of MSVS and blank otherwise.
"""
version_to_year = {
'8.0': '2005',
'9.0': '2008',
'10.0': '2010',
'11.0': '2012',
'12.0': '2013',
'14.0': '2015',
}
versions = []
for version in versions_to_check:
# Old method of searching for which VS version is installed
# We don't use the 2010-encouraged-way because we also want to get the
# path to the binaries, which it doesn't offer.
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Microsoft\VCExpress\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], 'InstallDir')
if not path:
continue
path = _ConvertToCygpath(path)
# Check for full.
full_path = os.path.join(path, 'devenv.exe')
express_path = os.path.join(path, '*express.exe')
if not force_express and os.path.exists(full_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version],
os.path.join(path, '..', '..')))
# Check for express.
elif glob.glob(express_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..', '..')))
# The old method above does not work when only SDK is installed.
keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], version)
if not path:
continue
path = _ConvertToCygpath(path)
if version != '14.0': # There is no Express edition for 2015.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..'), sdk_based=True))
return versions
def SelectVisualStudioVersion(version='auto', allow_fallback=True):
"""Select which version of Visual Studio projects to generate.
Arguments:
version: Hook to allow caller to force a particular version (vs auto).
Returns:
An object representing a visual studio project format version.
"""
# In auto mode, check environment variable for override.
if version == 'auto':
version = os.environ.get('GYP_MSVS_VERSION', 'auto')
version_map = {
'auto': ('14.0', '12.0', '10.0', '9.0', '8.0', '11.0'),
'2005': ('8.0',),
'2005e': ('8.0',),
'2008': ('9.0',),
'2008e': ('9.0',),
'2010': ('10.0',),
'2010e': ('10.0',),
'2012': ('11.0',),
'2012e': ('11.0',),
'2013': ('12.0',),
'2013e': ('12.0',),
'2015': ('14.0',),
}
override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
if override_path:
msvs_version = os.environ.get('GYP_MSVS_VERSION')
if not msvs_version:
raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
'set to a particular version (e.g. 2010e).')
return _CreateVersion(msvs_version, override_path, sdk_based=True)
version = str(version)
versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
if not versions:
if not allow_fallback:
raise ValueError('Could not locate Visual Studio installation.')
if version == 'auto':
# Default to 2005 if we couldn't find anything
return _CreateVersion('2005', None)
else:
return _CreateVersion(version, None)
return versions[0]
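# Illustrative usage (a minimal sketch): SelectVisualStudioVersion('2013')
# returns a VisualStudioVersion object for an installed VS 2013 if one is
# detected; otherwise, with allow_fallback=True (the default), it falls back
# to _CreateVersion('2013', None).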
| mit |
mikeolteanu/livepythonconsole-app-engine | boilerplate/external/apiclient/mimeparse.py | 189 | 6486 | # Copyright (C) 2007 Joe Gregorio
#
# Licensed under the MIT License
"""MIME-Type Parser
This module provides basic functions for handling mime-types. It can handle
matching mime-types against a list of media-ranges. See section 14.1 of the
HTTP specification [RFC 2616] for a complete explanation.
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
Contents:
- parse_mime_type(): Parses a mime-type into its component parts.
- parse_media_range(): Media-ranges are mime-types with wild-cards and a 'q'
quality parameter.
- quality(): Determines the quality ('q') of a mime-type when
compared against a list of media-ranges.
- quality_parsed(): Just like quality() except the second parameter must be
pre-parsed.
- best_match(): Choose the mime-type with the highest quality ('q')
from a list of candidates.
"""
__version__ = '0.1.3'
__author__ = 'Joe Gregorio'
__email__ = '[email protected]'
__license__ = 'MIT License'
__credits__ = ''
def parse_mime_type(mime_type):
"""Parses a mime-type into its component parts.
Carves up a mime-type and returns a tuple of the (type, subtype, params)
where 'params' is a dictionary of all the parameters for the media range.
For example, the media range 'application/xhtml;q=0.5' would get parsed
into:
     ('application', 'xhtml', {'q': '0.5'})
"""
parts = mime_type.split(';')
params = dict([tuple([s.strip() for s in param.split('=', 1)])\
for param in parts[1:]
])
full_type = parts[0].strip()
# Java URLConnection class sends an Accept header that includes a
# single '*'. Turn it into a legal wildcard.
if full_type == '*':
full_type = '*/*'
(type, subtype) = full_type.split('/')
return (type.strip(), subtype.strip(), params)
def parse_media_range(range):
"""Parse a media-range into its component parts.
Carves up a media range and returns a tuple of the (type, subtype,
params) where 'params' is a dictionary of all the parameters for the media
range. For example, the media range 'application/*;q=0.5' would get parsed
into:
     ('application', '*', {'q': '0.5'})
In addition this function also guarantees that there is a value for 'q'
in the params dictionary, filling it in with a proper default if
necessary.
"""
(type, subtype, params) = parse_mime_type(range)
if not params.has_key('q') or not params['q'] or \
not float(params['q']) or float(params['q']) > 1\
or float(params['q']) < 0:
params['q'] = '1'
return (type, subtype, params)
def fitness_and_quality_parsed(mime_type, parsed_ranges):
"""Find the best match for a mime-type amongst parsed media-ranges.
Find the best match for a given mime-type against a list of media_ranges
that have already been parsed by parse_media_range(). Returns a tuple of
the fitness value and the value of the 'q' quality parameter of the best
match, or (-1, 0) if no match was found. Just as for quality_parsed(),
'parsed_ranges' must be a list of parsed media ranges.
"""
best_fitness = -1
best_fit_q = 0
(target_type, target_subtype, target_params) =\
parse_media_range(mime_type)
for (type, subtype, params) in parsed_ranges:
type_match = (type == target_type or\
type == '*' or\
target_type == '*')
subtype_match = (subtype == target_subtype or\
subtype == '*' or\
target_subtype == '*')
if type_match and subtype_match:
param_matches = reduce(lambda x, y: x + y, [1 for (key, value) in \
target_params.iteritems() if key != 'q' and \
params.has_key(key) and value == params[key]], 0)
fitness = (type == target_type) and 100 or 0
fitness += (subtype == target_subtype) and 10 or 0
fitness += param_matches
if fitness > best_fitness:
best_fitness = fitness
best_fit_q = params['q']
return best_fitness, float(best_fit_q)
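# Illustrative example (a minimal sketch; values follow from the scoring rules
# above):
#   ranges = [parse_media_range(r) for r in ['text/*;q=0.3', 'text/html;q=0.7']]
#   fitness_and_quality_parsed('text/html', ranges) == (110, 0.7)
# The exact type/subtype match scores 100 + 10, and the 'q' of that best match
# is returned.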
def quality_parsed(mime_type, parsed_ranges):
"""Find the best match for a mime-type amongst parsed media-ranges.
Find the best match for a given mime-type against a list of media_ranges
that have already been parsed by parse_media_range(). Returns the 'q'
quality parameter of the best match, 0 if no match was found. This function
  behaves the same as quality() except that 'parsed_ranges' must be a list of
parsed media ranges.
"""
return fitness_and_quality_parsed(mime_type, parsed_ranges)[1]
def quality(mime_type, ranges):
"""Return the quality ('q') of a mime-type against a list of media-ranges.
Returns the quality 'q' of a mime-type when compared against the
media-ranges in ranges. For example:
>>> quality('text/html','text/*;q=0.3, text/html;q=0.7,
text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5')
0.7
"""
parsed_ranges = [parse_media_range(r) for r in ranges.split(',')]
return quality_parsed(mime_type, parsed_ranges)
def best_match(supported, header):
"""Return mime-type with the highest quality ('q') from list of candidates.
Takes a list of supported mime-types and finds the best match for all the
media-ranges listed in header. The value of header must be a string that
conforms to the format of the HTTP Accept: header. The value of 'supported'
is a list of mime-types. The list of supported mime-types should be sorted
in order of increasing desirability, in case of a situation where there is
a tie.
>>> best_match(['application/xbel+xml', 'text/xml'],
'text/*;q=0.5,*/*; q=0.1')
'text/xml'
"""
split_header = _filter_blank(header.split(','))
parsed_header = [parse_media_range(r) for r in split_header]
weighted_matches = []
pos = 0
for mime_type in supported:
weighted_matches.append((fitness_and_quality_parsed(mime_type,
parsed_header), pos, mime_type))
pos += 1
weighted_matches.sort()
return weighted_matches[-1][0][1] and weighted_matches[-1][2] or ''
def _filter_blank(i):
for s in i:
if s.strip():
yield s
| lgpl-3.0 |
dea82/StateMachineCreator | dev-tools/cpplint.py | 1 | 249692 | #!/usr/bin/env python
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It also does not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import copy
import getopt
import glob
import itertools
import math # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata
import xml.etree.ElementTree
# if empty, use defaults
_header_extensions = set([])
# if empty, use defaults
_valid_extensions = set([])
# Files with any of these extensions are considered to be
# header files (and will undergo different style checks).
# This set can be extended by using the --headers
# option (also supported in CPPLINT.cfg)
def GetHeaderExtensions():
if not _header_extensions:
return set(['h', 'hpp', 'hxx', 'h++', 'cuh'])
return _header_extensions
# The allowed extensions for file names
# This is set by --extensions flag
def GetAllExtensions():
if not _valid_extensions:
return GetHeaderExtensions().union(set(['c', 'cc', 'cpp', 'cxx', 'c++', 'cu']))
return _valid_extensions
def GetNonHeaderExtensions():
return GetAllExtensions().difference(GetHeaderExtensions())
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=emacs|eclipse|vs7|junit]
[--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--repository=path]
[--root=subdir] [--linelength=digits] [--recursive]
[--exclude=path]
[--headers=ext1,ext2]
[--extensions=hpp,cpp,...]
<file> [file] ...
The style guidelines this tries to follow are those in
https://google.github.io/styleguide/cppguide.html
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Default linted extensions are %s.
Other file types will be ignored.
Change the extensions with the --extensions flag.
Flags:
output=emacs|eclipse|vs7|junit
By default, the output is formatted to ease emacs parsing. Output
compatible with eclipse (eclipse), Visual Studio (vs7), and JUnit
XML parsers such as those used in Jenkins and Bamboo may also be
used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
Errors with lower verbosity levels have lower confidence and are more
likely to be false positives.
quiet
      Suppress output other than linting errors, such as information about
which files have been processed and excluded.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
repository=path
The top level directory of the repository, used to derive the header
guard CPP variable. By default, this is determined by searching for a
path that contains .git, .hg, or .svn. When this flag is specified, the
given path is used instead. This option allows the header guard CPP
variable to remain consistent even if members of a team have different
repository root directories (such as when checking out a subdirectory
with SVN). In addition, users of non-mainstream version control systems
can use this flag to ensure readable header guard CPP variables.
Examples:
Assuming that Alice checks out ProjectName and Bob checks out
ProjectName/trunk and trunk contains src/chrome/ui/browser.h, then
with no --repository flag, the header guard CPP variable will be:
Alice => TRUNK_SRC_CHROME_BROWSER_UI_BROWSER_H_
Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_
If Alice uses the --repository=trunk flag and Bob omits the flag or
uses --repository=. then the header guard CPP variable will be:
Alice => SRC_CHROME_BROWSER_UI_BROWSER_H_
Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_
root=subdir
The root directory used for deriving header guard CPP variables. This
directory is relative to the top level directory of the repository which
by default is determined by searching for a directory that contains .git,
.hg, or .svn but can also be controlled with the --repository flag. If
the specified directory does not exist, this flag is ignored.
Examples:
Assuming that src is the top level directory of the repository, the
header guard CPP variables for src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
--root=chrome/browser => UI_BROWSER_H_
linelength=digits
This is the allowed line length for the project. The default value is
80 characters.
Examples:
--linelength=120
recursive
Search for files to lint recursively. Each directory given in the list
of files to be linted is replaced by all files that descend from that
directory. Files with extensions not in the valid extensions list are
excluded.
exclude=path
Exclude the given path from the list of files to be linted. Relative
paths are evaluated relative to the current directory and shell globbing
is performed. This flag can be provided multiple times to exclude
multiple files.
Examples:
--exclude=one.cc
--exclude=src/*.cc
--exclude=src/*.cc --exclude=test/*.cc
extensions=extension,extension,...
The allowed file extensions that cpplint will check
Examples:
--extensions=%s
headers=extension,extension,...
The allowed header extensions that cpplint will consider to be header files
(by default, only files with extensions %s
will be assumed to be headers)
Examples:
--headers=%s
cpplint.py supports per-directory configurations specified in CPPLINT.cfg
files. CPPLINT.cfg file can contain a number of key=value pairs.
Currently the following options are supported:
set noparent
filter=+filter1,-filter2,...
exclude_files=regex
linelength=80
root=subdir
"set noparent" option prevents cpplint from traversing directory tree
upwards looking for more .cfg files in parent directories. This option
is usually placed in the top-level project directory.
The "filter" option is similar in function to --filter flag. It specifies
message filters in addition to the |_DEFAULT_FILTERS| and those specified
through --filter command-line flag.
"exclude_files" allows to specify a regular expression to be matched against
a file name. If the expression matches, the file is skipped and not run
through the linter.
"linelength" specifies the allowed line length for the project.
The "root" option is similar in function to the --root flag (see example
above).
CPPLINT.cfg has an effect on files in the same directory and all
subdirectories, unless overridden by a nested configuration file.
Example file:
filter=-build/include_order,+build/include_alpha
exclude_files=.*\\.cc
The above example disables build/include_order warning and enables
build/include_alpha as well as excludes all .cc from being
processed by linter, in the current directory (where the .cfg
file is located) and all subdirectories.
""" % (list(GetAllExtensions()),
','.join(list(GetAllExtensions())),
GetHeaderExtensions(),
','.join(GetHeaderExtensions()))
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
_ERROR_CATEGORIES = [
'build/class',
'build/c++11',
'build/c++14',
'build/c++tr1',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/framework',
'build/include_subdir',
'build/include_alpha',
'build/include_order',
'build/include_what_you_use',
'build/namespaces_literals',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'legal/copyright',
'readability/alt_tokens',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/inheritance',
'readability/multiline_comment',
'readability/multiline_string',
'readability/namespace',
'readability/nolint',
'readability/nul',
'readability/strings',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
'runtime/indentation_namespace',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/string',
'runtime/threadsafe_fn',
'runtime/vlog',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
'whitespace/empty_conditional_body',
'whitespace/empty_if_body',
'whitespace/empty_loop_body',
'whitespace/end_of_line',
'whitespace/ending_newline',
'whitespace/forcolon',
'whitespace/indent',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo',
]
# These error categories are no longer enforced by cpplint, but for backwards-
# compatibility they may still appear in NOLINT comments.
_LEGACY_ERROR_CATEGORIES = [
'readability/streams',
'readability/function',
]
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add here categories that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = ['-build/include_alpha']
# The default list of categories suppressed for C (not C++) files.
_DEFAULT_C_SUPPRESSED_CATEGORIES = [
'readability/casting',
]
# The default list of categories suppressed for Linux Kernel files.
_DEFAULT_KERNEL_SUPPRESSED_CATEGORIES = [
'whitespace/tab',
]
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# C++ headers
_CPP_HEADERS = frozenset([
# Legacy
'algobase.h',
'algo.h',
'alloc.h',
'builtinbuf.h',
'bvector.h',
'complex.h',
'defalloc.h',
'deque.h',
'editbuf.h',
'fstream.h',
'function.h',
'hash_map',
'hash_map.h',
'hash_set',
'hash_set.h',
'hashtable.h',
'heap.h',
'indstream.h',
'iomanip.h',
'iostream.h',
'istream.h',
'iterator.h',
'list.h',
'map.h',
'multimap.h',
'multiset.h',
'ostream.h',
'pair.h',
'parsestream.h',
'pfstream.h',
'procbuf.h',
'pthread_alloc',
'pthread_alloc.h',
'rope',
'rope.h',
'ropeimpl.h',
'set.h',
'slist',
'slist.h',
'stack.h',
'stdiostream.h',
'stl_alloc.h',
'stl_relops.h',
'streambuf.h',
'stream.h',
'strfile.h',
'strstream.h',
'tempbuf.h',
'tree.h',
'type_traits.h',
'vector.h',
# 17.6.1.2 C++ library headers
'algorithm',
'array',
'atomic',
'bitset',
'chrono',
'codecvt',
'complex',
'condition_variable',
'deque',
'exception',
'forward_list',
'fstream',
'functional',
'future',
'initializer_list',
'iomanip',
'ios',
'iosfwd',
'iostream',
'istream',
'iterator',
'limits',
'list',
'locale',
'map',
'memory',
'mutex',
'new',
'numeric',
'ostream',
'queue',
'random',
'ratio',
'regex',
'scoped_allocator',
'set',
'sstream',
'stack',
'stdexcept',
'streambuf',
'string',
'strstream',
'system_error',
'thread',
'tuple',
'typeindex',
'typeinfo',
'type_traits',
'unordered_map',
'unordered_set',
'utility',
'valarray',
'vector',
# 17.6.1.2 C++ headers for C library facilities
'cassert',
'ccomplex',
'cctype',
'cerrno',
'cfenv',
'cfloat',
'cinttypes',
'ciso646',
'climits',
'clocale',
'cmath',
'csetjmp',
'csignal',
'cstdalign',
'cstdarg',
'cstdbool',
'cstddef',
'cstdint',
'cstdio',
'cstdlib',
'cstring',
'ctgmath',
'ctime',
'cuchar',
'cwchar',
'cwctype',
])
# Type names
_TYPES = re.compile(
r'^(?:'
# [dcl.type.simple]
r'(char(16_t|32_t)?)|wchar_t|'
r'bool|short|int|long|signed|unsigned|float|double|'
# [support.types]
r'(ptrdiff_t|size_t|max_align_t|nullptr_t)|'
# [cstdint.syn]
r'(u?int(_fast|_least)?(8|16|32|64)_t)|'
r'(u?int(max|ptr)_t)|'
r')$')
# These headers are excluded from [build/framework] and [build/include_order]
# checks:
# - Anything not following google file name conventions (containing an
# uppercase character, such as Python.h or nsStringAPI.h, for example).
# - Lua headers.
_THIRD_PARTY_HEADERS_PATTERN = re.compile(
r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
# Pattern for matching FileInfo.BaseName() against test file name
_test_suffixes = ['_test', '_regtest', '_unittest']
_TEST_FILE_SUFFIX = '(' + '|'.join(_test_suffixes) + r')$'
# Pattern that matches only complete whitespace, possibly across multiple lines.
_EMPTY_CONDITIONAL_BODY_PATTERN = re.compile(r'^\s*$', re.DOTALL)
# Assertion macros. These are defined in base/logging.h and
# testing/base/public/gunit.h.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE', 'ASSERT_TRUE',
'EXPECT_FALSE', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(macro_var, {}) for macro_var in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
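# For example, after the two loops above:
#   _CHECK_REPLACEMENT['CHECK']['=='] == 'CHECK_EQ'
#   _CHECK_REPLACEMENT['EXPECT_FALSE']['=='] == 'EXPECT_NE'
# so CHECK(a == b) can be suggested as CHECK_EQ(a, b), and EXPECT_FALSE(a == b)
# as EXPECT_NE(a, b).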
# Alternative tokens and their replacements. For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
#
# Digraphs (such as '%:') are not included here since it's a mess to
# match those on a word boundary.
_ALT_TOKEN_REPLACEMENT = {
'and': '&&',
'bitor': '|',
'or': '||',
'xor': '^',
'compl': '~',
'bitand': '&',
'and_eq': '&=',
'or_eq': '|=',
'xor_eq': '^=',
'not': '!',
'not_eq': '!='
}
# Compile regular expression that matches all the above keywords. The "[ =()]"
# bit is meant to avoid matching these keywords outside of boolean expressions.
#
# False positives include C-style multi-line comments and multi-line strings
# but those have always been troublesome for cpplint.
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
# These constants define the current inline assembly state
_NO_ASM = 0 # Outside of inline assembly block
_INSIDE_ASM = 1 # Inside inline assembly block
_END_ASM = 2 # Last line of inline assembly block
_BLOCK_ASM = 3 # The whole block is an inline assembly block
# Match start of assembly blocks
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
r'(?:\s+(volatile|__volatile__))?'
r'\s*[{(]')
# Match strings that indicate we're working on a C (not C++) file.
_SEARCH_C_FILE = re.compile(r'\b(?:LINT_C_FILE|'
r'vim?:\s*.*(\s*|:)filetype=c(\s*|:|$))')
# Match string that indicates we're working on a Linux Kernel file.
_SEARCH_KERNEL_FILE = re.compile(r'\b(?:LINT_KERNEL_FILE)')
_regexp_compile_cache = {}
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
# The root directory used for deriving header guard CPP variable.
# This is set by --root flag.
_root = None
# The top level repository directory. If set, _root is calculated relative to
# this directory instead of the directory containing version control artifacts.
# This is set by the --repository flag.
_repository = None
# Files to exclude from linting. This is set by the --exclude flag.
_excludes = None
# Whether to suppress PrintInfo messages
_quiet = False
# The allowed line length of files.
# This is set by --linelength flag.
_line_length = 80
try:
xrange(1, 0)
except NameError:
# -- pylint: disable=redefined-builtin
xrange = range
try:
unicode
except NameError:
# -- pylint: disable=redefined-builtin
basestring = unicode = str
try:
long(2)
except NameError:
# -- pylint: disable=redefined-builtin
long = int
if sys.version_info < (3,):
# -- pylint: disable=no-member
# BINARY_TYPE = str
itervalues = dict.itervalues
iteritems = dict.iteritems
else:
# BINARY_TYPE = bytes
itervalues = dict.values
iteritems = dict.items
def unicode_escape_decode(x):
if sys.version_info < (3,):
return codecs.unicode_escape_decode(x)[0]
else:
return x
# {str, bool}: a map from error categories to booleans which indicate if the
# category should be suppressed for every line.
_global_error_suppressions = {}
def ParseNolintSuppressions(filename, raw_line, linenum, error):
"""Updates the global list of line error-suppressions.
Parses any NOLINT comments on the current line, updating the global
error_suppressions store. Reports an error if the NOLINT comment
was malformed.
Args:
filename: str, the name of the input file.
raw_line: str, the line of input text, with comments.
linenum: int, the number of the current line.
error: function, an error handler.
"""
matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
if matched:
if matched.group(1):
suppressed_line = linenum + 1
else:
suppressed_line = linenum
category = matched.group(2)
if category in (None, '(*)'): # => "suppress all"
_error_suppressions.setdefault(None, set()).add(suppressed_line)
else:
if category.startswith('(') and category.endswith(')'):
category = category[1:-1]
if category in _ERROR_CATEGORIES:
_error_suppressions.setdefault(category, set()).add(suppressed_line)
elif category not in _LEGACY_ERROR_CATEGORIES:
error(filename, linenum, 'readability/nolint', 5,
'Unknown NOLINT error category: %s' % category)
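# Illustrative examples (not from the original source) of the comments parsed
# above:
#   '}  // NOLINT(whitespace/braces)'  suppresses that category on this line.
#   '// NOLINTNEXTLINE(*)'             suppresses every category on the next line.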
def ProcessGlobalSuppresions(lines):
"""Updates the list of global error suppressions.
Parses any lint directives in the file that have global effect.
Args:
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
"""
for line in lines:
if _SEARCH_C_FILE.search(line):
for category in _DEFAULT_C_SUPPRESSED_CATEGORIES:
_global_error_suppressions[category] = True
if _SEARCH_KERNEL_FILE.search(line):
for category in _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES:
_global_error_suppressions[category] = True
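# Illustrative example (a sketch): a file containing a line such as
#   '// LINT_C_FILE'
# enables the default C suppressions (e.g. readability/casting) for the entire
# file via the map populated above.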
def ResetNolintSuppressions():
"""Resets the set of NOLINT suppressions to empty."""
_error_suppressions.clear()
_global_error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
"""Returns true if the specified error category is suppressed on this line.
Consults the global error_suppressions map populated by
ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions.
Args:
category: str, the category of the error.
linenum: int, the current line number.
Returns:
bool, True iff the error should be suppressed due to a NOLINT comment or
global suppression.
"""
return (_global_error_suppressions.get(category, False) or
linenum in _error_suppressions.get(category, set()) or
linenum in _error_suppressions.get(None, set()))
def Match(pattern, s):
"""Matches the string with the pattern, caching the compiled regexp."""
# The regexp compilation caching is inlined in both Match and Search for
# performance reasons; factoring it out into a separate function turns out
# to be noticeably expensive.
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].match(s)
def ReplaceAll(pattern, rep, s):
"""Replaces instances of pattern in a string with a replacement.
The compiled regex is kept in a cache shared by Match and Search.
Args:
pattern: regex pattern
rep: replacement text
s: search string
Returns:
string with replacements made (or original string if no replacements)
"""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].sub(rep, s)
def Search(pattern, s):
"""Searches the string for the pattern, caching the compiled regexp."""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].search(s)
def _IsSourceExtension(s):
"""File extension (excluding dot) matches a source file extension."""
return s in GetNonHeaderExtensions()
class _IncludeState(object):
"""Tracks line numbers for includes, and the order in which includes appear.
include_list contains list of lists of (header, line number) pairs.
It's a lists of lists rather than just one flat list to make it
easier to update across preprocessor boundaries.
Call CheckNextIncludeOrder() once for each header in the file, passing
in the type constants defined above. Calls in an illegal order will
raise an _IncludeError with an appropriate error message.
"""
# self._section will move monotonically through this set. If it ever
# needs to move backwards, CheckNextIncludeOrder will raise an error.
_INITIAL_SECTION = 0
_MY_H_SECTION = 1
_C_SECTION = 2
_CPP_SECTION = 3
_OTHER_H_SECTION = 4
_TYPE_NAMES = {
_C_SYS_HEADER: 'C system header',
_CPP_SYS_HEADER: 'C++ system header',
_LIKELY_MY_HEADER: 'header this file implements',
_POSSIBLE_MY_HEADER: 'header this file may implement',
_OTHER_HEADER: 'other header',
}
_SECTION_NAMES = {
_INITIAL_SECTION: "... nothing. (This can't be an error.)",
_MY_H_SECTION: 'a header this file implements',
_C_SECTION: 'C system header',
_CPP_SECTION: 'C++ system header',
_OTHER_H_SECTION: 'other header',
}
def __init__(self):
self.include_list = [[]]
self._section = None
self._last_header = None
self.ResetSection('')
def FindHeader(self, header):
"""Check if a header has already been included.
Args:
header: header to check.
Returns:
Line number of previous occurrence, or -1 if the header has not
been seen before.
"""
for section_list in self.include_list:
for f in section_list:
if f[0] == header:
return f[1]
return -1
def ResetSection(self, directive):
"""Reset section checking for preprocessor directive.
Args:
directive: preprocessor directive (e.g. "if", "else").
"""
# The name of the current section.
self._section = self._INITIAL_SECTION
# The path of last found header.
self._last_header = ''
# Update list of includes. Note that we never pop from the
    # include list.
if directive in ('if', 'ifdef', 'ifndef'):
self.include_list.append([])
elif directive in ('else', 'elif'):
self.include_list[-1] = []
def SetLastHeader(self, header_path):
self._last_header = header_path
def CanonicalizeAlphabeticalOrder(self, header_path):
"""Returns a path canonicalized for alphabetical comparison.
- replaces "-" with "_" so they both cmp the same.
- removes '-inl' since we don't require them to be after the main header.
- lowercase everything, just in case.
Args:
header_path: Path to be canonicalized.
Returns:
Canonicalized path.
"""
return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
"""Check if a header is in alphabetical order with the previous header.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
header_path: Canonicalized header to be checked.
Returns:
Returns true if the header is in alphabetical order.
"""
# If previous section is different from current section, _last_header will
# be reset to empty string, so it's always less than current header.
#
# If previous line was a blank line, assume that the headers are
# intentionally sorted the way they are.
if (self._last_header > header_path and
        Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])):
return False
return True
def CheckNextIncludeOrder(self, header_type):
"""Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
    the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong.
"""
error_message = ('Found %s after %s' %
(self._TYPE_NAMES[header_type],
self._SECTION_NAMES[self._section]))
last_section = self._section
if header_type == _C_SYS_HEADER:
if self._section <= self._C_SECTION:
self._section = self._C_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _CPP_SYS_HEADER:
if self._section <= self._CPP_SECTION:
self._section = self._CPP_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _LIKELY_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
self._section = self._OTHER_H_SECTION
elif header_type == _POSSIBLE_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
# This will always be the fallback because we're not sure
# enough that the header is associated with this file.
self._section = self._OTHER_H_SECTION
else:
assert header_type == _OTHER_HEADER
self._section = self._OTHER_H_SECTION
if last_section != self._section:
self._last_header = ''
return ''
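# Illustrative ordering accepted by _IncludeState.CheckNextIncludeOrder: for a
# hypothetical foo.cc, listing foo.h (_LIKELY_MY_HEADER), then C system
# headers, then C++ system headers, then other project headers returns '' for
# every call; reporting a _C_SYS_HEADER once the C++ section has been reached
# returns "Found C system header after C++ system header".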
class _CppLintState(object):
"""Maintains module-wide state.."""
def __init__(self):
self.verbose_level = 1 # global setting.
self.error_count = 0 # global count of reported errors
# filters to apply when emitting error messages
self.filters = _DEFAULT_FILTERS[:]
# backup of filter list. Used to restore the state after each file.
self._filters_backup = self.filters[:]
self.counting = 'total' # In what way are we counting errors?
self.errors_by_category = {} # string to int dict storing error counts
# output format:
# "emacs" - format that emacs can parse (default)
# "eclipse" - format that eclipse can parse
# "vs7" - format that Microsoft Visual Studio 7 can parse
# "junit" - format that Jenkins, Bamboo, etc can parse
self.output_format = 'emacs'
# For JUnit output, save errors and failures until the end so that they
# can be written into the XML
self._junit_errors = []
self._junit_failures = []
def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format
def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
def SetCountingStyle(self, counting_style):
"""Sets the module's counting options."""
self.counting = counting_style
def SetFilters(self, filters):
"""Sets the error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "+whitespace/indent").
Each filter should start with + or -; else we die.
Raises:
ValueError: The comma-separated filters did not all start with '+' or '-'.
E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
"""
# Default filters always have less priority than the flag ones.
self.filters = _DEFAULT_FILTERS[:]
self.AddFilters(filters)
def AddFilters(self, filters):
""" Adds more filters to the existing list of error-message filters. """
for filt in filters.split(','):
clean_filt = filt.strip()
if clean_filt:
self.filters.append(clean_filt)
for filt in self.filters:
if not (filt.startswith('+') or filt.startswith('-')):
raise ValueError('Every filter in --filters must start with + or -'
' (%s does not)' % filt)
def BackupFilters(self):
""" Saves the current filter list to backup storage."""
self._filters_backup = self.filters[:]
def RestoreFilters(self):
""" Restores filters previously backed up."""
self.filters = self._filters_backup[:]
def ResetErrorCounts(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
self.errors_by_category = {}
def IncrementErrorCount(self, category):
"""Bumps the module's error statistic."""
self.error_count += 1
if self.counting in ('toplevel', 'detailed'):
if self.counting != 'detailed':
category = category.split('/')[0]
if category not in self.errors_by_category:
self.errors_by_category[category] = 0
self.errors_by_category[category] += 1
def PrintErrorCounts(self):
"""Print a summary of errors by category, and the total."""
for category, count in sorted(iteritems(self.errors_by_category)):
self.PrintInfo('Category \'%s\' errors found: %d\n' %
(category, count))
if self.error_count > 0:
self.PrintInfo('Total errors found: %d\n' % self.error_count)
def PrintInfo(self, message):
if not _quiet and self.output_format != 'junit':
sys.stderr.write(message)
def PrintError(self, message):
if self.output_format == 'junit':
self._junit_errors.append(message)
else:
sys.stderr.write(message)
def AddJUnitFailure(self, filename, linenum, message, category, confidence):
self._junit_failures.append((filename, linenum, message, category,
confidence))
def FormatJUnitXML(self):
num_errors = len(self._junit_errors)
num_failures = len(self._junit_failures)
testsuite = xml.etree.ElementTree.Element('testsuite')
testsuite.attrib['name'] = 'cpplint'
testsuite.attrib['errors'] = str(num_errors)
testsuite.attrib['failures'] = str(num_failures)
if num_errors == 0 and num_failures == 0:
testsuite.attrib['tests'] = str(1)
xml.etree.ElementTree.SubElement(testsuite, 'testcase', name='passed')
else:
testsuite.attrib['tests'] = str(num_errors + num_failures)
if num_errors > 0:
testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
testcase.attrib['name'] = 'errors'
error = xml.etree.ElementTree.SubElement(testcase, 'error')
error.text = '\n'.join(self._junit_errors)
if num_failures > 0:
# Group failures by file
failed_file_order = []
failures_by_file = {}
for failure in self._junit_failures:
failed_file = failure[0]
if failed_file not in failed_file_order:
failed_file_order.append(failed_file)
failures_by_file[failed_file] = []
failures_by_file[failed_file].append(failure)
# Create a testcase for each file
for failed_file in failed_file_order:
failures = failures_by_file[failed_file]
testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
testcase.attrib['name'] = failed_file
failure = xml.etree.ElementTree.SubElement(testcase, 'failure')
template = '{0}: {1} [{2}] [{3}]'
texts = [template.format(f[1], f[2], f[3], f[4]) for f in failures]
failure.text = '\n'.join(texts)
xml_decl = '<?xml version="1.0" encoding="UTF-8" ?>\n'
return xml_decl + xml.etree.ElementTree.tostring(testsuite, 'utf-8').decode('utf-8')
_cpplint_state = _CppLintState()
def _OutputFormat():
"""Gets the module's output format."""
return _cpplint_state.output_format
def _SetOutputFormat(output_format):
"""Sets the module's output format."""
_cpplint_state.SetOutputFormat(output_format)
def _VerboseLevel():
"""Returns the module's verbosity setting."""
return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
"""Sets the module's verbosity, and returns the previous setting."""
return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
"""Sets the module's counting options."""
_cpplint_state.SetCountingStyle(level)
def _Filters():
"""Returns the module's list of output filters, as a list."""
return _cpplint_state.filters
def _SetFilters(filters):
"""Sets the module's error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.SetFilters(filters)
def _AddFilters(filters):
"""Adds more filter overrides.
Unlike _SetFilters, this function does not reset the current list of filters
available.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.AddFilters(filters)
def _BackupFilters():
""" Saves the current filter list to backup storage."""
_cpplint_state.BackupFilters()
def _RestoreFilters():
""" Restores filters previously backed up."""
_cpplint_state.RestoreFilters()
class _FunctionState(object):
"""Tracks current function name and the number of lines in its body."""
_NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
_TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
def __init__(self):
self.in_a_function = False
self.lines_in_function = 0
self.current_function = ''
def Begin(self, function_name):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
"""
self.in_a_function = True
self.lines_in_function = 0
self.current_function = function_name
def Count(self):
"""Count line in current function body."""
if self.in_a_function:
self.lines_in_function += 1
def Check(self, error, filename, linenum):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
"""
if not self.in_a_function:
return
if Match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2**_VerboseLevel()
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
# 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
if error_level > 5:
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger))
def End(self):
"""Stop analyzing function body."""
self.in_a_function = False
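# Illustrative trigger arithmetic for _FunctionState.Check (non-test function,
# --v=0): the threshold is _NORMAL_TRIGGER * 2**0 == 250 non-comment lines; a
# hypothetical 501-line body reports readability/fn_size at level
# int(log2(501/250)) == 1, and the level is capped at 5 for very long functions.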
class _IncludeError(Exception):
"""Indicates a problem with the framework order in a file."""
pass
class FileInfo(object):
"""Provides utility functions for filenames.
FileInfo provides easy access to the components of a file's path
relative to the project root.
"""
def __init__(self, filename):
self._filename = filename
def FullName(self):
"""Make Windows paths like Unix."""
return os.path.abspath(self._filename).replace('\\', '/')
def RepositoryName(self):
r"""FullName after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
    the name so that we get header guards that don't include things like
"C:\Documents and Settings\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors.
"""
fullname = self.FullName()
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
# If the user specified a repository path, it exists, and the file is
# contained in it, use the specified repository path
if _repository:
repo = FileInfo(_repository).FullName()
root_dir = project_dir
while os.path.exists(root_dir):
# allow case insensitive compare on Windows
if os.path.normcase(root_dir) == os.path.normcase(repo):
return os.path.relpath(fullname, root_dir).replace('\\', '/')
one_up_dir = os.path.dirname(root_dir)
if one_up_dir == root_dir:
break
root_dir = one_up_dir
if os.path.exists(os.path.join(project_dir, ".svn")):
# If there's a .svn file in the current directory, we recursively look
# up the directory tree for the top of the SVN checkout
root_dir = project_dir
one_up_dir = os.path.dirname(root_dir)
while os.path.exists(os.path.join(one_up_dir, ".svn")):
root_dir = os.path.dirname(root_dir)
one_up_dir = os.path.dirname(one_up_dir)
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
# searching up from the current path.
root_dir = current_dir = os.path.dirname(fullname)
while current_dir != os.path.dirname(current_dir):
if (os.path.exists(os.path.join(current_dir, ".git")) or
os.path.exists(os.path.join(current_dir, ".hg")) or
os.path.exists(os.path.join(current_dir, ".svn"))):
root_dir = current_dir
current_dir = os.path.dirname(current_dir)
if (os.path.exists(os.path.join(root_dir, ".git")) or
os.path.exists(os.path.join(root_dir, ".hg")) or
os.path.exists(os.path.join(root_dir, ".svn"))):
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Don't know what to do; header guard warnings may be wrong...
return fullname
def Split(self):
"""Splits the file into the directory, basename, and extension.
For 'chrome/browser/browser.cc', Split() would
return ('chrome/browser', 'browser', '.cc')
Returns:
A tuple of (directory, basename, extension).
"""
googlename = self.RepositoryName()
project, rest = os.path.split(googlename)
return (project,) + os.path.splitext(rest)
def BaseName(self):
"""File base name - text after the final slash, before the final period."""
return self.Split()[1]
def Extension(self):
"""File extension - text following the final period, includes that period."""
return self.Split()[2]
def NoExtension(self):
"""File has no source file extension."""
return '/'.join(self.Split()[0:2])
def IsSource(self):
"""File has a source file extension."""
return _IsSourceExtension(self.Extension()[1:])
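# Illustrative FileInfo behaviour (hypothetical path, assuming RepositoryName
# leaves it unchanged): FileInfo('chrome/browser/browser.cc').Split() gives
# ('chrome/browser', 'browser', '.cc'), so BaseName() is 'browser',
# Extension() is '.cc', and IsSource() is True when 'cc' is a configured
# source extension.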
def _ShouldPrintError(category, confidence, linenum):
"""If confidence >= verbose, category passes filter and is not suppressed."""
# There are three ways we might decide not to print an error message:
# a "NOLINT(category)" comment appears in the source,
# the verbosity level isn't high enough, or the filters filter it out.
if IsErrorSuppressedByNolint(category, linenum):
return False
if confidence < _cpplint_state.verbose_level:
return False
is_filtered = False
for one_filter in _Filters():
if one_filter.startswith('-'):
if category.startswith(one_filter[1:]):
is_filtered = True
elif one_filter.startswith('+'):
if category.startswith(one_filter[1:]):
is_filtered = False
else:
assert False # should have been checked for in SetFilter.
if is_filtered:
return False
return True
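# Illustrative filter evaluation (hypothetical filter list): with filters
# ['-whitespace', '+whitespace/indent'], a 'whitespace/indent' error survives
# because the later '+' entry re-enables it, while 'whitespace/tab' stays
# filtered; the last matching prefix in _Filters() wins.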
def Error(filename, linenum, category, confidence, message):
"""Logs the fact we've found a lint error.
We log where the error was found, and also our confidence in the error,
that is, how certain we are this is a legitimate style regression, and
not a misidentification or a use that's sometimes justified.
False positives can be suppressed by the use of
"cpplint(category)" comments on the offending line. These are
parsed into _error_suppressions.
Args:
filename: The name of the file containing the error.
linenum: The number of the line containing the error.
category: A string used to describe the "category" this bug
falls under: "whitespace", say, or "runtime". Categories
may have a hierarchy separated by slashes: "whitespace/indent".
confidence: A number from 1-5 representing a confidence score for
the error, with 5 meaning that we are certain of the problem,
and 1 meaning that it could be a legitimate construct.
message: The error message.
"""
if _ShouldPrintError(category, confidence, linenum):
_cpplint_state.IncrementErrorCount(category)
if _cpplint_state.output_format == 'vs7':
_cpplint_state.PrintError('%s(%s): warning: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
elif _cpplint_state.output_format == 'eclipse':
sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
elif _cpplint_state.output_format == 'junit':
_cpplint_state.AddJUnitFailure(filename, linenum, message, category,
confidence)
else:
final_message = '%s:%s: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence)
sys.stderr.write(final_message)
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Match a single C style comment on the same line.
_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
# Matches multi-line C style comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of space removal so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on left side but only if there's a non-character
# on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
_RE_PATTERN_C_COMMENTS + r'\s+|' +
r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
_RE_PATTERN_C_COMMENTS + r')')
def IsCppString(line):
"""Does line terminate so, that the next symbol is in string constant.
This function does not consider single-line nor multi-line comments.
Args:
    line: a partial line of code, from position 0 up to some index n.
Returns:
True, if next character appended to 'line' is inside a
string constant.
"""
line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
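# Illustrative IsCppString results (assumed inputs):
#   IsCppString('printf("hello')    -> True   (an unterminated string literal)
#   IsCppString('printf("hello")')  -> False  (quotes balance out)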
def CleanseRawStrings(raw_lines):
"""Removes C++11 raw strings from lines.
Before:
static const char kData[] = R"(
multi-line string
)";
After:
static const char kData[] = ""
(replaced by blank line)
"";
Args:
raw_lines: list of raw lines.
Returns:
list of lines with C++11 raw strings replaced by empty strings.
"""
delimiter = None
lines_without_raw_strings = []
for line in raw_lines:
if delimiter:
# Inside a raw string, look for the end
end = line.find(delimiter)
if end >= 0:
# Found the end of the string, match leading space for this
# line and resume copying the original lines, and also insert
# a "" on the last line.
leading_space = Match(r'^(\s*)\S', line)
line = leading_space.group(1) + '""' + line[end + len(delimiter):]
delimiter = None
else:
# Haven't found the end yet, append a blank line.
line = '""'
# Look for beginning of a raw string, and replace them with
# empty strings. This is done in a loop to handle multiple raw
# strings on the same line.
while delimiter is None:
# Look for beginning of a raw string.
# See 2.14.15 [lex.string] for syntax.
#
# Once we have matched a raw string, we check the prefix of the
# line to make sure that the line is not part of a single line
# comment. It's done this way because we remove raw strings
# before removing comments as opposed to removing comments
# before removing raw strings. This is because there are some
# cpplint checks that requires the comments to be preserved, but
# we don't want to check comments that are inside raw strings.
matched = Match(r'^(.*?)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
if (matched and
not Match(r'^([^\'"]|\'(\\.|[^\'])*\'|"(\\.|[^"])*")*//',
matched.group(1))):
delimiter = ')' + matched.group(2) + '"'
end = matched.group(3).find(delimiter)
if end >= 0:
# Raw string ended on same line
line = (matched.group(1) + '""' +
matched.group(3)[end + len(delimiter):])
delimiter = None
else:
# Start of a multi-line raw string
line = matched.group(1) + '""'
else:
break
lines_without_raw_strings.append(line)
# TODO(unknown): if delimiter is not None here, we might want to
# emit a warning for unterminated string.
return lines_without_raw_strings
def FindNextMultiLineCommentStart(lines, lineix):
"""Find the beginning marker for a multiline comment."""
while lineix < len(lines):
if lines[lineix].strip().startswith('/*'):
# Only return this marker if the comment goes beyond this line
if lines[lineix].strip().find('*/', 2) < 0:
return lineix
lineix += 1
return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
"""We are inside a comment, find the end marker."""
while lineix < len(lines):
if lines[lineix].strip().endswith('*/'):
return lineix
lineix += 1
return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
"""Clears a range of lines for multi-line comments."""
  # Having /**/ dummy comments makes the lines non-empty, so we will not get
# unnecessary blank line warnings later in the code.
for i in range(begin, end):
lines[i] = '/**/'
def RemoveMultiLineComments(filename, lines, error):
"""Removes multiline (c-style) comments from lines."""
lineix = 0
while lineix < len(lines):
lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
if lineix_begin >= len(lines):
return
lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
if lineix_end >= len(lines):
error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
'Could not find end of multi-line comment')
return
RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
lineix = lineix_end + 1
def CleanseComments(line):
"""Removes //-comments and single-line C-style /* */ comments.
Args:
line: A line of C++ source.
Returns:
The line with single-line comments removed.
"""
commentpos = line.find('//')
if commentpos != -1 and not IsCppString(line[:commentpos]):
line = line[:commentpos].rstrip()
# get rid of /* ... */
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
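# Illustrative CleanseComments results (assumed inputs):
#   CleanseComments('int x;  // counter')     -> 'int x;'
#   CleanseComments('int y; /* temp */ = 0')  -> 'int y; = 0'
# Multi-line /* ... */ comments are handled separately by
# RemoveMultiLineComments.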
class CleansedLines(object):
"""Holds 4 copies of all lines with different preprocessing applied to them.
1) elided member contains lines without strings and comments.
2) lines member contains lines without comments.
3) raw_lines member contains all the lines without processing.
4) lines_without_raw_strings member is same as raw_lines, but with C++11 raw
strings removed.
All these members are of <type 'list'>, and of the same length.
"""
def __init__(self, lines):
self.elided = []
self.lines = []
self.raw_lines = lines
self.num_lines = len(lines)
self.lines_without_raw_strings = CleanseRawStrings(lines)
for linenum in range(len(self.lines_without_raw_strings)):
self.lines.append(CleanseComments(
self.lines_without_raw_strings[linenum]))
elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
self.elided.append(CleanseComments(elided))
def NumLines(self):
"""Returns the number of lines represented."""
return self.num_lines
@staticmethod
def _CollapseStrings(elided):
"""Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings.
"""
if _RE_PATTERN_INCLUDE.match(elided):
return elided
# Remove escaped characters first to make quote/single quote collapsing
# basic. Things that look like escaped characters shouldn't occur
# outside of strings and chars.
elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
# Replace quoted strings and digit separators. Both single quotes
# and double quotes are processed in the same loop, otherwise
# nested quotes wouldn't work.
collapsed = ''
while True:
# Find the first quote character
match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
if not match:
collapsed += elided
break
head, quote, tail = match.groups()
if quote == '"':
# Collapse double quoted strings
second_quote = tail.find('"')
if second_quote >= 0:
collapsed += head + '""'
elided = tail[second_quote + 1:]
else:
# Unmatched double quote, don't bother processing the rest
# of the line since this is probably a multiline string.
collapsed += elided
break
else:
# Found single quote, check nearby text to eliminate digit separators.
#
# There is no special handling for floating point here, because
# the integer/fractional/exponent parts would all be parsed
# correctly as long as there are digits on both sides of the
# separator. So we are fine as long as we don't see something
# like "0.'3" (gcc 4.9.0 will not allow this literal).
if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
collapsed += head + match_literal.group(1).replace("'", '')
elided = match_literal.group(2)
else:
second_quote = tail.find('\'')
if second_quote >= 0:
collapsed += head + "''"
elided = tail[second_quote + 1:]
else:
# Unmatched single quote
collapsed += elided
break
return collapsed
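# Illustrative _CollapseStrings behaviour (assumed C++ input lines): the text
#   s = "a \"b\" c";        collapses to   s = "";
# and a digit separator such as 1'000'000 collapses to 1000000, because the
# quotes have digits on both sides and are treated as separators rather than
# character literals.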
def FindEndOfExpressionInLine(line, startpos, stack):
"""Find the position just after the end of current parenthesized expression.
Args:
line: a CleansedLines line.
startpos: start searching at this position.
stack: nesting stack at startpos.
Returns:
On finding matching end: (index just after matching end, None)
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at end of this line)
"""
for i in xrange(startpos, len(line)):
char = line[i]
if char in '([{':
# Found start of parenthesized expression, push to expression stack
stack.append(char)
elif char == '<':
# Found potential start of template argument list
if i > 0 and line[i - 1] == '<':
# Left shift operator
if stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
# operator<, don't add to stack
continue
else:
# Tentative start of template argument list
stack.append('<')
elif char in ')]}':
# Found end of parenthesized expression.
#
# If we are currently expecting a matching '>', the pending '<'
# must have been an operator. Remove them from expression stack.
while stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
if ((stack[-1] == '(' and char == ')') or
(stack[-1] == '[' and char == ']') or
(stack[-1] == '{' and char == '}')):
stack.pop()
if not stack:
return (i + 1, None)
else:
# Mismatched parentheses
return (-1, None)
elif char == '>':
# Found potential end of template argument list.
# Ignore "->" and operator functions
if (i > 0 and
(line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
continue
# Pop the stack if there is a matching '<'. Otherwise, ignore
# this '>' since it must be an operator.
if stack:
if stack[-1] == '<':
stack.pop()
if not stack:
return (i + 1, None)
elif char == ';':
      # Found something that looks like the end of a statement.  If we are
      # currently expecting a '>', the matching '<' must have been an
      # operator, since template argument lists should not contain statements.
while stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
# Did not find end of expression or unbalanced parentheses on this line
return (-1, stack)
def CloseExpression(clean_lines, linenum, pos):
"""If input points to ( or { or [ or <, finds the position that closes it.
If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
linenum/pos that correspond to the closing of the expression.
TODO(unknown): cpplint spends a fair bit of time matching parentheses.
Ideally we would want to index all opening and closing parentheses once
and have CloseExpression be just a simple lookup, but due to preprocessor
tricks, this is not so easy.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *past* the closing brace, or
(line, len(lines), -1) if we never find a close. Note we ignore
strings and comments when matching; and the line we return is the
'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):
return (line, clean_lines.NumLines(), -1)
# Check first line
(end_pos, stack) = FindEndOfExpressionInLine(line, pos, [])
if end_pos > -1:
return (line, linenum, end_pos)
# Continue scanning forward
while stack and linenum < clean_lines.NumLines() - 1:
linenum += 1
line = clean_lines.elided[linenum]
(end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack)
if end_pos > -1:
return (line, linenum, end_pos)
# Did not find end of expression before end of file, give up
return (line, clean_lines.NumLines(), -1)
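# Illustrative CloseExpression result (hypothetical elided line): for the line
# 'f(a, b(c), d);' with pos pointing at the first '(', the returned position
# is 13, i.e. just past the matching ')', with the nested parentheses handled
# by the stack in FindEndOfExpressionInLine.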
def FindStartOfExpressionInLine(line, endpos, stack):
"""Find position at the matching start of current expression.
This is almost the reverse of FindEndOfExpressionInLine, but note
that the input position and returned position differs by 1.
Args:
line: a CleansedLines line.
endpos: start searching at this position.
stack: nesting stack at endpos.
Returns:
On finding matching start: (index at matching start, None)
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at beginning of this line)
"""
i = endpos
while i >= 0:
char = line[i]
if char in ')]}':
# Found end of expression, push to expression stack
stack.append(char)
elif char == '>':
# Found potential end of template argument list.
#
# Ignore it if it's a "->" or ">=" or "operator>"
if (i > 0 and
(line[i - 1] == '-' or
Match(r'\s>=\s', line[i - 1:]) or
Search(r'\boperator\s*$', line[0:i]))):
i -= 1
else:
stack.append('>')
elif char == '<':
# Found potential start of template argument list
if i > 0 and line[i - 1] == '<':
# Left shift operator
i -= 1
else:
# If there is a matching '>', we can pop the expression stack.
# Otherwise, ignore this '<' since it must be an operator.
if stack and stack[-1] == '>':
stack.pop()
if not stack:
return (i, None)
elif char in '([{':
# Found start of expression.
#
# If there are any unmatched '>' on the stack, they must be
# operators. Remove those.
while stack and stack[-1] == '>':
stack.pop()
if not stack:
return (-1, None)
if ((char == '(' and stack[-1] == ')') or
(char == '[' and stack[-1] == ']') or
(char == '{' and stack[-1] == '}')):
stack.pop()
if not stack:
return (i, None)
else:
# Mismatched parentheses
return (-1, None)
elif char == ';':
      # Found something that looks like the end of a statement.  If we are
      # currently expecting a '<', the matching '>' must have been an
      # operator, since template argument lists should not contain statements.
while stack and stack[-1] == '>':
stack.pop()
if not stack:
return (-1, None)
i -= 1
return (-1, stack)
def ReverseCloseExpression(clean_lines, linenum, pos):
"""If input points to ) or } or ] or >, finds the position that opens it.
If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
linenum/pos that correspond to the opening of the expression.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *at* the opening brace, or
(line, 0, -1) if we never find the matching opening brace. Note
we ignore strings and comments when matching; and the line we
return is the 'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
if line[pos] not in ')}]>':
return (line, 0, -1)
# Check last line
(start_pos, stack) = FindStartOfExpressionInLine(line, pos, [])
if start_pos > -1:
return (line, linenum, start_pos)
# Continue scanning backward
while stack and linenum > 0:
linenum -= 1
line = clean_lines.elided[linenum]
(start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack)
if start_pos > -1:
return (line, linenum, start_pos)
# Did not find start of expression before beginning of file, give up
return (line, 0, -1)
def CheckForCopyright(filename, lines, error):
"""Logs an error if no Copyright message appears at the top of the file."""
# We'll say it should occur by line 10. Don't forget there's a
# dummy line at the front.
for line in range(1, min(len(lines), 11)):
if re.search(r'Copyright', lines[line], re.I): break
else: # means no copyright line was found
error(filename, 0, 'legal/copyright', 5,
'No copyright message found. '
'You should have a line: "Copyright [year] <Copyright Owner>"')
def GetIndentLevel(line):
"""Return the number of leading spaces in line.
Args:
line: A string to check.
Returns:
An integer count of leading spaces, possibly zero.
"""
indent = Match(r'^( *)\S', line)
if indent:
return len(indent.group(1))
else:
return 0
def GetHeaderGuardCPPVariable(filename):
"""Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file.
"""
# Restores original filename in case that cpplint is invoked from Emacs's
# flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
# Replace 'c++' with 'cpp'.
filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')
fileinfo = FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
if _root:
suffix = os.sep
# On Windows using directory separator will leave us with
# "bogus escape error" unless we properly escape regex.
if suffix == '\\':
suffix += '\\'
file_path_from_root = re.sub('^' + _root + suffix, '', file_path_from_root)
return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'
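# Illustrative guard derivation (hypothetical path, default --root, assuming
# RepositoryName returns the path unchanged):
#   'chrome/browser/tab.h'  ->  'CHROME_BROWSER_TAB_H_'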
def CheckForHeaderGuard(filename, clean_lines, error):
"""Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. For other
headers, checks that the full pathname is used.
Args:
filename: The name of the C++ header file.
clean_lines: A CleansedLines instance containing the file.
error: The function to call with any errors found.
"""
# Don't check for header guards if there are error suppression
# comments somewhere in this file.
#
# Because this is silencing a warning for a nonexistent line, we
# only support the very specific NOLINT(build/header_guard) syntax,
# and not the general NOLINT or NOLINT(*) syntax.
raw_lines = clean_lines.lines_without_raw_strings
for i in raw_lines:
if Search(r'//\s*NOLINT\(build/header_guard\)', i):
return
# Allow pragma once instead of header guards
for i in raw_lines:
if Search(r'^\s*#pragma\s+once', i):
return
cppvar = GetHeaderGuardCPPVariable(filename)
ifndef = ''
ifndef_linenum = 0
define = ''
endif = ''
endif_linenum = 0
for linenum, line in enumerate(raw_lines):
linesplit = line.split()
if len(linesplit) >= 2:
# find the first occurrence of #ifndef and #define, save arg
if not ifndef and linesplit[0] == '#ifndef':
# set ifndef to the header guard presented on the #ifndef line.
ifndef = linesplit[1]
ifndef_linenum = linenum
if not define and linesplit[0] == '#define':
define = linesplit[1]
# find the last occurrence of #endif, save entire line
if line.startswith('#endif'):
endif = line
endif_linenum = linenum
if not ifndef or not define or ifndef != define:
error(filename, 0, 'build/header_guard', 5,
'No #ifndef header guard found, suggested CPP variable is: %s' %
cppvar)
return
# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
# for backward compatibility.
if ifndef != cppvar:
error_level = 0
if ifndef != cppvar + '_':
error_level = 5
ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,
error)
error(filename, ifndef_linenum, 'build/header_guard', error_level,
'#ifndef header guard has wrong style, please use: %s' % cppvar)
# Check for "//" comments on endif line.
ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
error)
match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
if match:
if match.group(1) == '_':
# Issue low severity warning for deprecated double trailing underscore
error(filename, endif_linenum, 'build/header_guard', 0,
'#endif line should be "#endif // %s"' % cppvar)
return
# Didn't find the corresponding "//" comment. If this file does not
# contain any "//" comments at all, it could be that the compiler
# only wants "/**/" comments, look for those instead.
no_single_line_comments = True
for i in xrange(1, len(raw_lines) - 1):
line = raw_lines[i]
if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line):
no_single_line_comments = False
break
if no_single_line_comments:
match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
if match:
if match.group(1) == '_':
# Low severity warning for double trailing underscore
error(filename, endif_linenum, 'build/header_guard', 0,
'#endif line should be "#endif /* %s */"' % cppvar)
return
# Didn't find anything
error(filename, endif_linenum, 'build/header_guard', 5,
'#endif line should be "#endif // %s"' % cppvar)
def CheckHeaderFileIncluded(filename, include_state, error):
"""Logs an error if a source file does not framework its header."""
# Do not check test files
fileinfo = FileInfo(filename)
if Search(_TEST_FILE_SUFFIX, fileinfo.BaseName()):
return
for ext in GetHeaderExtensions():
basefilename = filename[0:len(filename) - len(fileinfo.Extension())]
headerfile = basefilename + '.' + ext
if not os.path.exists(headerfile):
continue
headername = FileInfo(headerfile).RepositoryName()
first_include = None
for section_list in include_state.include_list:
for f in section_list:
if headername in f[0] or f[0] in headername:
return
if not first_include:
first_include = f[1]
    error(filename, first_include, 'build/include', 5,
          '%s should include its header file %s' % (fileinfo.RepositoryName(),
headername))
def CheckForBadCharacters(filename, lines, error):
"""Logs an error for each line containing bad characters.
Two kinds of bad characters:
1. Unicode replacement characters: These indicate that either the file
contained invalid UTF-8 (likely) or Unicode replacement characters (which
it shouldn't). Note that it's possible for this to throw off line
numbering if the invalid UTF-8 occurred adjacent to a newline.
2. NUL bytes. These are problematic for some tools.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
for linenum, line in enumerate(lines):
if unicode_escape_decode('\ufffd') in line:
error(filename, linenum, 'readability/utf8', 5,
'Line contains invalid UTF-8 (or Unicode replacement character).')
if '\0' in line:
error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
def CheckForNewlineAtEOF(filename, lines, error):
"""Logs an error if there is no newline char at the end of the file.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
# The array lines() was created by adding two newlines to the
# original file (go figure), then splitting on \n.
# To verify that the file ends in \n, we just have to make sure the
# last-but-two element of lines() exists and is empty.
if len(lines) < 3 or lines[-2]:
error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
"""Logs an error if we see /* ... */ or "..." that extend past one line.
/* ... */ comments are legit inside macros, for one line.
Otherwise, we prefer // comments, so it's ok to warn about the
other. Likewise, it's ok for strings to extend across multiple
lines, as long as a line continuation character (backslash)
terminates each line. Although not currently prohibited by the C++
style guide, it's ugly and unnecessary. We don't do well with either
in this lint program, so we warn about both.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remove all \\ (escaped backslashes) from the line. They are OK, and the
# second (escaped) slash may trigger later \" detection erroneously.
line = line.replace('\\\\', '')
if line.count('/*') > line.count('*/'):
error(filename, linenum, 'readability/multiline_comment', 5,
'Complex multi-line /*...*/-style comment found. '
'Lint may give bogus warnings. '
'Consider replacing these with //-style comments, '
'with #if 0...#endif, '
'or with more clearly structured multi-line comments.')
if (line.count('"') - line.count('\\"')) % 2:
error(filename, linenum, 'readability/multiline_string', 5,
'Multi-line string ("...") found. This lint script doesn\'t '
'do well with such strings, and may give bogus warnings. '
'Use C++11 raw strings or concatenation instead.')
# (non-threadsafe name, thread-safe alternative, validation pattern)
#
# The validation pattern is used to eliminate false positives such as:
# _rand(); // false positive due to substring match.
# ->rand(); // some member function rand().
# ACMRandom rand(seed); // some variable named rand.
# ISAACRandom rand(); // another variable named rand.
#
# Basically we require the return value of these functions to be used
# in some expression context on the same line by matching on some
# operator before the function name. This eliminates constructors and
# member function calls.
_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
_THREADING_LIST = (
('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
('strtok(', 'strtok_r(',
_UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
)
def CheckPosixThreading(filename, clean_lines, linenum, error):
"""Checks for calls to thread-unsafe functions.
Much code has been originally written without consideration of
multi-threading. Also, engineers are relying on their old experience;
they have learned posix before threading extensions were added. These
tests guide the engineers to use thread-safe functions (when using
posix directly).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST:
# Additional pattern matching check to confirm that this is the
# function we are looking for
if Search(pattern, line):
error(filename, linenum, 'runtime/threadsafe_fn', 2,
'Consider using ' + multithread_safe_func +
'...) instead of ' + single_thread_func +
'...) for improved thread safety.')
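# Illustrative match for the table above (assumed source lines): 'seed = rand();'
# is flagged because '= rand()' satisfies _UNSAFE_FUNC_PREFIX + r'rand\(\)',
# while 'ACMRandom rand(seed);' is not, since no operator precedes the call.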
def CheckVlogArguments(filename, clean_lines, linenum, error):
"""Checks that VLOG() is only used for defining a logging level.
For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
VLOG(FATAL) are not.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
error(filename, linenum, 'runtime/vlog', 5,
'VLOG() should be used with numeric verbosity level. '
'Use LOG() if you want symbolic severity levels.')
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
r'^\s*\*\w+(\+\+|--);')
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
"""Checks for invalid increment *count++.
  For example, the following function:
void increment_counter(int* count) {
*count++;
}
is invalid, because it effectively does count++, moving pointer, and should
be replaced with ++*count, (*count)++ or *count += 1.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if _RE_PATTERN_INVALID_INCREMENT.match(line):
error(filename, linenum, 'runtime/invalid_increment', 5,
'Changing pointer instead of value (or unused value of operator*).')
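# Illustrative cases (assumed source lines): '  *count++;' matches
# _RE_PATTERN_INVALID_INCREMENT and is reported; '(*count)++;' and '++*count;'
# do not match and pass.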
def IsMacroDefinition(clean_lines, linenum):
if Search(r'^#define', clean_lines[linenum]):
return True
if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]):
return True
return False
def IsForwardClassDeclaration(clean_lines, linenum):
return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum])
class _BlockInfo(object):
"""Stores information about a generic block of code."""
def __init__(self, linenum, seen_open_brace):
self.starting_linenum = linenum
self.seen_open_brace = seen_open_brace
self.open_parentheses = 0
self.inline_asm = _NO_ASM
self.check_namespace_indentation = False
def CheckBegin(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text up to the opening brace.
This is mostly for checking the text after the class identifier
and the "{", usually where the base class is specified. For other
blocks, there isn't much to check, so we always pass.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text after the closing brace.
This is mostly used for checking end of namespace comments.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def IsBlockInfo(self):
"""Returns true if this block is a _BlockInfo.
This is convenient for verifying that an object is an instance of
a _BlockInfo, but not an instance of any of the derived classes.
Returns:
True for this class, False for derived classes.
"""
return self.__class__ == _BlockInfo
class _ExternCInfo(_BlockInfo):
"""Stores information about an 'extern "C"' block."""
def __init__(self, linenum):
_BlockInfo.__init__(self, linenum, True)
class _ClassInfo(_BlockInfo):
"""Stores information about a class."""
def __init__(self, name, class_or_struct, clean_lines, linenum):
_BlockInfo.__init__(self, linenum, False)
self.name = name
self.is_derived = False
self.check_namespace_indentation = True
if class_or_struct == 'struct':
self.access = 'public'
self.is_struct = True
else:
self.access = 'private'
self.is_struct = False
# Remember initial indentation level for this class. Using raw_lines here
# instead of elided to account for leading comments.
self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])
# Try to find the end of the class. This will be confused by things like:
# class A {
# } *x = { ...
#
# But it's still good enough for CheckSectionSpacing.
self.last_line = 0
depth = 0
for i in range(linenum, clean_lines.NumLines()):
line = clean_lines.elided[i]
depth += line.count('{') - line.count('}')
if not depth:
self.last_line = i
break
def CheckBegin(self, filename, clean_lines, linenum, error):
# Look for a bare ':'
if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
self.is_derived = True
def CheckEnd(self, filename, clean_lines, linenum, error):
# If there is a DISALLOW macro, it should appear near the end of
# the class.
seen_last_thing_in_class = False
for i in xrange(linenum - 1, self.starting_linenum, -1):
match = Search(
r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' +
self.name + r'\)',
clean_lines.elided[i])
if match:
if seen_last_thing_in_class:
error(filename, i, 'readability/constructors', 3,
match.group(1) + ' should be the last thing in the class')
break
if not Match(r'^\s*$', clean_lines.elided[i]):
seen_last_thing_in_class = True
# Check that closing brace is aligned with beginning of the class.
# Only do this if the closing brace is indented by only whitespaces.
# This means we will not check single-line class definitions.
indent = Match(r'^( *)\}', clean_lines.elided[linenum])
if indent and len(indent.group(1)) != self.class_indent:
if self.is_struct:
parent = 'struct ' + self.name
else:
parent = 'class ' + self.name
error(filename, linenum, 'whitespace/indent', 3,
'Closing brace should be aligned with beginning of %s' % parent)
class _NamespaceInfo(_BlockInfo):
"""Stores information about a namespace."""
def __init__(self, name, linenum):
_BlockInfo.__init__(self, linenum, False)
self.name = name or ''
self.check_namespace_indentation = True
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Check end of namespace comments."""
line = clean_lines.raw_lines[linenum]
# Check how many lines is enclosed in this namespace. Don't issue
# warning for missing namespace comments if there aren't enough
# lines. However, do apply checks if there is already an end of
# namespace comment and it's incorrect.
#
# TODO(unknown): We always want to check end of namespace comments
# if a namespace is large, but sometimes we also want to apply the
# check if a short namespace contained nontrivial things (something
# other than forward declarations). There is currently no logic on
# deciding what these nontrivial things are, so this check is
# triggered by namespace size only, which works most of the time.
if (linenum - self.starting_linenum < 10
and not Match(r'^\s*};*\s*(//|/\*).*\bnamespace\b', line)):
return
# Look for matching comment at end of namespace.
#
# Note that we accept C style "/* */" comments for terminating
# namespaces, so that code that terminate namespaces inside
# preprocessor macros can be cpplint clean.
#
# We also accept stuff like "// end of namespace <name>." with the
# period at the end.
#
# Besides these, we don't accept anything else, otherwise we might
# get false negatives when existing comment is a substring of the
# expected namespace.
if self.name:
# Named namespace
if not Match((r'^\s*};*\s*(//|/\*).*\bnamespace\s+' +
re.escape(self.name) + r'[\*/\.\\\s]*$'),
line):
error(filename, linenum, 'readability/namespace', 5,
'Namespace should be terminated with "// namespace %s"' %
self.name)
else:
# Anonymous namespace
if not Match(r'^\s*};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
# If "// namespace anonymous" or "// anonymous namespace (more text)",
# mention "// anonymous namespace" as an acceptable form
if Match(r'^\s*}.*\b(namespace anonymous|anonymous namespace)\b', line):
error(filename, linenum, 'readability/namespace', 5,
'Anonymous namespace should be terminated with "// namespace"'
' or "// anonymous namespace"')
else:
error(filename, linenum, 'readability/namespace', 5,
'Anonymous namespace should be terminated with "// namespace"')
class _PreprocessorInfo(object):
"""Stores checkpoints of nesting stacks when #if/#else is seen."""
def __init__(self, stack_before_if):
# The entire nesting stack before #if
self.stack_before_if = stack_before_if
# The entire nesting stack up to #else
self.stack_before_else = []
# Whether we have already seen #else or #elif
self.seen_else = False
class NestingState(object):
"""Holds states related to parsing braces."""
def __init__(self):
# Stack for tracking all braces. An object is pushed whenever we
# see a "{", and popped when we see a "}". Only 3 types of
# objects are possible:
# - _ClassInfo: a class or struct.
# - _NamespaceInfo: a namespace.
# - _BlockInfo: some other type of block.
self.stack = []
# Top of the previous stack before each Update().
#
# Because the nesting_stack is updated at the end of each line, we
# had to do some convoluted checks to find out what is the current
# scope at the beginning of the line. This check is simplified by
# saving the previous top of nesting stack.
#
# We could save the full stack, but we only need the top. Copying
# the full nesting stack would slow down cpplint by ~10%.
self.previous_stack_top = []
# Stack of _PreprocessorInfo objects.
self.pp_stack = []
def SeenOpenBrace(self):
"""Check if we have seen the opening brace for the innermost block.
Returns:
True if we have seen the opening brace, False if the innermost
block is still expecting an opening brace.
"""
return (not self.stack) or self.stack[-1].seen_open_brace
def InNamespaceBody(self):
"""Check if we are currently one level inside a namespace body.
Returns:
True if top of the stack is a namespace block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
def InExternC(self):
"""Check if we are currently one level inside an 'extern "C"' block.
Returns:
True if top of the stack is an extern block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _ExternCInfo)
def InClassDeclaration(self):
"""Check if we are currently one level inside a class or struct declaration.
Returns:
True if top of the stack is a class/struct, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _ClassInfo)
def InAsmBlock(self):
"""Check if we are currently one level inside an inline ASM block.
Returns:
True if the top of the stack is a block containing inline ASM.
"""
return self.stack and self.stack[-1].inline_asm != _NO_ASM
def InTemplateArgumentList(self, clean_lines, linenum, pos):
"""Check if current position is inside template argument list.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: position just after the suspected template argument.
Returns:
True if (linenum, pos) is inside template arguments.
"""
while linenum < clean_lines.NumLines():
# Find the earliest character that might indicate a template argument
line = clean_lines.elided[linenum]
match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
if not match:
linenum += 1
pos = 0
continue
token = match.group(1)
pos += len(match.group(0))
# These things do not look like template argument list:
# class Suspect {
# class Suspect x; }
if token in ('{', '}', ';'): return False
# These things look like template argument list:
# template <class Suspect>
# template <class Suspect = default_value>
# template <class Suspect[]>
# template <class Suspect...>
if token in ('>', '=', '[', ']', '.'): return True
# Check if token is an unmatched '<'.
# If not, move on to the next character.
if token != '<':
pos += 1
if pos >= len(line):
linenum += 1
pos = 0
continue
# If we just found a single '<', we can't be sure whether it starts a
# template argument list, so we need to find the matching '>'.
(_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
if end_pos < 0:
# Not sure if template argument list or syntax error in file
return False
linenum = end_line
pos = end_pos
return False
def UpdatePreprocessor(self, line):
"""Update preprocessor stack.
We need to handle preprocessors due to classes like this:
#ifdef SWIG
struct ResultDetailsPageElementExtensionPoint {
#else
struct ResultDetailsPageElementExtensionPoint : public Extension {
#endif
We make the following assumptions (good enough for most files):
- Preprocessor condition evaluates to true from #if up to first
#else/#elif/#endif.
- Preprocessor condition evaluates to false from #else/#elif up
to #endif. We still perform lint checks on these lines, but
these do not affect nesting stack.
Args:
line: current line to check.
"""
if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
# Beginning of #if block, save the nesting stack here. The saved
# stack will allow us to restore the parsing state in the #else case.
self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
elif Match(r'^\s*#\s*(else|elif)\b', line):
# Beginning of #else block
if self.pp_stack:
if not self.pp_stack[-1].seen_else:
# This is the first #else or #elif block. Remember the
# whole nesting stack up to this point. This is what we
# keep after the #endif.
self.pp_stack[-1].seen_else = True
self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
# Restore the stack to how it was before the #if
self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
else:
# TODO(unknown): unexpected #else, issue warning?
pass
elif Match(r'^\s*#\s*endif\b', line):
# End of #if or #else blocks.
if self.pp_stack:
# If we saw an #else, we will need to restore the nesting
# stack to its former state before the #else, otherwise we
# will just continue from where we left off.
if self.pp_stack[-1].seen_else:
# Here we can just use a shallow copy since we are the last
# reference to it.
self.stack = self.pp_stack[-1].stack_before_else
# Drop the corresponding #if
self.pp_stack.pop()
else:
# TODO(unknown): unexpected #endif, issue warning?
pass
# TODO(unknown): Update() is too long, but we will refactor later.
def Update(self, filename, clean_lines, linenum, error):
"""Update nesting state with current line.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remember top of the previous nesting stack.
#
# The stack is always pushed/popped and not modified in place, so
# we can just do a shallow copy instead of copy.deepcopy. Using
# deepcopy would slow down cpplint by ~28%.
if self.stack:
self.previous_stack_top = self.stack[-1]
else:
self.previous_stack_top = None
# Update pp_stack
self.UpdatePreprocessor(line)
# Count parentheses. This is to avoid adding struct arguments to
# the nesting stack.
if self.stack:
inner_block = self.stack[-1]
depth_change = line.count('(') - line.count(')')
inner_block.open_parentheses += depth_change
# Also check if we are starting or ending an inline assembly block.
if inner_block.inline_asm in (_NO_ASM, _END_ASM):
if (depth_change != 0 and
inner_block.open_parentheses == 1 and
_MATCH_ASM.match(line)):
# Enter assembly block
inner_block.inline_asm = _INSIDE_ASM
else:
# Not entering assembly block. If previous line was _END_ASM,
# we will now shift to _NO_ASM state.
inner_block.inline_asm = _NO_ASM
elif (inner_block.inline_asm == _INSIDE_ASM and
inner_block.open_parentheses == 0):
# Exit assembly block
inner_block.inline_asm = _END_ASM
# Consume namespace declaration at the beginning of the line. Do
# this in a loop so that we catch same line declarations like this:
# namespace proto2 { namespace bridge { class MessageSet; } }
while True:
# Match start of namespace. The "\b\s*" below catches namespace
# declarations even if they aren't followed by whitespace; this
# is so that we don't confuse our namespace checker. The
# missing spaces will be flagged by CheckSpacing.
namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
if not namespace_decl_match:
break
new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
self.stack.append(new_namespace)
line = namespace_decl_match.group(2)
if line.find('{') != -1:
new_namespace.seen_open_brace = True
line = line[line.find('{') + 1:]
# Look for a class declaration in whatever is left of the line
# after parsing namespaces. The regexp accounts for decorated classes
# such as in:
# class LOCKABLE API Object {
# };
class_decl_match = Match(
r'^(\s*(?:template\s*<[\w\s<>,:=]*>\s*)?'
r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
r'(.*)$', line)
if (class_decl_match and
(not self.stack or self.stack[-1].open_parentheses == 0)):
# We do not want to accept classes that are actually template arguments:
# template <class Ignore1,
# class Ignore2 = Default<Args>,
# template <Args> class Ignore3>
# void Function() {};
#
# To avoid template argument cases, we scan forward and look for
# an unmatched '>'. If we see one, assume we are inside a
# template argument list.
end_declaration = len(class_decl_match.group(1))
if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
self.stack.append(_ClassInfo(
class_decl_match.group(3), class_decl_match.group(2),
clean_lines, linenum))
line = class_decl_match.group(4)
# If we have not yet seen the opening brace for the innermost block,
# run checks here.
if not self.SeenOpenBrace():
self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
# Update access control if we are inside a class/struct
if self.stack and isinstance(self.stack[-1], _ClassInfo):
classinfo = self.stack[-1]
access_match = Match(
r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
r':(?:[^:]|$)',
line)
if access_match:
classinfo.access = access_match.group(2)
# Check that access keywords are indented +1 space. Skip this
# check if the keywords are not preceded by whitespaces.
indent = access_match.group(1)
if (len(indent) != classinfo.class_indent + 1 and
Match(r'^\s*$', indent)):
if classinfo.is_struct:
parent = 'struct ' + classinfo.name
else:
parent = 'class ' + classinfo.name
slots = ''
if access_match.group(3):
slots = access_match.group(3)
error(filename, linenum, 'whitespace/indent', 3,
'%s%s: should be indented +1 space inside %s' % (
access_match.group(2), slots, parent))
# Consume braces or semicolons from what's left of the line
while True:
# Match first brace, semicolon, or closed parenthesis.
matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
if not matched:
break
token = matched.group(1)
if token == '{':
# If namespace or class hasn't seen an opening brace yet, mark
# namespace/class head as complete. Push a new block onto the
# stack otherwise.
if not self.SeenOpenBrace():
self.stack[-1].seen_open_brace = True
elif Match(r'^extern\s*"[^"]*"\s*\{', line):
self.stack.append(_ExternCInfo(linenum))
else:
self.stack.append(_BlockInfo(linenum, True))
if _MATCH_ASM.match(line):
self.stack[-1].inline_asm = _BLOCK_ASM
elif token == ';' or token == ')':
# If we haven't seen an opening brace yet, but we already saw
# a semicolon, this is probably a forward declaration. Pop
# the stack for these.
#
# Similarly, if we haven't seen an opening brace yet, but we
# already saw a closing parenthesis, then these are probably
# function arguments with extra "class" or "struct" keywords.
# Also pop the stack for these.
if not self.SeenOpenBrace():
self.stack.pop()
else: # token == '}'
# Perform end of block checks and pop the stack.
if self.stack:
self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
self.stack.pop()
line = matched.group(2)
def InnermostClass(self):
"""Get class info on the top of the stack.
Returns:
A _ClassInfo object if we are inside a class, or None otherwise.
"""
for i in range(len(self.stack), 0, -1):
classinfo = self.stack[i - 1]
if isinstance(classinfo, _ClassInfo):
return classinfo
return None
def CheckCompletedBlocks(self, filename, error):
"""Checks that all classes and namespaces have been completely parsed.
Call this when all lines in a file have been processed.
Args:
filename: The name of the current file.
error: The function to call with any errors found.
"""
# Note: This test can result in false positives if #ifdef constructs
# get in the way of brace matching. See the testBuildClass test in
# cpplint_unittest.py for an example of this.
for obj in self.stack:
if isinstance(obj, _ClassInfo):
error(filename, obj.starting_linenum, 'build/class', 5,
'Failed to find complete declaration of class %s' %
obj.name)
elif isinstance(obj, _NamespaceInfo):
error(filename, obj.starting_linenum, 'build/namespaces', 5,
'Failed to find complete declaration of namespace %s' %
obj.name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
nesting_state, error):
r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if Search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if Search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage-class specifier (static, extern, typedef, etc) should be '
'at the beginning of the declaration.')
if Match(r'\s*#\s*endif\s*[^/\s]+', line):
error(filename, linenum, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(filename, linenum, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
line):
error(filename, linenum, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
# TODO(unknown): Could it be expanded safely to arbitrary references,
# without triggering too many false positives? The first
# attempt triggered 5 warnings for mostly benign code in the regtest, hence
# the restriction.
# Here's the original regexp, for the reference:
# type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
# r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
error(filename, linenum, 'runtime/member_string_references', 2,
'const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.')
# Everything else in this function operates on class declarations.
# Return early if the top of the nesting stack is not a class, or if
# the class head is not completed yet.
classinfo = nesting_state.InnermostClass()
if not classinfo or not classinfo.seen_open_brace:
return
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style.
explicit_constructor_match = Match(
r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*'
r'\(((?:[^()]|\([^()]*\))*)\)'
% re.escape(base_classname),
line)
if explicit_constructor_match:
is_marked_explicit = explicit_constructor_match.group(1)
if not explicit_constructor_match.group(2):
constructor_args = []
else:
constructor_args = explicit_constructor_match.group(2).split(',')
# collapse arguments so that commas in template parameter lists and function
# argument parameter lists don't split arguments in two
i = 0
while i < len(constructor_args):
constructor_arg = constructor_args[i]
while (constructor_arg.count('<') > constructor_arg.count('>') or
constructor_arg.count('(') > constructor_arg.count(')')):
constructor_arg += ',' + constructor_args[i + 1]
del constructor_args[i + 1]
constructor_args[i] = constructor_arg
i += 1
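# Illustrative example (not from the original source): splitting
# "Foo(std::map<int, int> m)" on ',' yields ['std::map<int', ' int> m'];
# the loop above re-joins them into ['std::map<int, int> m'] because the
# first piece has an unmatched '<'.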
variadic_args = [arg for arg in constructor_args if '&&...' in arg]
defaulted_args = [arg for arg in constructor_args if '=' in arg]
noarg_constructor = (not constructor_args or # empty arg list
# 'void' arg specifier
(len(constructor_args) == 1 and
constructor_args[0].strip() == 'void'))
onearg_constructor = ((len(constructor_args) == 1 and # exactly one arg
not noarg_constructor) or
# all but at most one arg defaulted
(len(constructor_args) >= 1 and
not noarg_constructor and
len(defaulted_args) >= len(constructor_args) - 1) or
# variadic arguments with zero or one argument
(len(constructor_args) <= 2 and
len(variadic_args) >= 1))
initializer_list_constructor = bool(
onearg_constructor and
Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
copy_constructor = bool(
onearg_constructor and
Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
% re.escape(base_classname), constructor_args[0].strip()))
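# Illustrative examples (assumed, not exhaustive): for a class Foo, an
# argument such as "const Foo& other" or "Foo<T> const &" is treated as a
# copy constructor here, while "const Bar&" is not.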
if (not is_marked_explicit and
onearg_constructor and
not initializer_list_constructor and
not copy_constructor):
if defaulted_args or variadic_args:
error(filename, linenum, 'runtime/explicit', 5,
'Constructors callable with one argument '
'should be marked explicit.')
else:
error(filename, linenum, 'runtime/explicit', 5,
'Single-parameter constructors should be marked explicit.')
elif is_marked_explicit and not onearg_constructor:
if noarg_constructor:
error(filename, linenum, 'runtime/explicit', 5,
'Zero-parameter constructors should not be marked explicit.')
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
"""Checks for the correctness of various spacing around function calls.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Since function calls often occur inside if/for/while/switch
# expressions - which have their own, more liberal conventions - we
# first see if we should be looking inside such an expression for a
# function call, to which we can apply more strict standards.
fncall = line # if there's no control flow construct, look at whole line
for pattern in (r'\bif\s*\((.*)\)\s*{',
r'\bfor\s*\((.*)\)\s*{',
r'\bwhile\s*\((.*)\)\s*[{;]',
r'\bswitch\s*\((.*)\)\s*{'):
match = Search(pattern, line)
if match:
fncall = match.group(1) # look inside the parens for function calls
break
# Except in if/for/while/switch, there should never be space
# immediately inside parens (eg "f( 3, 4 )"). We make an exception
# for nested parens ( (a+b) + c ). Likewise, there should never be
# a space before a ( when it's a function argument. I assume it's a
# function argument when the char before the whitespace is legal in
# a function name (alnum + _) and we're not starting a macro. Also ignore
# pointers and references to arrays and functions because they're too tricky:
# we use a very simple way to recognize these:
# " (something)(maybe-something)" or
# " (something)(maybe-something," or
# " (something)[something]"
# Note that we assume the contents of [] to be short enough that
# they'll never need to wrap.
if ( # Ignore control structures.
not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
fncall) and
# Ignore pointers/references to functions.
not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
# Ignore pointers/references to arrays.
not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
error(filename, linenum, 'whitespace/parens', 4,
'Extra space after ( in function call')
elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Extra space after (')
if (Search(r'\w\s+\(', fncall) and
not Search(r'_{0,2}asm_{0,2}\s+_{0,2}volatile_{0,2}\s+\(', fncall) and
not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and
not Search(r'\bcase\s+\(', fncall)):
# TODO(unknown): Space after an operator function seem to be a common
# error, silence those for now by restricting them to highest verbosity.
if Search(r'\boperator_*\b', line):
error(filename, linenum, 'whitespace/parens', 0,
'Extra space before ( in function call')
else:
error(filename, linenum, 'whitespace/parens', 4,
'Extra space before ( in function call')
# If the ) is followed only by a newline or a { + newline, assume it's
# part of a control statement (if/while/etc), and don't complain
if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
# If the closing parenthesis is preceded by only whitespaces,
# try to give a more descriptive error message.
if Search(r'^\s+\)', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Closing ) should be moved to the previous line')
else:
error(filename, linenum, 'whitespace/parens', 2,
'Extra space before )')
def IsBlankLine(line):
"""Returns true if the given line is blank.
We consider a line to be blank if the line is empty or consists of
only white spaces.
Args:
line: A line of a string.
Returns:
True, if the given line is blank.
"""
return not line or line.isspace()
def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
error):
is_namespace_indent_item = (
len(nesting_state.stack) > 1 and
nesting_state.stack[-1].check_namespace_indentation and
isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and
nesting_state.previous_stack_top == nesting_state.stack[-2])
if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
clean_lines.elided, line):
CheckItemIndentationInNamespace(filename, clean_lines.elided,
line, error)
def CheckForFunctionLengths(filename, clean_lines, linenum,
function_state, error):
"""Reports for long function bodies.
For an overview why this is done, see:
https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Only checks unindented functions, so class members are unchecked.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Blank/comment lines are not counted so as to avoid encouraging the removal
of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[linenum]
joined_line = ''
starting_func = False
regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
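# Illustrative example (assumed): a line such as "void MyClass::Foo(int x) {"
# matches with group(1) == 'void MyClass::Foo'.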
match_result = Match(regexp, line)
if match_result:
# If the name is all caps and underscores, figure it's a macro and
# ignore it, unless it's TEST or TEST_F.
function_name = match_result.group(1).split()[-1]
if function_name == 'TEST' or function_name == 'TEST_F' or (
not Match(r'[A-Z_]+$', function_name)):
starting_func = True
if starting_func:
body_found = False
for start_linenum in range(linenum, clean_lines.NumLines()):
start_line = lines[start_linenum]
joined_line += ' ' + start_line.lstrip()
if Search(r'(;|})', start_line): # Declarations and trivial functions
body_found = True
break # ... ignore
elif Search(r'{', start_line):
body_found = True
function = Search(r'((\w|:)*)\(', line).group(1)
if Match(r'TEST', function): # Handle TEST... macros
parameter_regexp = Search(r'(\(.*\))', joined_line)
if parameter_regexp: # Ignore bad syntax
function += parameter_regexp.group(1)
else:
function += '()'
function_state.Begin(function)
break
if not body_found:
# No body for the function (or evidence of a non-function) was found.
error(filename, linenum, 'readability/fn_size', 5,
'Lint failed to find start of function body.')
elif Match(r'^\}\s*$', line): # function end
function_state.Check(error, filename, linenum)
function_state.End()
elif not Match(r'^\s*$', line):
function_state.Count() # Count non-blank/non-comment lines.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
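# Illustrative matches (assumed): for "// TODO(alice): fix" the groups are
# (' ', '(alice)', ' '); for "//TODO: fix" they are ('', None, ' '), which
# the checks below flag as a missing username.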
def CheckComment(line, filename, linenum, next_line_start, error):
"""Checks for common mistakes in comments.
Args:
line: The line in question.
filename: The name of the current file.
linenum: The number of the line to check.
next_line_start: The first non-whitespace column of the next line.
error: The function to call with any errors found.
"""
commentpos = line.find('//')
if commentpos != -1:
# Check if the // may be in quotes. If so, ignore it
if re.sub(r'\\.', '', line[0:commentpos]).count('"') % 2 == 0:
# Allow one space for new scopes, two spaces otherwise:
if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
((commentpos >= 1 and
line[commentpos-1] not in string.whitespace) or
(commentpos >= 2 and
line[commentpos-2] not in string.whitespace))):
error(filename, linenum, 'whitespace/comments', 2,
'At least two spaces is best between code and comments')
# Checks for common mistakes in TODO comments.
comment = line[commentpos:]
match = _RE_PATTERN_TODO.match(comment)
if match:
# One whitespace is correct; zero whitespace is handled elsewhere.
leading_whitespace = match.group(1)
if len(leading_whitespace) > 1:
error(filename, linenum, 'whitespace/todo', 2,
'Too many spaces before TODO')
username = match.group(2)
if not username:
error(filename, linenum, 'readability/todo', 2,
'Missing username in TODO; it should look like '
'"// TODO(my_username): Stuff."')
middle_whitespace = match.group(3)
# Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
if middle_whitespace != ' ' and middle_whitespace != '':
error(filename, linenum, 'whitespace/todo', 2,
'TODO(my_username) should be followed by a space')
# If the comment contains an alphanumeric character, there
# should be a space somewhere between it and the // unless
# it's a /// or //! Doxygen comment.
if (Match(r'//[^ ]*\w', comment) and
not Match(r'(///|//\!)(\s+|$)', comment)):
error(filename, linenum, 'whitespace/comments', 4,
'Should have a space between // and comment')
def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
"""Checks for improper use of DISALLOW* macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
if not matched:
return
if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
if nesting_state.stack[-1].access != 'private':
error(filename, linenum, 'readability/constructors', 3,
'%s must be in the private: section' % matched.group(1))
else:
# Found DISALLOW* macro outside a class declaration, or perhaps it
# was used inside a function when it should have been part of the
# class declaration. We could issue a warning here, but it
# probably resulted in a compiler error already.
pass
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't add a blank line
after public/protected/private, don't have too many blank lines in a row.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
# raw strings.
raw = clean_lines.lines_without_raw_strings
line = raw[linenum]
# Before nixing comments, check if the line is blank for no good
# reason. This includes the first line after a block is opened, and
# blank lines at the end of a function (i.e., right before a line like '}').
#
# Skip all the blank line checks if we are immediately inside a
# namespace body. In other words, don't issue blank line warnings
# for this block:
# namespace {
#
# }
#
# A warning about missing end of namespace comments will be issued instead.
#
# Also skip blank line checks for 'extern "C"' blocks, which are formatted
# like namespaces.
if (IsBlankLine(line) and
not nesting_state.InNamespaceBody() and
not nesting_state.InExternC()):
elided = clean_lines.elided
prev_line = elided[linenum - 1]
prevbrace = prev_line.rfind('{')
# TODO(unknown): Don't complain if line before blank line, and line after,
# both start with alnums and are indented the same amount.
# This ignores whitespace at the start of a namespace block
# because those are not usually indented.
if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
# OK, we have a blank line at the start of a code block. Before we
# complain, we check if it is an exception to the rule: The previous
# non-empty line has the parameters of a function header that are indented
# 4 spaces (because they did not fit in an 80 column line when placed on
# the same line as the function name). We also check for the case where
# the previous line is indented 6 spaces, which may happen when the
# initializers of a constructor do not fit into an 80 column line.
exception = False
if Match(r' {6}\w', prev_line): # Initializer list?
# We are looking for the opening column of initializer list, which
# should be indented 4 spaces to cause 6 space indentation afterwards.
search_position = linenum-2
while (search_position >= 0
and Match(r' {6}\w', elided[search_position])):
search_position -= 1
exception = (search_position >= 0
and elided[search_position][:5] == ' :')
else:
# Search for the function arguments or an initializer list. We use a
# simple heuristic here: If the line is indented 4 spaces; and we have a
# closing paren, without the opening paren, followed by an opening brace
# or colon (for initializer lists) we assume that it is the last line of
# a function header. If we have a colon indented 4 spaces, it is an
# initializer list.
exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
prev_line)
or Match(r' {4}:', prev_line))
if not exception:
error(filename, linenum, 'whitespace/blank_line', 2,
'Redundant blank line at the start of a code block '
'should be deleted.')
# Ignore blank lines at the end of a block in a long if-else
# chain, like this:
# if (condition1) {
# // Something followed by a blank line
#
# } else if (condition2) {
# // Something else
# }
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
if (next_line
and Match(r'\s*}', next_line)
and next_line.find('} else ') == -1):
error(filename, linenum, 'whitespace/blank_line', 3,
'Redundant blank line at the end of a code block '
'should be deleted.')
matched = Match(r'\s*(public|protected|private):', prev_line)
if matched:
error(filename, linenum, 'whitespace/blank_line', 3,
'Do not leave a blank line after "%s:"' % matched.group(1))
# Next, check comments
next_line_start = 0
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
next_line_start = len(next_line) - len(next_line.lstrip())
CheckComment(line, filename, linenum, next_line_start, error)
# get rid of comments and strings
line = clean_lines.elided[linenum]
# You shouldn't have spaces before your brackets, except maybe after
# 'delete []' or 'return []() {};'
if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line):
error(filename, linenum, 'whitespace/braces', 5,
'Extra space before [')
# In range-based for, we want spaces before and after the colon, but
# not around "::" tokens that might appear.
if (Search(r'for *\(.*[^:]:[^: ]', line) or
Search(r'for *\(.*[^: ]:[^:]', line)):
error(filename, linenum, 'whitespace/forcolon', 2,
'Missing space around colon in range-based for loop')
def CheckOperatorSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing around operators.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Don't try to do spacing checks for operator methods. Do this by
# replacing the troublesome characters with something else,
# preserving column position for all other characters.
#
# The replacement is done repeatedly to avoid false positives from
# operators that call operators.
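# Illustrative example (assumed): "if (a.operator==(b)) {" becomes
# "if (a.operator__(b)) {" so the '==' is not mistaken for a comparison
# that needs surrounding spaces.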
while True:
match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
if match:
line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
else:
break
# We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
# Otherwise not. Note we only check for non-spaces on *both* sides;
# sometimes people put non-spaces on one side when aligning ='s among
# many lines (not that this is behavior that I approve of...)
if ((Search(r'[\w.]=', line) or
Search(r'=[\w.]', line))
and not Search(r'\b(if|while|for) ', line)
# Operators taken from [lex.operators] in C++11 standard.
and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
and not Search(r'operator=', line)):
error(filename, linenum, 'whitespace/operators', 4,
'Missing spaces around =')
# It's ok not to have spaces around binary operators like + - * /, but if
# there's too little whitespace, we get concerned. It's hard to tell,
# though, so we punt on this one for now. TODO.
# You should always have whitespace around binary operators.
#
# Check <= and >= first to avoid false positives with < and >, then
# check non-framework lines for spacing around < and >.
#
# If the operator is followed by a comma, assume it's being used in a
# macro context and don't do any checks. This avoids false
# positives.
#
# Note that && is not included here. This is because there are too
# many false positives due to RValue references.
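# Illustrative example (assumed): 'if (a==b)' is flagged with
# "Missing spaces around ==", while 'if (a == b)' is not.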
match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around %s' % match.group(1))
elif not Match(r'#.*framework', line):
# Look for < that is not surrounded by spaces. This is only
# triggered if both sides are missing spaces, even though
# technically we should flag if at least one side is missing a
# space. This is done to avoid some false positives with shifts.
match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
if match:
(_, _, end_pos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if end_pos <= -1:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <')
# Look for > that is not surrounded by spaces. Similar to the
# above, we only trigger if both sides are missing spaces to avoid
# false positives with shifts.
match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
if match:
(_, _, start_pos) = ReverseCloseExpression(
clean_lines, linenum, len(match.group(1)))
if start_pos <= -1:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >')
# We allow no-spaces around << when used like this: 10<<20, but
# not otherwise (particularly, not when used as streams)
#
# We also allow operators following an opening parenthesis, since
# those tend to be macros that deal with operators.
match = Search(r'(operator|[^\s(<])(?:L|UL|LL|ULL|l|ul|ll|ull)?<<([^\s,=<])', line)
if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and
not (match.group(1) == 'operator' and match.group(2) == ';')):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <<')
# We allow no-spaces around >> for almost anything. This is because
# C++11 allows ">>" to close nested templates, which accounts for
# most cases when ">>" is not followed by a space.
#
# We still warn on ">>" followed by alpha character, because that is
# likely due to ">>" being used for right shifts, e.g.:
# value >> alpha
#
# When ">>" is used to close templates, the alphanumeric letter that
# follows would be part of an identifier, and there should still be
# a space separating the template type and the identifier.
# type<type<type>> alpha
match = Search(r'>>[a-zA-Z_]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >>')
# There shouldn't be space around unary operators
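# Illustrative example (assumed): '! done' and 'i ++;' are flagged, while
# '!done' and 'i++;' are not.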
match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
if match:
error(filename, linenum, 'whitespace/operators', 4,
'Extra space for operator %s' % match.group(1))
def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing around parentheses.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# No spaces after an if, while, switch, or for
match = Search(r' (if\(|for\(|while\(|switch\()', line)
if match:
error(filename, linenum, 'whitespace/parens', 5,
'Missing space before ( in %s' % match.group(1))
# For if/for/while/switch, the left and right parens should be
# consistent about how many spaces are inside the parens, and
# there should either be zero or one spaces inside the parens.
# We don't want: "if ( foo)" or "if ( foo )".
# Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
match = Search(r'\b(if|for|while|switch)\s*'
r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
line)
if match:
if len(match.group(2)) != len(match.group(4)):
if not (match.group(3) == ';' and
len(match.group(2)) == 1 + len(match.group(4)) or
not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
error(filename, linenum, 'whitespace/parens', 5,
'Mismatching spaces inside () in %s' % match.group(1))
if len(match.group(2)) not in [0, 1]:
error(filename, linenum, 'whitespace/parens', 5,
'Should have zero or one spaces inside ( and ) in %s' %
match.group(1))
def CheckCommaSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing near commas and semicolons.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
raw = clean_lines.lines_without_raw_strings
line = clean_lines.elided[linenum]
# You should always have a space after a comma (either as fn arg or operator)
#
# This does not apply when the non-space character following the
# comma is another comma, since the only time when that happens is
# for empty macro arguments.
#
# We run this check in two passes: first pass on elided lines to
# verify that lines contain missing whitespaces, second pass on raw
# lines to confirm that those missing whitespaces are not due to
# elided comments.
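# Illustrative example (assumed): in "void operator,(int a, int b);" the
# ReplaceAll below turns "operator,(" into "F(" so that the comma that is
# part of the operator name does not trigger the warning by itself.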
if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and
Search(r',[^,\s]', raw[linenum])):
error(filename, linenum, 'whitespace/comma', 3,
'Missing space after ,')
# You should always have a space after a semicolon
# except for few corner cases
# TODO(unknown): clarify whether 'if (1) { return 1;}' requires one more
# space after ;
if Search(r';[^\s};\\)/]', line):
error(filename, linenum, 'whitespace/semicolon', 3,
'Missing space after ;')
def _IsType(clean_lines, nesting_state, expr):
"""Check if expression looks like a type name, returns true if so.
Args:
clean_lines: A CleansedLines instance containing the file.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
expr: The expression to check.
Returns:
True, if token looks like a type.
"""
# Keep only the last token in the expression
last_word = Match(r'^.*(\b\S+)$', expr)
if last_word:
token = last_word.group(1)
else:
token = expr
# Match native types and stdint types
if _TYPES.match(token):
return True
# Try a bit harder to match templated types. Walk up the nesting
# stack until we find something that resembles a typename
# declaration for what we are looking for.
typename_pattern = (r'\b(?:typename|class|struct)\s+' + re.escape(token) +
r'\b')
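# Illustrative example (assumed): if token is 'T', the pattern matches
# declarations such as "typename T" or "class T" in an enclosing
# template header.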
block_index = len(nesting_state.stack) - 1
while block_index >= 0:
if isinstance(nesting_state.stack[block_index], _NamespaceInfo):
return False
# Found where the opening brace is. We want to scan from this
# line up to the beginning of the function, minus a few lines.
# template <typename Type1, // stop scanning here
# ...>
# class C
# : public ... { // start scanning here
last_line = nesting_state.stack[block_index].starting_linenum
next_block_start = 0
if block_index > 0:
next_block_start = nesting_state.stack[block_index - 1].starting_linenum
first_line = last_line
while first_line >= next_block_start:
if clean_lines.elided[first_line].find('template') >= 0:
break
first_line -= 1
if first_line < next_block_start:
# Didn't find any "template" keyword before reaching the next block,
# there are probably no template things to check for this block
block_index -= 1
continue
# Look for typename in the specified range
for i in xrange(first_line, last_line + 1, 1):
if Search(typename_pattern, clean_lines.elided[i]):
return True
block_index -= 1
return False
def CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for horizontal spacing near commas.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Except after an opening paren, or after another opening brace (in case of
# an initializer list, for instance), you should have spaces before your
# braces when they are delimiting blocks, classes, namespaces etc.
# And since you should never have braces at the beginning of a line,
# this is an easy test. Except that braces used for initialization don't
# follow the same rule; we often don't want spaces before those.
match = Match(r'^(.*[^ ({>]){', line)
if match:
# Try a bit harder to check for brace initialization. This
# happens in one of the following forms:
# Constructor() : initializer_list_{} { ... }
# Constructor{}.MemberFunction()
# Type variable{};
# FunctionCall(type{}, ...);
# LastArgument(..., type{});
# LOG(INFO) << type{} << " ...";
# map_of_type[{...}] = ...;
# ternary = expr ? new type{} : nullptr;
# OuterTemplate<InnerTemplateConstructor<Type>{}>
#
# We check for the character following the closing brace, and
# silence the warning if it's one of those listed above, i.e.
# "{.;,)<>]:".
#
# To account for nested initializer list, we allow any number of
# closing braces up to "{;,)<". We can't simply silence the
# warning on first sight of closing brace, because that would
# cause false negatives for things that are not initializer lists.
# Silence this: But not this:
# Outer{ if (...) {
# Inner{...} if (...){ // Missing space before {
# }; }
#
# There is a false negative with this approach if people inserted
# spurious semicolons, e.g. "if (cond){};", but we will catch the
# spurious semicolon with a separate check.
leading_text = match.group(1)
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
trailing_text = ''
if endpos > -1:
trailing_text = endline[endpos:]
for offset in xrange(endlinenum + 1,
min(endlinenum + 3, clean_lines.NumLines() - 1)):
trailing_text += clean_lines.elided[offset]
# We also suppress warnings for `uint64_t{expression}` etc., as the style
# guide recommends brace initialization for integral types to avoid
# overflow/truncation.
if (not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text)
and not _IsType(clean_lines, nesting_state, leading_text)):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before {')
# Make sure '} else {' has spaces.
if Search(r'}else', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before else')
# You shouldn't have a space before a semicolon at the end of the line.
# There's a special case for "for" since the style guide allows space before
# the semicolon there.
if Search(r':\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use {} instead.')
elif Search(r'^\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
'use {} instead.')
elif (Search(r'\s+;\s*$', line) and
not Search(r'\bfor\b', line)):
error(filename, linenum, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
'statement, use {} instead.')
def IsDecltype(clean_lines, linenum, column):
"""Check if the token ending on (linenum, column) is decltype().
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: the number of the line to check.
column: end column of the token to check.
Returns:
True if this token is decltype() expression, False otherwise.
"""
(text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)
if start_col < 0:
return False
if Search(r'\bdecltype\s*$', text[0:start_col]):
return True
return False
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
"""Checks for additional blank line issues related to sections.
Currently the only thing checked here is blank line before protected/private.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
class_info: A _ClassInfo objects.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Skip checks if the class is small, where small means 25 lines or less.
# 25 lines seems like a good cutoff since that's the usual height of
# terminals, and any class that can't fit in one screen can't really
# be considered "small".
#
# Also skip checks if we are on the first line. This accounts for
# classes that look like
# class Foo { public: ... };
#
# If we didn't find the end of the class, last_line would be zero,
# and the check will be skipped by the first condition.
if (class_info.last_line - class_info.starting_linenum <= 24 or
linenum <= class_info.starting_linenum):
return
matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
if matched:
# Issue warning if the line before public/protected/private was
# not a blank line, but don't do this if the previous line contains
# "class" or "struct". This can happen two ways:
# - We are at the beginning of the class.
# - We are forward-declaring an inner class that is semantically
# private, but needed to be public for implementation reasons.
# Also ignores cases where the previous line ends with a backslash as can be
# common when defining classes in C macros.
prev_line = clean_lines.lines[linenum - 1]
if (not IsBlankLine(prev_line) and
not Search(r'\b(class|struct)\b', prev_line) and
not Search(r'\\$', prev_line)):
# Try a bit harder to find the beginning of the class. This is to
# account for multi-line base-specifier lists, e.g.:
# class Derived
# : public Base {
end_class_head = class_info.starting_linenum
for i in range(class_info.starting_linenum, linenum):
if Search(r'\{\s*$', clean_lines.lines[i]):
end_class_head = i
break
if end_class_head < linenum - 1:
error(filename, linenum, 'whitespace/blank_line', 3,
'"%s:" should be preceded by a blank line' % matched.group(1))
def GetPreviousNonBlankLine(clean_lines, linenum):
"""Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
linenum: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line.
"""
prevlinenum = linenum - 1
while prevlinenum >= 0:
prevline = clean_lines.elided[prevlinenum]
if not IsBlankLine(prevline): # if not a blank line...
return (prevline, prevlinenum)
prevlinenum -= 1
return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
"""Looks for misplaced braces (e.g. at the end of line).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
if Match(r'\s*{\s*$', line):
# We allow an open brace to start a line in the case where someone is using
# braces in a block to explicitly create a new scope, which is commonly used
# to control the lifetime of stack-allocated variables. Braces are also
# used for brace initializers inside function calls. We don't detect this
# perfectly: we just don't complain if the last non-whitespace character on
# the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
# previous line starts a preprocessor block. We also allow a brace on the
# following line if it is part of an array initialization and would not fit
# within the 80 character limit of the preceding line.
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if (not Search(r'[,;:}{(]\s*$', prevline) and
not Match(r'\s*#', prevline) and
not (GetLineWidth(prevline) > _line_length - 2 and '[]' in prevline)):
error(filename, linenum, 'whitespace/braces', 4,
'{ should almost always be at the end of the previous line')
# An else clause should be on the same line as the preceding closing brace.
if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if Match(r'\s*}\s*$', prevline):
error(filename, linenum, 'whitespace/newline', 4,
'An else should appear on the same line as the preceding }')
# If braces come on one side of an else, they should be on both.
# However, we have to worry about "else if" that spans multiple lines!
if Search(r'else if\s*\(', line): # could be multi-line if
brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
# find the ( after the if
pos = line.find('else if')
pos = line.find('(', pos)
if pos > 0:
(endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
brace_on_right = endline[endpos:].find('{') != -1
if brace_on_left != brace_on_right: # must be brace after if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
# Likewise, an else should never have the else clause on the same line
if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
error(filename, linenum, 'whitespace/newline', 4,
'Else clause should never be on same line as else (use 2 lines)')
# In the same way, a do/while should never be on one line
if Match(r'\s*do [^\s{]', line):
error(filename, linenum, 'whitespace/newline', 4,
'do/while clauses should not be on a single line')
# Check single-line if/else bodies. The style guide says 'curly braces are not
# required for single-line statements'. We additionally allow multi-line,
# single statements, but we reject anything with more than one semicolon in
# it. This means that the first semicolon after the if should be at the end of
# its line, and the line after that should have an indent level equal to or
# lower than the if. We also check for ambiguous if/else nesting without
# braces.
if_else_match = Search(r'\b(if\s*\(|else\b)', line)
if if_else_match and not Match(r'\s*#', line):
if_indent = GetIndentLevel(line)
endline, endlinenum, endpos = line, linenum, if_else_match.end()
if_match = Search(r'\bif\s*\(', line)
if if_match:
# This could be a multiline if condition, so find the end first.
pos = if_match.end() - 1
(endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
# Check for an opening brace, either directly after the if or on the next
# line. If found, this isn't a single-statement conditional.
if (not Match(r'\s*{', endline[endpos:])
and not (Match(r'\s*$', endline[endpos:])
and endlinenum < (len(clean_lines.elided) - 1)
and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
while (endlinenum < len(clean_lines.elided)
and ';' not in clean_lines.elided[endlinenum][endpos:]):
endlinenum += 1
endpos = 0
if endlinenum < len(clean_lines.elided):
endline = clean_lines.elided[endlinenum]
# We allow a mix of whitespace and closing braces (e.g. for one-liner
# methods) and a single \ after the semicolon (for macros)
endpos = endline.find(';')
if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
# Semicolon isn't the last character, there's something trailing.
# Output a warning if the semicolon is not contained inside
# a lambda expression.
if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
endline):
error(filename, linenum, 'readability/braces', 4,
'If/else bodies with multiple statements require braces')
elif endlinenum < len(clean_lines.elided) - 1:
# Make sure the next line is dedented
next_line = clean_lines.elided[endlinenum + 1]
next_indent = GetIndentLevel(next_line)
# With ambiguous nested if statements, this will error out on the
# if that *doesn't* match the else, regardless of whether it's the
# inner one or outer one.
if (if_match and Match(r'\s*else\b', next_line)
and next_indent != if_indent):
error(filename, linenum, 'readability/braces', 4,
'Else clause should be indented at the same level as if. '
'Ambiguous nested if/else chains require braces.')
elif next_indent > if_indent:
error(filename, linenum, 'readability/braces', 4,
'If/else bodies with multiple statements require braces')
def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
"""Looks for redundant trailing semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Block bodies should not be followed by a semicolon. Due to C++11
# brace initialization, there are more places where semicolons are
# required than not, so we use a whitelist approach to check these
# rather than a blacklist. These are the places where "};" should
# be replaced by just "}":
# 1. Some flavor of block following closing parenthesis:
# for (;;) {};
# while (...) {};
# switch (...) {};
# Function(...) {};
# if (...) {};
# if (...) else if (...) {};
#
# 2. else block:
# if (...) else {};
#
# 3. const member function:
# Function(...) const {};
#
# 4. Block following some statement:
# x = 42;
# {};
#
# 5. Block at the beginning of a function:
# Function(...) {
# {};
# }
#
# Note that naively checking for the preceding "{" will also match
# braces inside multi-dimensional arrays, but this is fine since
# that expression will not contain semicolons.
#
# 6. Block following another block:
# while (true) {}
# {};
#
# 7. End of namespaces:
# namespace {};
#
# These semicolons seem far more common than other kinds of
# redundant semicolons, possibly due to people converting classes
# to namespaces. For now we do not warn for this case.
#
# Try matching case 1 first.
match = Match(r'^(.*\)\s*)\{', line)
if match:
# Matched closing parenthesis (case 1). Check the token before the
# matching opening parenthesis, and don't warn if it looks like a
# macro. This avoids these false positives:
# - macro that defines a base class
# - multi-line macro that defines a base class
# - macro that defines the whole class-head
#
# But we still issue warnings for macros that we know are safe to
# warn, specifically:
# - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
# - TYPED_TEST
# - INTERFACE_DEF
# - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
#
# We implement a whitelist of safe macros instead of a blacklist of
# unsafe macros, even though the latter appears less frequently in
# google code and would have been easier to implement. This is because
# the downside for getting the whitelist wrong means some extra
# semicolons, while the downside for getting the blacklist wrong
# would result in compile errors.
#
# In addition to macros, we also don't want to warn on
# - Compound literals
# - Lambdas
# - alignas specifier with anonymous structs
# - decltype
closing_brace_pos = match.group(1).rfind(')')
opening_parenthesis = ReverseCloseExpression(
clean_lines, linenum, closing_brace_pos)
if opening_parenthesis[2] > -1:
line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
macro = Search(r'\b([A-Z_][A-Z0-9_]*)\s*$', line_prefix)
func = Match(r'^(.*\])\s*$', line_prefix)
if ((macro and
macro.group(1) not in (
'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
(func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or
Search(r'\bdecltype$', line_prefix) or
Search(r'\s+=\s*$', line_prefix)):
match = None
if (match and
opening_parenthesis[1] > 1 and
Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
# Multi-line lambda-expression
match = None
else:
# Try matching cases 2-3.
match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
if not match:
# Try matching cases 4-6. These are always matched on separate lines.
#
# Note that we can't simply concatenate the previous line to the
# current line and do a single match, otherwise we may output
# duplicate warnings for the blank line case:
# if (cond) {
# // blank line
# }
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if prevline and Search(r'[;{}]\s*$', prevline):
match = Match(r'^(\s*)\{', line)
# Check matching closing brace
if match:
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
# Current {} pair is eligible for semicolon check, and we have found
# the redundant semicolon, output warning here.
#
# Note: because we are scanning forward for opening braces, and
# outputting warnings for the matching closing brace, if there are
# nested blocks with trailing semicolons, we will get the error
# messages in reversed order.
# We need to check the line forward for NOLINT
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[endlinenum-1], endlinenum-1,
error)
ParseNolintSuppressions(filename, raw_lines[endlinenum], endlinenum,
error)
error(filename, endlinenum, 'readability/braces', 4,
"You don't need a ; after a }")
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
"""Look for empty loop/conditional body with only a single semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Search for loop keywords at the beginning of the line. Because only
# whitespaces are allowed before the keywords, this will also ignore most
# do-while-loops, since those lines should start with closing brace.
#
# We also check "if" blocks here, since an empty conditional block
# is likely an error.
line = clean_lines.elided[linenum]
matched = Match(r'\s*(for|while|if)\s*\(', line)
if matched:
# Find the end of the conditional expression.
(end_line, end_linenum, end_pos) = CloseExpression(
clean_lines, linenum, line.find('('))
# Output warning if what follows the condition expression is a semicolon.
# No warning for all other cases, including whitespace or newline, since we
# have a separate check for semicolons preceded by whitespace.
if end_pos >= 0 and Match(r';', end_line[end_pos:]):
if matched.group(1) == 'if':
error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
'Empty conditional bodies should use {}')
else:
error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
'Empty loop bodies should use {} or continue')
# Check for if statements that have completely empty bodies (no comments)
# and no else clauses.
if end_pos >= 0 and matched.group(1) == 'if':
# Find the position of the opening { for the if statement.
# Return without logging an error if it has no brackets.
opening_linenum = end_linenum
opening_line_fragment = end_line[end_pos:]
# Loop until EOF or find anything that's not whitespace or opening {.
while not Search(r'^\s*\{', opening_line_fragment):
if Search(r'^(?!\s*$)', opening_line_fragment):
# Conditional has no brackets.
return
opening_linenum += 1
if opening_linenum == len(clean_lines.elided):
# Couldn't find conditional's opening { or any code before EOF.
return
opening_line_fragment = clean_lines.elided[opening_linenum]
# Set opening_line (opening_line_fragment may not be entire opening line).
opening_line = clean_lines.elided[opening_linenum]
# Find the position of the closing }.
opening_pos = opening_line_fragment.find('{')
if opening_linenum == end_linenum:
# We need to make opening_pos relative to the start of the entire line.
opening_pos += end_pos
(closing_line, closing_linenum, closing_pos) = CloseExpression(
clean_lines, opening_linenum, opening_pos)
if closing_pos < 0:
return
# Now construct the body of the conditional. This consists of the portion
# of the opening line after the {, all lines until the closing line,
# and the portion of the closing line before the }.
if (clean_lines.raw_lines[opening_linenum] !=
CleanseComments(clean_lines.raw_lines[opening_linenum])):
# Opening line ends with a comment, so conditional isn't empty.
return
if closing_linenum > opening_linenum:
# Opening line after the {. Ignore comments here since we checked above.
bodylist = list(opening_line[opening_pos+1:])
# All lines until closing line, excluding closing line, with comments.
bodylist.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum])
# Closing line before the }. Won't (and can't) have comments.
bodylist.append(clean_lines.elided[closing_linenum][:closing_pos-1])
body = '\n'.join(bodylist)
else:
# If statement has brackets and fits on a single line.
body = opening_line[opening_pos+1:closing_pos-1]
# Check if the body is empty
if not _EMPTY_CONDITIONAL_BODY_PATTERN.search(body):
return
# The body is empty. Now make sure there's not an else clause.
current_linenum = closing_linenum
current_line_fragment = closing_line[closing_pos:]
# Loop until EOF or find anything that's not whitespace or else clause.
while Search(r'^\s*$|^(?=\s*else)', current_line_fragment):
if Search(r'^(?=\s*else)', current_line_fragment):
# Found an else clause, so don't log an error.
return
current_linenum += 1
if current_linenum == len(clean_lines.elided):
break
current_line_fragment = clean_lines.elided[current_linenum]
# The body is empty and there's no else clause until EOF or other code.
error(filename, end_linenum, 'whitespace/empty_if_body', 4,
('If statement had no body and no else clause'))
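# For example (illustrative), the checks above would flag:
#
#   while (cond);        // -> whitespace/empty_loop_body
#   if (cond);           // -> whitespace/empty_conditional_body
#   if (cond) {}         // no else clause -> whitespace/empty_if_body
#
# but not:
#
#   if (cond) { /* intentionally empty */ }   // comment makes the body non-empty
#   if (cond) {} else { DoSomething(); }      // else clause present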
def FindCheckMacro(line):
"""Find a replaceable CHECK-like macro.
Args:
line: line to search on.
Returns:
(macro name, start position), or (None, -1) if no replaceable
macro is found.
"""
for macro in _CHECK_MACROS:
i = line.find(macro)
if i >= 0:
# Find opening parenthesis. Do a regular expression match here
# to make sure that we are matching the expected CHECK macro, as
# opposed to some other macro that happens to contain the CHECK
# substring.
matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
if not matched:
continue
return (macro, len(matched.group(1)))
return (None, -1)
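# Rough usage sketch (exact results depend on the contents of _CHECK_MACROS;
# MY_CHECK below is a made-up macro):
#
#   FindCheckMacro('  CHECK(x > 0);')   # -> ('CHECK', 7), 7 being the index
#                                       #    of the opening parenthesis
#   FindCheckMacro('  MY_CHECK(x);')    # -> (None, -1), not a known macro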
def CheckCheck(filename, clean_lines, linenum, error):
"""Checks the use of CHECK and EXPECT macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Decide the set of replacement macros that should be suggested
lines = clean_lines.elided
(check_macro, start_pos) = FindCheckMacro(lines[linenum])
if not check_macro:
return
# Find end of the boolean expression by matching parentheses
(last_line, end_line, end_pos) = CloseExpression(
clean_lines, linenum, start_pos)
if end_pos < 0:
return
# If the check macro is followed by something other than a
# semicolon, assume users will log their own custom error messages
# and don't suggest any replacements.
if not Match(r'\s*;', last_line[end_pos:]):
return
if linenum == end_line:
expression = lines[linenum][start_pos + 1:end_pos - 1]
else:
expression = lines[linenum][start_pos + 1:]
for i in xrange(linenum + 1, end_line):
expression += lines[i]
expression += last_line[0:end_pos - 1]
# Parse expression so that we can take parentheses into account.
# This avoids false positives for inputs like "CHECK((a < 4) == b)",
# which is not replaceable by CHECK_LE.
lhs = ''
rhs = ''
operator = None
while expression:
matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
r'==|!=|>=|>|<=|<|\()(.*)$', expression)
if matched:
token = matched.group(1)
if token == '(':
# Parenthesized operand
expression = matched.group(2)
(end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
if end < 0:
return # Unmatched parenthesis
lhs += '(' + expression[0:end]
expression = expression[end:]
elif token in ('&&', '||'):
# Logical and/or operators. This means the expression
# contains more than one term, for example:
# CHECK(42 < a && a < b);
#
# These are not replaceable with CHECK_LE, so bail out early.
return
elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
# Non-relational operator
lhs += token
expression = matched.group(2)
else:
# Relational operator
operator = token
rhs = matched.group(2)
break
else:
# Unparenthesized operand. Instead of appending to lhs one character
# at a time, we do another regular expression match to consume several
# characters at once if possible. Trivial benchmark shows that this
# is more efficient when the operands are longer than a single
# character, which is generally the case.
matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
if not matched:
matched = Match(r'^(\s*\S)(.*)$', expression)
if not matched:
break
lhs += matched.group(1)
expression = matched.group(2)
# Only apply checks if we got all parts of the boolean expression
if not (lhs and operator and rhs):
return
# Check that rhs do not contain logical operators. We already know
# that lhs is fine since the loop above parses out && and ||.
if rhs.find('&&') > -1 or rhs.find('||') > -1:
return
# At least one of the operands must be a constant literal. This is
# to avoid suggesting replacements for unprintable things like
# CHECK(variable != iterator)
#
# The following pattern matches decimal, hex integers, strings, and
# characters (in that order).
lhs = lhs.strip()
rhs = rhs.strip()
match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
if Match(match_constant, lhs) or Match(match_constant, rhs):
# Note: since we know both lhs and rhs, we can provide a more
# descriptive error message like:
# Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
# Instead of:
# Consider using CHECK_EQ instead of CHECK(a == b)
#
# We are still keeping the less descriptive message because if lhs
# or rhs gets long, the error message might become unreadable.
error(filename, linenum, 'readability/check', 2,
'Consider using %s instead of %s(a %s b)' % (
_CHECK_REPLACEMENT[check_macro][operator],
check_macro, operator))
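# For example (illustrative), with the expression parsing above:
#
#   CHECK(count == 42);       // -> "Consider using CHECK_EQ instead of CHECK(a == b)"
#   EXPECT_TRUE(size >= 1);   // -> suggests EXPECT_GE (one operand is a literal)
#   CHECK(a == b);            // no literal operand -> no suggestion
#   CHECK(x < 4 && y < 9);    // '&&' means multiple terms -> no suggestion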
def CheckAltTokens(filename, clean_lines, linenum, error):
"""Check alternative keywords being used in boolean expressions.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Avoid preprocessor lines
if Match(r'^\s*#', line):
return
# Last ditch effort to avoid multi-line comments. This will not help
# if the comment started before the current line or ended after the
# current line, but it catches most of the false positives. At least,
# it provides a way to workaround this warning for people who use
# multi-line comments in preprocessor macros.
#
# TODO(unknown): remove this once cpplint has better support for
# multi-line comments.
if line.find('/*') >= 0 or line.find('*/') >= 0:
return
for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
error(filename, linenum, 'readability/alt_tokens', 2,
'Use operator %s instead of %s' % (
_ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
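# For example (illustrative):
#
#   if (a and not b) { ... }   // -> "Use operator && instead of and"
#                              //    "Use operator ! instead of not"
#
# Preprocessor lines and lines containing /* or */ are skipped entirely.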
def GetLineWidth(line):
"""Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters.
"""
if isinstance(line, unicode):
width = 0
for uc in unicodedata.normalize('NFC', line):
if unicodedata.east_asian_width(uc) in ('W', 'F'):
width += 2
elif not unicodedata.combining(uc):
width += 1
return width
else:
return len(line)
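# Rough behaviour sketch:
#
#   GetLineWidth('int x = 0;')     # -> 10, plain ASCII falls through to len()
#   GetLineWidth(u'\u4e2d\u6587')  # -> 4, each wide CJK character counts as
#                                  #    two columns for line-length purposes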
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
# raw strings.
raw_lines = clean_lines.lines_without_raw_strings
line = raw_lines[linenum]
prev = raw_lines[linenum - 1] if linenum > 0 else ''
if line.find('\t') != -1:
error(filename, linenum, 'whitespace/tab', 1,
'Tab found; better to use spaces')
# One or three blank spaces at the beginning of the line is weird; it's
# hard to reconcile that with 2-space indents.
# NOTE: here are the conditions rob pike used for his tests. Mine aren't
# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
# if(RLENGTH > 20) complain = 0;
# if(match($0, " +(error|private|public|protected):")) complain = 0;
# if(match(prev, "&& *$")) complain = 0;
# if(match(prev, "\\|\\| *$")) complain = 0;
# if(match(prev, "[\",=><] *$")) complain = 0;
# if(match($0, " <<")) complain = 0;
# if(match(prev, " +for \\(")) complain = 0;
# if(prevodd && match(prevprev, " +for \\(")) complain = 0;
scope_or_label_pattern = r'\s*\w+\s*\w*:\s*\\?$'
classinfo = nesting_state.InnermostClass()
initial_spaces = 0
cleansed_line = clean_lines.elided[linenum]
while initial_spaces < len(line) and line[initial_spaces] == ' ':
initial_spaces += 1
# There are certain situations we allow one space, notably for
# section labels, and also lines containing multi-line raw strings.
# We also don't check for lines that look like continuation lines
# (of lines ending in double quotes, commas, equals, or angle brackets)
# because the rules for how to indent those are non-trivial.
if (not Search(r'[",=><] *$', prev) and
(initial_spaces == 1 or initial_spaces == 3) and
not Match(scope_or_label_pattern, cleansed_line) and
not (clean_lines.raw_lines[linenum] != line and
Match(r'^\s*""', line))):
error(filename, linenum, 'whitespace/indent', 3,
'Weird number of spaces at line-start. '
'Are you using a 2-space indent?')
if line and line[-1].isspace():
error(filename, linenum, 'whitespace/end_of_line', 4,
'Line ends in whitespace. Consider deleting these extra spaces.')
# Check if the line is a header guard.
is_header_guard = False
if file_extension in GetHeaderExtensions():
cppvar = GetHeaderGuardCPPVariable(filename)
if (line.startswith('#ifndef %s' % cppvar) or
line.startswith('#define %s' % cppvar) or
line.startswith('#endif // %s' % cppvar)):
is_header_guard = True
# #include lines and header guards can be long, since there's no clean way to
# split them.
#
# URLs can be long too. It's possible to split these, but it makes them
# harder to cut&paste.
#
# The "$Id:...$" comment may also get very long without it being the
# developers fault.
#
# Doxygen documentation copying can get pretty long when using an overloaded
# function declaration
if (not line.startswith('#include') and not is_header_guard and
not Match(r'^\s*//.*http(s?)://\S*$', line) and
not Match(r'^\s*//\s*[^\s]*$', line) and
not Match(r'^// \$Id:.*#[0-9]+ \$$', line) and
not Match(r'^\s*/// [@\\](copydoc|copydetails|copybrief) .*$', line)):
line_width = GetLineWidth(line)
if line_width > _line_length:
error(filename, linenum, 'whitespace/line_length', 2,
'Lines should be <= %i characters long' % _line_length)
if (cleansed_line.count(';') > 1 and
# allow simple single line lambdas
not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}\n\r]*\}',
line) and
# for loops are allowed two ;'s (and may run over two lines).
cleansed_line.find('for') == -1 and
(GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
# It's ok to have many commands in a switch case that fits in 1 line
not ((cleansed_line.find('case ') != -1 or
cleansed_line.find('default:') != -1) and
cleansed_line.find('break;') != -1)):
error(filename, linenum, 'whitespace/newline', 0,
'More than one command on the same line')
# Some more style checks
CheckBraces(filename, clean_lines, linenum, error)
CheckTrailingSemicolon(filename, clean_lines, linenum, error)
CheckEmptyBlockBody(filename, clean_lines, linenum, error)
CheckAccess(filename, clean_lines, linenum, nesting_state, error)
CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckOperatorSpacing(filename, clean_lines, linenum, error)
CheckParenthesisSpacing(filename, clean_lines, linenum, error)
CheckCommaSpacing(filename, clean_lines, linenum, error)
CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
CheckCheck(filename, clean_lines, linenum, error)
CheckAltTokens(filename, clean_lines, linenum, error)
classinfo = nesting_state.InnermostClass()
if classinfo:
CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in itertools.chain(
('%s.%s' % (test_suffix.lstrip('_'), ext)
for test_suffix, ext in itertools.product(_test_suffixes, GetNonHeaderExtensions())),
('%s.%s' % (suffix, ext)
for suffix, ext in itertools.product(['inl', 'imp', 'internal'], GetHeaderExtensions()))):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _ClassifyInclude(fileinfo, include, is_system):
"""Figures out what kind of header 'framework' is.
Args:
fileinfo: The current file cpplint is running over. A FileInfo instance.
include: The path to a #included file.
is_system: True if the #include used <> rather than "".
Returns:
One of the _XXX_HEADER constants.
For example:
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
_C_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
_CPP_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
_LIKELY_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
... 'bar/foo_other_ext.h', False)
_POSSIBLE_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
_OTHER_HEADER
"""
# This is a list of all standard c++ header files, except
# those already checked for above.
is_cpp_h = include in _CPP_HEADERS
# Headers with C++ extensions shouldn't be considered C system headers
if is_system and os.path.splitext(include)[1] in ['.hpp', '.hxx', '.h++']:
is_system = False
if is_system:
if is_cpp_h:
return _CPP_SYS_HEADER
else:
return _C_SYS_HEADER
# If the target file and the include we're checking share a
# basename when we drop common extensions, and the include
# lives in . , then it's likely to be owned by the target file.
target_dir, target_base = (
os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
target_dir_pub = os.path.normpath(target_dir + '/../public')
target_dir_pub = target_dir_pub.replace('\\', '/')
if target_base == include_base and (
include_dir == target_dir or
include_dir == target_dir_pub):
return _LIKELY_MY_HEADER
# If the target and include share some initial basename
# component, it's possible the target is implementing the
# include, so it's allowed to be first, but we'll never
# complain if it's not there.
target_first_component = _RE_FIRST_COMPONENT.match(target_base)
include_first_component = _RE_FIRST_COMPONENT.match(include_base)
if (target_first_component and include_first_component and
target_first_component.group(0) ==
include_first_component.group(0)):
return _POSSIBLE_MY_HEADER
return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
"""Check rules that are applicable to #framework lines.
Strings on #framework lines are NOT removed from elided line, to make
certain tasks easier. However, to prevent false positives, checks
applicable to #framework lines in CheckLanguage must be put here.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
fileinfo = FileInfo(filename)
line = clean_lines.lines[linenum]
# "framework" should use the new style "foo/bar.h" instead of just "bar.h"
# Only do this check if the included header follows google naming
# conventions. If not, assume that it's a 3rd party API that
# requires special framework conventions.
#
# We also make an exception for Lua headers, which follow google
# naming convention but not the framework convention.
match = Match(r'#include\s*"([^/]+\.h)"', line)
if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
error(filename, linenum, 'build/include_subdir', 4,
'Include the directory when naming .h files')
# we shouldn't include a file more than once. actually, there are a
# handful of instances where doing so is okay, but in general it's
# not.
match = _RE_PATTERN_INCLUDE.search(line)
if match:
include = match.group(2)
is_system = (match.group(1) == '<')
duplicate_line = include_state.FindHeader(include)
if duplicate_line >= 0:
error(filename, linenum, 'build/include', 4,
'"%s" already included at %s:%s' %
(include, filename, duplicate_line))
return
for extension in GetNonHeaderExtensions():
if (include.endswith('.' + extension) and
os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)):
error(filename, linenum, 'build/include', 4,
'Do not include .' + extension + ' files from other packages')
return
if not _THIRD_PARTY_HEADERS_PATTERN.match(include):
include_state.include_list[-1].append((include, linenum))
# We want to ensure that headers appear in the right order:
# 1) for foo.cc, foo.h (preferred location)
# 2) c system files
# 3) cpp system files
# 4) for foo.cc, foo.h (deprecated location)
# 5) other google headers
#
# We classify each include statement as one of those 5 types
# using a number of techniques. The include_state object keeps
# track of the highest type seen, and complains if we see a
# lower type after that.
error_message = include_state.CheckNextIncludeOrder(
_ClassifyInclude(fileinfo, include, is_system))
if error_message:
error(filename, linenum, 'build/include_order', 4,
'%s. Should be: %s.h, c system, c++ system, other.' %
(error_message, fileinfo.BaseName()))
canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
if not include_state.IsInAlphabeticalOrder(
clean_lines, linenum, canonical_include):
error(filename, linenum, 'build/include_alpha', 4,
'Include "%s" not in alphabetical order' % include)
include_state.SetLastHeader(canonical_include)
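# A correctly ordered block, as enforced above, looks roughly like this
# (paths are illustrative):
#
#   #include "foo/foo.h"        // 1) the header belonging to this .cc file
#   #include <sys/types.h>      // 2) C system headers
#   #include <map>              // 3) C++ system headers
#   #include <vector>
#   #include "base/logging.h"   // 5) other headers from the project
#
# with alphabetical order additionally checked inside each section.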
def _GetTextInside(text, start_pattern):
r"""Retrieves all the text between matching open and close parentheses.
Given a string of lines and a regular expression string, retrieve all the text
following the expression and between opening punctuation symbols like
(, [, or {, and the matching close-punctuation symbol. This properly handles
nested occurrences of the punctuation, so for the text like
printf(a(), b(c()));
a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
start_pattern must match a string that ends with an opening punctuation symbol.
Args:
text: The lines to extract text. Its comments and strings must be elided.
It can be single line and can span multiple lines.
start_pattern: The regexp string indicating where to start extracting
the text.
Returns:
The extracted text.
None if either the opening string or ending punctuation could not be found.
"""
# TODO(unknown): Audit cpplint.py to see what places could be profitably
# rewritten to use _GetTextInside (and use inferior regexp matching today).
# Give opening punctuations to get the matching close-punctuations.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
closing_punctuation = set(itervalues(matching_punctuation))
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
if not match: # start_pattern not found in text.
return None
start_position = match.end(0)
assert start_position > 0, (
'start_pattern must end with an opening punctuation.')
assert text[start_position - 1] in matching_punctuation, (
'start_pattern must end with an opening punctuation.')
# Stack of closing punctuations we expect to have in text after position.
punctuation_stack = [matching_punctuation[text[start_position - 1]]]
position = start_position
while punctuation_stack and position < len(text):
if text[position] == punctuation_stack[-1]:
punctuation_stack.pop()
elif text[position] in closing_punctuation:
# A closing punctuation without matching opening punctuations.
return None
elif text[position] in matching_punctuation:
punctuation_stack.append(matching_punctuation[text[position]])
position += 1
if punctuation_stack:
# Opening punctuations left without matching close-punctuations.
return None
# punctuations match.
return text[start_position:position - 1]
# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
# < (?: < (?: < [^<>]*
# >
# | [^<>] )*
# >
# | [^<>] )*
# >
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]*
_RE_PATTERN_TYPE = (
r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
r'(?:\w|'
r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
r'::)+')
# A call-by-reference parameter ends with '& identifier'.
_RE_PATTERN_REF_PARAM = re.compile(
r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
_RE_PATTERN_CONST_REF_PARAM = (
r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
# Stream types.
_RE_PATTERN_REF_STREAM_PARAM = (
r'(?:.*stream\s*&\s*' + _RE_PATTERN_IDENT + r')')
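# Rough idea of what these patterns pick out of a parameter list
# (function and type names below are made up):
#
#   void Update(string &s, const Options &opts, ostream &out);
#
# _RE_PATTERN_REF_PARAM matches "string &s", "const Options &opts" and
# "ostream &out"; the const-reference and stream patterns then filter the
# last two out, so only "string &s" is reported by CheckForNonConstReference.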
def CheckLanguage(filename, clean_lines, linenum, file_extension,
include_state, nesting_state, error):
"""Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# If the line is empty or consists of entirely a comment, no need to
# check it.
line = clean_lines.elided[linenum]
if not line:
return
match = _RE_PATTERN_INCLUDE.search(line)
if match:
CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
return
# Reset include state across preprocessor directives. This is meant
# to silence warnings for conditional includes.
match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
if match:
include_state.ResetSection(match.group(1))
# Perform other checks now that we are sure that this is not an include line
CheckCasts(filename, clean_lines, linenum, error)
CheckGlobalStatic(filename, clean_lines, linenum, error)
CheckPrintf(filename, clean_lines, linenum, error)
if file_extension in GetHeaderExtensions():
# TODO(unknown): check that 1-arg constructors are explicit.
# How to tell it's a constructor?
# (handled in CheckForNonStandardConstructs for now)
# TODO(unknown): check that classes declare or disable copy/assign
# (level 1 error)
pass
# Check if people are using the verboten C basic types. The only exception
# we regularly allow is "unsigned short port" for port.
if Search(r'\bshort port\b', line):
if not Search(r'\bunsigned short port\b', line):
error(filename, linenum, 'runtime/int', 4,
'Use "unsigned short" for ports, not "short"')
else:
match = Search(r'\b(short|long(?! +double)|long long)\b', line)
if match:
error(filename, linenum, 'runtime/int', 4,
'Use int16/int64/etc, rather than the C type %s' % match.group(1))
# Check if some verboten operator overloading is going on
# TODO(unknown): catch out-of-line unary operator&:
# class X {};
# int operator&(const X& x) { return 42; } // unary operator&
# The trick is it's hard to tell apart from binary operator&:
# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
if Search(r'\boperator\s*&\s*\(\s*\)', line):
error(filename, linenum, 'runtime/operator', 4,
'Unary operator& is dangerous. Do not use it.')
# Check for suspicious usage of "if" like
# } if (a == b) {
if Search(r'\}\s*if\s*\(', line):
error(filename, linenum, 'readability/braces', 4,
'Did you mean "else if"? If not, start a new line for "if".')
# Check for potential format string bugs like printf(foo).
# We constrain the pattern not to pick things like DocidForPrintf(foo).
# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
# TODO(unknown): Catch the following case. Need to change the calling
# convention of the whole function to process multiple line to handle it.
# printf(
# boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
if printf_args:
match = Match(r'([\w.\->()]+)$', printf_args)
if match and match.group(1) != '__VA_ARGS__':
function_name = re.search(r'\b((?:string)?printf)\s*\(',
line, re.I).group(1)
error(filename, linenum, 'runtime/printf', 4,
'Potential format string bug. Do %s("%%s", %s) instead.'
% (function_name, match.group(1)))
# Check for potential memset bugs like memset(buf, sizeof(buf), 0).
match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
error(filename, linenum, 'runtime/memset', 4,
'Did you mean "memset(%s, 0, %s)"?'
% (match.group(1), match.group(2)))
if Search(r'\busing namespace\b', line):
if Search(r'\bliterals\b', line):
error(filename, linenum, 'build/namespaces_literals', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
else:
error(filename, linenum, 'build/namespaces', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
# Detect variable-length arrays.
match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
match.group(3).find(']') == -1):
# Split the size using space and arithmetic operators as delimiters.
# If any of the resulting tokens are not compile time constants then
# report the error.
tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
is_const = True
skip_next = False
for tok in tokens:
if skip_next:
skip_next = False
continue
if Search(r'sizeof\(.+\)', tok): continue
if Search(r'arraysize\(\w+\)', tok): continue
tok = tok.lstrip('(')
tok = tok.rstrip(')')
if not tok: continue
if Match(r'\d+', tok): continue
if Match(r'0[xX][0-9a-fA-F]+', tok): continue
if Match(r'k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
# A catch all for tricky sizeof cases, including 'sizeof expression',
# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
# requires skipping the next token because we split on ' ' and '*'.
if tok.startswith('sizeof'):
skip_next = True
continue
is_const = False
break
if not is_const:
error(filename, linenum, 'runtime/arrays', 1,
'Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size.")
# Check for use of unnamed namespaces in header files. Registration
# macros are typically OK, so we allow use of "namespace {" on lines
# that end with backslashes.
if (file_extension in GetHeaderExtensions()
and Search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
error(filename, linenum, 'build/namespaces', 4,
'Do not use unnamed namespaces in header files. See '
'https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.')
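# A few illustrative lines for the checks above (variable names made up):
#
#   int buf[len];          // 'len' is not a compile-time constant -> runtime/arrays
#   int buf[kMaxItems];    // 'k' constant -> OK
#   char path[PATH_MAX];   // ALL_CAPS macro treated as a constant -> OK
#   using namespace std;   // -> build/namespaces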
def CheckGlobalStatic(filename, clean_lines, linenum, error):
"""Check for unsafe global or static objects.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Match two lines at a time to support multiline declarations
if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
line += clean_lines.elided[linenum + 1].strip()
# Check for people declaring static/global STL strings at the top level.
# This is dangerous because the C++ language does not guarantee that
# globals with constructors are initialized before the first access, and
# also because globals can be destroyed when some threads are still running.
# TODO(unknown): Generalize this to also find static unique_ptr instances.
# TODO(unknown): File bugs for clang-tidy to find these.
match = Match(
r'((?:|static +)(?:|const +))(?::*std::)?string( +const)? +'
r'([a-zA-Z0-9_:]+)\b(.*)',
line)
# Remove false positives:
# - String pointers (as opposed to values).
# string *pointer
# const string *pointer
# string const *pointer
# string *const pointer
#
# - Functions and template specializations.
# string Function<Type>(...
# string Class<Type>::Method(...
#
# - Operators. These are matched separately because operator names
# cross non-word boundaries, and trying to match both operators
# and functions at the same time would decrease accuracy of
# matching identifiers.
# string Class::operator*()
if (match and
not Search(r'\bstring\b(\s+const)?\s*[\*\&]\s*(const\s+)?\w', line) and
not Search(r'\boperator\W', line) and
not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(4))):
if Search(r'\bconst\b', line):
error(filename, linenum, 'runtime/string', 4,
'For a static/global string constant, use a C style string '
'instead: "%schar%s %s[]".' %
(match.group(1), match.group(2) or '', match.group(3)))
else:
error(filename, linenum, 'runtime/string', 4,
'Static/global string variables are not permitted.')
if (Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line) or
Search(r'\b([A-Za-z0-9_]*_)\(CHECK_NOTNULL\(\1\)\)', line)):
error(filename, linenum, 'runtime/init', 4,
'You seem to be initializing a member variable with itself.')
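# For example (illustrative; names made up):
#
#   static string kGreeting = "hello";        // -> "not permitted" (runtime/string)
#   static const string kGreeting = "hello";  // -> suggest: static const char kGreeting[]
#   static const char kGreeting[] = "hello";  // preferred form, no warning
#   string* name_;                             // pointer, excluded above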
def CheckPrintf(filename, clean_lines, linenum, error):
"""Check for printf related issues.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# When snprintf is used, the second argument shouldn't be a literal.
match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
if match and match.group(2) != '0':
# If 2nd arg is zero, snprintf is used to calculate size.
error(filename, linenum, 'runtime/printf', 3,
'If you can, use sizeof(%s) instead of %s as the 2nd arg '
'to snprintf.' % (match.group(1), match.group(2)))
# Check if some verboten C functions are being used.
if Search(r'\bsprintf\s*\(', line):
error(filename, linenum, 'runtime/printf', 5,
'Never use sprintf. Use snprintf instead.')
match = Search(r'\b(strcpy|strcat)\s*\(', line)
if match:
error(filename, linenum, 'runtime/printf', 4,
'Almost always, snprintf is better than %s' % match.group(1))
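# For example (illustrative):
#
#   sprintf(buf, "%d", x);        // -> "Never use sprintf. Use snprintf instead."
#   snprintf(buf, 10, "%d", x);   // -> suggest sizeof(buf) instead of the literal 10
#   snprintf(NULL, 0, "%d", x);   // size 0 is the "measure only" idiom -> OK
#   strcpy(dst, src);             // -> "Almost always, snprintf is better than strcpy"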
def IsDerivedFunction(clean_lines, linenum):
"""Check if current line contains an inherited function.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line contains a function with "override"
virt-specifier.
"""
# Scan back a few lines for start of current function
for i in xrange(linenum, max(-1, linenum - 10), -1):
match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
if match:
# Look for "override" after the matching closing parenthesis
line, _, closing_paren = CloseExpression(
clean_lines, i, len(match.group(1)))
return (closing_paren >= 0 and
Search(r'\boverride\b', line[closing_paren:]))
return False
def IsOutOfLineMethodDefinition(clean_lines, linenum):
"""Check if current line contains an out-of-line method definition.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line contains an out-of-line method definition.
"""
# Scan back a few lines for start of current function
for i in xrange(linenum, max(-1, linenum - 10), -1):
if Match(r'^([^()]*\w+)\(', clean_lines.elided[i]):
return Match(r'^[^()]*\w+::\w+\(', clean_lines.elided[i]) is not None
return False
def IsInitializerList(clean_lines, linenum):
"""Check if current line is inside constructor initializer list.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line appears to be inside constructor initializer
list, False otherwise.
"""
for i in xrange(linenum, 1, -1):
line = clean_lines.elided[i]
if i == linenum:
remove_function_body = Match(r'^(.*)\{\s*$', line)
if remove_function_body:
line = remove_function_body.group(1)
if Search(r'\s:\s*\w+[({]', line):
# A lone colon tend to indicate the start of a constructor
# initializer list. It could also be a ternary operator, which
# also tend to appear in constructor initializer lists as
# opposed to parameter lists.
return True
if Search(r'\}\s*,\s*$', line):
# A closing brace followed by a comma is probably the end of a
# brace-initialized member in constructor initializer list.
return True
if Search(r'[{};]\s*$', line):
# Found one of the following:
# - A closing brace or semicolon, probably the end of the previous
# function.
# - An opening brace, probably the start of current class or namespace.
#
# Current line is probably not inside an initializer list since
# we saw one of those things without seeing the starting colon.
return False
# Got to the beginning of the file without seeing the start of
# constructor initializer list.
return False
def CheckForNonConstReference(filename, clean_lines, linenum,
nesting_state, error):
"""Check for non-const references.
Separate from CheckLanguage since it scans backwards from current
line, instead of scanning forward.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Do nothing if there is no '&' on current line.
line = clean_lines.elided[linenum]
if '&' not in line:
return
# If a function is inherited, current function doesn't have much of
# a choice, so any non-const references should not be blamed on
# derived function.
if IsDerivedFunction(clean_lines, linenum):
return
# Don't warn on out-of-line method definitions, as we would warn on the
# in-line declaration, if it isn't marked with 'override'.
if IsOutOfLineMethodDefinition(clean_lines, linenum):
return
# Long type names may be broken across multiple lines, usually in one
# of these forms:
# LongType
# ::LongTypeContinued &identifier
# LongType::
# LongTypeContinued &identifier
# LongType<
# ...>::LongTypeContinued &identifier
#
# If we detected a type split across two lines, join the previous
# line to current line so that we can match const references
# accordingly.
#
# Note that this only scans back one line, since scanning back
# arbitrary number of lines would be expensive. If you have a type
# that spans more than 2 lines, please use a typedef.
if linenum > 1:
previous = None
if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
# previous_line\n + ::current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
clean_lines.elided[linenum - 1])
elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
# previous_line::\n + current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
clean_lines.elided[linenum - 1])
if previous:
line = previous.group(1) + line.lstrip()
else:
# Check for templated parameter that is split across multiple lines
endpos = line.rfind('>')
if endpos > -1:
(_, startline, startpos) = ReverseCloseExpression(
clean_lines, linenum, endpos)
if startpos > -1 and startline < linenum:
# Found the matching < on an earlier line, collect all
# pieces up to current line.
line = ''
for i in xrange(startline, linenum + 1):
line += clean_lines.elided[i].strip()
# Check for non-const references in function parameters. A single '&' may
# found in the following places:
# inside expression: binary & for bitwise AND
# inside expression: unary & for taking the address of something
# inside declarators: reference parameter
# We will exclude the first two cases by checking that we are not inside a
# function body, including one that was just introduced by a trailing '{'.
# TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
if (nesting_state.previous_stack_top and
not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
# Not at toplevel, not within a class, and not within a namespace
return
# Avoid initializer lists. We only need to scan back from the
# current line for something that starts with ':'.
#
# We don't need to check the current line, since the '&' would
# appear inside the second set of parentheses on the current line as
# opposed to the first set.
if linenum > 0:
for i in xrange(linenum - 1, max(0, linenum - 10), -1):
previous_line = clean_lines.elided[i]
if not Search(r'[),]\s*$', previous_line):
break
if Match(r'^\s*:\s+\S', previous_line):
return
# Avoid preprocessors
if Search(r'\\\s*$', line):
return
# Avoid constructor initializer lists
if IsInitializerList(clean_lines, linenum):
return
# We allow non-const references in a few standard places, like functions
# called "swap()" or iostream operators like "<<" or ">>". Do not check
# those function parameters.
#
# We also accept & in static_assert, which looks like a function but
# it's actually a declaration expression.
whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
r'operator\s*[<>][<>]|'
r'static_assert|COMPILE_ASSERT'
r')\s*\(')
if Search(whitelisted_functions, line):
return
elif not Search(r'\S+\([^)]*$', line):
# Don't see a whitelisted function on this line. Actually we
# didn't see any function name on this line, so this is likely a
# multi-line parameter list. Try a bit harder to catch this case.
for i in xrange(2):
if (linenum > i and
Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
return
decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body
for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
if (not Match(_RE_PATTERN_CONST_REF_PARAM, parameter) and
not Match(_RE_PATTERN_REF_STREAM_PARAM, parameter)):
error(filename, linenum, 'runtime/references', 2,
'Is this a non-const reference? '
'If so, make const or use a pointer: ' +
ReplaceAll(' *<', '<', parameter))
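# For example (illustrative; 'Widget' is a made-up type):
#
#   void Update(Widget &w);               // -> runtime/references
#   void Update(const Widget &w);         // const reference -> OK
#   void Update(Widget *w);               // pointer -> OK
#   void swap(Widget &a, Widget &b);      // swap() is whitelisted -> OK
#   ostream &operator<<(ostream &os, const Widget &w);  // stream/operator -> OK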
def CheckCasts(filename, clean_lines, linenum, error):
"""Various cast related checks.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Check to see if they're using a conversion function cast.
# I just try to capture the most common basic types, though there are more.
# Parameterless conversion functions, such as bool(), are allowed as they are
# probably a member operator declaration or default constructor.
match = Search(
r'(\bnew\s+(?:const\s+)?|\S<\s*(?:const\s+)?)?\b'
r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
r'(\([^)].*)', line)
expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
if match and not expecting_function:
matched_type = match.group(2)
# matched_new_or_template is used to silence two false positives:
# - New operators
# - Template arguments with function types
#
# For template arguments, we match on types immediately following
# an opening bracket without any spaces. This is a fast way to
# silence the common case where the function type is the first
# template argument. False negative with less-than comparison is
# avoided because those operators are usually followed by a space.
#
# function<double(double)> // bracket + no space = false positive
# value < double(42) // bracket + space = true positive
matched_new_or_template = match.group(1)
# Avoid arrays by looking for brackets that come after the closing
# parenthesis.
if Match(r'\([^()]+\)\s*\[', match.group(3)):
return
# Other things to ignore:
# - Function pointers
# - Casts to pointer types
# - Placement new
# - Alias declarations
matched_funcptr = match.group(3)
if (matched_new_or_template is None and
not (matched_funcptr and
(Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
matched_funcptr) or
matched_funcptr.startswith('(*)'))) and
not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
not Search(r'new\(\S+\)\s*' + matched_type, line)):
error(filename, linenum, 'readability/casting', 4,
'Using deprecated casting style. '
'Use static_cast<%s>(...) instead' %
matched_type)
if not expecting_function:
CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
# This doesn't catch all cases. Consider (const char * const)"hello".
#
# (char *) "foo" should always be a const_cast (reinterpret_cast won't
# compile).
if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
r'\((char\s?\*+\s?)\)\s*"', error):
pass
else:
# Check pointer casts for other than string constants
CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
r'\((\w+\s?\*+\s?)\)', error)
# In addition, we look for people taking the address of a cast. This
# is dangerous -- casts can assign to temporaries, so the pointer doesn't
# point where you think.
#
# Some non-identifier character is required before the '&' for the
# expression to be recognized as a cast. These are casts:
# expression = &static_cast<int*>(temporary());
# function(&(int*)(temporary()));
#
# This is not a cast:
# reference_type&(int* function_param);
match = Search(
r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|'
r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
if match:
# Try a better error message when the & is bound to something
# dereferenced by the casted pointer, as opposed to the casted
# pointer itself.
parenthesis_error = False
match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
if match:
_, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
_, y2, x2 = CloseExpression(clean_lines, y1, x1)
if x2 >= 0:
extended_line = clean_lines.elided[y2][x2:]
if y2 < clean_lines.NumLines() - 1:
extended_line += clean_lines.elided[y2 + 1]
if Match(r'\s*(?:->|\[)', extended_line):
parenthesis_error = True
if parenthesis_error:
error(filename, linenum, 'readability/casting', 4,
('Are you taking an address of something dereferenced '
'from a cast? Wrapping the dereferenced expression in '
'parentheses will make the binding more obvious'))
else:
error(filename, linenum, 'runtime/casting', 4,
('Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'))
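# A few illustrative cases for the cast checks above (names made up):
#
#   int n = int(3.5);              // -> suggest static_cast<int>(...)
#   int n = (int)3.5;              // -> suggest static_cast<int>(...)
#   char *p = (char *)"hello";     // -> suggest const_cast<char *>(...)
#   Widget *w = (Widget *)ptr;     // -> suggest reinterpret_cast<Widget *>(...)
#   int *q = &(int *)(tmp);        // address of a cast -> runtime/casting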
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
"""Checks for a C-style cast by looking for the pattern.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
cast_type: The string for the C++ cast to recommend. This is either
reinterpret_cast, static_cast, or const_cast, depending.
pattern: The regular expression used to find C-style casts.
error: The function to call with any errors found.
Returns:
True if an error was emitted.
False otherwise.
"""
line = clean_lines.elided[linenum]
match = Search(pattern, line)
if not match:
return False
# Exclude lines with keywords that tend to look like casts
context = line[0:match.start(1) - 1]
if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
return False
# Try expanding current context to see if we are one level of
# parentheses inside a macro.
if linenum > 0:
for i in xrange(linenum - 1, max(0, linenum - 5), -1):
context = clean_lines.elided[i] + context
if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
return False
# operator++(int) and operator--(int)
if context.endswith(' operator++') or context.endswith(' operator--'):
return False
# A single unnamed argument for a function tends to look like old style cast.
# If we see those, don't issue warnings for deprecated casts.
remainder = line[match.end(0):]
if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)',
remainder):
return False
# At this point, all that should be left is actual casts.
error(filename, linenum, 'readability/casting', 4,
'Using C-style cast. Use %s<%s>(...) instead' %
(cast_type, match.group(1)))
return True
def ExpectingFunctionArgs(clean_lines, linenum):
"""Checks whether where function type arguments are expected.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if the line at 'linenum' is inside something that expects arguments
of function types.
"""
line = clean_lines.elided[linenum]
return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
(linenum >= 2 and
(Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
clean_lines.elided[linenum - 1]) or
Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
clean_lines.elided[linenum - 2]) or
Search(r'\bstd::m?function\s*\<\s*$',
clean_lines.elided[linenum - 1]))))
_HEADERS_CONTAINING_TEMPLATES = (
('<deque>', ('deque',)),
('<functional>', ('unary_function', 'binary_function',
'plus', 'minus', 'multiplies', 'divides', 'modulus',
'negate',
'equal_to', 'not_equal_to', 'greater', 'less',
'greater_equal', 'less_equal',
'logical_and', 'logical_or', 'logical_not',
'unary_negate', 'not1', 'binary_negate', 'not2',
'bind1st', 'bind2nd',
'pointer_to_unary_function',
'pointer_to_binary_function',
'ptr_fun',
'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
'mem_fun_ref_t',
'const_mem_fun_t', 'const_mem_fun1_t',
'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
'mem_fun_ref',
)),
('<limits>', ('numeric_limits',)),
('<list>', ('list',)),
('<map>', ('map', 'multimap',)),
('<memory>', ('allocator', 'make_shared', 'make_unique', 'shared_ptr',
'unique_ptr', 'weak_ptr')),
('<queue>', ('queue', 'priority_queue',)),
('<set>', ('set', 'multiset',)),
('<stack>', ('stack',)),
('<string>', ('char_traits', 'basic_string',)),
('<tuple>', ('tuple',)),
('<unordered_map>', ('unordered_map', 'unordered_multimap')),
('<unordered_set>', ('unordered_set', 'unordered_multiset')),
('<utility>', ('pair',)),
('<vector>', ('vector',)),
# gcc extensions.
# Note: std::hash is their hash, ::hash is our hash
('<hash_map>', ('hash_map', 'hash_multimap',)),
('<hash_set>', ('hash_set', 'hash_multiset',)),
('<slist>', ('slist',)),
)
_HEADERS_MAYBE_TEMPLATES = (
('<algorithm>', ('copy', 'max', 'min', 'min_element', 'sort',
'transform',
)),
('<utility>', ('forward', 'make_pair', 'move', 'swap')),
)
_RE_PATTERN_STRING = re.compile(r'\bstring\b')
_re_pattern_headers_maybe_templates = []
for _header, _templates in _HEADERS_MAYBE_TEMPLATES:
for _template in _templates:
# Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
# type::max().
_re_pattern_headers_maybe_templates.append(
(re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
_template,
_header))
# Other scripts may reach in and modify this pattern.
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
for _template in _templates:
_re_pattern_templates.append(
(re.compile(r'(\<|\b)' + _template + r'\s*\<'),
_template + '<>',
_header))
def FilesBelongToSameModule(filename_cc, filename_h):
"""Check if these two filenames belong to the same module.
The concept of a 'module' here is as follows:
foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
same 'module' if they are in the same directory.
some/path/public/xyzzy and some/path/internal/xyzzy are also considered
to belong to the same module here.
If the filename_cc contains a longer path than the filename_h, for example,
'/absolute/path/to/base/sysinfo.cc', and this file would include
'base/sysinfo.h', this function also produces the prefix needed to open the
header. This is used by the caller of this function to more robustly open the
header file. We don't have access to the real include paths in this context,
so we need this guesswork here.
Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
according to this implementation. Because of this, this function gives
some false positives. This should be sufficiently rare in practice.
Args:
filename_cc: is the path for the source (e.g. .cc) file
filename_h: is the path for the header path
Returns:
Tuple with a bool and a string:
bool: True if filename_cc and filename_h belong to the same module.
string: the additional prefix needed to open the header file.
"""
fileinfo_cc = FileInfo(filename_cc)
if not fileinfo_cc.Extension().lstrip('.') in GetNonHeaderExtensions():
return (False, '')
fileinfo_h = FileInfo(filename_h)
if not fileinfo_h.Extension().lstrip('.') in GetHeaderExtensions():
return (False, '')
filename_cc = filename_cc[:-(len(fileinfo_cc.Extension()))]
matched_test_suffix = Search(_TEST_FILE_SUFFIX, fileinfo_cc.BaseName())
if matched_test_suffix:
filename_cc = filename_cc[:-len(matched_test_suffix.group(1))]
filename_cc = filename_cc.replace('/public/', '/')
filename_cc = filename_cc.replace('/internal/', '/')
filename_h = filename_h[:-(len(fileinfo_h.Extension()))]
if filename_h.endswith('-inl'):
filename_h = filename_h[:-len('-inl')]
filename_h = filename_h.replace('/public/', '/')
filename_h = filename_h.replace('/internal/', '/')
files_belong_to_same_module = filename_cc.endswith(filename_h)
common_path = ''
if files_belong_to_same_module:
common_path = filename_cc[:-len(filename_h)]
return files_belong_to_same_module, common_path
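# Rough usage sketch (results assume the default header/source extensions
# and test suffixes):
#
#   FilesBelongToSameModule('a/b/foo.cc', 'a/b/foo.h')        # -> (True, '')
#   FilesBelongToSameModule('a/b/foo_test.cc', 'a/b/foo.h')   # -> (True, '')
#   FilesBelongToSameModule('/abs/src/base/sysinfo.cc',
#                           'base/sysinfo.h')                 # -> (True, '/abs/src/')
#   FilesBelongToSameModule('a/b/foo.cc', 'a/b/bar.h')        # -> (False, '')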
def UpdateIncludeState(filename, include_dict, io=codecs):
"""Fill up the include_dict with new includes found from the file.
Args:
filename: the name of the header to read.
include_dict: a dictionary in which the headers are inserted.
io: The io factory to use to read the file. Provided for testability.
Returns:
True if a header was successfully added. False otherwise.
"""
headerfile = None
try:
headerfile = io.open(filename, 'r', 'utf8', 'replace')
except IOError:
return False
linenum = 0
for line in headerfile:
linenum += 1
clean_line = CleanseComments(line)
match = _RE_PATTERN_INCLUDE.search(clean_line)
if match:
include = match.group(2)
include_dict.setdefault(include, linenum)
return True
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
io=codecs):
"""Reports for missing stl includes.
This function will output warnings to make sure you are including the headers
necessary for the stl containers and functions that you use. We only give one
reason to include a header. For example, if you use both equal_to<> and
less<> in a .h file, only one (the latter in the file) of these will be
reported as a reason to include the <functional> header.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
include_state: An _IncludeState instance.
error: The function to call with any errors found.
io: The IO factory to use to read the header file. Provided for unittest
injection.
"""
required = {} # A map of header name to linenumber and the template entity.
# Example of required: { '<functional>': (1219, 'less<>') }
for linenum in range(clean_lines.NumLines()):
line = clean_lines.elided[linenum]
if not line or line[0] == '#':
continue
# String is special -- it is a non-templatized type in STL.
matched = _RE_PATTERN_STRING.search(line)
if matched:
# Don't warn about strings in non-STL namespaces:
# (We check only the first match per line; good enough.)
prefix = line[:matched.start()]
if prefix.endswith('std::') or not prefix.endswith('::'):
required['<string>'] = (linenum, 'string')
for pattern, template, header in _re_pattern_headers_maybe_templates:
if pattern.search(line):
required[header] = (linenum, template)
# The following function is just a speed up, no semantics are changed.
if not '<' in line: # Reduces the cpu time usage by skipping lines.
continue
for pattern, template, header in _re_pattern_templates:
matched = pattern.search(line)
if matched:
# Don't warn about IWYU in non-STL namespaces:
# (We check only the first match per line; good enough.)
prefix = line[:matched.start()]
if prefix.endswith('std::') or not prefix.endswith('::'):
required[header] = (linenum, template)
  # The policy is that if you #include something in foo.h you don't need to
  # include it again in foo.cc. Here, we will look at possible includes.
# Let's flatten the include_state include_list and copy it into a dictionary.
include_dict = dict([item for sublist in include_state.include_list
for item in sublist])
# Did we find the header for this file (if any) and successfully load it?
header_found = False
# Use the absolute path so that matching works properly.
abs_filename = FileInfo(filename).FullName()
# For Emacs's flymake.
# If cpplint is invoked from Emacs's flymake, a temporary file is generated
# by flymake and that file name might end with '_flymake.cc'. In that case,
# restore original file name here so that the corresponding header file can be
# found.
# e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
# instead of 'foo_flymake.h'
abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
# include_dict is modified during iteration, so we iterate over a copy of
# the keys.
header_keys = list(include_dict.keys())
for header in header_keys:
(same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
fullpath = common_path + header
if same_module and UpdateIncludeState(fullpath, include_dict, io):
header_found = True
# If we can't find the header file for a .cc, assume it's because we don't
# know where to look. In that case we'll give up as we're not sure they
  # didn't include it in the .h file.
# TODO(unknown): Do a better job of finding .h files so we are confident that
# not having the .h file means there isn't one.
if not header_found:
for extension in GetNonHeaderExtensions():
if filename.endswith('.' + extension):
return
# All the lines have been processed, report the errors found.
for required_header_unstripped in sorted(required, key=required.__getitem__):
template = required[required_header_unstripped][1]
if required_header_unstripped.strip('<>"') not in include_dict:
error(filename, required[required_header_unstripped][0],
'build/include_what_you_use', 4,
            'Add #include ' + required_header_unstripped + ' for ' + template)
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
"""Check that make_pair's template arguments are deduced.
G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
specified explicitly, and such use isn't intended in any case.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
if match:
error(filename, linenum, 'build/explicit_make_pair',
4, # 4 = high confidence
'For C++11-compatibility, omit template arguments from make_pair'
' OR use pair directly OR if appropriate, construct a pair directly')
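# Illustrative trigger for CheckMakePairUsesDeduction (example C++ only, not
# data used by this script):
#   my_map.insert(std::make_pair<int, int>(1, 2));   // flagged: explicit args
#   my_map.insert(std::make_pair(1, 2));             // preferred form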
def CheckRedundantVirtual(filename, clean_lines, linenum, error):
"""Check if line contains a redundant "virtual" function-specifier.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Look for "virtual" on current line.
line = clean_lines.elided[linenum]
virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line)
if not virtual: return
# Ignore "virtual" keywords that are near access-specifiers. These
# are only used in class base-specifier and do not apply to member
# functions.
if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or
Match(r'^\s+(public|protected|private)\b', virtual.group(3))):
return
# Ignore the "virtual" keyword from virtual base classes. Usually
  # there is a colon on the same line in these cases (virtual base
# classes are rare in google3 because multiple inheritance is rare).
if Match(r'^.*[^:]:[^:].*$', line): return
# Look for the next opening parenthesis. This is the start of the
# parameter list (possibly on the next line shortly after virtual).
# TODO(unknown): doesn't work if there are virtual functions with
# decltype() or other things that use parentheses, but csearch suggests
# that this is rare.
end_col = -1
end_line = -1
start_col = len(virtual.group(2))
for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):
line = clean_lines.elided[start_line][start_col:]
parameter_list = Match(r'^([^(]*)\(', line)
if parameter_list:
# Match parentheses to find the end of the parameter list
(_, end_line, end_col) = CloseExpression(
clean_lines, start_line, start_col + len(parameter_list.group(1)))
break
start_col = 0
if end_col < 0:
return # Couldn't find end of parameter list, give up
# Look for "override" or "final" after the parameter list
# (possibly on the next few lines).
for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):
line = clean_lines.elided[i][end_col:]
match = Search(r'\b(override|final)\b', line)
if match:
error(filename, linenum, 'readability/inheritance', 4,
('"virtual" is redundant since function is '
'already declared as "%s"' % match.group(1)))
# Set end_col to check whole lines after we are done with the
# first line.
end_col = 0
if Search(r'[^\w]\s*$', line):
break
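# Illustrative trigger for CheckRedundantVirtual (example C++ only):
#   virtual void Draw() const override;   // reported: "virtual" is redundant
#   void Draw() const override;           // preferred form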
def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
"""Check if line contains a redundant "override" or "final" virt-specifier.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Look for closing parenthesis nearby. We need one to confirm where
# the declarator ends and where the virt-specifier starts to avoid
# false positives.
line = clean_lines.elided[linenum]
declarator_end = line.rfind(')')
if declarator_end >= 0:
fragment = line[declarator_end:]
else:
if linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0:
fragment = line
else:
return
# Check that at most one of "override" or "final" is present, not both
if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment):
error(filename, linenum, 'readability/inheritance', 4,
('"override" is redundant since function is '
'already declared as "final"'))
# Returns true if we are at a new block, and it is directly
# inside of a namespace.
def IsBlockInNameSpace(nesting_state, is_forward_declaration):
"""Checks that the new block is directly in a namespace.
Args:
nesting_state: The _NestingState object that contains info about our state.
is_forward_declaration: If the class is a forward declared class.
Returns:
Whether or not the new block is directly in a namespace.
"""
if is_forward_declaration:
return len(nesting_state.stack) >= 1 and (
isinstance(nesting_state.stack[-1], _NamespaceInfo))
return (len(nesting_state.stack) > 1 and
nesting_state.stack[-1].check_namespace_indentation and
isinstance(nesting_state.stack[-2], _NamespaceInfo))
def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
raw_lines_no_comments, linenum):
"""This method determines if we should apply our namespace indentation check.
Args:
nesting_state: The current nesting state.
is_namespace_indent_item: If we just put a new class on the stack, True.
If the top of the stack is not a class, or we did not recently
add the class, False.
raw_lines_no_comments: The lines without the comments.
linenum: The current line number we are processing.
Returns:
True if we should apply our namespace indentation check. Currently, it
only works for classes and namespaces inside of a namespace.
"""
is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments,
linenum)
if not (is_namespace_indent_item or is_forward_declaration):
return False
# If we are in a macro, we do not want to check the namespace indentation.
if IsMacroDefinition(raw_lines_no_comments, linenum):
return False
return IsBlockInNameSpace(nesting_state, is_forward_declaration)
# Call this method if the line is directly inside of a namespace.
# If the line above is blank (excluding comments) or the start of
# an inner namespace, it cannot be indented.
def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,
error):
line = raw_lines_no_comments[linenum]
if Match(r'^\s+', line):
error(filename, linenum, 'runtime/indentation_namespace', 4,
'Do not indent within a namespace')
def ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions=None):
"""Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[line], line, error)
nesting_state.Update(filename, clean_lines, line, error)
CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
error)
if nesting_state.InAsmBlock(): return
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
CheckLanguage(filename, clean_lines, line, file_extension, include_state,
nesting_state, error)
CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
CheckForNonStandardConstructs(filename, clean_lines, line,
nesting_state, error)
CheckVlogArguments(filename, clean_lines, line, error)
CheckPosixThreading(filename, clean_lines, line, error)
CheckInvalidIncrement(filename, clean_lines, line, error)
CheckMakePairUsesDeduction(filename, clean_lines, line, error)
CheckRedundantVirtual(filename, clean_lines, line, error)
CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
if extra_check_functions:
for check_fn in extra_check_functions:
check_fn(filename, clean_lines, line, error)
def FlagCxx11Features(filename, clean_lines, linenum, error):
"""Flag those c++11 features that we only allow in certain places.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
  include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
# Flag unapproved C++ TR1 headers.
if include and include.group(1).startswith('tr1/'):
error(filename, linenum, 'build/c++tr1', 5,
('C++ TR1 headers such as <%s> are unapproved.') % include.group(1))
# Flag unapproved C++11 headers.
if include and include.group(1) in ('cfenv',
'condition_variable',
'fenv.h',
'future',
'mutex',
'thread',
'chrono',
'ratio',
'regex',
'system_error',
):
error(filename, linenum, 'build/c++11', 5,
('<%s> is an unapproved C++11 header.') % include.group(1))
# The only place where we need to worry about C++11 keywords and library
# features in preprocessor directives is in macro definitions.
if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return
# These are classes and free functions. The classes are always
# mentioned as std::*, but we only catch the free functions if
# they're not found by ADL. They're alphabetical by header.
for top_name in (
# type_traits
'alignment_of',
'aligned_union',
):
if Search(r'\bstd::%s\b' % top_name, line):
error(filename, linenum, 'build/c++11', 5,
('std::%s is an unapproved C++11 class or function. Send c-style '
'an example of where it would make your code more readable, and '
'they may let you use it.') % top_name)
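# Illustrative lines flagged by FlagCxx11Features (example C++ only, assuming
# the include-directive regex above):
#   #include <mutex>              // build/c++11: unapproved C++11 header
#   std::alignment_of<T>::value   // build/c++11: unapproved class/function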
def FlagCxx14Features(filename, clean_lines, linenum, error):
"""Flag those C++14 features that we restrict.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
  include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
# Flag unapproved C++14 headers.
if include and include.group(1) in ('scoped_allocator', 'shared_mutex'):
error(filename, linenum, 'build/c++14', 5,
('<%s> is an unapproved C++14 header.') % include.group(1))
def ProcessFileData(filename, file_extension, lines, error,
extra_check_functions=None):
"""Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
lines = (['// marker so line numbers and indices both start at 1'] + lines +
['// marker so line numbers end in a known way'])
include_state = _IncludeState()
function_state = _FunctionState()
nesting_state = NestingState()
ResetNolintSuppressions()
CheckForCopyright(filename, lines, error)
ProcessGlobalSuppresions(lines)
RemoveMultiLineComments(filename, lines, error)
clean_lines = CleansedLines(lines)
if file_extension in GetHeaderExtensions():
CheckForHeaderGuard(filename, clean_lines, error)
for line in range(clean_lines.NumLines()):
ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions)
FlagCxx11Features(filename, clean_lines, line, error)
nesting_state.CheckCompletedBlocks(filename, error)
CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
# Check that the .cc file has included its header if it exists.
if _IsSourceExtension(file_extension):
CheckHeaderFileIncluded(filename, include_state, error)
# We check here rather than inside ProcessLine so that we see raw
# lines rather than "cleaned" lines.
CheckForBadCharacters(filename, lines, error)
CheckForNewlineAtEOF(filename, lines, error)
def ProcessConfigOverrides(filename):
""" Loads the configuration files and processes the config overrides.
Args:
filename: The name of the file being processed by the linter.
Returns:
False if the current |filename| should not be processed further.
"""
abs_filename = os.path.abspath(filename)
cfg_filters = []
keep_looking = True
while keep_looking:
abs_path, base_name = os.path.split(abs_filename)
if not base_name:
break # Reached the root directory.
cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
abs_filename = abs_path
if not os.path.isfile(cfg_file):
continue
try:
with open(cfg_file) as file_handle:
for line in file_handle:
line, _, _ = line.partition('#') # Remove comments.
if not line.strip():
continue
name, _, val = line.partition('=')
name = name.strip()
val = val.strip()
if name == 'set noparent':
keep_looking = False
elif name == 'filter':
cfg_filters.append(val)
elif name == 'exclude_files':
# When matching exclude_files pattern, use the base_name of
# the current file name or the directory name we are processing.
# For example, if we are checking for lint errors in /foo/bar/baz.cc
# and we found the .cfg file at /foo/CPPLINT.cfg, then the config
# file's "exclude_files" filter is meant to be checked against "bar"
# and not "baz" nor "bar/baz.cc".
if base_name:
pattern = re.compile(val)
if pattern.match(base_name):
_cpplint_state.PrintInfo('Ignoring "%s": file excluded by '
'"%s". File path component "%s" matches pattern "%s"\n' %
(filename, cfg_file, base_name, val))
return False
elif name == 'linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
_cpplint_state.PrintError('Line length must be numeric.')
elif name == 'extensions':
global _valid_extensions
try:
extensions = [ext.strip() for ext in val.split(',')]
_valid_extensions = set(extensions)
except ValueError:
              sys.stderr.write('Extensions should be a comma-separated list of values; '
'for example: extensions=hpp,cpp\n'
'This could not be parsed: "%s"' % (val,))
elif name == 'headers':
global _header_extensions
try:
extensions = [ext.strip() for ext in val.split(',')]
_header_extensions = set(extensions)
except ValueError:
              sys.stderr.write('Extensions should be a comma-separated list of values; '
'for example: extensions=hpp,cpp\n'
'This could not be parsed: "%s"' % (val,))
elif name == 'root':
global _root
_root = val
else:
_cpplint_state.PrintError(
'Invalid configuration option (%s) in file %s\n' %
(name, cfg_file))
except IOError:
_cpplint_state.PrintError(
"Skipping config file '%s': Can't open for reading\n" % cfg_file)
keep_looking = False
# Apply all the accumulated filters in reverse order (top-level directory
# config options having the least priority).
for cfg_filter in reversed(cfg_filters):
_AddFilters(cfg_filter)
return True
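# Illustrative CPPLINT.cfg contents handled by ProcessConfigOverrides above
# (an assumed example, not a file shipped with this script):
#   set noparent
#   filter=-build/include_order,+readability/inheritance
#   exclude_files=.*_generated\.cc
#   linelength=100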
def ProcessFile(filename, vlevel, extra_check_functions=None):
"""Does google-lint on a single file.
Args:
filename: The name of the file to parse.
vlevel: The level of errors to report. Every error of confidence
>= verbose_level will be reported. 0 is a good default.
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
_SetVerboseLevel(vlevel)
_BackupFilters()
if not ProcessConfigOverrides(filename):
_RestoreFilters()
return
lf_lines = []
crlf_lines = []
try:
# Support the UNIX convention of using "-" for stdin. Note that
# we are not opening the file with universal newline support
# (which codecs doesn't support anyway), so the resulting lines do
# contain trailing '\r' characters if we are reading a file that
# has CRLF endings.
# If after the split a trailing '\r' is present, it is removed
# below.
if filename == '-':
lines = codecs.StreamReaderWriter(sys.stdin,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace').read().split('\n')
else:
lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
# Remove trailing '\r'.
# The -1 accounts for the extra trailing blank line we get from split()
for linenum in range(len(lines) - 1):
if lines[linenum].endswith('\r'):
lines[linenum] = lines[linenum].rstrip('\r')
crlf_lines.append(linenum + 1)
else:
lf_lines.append(linenum + 1)
except IOError:
_cpplint_state.PrintError(
"Skipping input '%s': Can't open for reading\n" % filename)
_RestoreFilters()
return
# Note, if no dot is found, this will give the entire filename as the ext.
file_extension = filename[filename.rfind('.') + 1:]
# When reading from stdin, the extension is unknown, so no cpplint tests
# should rely on the extension.
if filename != '-' and file_extension not in GetAllExtensions():
_cpplint_state.PrintError('Ignoring %s; not a valid file name '
'(%s)\n' % (filename, ', '.join(GetAllExtensions())))
else:
ProcessFileData(filename, file_extension, lines, Error,
extra_check_functions)
# If end-of-line sequences are a mix of LF and CR-LF, issue
# warnings on the lines with CR.
#
# Don't issue any warnings if all lines are uniformly LF or CR-LF,
# since critique can handle these just fine, and the style guide
# doesn't dictate a particular end of line sequence.
#
# We can't depend on os.linesep to determine what the desired
# end-of-line sequence should be, since that will return the
# server-side end-of-line sequence.
if lf_lines and crlf_lines:
# Warn on every line with CR. An alternative approach might be to
# check whether the file is mostly CRLF or just LF, and warn on the
# minority, we bias toward LF here since most tools prefer LF.
for linenum in crlf_lines:
Error(filename, linenum, 'whitespace/newline', 1,
'Unexpected \\r (^M) found; better to use only \\n')
_cpplint_state.PrintInfo('Done processing %s\n' % filename)
_RestoreFilters()
def PrintUsage(message):
"""Prints a brief usage string and exits, optionally with an error message.
Args:
message: The optional error message.
"""
sys.stderr.write(_USAGE)
if message:
sys.exit('\nFATAL ERROR: ' + message)
else:
sys.exit(0)
def PrintCategories():
"""Prints a list of all the error-categories used by error messages.
These are the categories used to filter messages via --filter.
"""
sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
sys.exit(0)
def ParseArguments(args):
"""Parses the command line arguments.
This may set the output format and verbosity level as side-effects.
Args:
args: The command line arguments:
Returns:
The list of filenames to lint.
"""
try:
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
'counting=',
'filter=',
'root=',
'repository=',
'linelength=',
'extensions=',
'exclude=',
'headers=',
'quiet',
'recursive'])
except getopt.GetoptError:
PrintUsage('Invalid arguments.')
verbosity = _VerboseLevel()
output_format = _OutputFormat()
filters = ''
counting_style = ''
recursive = False
for (opt, val) in opts:
if opt == '--help':
PrintUsage(None)
elif opt == '--output':
if val not in ('emacs', 'vs7', 'eclipse', 'junit'):
PrintUsage('The only allowed output formats are emacs, vs7, eclipse '
'and junit.')
output_format = val
elif opt == '--verbose':
verbosity = int(val)
elif opt == '--filter':
filters = val
if not filters:
PrintCategories()
elif opt == '--counting':
if val not in ('total', 'toplevel', 'detailed'):
PrintUsage('Valid counting options are total, toplevel, and detailed')
counting_style = val
elif opt == '--root':
global _root
_root = val
elif opt == '--repository':
global _repository
_repository = val
elif opt == '--linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
PrintUsage('Line length must be digits.')
elif opt == '--exclude':
global _excludes
if not _excludes:
_excludes = set()
_excludes.update(glob.glob(val))
elif opt == '--extensions':
global _valid_extensions
try:
_valid_extensions = set(val.split(','))
except ValueError:
          PrintUsage('Extensions must be a comma-separated list.')
elif opt == '--headers':
global _header_extensions
try:
_header_extensions = set(val.split(','))
except ValueError:
          PrintUsage('Extensions must be a comma-separated list.')
elif opt == '--recursive':
recursive = True
elif opt == '--quiet':
global _quiet
_quiet = True
if not filenames:
PrintUsage('No files were specified.')
if recursive:
filenames = _ExpandDirectories(filenames)
if _excludes:
filenames = _FilterExcludedFiles(filenames)
_SetOutputFormat(output_format)
_SetVerboseLevel(verbosity)
_SetFilters(filters)
_SetCountingStyle(counting_style)
return filenames
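# Illustrative invocation parsed by ParseArguments (hypothetical script name
# and paths):
#   python cpplint.py --filter=-whitespace,+whitespace/braces \
#       --linelength=100 --recursive src/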
def _ExpandDirectories(filenames):
"""Searches a list of filenames and replaces directories in the list with
all files descending from those directories. Files with extensions not in
the valid extensions list are excluded.
Args:
filenames: A list of files or directories
Returns:
A list of all files that are members of filenames or descended from a
directory in filenames
"""
expanded = set()
for filename in filenames:
if not os.path.isdir(filename):
expanded.add(filename)
continue
for root, _, files in os.walk(filename):
for loopfile in files:
fullname = os.path.join(root, loopfile)
if fullname.startswith('.' + os.path.sep):
fullname = fullname[len('.' + os.path.sep):]
expanded.add(fullname)
filtered = []
for filename in expanded:
if os.path.splitext(filename)[1][1:] in GetAllExtensions():
filtered.append(filename)
return filtered
def _FilterExcludedFiles(filenames):
"""Filters out files listed in the --exclude command line switch. File paths
in the switch are evaluated relative to the current working directory
"""
exclude_paths = [os.path.abspath(f) for f in _excludes]
return [f for f in filenames if os.path.abspath(f) not in exclude_paths]
def main():
filenames = ParseArguments(sys.argv[1:])
backup_err = sys.stderr
try:
# Change stderr to write with replacement characters so we don't die
# if we try to print something containing non-ASCII characters.
sys.stderr = codecs.StreamReader(sys.stderr, 'replace')
_cpplint_state.ResetErrorCounts()
for filename in filenames:
ProcessFile(filename, _cpplint_state.verbose_level)
_cpplint_state.PrintErrorCounts()
if _cpplint_state.output_format == 'junit':
sys.stderr.write(_cpplint_state.FormatJUnitXML())
finally:
sys.stderr = backup_err
sys.exit(_cpplint_state.error_count > 0)
if __name__ == '__main__':
main()
| mit |
wylwang/vnpy | archive/vn.lts_old/pyscript/l2/generate_struct.py | 73 | 1370 | # encoding: UTF-8
__author__ = 'CHENXY'
from l2_data_type import *
def main():
"""主函数"""
fcpp = open('SecurityFtdcL2MDUserApiStruct.h', 'r')
fpy = open('l2_struct.py', 'w')
fpy.write('# encoding: UTF-8\n')
fpy.write('\n')
fpy.write('structDict = {}\n')
fpy.write('\n')
for line in fcpp:
        # Comment on a struct declaration
if '///' in line and '\t' not in line:
py_line = '#' + line[3:]
        # Comment on a struct member
elif '\t///' in line:
py_line = '#' + line[4:]
        # Struct declaration
elif 'struct' in line:
content = line.split(' ')
name = content[1].replace('\n','')
py_line = '%s = {}\n' % name
        # Struct member
elif '\t' in line:
content = line.split('\t')
typedef = content[1]
type_ = typedefDict[typedef]
variable = content[2].replace(';\n', "")
py_line = '%s["%s"] = "%s"\n' % (name, variable, type_)
        # End of struct
elif '}' in line:
py_line = "structDict['%s'] = %s\n\n" % (name, name)
        # Start of struct
elif '{' in line:
py_line = ''
        # Other lines
else:
py_line = '\n'
fpy.write(py_line.decode('gbk').encode('utf-8'))
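# Illustrative transformation this script is meant to perform (assumed header
# shape with tab-separated member lines; not data bundled here):
#   "struct CSecurityFtdcL2MarketDataField"  ->  CSecurityFtdcL2MarketDataField = {}
#   "<tab>TSecurityFtdcPriceType<tab>LastPrice;"  ->  CSecurityFtdcL2MarketDataField["LastPrice"] = "float"
#   "};"  ->  structDict['CSecurityFtdcL2MarketDataField'] = CSecurityFtdcL2MarketDataField
# where "float" stands for whatever typedefDict maps TSecurityFtdcPriceType to.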
if __name__ == '__main__':
main() | mit |
nhuntwalker/astroML | astroML/datasets/sdss_galaxy_colors.py | 3 | 2595 | from __future__ import print_function, division
import os
import numpy as np
from . import get_data_home
from .tools import sql_query
SPECCLASS = ['UNKNOWN', 'STAR', 'GALAXY', 'QSO',
'HIZ_QSO', 'SKY', 'STAR_LATE', 'GAL_EM']
NOBJECTS = 50000
GAL_COLORS_DTYPE = [('u', float),
('g', float),
('r', float),
('i', float),
('z', float),
('specClass', int),
('redshift', float),
('redshift_err', float)]
ARCHIVE_FILE = 'sdss_galaxy_colors.npy'
def fetch_sdss_galaxy_colors(data_home=None, download_if_missing=True):
"""Loader for SDSS galaxy colors.
This function directly queries the sdss SQL database at
http://cas.sdss.org/
Parameters
----------
data_home : optional, default=None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/astroML_data' subfolders.
download_if_missing : optional, default=True
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
    data : recarray, shape = (50000,)
record array containing magnitudes and redshift for each galaxy
"""
data_home = get_data_home(data_home)
if not os.path.exists(data_home):
os.makedirs(data_home)
archive_file = os.path.join(data_home, ARCHIVE_FILE)
query_text = ('\n'.join(
("SELECT TOP %i" % NOBJECTS,
" p.u, p.g, p.r, p.i, p.z, s.specClass, s.z, s.zerr",
"FROM PhotoObj AS p",
" JOIN SpecObj AS s ON s.bestobjid = p.objid",
"WHERE ",
" p.u BETWEEN 0 AND 19.6",
" AND p.g BETWEEN 0 AND 20",
" AND s.specClass > 1 -- not UNKNOWN or STAR",
" AND s.specClass <> 5 -- not SKY",
" AND s.specClass <> 6 -- not STAR_LATE")))
if not os.path.exists(archive_file):
if not download_if_missing:
raise IOError('data not present on disk. '
'set download_if_missing=True to download')
print("querying for %i objects" % NOBJECTS)
print(query_text)
output = sql_query(query_text)
print("finished.")
data = np.loadtxt(output, delimiter=',',
skiprows=1, dtype=GAL_COLORS_DTYPE)
np.save(archive_file, data)
else:
data = np.load(archive_file)
return data
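# Hedged usage sketch (not part of the module): fetch the catalog and select
# spectroscopic galaxies using the SPECCLASS list defined above.
#   data = fetch_sdss_galaxy_colors()
#   galaxies = data[data['specClass'] == SPECCLASS.index('GALAXY')]
#   u_minus_g = galaxies['u'] - galaxies['g']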
| bsd-2-clause |
GdZ/scriptfile | software/googleAppEngine/lib/django_1_3/django/contrib/auth/__init__.py | 151 | 4560 | import datetime
from warnings import warn
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
from django.contrib.auth.signals import user_logged_in, user_logged_out
SESSION_KEY = '_auth_user_id'
BACKEND_SESSION_KEY = '_auth_user_backend'
REDIRECT_FIELD_NAME = 'next'
def load_backend(path):
i = path.rfind('.')
module, attr = path[:i], path[i+1:]
try:
mod = import_module(module)
except ImportError, e:
raise ImproperlyConfigured('Error importing authentication backend %s: "%s"' % (path, e))
except ValueError, e:
raise ImproperlyConfigured('Error importing authentication backends. Is AUTHENTICATION_BACKENDS a correctly defined list or tuple?')
try:
cls = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" authentication backend' % (module, attr))
if not hasattr(cls, "supports_object_permissions"):
warn("Authentication backends without a `supports_object_permissions` attribute are deprecated. Please define it in %s." % cls,
DeprecationWarning)
cls.supports_object_permissions = False
if not hasattr(cls, 'supports_anonymous_user'):
warn("Authentication backends without a `supports_anonymous_user` attribute are deprecated. Please define it in %s." % cls,
DeprecationWarning)
cls.supports_anonymous_user = False
if not hasattr(cls, 'supports_inactive_user'):
warn("Authentication backends without a `supports_inactive_user` attribute are deprecated. Please define it in %s." % cls,
PendingDeprecationWarning)
cls.supports_inactive_user = False
return cls()
def get_backends():
from django.conf import settings
backends = []
for backend_path in settings.AUTHENTICATION_BACKENDS:
backends.append(load_backend(backend_path))
if not backends:
raise ImproperlyConfigured('No authentication backends have been defined. Does AUTHENTICATION_BACKENDS contain anything?')
return backends
def authenticate(**credentials):
"""
If the given credentials are valid, return a User object.
"""
for backend in get_backends():
try:
user = backend.authenticate(**credentials)
except TypeError:
# This backend doesn't accept these credentials as arguments. Try the next one.
continue
if user is None:
continue
# Annotate the user object with the path of the backend.
user.backend = "%s.%s" % (backend.__module__, backend.__class__.__name__)
return user
def login(request, user):
"""
Persist a user id and a backend in the request. This way a user doesn't
have to reauthenticate on every request.
"""
if user is None:
user = request.user
# TODO: It would be nice to support different login methods, like signed cookies.
if SESSION_KEY in request.session:
if request.session[SESSION_KEY] != user.id:
# To avoid reusing another user's session, create a new, empty
# session if the existing session corresponds to a different
# authenticated user.
request.session.flush()
else:
request.session.cycle_key()
request.session[SESSION_KEY] = user.id
request.session[BACKEND_SESSION_KEY] = user.backend
if hasattr(request, 'user'):
request.user = user
user_logged_in.send(sender=user.__class__, request=request, user=user)
def logout(request):
"""
Removes the authenticated user's ID from the request and flushes their
session data.
"""
# Dispatch the signal before the user is logged out so the receivers have a
# chance to find out *who* logged out.
user = getattr(request, 'user', None)
if hasattr(user, 'is_authenticated') and not user.is_authenticated():
user = None
user_logged_out.send(sender=user.__class__, request=request, user=user)
request.session.flush()
if hasattr(request, 'user'):
from django.contrib.auth.models import AnonymousUser
request.user = AnonymousUser()
def get_user(request):
from django.contrib.auth.models import AnonymousUser
try:
user_id = request.session[SESSION_KEY]
backend_path = request.session[BACKEND_SESSION_KEY]
backend = load_backend(backend_path)
user = backend.get_user(user_id) or AnonymousUser()
except KeyError:
user = AnonymousUser()
return user
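# Hedged usage sketch (illustrative only, not part of Django): a typical
# view-level flow built on the helpers above; the credential keyword names
# depend on the configured authentication backends.
#   user = authenticate(username='alice', password='secret')
#   if user is not None and user.is_active:
#       login(request, user)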
| mit |
treejames/viewfinder | backend/www/test/photo_store_test.py | 13 | 11195 | #!/usr/bin/env python
#
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Test PhotoStore GET and PUT.
"""
__authors__ = ['[email protected] (Spencer Kimball)',
'[email protected] (Andy Kimball)']
import base64
import hashlib
import json
import time
from functools import partial
from viewfinder.backend.base import util
from viewfinder.backend.base.message import Message
from viewfinder.backend.base.testing import async_test
from viewfinder.backend.db.friend import Friend
from viewfinder.backend.db.photo import Photo
from viewfinder.backend.db.user import User
from viewfinder.backend.www import json_schema
from viewfinder.backend.www.test import service_base_test
class PhotoStoreTestCase(service_base_test.ServiceBaseTestCase):
def setUp(self):
super(PhotoStoreTestCase, self).setUp()
self._CreateSimpleTestAssets()
def testUploadAndGetPut(self):
"""Upload a photo, PUT the photo image data, then access it in
various ways.
"""
episode_id = self._episode_id
photo_id = self._photo_ids[0]
orig_image_data = 'original image data' # Same as used in self._UploadEpisode
self._PutPhotoAndVerify(self._cookie, 200, episode_id, photo_id, '.o', orig_image_data)
self._PutPhotoAndVerify(self._cookie, 200, episode_id, photo_id, '.f', 'full image data')
self._PutPhotoAndVerify(self._cookie, 200, episode_id, photo_id, '.t', 'thumbnail image data')
# Test legit downloads.
self._GetPhotoAndVerify(self._cookie, 200, episode_id, photo_id, '.o')
self._GetPhotoAndVerify(self._cookie, 200, episode_id, photo_id, '.f')
self._GetPhotoAndVerify(self._cookie, 200, episode_id, photo_id, '.t')
# Try get and put with no cookie.
self._PutPhotoAndVerify(None, 401, episode_id, photo_id, '.o', orig_image_data)
self._GetPhotoAndVerify(None, 401, episode_id, photo_id, '.o')
# Try get and put of missing photo.
self._PutPhotoAndVerify(self._cookie, 404, episode_id, 'p-unk', '.m', orig_image_data)
self._GetPhotoAndVerify(self._cookie, 404, episode_id, 'p-unk', '.m')
# Try get and put without permission.
self._PutPhotoAndVerify(self._cookie2, 404, episode_id, photo_id, '.o', orig_image_data)
self._GetPhotoAndVerify(self._cookie2, 404, episode_id, photo_id, '.o')
# Omit the Content-MD5 header.
response = self._PutPhoto(self._cookie, episode_id, photo_id, '.o', orig_image_data)
assert response.code == 400, response
# Try to use a non well-formed Content-MD5 header.
response = self._PutPhoto(self._cookie, episode_id, photo_id, '.o', orig_image_data,
content_md5='not well-formed')
assert response.code == 400, response
# Try to use a Content-MD5 header that does not match the data.
response = self._PutPhoto(self._cookie, episode_id, photo_id, '.o', orig_image_data,
content_md5=util.ComputeMD5Base64('mismatched md5'))
assert response.code == 400, response
# Try put with user that is not episode owner.
new_vp_id, new_ep_ids = self._tester.ShareNew(self._cookie,
[(episode_id, [photo_id])],
[self._user2.user_id])
self._PutPhotoAndVerify(self._cookie2, 403, new_ep_ids[0], photo_id, '.o', orig_image_data)
# Try get of photo using removed follower.
self._tester.RemoveFollowers(self._cookie2, new_vp_id, [self._user2.user_id])
self._GetPhotoAndVerify(self._cookie2, 404, new_ep_ids[0], photo_id, '.o')
# Try get and put of unshared photo.
self._tester.Unshare(self._cookie, new_vp_id, [(new_ep_ids[0], [photo_id])])
self._PutPhotoAndVerify(self._cookie, 403, new_ep_ids[0], photo_id, '.o', orig_image_data)
self._GetPhotoAndVerify(self._cookie, 403, new_ep_ids[0], photo_id, '.o')
# Try get and put of photo that has been shared again in order to override unshare.
self._tester.ShareExisting(self._cookie, new_vp_id, [(self._episode_id, self._photo_ids)])
self._PutPhotoAndVerify(self._cookie, 200, self._episode_id, self._photo_ids[0], '.o', orig_image_data)
self._GetPhotoAndVerify(self._cookie, 200, self._episode_id, self._photo_ids[0], '.o')
# Try get and put of hidden photo.
self._tester.HidePhotos(self._cookie, [(self._episode_id, self._photo_ids)])
self._PutPhotoAndVerify(self._cookie, 200, self._episode_id, self._photo_ids[0], '.o', orig_image_data)
self._GetPhotoAndVerify(self._cookie, 200, self._episode_id, self._photo_ids[0], '.o')
# Try get and put of removed photo.
self._tester.RemovePhotos(self._cookie, [(self._episode_id, self._photo_ids)])
self._PutPhotoAndVerify(self._cookie, 200, self._episode_id, self._photo_ids[0], '.o', orig_image_data)
self._GetPhotoAndVerify(self._cookie, 200, self._episode_id, self._photo_ids[0], '.o')
def testErrorResponse(self):
"""Test that error response is always in JSON format."""
response = self._PutPhoto(self._cookie, 'unk', 'unk', '.o', '')
self.assertEqual(json.loads(response.body), {"error": {"message": "Missing Content-MD5 header."}})
response = self._GetPhoto(self._cookie, 'unk', 'unk', '.o')
self.assertEqual(json.loads(response.body),
{u'error': {u'message': u'Photo was not found or you do not have permission to view it.'}})
def testReUpload(self):
"""Upload a new photo and attempt to re-upload using If-None-Match
header to simulate a phone reinstall where the client uses the
/photos/<photo_id> interface to get a redirect to a PUT URL. In
the case of the photo existing, the Etag should match and result
in a 304 response, saving the client the upload bandwidth.
"""
full_image_data = 'full image data'
for photo_id in self._photo_ids:
response = self._PutPhoto(self._cookie, self._episode_id, photo_id, '.f', full_image_data,
content_md5=util.ComputeMD5Base64(full_image_data),
etag=util.ComputeMD5Hex(full_image_data))
self.assertEqual(response.code, 200)
for photo_id in self._photo_ids:
response = self._PutPhoto(self._cookie, self._episode_id, photo_id, '.f', full_image_data,
content_md5=util.ComputeMD5Base64(full_image_data),
etag='"%s"' % util.ComputeMD5Hex(full_image_data))
self.assertEqual(response.code, 304)
response = self._PutPhoto(self._cookie, self._episode_id, photo_id, '.f', full_image_data,
content_md5=util.ComputeMD5Base64(full_image_data),
etag='*')
self.assertEqual(response.code, 304)
def testUploadMismatch(self):
"""Upload photo image data with a different MD5 than was originally
provided to upload_episode. Because the photo image data does not
yet exist, the metadata should be overwritten with the new values.
Then try to upload a different MD5 again, expecting an error this
time.
"""
for attr_name, suffix, image_data in [('tn_md5', '.t', 'new thumbnail image data'),
('med_md5', '.m', 'new medium image data'),
('full_md5', '.f', 'new full image data'),
('orig_md5', '.o', 'new original image data')]:
# Expect success on first upload.
response = self._PutPhoto(self._cookie, self._episode_id, self._photo_ids[0], suffix,
image_data, content_md5=util.ComputeMD5Base64(image_data),
etag=util.ComputeMD5Hex(image_data))
self.assertEqual(response.code, 200)
# Validate that the photo's MD5 was updated.
ph_dict = {'photo_id': self._photo_ids[0],
attr_name: util.ComputeMD5Hex(image_data)}
self._validator.ValidateUpdateDBObject(Photo, **ph_dict)
# Expect failure on second upload with different MD5.
new_image_data = 'really ' + image_data
response = self._PutPhoto(self._cookie, self._episode_id, self._photo_ids[0], suffix,
new_image_data, content_md5=util.ComputeMD5Base64(new_image_data),
etag=util.ComputeMD5Hex(new_image_data))
self.assertEqual(response.code, 400)
def testProspectiveCookie(self):
"""Gets photos using a prospective user cookie."""
orig_image_data = 'original image data' # Same as used in self._UploadEpisode
self._PutPhotoAndVerify(self._cookie, 200, self._episode_id, self._photo_ids[0], '.o', orig_image_data)
prospective_user, vp_id, ep_id = self._CreateProspectiveUser()
prospective_cookie = self._tester.GetSecureUserCookie(user_id=prospective_user.user_id,
device_id=prospective_user.webapp_dev_id,
user_name=None,
viewpoint_id=vp_id)
self._GetPhotoAndVerify(prospective_cookie, 200, ep_id, self._photo_ids[0], '.o')
# Share again to the prospective user to create a second viewpoint.
vp_id2, ep_ids2 = self._tester.ShareNew(self._cookie,
[(self._episode_id, self._photo_ids)],
['Email:[email protected]'])
# Now try to get the photo using the prospective cookie that is keyed to the first viewpoint.
response = self._GetPhoto(prospective_cookie, ep_ids2[0], self._photo_ids[0], '.o')
self.assertEqual(response.code, 403)
def _GetPhotoAndVerify(self, user_cookie, exp_code, episode_id, photo_id, suffix):
"""Call _GetPhoto and verify return code equals "exp_code"."""
response = self._GetPhoto(user_cookie, episode_id, photo_id, suffix)
self.assertEqual(response.code, exp_code)
if response.code == 200:
self.assertEqual(response.headers['Cache-Control'], 'private,max-age=31536000')
return response
def _PutPhotoAndVerify(self, user_cookie, exp_code, episode_id, photo_id, suffix, image_data):
"""Call _PutPhoto and verify return code equals "exp_code"."""
response = self._PutPhoto(user_cookie, episode_id, photo_id, suffix, image_data,
content_md5=util.ComputeMD5Base64(image_data))
self.assertEqual(response.code, exp_code)
return response
def _GetPhoto(self, user_cookie, episode_id, photo_id, suffix):
"""Sends a GET request to the photo store URL for the specified
photo and user cookie.
"""
return self._tester.GetPhotoImage(user_cookie, episode_id, photo_id, suffix)
def _PutPhoto(self, user_cookie, episode_id, photo_id, suffix, image_data,
etag=None, content_md5=None):
"""Sends a PUT request to the photo store URL for the specified
photo and user cookie. The put request body is set to "image_data".
"""
return self._tester.PutPhotoImage(user_cookie, episode_id, photo_id, suffix, image_data,
etag=etag, content_md5=content_md5)
| apache-2.0 |
jmcarp/django | django/core/management/commands/shell.py | 492 | 3951 | import os
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Runs a Python interactive interpreter. Tries to use IPython or bpython, if one of them is available."
requires_system_checks = False
shells = ['ipython', 'bpython']
def add_arguments(self, parser):
parser.add_argument('--plain', action='store_true', dest='plain',
help='Tells Django to use plain Python, not IPython or bpython.')
parser.add_argument('--no-startup', action='store_true', dest='no_startup',
help='When using plain Python, ignore the PYTHONSTARTUP environment variable and ~/.pythonrc.py script.')
parser.add_argument('-i', '--interface', choices=self.shells, dest='interface',
help='Specify an interactive interpreter interface. Available options: "ipython" and "bpython"')
def _ipython_pre_011(self):
"""Start IPython pre-0.11"""
from IPython.Shell import IPShell
shell = IPShell(argv=[])
shell.mainloop()
def _ipython_pre_100(self):
"""Start IPython pre-1.0.0"""
from IPython.frontend.terminal.ipapp import TerminalIPythonApp
app = TerminalIPythonApp.instance()
app.initialize(argv=[])
app.start()
def _ipython(self):
"""Start IPython >= 1.0"""
from IPython import start_ipython
start_ipython(argv=[])
def ipython(self):
"""Start any version of IPython"""
for ip in (self._ipython, self._ipython_pre_100, self._ipython_pre_011):
try:
ip()
except ImportError:
pass
else:
return
# no IPython, raise ImportError
raise ImportError("No IPython")
def bpython(self):
import bpython
bpython.embed()
def run_shell(self, shell=None):
available_shells = [shell] if shell else self.shells
for shell in available_shells:
try:
return getattr(self, shell)()
except ImportError:
pass
raise ImportError
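    # Illustrative call (assumed): run_shell(shell='ipython') tries only the
    # IPython branch; the ImportError it raises when IPython is missing is what
    # handle() below turns into the plain code.interact() fallback.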
def handle(self, **options):
try:
if options['plain']:
# Don't bother loading IPython, because the user wants plain Python.
raise ImportError
self.run_shell(shell=options['interface'])
except ImportError:
import code
# Set up a dictionary to serve as the environment for the shell, so
# that tab completion works on objects that are imported at runtime.
# See ticket 5082.
imported_objects = {}
try: # Try activating rlcompleter, because it's handy.
import readline
except ImportError:
pass
else:
# We don't have to wrap the following import in a 'try', because
# we already know 'readline' was imported successfully.
import rlcompleter
readline.set_completer(rlcompleter.Completer(imported_objects).complete)
readline.parse_and_bind("tab:complete")
# We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
# conventions and get $PYTHONSTARTUP first then .pythonrc.py.
if not options['no_startup']:
for pythonrc in (os.environ.get("PYTHONSTARTUP"), '~/.pythonrc.py'):
if not pythonrc:
continue
pythonrc = os.path.expanduser(pythonrc)
if not os.path.isfile(pythonrc):
continue
try:
with open(pythonrc) as handle:
exec(compile(handle.read(), pythonrc, 'exec'), imported_objects)
except NameError:
pass
code.interact(local=imported_objects)
| bsd-3-clause |
javierTerry/odoo | addons/payment_ogone/data/ogone.py | 395 | 30321 | # -*- coding: utf-8 -*-
OGONE_ERROR_MAP = {
'0020001001': "Authorization failed, please retry",
'0020001002': "Authorization failed, please retry",
'0020001003': "Authorization failed, please retry",
'0020001004': "Authorization failed, please retry",
'0020001005': "Authorization failed, please retry",
'0020001006': "Authorization failed, please retry",
'0020001007': "Authorization failed, please retry",
'0020001008': "Authorization failed, please retry",
'0020001009': "Authorization failed, please retry",
'0020001010': "Authorization failed, please retry",
'0030001999': "Our payment system is currently under maintenance, please try later",
'0050001005': "Expiry date error",
'0050001007': "Requested Operation code not allowed",
'0050001008': "Invalid delay value",
'0050001010': "Input date in invalid format",
'0050001013': "Unable to parse socket input stream",
'0050001014': "Error in parsing stream content",
'0050001015': "Currency error",
'0050001016': "Transaction still posted at end of wait",
'0050001017': "Sync value not compatible with delay value",
'0050001019': "Transaction duplicate of a pre-existing transaction",
'0050001020': "Acceptation code empty while required for the transaction",
'0050001024': "Maintenance acquirer differs from original transaction acquirer",
'0050001025': "Maintenance merchant differs from original transaction merchant",
'0050001028': "Maintenance operation not accurate for the original transaction",
'0050001031': "Host application unknown for the transaction",
'0050001032': "Unable to perform requested operation with requested currency",
'0050001033': "Maintenance card number differs from original transaction card number",
'0050001034': "Operation code not allowed",
'0050001035': "Exception occurred in socket input stream treatment",
'0050001036': "Card length does not correspond to an acceptable value for the brand",
'0050001036': "Card length does not correspond to an acceptable value for the brand",
'0050001068': "A technical problem occurred, please contact helpdesk",
'0050001069': "Invalid check for CardID and Brand",
'0050001070': "A technical problem occurred, please contact helpdesk",
'0050001116': "Unknown origin IP",
'0050001117': "No origin IP detected",
'0050001118': "Merchant configuration problem, please contact support",
'10001001': "Communication failure",
'10001002': "Communication failure",
'10001003': "Communication failure",
'10001004': "Communication failure",
'10001005': "Communication failure",
'20001001': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001002': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001003': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001004': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001005': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001006': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001007': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001008': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001009': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001010': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001101': "A technical problem occurred, please contact helpdesk",
'20001105': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001111': "A technical problem occurred, please contact helpdesk",
'20002001': "Origin for the response of the bank can not be checked",
'20002002': "Beneficiary account number has been modified during processing",
'20002003': "Amount has been modified during processing",
'20002004': "Currency has been modified during processing",
'20002005': "No feedback from the bank server has been detected",
'30001001': "Payment refused by the acquirer",
'30001002': "Duplicate request",
'30001010': "A technical problem occurred, please contact helpdesk",
'30001011': "A technical problem occurred, please contact helpdesk",
'30001012': "Card black listed - Contact acquirer",
'30001015': "Your merchant's acquirer is temporarily unavailable, please try later or choose another payment method.",
'30001051': "A technical problem occurred, please contact helpdesk",
'30001054': "A technical problem occurred, please contact helpdesk",
'30001057': "Your merchant's acquirer is temporarily unavailable, please try later or choose another payment method.",
'30001058': "Your merchant's acquirer is temporarily unavailable, please try later or choose another payment method.",
'30001060': "Aquirer indicates that a failure occured during payment processing",
'30001070': "RATEPAY Invalid Response Type (Failure)",
'30001071': "RATEPAY Missing Mandatory status code field (failure)",
'30001072': "RATEPAY Missing Mandatory Result code field (failure)",
'30001073': "RATEPAY Response parsing Failed",
'30001090': "CVC check required by front end and returned invalid by acquirer",
'30001091': "ZIP check required by front end and returned invalid by acquirer",
'30001092': "Address check required by front end and returned as invalid by acquirer.",
'30001100': "Unauthorized buyer's country",
'30001101': "IP country <> card country",
'30001102': "Number of different countries too high",
'30001103': "unauthorized card country",
'30001104': "unauthorized ip address country",
'30001105': "Anonymous proxy",
'30001110': "If the problem persists, please contact Support, or go to paysafecard's card balance page (https://customer.cc.at.paysafecard.com/psccustomer/GetWelcomePanelServlet?language=en) to see when the amount reserved on your card will be available again.",
'30001120': "IP address in merchant's black list",
'30001130': "BIN in merchant's black list",
'30001131': "Wrong BIN for 3xCB",
'30001140': "Card in merchant's card blacklist",
'30001141': "Email in blacklist",
'30001142': "Passenger name in blacklist",
'30001143': "Card holder name in blacklist",
'30001144': "Passenger name different from owner name",
'30001145': "Time to departure too short",
'30001149': "Card Configured in Card Supplier Limit for another relation (CSL)",
'30001150': "Card not configured in the system for this customer (CSL)",
'30001151': "REF1 not allowed for this relationship (Contract number",
'30001152': "Card/Supplier Amount limit reached (CSL)",
'30001153': "Card not allowed for this supplier (Date out of contract bounds)",
'30001154': "You have reached the usage limit allowed",
'30001155': "You have reached the usage limit allowed",
'30001156': "You have reached the usage limit allowed",
'30001157': "Unauthorized IP country for itinerary",
'30001158': "email usage limit reached",
'30001159': "Unauthorized card country/IP country combination",
'30001160': "Postcode in highrisk group",
'30001161': "generic blacklist match",
'30001162': "Billing Address is a PO Box",
'30001180': "maximum scoring reached",
'30001997': "Authorization canceled by simulation",
'30001998': "A technical problem occurred, please try again.",
'30001999': "Your merchant's acquirer is temporarily unavailable, please try later or choose another payment method.",
'30002001': "Payment refused by the financial institution",
'30002001': "Payment refused by the financial institution",
'30021001': "Call acquirer support call number.",
'30022001': "Payment must be approved by the acquirer before execution.",
'30031001': "Invalid merchant number.",
'30041001': "Retain card.",
'30051001': "Authorization declined",
'30071001': "Retain card - special conditions.",
'30121001': "Invalid transaction",
'30131001': "Invalid amount",
'30131002': "You have reached the total amount allowed",
'30141001': "Invalid card number",
'30151001': "Unknown acquiring institution.",
'30171001': "Payment method cancelled by the buyer",
'30171002': "The maximum time allowed is elapsed.",
'30191001': "Try again later.",
'30201001': "A technical problem occurred, please contact helpdesk",
'30301001': "Invalid format",
'30311001': "Unknown acquirer ID.",
'30331001': "Card expired.",
'30341001': "Suspicion of fraud.",
'30341002': "Suspicion of fraud (3rdMan)",
'30341003': "Suspicion of fraud (Perseuss)",
'30341004': "Suspicion of fraud (ETHOCA)",
'30381001': "A technical problem occurred, please contact helpdesk",
'30401001': "Invalid function.",
'30411001': "Lost card.",
'30431001': "Stolen card, pick up",
'30511001': "Insufficient funds.",
'30521001': "No Authorization. Contact the issuer of your card.",
'30541001': "Card expired.",
'30551001': "Invalid PIN.",
'30561001': "Card not in authorizer's database.",
'30571001': "Transaction not permitted on card.",
'30581001': "Transaction not allowed on this terminal",
'30591001': "Suspicion of fraud.",
'30601001': "The merchant must contact the acquirer.",
'30611001': "Amount exceeds card ceiling.",
'30621001': "Restricted card.",
'30631001': "Security policy not respected.",
'30641001': "Amount changed from ref. trn.",
'30681001': "Tardy response.",
'30751001': "PIN entered incorrectly too often",
'30761001': "Card holder already contesting.",
'30771001': "PIN entry required.",
'30811001': "Message flow error.",
'30821001': "Authorization center unavailable",
'30831001': "Authorization center unavailable",
'30901001': "Temporary system shutdown.",
'30911001': "Acquirer unavailable.",
'30921001': "Invalid card type for acquirer.",
'30941001': "Duplicate transaction",
'30961001': "Processing temporarily not possible",
'30971001': "A technical problem occurred, please contact helpdesk",
'30981001': "A technical problem occurred, please contact helpdesk",
'31011001': "Unknown acceptance code",
'31021001': "Invalid currency",
'31031001': "Acceptance code missing",
'31041001': "Inactive card",
'31051001': "Merchant not active",
'31061001': "Invalid expiration date",
'31071001': "Interrupted host communication",
'31081001': "Card refused",
'31091001': "Invalid password",
    '31101001': "Plafond transaction (majoré du bonus) dépassé",  # FR: transaction ceiling (plus bonus) exceeded
    '31111001': "Plafond mensuel (majoré du bonus) dépassé",  # FR: monthly ceiling (plus bonus) exceeded
    '31121001': "Plafond centre de facturation dépassé",  # FR: billing centre ceiling exceeded
    '31131001': "Plafond entreprise dépassé",  # FR: company ceiling exceeded
    '31141001': "Code MCC du fournisseur non autorisé pour la carte",  # FR: supplier MCC code not allowed for the card
    '31151001': "Numéro SIRET du fournisseur non autorisé pour la carte",  # FR: supplier SIRET number not allowed for the card
'31161001': "This is not a valid online banking account",
'32001004': "A technical problem occurred, please try again.",
    '34011001': "Bezahlung mit RatePAY nicht möglich.",  # DE: payment with RatePAY not possible
'39991001': "A technical problem occurred, please contact the helpdesk of your acquirer",
'40001001': "A technical problem occurred, please try again.",
'40001002': "A technical problem occurred, please try again.",
'40001003': "A technical problem occurred, please try again.",
'40001004': "A technical problem occurred, please try again.",
'40001005': "A technical problem occurred, please try again.",
'40001006': "A technical problem occurred, please try again.",
'40001007': "A technical problem occurred, please try again.",
'40001008': "A technical problem occurred, please try again.",
'40001009': "A technical problem occurred, please try again.",
'40001010': "A technical problem occurred, please try again.",
'40001011': "A technical problem occurred, please contact helpdesk",
'40001012': "Your merchant's acquirer is temporarily unavailable, please try later or choose another payment method.",
'40001013': "A technical problem occurred, please contact helpdesk",
'40001016': "A technical problem occurred, please contact helpdesk",
'40001018': "A technical problem occurred, please try again.",
'40001019': "Sorry, an error occurred during processing. Please retry the operation (use back button of the browser). If problem persists, contact your merchant's helpdesk.",
'40001020': "Sorry, an error occurred during processing. Please retry the operation (use back button of the browser). If problem persists, contact your merchant's helpdesk.",
'40001050': "A technical problem occurred, please contact helpdesk",
'40001133': "Authentication failed, the signature of your bank access control server is incorrect",
'40001134': "Authentication failed, please retry or cancel.",
'40001135': "Authentication temporary unavailable, please retry or cancel.",
'40001136': "Technical problem with your browser, please retry or cancel",
'40001137': "Your bank access control server is temporary unavailable, please retry or cancel",
'40001998': "Temporary technical problem. Please retry a little bit later.",
'50001001': "Unknown card type",
'50001002': "Card number format check failed for given card number.",
'50001003': "Merchant data error",
'50001004': "Merchant identification missing",
'50001005': "Expiry date error",
'50001006': "Amount is not a number",
'50001007': "A technical problem occurred, please contact helpdesk",
'50001008': "A technical problem occurred, please contact helpdesk",
'50001009': "A technical problem occurred, please contact helpdesk",
'50001010': "A technical problem occurred, please contact helpdesk",
'50001011': "Brand not supported for that merchant",
'50001012': "A technical problem occurred, please contact helpdesk",
'50001013': "A technical problem occurred, please contact helpdesk",
'50001014': "A technical problem occurred, please contact helpdesk",
'50001015': "Invalid currency code",
'50001016': "A technical problem occurred, please contact helpdesk",
'50001017': "A technical problem occurred, please contact helpdesk",
'50001018': "A technical problem occurred, please contact helpdesk",
'50001019': "A technical problem occurred, please contact helpdesk",
'50001020': "A technical problem occurred, please contact helpdesk",
'50001021': "A technical problem occurred, please contact helpdesk",
'50001022': "A technical problem occurred, please contact helpdesk",
'50001023': "A technical problem occurred, please contact helpdesk",
'50001024': "A technical problem occurred, please contact helpdesk",
'50001025': "A technical problem occurred, please contact helpdesk",
'50001026': "A technical problem occurred, please contact helpdesk",
'50001027': "A technical problem occurred, please contact helpdesk",
'50001028': "A technical problem occurred, please contact helpdesk",
'50001029': "A technical problem occurred, please contact helpdesk",
'50001030': "A technical problem occurred, please contact helpdesk",
'50001031': "A technical problem occurred, please contact helpdesk",
'50001032': "A technical problem occurred, please contact helpdesk",
'50001033': "A technical problem occurred, please contact helpdesk",
'50001034': "A technical problem occurred, please contact helpdesk",
'50001035': "A technical problem occurred, please contact helpdesk",
'50001036': "Card length does not correspond to an acceptable value for the brand",
'50001037': "Purchasing card number for a regular merchant",
'50001038': "Non Purchasing card for a Purchasing card merchant",
'50001039': "Details sent for a non-Purchasing card merchant, please contact helpdesk",
'50001040': "Details not sent for a Purchasing card transaction, please contact helpdesk",
'50001041': "Payment detail validation failed",
'50001042': "Given transactions amounts (tax,discount,shipping,net,etc…) do not compute correctly together",
'50001043': "A technical problem occurred, please contact helpdesk",
'50001044': "No acquirer configured for this operation",
'50001045': "No UID configured for this operation",
'50001046': "Operation not allowed for the merchant",
'50001047': "A technical problem occurred, please contact helpdesk",
'50001048': "A technical problem occurred, please contact helpdesk",
'50001049': "A technical problem occurred, please contact helpdesk",
'50001050': "A technical problem occurred, please contact helpdesk",
'50001051': "A technical problem occurred, please contact helpdesk",
'50001052': "A technical problem occurred, please contact helpdesk",
'50001053': "A technical problem occurred, please contact helpdesk",
'50001054': "Card number incorrect or incompatible",
'50001055': "A technical problem occurred, please contact helpdesk",
'50001056': "A technical problem occurred, please contact helpdesk",
'50001057': "A technical problem occurred, please contact helpdesk",
'50001058': "A technical problem occurred, please contact helpdesk",
'50001059': "A technical problem occurred, please contact helpdesk",
'50001060': "A technical problem occurred, please contact helpdesk",
'50001061': "A technical problem occurred, please contact helpdesk",
'50001062': "A technical problem occurred, please contact helpdesk",
'50001063': "Card Issue Number does not correspond to range or not present",
'50001064': "Start Date not valid or not present",
'50001066': "Format of CVC code invalid",
'50001067': "The merchant is not enrolled for 3D-Secure",
'50001068': "The card number or account number (PAN) is invalid",
'50001069': "Invalid check for CardID and Brand",
'50001070': "The ECI value given is either not supported, or in conflict with other data in the transaction",
'50001071': "Incomplete TRN demat",
'50001072': "Incomplete PAY demat",
'50001073': "No demat APP",
'50001074': "Authorisation too old",
'50001075': "VERRes was an error message",
'50001076': "DCP amount greater than authorisation amount",
'50001077': "Details negative amount",
'50001078': "Details negative quantity",
'50001079': "Could not decode/decompress received PARes (3D-Secure)",
    '50001080': "Received PARes was an error message from ACS (3D-Secure)",
'50001081': "Received PARes format was invalid according to the 3DS specifications (3D-Secure)",
'50001082': "PAReq/PARes reconciliation failure (3D-Secure)",
'50001084': "Maximum amount reached",
'50001087': "The transaction type requires authentication, please check with your bank.",
'50001090': "CVC missing at input, but CVC check asked",
'50001091': "ZIP missing at input, but ZIP check asked",
'50001092': "Address missing at input, but Address check asked",
'50001095': "Invalid date of birth",
'50001096': "Invalid commodity code",
'50001097': "The requested currency and brand are incompatible.",
'50001111': "Data validation error",
'50001113': "This order has already been processed",
'50001114': "Error pre-payment check page access",
'50001115': "Request not received in secure mode",
'50001116': "Unknown IP address origin",
'50001117': "NO IP address origin",
'50001118': "Pspid not found or not correct",
'50001119': "Password incorrect or disabled due to numbers of errors",
'50001120': "Invalid currency",
'50001121': "Invalid number of decimals for the currency",
'50001122': "Currency not accepted by the merchant",
'50001123': "Card type not active",
'50001124': "Number of lines don't match with number of payments",
'50001125': "Format validation error",
'50001126': "Overflow in data capture requests for the original order",
'50001127': "The original order is not in a correct status",
'50001128': "missing authorization code for unauthorized order",
'50001129': "Overflow in refunds requests",
'50001130': "Error access to original order",
'50001131': "Error access to original history item",
'50001132': "The Selected Catalog is empty",
'50001133': "Duplicate request",
'50001134': "Authentication failed, please retry or cancel.",
'50001135': "Authentication temporary unavailable, please retry or cancel.",
'50001136': "Technical problem with your browser, please retry or cancel",
'50001137': "Your bank access control server is temporary unavailable, please retry or cancel",
'50001150': "Fraud Detection, Technical error (IP not valid)",
'50001151': "Fraud detection : technical error (IPCTY unknown or error)",
'50001152': "Fraud detection : technical error (CCCTY unknown or error)",
'50001153': "Overflow in redo-authorisation requests",
'50001170': "Dynamic BIN check failed",
'50001171': "Dynamic country check failed",
'50001172': "Error in Amadeus signature",
'50001174': "Card Holder Name is too long",
'50001175': "Name contains invalid characters",
'50001176': "Card number is too long",
'50001177': "Card number contains non-numeric info",
'50001178': "Card Number Empty",
'50001179': "CVC too long",
'50001180': "CVC contains non-numeric info",
'50001181': "Expiration date contains non-numeric info",
'50001182': "Invalid expiration month",
'50001183': "Expiration date must be in the future",
'50001184': "SHA Mismatch",
'50001205': "Missing mandatory fields for billing address.",
'50001206': "Missing mandatory field date of birth.",
'50001207': "Missing required shopping basket details.",
'50001208': "Missing social security number",
'50001209': "Invalid country code",
'50001210': "Missing yearly salary",
'50001211': "Missing gender",
'50001212': "Missing email",
'50001213': "Missing IP address",
'50001214': "Missing part payment campaign ID",
'50001215': "Missing invoice number",
'50001216': "The alias must be different than the card number",
'60000001': "account number unknown",
'60000003': "not credited dd-mm-yy",
'60000005': "name/number do not correspond",
'60000007': "account number blocked",
'60000008': "specific direct debit block",
'60000009': "account number WKA",
'60000010': "administrative reason",
'60000011': "account number expired",
'60000012': "no direct debit authorisation given",
'60000013': "debit not approved",
'60000014': "double payment",
'60000018': "name/address/city not entered",
'60001001': "no original direct debit for revocation",
'60001002': "payer’s account number format error",
'60001004': "payer’s account at different bank",
'60001005': "payee’s account at different bank",
'60001006': "payee’s account number format error",
'60001007': "payer’s account number blocked",
'60001008': "payer’s account number expired",
'60001009': "payee’s account number expired",
'60001010': "direct debit not possible",
'60001011': "creditor payment not possible",
'60001012': "payer’s account number unknown WKA-number",
'60001013': "payee’s account number unknown WKA-number",
'60001014': "impermissible WKA transaction",
'60001015': "period for revocation expired",
'60001017': "reason for revocation not correct",
'60001018': "original run number not numeric",
'60001019': "payment ID incorrect",
'60001020': "amount not numeric",
'60001021': "amount zero not permitted",
'60001022': "negative amount not permitted",
'60001023': "payer and payee giro account number",
'60001025': "processing code (verwerkingscode) incorrect",
'60001028': "revocation not permitted",
'60001029': "guaranteed direct debit on giro account number",
'60001030': "NBC transaction type incorrect",
'60001031': "description too large",
'60001032': "book account number not issued",
'60001034': "book account number incorrect",
'60001035': "payer’s account number not numeric",
'60001036': "payer’s account number not eleven-proof",
'60001037': "payer’s account number not issued",
'60001039': "payer’s account number of DNB/BGC/BLA",
'60001040': "payee’s account number not numeric",
'60001041': "payee’s account number not eleven-proof",
'60001042': "payee’s account number not issued",
'60001044': "payee’s account number unknown",
'60001050': "payee’s name missing",
'60001051': "indicate payee’s bank account number instead of 3102",
'60001052': "no direct debit contract",
'60001053': "amount beyond bounds",
'60001054': "selective direct debit block",
'60001055': "original run number unknown",
'60001057': "payer’s name missing",
'60001058': "payee’s account number missing",
'60001059': "restore not permitted",
'60001060': "bank’s reference (navraaggegeven) missing",
'60001061': "BEC/GBK number incorrect",
'60001062': "BEC/GBK code incorrect",
'60001087': "book account number not numeric",
'60001090': "cancelled on request",
'60001091': "cancellation order executed",
'60001092': "cancelled instead of bended",
'60001093': "book account number is a shortened account number",
'60001094': "instructing party account number not identical with payer",
'60001095': "payee unknown GBK acceptor",
'60001097': "instructing party account number not identical with payee",
'60001099': "clearing not permitted",
'60001101': "payer’s account number not spaces",
'60001102': "PAN length not numeric",
'60001103': "PAN length outside limits",
'60001104': "track number not numeric",
'60001105': "track number not valid",
'60001106': "PAN sequence number not numeric",
'60001107': "domestic PAN not numeric",
'60001108': "domestic PAN not eleven-proof",
'60001109': "domestic PAN not issued",
'60001110': "foreign PAN not numeric",
'60001111': "card valid date not numeric",
'60001112': "book period number (boekperiodenr) not numeric",
'60001113': "transaction number not numeric",
'60001114': "transaction time not numeric",
'60001115': "transaction no valid time",
'60001116': "transaction date not numeric",
'60001117': "transaction no valid date",
'60001118': "STAN not numeric",
'60001119': "instructing party’s name missing",
'60001120': "foreign amount (bedrag-vv) not numeric",
'60001122': "rate (verrekenkoers) not numeric",
'60001125': "number of decimals (aantaldecimalen) incorrect",
'60001126': "tariff (tarifering) not B/O/S",
'60001127': "domestic costs (kostenbinnenland) not numeric",
'60001128': "domestic costs (kostenbinnenland) not higher than zero",
'60001129': "foreign costs (kostenbuitenland) not numeric",
'60001130': "foreign costs (kostenbuitenland) not higher than zero",
'60001131': "domestic costs (kostenbinnenland) not zero",
'60001132': "foreign costs (kostenbuitenland) not zero",
'60001134': "Euro record not fully filled in",
'60001135': "Client currency incorrect",
'60001136': "Amount NLG not numeric",
'60001137': "Amount NLG not higher than zero",
'60001138': "Amount NLG not equal to Amount",
'60001139': "Amount NLG incorrectly converted",
'60001140': "Amount EUR not numeric",
'60001141': "Amount EUR not greater than zero",
'60001142': "Amount EUR not equal to Amount",
'60001143': "Amount EUR incorrectly converted",
'60001144': "Client currency not NLG",
'60001145': "rate euro-vv (Koerseuro-vv) not numeric",
'60001146': "comma rate euro-vv (Kommakoerseuro-vv) incorrect",
'60001147': "acceptgiro distributor not valid",
'60001148': "Original run number and/or BRN are missing",
'60001149': "Amount/Account number/ BRN different",
'60001150': "Direct debit already revoked/restored",
'60001151': "Direct debit already reversed/revoked/restored",
'60001153': "Payer’s account number not known",
}
DATA_VALIDATION_ERROR = '50001111'
def retryable(error):
return error in [
'0020001001', '0020001002', '0020001003', '0020001004', '0020001005',
'0020001006', '0020001007', '0020001008', '0020001009', '0020001010',
'30001010', '30001011', '30001015',
'30001057', '30001058',
'30001998', '30001999',
#'30611001', # amount exceeds card limit
'30961001',
'40001001', '40001002', '40001003', '40001004', '40001005',
'40001006', '40001007', '40001008', '40001009', '40001010',
'40001012',
'40001018', '40001019', '40001020',
'40001134', '40001135', '40001136', '40001137',
#'50001174', # cardholder name too long
]
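# Illustrative use of the helpers above (a minimal sketch, not part of the
# original module): retryable() is a plain membership test against the
# whitelist of transient gateway status codes, so a caller might do
#
#     if retryable(status_code):
#         schedule_retry(payment)                  # hypothetical caller-side helper
#     elif status_code == DATA_VALIDATION_ERROR:
#         ask_customer_to_fix_details(payment)     # hypothetical as well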
| agpl-3.0 |
keyurpatel076/MissionPlannerGit | Lib/encodings/iso8859_4.py | 593 | 13632 | """ Python Character Mapping Codec iso8859_4 generated from 'MAPPINGS/ISO8859/8859-4.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-4',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\u0138' # 0xA2 -> LATIN SMALL LETTER KRA
u'\u0156' # 0xA3 -> LATIN CAPITAL LETTER R WITH CEDILLA
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE
u'\u013b' # 0xA6 -> LATIN CAPITAL LETTER L WITH CEDILLA
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON
u'\u0112' # 0xAA -> LATIN CAPITAL LETTER E WITH MACRON
u'\u0122' # 0xAB -> LATIN CAPITAL LETTER G WITH CEDILLA
u'\u0166' # 0xAC -> LATIN CAPITAL LETTER T WITH STROKE
u'\xad' # 0xAD -> SOFT HYPHEN
u'\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK
u'\u02db' # 0xB2 -> OGONEK
u'\u0157' # 0xB3 -> LATIN SMALL LETTER R WITH CEDILLA
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE
u'\u013c' # 0xB6 -> LATIN SMALL LETTER L WITH CEDILLA
u'\u02c7' # 0xB7 -> CARON
u'\xb8' # 0xB8 -> CEDILLA
u'\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON
u'\u0113' # 0xBA -> LATIN SMALL LETTER E WITH MACRON
u'\u0123' # 0xBB -> LATIN SMALL LETTER G WITH CEDILLA
u'\u0167' # 0xBC -> LATIN SMALL LETTER T WITH STROKE
u'\u014a' # 0xBD -> LATIN CAPITAL LETTER ENG
u'\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON
u'\u014b' # 0xBF -> LATIN SMALL LETTER ENG
u'\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK
u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\u012a' # 0xCF -> LATIN CAPITAL LETTER I WITH MACRON
u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA
u'\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON
u'\u0136' # 0xD3 -> LATIN CAPITAL LETTER K WITH CEDILLA
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u0168' # 0xDD -> LATIN CAPITAL LETTER U WITH TILDE
u'\u016a' # 0xDE -> LATIN CAPITAL LETTER U WITH MACRON
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK
u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\u012b' # 0xEF -> LATIN SMALL LETTER I WITH MACRON
u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
u'\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA
u'\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON
u'\u0137' # 0xF3 -> LATIN SMALL LETTER K WITH CEDILLA
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u0169' # 0xFD -> LATIN SMALL LETTER U WITH TILDE
u'\u016b' # 0xFE -> LATIN SMALL LETTER U WITH MACRON
u'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
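# Illustrative round trip with the codec defined above (a sketch; the byte
# values follow the decoding_table):
#
#     >>> Codec().encode(u'\u0104\u017e')   # A WITH OGONEK, z WITH CARON
#     ('\xa1\xbe', 2)
#     >>> Codec().decode('\xa1\xbe')
#     (u'\u0104\u017e', 2)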
| gpl-3.0 |
Dino0631/RedRain-Bot | lib/youtube_dl/extractor/kanalplay.py | 90 | 3283 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
float_or_none,
srt_subtitles_timecode,
)
class KanalPlayIE(InfoExtractor):
IE_DESC = 'Kanal 5/9/11 Play'
_VALID_URL = r'https?://(?:www\.)?kanal(?P<channel_id>5|9|11)play\.se/(?:#!/)?(?:play/)?program/\d+/video/(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.kanal5play.se/#!/play/program/3060212363/video/3270012277',
'info_dict': {
'id': '3270012277',
'ext': 'flv',
'title': 'Saknar både dusch och avlopp',
'description': 'md5:6023a95832a06059832ae93bc3c7efb7',
'duration': 2636.36,
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
'url': 'http://www.kanal9play.se/#!/play/program/335032/video/246042',
'only_matching': True,
}, {
'url': 'http://www.kanal11play.se/#!/play/program/232835958/video/367135199',
'only_matching': True,
}]
def _fix_subtitles(self, subs):
return '\r\n\r\n'.join(
'%s\r\n%s --> %s\r\n%s'
% (
num,
srt_subtitles_timecode(item['startMillis'] / 1000.0),
srt_subtitles_timecode(item['endMillis'] / 1000.0),
item['text'],
) for num, item in enumerate(subs, 1))
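    # Illustrative behaviour of _fix_subtitles (a sketch; the exact JSON the
    # site returns is an assumption based on the keys used above):
    #
    #     subs = [{'startMillis': 0, 'endMillis': 1500, 'text': 'Hej'}]
    #     -> '1\r\n00:00:00,000 --> 00:00:01,500\r\nHej'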
def _get_subtitles(self, channel_id, video_id):
subs = self._download_json(
'http://www.kanal%splay.se/api/subtitles/%s' % (channel_id, video_id),
video_id, 'Downloading subtitles JSON', fatal=False)
return {'sv': [{'ext': 'srt', 'data': self._fix_subtitles(subs)}]} if subs else {}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
channel_id = mobj.group('channel_id')
video = self._download_json(
'http://www.kanal%splay.se/api/getVideo?format=FLASH&videoId=%s' % (channel_id, video_id),
video_id)
reasons_for_no_streams = video.get('reasonsForNoStreams')
if reasons_for_no_streams:
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, '\n'.join(reasons_for_no_streams)),
expected=True)
title = video['title']
description = video.get('description')
duration = float_or_none(video.get('length'), 1000)
thumbnail = video.get('posterUrl')
stream_base_url = video['streamBaseUrl']
formats = [{
'url': stream_base_url,
'play_path': stream['source'],
'ext': 'flv',
'tbr': float_or_none(stream.get('bitrate'), 1000),
'rtmp_real_time': True,
} for stream in video['streams']]
self._sort_formats(formats)
subtitles = {}
if video.get('hasSubtitle'):
subtitles = self.extract_subtitles(channel_id, video_id)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
}
| gpl-3.0 |
liqd/adhocracy | src/adhocracy/migration/versions/036_proposal_variant_relation_migrate_values.py | 4 | 6755 | from datetime import datetime
from logging import getLogger
from pickle import dumps, loads
import re
from sqlalchemy import (MetaData, Column, ForeignKey, DateTime, Integer,
PickleType, String, Table, Unicode)
metadata = MetaData()
log = getLogger(__name__)
def are_elements_equal(x, y):
return x == y
new_selection_table = Table(
'selection', metadata,
Column('id', Integer, primary_key=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('delete_time', DateTime),
Column('page_id', Integer, ForeignKey('page.id',
name='selection_page', use_alter=True), nullable=True),
Column('proposal_id', Integer, ForeignKey('proposal.id',
name='selection_proposal', use_alter=True), nullable=True),
Column('variants', PickleType(comparator=are_elements_equal),
nullable=True)
)
page_table = Table('page', metadata,
Column('id', Integer, ForeignKey('delegateable.id'), primary_key=True),
Column('function', Unicode(20))
)
poll_table = Table('poll', metadata,
Column('id', Integer, primary_key=True),
Column('begin_time', DateTime, default=datetime.utcnow),
Column('end_time', DateTime, nullable=True),
Column('user_id', Integer, ForeignKey('user.id'), nullable=False),
Column('action', Unicode(50), nullable=False),
Column('subject', Unicode(254), nullable=False),
Column('scope_id', Integer, ForeignKey('delegateable.id'), nullable=False)
)
category_graph = Table('category_graph', metadata,
Column('parent_id', Integer, ForeignKey('delegateable.id')),
Column('child_id', Integer, ForeignKey('delegateable.id'))
)
delegateable_table = Table('delegateable', metadata,
Column('id', Integer, primary_key=True),
Column('label', Unicode(255), nullable=False),
Column('type', String(50)),
Column('create_time', DateTime, default=datetime.utcnow),
Column('access_time', DateTime, default=datetime.utcnow,
onupdate=datetime.utcnow),
Column('delete_time', DateTime, nullable=True),
Column('milestone_id', Integer, ForeignKey('milestone.id'), nullable=True),
Column('creator_id', Integer, ForeignKey('user.id'), nullable=False),
Column('instance_id', Integer, ForeignKey('instance.id'), nullable=False)
)
tally_table = Table('tally', metadata,
Column('id', Integer, primary_key=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('poll_id', Integer, ForeignKey('poll.id'), nullable=False),
Column('vote_id', Integer, ForeignKey('vote.id'), nullable=True),
Column('num_for', Integer, nullable=True),
Column('num_against', Integer, nullable=True),
Column('num_abstain', Integer, nullable=True)
)
now = datetime.utcnow()
SEL_RE = re.compile('\[@\[selection:([^\]]*)\],"([^"]*)"\]')
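# SEL_RE parses poll subjects of the form '[@[selection:<id>],"<variant>"]'.
# Illustrative match (not from the original source):
#     SEL_RE.match('[@[selection:42],"v1"]').groups() == ('42', 'v1')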
def get_tally_count(migrate_engine, poll_id):
tallies = migrate_engine.execute(
tally_table.select('poll_id = %s' % poll_id)).fetchall()
tallies = sorted(tallies)
last = tallies[-1]
(_, _, _, _, num_for, num_against, _) = last
tally_count = num_for + num_against
return tally_count
def collect_selection_data(selections):
to_proposal = {}
to_variants = {}
for (sid, create_time, delete_time, page_id, proposal_id,
variants) in selections:
if delete_time is not None and delete_time < now:
pass
to_proposal[sid] = proposal_id
to_variants[sid] = loads(variants)
return to_proposal, to_variants
def collect_poll_data(migrate_engine, polls, selection_to_proposal):
proposal_ids = []
variant_to_selection = {}
for (poll_id, _, end_time, _, action, subject, scope_id) in polls:
if action != u'select':
continue
if end_time is not None and end_time < now:
continue
match = SEL_RE.match(subject)
selection_id = int(match.group(1))
variant = match.group(2)
if variant == u'HEAD':
            # we handle HEAD specially. Every Proposal has access to
# HEAD.
continue
var_selections = variant_to_selection.setdefault(variant, [])
tally_count = get_tally_count(migrate_engine, poll_id)
var_selections.append([tally_count, selection_id, poll_id])
try:
proposal_ids.append(selection_to_proposal[selection_id])
except Exception, E:
pass
return proposal_ids, variant_to_selection
def handle_page(migrate_engine, page_id, selections_, polls):
(selection_to_proposal,
selections_to_variants) = collect_selection_data(selections_)
(proposal_ids,
variant_to_selection) = collect_poll_data(migrate_engine, polls,
selection_to_proposal)
# if we have proposals select a winning proposal and assign
# the variant to it
if not len(set(proposal_ids)):
# no proposals
return
for variant, tally_selections in variant_to_selection.items():
tally_selections = sorted(tally_selections)
count, winning_selection, poll_id = tally_selections[-1]
try:
selections_to_variants[winning_selection].append(variant)
migrate_engine.execute(
new_selection_table.update().values(
variants=dumps(selections_to_variants[winning_selection]))
.where(new_selection_table.c.id == winning_selection))
except KeyError, E:
msg = (
'KeyError: %s\n' % E +
'There is no selection with the id %s \n' % E +
'which should be the winner for ' +
'page %s - variant %s' % (page_id, variant))
log.error(msg)
def add_default_variant(migrate_engine):
default_variants = [u'HEAD']
default_variants_pickle = dumps(default_variants)
migrate_engine.execute(
new_selection_table.update().values(variants=default_variants_pickle))
def upgrade(migrate_engine):
metadata.bind = migrate_engine
page_table = Table('page', metadata, autoload=True)
proposal_table = Table('proposal', metadata, autoload=True)
add_default_variant(migrate_engine)
pages = migrate_engine.execute(page_table.select())
for (page_id, function) in pages:
if function != u'norm':
continue
selections = migrate_engine.execute(
new_selection_table.select('page_id = %s' % page_id)).fetchall()
polls = migrate_engine.execute(
poll_table.select('scope_id = %s' % page_id)).fetchall()
handle_page(migrate_engine, page_id, selections, polls)
return
def downgrade(migrate_engine):
raise NotImplementedError()
| agpl-3.0 |
bala4901/odoo | addons/account_asset/report/account_asset_report.py | 40 | 4235 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
class asset_asset_report(osv.osv):
_name = "asset.asset.report"
_description = "Assets Analysis"
_auto = False
_columns = {
'name': fields.char('Year', size=16, required=False, readonly=True),
'purchase_date': fields.date('Purchase Date', readonly=True),
'depreciation_date': fields.date('Depreciation Date', readonly=True),
'asset_id': fields.many2one('account.asset.asset', string='Asset', readonly=True),
'asset_category_id': fields.many2one('account.asset.category',string='Asset category'),
'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
'state': fields.selection([('draft','Draft'),('open','Running'),('close','Close')], 'Status', readonly=True),
'depreciation_value': fields.float('Amount of Depreciation Lines', readonly=True),
'move_check': fields.boolean('Posted', readonly=True),
'nbr': fields.integer('# of Depreciation Lines', readonly=True),
'gross_value': fields.float('Gross Amount', readonly=True),
'posted_value': fields.float('Posted Amount', readonly=True),
'unposted_value': fields.float('Unposted Amount', readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'asset_asset_report')
cr.execute("""
create or replace view asset_asset_report as (
select
min(dl.id) as id,
dl.name as name,
dl.depreciation_date as depreciation_date,
a.purchase_date as purchase_date,
(CASE WHEN (select min(d.id) from account_asset_depreciation_line as d
left join account_asset_asset as ac ON (ac.id=d.asset_id)
where a.id=ac.id) = min(dl.id)
THEN a.purchase_value
ELSE 0
END) as gross_value,
dl.amount as depreciation_value,
(CASE WHEN dl.move_check
THEN dl.amount
ELSE 0
END) as posted_value,
(CASE WHEN NOT dl.move_check
THEN dl.amount
ELSE 0
END) as unposted_value,
dl.asset_id as asset_id,
dl.move_check as move_check,
a.category_id as asset_category_id,
a.partner_id as partner_id,
a.state as state,
count(dl.*) as nbr,
a.company_id as company_id
from account_asset_depreciation_line dl
left join account_asset_asset a on (dl.asset_id=a.id)
group by
dl.amount,dl.asset_id,dl.depreciation_date,dl.name,
a.purchase_date, dl.move_check, a.state, a.category_id, a.partner_id, a.company_id,
a.purchase_value, a.id, a.salvage_value
)""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
zonk1024/moto | tests/test_autoscaling/test_launch_configurations.py | 17 | 7779 | from __future__ import unicode_literals
import boto
from boto.ec2.autoscale.launchconfig import LaunchConfiguration
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
import sure # noqa
from moto import mock_autoscaling
from tests.helpers import requires_boto_gte
@mock_autoscaling
def test_create_launch_configuration():
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='t1.micro',
key_name='the_keys',
security_groups=["default", "default2"],
user_data=b"This is some user_data",
instance_monitoring=True,
instance_profile_name='arn:aws:iam::123456789012:instance-profile/testing',
spot_price=0.1,
)
conn.create_launch_configuration(config)
launch_config = conn.get_all_launch_configurations()[0]
launch_config.name.should.equal('tester')
launch_config.image_id.should.equal('ami-abcd1234')
launch_config.instance_type.should.equal('t1.micro')
launch_config.key_name.should.equal('the_keys')
set(launch_config.security_groups).should.equal(set(['default', 'default2']))
launch_config.user_data.should.equal(b"This is some user_data")
launch_config.instance_monitoring.enabled.should.equal('true')
launch_config.instance_profile_name.should.equal('arn:aws:iam::123456789012:instance-profile/testing')
launch_config.spot_price.should.equal(0.1)
@requires_boto_gte("2.27.0")
@mock_autoscaling
def test_create_launch_configuration_with_block_device_mappings():
block_device_mapping = BlockDeviceMapping()
ephemeral_drive = BlockDeviceType()
ephemeral_drive.ephemeral_name = 'ephemeral0'
block_device_mapping['/dev/xvdb'] = ephemeral_drive
snapshot_drive = BlockDeviceType()
snapshot_drive.snapshot_id = "snap-1234abcd"
snapshot_drive.volume_type = "standard"
block_device_mapping['/dev/xvdp'] = snapshot_drive
ebs_drive = BlockDeviceType()
ebs_drive.volume_type = "io1"
ebs_drive.size = 100
ebs_drive.iops = 1000
ebs_drive.delete_on_termination = False
block_device_mapping['/dev/xvdh'] = ebs_drive
conn = boto.connect_autoscale(use_block_device_types=True)
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='m1.small',
key_name='the_keys',
security_groups=["default", "default2"],
user_data=b"This is some user_data",
instance_monitoring=True,
instance_profile_name='arn:aws:iam::123456789012:instance-profile/testing',
spot_price=0.1,
block_device_mappings=[block_device_mapping]
)
conn.create_launch_configuration(config)
launch_config = conn.get_all_launch_configurations()[0]
launch_config.name.should.equal('tester')
launch_config.image_id.should.equal('ami-abcd1234')
launch_config.instance_type.should.equal('m1.small')
launch_config.key_name.should.equal('the_keys')
set(launch_config.security_groups).should.equal(set(['default', 'default2']))
launch_config.user_data.should.equal(b"This is some user_data")
launch_config.instance_monitoring.enabled.should.equal('true')
launch_config.instance_profile_name.should.equal('arn:aws:iam::123456789012:instance-profile/testing')
launch_config.spot_price.should.equal(0.1)
len(launch_config.block_device_mappings).should.equal(3)
returned_mapping = launch_config.block_device_mappings
set(returned_mapping.keys()).should.equal(set(['/dev/xvdb', '/dev/xvdp', '/dev/xvdh']))
returned_mapping['/dev/xvdh'].iops.should.equal(1000)
returned_mapping['/dev/xvdh'].size.should.equal(100)
returned_mapping['/dev/xvdh'].volume_type.should.equal("io1")
returned_mapping['/dev/xvdh'].delete_on_termination.should.be.false
returned_mapping['/dev/xvdp'].snapshot_id.should.equal("snap-1234abcd")
returned_mapping['/dev/xvdp'].volume_type.should.equal("standard")
returned_mapping['/dev/xvdb'].ephemeral_name.should.equal('ephemeral0')
@requires_boto_gte("2.12")
@mock_autoscaling
def test_create_launch_configuration_for_2_12():
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
ebs_optimized=True,
)
conn.create_launch_configuration(config)
launch_config = conn.get_all_launch_configurations()[0]
launch_config.ebs_optimized.should.equal(True)
@requires_boto_gte("2.25.0")
@mock_autoscaling
def test_create_launch_configuration_using_ip_association():
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
associate_public_ip_address=True,
)
conn.create_launch_configuration(config)
launch_config = conn.get_all_launch_configurations()[0]
launch_config.associate_public_ip_address.should.equal(True)
@requires_boto_gte("2.25.0")
@mock_autoscaling
def test_create_launch_configuration_using_ip_association_should_default_to_false():
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
)
conn.create_launch_configuration(config)
launch_config = conn.get_all_launch_configurations()[0]
launch_config.associate_public_ip_address.should.equal(False)
@mock_autoscaling
def test_create_launch_configuration_defaults():
""" Test with the minimum inputs and check that all of the proper defaults
are assigned for the other attributes """
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='m1.small',
)
conn.create_launch_configuration(config)
launch_config = conn.get_all_launch_configurations()[0]
launch_config.name.should.equal('tester')
launch_config.image_id.should.equal('ami-abcd1234')
launch_config.instance_type.should.equal('m1.small')
# Defaults
launch_config.key_name.should.equal('')
list(launch_config.security_groups).should.equal([])
launch_config.user_data.should.equal(b"")
launch_config.instance_monitoring.enabled.should.equal('false')
launch_config.instance_profile_name.should.equal(None)
launch_config.spot_price.should.equal(None)
@requires_boto_gte("2.12")
@mock_autoscaling
def test_create_launch_configuration_defaults_for_2_12():
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
)
conn.create_launch_configuration(config)
launch_config = conn.get_all_launch_configurations()[0]
launch_config.ebs_optimized.should.equal(False)
@mock_autoscaling
def test_launch_configuration_describe_filter():
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='m1.small',
)
conn.create_launch_configuration(config)
config.name = 'tester2'
conn.create_launch_configuration(config)
config.name = 'tester3'
conn.create_launch_configuration(config)
conn.get_all_launch_configurations(names=['tester', 'tester2']).should.have.length_of(2)
conn.get_all_launch_configurations().should.have.length_of(3)
@mock_autoscaling
def test_launch_configuration_delete():
conn = boto.connect_autoscale()
config = LaunchConfiguration(
name='tester',
image_id='ami-abcd1234',
instance_type='m1.small',
)
conn.create_launch_configuration(config)
conn.get_all_launch_configurations().should.have.length_of(1)
conn.delete_launch_configuration('tester')
conn.get_all_launch_configurations().should.have.length_of(0)
| apache-2.0 |
popazerty/enigma2-4.3 | lib/python/Plugins/SystemPlugins/PositionerSetup/plugin.py | 6 | 50500 | from enigma import eTimer, eDVBResourceManager, eDVBDiseqcCommand, eDVBFrontendParametersSatellite, iDVBFrontend
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Plugins.Plugin import PluginDescriptor
from Components.Label import Label
from Components.Button import Button
from Components.ConfigList import ConfigList
from Components.ConfigList import ConfigListScreen
from Components.TunerInfo import TunerInfo
from Components.ActionMap import NumberActionMap, ActionMap
from Components.NimManager import nimmanager
from Components.MenuList import MenuList
from Components.ScrollLabel import ScrollLabel
from Components.config import config, ConfigSatlist, ConfigNothing, ConfigSelection, \
ConfigSubsection, ConfigInteger, ConfigFloat, KEY_LEFT, KEY_RIGHT, KEY_0, getConfigListEntry
from Components.TuneTest import Tuner
from Tools.Transponder import ConvertToHumanReadable
from time import sleep
from operator import mul as mul
from random import SystemRandom as SystemRandom
from threading import Thread as Thread
from threading import Event as Event
import log
import rotor_calc
class PositionerSetup(Screen):
@staticmethod
def satposition2metric(position):
if position > 1800:
position = 3600 - position
orientation = "west"
else:
orientation = "east"
return position, orientation
@staticmethod
def orbital2metric(position, orientation):
if orientation == "west":
position = 360 - position
if orientation == "south":
position = - position
return position
@staticmethod
def longitude2orbital(position):
if position >= 180:
return 360 - position, "west"
else:
return position, "east"
@staticmethod
def latitude2orbital(position):
if position >= 0:
return position, "north"
else:
return -position, "south"
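	# Worked examples for the coordinate helpers above (illustrative only):
	#   satposition2metric(192)   -> (192, "east")   i.e. 19.2E in tenths of a degree
	#   satposition2metric(3570)  -> (30, "west")    i.e. 3.0W
	#   longitude2orbital(350.0)  -> (10.0, "west")
	#   latitude2orbital(-50.767) -> (50.767, "south")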
UPDATE_INTERVAL = 50 # milliseconds
STATUS_MSG_TIMEOUT = 2 # seconds
LOG_SIZE = 16 * 1024 # log buffer size
def __init__(self, session, feid):
self.session = session
Screen.__init__(self, session)
self.feid = feid
self.oldref = None
log.open(self.LOG_SIZE)
if config.Nims[self.feid].configMode.value == 'advanced':
self.advanced = True
self.advancedconfig = config.Nims[self.feid].advanced
self.advancedsats = self.advancedconfig.sat
self.availablesats = map(lambda x: x[0], nimmanager.getRotorSatListForNim(self.feid))
else:
self.advanced = False
cur = { }
if not self.openFrontend():
self.oldref = session.nav.getCurrentlyPlayingServiceReference()
service = session.nav.getCurrentService()
feInfo = service and service.frontendInfo()
if feInfo:
cur = feInfo.getTransponderData(True)
del feInfo
del service
session.nav.stopService() # try to disable foreground service
if not self.openFrontend():
if session.pipshown: # try to disable pip
service = self.session.pip.pipservice
feInfo = service and service.frontendInfo()
if feInfo:
cur = feInfo.getTransponderData()
del feInfo
del service
if hasattr(session, 'infobar'):
if session.infobar.servicelist.dopipzap:
session.infobar.servicelist.togglePipzap()
if hasattr(session, 'pip'):
del session.pip
session.pipshown = False
if not self.openFrontend():
self.frontend = None # in normal case this should not happen
del self.raw_channel
self.frontendStatus = { }
self.diseqc = Diseqc(self.frontend)
		# True means we don't want the normal SEC code to send commands to the rotor!
self.tuner = Tuner(self.frontend, ignore_rotor = True)
tp = ( cur.get("frequency", 0) / 1000,
cur.get("symbol_rate", 0) / 1000,
cur.get("polarization", eDVBFrontendParametersSatellite.Polarisation_Horizontal),
cur.get("fec_inner", eDVBFrontendParametersSatellite.FEC_Auto),
cur.get("inversion", eDVBFrontendParametersSatellite.Inversion_Unknown),
cur.get("orbital_position", 0),
cur.get("system", eDVBFrontendParametersSatellite.System_DVB_S),
cur.get("modulation", eDVBFrontendParametersSatellite.Modulation_QPSK),
cur.get("rolloff", eDVBFrontendParametersSatellite.RollOff_alpha_0_35),
cur.get("pilot", eDVBFrontendParametersSatellite.Pilot_Unknown))
self.tuner.tune(tp)
self.isMoving = False
self.stopOnLock = False
self.red = Button("")
self["key_red"] = self.red
self.green = Button("")
self["key_green"] = self.green
self.yellow = Button("")
self["key_yellow"] = self.yellow
self.blue = Button("")
self["key_blue"] = self.blue
self.list = []
self["list"] = ConfigList(self.list)
self["snr_db"] = TunerInfo(TunerInfo.SNR_DB, statusDict = self.frontendStatus)
self["snr_percentage"] = TunerInfo(TunerInfo.SNR_PERCENTAGE, statusDict = self.frontendStatus)
self["ber_value"] = TunerInfo(TunerInfo.BER_VALUE, statusDict = self.frontendStatus)
self["snr_bar"] = TunerInfo(TunerInfo.SNR_BAR, statusDict = self.frontendStatus)
self["ber_bar"] = TunerInfo(TunerInfo.BER_BAR, statusDict = self.frontendStatus)
self["lock_state"] = TunerInfo(TunerInfo.LOCK_STATE, statusDict = self.frontendStatus)
self["frequency_value"] = Label("")
self["symbolrate_value"] = Label("")
self["fec_value"] = Label("")
self["polarisation"] = Label("")
self["status_bar"] = Label("")
self.statusMsgTimeoutTicks = 0
self.statusMsgBlinking = False
self.statusMsgBlinkCount = 0
self.statusMsgBlinkRate = 500 / self.UPDATE_INTERVAL # milliseconds
self.tuningChangedTo(tp)
self["actions"] = NumberActionMap(["DirectionActions", "OkCancelActions", "ColorActions", "TimerEditActions", "InputActions"],
{
"ok": self.keyOK,
"cancel": self.keyCancel,
"up": self.keyUp,
"down": self.keyDown,
"left": self.keyLeft,
"right": self.keyRight,
"red": self.redKey,
"green": self.greenKey,
"yellow": self.yellowKey,
"blue": self.blueKey,
"log": self.showLog,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal
}, -1)
self.updateColors("tune")
self.statusTimer = eTimer()
self.statusTimer.callback.append(self.updateStatus)
self.collectingStatistics = False
self.statusTimer.start(self.UPDATE_INTERVAL, True)
self.dataAvailable = Event()
self.onClose.append(self.__onClose)
self.createConfig()
self.createSetup()
def __onClose(self):
self.statusTimer.stop()
log.close()
self.session.nav.playService(self.oldref)
def restartPrevService(self, yesno):
if yesno:
if self.frontend:
self.frontend = None
del self.raw_channel
else:
self.oldref=None
self.close(None)
def keyCancel(self):
if self.oldref:
self.session.openWithCallback(self.restartPrevService, MessageBox, _("Zap back to service before positioner setup?"), MessageBox.TYPE_YESNO)
else:
self.restartPrevService(False)
def openFrontend(self):
res_mgr = eDVBResourceManager.getInstance()
if res_mgr:
self.raw_channel = res_mgr.allocateRawChannel(self.feid)
if self.raw_channel:
self.frontend = self.raw_channel.getFrontend()
if self.frontend:
return True
else:
print "getFrontend failed"
else:
print "getRawChannel failed"
else:
print "getResourceManager instance failed"
return False
def setLNB(self, lnb):
try:
self.sitelon = lnb.longitude.float
self.longitudeOrientation = lnb.longitudeOrientation.value
self.sitelat = lnb.latitude.float
self.latitudeOrientation = lnb.latitudeOrientation.value
self.tuningstepsize = lnb.tuningstepsize.float
self.rotorPositions = lnb.rotorPositions.value
self.turningspeedH = lnb.turningspeedH.float
self.turningspeedV = lnb.turningspeedV.float
except: # some reasonable defaults from NimManager
self.sitelon = 5.1
self.longitudeOrientation = 'east'
self.sitelat = 50.767
self.latitudeOrientation = 'north'
self.tuningstepsize = 0.36
self.rotorPositions = 49
self.turningspeedH = 2.3
self.turningspeedV = 1.7
self.sitelat = PositionerSetup.orbital2metric(self.sitelat, self.latitudeOrientation)
self.sitelon = PositionerSetup.orbital2metric(self.sitelon, self.longitudeOrientation)
def getLNBfromConfig(self, orb_pos):
lnb = None
if orb_pos in self.availablesats:
lnbnum = int(self.advancedsats[orb_pos].lnb.value)
if not lnbnum:
for allsats in range(3601, 3604):
lnbnum = int(self.advancedsats[allsats].lnb.value)
if lnbnum:
break
if lnbnum:
self.printMsg(_("Using LNB %d") % lnbnum)
lnb = self.advancedconfig.lnb[lnbnum]
if not lnb:
self.logMsg(_("Warning: no LNB; using factory defaults."), timeout = 4)
return lnb
def createConfig(self):
rotorposition = 1
orb_pos = 0
self.printMsg(_("Using tuner %s") % chr(0x41 + self.feid))
if not self.advanced:
self.printMsg(_("Configuration mode: %s") % _("simple"))
nim = config.Nims[self.feid]
self.sitelon = nim.longitude.float
self.longitudeOrientation = nim.longitudeOrientation.value
self.sitelat = nim.latitude.float
self.latitudeOrientation = nim.latitudeOrientation.value
self.sitelat = PositionerSetup.orbital2metric(self.sitelat, self.latitudeOrientation)
self.sitelon = PositionerSetup.orbital2metric(self.sitelon, self.longitudeOrientation)
self.tuningstepsize = nim.tuningstepsize.float
self.rotorPositions = nim.rotorPositions.value
self.turningspeedH = nim.turningspeedH.float
self.turningspeedV = nim.turningspeedV.float
else: # it is advanced
self.printMsg(_("Configuration mode: %s") % _("advanced"))
fe_data = { }
self.frontend.getFrontendData(fe_data)
self.frontend.getTransponderData(fe_data, True)
orb_pos = fe_data.get("orbital_position", None)
if orb_pos in self.availablesats:
rotorposition = int(self.advancedsats[orb_pos].rotorposition.value)
self.setLNB(self.getLNBfromConfig(orb_pos))
self.positioner_tune = ConfigNothing()
self.positioner_move = ConfigNothing()
self.positioner_finemove = ConfigNothing()
self.positioner_limits = ConfigNothing()
self.positioner_storage = ConfigInteger(default = rotorposition, limits = (1, self.rotorPositions))
self.allocatedIndices = []
m = PositionerSetup.satposition2metric(orb_pos)
self.orbitalposition = ConfigFloat(default = [int(m[0] / 10), m[0] % 10], limits = [(0,180),(0,9)])
self.orientation = ConfigSelection([("east", _("East")), ("west", _("West"))], m[1])
def createSetup(self):
self.list.append((_("Tune and focus"), self.positioner_tune, "tune"))
self.list.append((_("Movement"), self.positioner_move, "move"))
self.list.append((_("Fine movement"), self.positioner_finemove, "finemove"))
self.list.append((_("Set limits"), self.positioner_limits, "limits"))
self.list.append((_("Memory index"), self.positioner_storage, "storage"))
self.list.append((_("Goto"), self.orbitalposition, "goto"))
self.list.append((" ", self.orientation, "goto"))
self["list"].l.setList(self.list)
def keyOK(self):
pass
def getCurrentConfigPath(self):
return self["list"].getCurrent()[2]
def keyUp(self):
if not self.isMoving:
self["list"].instance.moveSelection(self["list"].instance.moveUp)
self.updateColors(self.getCurrentConfigPath())
def keyDown(self):
if not self.isMoving:
self["list"].instance.moveSelection(self["list"].instance.moveDown)
self.updateColors(self.getCurrentConfigPath())
def keyNumberGlobal(self, number):
self["list"].handleKey(KEY_0 + number)
def keyLeft(self):
self["list"].handleKey(KEY_LEFT)
def keyRight(self):
self["list"].handleKey(KEY_RIGHT)
def updateColors(self, entry):
if entry == "tune":
self.red.setText(_("Tune"))
self.green.setText(_("Auto focus"))
self.yellow.setText(_("Calibrate"))
self.blue.setText(_("Calculate"))
elif entry == "move":
if self.isMoving:
self.red.setText(_("Stop"))
self.green.setText(_("Stop"))
self.yellow.setText(_("Stop"))
self.blue.setText(_("Stop"))
else:
self.red.setText(_("Move west"))
self.green.setText(_("Search west"))
self.yellow.setText(_("Search east"))
self.blue.setText(_("Move east"))
elif entry == "finemove":
self.red.setText("")
self.green.setText(_("Step west"))
self.yellow.setText(_("Step east"))
self.blue.setText("")
elif entry == "limits":
self.red.setText(_("Limits off"))
self.green.setText(_("Limit west"))
self.yellow.setText(_("Limit east"))
self.blue.setText(_("Limits on"))
elif entry == "storage":
self.red.setText("")
self.green.setText(_("Store position"))
self.yellow.setText(_("Goto position"))
if self.advanced:
self.blue.setText(_("Allocate"))
else:
self.blue.setText("")
elif entry == "goto":
self.red.setText("")
self.green.setText(_("Goto 0"))
self.yellow.setText(_("Goto X"))
self.blue.setText("")
else:
self.red.setText("")
self.green.setText("")
self.yellow.setText("")
self.blue.setText("")
def printMsg(self, msg):
print msg
print>>log, msg
def stopMoving(self):
self.printMsg(_("Stop"))
self.diseqccommand("stop")
self.isMoving = False
self.stopOnLock = False
self.statusMsg(_("Stopped"), timeout = self.STATUS_MSG_TIMEOUT)
def redKey(self):
entry = self.getCurrentConfigPath()
if entry == "move":
if self.isMoving:
self.stopMoving()
else:
self.printMsg(_("Move west"))
self.diseqccommand("moveWest", 0)
self.isMoving = True
self.statusMsg(_("Moving west ..."), blinking = True)
self.updateColors("move")
elif entry == "limits":
self.printMsg(_("Limits off"))
self.diseqccommand("limitOff")
self.statusMsg(_("Limits cancelled"), timeout = self.STATUS_MSG_TIMEOUT)
elif entry == "tune":
fe_data = { }
self.frontend.getFrontendData(fe_data)
self.frontend.getTransponderData(fe_data, True)
feparm = self.tuner.lastparm.getDVBS()
fe_data["orbital_position"] = feparm.orbital_position
self.statusTimer.stop()
self.session.openWithCallback(self.tune, TunerScreen, self.feid, fe_data)
def greenKey(self):
entry = self.getCurrentConfigPath()
if entry == "tune":
# Auto focus
self.printMsg(_("Auto focus"))
print>>log, (_("Site latitude") + " : %5.1f %s") % PositionerSetup.latitude2orbital(self.sitelat)
print>>log, (_("Site longitude") + " : %5.1f %s") % PositionerSetup.longitude2orbital(self.sitelon)
Thread(target = self.autofocus).start()
elif entry == "move":
if self.isMoving:
self.stopMoving()
else:
self.printMsg(_("Search west"))
self.isMoving = True
self.stopOnLock = True
self.diseqccommand("moveWest", 0)
self.statusMsg(_("Searching west ..."), blinking = True)
self.updateColors("move")
elif entry == "finemove":
self.printMsg(_("Step west"))
self.diseqccommand("moveWest", 0xFF) # one step
self.statusMsg(_("Stepped west"), timeout = self.STATUS_MSG_TIMEOUT)
elif entry == "storage":
self.printMsg(_("Store at index"))
index = int(self.positioner_storage.value)
self.diseqccommand("store", index)
self.statusMsg((_("Position stored at index") + " %2d") % index, timeout = self.STATUS_MSG_TIMEOUT)
elif entry == "limits":
self.printMsg(_("Limit west"))
self.diseqccommand("limitWest")
self.statusMsg(_("West limit set"), timeout = self.STATUS_MSG_TIMEOUT)
elif entry == "goto":
self.printMsg(_("Goto 0"))
self.diseqccommand("moveTo", 0)
self.statusMsg(_("Moved to position 0"), timeout = self.STATUS_MSG_TIMEOUT)
def yellowKey(self):
entry = self.getCurrentConfigPath()
if entry == "move":
if self.isMoving:
self.stopMoving()
else:
self.printMsg(_("Move east"))
self.isMoving = True
self.stopOnLock = True
self.diseqccommand("moveEast", 0)
self.statusMsg(_("Searching east ..."), blinking = True)
self.updateColors("move")
elif entry == "finemove":
self.printMsg(_("Step east"))
self.diseqccommand("moveEast", 0xFF) # one step
self.statusMsg(_("Stepped east"), timeout = self.STATUS_MSG_TIMEOUT)
elif entry == "storage":
self.printMsg(_("Goto index position"))
index = int(self.positioner_storage.value)
self.diseqccommand("moveTo", index)
self.statusMsg((_("Moved to position at index") + " %2d") % index, timeout = self.STATUS_MSG_TIMEOUT)
elif entry == "limits":
self.printMsg(_("Limit east"))
self.diseqccommand("limitEast")
self.statusMsg(_("East limit set"), timeout = self.STATUS_MSG_TIMEOUT)
elif entry == "goto":
self.printMsg(_("Move to position X"))
satlon = self.orbitalposition.float
position = "%5.1f %s" % (satlon, self.orientation.value)
print>>log, (_("Satellite longitude:") + " %s") % position
satlon = PositionerSetup.orbital2metric(satlon, self.orientation.value)
self.statusMsg((_("Moving to position") + " %s") % position, timeout = self.STATUS_MSG_TIMEOUT)
self.gotoX(satlon)
elif entry == "tune":
# Start USALS calibration
self.printMsg(_("USALS calibration"))
print>>log, (_("Site latitude") + " : %5.1f %s") % PositionerSetup.latitude2orbital(self.sitelat)
print>>log, (_("Site longitude") + " : %5.1f %s") % PositionerSetup.longitude2orbital(self.sitelon)
Thread(target = self.gotoXcalibration).start()
def blueKey(self):
entry = self.getCurrentConfigPath()
if entry == "move":
if self.isMoving:
self.stopMoving()
else:
self.printMsg(_("Move east"))
self.diseqccommand("moveEast", 0)
self.isMoving = True
self.statusMsg(_("Moving east ..."), blinking = True)
self.updateColors("move")
elif entry == "limits":
self.printMsg(_("Limits on"))
self.diseqccommand("limitOn")
self.statusMsg(_("Limits enabled"), timeout = self.STATUS_MSG_TIMEOUT)
elif entry == "tune":
# Start (re-)calculate
self.session.openWithCallback(self.recalcConfirmed, MessageBox, _("This will (re-)calculate all positions of your rotor and may remove previously memorised positions and fine-tuning!\nAre you sure?"), MessageBox.TYPE_YESNO, default = False, timeout = 10)
elif entry == "storage":
if self.advanced:
self.printMsg(_("Allocate unused memory index"))
while True:
if not len(self.allocatedIndices):
for sat in self.availablesats:
self.allocatedIndices.append(int(self.advancedsats[sat].rotorposition.value))
if len(self.allocatedIndices) == self.rotorPositions:
self.statusMsg(_("No free index available"), timeout = self.STATUS_MSG_TIMEOUT)
break
index = 1
for i in sorted(self.allocatedIndices):
if i != index:
break
index += 1
if index <= self.rotorPositions:
self.positioner_storage.value = index
self["list"].invalidateCurrent()
self.allocatedIndices.append(index)
self.statusMsg((_("Index allocated:") + " %2d") % index, timeout = self.STATUS_MSG_TIMEOUT)
break
else:
self.allocatedIndices = []
def recalcConfirmed(self, yesno):
if yesno:
self.printMsg(_("Calculate all positions"))
print>>log, (_("Site latitude") + " : %5.1f %s") % PositionerSetup.latitude2orbital(self.sitelat)
print>>log, (_("Site longitude") + " : %5.1f %s") % PositionerSetup.longitude2orbital(self.sitelon)
lon = self.sitelon
if lon >= 180:
lon -= 360
if lon < -30: # americas, make unsigned binary west positive polarity
lon = -lon
lon = int(round(lon)) & 0xFF
lat = int(round(self.sitelat)) & 0xFF
index = int(self.positioner_storage.value) & 0xFF
self.diseqccommand("calc", (((index << 8) | lon) << 8) | lat)
self.statusMsg(_("Calculation complete"), timeout = self.STATUS_MSG_TIMEOUT)
def showLog(self):
self.session.open(PositionerSetupLog)
def diseqccommand(self, cmd, param = 0):
print>>log, "Diseqc(%s, %X)" % (cmd, param)
self.diseqc.command(cmd, param)
self.tuner.retune()
def tune(self, transponder):
# re-start the update timer
self.statusTimer.start(self.UPDATE_INTERVAL, True)
if transponder is not None:
self.tuner.tune(transponder)
self.tuningChangedTo(transponder)
feparm = self.tuner.lastparm.getDVBS()
orb_pos = feparm.orbital_position
m = PositionerSetup.satposition2metric(orb_pos)
self.orbitalposition.value = [int(m[0] / 10), m[0] % 10]
self.orientation.value = m[1]
if self.advanced:
if orb_pos in self.availablesats:
rotorposition = int(self.advancedsats[orb_pos].rotorposition.value)
self.positioner_storage.value = rotorposition
self.allocatedIndices = []
self.setLNB(self.getLNBfromConfig(orb_pos))
def isLocked(self):
return self.frontendStatus.get("tuner_locked", 0) == 1
def statusMsg(self, msg, blinking = False, timeout = 0): # timeout in seconds
self.statusMsgBlinking = blinking
if not blinking:
self["status_bar"].visible = True
self["status_bar"].setText(msg)
self.statusMsgTimeoutTicks = (timeout * 1000 + self.UPDATE_INTERVAL / 2) / self.UPDATE_INTERVAL
def updateStatus(self):
self.statusTimer.start(self.UPDATE_INTERVAL, True)
if self.frontend:
self.frontend.getFrontendStatus(self.frontendStatus)
self["snr_db"].update()
self["snr_percentage"].update()
self["ber_value"].update()
self["snr_bar"].update()
self["ber_bar"].update()
self["lock_state"].update()
if self.statusMsgBlinking:
self.statusMsgBlinkCount += 1
if self.statusMsgBlinkCount == self.statusMsgBlinkRate:
self.statusMsgBlinkCount = 0
self["status_bar"].visible = not self["status_bar"].visible
if self.statusMsgTimeoutTicks > 0:
self.statusMsgTimeoutTicks -= 1
if self.statusMsgTimeoutTicks == 0:
self["status_bar"].setText("")
self.statusMsgBlinking = False
self["status_bar"].visible = True
if self.isLocked() and self.isMoving and self.stopOnLock:
self.stopMoving()
self.updateColors(self.getCurrentConfigPath())
if self.collectingStatistics:
self.low_rate_adapter_count += 1
if self.low_rate_adapter_count == self.MAX_LOW_RATE_ADAPTER_COUNT:
self.low_rate_adapter_count = 0
self.snr_percentage += self["snr_percentage"].getValue(TunerInfo.SNR)
self.lock_count += self["lock_state"].getValue(TunerInfo.LOCK)
self.stat_count += 1
if self.stat_count == self.max_count:
self.collectingStatistics = False
count = float(self.stat_count)
self.lock_count /= count
self.snr_percentage *= 100.0 / 0x10000 / count
self.dataAvailable.set()
def tuningChangedTo(self, tp):
def setLowRateAdapterCount(symbolrate):
# change the measurement time and update interval in case of low symbol rate,
# since more time is needed for the front end in that case.
# This is a rough heuristic without any claim to precision: at a symbol rate
# of 5000 the interval is multiplied by 3, decreasing linearly to a factor of
# 1 at 15000, which is treated as a high symbol rate.
return max(int(round((3 - 1) * (symbolrate - 15000) / (5000 - 15000) + 1)), 1)
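# Rough worked illustration of the factor computed above (example values only,
# assuming symbol rates in kSym/s as supplied by TunerScreen):
#   symbolrate  5000           -> factor 3
#   symbolrate 10000           -> factor 2
#   symbolrate 15000 and above -> factor 1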
self.symbolrate = tp[1]
self.polarisation = tp[2]
self.MAX_LOW_RATE_ADAPTER_COUNT = setLowRateAdapterCount(self.symbolrate)
transponderdata = ConvertToHumanReadable(self.tuner.getTransponderData(), "DVB-S")
self["frequency_value"].setText(str(transponderdata.get("frequency")))
self["symbolrate_value"].setText(str(transponderdata.get("symbol_rate")))
self["fec_value"].setText(str(transponderdata.get("fec_inner")))
self["polarisation"].setText(str(transponderdata.get("polarization")))
@staticmethod
def rotorCmd2Step(rotorCmd, stepsize):
return round(float(rotorCmd & 0xFFF) / 0x10 / stepsize) * (1 - ((rotorCmd & 0x1000) >> 11))
@staticmethod
def gotoXcalc(satlon, sitelat, sitelon):
def azimuth2Rotorcode(angle):
gotoXtable = (0x00, 0x02, 0x03, 0x05, 0x06, 0x08, 0x0A, 0x0B, 0x0D, 0x0E)
a = int(round(abs(angle) * 10.0))
return ((a / 10) << 4) + gotoXtable[a % 10]
satHourAngle = rotor_calc.calcSatHourangle(satlon, sitelat, sitelon)
if sitelat >= 0: # Northern Hemisphere
rotorCmd = azimuth2Rotorcode(180 - satHourAngle)
if satHourAngle <= 180: # the east
rotorCmd |= 0xE000
else: # west
rotorCmd |= 0xD000
else: # Southern Hemisphere
if satHourAngle <= 180: # the east
rotorCmd = azimuth2Rotorcode(satHourAngle) | 0xD000
else: # west
rotorCmd = azimuth2Rotorcode(360 - satHourAngle) | 0xE000
return rotorCmd
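# Illustrative encoding (example values only): if the angle handed to
# azimuth2Rotorcode() were 28.2 degrees, then a = 282, the integer part gives
# 28 << 4 = 0x1C0, the fractional table adds gotoXtable[2] = 0x03, i.e. 0x1C3,
# and OR-ing in the 0xE000 "east" marker yields a gotoX command word of 0xE1C3.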
def gotoX(self, satlon):
rotorCmd = PositionerSetup.gotoXcalc(satlon, self.sitelat, self.sitelon)
self.diseqccommand("gotoX", rotorCmd)
x = PositionerSetup.rotorCmd2Step(rotorCmd, self.tuningstepsize)
print>>log, (_("Rotor step position:") + " %4d") % x
return x
def getTurningspeed(self):
if self.polarisation == eDVBFrontendParametersSatellite.Polarisation_Horizontal:
turningspeed = self.turningspeedH
else:
turningspeed = self.turningspeedV
return max(turningspeed, 0.1)
TURNING_START_STOP_DELAY = 1.600 # seconds
MAX_SEARCH_ANGLE = 12.0 # degrees
MAX_FOCUS_ANGLE = 6.0 # degrees
LOCK_LIMIT = 0.1 # ratio
MEASURING_TIME = 2.500 # seconds
def measure(self, time = MEASURING_TIME): # time in seconds
self.snr_percentage = 0.0
self.lock_count = 0.0
self.stat_count = 0
self.low_rate_adapter_count = 0
self.max_count = max(int((time * 1000 + self.UPDATE_INTERVAL / 2)/ self.UPDATE_INTERVAL), 1)
self.collectingStatistics = True
self.dataAvailable.clear()
self.dataAvailable.wait()
def logMsg(self, msg, timeout = 0):
self.statusMsg(msg, timeout = timeout)
self.printMsg(msg)
def sync(self):
self.lock_count = 0.0
n = 0
while self.lock_count < (1 - self.LOCK_LIMIT) and n < 5:
self.measure(time = 0.500)
n += 1
if self.lock_count < (1 - self.LOCK_LIMIT):
return False
return True
randomGenerator = None
def randomBool(self):
if self.randomGenerator is None:
self.randomGenerator = SystemRandom()
return self.randomGenerator.random() >= 0.5
def gotoXcalibration(self):
def move(x):
z = self.gotoX(x + satlon)
time = int(abs(x - prev_pos) / turningspeed + 2 * self.TURNING_START_STOP_DELAY)
sleep(time * self.MAX_LOW_RATE_ADAPTER_COUNT)
return z
def reportlevels(pos, level, lock):
print>>log, (_("Signal quality") + " %5.1f" + chr(176) + " : %6.2f") % (pos, level)
print>>log, (_("Lock ratio") + " %5.1f" + chr(176) + " : %6.2f") % (pos, lock)
def optimise(readings):
xi = readings.keys()
yi = map(lambda (x, y) : x, readings.values())
x0 = sum(map(mul, xi, yi)) / sum(yi)
xm = xi[yi.index(max(yi))]
return x0, xm
def toGeopos(x):
if x < 0:
return _("W")
else:
return _("E")
def toGeoposEx(x):
if x < 0:
return _("west")
else:
return _("east")
self.logMsg(_("GotoX calibration"))
satlon = self.orbitalposition.float
print>>log, (_("Satellite longitude:") + " %5.1f" + chr(176) + " %s") % (satlon, self.orientation.value)
satlon = PositionerSetup.orbital2metric(satlon, self.orientation.value)
prev_pos = 0.0 # previous relative position w.r.t. satlon
turningspeed = self.getTurningspeed()
x = 0.0 # relative position w.r.t. satlon
dir = 1
if self.randomBool():
dir = -dir
while abs(x) < self.MAX_SEARCH_ANGLE:
if self.sync():
break
x += (1.0 * dir) # one degree east/west
self.statusMsg((_("Searching") + " " + toGeoposEx(dir) + " %2d" + chr(176)) % abs(x), blinking = True)
move(x)
prev_pos = x
else:
x = 0.0
dir = -dir
while abs(x) < self.MAX_SEARCH_ANGLE:
x += (1.0 * dir) # one degree east/west
self.statusMsg((_("Searching") + " " + toGeoposEx(dir) + " %2d" + chr(176)) % abs(x), blinking = True)
move(x)
prev_pos = x
if self.sync():
break
else:
msg = _("Cannot find any signal ..., aborting !")
self.printMsg(msg)
self.statusMsg("")
self.session.open(MessageBox, msg, MessageBox.TYPE_ERROR, timeout = 5)
return
x = round(x / self.tuningstepsize) * self.tuningstepsize
move(x)
prev_pos = x
measurements = {}
self.measure()
print>>log, (_("Initial signal quality") + " %5.1f" + chr(176) + ": %6.2f") % (x, self.snr_percentage)
print>>log, (_("Initial lock ratio") + " %5.1f" + chr(176) + ": %6.2f") % (x, self.lock_count)
measurements[x] = (self.snr_percentage, self.lock_count)
start_pos = x
x = 0.0
dir = 1
if self.randomBool():
dir = -dir
while x < self.MAX_FOCUS_ANGLE:
x += self.tuningstepsize * dir # one step east/west
self.statusMsg((_("Moving") + " " + toGeoposEx(dir) + " %5.1f" + chr(176)) % abs(x + start_pos), blinking = True)
move(x + start_pos)
prev_pos = x + start_pos
self.measure()
measurements[x + start_pos] = (self.snr_percentage, self.lock_count)
reportlevels(x + start_pos, self.snr_percentage, self.lock_count)
if self.lock_count < self.LOCK_LIMIT:
break
else:
msg = _("Cannot determine") + " " + toGeoposEx(dir) + " " + _("limit ..., aborting !")
self.printMsg(msg)
self.statusMsg("")
self.session.open(MessageBox, msg, MessageBox.TYPE_ERROR, timeout = 5)
return
x = 0.0
dir = -dir
self.statusMsg((_("Moving") + " " + toGeoposEx(dir) + " %5.1f" + chr(176)) % abs(start_pos), blinking = True)
move(start_pos)
prev_pos = start_pos
if not self.sync():
msg = _("Sync failure moving back to origin !")
self.printMsg(msg)
self.statusMsg("")
self.session.open(MessageBox, msg, MessageBox.TYPE_ERROR, timeout = 5)
return
while abs(x) < self.MAX_FOCUS_ANGLE:
x += self.tuningstepsize * dir # one step west/east
self.statusMsg((_("Moving") + " " + toGeoposEx(dir) + " %5.1f" + chr(176)) % abs(x + start_pos), blinking = True)
move(x + start_pos)
prev_pos = x + start_pos
self.measure()
measurements[x + start_pos] = (self.snr_percentage, self.lock_count)
reportlevels(x + start_pos, self.snr_percentage, self.lock_count)
if self.lock_count < self.LOCK_LIMIT:
break
else:
msg = _("Cannot determine") + " " + toGeoposEx(dir) + " " + _("limit ..., aborting !")
self.printMsg(msg)
self.statusMsg("")
self.session.open(MessageBox, msg, MessageBox.TYPE_ERROR, timeout = 5)
return
(x0, xm) = optimise(measurements)
x = move(x0)
if satlon > 180:
satlon -= 360
x0 += satlon
xm += satlon
print>>log, (_("Weighted position") + " : %5.1f" + chr(176) + " %s") % (abs(x0), toGeopos(x0))
print>>log, (_("Strongest position") + " : %5.1f" + chr(176) + " %s") % (abs(xm), toGeopos(xm))
self.logMsg((_("Final position at") + " %5.1f" + chr(176) + " %s / %d; " + _("offset is") + " %4.1f" + chr(176)) % (abs(x0), toGeopos(x0), x, x0 - satlon), timeout = 10)
def autofocus(self):
def move(x):
if x > 0:
self.diseqccommand("moveEast", (-x) & 0xFF)
elif x < 0:
self.diseqccommand("moveWest", x & 0xFF)
if x != 0:
time = int(abs(x) * self.tuningstepsize / turningspeed + 2 * self.TURNING_START_STOP_DELAY)
sleep(time * self.MAX_LOW_RATE_ADAPTER_COUNT)
def reportlevels(pos, level, lock):
print>>log, (_("Signal quality") + " [%2d] : %6.2f") % (pos, level)
print>>log, (_("Lock ratio") + " [%2d] : %6.2f") % (pos, lock)
def optimise(readings):
xi = readings.keys()
yi = map(lambda (x, y) : x, readings.values())
x0 = int(round(sum(map(mul, xi, yi)) / sum(yi)))
xm = xi[yi.index(max(yi))]
return x0, xm
def toGeoposEx(x):
if x < 0:
return _("west")
else:
return _("east")
self.logMsg(_("Auto focus commencing ..."))
turningspeed = self.getTurningspeed()
measurements = {}
maxsteps = max(min(round(self.MAX_FOCUS_ANGLE / self.tuningstepsize), 0x1F), 3)
self.measure()
print>>log, (_("Initial signal quality:") + " %6.2f") % self.snr_percentage
print>>log, (_("Initial lock ratio") + " : %6.2f") % self.lock_count
if self.lock_count < 1 - self.LOCK_LIMIT:
msg = _("There is no signal to lock on !")
self.printMsg(msg)
self.statusMsg("")
self.session.open(MessageBox, msg, MessageBox.TYPE_ERROR, timeout = 5)
return
print>>log, _("Signal OK, proceeding")
x = 0
dir = 1
if self.randomBool():
dir = -dir
measurements[x] = (self.snr_percentage, self.lock_count)
nsteps = 0
while nsteps < maxsteps:
x += dir
self.statusMsg((_("Moving") + " " + toGeoposEx(dir) + " %2d") % abs(x), blinking = True)
move(dir) # one step
self.measure()
measurements[x] = (self.snr_percentage, self.lock_count)
reportlevels(x, self.snr_percentage, self.lock_count)
if self.lock_count < self.LOCK_LIMIT:
break
nsteps += 1
else:
msg = _("Cannot determine") + " " + toGeoposEx(dir) + " " + _("limit ..., aborting !")
self.printMsg(msg)
self.statusMsg("")
self.session.open(MessageBox, msg, MessageBox.TYPE_ERROR, timeout = 5)
return
dir = -dir
self.statusMsg(_("Moving") + " " + toGeoposEx(dir) + " 0", blinking = True)
move(-x)
if not self.sync():
msg = _("Sync failure moving back to origin !")
self.printMsg(msg)
self.statusMsg("")
self.session.open(MessageBox, msg, MessageBox.TYPE_ERROR, timeout = 5)
return
x = 0
nsteps = 0
while nsteps < maxsteps:
x += dir
self.statusMsg((_("Moving") + " " + toGeoposEx(dir) + " %2d") % abs(x), blinking = True)
move(dir) # one step
self.measure()
measurements[x] = (self.snr_percentage, self.lock_count)
reportlevels(x, self.snr_percentage, self.lock_count)
if self.lock_count < self.LOCK_LIMIT:
break
nsteps += 1
else:
msg = _("Cannot determine") + " " + toGeoposEx(dir) + " " + _("limit ..., aborting !")
self.printMsg(msg)
self.statusMsg("")
self.session.open(MessageBox, msg, MessageBox.TYPE_ERROR, timeout = 5)
return
(x0, xm) = optimise(measurements)
print>>log, (_("Weighted position") + " : %2d") % x0
print>>log, (_("Strongest position") + " : %2d") % xm
self.logMsg((_("Final position at index") + " %2d (%5.1f" + chr(176) + ")") % (x0, x0 * self.tuningstepsize), timeout = 6)
move(x0 - x)
class Diseqc:
def __init__(self, frontend):
self.frontend = frontend
def command(self, what, param = 0):
if self.frontend:
cmd = eDVBDiseqcCommand()
if what == "moveWest":
string = 'E03169' + ("%02X" % param)
elif what == "moveEast":
string = 'E03168' + ("%02X" % param)
elif what == "moveTo":
string = 'E0316B' + ("%02X" % param)
elif what == "store":
string = 'E0316A' + ("%02X" % param)
elif what == "gotoX":
string = 'E0316E' + ("%04X" % param)
elif what == "calc":
string = 'E0316F' + ("%06X" % param)
elif what == "limitOn":
string = 'E0316A00'
elif what == "limitOff":
string = 'E03163'
elif what == "limitEast":
string = 'E03166'
elif what == "limitWest":
string = 'E03167'
else:
string = 'E03160' #positioner stop
print "diseqc command:",
print string
cmd.setCommandString(string)
self.frontend.setTone(iDVBFrontend.toneOff)
sleep(0.015) # wait 15msec after disable tone
self.frontend.sendDiseqc(cmd)
if string == 'E03160': #positioner stop
sleep(0.050)
self.frontend.sendDiseqc(cmd) # send 2nd time
class PositionerSetupLog(Screen):
skin = """
<screen position="center,center" size="560,400" title="Positioner Setup Log" >
<ePixmap name="red" position="0,0" zPosition="2" size="140,40" pixmap="buttons/red.png" transparent="1" alphatest="on" />
<ePixmap name="green" position="140,0" zPosition="2" size="140,40" pixmap="buttons/green.png" transparent="1" alphatest="on" />
<ePixmap name="yellow" position="280,0" zPosition="2" size="140,40" pixmap="buttons/yellow.png" transparent="1" alphatest="on" />
<ePixmap name="blue" position="420,0" zPosition="2" size="140,40" pixmap="buttons/blue.png" transparent="1" alphatest="on" />
<widget name="key_red" position="0,0" size="140,40" valign="center" halign="center" zPosition="4" foregroundColor="white" font="Regular;20" transparent="1" shadowColor="background" shadowOffset="-2,-2" />
<widget name="key_green" position="140,0" size="140,40" valign="center" halign="center" zPosition="4" foregroundColor="white" font="Regular;20" transparent="1" shadowColor="background" shadowOffset="-2,-2" />
<widget name="key_yellow" position="280,0" size="140,40" valign="center" halign="center" zPosition="4" foregroundColor="white" font="Regular;20" transparent="1" shadowColor="background" shadowOffset="-2,-2" />
<widget name="key_blue" position="420,0" size="140,40" valign="center" halign="center" zPosition="4" foregroundColor="white" font="Regular;20" transparent="1" shadowColor="background" shadowOffset="-2,-2" />
<ePixmap alphatest="on" pixmap="icons/clock.png" position="480,383" size="14,14" zPosition="3"/>
<widget font="Regular;18" halign="left" position="505,380" render="Label" size="55,20" source="global.CurrentTime" transparent="1" valign="center" zPosition="3">
<convert type="ClockToText">Default</convert>
</widget>
<widget name="list" font="Console;16" position="10,40" size="540,340" />
</screen>"""
def __init__(self, session):
self.session = session
Screen.__init__(self, session)
self["key_red"] = Button(_("Clear"))
self["key_green"] = Button()
self["key_yellow"] = Button()
self["key_blue"] = Button(_("Save"))
self["list"] = ScrollLabel(log.value)
self["actions"] = ActionMap(["DirectionActions", "OkCancelActions", "ColorActions"],
{
"red": self.clear,
"green": self.cancel,
"yellow": self.cancel,
"save": self.save,
"blue": self.save,
"cancel": self.cancel,
"ok": self.cancel,
"left": self["list"].pageUp,
"right": self["list"].pageDown,
"up": self["list"].pageUp,
"down": self["list"].pageDown,
"pageUp": self["list"].pageUp,
"pageDown": self["list"].pageDown
}, -2)
def save(self):
try:
f = open('/tmp/positionersetup.log', 'w')
f.write(log.value)
f.close()
except Exception, e:
self["list"].setText(_("Failed to write /tmp/positionersetup.log: ") + str(e))
self.close(True)
def cancel(self):
self.close(False)
def clear(self):
log.logfile.reset()
log.logfile.truncate()
self.close(False)
class TunerScreen(ConfigListScreen, Screen):
skin = """
<screen position="90,100" size="520,400" title="Tune">
<widget name="config" position="20,10" size="460,350" scrollbarMode="showOnDemand" />
<widget name="introduction" position="20,360" size="350,30" font="Regular;23" />
</screen>"""
def __init__(self, session, feid, fe_data):
self.feid = feid
self.fe_data = fe_data
Screen.__init__(self, session)
ConfigListScreen.__init__(self, None)
self.createConfig(fe_data)
self.initialSetup()
self.createSetup()
self.tuning.sat.addNotifier(self.tuningSatChanged)
self.tuning.type.addNotifier(self.tuningTypeChanged)
self.scan_sat.system.addNotifier(self.systemChanged)
self["actions"] = NumberActionMap(["SetupActions"],
{
"ok": self.keyGo,
"cancel": self.keyCancel,
}, -2)
def createConfig(self, frontendData):
satlist = nimmanager.getRotorSatListForNim(self.feid)
orb_pos = self.fe_data.get("orbital_position", None)
orb_pos_str = str(orb_pos)
self.tuning = ConfigSubsection()
self.tuning.type = ConfigSelection(
default = "manual_transponder",
choices = { "manual_transponder" : _("Manual transponder"),
"predefined_transponder" : _("Predefined transponder") } )
self.tuning.sat = ConfigSatlist(list = satlist)
if orb_pos is not None:
for sat in satlist:
if sat[0] == orb_pos and self.tuning.sat.value != orb_pos_str:
self.tuning.sat.value = orb_pos_str
self.updateTransponders()
defaultSat = {
"orbpos": 192,
"system": eDVBFrontendParametersSatellite.System_DVB_S,
"frequency": 11836,
"inversion": eDVBFrontendParametersSatellite.Inversion_Unknown,
"symbolrate": 27500,
"polarization": eDVBFrontendParametersSatellite.Polarisation_Horizontal,
"fec": eDVBFrontendParametersSatellite.FEC_Auto,
"fec_s2": eDVBFrontendParametersSatellite.FEC_9_10,
"modulation": eDVBFrontendParametersSatellite.Modulation_QPSK }
if frontendData is not None:
ttype = frontendData.get("tuner_type", "UNKNOWN")
defaultSat["system"] = frontendData.get("system", eDVBFrontendParametersSatellite.System_DVB_S)
defaultSat["frequency"] = frontendData.get("frequency", 0) / 1000
defaultSat["inversion"] = frontendData.get("inversion", eDVBFrontendParametersSatellite.Inversion_Unknown)
defaultSat["symbolrate"] = frontendData.get("symbol_rate", 0) / 1000
defaultSat["polarization"] = frontendData.get("polarization", eDVBFrontendParametersSatellite.Polarisation_Horizontal)
if defaultSat["system"] == eDVBFrontendParametersSatellite.System_DVB_S2:
defaultSat["fec_s2"] = frontendData.get("fec_inner", eDVBFrontendParametersSatellite.FEC_Auto)
defaultSat["rolloff"] = frontendData.get("rolloff", eDVBFrontendParametersSatellite.RollOff_alpha_0_35)
defaultSat["pilot"] = frontendData.get("pilot", eDVBFrontendParametersSatellite.Pilot_Unknown)
else:
defaultSat["fec"] = frontendData.get("fec_inner", eDVBFrontendParametersSatellite.FEC_Auto)
defaultSat["modulation"] = frontendData.get("modulation", eDVBFrontendParametersSatellite.Modulation_QPSK)
defaultSat["orbpos"] = frontendData.get("orbital_position", 0)
self.scan_sat = ConfigSubsection()
self.scan_sat.system = ConfigSelection(default = defaultSat["system"], choices = [
(eDVBFrontendParametersSatellite.System_DVB_S, _("DVB-S")),
(eDVBFrontendParametersSatellite.System_DVB_S2, _("DVB-S2"))])
self.scan_sat.frequency = ConfigInteger(default = defaultSat["frequency"], limits = (1, 99999))
self.scan_sat.inversion = ConfigSelection(default = defaultSat["inversion"], choices = [
(eDVBFrontendParametersSatellite.Inversion_Off, _("Off")),
(eDVBFrontendParametersSatellite.Inversion_On, _("On")),
(eDVBFrontendParametersSatellite.Inversion_Unknown, _("Auto"))])
self.scan_sat.symbolrate = ConfigInteger(default = defaultSat["symbolrate"], limits = (1, 99999))
self.scan_sat.polarization = ConfigSelection(default = defaultSat["polarization"], choices = [
(eDVBFrontendParametersSatellite.Polarisation_Horizontal, _("horizontal")),
(eDVBFrontendParametersSatellite.Polarisation_Vertical, _("vertical")),
(eDVBFrontendParametersSatellite.Polarisation_CircularLeft, _("circular left")),
(eDVBFrontendParametersSatellite.Polarisation_CircularRight, _("circular right"))])
self.scan_sat.fec = ConfigSelection(default = defaultSat["fec"], choices = [
(eDVBFrontendParametersSatellite.FEC_Auto, _("Auto")),
(eDVBFrontendParametersSatellite.FEC_1_2, "1/2"),
(eDVBFrontendParametersSatellite.FEC_2_3, "2/3"),
(eDVBFrontendParametersSatellite.FEC_3_4, "3/4"),
(eDVBFrontendParametersSatellite.FEC_5_6, "5/6"),
(eDVBFrontendParametersSatellite.FEC_7_8, "7/8"),
(eDVBFrontendParametersSatellite.FEC_None, _("None"))])
self.scan_sat.fec_s2 = ConfigSelection(default = defaultSat["fec_s2"], choices = [
(eDVBFrontendParametersSatellite.FEC_1_2, "1/2"),
(eDVBFrontendParametersSatellite.FEC_2_3, "2/3"),
(eDVBFrontendParametersSatellite.FEC_3_4, "3/4"),
(eDVBFrontendParametersSatellite.FEC_3_5, "3/5"),
(eDVBFrontendParametersSatellite.FEC_4_5, "4/5"),
(eDVBFrontendParametersSatellite.FEC_5_6, "5/6"),
(eDVBFrontendParametersSatellite.FEC_7_8, "7/8"),
(eDVBFrontendParametersSatellite.FEC_8_9, "8/9"),
(eDVBFrontendParametersSatellite.FEC_9_10, "9/10")])
self.scan_sat.modulation = ConfigSelection(default = defaultSat["modulation"], choices = [
(eDVBFrontendParametersSatellite.Modulation_QPSK, "QPSK"),
(eDVBFrontendParametersSatellite.Modulation_8PSK, "8PSK")])
self.scan_sat.rolloff = ConfigSelection(default = defaultSat.get("rolloff", eDVBFrontendParametersSatellite.RollOff_alpha_0_35), choices = [
(eDVBFrontendParametersSatellite.RollOff_alpha_0_35, "0.35"),
(eDVBFrontendParametersSatellite.RollOff_alpha_0_25, "0.25"),
(eDVBFrontendParametersSatellite.RollOff_alpha_0_20, "0.20"),
(eDVBFrontendParametersSatellite.RollOff_auto, _("Auto"))])
self.scan_sat.pilot = ConfigSelection(default = defaultSat.get("pilot", eDVBFrontendParametersSatellite.Pilot_Unknown), choices = [
(eDVBFrontendParametersSatellite.Pilot_Off, _("Off")),
(eDVBFrontendParametersSatellite.Pilot_On, _("On")),
(eDVBFrontendParametersSatellite.Pilot_Unknown, _("Auto"))])
def initialSetup(self):
currtp = self.transponderToString([None, self.scan_sat.frequency.value, self.scan_sat.symbolrate.value, self.scan_sat.polarization.value])
if currtp in self.tuning.transponder.choices:
self.tuning.type.value = "predefined_transponder"
else:
self.tuning.type.value = "manual_transponder"
def createSetup(self):
self.list = []
self.list.append(getConfigListEntry(_('Tune'), self.tuning.type))
self.list.append(getConfigListEntry(_('Satellite'), self.tuning.sat))
nim = nimmanager.nim_slots[self.feid]
if self.tuning.type.value == "manual_transponder":
if nim.isCompatible("DVB-S2"):
self.list.append(getConfigListEntry(_('System'), self.scan_sat.system))
else:
# downgrade to dvb-s, in case a -s2 config was active
self.scan_sat.system.value = eDVBFrontendParametersSatellite.System_DVB_S
self.list.append(getConfigListEntry(_('Frequency'), self.scan_sat.frequency))
self.list.append(getConfigListEntry(_("Polarisation"), self.scan_sat.polarization))
self.list.append(getConfigListEntry(_('Symbol rate'), self.scan_sat.symbolrate))
if self.scan_sat.system.value == eDVBFrontendParametersSatellite.System_DVB_S:
self.list.append(getConfigListEntry(_("FEC"), self.scan_sat.fec))
self.list.append(getConfigListEntry(_('Inversion'), self.scan_sat.inversion))
elif self.scan_sat.system.value == eDVBFrontendParametersSatellite.System_DVB_S2:
self.list.append(getConfigListEntry(_("FEC"), self.scan_sat.fec_s2))
self.list.append(getConfigListEntry(_('Inversion'), self.scan_sat.inversion))
self.modulationEntry = getConfigListEntry(_('Modulation'), self.scan_sat.modulation)
self.list.append(self.modulationEntry)
self.list.append(getConfigListEntry(_('Roll-off'), self.scan_sat.rolloff))
self.list.append(getConfigListEntry(_('Pilot'), self.scan_sat.pilot))
else: # "predefined_transponder"
self.list.append(getConfigListEntry(_("Transponder"), self.tuning.transponder))
currtp = self.transponderToString([None, self.scan_sat.frequency.value, self.scan_sat.symbolrate.value, self.scan_sat.polarization.value])
self.tuning.transponder.setValue(currtp)
self["config"].list = self.list
self["config"].l.setList(self.list)
def tuningSatChanged(self, *parm):
self.updateTransponders()
self.createSetup()
def tuningTypeChanged(self, *parm):
self.createSetup()
def systemChanged(self, *parm):
self.createSetup()
def transponderToString(self, tr, scale = 1):
if tr[3] == 0:
pol = "H"
elif tr[3] == 1:
pol = "V"
elif tr[3] == 2:
pol = "CL"
elif tr[3] == 3:
pol = "CR"
else:
pol = "??"
return str(tr[1] / scale) + "," + pol + "," + str(tr[2] / scale)
def updateTransponders(self):
if len(self.tuning.sat.choices):
transponderlist = nimmanager.getTransponders(int(self.tuning.sat.value))
tps = []
for transponder in transponderlist:
tps.append(self.transponderToString(transponder, scale = 1000))
self.tuning.transponder = ConfigSelection(choices = tps)
def keyLeft(self):
ConfigListScreen.keyLeft(self)
def keyRight(self):
ConfigListScreen.keyRight(self)
def keyGo(self):
returnvalue = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
satpos = int(self.tuning.sat.value)
if self.tuning.type.value == "manual_transponder":
if self.scan_sat.system.value == eDVBFrontendParametersSatellite.System_DVB_S2:
fec = self.scan_sat.fec_s2.value
else:
fec = self.scan_sat.fec.value
returnvalue = (
self.scan_sat.frequency.value,
self.scan_sat.symbolrate.value,
self.scan_sat.polarization.value,
fec,
self.scan_sat.inversion.value,
satpos,
self.scan_sat.system.value,
self.scan_sat.modulation.value,
self.scan_sat.rolloff.value,
self.scan_sat.pilot.value)
elif self.tuning.type.value == "predefined_transponder":
transponder = nimmanager.getTransponders(satpos)[self.tuning.transponder.index]
returnvalue = (transponder[1] / 1000, transponder[2] / 1000,
transponder[3], transponder[4], 2, satpos, transponder[5], transponder[6], transponder[8], transponder[9])
self.close(returnvalue)
def keyCancel(self):
self.close(None)
class RotorNimSelection(Screen):
skin = """
<screen position="140,165" size="400,130" title="select Slot">
<widget name="nimlist" position="20,10" size="360,100" />
</screen>"""
def __init__(self, session):
Screen.__init__(self, session)
nimlist = nimmanager.getNimListOfType("DVB-S")
nimMenuList = []
for x in nimlist:
if len(nimmanager.getRotorSatListForNim(x)) != 0:
nimMenuList.append((nimmanager.nim_slots[x].friendly_full_description, x))
self["nimlist"] = MenuList(nimMenuList)
self["actions"] = ActionMap(["OkCancelActions"],
{
"ok": self.okbuttonClick ,
"cancel": self.close
}, -1)
def okbuttonClick(self):
selection = self["nimlist"].getCurrent()
self.session.open(PositionerSetup, selection[1])
def PositionerMain(session, **kwargs):
nimList = nimmanager.getNimListOfType("DVB-S")
if len(nimList) == 0:
session.open(MessageBox, _("No positioner capable frontend found."), MessageBox.TYPE_ERROR)
else:
if session.nav.RecordTimer.isRecording():
session.open(MessageBox, _("A recording is currently running. Please stop the recording before trying to configure the positioner."), MessageBox.TYPE_ERROR)
else:
usableNims = []
for x in nimList:
configured_rotor_sats = nimmanager.getRotorSatListForNim(x)
if len(configured_rotor_sats) != 0:
usableNims.append(x)
if len(usableNims) == 1:
session.open(PositionerSetup, usableNims[0])
elif len(usableNims) > 1:
session.open(RotorNimSelection)
else:
session.open(MessageBox, _("No tuner is configured for use with a diseqc positioner!"), MessageBox.TYPE_ERROR)
def PositionerSetupStart(menuid, **kwargs):
if menuid == "scan":
return [(_("Positioner setup"), PositionerMain, "positioner_setup", None)]
else:
return []
def Plugins(**kwargs):
if nimmanager.hasNimType("DVB-S"):
return PluginDescriptor(name=_("Positioner setup"), description = _("Setup your positioner"), where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc = PositionerSetupStart)
else:
return []
| gpl-2.0 |
JFriel/honours_project | venv/lib/python2.7/site-packages/pip/vendor/html5lib/treewalkers/pulldom.py | 1729 | 2302 | from __future__ import absolute_import, division, unicode_literals
from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \
COMMENT, IGNORABLE_WHITESPACE, CHARACTERS
from . import _base
from ..constants import voidElements
class TreeWalker(_base.TreeWalker):
def __iter__(self):
ignore_until = None
previous = None
for event in self.tree:
if previous is not None and \
(ignore_until is None or previous[1] is ignore_until):
if previous[1] is ignore_until:
ignore_until = None
for token in self.tokens(previous, event):
yield token
if token["type"] == "EmptyTag":
ignore_until = previous[1]
previous = event
if ignore_until is None or previous[1] is ignore_until:
for token in self.tokens(previous, None):
yield token
elif ignore_until is not None:
raise ValueError("Illformed DOM event stream: void element without END_ELEMENT")
def tokens(self, event, next):
type, node = event
if type == START_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
attrs = {}
for attr in list(node.attributes.keys()):
attr = node.getAttributeNode(attr)
attrs[(attr.namespaceURI, attr.localName)] = attr.value
if name in voidElements:
for token in self.emptyTag(namespace,
name,
attrs,
not next or next[1] is not node):
yield token
else:
yield self.startTag(namespace, name, attrs)
elif type == END_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
if name not in voidElements:
yield self.endTag(namespace, name)
elif type == COMMENT:
yield self.comment(node.nodeValue)
elif type in (IGNORABLE_WHITESPACE, CHARACTERS):
for token in self.text(node.nodeValue):
yield token
else:
yield self.unknown(type)
| gpl-3.0 |
Arcanemagus/SickRage | lib/dateutil/parser/__init__.py | 22 | 1727 | # -*- coding: utf-8 -*-
from ._parser import parse, parser, parserinfo
from ._parser import DEFAULTPARSER, DEFAULTTZPARSER
from ._parser import UnknownTimezoneWarning
from ._parser import __doc__
from .isoparser import isoparser, isoparse
__all__ = ['parse', 'parser', 'parserinfo',
'isoparse', 'isoparser',
'UnknownTimezoneWarning']
###
# Deprecate portions of the private interface so that downstream code that
# is improperly relying on it is given *some* notice.
def __deprecated_private_func(f):
from functools import wraps
import warnings
msg = ('{name} is a private function and may break without warning, '
'it will be moved and/or renamed in future versions.')
msg = msg.format(name=f.__name__)
@wraps(f)
def deprecated_func(*args, **kwargs):
warnings.warn(msg, DeprecationWarning)
return f(*args, **kwargs)
return deprecated_func
def __deprecate_private_class(c):
import warnings
msg = ('{name} is a private class and may break without warning, '
'it will be moved and/or renamed in future versions.')
msg = msg.format(name=c.__name__)
class private_class(c):
__doc__ = c.__doc__
def __init__(self, *args, **kwargs):
warnings.warn(msg, DeprecationWarning)
super(private_class, self).__init__(*args, **kwargs)
private_class.__name__ = c.__name__
return private_class
from ._parser import _timelex, _resultbase
from ._parser import _tzparser, _parsetz
_timelex = __deprecate_private_class(_timelex)
_tzparser = __deprecate_private_class(_tzparser)
_resultbase = __deprecate_private_class(_resultbase)
_parsetz = __deprecated_private_func(_parsetz)
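# Net effect (illustrative): downstream code that still imports one of these
# private names, e.g.
#   from dateutil.parser import _timelex, _parsetz
# keeps working for now, but each instantiation or call first emits a
# DeprecationWarning noting that the name may be moved or renamed.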
| gpl-3.0 |
tuhangdi/django | django/contrib/auth/context_processors.py | 514 | 1938 | # PermWrapper and PermLookupDict proxy the permissions system into objects that
# the template system can understand.
class PermLookupDict(object):
def __init__(self, user, app_label):
self.user, self.app_label = user, app_label
def __repr__(self):
return str(self.user.get_all_permissions())
def __getitem__(self, perm_name):
return self.user.has_perm("%s.%s" % (self.app_label, perm_name))
def __iter__(self):
# To fix 'item in perms.someapp' and __getitem__ iteraction we need to
# define __iter__. See #18979 for details.
raise TypeError("PermLookupDict is not iterable.")
def __bool__(self):
return self.user.has_module_perms(self.app_label)
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
class PermWrapper(object):
def __init__(self, user):
self.user = user
def __getitem__(self, app_label):
return PermLookupDict(self.user, app_label)
def __iter__(self):
# I am large, I contain multitudes.
raise TypeError("PermWrapper is not iterable.")
def __contains__(self, perm_name):
"""
Lookup by "someapp" or "someapp.someperm" in perms.
"""
if '.' not in perm_name:
# The name refers to module.
return bool(self[perm_name])
app_label, perm_name = perm_name.split('.', 1)
return self[app_label][perm_name]
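# Typical template usage once ``perms`` is in the context (app and permission
# names below are illustrative only):
#   {% if perms.polls %} ... {% endif %}          -> some permission in "polls"
#   {% if perms.polls.can_vote %} ... {% endif %} -> that specific permission
#   {% if "polls.can_vote" in perms %}            -> same check via __contains__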
def auth(request):
"""
Returns context variables required by apps that use Django's authentication
system.
If there is no 'user' attribute in the request, uses AnonymousUser (from
django.contrib.auth).
"""
if hasattr(request, 'user'):
user = request.user
else:
from django.contrib.auth.models import AnonymousUser
user = AnonymousUser()
return {
'user': user,
'perms': PermWrapper(user),
}
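# This processor is enabled by listing it in the TEMPLATES setting, e.g.
# (abridged):
#   TEMPLATES = [{
#       ...
#       'OPTIONS': {
#           'context_processors': [
#               ...
#               'django.contrib.auth.context_processors.auth',
#           ],
#       },
#   }]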
| bsd-3-clause |
TGAC/RAMPART | doc/source/conf.py | 1 | 10364 | # -*- coding: utf-8 -*-
#
# RAMPART documentation build configuration file, created by
# sphinx-quickstart on Wed Dec 11 14:37:43 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'RAMPART'
copyright = u'2014, The Genome Analysis Centre'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.11'
# The full version, including alpha/beta/rc tags.
release = '0.11.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'RAMPARTdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'RAMPART.tex', u'RAMPART Documentation',
u'Daniel Mapleson, Nizar Drou, David Swarbreck', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'rampart', u'RAMPART Documentation',
[u'Daniel Mapleson, Nizar Drou, David Swarbreck'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'RAMPART', u'RAMPART Documentation',
u'Daniel Mapleson, Nizar Drou, David Swarbreck', 'RAMPART', 'A de novo assembly pipeline.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'RAMPART'
epub_author = u'Daniel Mapleson, Nizar Drou, David Swarbreck'
epub_publisher = u'Daniel Mapleson, Nizar Drou, David Swarbreck'
epub_copyright = u'2015, The Genome Analysis Centre'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'RAMPART'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| gpl-3.0 |
lyft/incubator-airflow | airflow/providers/google/cloud/example_dags/example_compute.py | 4 | 4444 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that starts, stops and sets the machine type of a Google Compute
Engine instance.
This DAG relies on the following OS environment variables
* GCP_PROJECT_ID - Google Cloud Platform project where the Compute Engine instance exists.
* GCE_ZONE - Google Cloud Platform zone where the instance exists.
* GCE_INSTANCE - Name of the Compute Engine instance.
* GCE_SHORT_MACHINE_TYPE_NAME - Machine type resource name to set, e.g. 'n1-standard-1'.
See https://cloud.google.com/compute/docs/machine-types
"""
import os
from airflow import models
from airflow.providers.google.cloud.operators.compute import (
ComputeEngineSetMachineTypeOperator, ComputeEngineStartInstanceOperator,
ComputeEngineStopInstanceOperator,
)
from airflow.utils.dates import days_ago
# [START howto_operator_gce_args_common]
GCP_PROJECT_ID = os.environ.get('GCP_PROJECT_ID', 'example-project')
GCE_ZONE = os.environ.get('GCE_ZONE', 'europe-west1-b')
GCE_INSTANCE = os.environ.get('GCE_INSTANCE', 'testinstance')
# [END howto_operator_gce_args_common]
default_args = {
'start_date': days_ago(1),
}
# [START howto_operator_gce_args_set_machine_type]
GCE_SHORT_MACHINE_TYPE_NAME = os.environ.get('GCE_SHORT_MACHINE_TYPE_NAME', 'n1-standard-1')
SET_MACHINE_TYPE_BODY = {
'machineType': 'zones/{}/machineTypes/{}'.format(GCE_ZONE, GCE_SHORT_MACHINE_TYPE_NAME)
}
# [END howto_operator_gce_args_set_machine_type]
with models.DAG(
'example_gcp_compute',
default_args=default_args,
schedule_interval=None, # Override to match your needs
tags=['example'],
) as dag:
# [START howto_operator_gce_start]
gce_instance_start = ComputeEngineStartInstanceOperator(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id=GCE_INSTANCE,
task_id='gcp_compute_start_task'
)
# [END howto_operator_gce_start]
# Duplicate start for idempotence testing
# [START howto_operator_gce_start_no_project_id]
gce_instance_start2 = ComputeEngineStartInstanceOperator(
zone=GCE_ZONE,
resource_id=GCE_INSTANCE,
task_id='gcp_compute_start_task2'
)
# [END howto_operator_gce_start_no_project_id]
# [START howto_operator_gce_stop]
gce_instance_stop = ComputeEngineStopInstanceOperator(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id=GCE_INSTANCE,
task_id='gcp_compute_stop_task'
)
# [END howto_operator_gce_stop]
# Duplicate stop for idempotence testing
# [START howto_operator_gce_stop_no_project_id]
gce_instance_stop2 = ComputeEngineStopInstanceOperator(
zone=GCE_ZONE,
resource_id=GCE_INSTANCE,
task_id='gcp_compute_stop_task2'
)
# [END howto_operator_gce_stop_no_project_id]
# [START howto_operator_gce_set_machine_type]
gce_set_machine_type = ComputeEngineSetMachineTypeOperator(
project_id=GCP_PROJECT_ID,
zone=GCE_ZONE,
resource_id=GCE_INSTANCE,
body=SET_MACHINE_TYPE_BODY,
task_id='gcp_compute_set_machine_type'
)
# [END howto_operator_gce_set_machine_type]
# Duplicate set machine type for idempotence testing
# [START howto_operator_gce_set_machine_type_no_project_id]
gce_set_machine_type2 = ComputeEngineSetMachineTypeOperator(
zone=GCE_ZONE,
resource_id=GCE_INSTANCE,
body=SET_MACHINE_TYPE_BODY,
task_id='gcp_compute_set_machine_type2'
)
# [END howto_operator_gce_set_machine_type_no_project_id]
gce_instance_start >> gce_instance_start2 >> gce_instance_stop >> \
gce_instance_stop2 >> gce_set_machine_type >> gce_set_machine_type2
| apache-2.0 |
pyramania/scipy | scipy/__init__.py | 6 | 4850 | """
SciPy: A scientific computing package for Python
================================================
Documentation is available in the docstrings and
online at https://docs.scipy.org.
Contents
--------
SciPy imports all the functions from the NumPy namespace, and in
addition provides:
Subpackages
-----------
Using any of these subpackages requires an explicit import. For example,
``import scipy.cluster``.
::
cluster --- Vector Quantization / Kmeans
fftpack --- Discrete Fourier Transform algorithms
integrate --- Integration routines
interpolate --- Interpolation Tools
io --- Data input and output
linalg --- Linear algebra routines
linalg.blas --- Wrappers to BLAS library
linalg.lapack --- Wrappers to LAPACK library
misc --- Various utilities that don't have
another home.
ndimage --- n-dimensional image package
odr --- Orthogonal Distance Regression
optimize --- Optimization Tools
signal --- Signal Processing Tools
sparse --- Sparse Matrices
sparse.linalg --- Sparse Linear Algebra
sparse.linalg.dsolve --- Linear Solvers
sparse.linalg.dsolve.umfpack --- :Interface to the UMFPACK library:
sparse.linalg.eigen --- Sparse Eigenvalue Solvers
sparse.linalg.eigen.lobpcg --- Locally Optimal Block Preconditioned
Conjugate Gradient Method (LOBPCG)
spatial --- Spatial data structures and algorithms
special --- Special functions
stats --- Statistical Functions
Utility tools
-------------
::
test --- Run scipy unittests
show_config --- Show scipy build configuration
show_numpy_config --- Show numpy build configuration
__version__ --- Scipy version string
__numpy_version__ --- Numpy version string
"""
from __future__ import division, print_function, absolute_import
__all__ = ['test']
from numpy import show_config as show_numpy_config
if show_numpy_config is None:
raise ImportError("Cannot import scipy when running from numpy source directory.")
from numpy import __version__ as __numpy_version__
# Import numpy symbols to scipy name space
import numpy as _num
linalg = None
from numpy import *
from numpy.random import rand, randn
from numpy.fft import fft, ifft
from numpy.lib.scimath import *
__all__ += _num.__all__
__all__ += ['randn', 'rand', 'fft', 'ifft']
del _num
# Remove the linalg imported from numpy so that the scipy.linalg package can be
# imported.
del linalg
__all__.remove('linalg')
# We first need to detect if we're being called as part of the scipy
# setup procedure itself in a reliable manner.
try:
__SCIPY_SETUP__
except NameError:
__SCIPY_SETUP__ = False
if __SCIPY_SETUP__:
import sys as _sys
_sys.stderr.write('Running from scipy source directory.\n')
del _sys
else:
try:
from scipy.__config__ import show as show_config
except ImportError:
msg = """Error importing scipy: you cannot import scipy while
being in scipy source directory; please exit the scipy source
        tree first, and relaunch your Python interpreter."""
raise ImportError(msg)
from scipy.version import version as __version__
from scipy._lib._version import NumpyVersion as _NumpyVersion
if _NumpyVersion(__numpy_version__) < '1.8.2':
import warnings
warnings.warn("Numpy 1.8.2 or above is recommended for this version of "
"scipy (detected version %s)" % __numpy_version__,
UserWarning)
del _NumpyVersion
from numpy.testing import Tester
def test(*a, **kw):
# Nose never recurses into directories with underscores prefix, so we
# need to list those explicitly. Note that numpy.testing.Tester inserts
# the top-level package path determined from __file__ to argv unconditionally,
# so we only need to add the part that is not otherwise recursed into.
import os
underscore_modules = ['_lib']
base_dir = os.path.abspath(os.path.dirname(__file__))
underscore_paths = [os.path.join(base_dir, name) for name in underscore_modules]
kw['extra_argv'] = list(kw.get('extra_argv', [])) + underscore_paths
return test._tester.test(*a, **kw)
test._tester = Tester()
test.__doc__ = test._tester.test.__doc__
test.__test__ = False # Prevent nose from treating test() as a test
bench = test._tester.bench
| bsd-3-clause |
shoopio/shoop | shuup/gdpr/templatetags/__init__.py | 1 | 1113 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from django_jinja import library
import jinja2
import json
from django.conf import settings
from shuup.gdpr.utils import get_active_consent_pages
class GDPRNamespace(object):
def is_enabled(self, request, **kwargs):
from shuup.gdpr.models import GDPRSettings
return GDPRSettings.get_for_shop(request.shop).enabled
def get_documents(self, request, **kwargs):
return get_active_consent_pages(request.shop)
@jinja2.contextfunction
def get_accepted_cookies(self, context, **kwargs):
request = context["request"]
if settings.SHUUP_GDPR_CONSENT_COOKIE_NAME in request.COOKIES:
consent_cookies = request.COOKIES[settings.SHUUP_GDPR_CONSENT_COOKIE_NAME]
return json.loads(consent_cookies).get("cookies")
return []
library.global_function(name="gdpr", fn=GDPRNamespace())
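# Illustrative sketch (not part of the original module): once registered as the
# Jinja2 global "gdpr", the namespace is typically consumed from templates along
# these lines; the surrounding template structure is an assumption.
#
#   {% if gdpr.is_enabled(request) %}
#     {% for page in gdpr.get_documents(request) %}...{% endfor %}
#     {% set accepted = gdpr.get_accepted_cookies() %}
#   {% endif %}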
| agpl-3.0 |
code4futuredotorg/reeborg_tw | src/libraries/brython/Lib/logging/handlers.py | 736 | 55579 | # Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
import errno, logging, socket, os, pickle, struct, time, re
from codecs import BOM_UTF8
from stat import ST_DEV, ST_INO, ST_MTIME
import queue
try:
import threading
except ImportError: #pragma: no cover
threading = None
#
# Some constants...
#
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514
_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
class BaseRotatingHandler(logging.FileHandler):
"""
Base class for handlers that rotate log files at a certain point.
Not meant to be instantiated directly. Instead, use RotatingFileHandler
or TimedRotatingFileHandler.
"""
def __init__(self, filename, mode, encoding=None, delay=False):
"""
Use the specified filename for streamed logging
"""
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
self.mode = mode
self.encoding = encoding
self.namer = None
self.rotator = None
def emit(self, record):
"""
Emit a record.
Output the record to the file, catering for rollover as described
in doRollover().
"""
try:
if self.shouldRollover(record):
self.doRollover()
logging.FileHandler.emit(self, record)
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
def rotation_filename(self, default_name):
"""
Modify the filename of a log file when rotating.
This is provided so that a custom filename can be provided.
The default implementation calls the 'namer' attribute of the
handler, if it's callable, passing the default name to
it. If the attribute isn't callable (the default is None), the name
is returned unchanged.
:param default_name: The default name for the log file.
"""
if not callable(self.namer):
result = default_name
else:
result = self.namer(default_name)
return result
def rotate(self, source, dest):
"""
When rotating, rotate the current log.
The default implementation calls the 'rotator' attribute of the
handler, if it's callable, passing the source and dest arguments to
it. If the attribute isn't callable (the default is None), the source
is simply renamed to the destination.
:param source: The source filename. This is normally the base
filename, e.g. 'test.log'
:param dest: The destination filename. This is normally
what the source is rotated to, e.g. 'test.log.1'.
"""
if not callable(self.rotator):
# Issue 18940: A file may not have been created if delay is True.
if os.path.exists(source):
os.rename(source, dest)
else:
self.rotator(source, dest)
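# Illustrative sketch (not part of the original module): the 'namer' and
# 'rotator' hooks above allow rotated files to be post-processed, e.g.
# compressed. The file name and size limits below are assumptions, and
# RotatingFileHandler is the subclass defined later in this module.
def _example_compressing_rotation():
    import gzip
    import shutil

    def namer(default_name):
        # Rotated backups get a .gz suffix, e.g. 'app.log.1.gz'.
        return default_name + ".gz"

    def rotator(source, dest):
        # Compress the closed log file instead of a plain os.rename().
        with open(source, "rb") as f_in, gzip.open(dest, "wb") as f_out:
            shutil.copyfileobj(f_in, f_out)
        os.remove(source)

    handler = RotatingFileHandler("app.log", maxBytes=1024 * 1024, backupCount=5)
    handler.namer = namer
    handler.rotator = rotator
    return handler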
class RotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a set of files, which switches from one file
to the next when the current file reaches a certain size.
"""
def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False):
"""
Open the specified file and use it as the stream for logging.
By default, the file grows indefinitely. You can specify particular
values of maxBytes and backupCount to allow the file to rollover at
a predetermined size.
Rollover occurs whenever the current log file is nearly maxBytes in
length. If backupCount is >= 1, the system will successively create
new files with the same pathname as the base file, but with extensions
".1", ".2" etc. appended to it. For example, with a backupCount of 5
and a base file name of "app.log", you would get "app.log",
"app.log.1", "app.log.2", ... through to "app.log.5". The file being
written to is always "app.log" - when it gets filled up, it is closed
and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
exist, then they are renamed to "app.log.2", "app.log.3" etc.
respectively.
If maxBytes is zero, rollover never occurs.
"""
# If rotation/rollover is wanted, it doesn't make sense to use another
# mode. If for example 'w' were specified, then if there were multiple
# runs of the calling application, the logs from previous runs would be
# lost if the 'w' is respected, because the log file would be truncated
# on each run.
if maxBytes > 0:
mode = 'a'
BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
self.maxBytes = maxBytes
self.backupCount = backupCount
def doRollover(self):
"""
Do a rollover, as described in __init__().
"""
if self.stream:
self.stream.close()
self.stream = None
if self.backupCount > 0:
for i in range(self.backupCount - 1, 0, -1):
sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
i + 1))
if os.path.exists(sfn):
if os.path.exists(dfn):
os.remove(dfn)
os.rename(sfn, dfn)
dfn = self.rotation_filename(self.baseFilename + ".1")
if os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
if not self.delay:
self.stream = self._open()
def shouldRollover(self, record):
"""
Determine if rollover should occur.
Basically, see if the supplied record would cause the file to exceed
the size limit we have.
"""
if self.stream is None: # delay was set...
self.stream = self._open()
if self.maxBytes > 0: # are we rolling over?
msg = "%s\n" % self.format(record)
self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
if self.stream.tell() + len(msg) >= self.maxBytes:
return 1
return 0
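# Illustrative sketch (not part of the original module): minimal size-based
# rotation as described in the __init__ docstring above. The file name, size
# limit and backup count are assumptions.
def _example_rotating_file_usage():
    handler = RotatingFileHandler("app.log", maxBytes=100 * 1024, backupCount=3)
    handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
    logger = logging.getLogger("example.rotating")
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    return logger          # writes app.log, rolling to app.log.1 ... app.log.3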
class TimedRotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a file, rotating the log file at certain timed
intervals.
If backupCount is > 0, when rollover is done, no more than backupCount
files are kept - the oldest ones are deleted.
"""
def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False):
BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
self.when = when.upper()
self.backupCount = backupCount
self.utc = utc
# Calculate the real rollover interval, which is just the number of
# seconds between rollovers. Also set the filename suffix used when
# a rollover occurs. Current 'when' events supported:
# S - Seconds
# M - Minutes
# H - Hours
# D - Days
# midnight - roll over at midnight
# W{0-6} - roll over on a certain day; 0 - Monday
#
# Case of the 'when' specifier is not important; lower or upper case
# will work.
if self.when == 'S':
self.interval = 1 # one second
self.suffix = "%Y-%m-%d_%H-%M-%S"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
elif self.when == 'M':
self.interval = 60 # one minute
self.suffix = "%Y-%m-%d_%H-%M"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
elif self.when == 'H':
self.interval = 60 * 60 # one hour
self.suffix = "%Y-%m-%d_%H"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
elif self.when == 'D' or self.when == 'MIDNIGHT':
self.interval = 60 * 60 * 24 # one day
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
elif self.when.startswith('W'):
self.interval = 60 * 60 * 24 * 7 # one week
if len(self.when) != 2:
raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
if self.when[1] < '0' or self.when[1] > '6':
raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
self.dayOfWeek = int(self.when[1])
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
else:
raise ValueError("Invalid rollover interval specified: %s" % self.when)
self.extMatch = re.compile(self.extMatch, re.ASCII)
self.interval = self.interval * interval # multiply by units requested
if os.path.exists(filename):
t = os.stat(filename)[ST_MTIME]
else:
t = int(time.time())
self.rolloverAt = self.computeRollover(t)
def computeRollover(self, currentTime):
"""
Work out the rollover time based on the specified time.
"""
result = currentTime + self.interval
# If we are rolling over at midnight or weekly, then the interval is already known.
# What we need to figure out is WHEN the next interval is. In other words,
# if you are rolling over at midnight, then your base interval is 1 day,
# but you want to start that one day clock at midnight, not now. So, we
# have to fudge the rolloverAt value in order to trigger the first rollover
# at the right time. After that, the regular interval will take care of
# the rest. Note that this code doesn't care about leap seconds. :)
if self.when == 'MIDNIGHT' or self.when.startswith('W'):
# This could be done with less code, but I wanted it to be clear
if self.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 +
currentSecond)
result = currentTime + r
# If we are rolling over on a certain day, add in the number of days until
# the next rollover, but offset by 1 since we just calculated the time
# until the next day starts. There are three cases:
# Case 1) The day to rollover is today; in this case, do nothing
# Case 2) The day to rollover is further in the interval (i.e., today is
# day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
# next rollover is simply 6 - 2 - 1, or 3.
# Case 3) The day to rollover is behind us in the interval (i.e., today
# is day 5 (Saturday) and rollover is on day 3 (Thursday).
# Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
# number of days left in the current week (1) plus the number
# of days in the next week until the rollover day (3).
# The calculations described in 2) and 3) above need to have a day added.
# This is because the above time calculation takes us to midnight on this
# day, i.e. the start of the next day.
if self.when.startswith('W'):
day = t[6] # 0 is Monday
if day != self.dayOfWeek:
if day < self.dayOfWeek:
daysToWait = self.dayOfWeek - day
else:
daysToWait = 6 - day + self.dayOfWeek + 1
newRolloverAt = result + (daysToWait * (60 * 60 * 24))
if not self.utc:
dstNow = t[-1]
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
result = newRolloverAt
return result
def shouldRollover(self, record):
"""
Determine if rollover should occur.
record is not used, as we are just comparing times, but it is needed so
the method signatures are the same
"""
t = int(time.time())
if t >= self.rolloverAt:
return 1
return 0
def getFilesToDelete(self):
"""
Determine the files to delete when rolling over.
More specific than the earlier method, which just used glob.glob().
"""
dirName, baseName = os.path.split(self.baseFilename)
fileNames = os.listdir(dirName)
result = []
prefix = baseName + "."
plen = len(prefix)
for fileName in fileNames:
if fileName[:plen] == prefix:
suffix = fileName[plen:]
if self.extMatch.match(suffix):
result.append(os.path.join(dirName, fileName))
result.sort()
if len(result) < self.backupCount:
result = []
else:
result = result[:len(result) - self.backupCount]
return result
def doRollover(self):
"""
do a rollover; in this case, a date/time stamp is appended to the filename
when the rollover happens. However, you want the file to be named for the
start of the interval, not the current time. If there is a backup count,
then we have to get a list of matching filenames, sort them and remove
the one with the oldest suffix.
"""
if self.stream:
self.stream.close()
self.stream = None
# get the time that this sequence started at and make it a TimeTuple
currentTime = int(time.time())
dstNow = time.localtime(currentTime)[-1]
t = self.rolloverAt - self.interval
if self.utc:
timeTuple = time.gmtime(t)
else:
timeTuple = time.localtime(t)
dstThen = timeTuple[-1]
if dstNow != dstThen:
if dstNow:
addend = 3600
else:
addend = -3600
timeTuple = time.localtime(t + addend)
dfn = self.rotation_filename(self.baseFilename + "." +
time.strftime(self.suffix, timeTuple))
if os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
if self.backupCount > 0:
for s in self.getFilesToDelete():
os.remove(s)
if not self.delay:
self.stream = self._open()
newRolloverAt = self.computeRollover(currentTime)
while newRolloverAt <= currentTime:
newRolloverAt = newRolloverAt + self.interval
#If DST changes and midnight or weekly rollover, adjust for this.
if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
self.rolloverAt = newRolloverAt
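# Illustrative sketch (not part of the original module): rotate at midnight and
# keep one week of dated backups. The file name and backup count are assumptions.
def _example_timed_rotation_usage():
    handler = TimedRotatingFileHandler("app.log", when="midnight", backupCount=7)
    logger = logging.getLogger("example.timed")
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    return logger          # backups are suffixed with the %Y-%m-%d date stamp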
class WatchedFileHandler(logging.FileHandler):
"""
A handler for logging to a file, which watches the file
to see if it has changed while in use. This can happen because of
usage of programs such as newsyslog and logrotate which perform
log file rotation. This handler, intended for use under Unix,
watches the file to see if it has changed since the last emit.
(A file has changed if its device or inode have changed.)
If it has changed, the old file stream is closed, and the file
opened to get a new stream.
This handler is not appropriate for use under Windows, because
under Windows open files cannot be moved or renamed - logging
opens the files with exclusive locks - and so there is no need
for such a handler. Furthermore, ST_INO is not supported under
Windows; stat always returns zero for this value.
This handler is based on a suggestion and patch by Chad J.
Schroeder.
"""
def __init__(self, filename, mode='a', encoding=None, delay=False):
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
self.dev, self.ino = -1, -1
self._statstream()
def _statstream(self):
if self.stream:
sres = os.fstat(self.stream.fileno())
self.dev, self.ino = sres[ST_DEV], sres[ST_INO]
def emit(self, record):
"""
Emit a record.
First check if the underlying file has changed, and if it
has, close the old stream and reopen the file to get the
current stream.
"""
# Reduce the chance of race conditions by stat'ing by path only
# once and then fstat'ing our new fd if we opened a new log stream.
# See issue #14632: Thanks to John Mulligan for the problem report
# and patch.
try:
# stat the file by path, checking for existence
sres = os.stat(self.baseFilename)
except OSError as err:
if err.errno == errno.ENOENT:
sres = None
else:
raise
# compare file system stat with that of our stream file handle
if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
if self.stream is not None:
# we have an open file handle, clean it up
self.stream.flush()
self.stream.close()
# open a new file handle and get new stat info from that fd
self.stream = self._open()
self._statstream()
logging.FileHandler.emit(self, record)
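# Illustrative sketch (not part of the original module): WatchedFileHandler is a
# drop-in replacement for FileHandler on Unix hosts where an external tool such
# as logrotate moves the log file out from under the process. The path is an
# assumption.
def _example_watched_file_usage():
    logger = logging.getLogger("example.watched")
    logger.addHandler(WatchedFileHandler("/var/log/myapp.log"))
    return logger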
class SocketHandler(logging.Handler):
"""
A handler class which writes logging records, in pickle format, to
a streaming socket. The socket is kept open across logging calls.
If the peer resets it, an attempt is made to reconnect on the next call.
The pickle which is sent is that of the LogRecord's attribute dictionary
(__dict__), so that the receiver does not need to have the logging module
installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
When the attribute *closeOnError* is set to True - if a socket error
occurs, the socket is silently closed and then reopened on the next
logging call.
"""
logging.Handler.__init__(self)
self.host = host
self.port = port
self.sock = None
self.closeOnError = False
self.retryTime = None
#
# Exponential backoff parameters.
#
self.retryStart = 1.0
self.retryMax = 30.0
self.retryFactor = 2.0
def makeSocket(self, timeout=1):
"""
A factory method which allows subclasses to define the precise
type of socket they want.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if hasattr(s, 'settimeout'):
s.settimeout(timeout)
try:
s.connect((self.host, self.port))
return s
except socket.error:
s.close()
raise
def createSocket(self):
"""
Try to create a socket, using an exponential backoff with
a max retry time. Thanks to Robert Olson for the original patch
(SF #815911) which has been slightly refactored.
"""
now = time.time()
# Either retryTime is None, in which case this
# is the first time back after a disconnect, or
# we've waited long enough.
if self.retryTime is None:
attempt = True
else:
attempt = (now >= self.retryTime)
if attempt:
try:
self.sock = self.makeSocket()
self.retryTime = None # next time, no delay before trying
except socket.error:
#Creation failed, so set the retry time and return.
if self.retryTime is None:
self.retryPeriod = self.retryStart
else:
self.retryPeriod = self.retryPeriod * self.retryFactor
if self.retryPeriod > self.retryMax:
self.retryPeriod = self.retryMax
self.retryTime = now + self.retryPeriod
def send(self, s):
"""
Send a pickled string to the socket.
This function allows for partial sends which can happen when the
network is busy.
"""
if self.sock is None:
self.createSocket()
#self.sock can be None either because we haven't reached the retry
#time yet, or because we have reached the retry time and retried,
#but are still unable to connect.
if self.sock:
try:
if hasattr(self.sock, "sendall"):
self.sock.sendall(s)
else: #pragma: no cover
sentsofar = 0
left = len(s)
while left > 0:
sent = self.sock.send(s[sentsofar:])
sentsofar = sentsofar + sent
left = left - sent
except socket.error: #pragma: no cover
self.sock.close()
self.sock = None # so we can call createSocket next time
def makePickle(self, record):
"""
Pickles the record in binary format with a length prefix, and
returns it ready for transmission across the socket.
"""
ei = record.exc_info
if ei:
# just to get traceback text into record.exc_text ...
dummy = self.format(record)
# See issue #14436: If msg or args are objects, they may not be
# available on the receiving end. So we convert the msg % args
# to a string, save it as msg and zap the args.
d = dict(record.__dict__)
d['msg'] = record.getMessage()
d['args'] = None
d['exc_info'] = None
s = pickle.dumps(d, 1)
slen = struct.pack(">L", len(s))
return slen + s
def handleError(self, record):
"""
Handle an error during logging.
An error has occurred during logging. Most likely cause -
connection lost. Close the socket so that we can retry on the
next event.
"""
if self.closeOnError and self.sock:
self.sock.close()
self.sock = None #try to reconnect next time
else:
logging.Handler.handleError(self, record)
def emit(self, record):
"""
Emit a record.
Pickles the record and writes it to the socket in binary format.
If there is an error with the socket, silently drop the packet.
If there was a problem with the socket, re-establishes the
socket.
"""
try:
s = self.makePickle(record)
self.send(s)
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
def close(self):
"""
Closes the socket.
"""
self.acquire()
try:
if self.sock:
self.sock.close()
self.sock = None
logging.Handler.close(self)
finally:
self.release()
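# Illustrative sketch (not part of the original module): a minimal receiver for
# the length-prefixed pickle stream produced by makePickle()/send() above. The
# host/port pair is an assumption; a real receiver would add error handling and
# concurrency (see the socket server example in the logging cookbook).
def _example_socket_receiver(host="localhost", port=DEFAULT_TCP_LOGGING_PORT):
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind((host, port))
    srv.listen(1)
    conn, _addr = srv.accept()
    try:
        while True:
            header = conn.recv(4)               # 4-byte big-endian length prefix
            if len(header) < 4:
                break
            slen = struct.unpack(">L", header)[0]
            data = b""
            while len(data) < slen:             # read the pickled LogRecord dict
                chunk = conn.recv(slen - len(data))
                if not chunk:
                    break
                data += chunk
            if len(data) < slen:
                break
            record = logging.makeLogRecord(pickle.loads(data))
            logging.getLogger(record.name).handle(record)
    finally:
        conn.close()
        srv.close()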
class DatagramHandler(SocketHandler):
"""
A handler class which writes logging records, in pickle format, to
a datagram socket. The pickle which is sent is that of the LogRecord's
attribute dictionary (__dict__), so that the receiver does not need to
have the logging module installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
"""
SocketHandler.__init__(self, host, port)
self.closeOnError = False
def makeSocket(self):
"""
The factory method of SocketHandler is here overridden to create
a UDP socket (SOCK_DGRAM).
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return s
def send(self, s):
"""
Send a pickled string to a socket.
This function no longer allows for partial sends which can happen
when the network is busy - UDP does not guarantee delivery and
can deliver packets out of sequence.
"""
if self.sock is None:
self.createSocket()
self.sock.sendto(s, (self.host, self.port))
class SysLogHandler(logging.Handler):
"""
A handler class which sends formatted logging records to a syslog
server. Based on Sam Rushing's syslog module:
http://www.nightmare.com/squirl/python-ext/misc/syslog.py
Contributed by Nicolas Untz (after which minor refactoring changes
have been made).
"""
# from <linux/sys/syslog.h>:
# ======================================================================
# priorities/facilities are encoded into a single 32-bit quantity, where
# the bottom 3 bits are the priority (0-7) and the top 28 bits are the
# facility (0-big number). Both the priorities and the facilities map
# roughly one-to-one to strings in the syslogd(8) source code. This
# mapping is included in this file.
#
# priorities (these are ordered)
LOG_EMERG = 0 # system is unusable
LOG_ALERT = 1 # action must be taken immediately
LOG_CRIT = 2 # critical conditions
LOG_ERR = 3 # error conditions
LOG_WARNING = 4 # warning conditions
LOG_NOTICE = 5 # normal but significant condition
LOG_INFO = 6 # informational
LOG_DEBUG = 7 # debug-level messages
# facility codes
LOG_KERN = 0 # kernel messages
LOG_USER = 1 # random user-level messages
LOG_MAIL = 2 # mail system
LOG_DAEMON = 3 # system daemons
LOG_AUTH = 4 # security/authorization messages
LOG_SYSLOG = 5 # messages generated internally by syslogd
LOG_LPR = 6 # line printer subsystem
LOG_NEWS = 7 # network news subsystem
LOG_UUCP = 8 # UUCP subsystem
LOG_CRON = 9 # clock daemon
LOG_AUTHPRIV = 10 # security/authorization messages (private)
LOG_FTP = 11 # FTP daemon
# other codes through 15 reserved for system use
LOG_LOCAL0 = 16 # reserved for local use
LOG_LOCAL1 = 17 # reserved for local use
LOG_LOCAL2 = 18 # reserved for local use
LOG_LOCAL3 = 19 # reserved for local use
LOG_LOCAL4 = 20 # reserved for local use
LOG_LOCAL5 = 21 # reserved for local use
LOG_LOCAL6 = 22 # reserved for local use
LOG_LOCAL7 = 23 # reserved for local use
priority_names = {
"alert": LOG_ALERT,
"crit": LOG_CRIT,
"critical": LOG_CRIT,
"debug": LOG_DEBUG,
"emerg": LOG_EMERG,
"err": LOG_ERR,
"error": LOG_ERR, # DEPRECATED
"info": LOG_INFO,
"notice": LOG_NOTICE,
"panic": LOG_EMERG, # DEPRECATED
"warn": LOG_WARNING, # DEPRECATED
"warning": LOG_WARNING,
}
facility_names = {
"auth": LOG_AUTH,
"authpriv": LOG_AUTHPRIV,
"cron": LOG_CRON,
"daemon": LOG_DAEMON,
"ftp": LOG_FTP,
"kern": LOG_KERN,
"lpr": LOG_LPR,
"mail": LOG_MAIL,
"news": LOG_NEWS,
"security": LOG_AUTH, # DEPRECATED
"syslog": LOG_SYSLOG,
"user": LOG_USER,
"uucp": LOG_UUCP,
"local0": LOG_LOCAL0,
"local1": LOG_LOCAL1,
"local2": LOG_LOCAL2,
"local3": LOG_LOCAL3,
"local4": LOG_LOCAL4,
"local5": LOG_LOCAL5,
"local6": LOG_LOCAL6,
"local7": LOG_LOCAL7,
}
#The map below appears to be trivially lowercasing the key. However,
#there's more to it than meets the eye - in some locales, lowercasing
#gives unexpected results. See SF #1524081: in the Turkish locale,
#"INFO".lower() != "info"
priority_map = {
"DEBUG" : "debug",
"INFO" : "info",
"WARNING" : "warning",
"ERROR" : "error",
"CRITICAL" : "critical"
}
def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
facility=LOG_USER, socktype=None):
"""
Initialize a handler.
If address is specified as a string, a UNIX socket is used. To log to a
local syslogd, "SysLogHandler(address="/dev/log")" can be used.
If facility is not specified, LOG_USER is used.
"""
logging.Handler.__init__(self)
self.address = address
self.facility = facility
self.socktype = socktype
if isinstance(address, str):
self.unixsocket = True
self._connect_unixsocket(address)
else:
self.unixsocket = False
if socktype is None:
socktype = socket.SOCK_DGRAM
self.socket = socket.socket(socket.AF_INET, socktype)
if socktype == socket.SOCK_STREAM:
self.socket.connect(address)
self.socktype = socktype
self.formatter = None
def _connect_unixsocket(self, address):
use_socktype = self.socktype
if use_socktype is None:
use_socktype = socket.SOCK_DGRAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except socket.error:
self.socket.close()
if self.socktype is not None:
# user didn't specify falling back, so fail
raise
use_socktype = socket.SOCK_STREAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except socket.error:
self.socket.close()
raise
def encodePriority(self, facility, priority):
"""
Encode the facility and priority. You can pass in strings or
integers - if strings are passed, the facility_names and
priority_names mapping dictionaries are used to convert them to
integers.
"""
if isinstance(facility, str):
facility = self.facility_names[facility]
if isinstance(priority, str):
priority = self.priority_names[priority]
return (facility << 3) | priority
    def close(self):
"""
Closes the socket.
"""
self.acquire()
try:
self.socket.close()
logging.Handler.close(self)
finally:
self.release()
def mapPriority(self, levelName):
"""
Map a logging level name to a key in the priority_names map.
This is useful in two scenarios: when custom levels are being
used, and in the case where you can't do a straightforward
mapping by lowercasing the logging level name because of locale-
specific issues (see SF #1524081).
"""
return self.priority_map.get(levelName, "warning")
ident = '' # prepended to all messages
append_nul = True # some old syslog daemons expect a NUL terminator
def emit(self, record):
"""
Emit a record.
The record is formatted, and then sent to the syslog server. If
exception information is present, it is NOT sent to the server.
"""
msg = self.format(record)
if self.ident:
msg = self.ident + msg
if self.append_nul:
msg += '\000'
"""
We need to convert record level to lowercase, maybe this will
change in the future.
"""
prio = '<%d>' % self.encodePriority(self.facility,
self.mapPriority(record.levelname))
prio = prio.encode('utf-8')
# Message is a string. Convert to bytes as required by RFC 5424
msg = msg.encode('utf-8')
msg = prio + msg
try:
if self.unixsocket:
try:
self.socket.send(msg)
except socket.error:
self.socket.close()
self._connect_unixsocket(self.address)
self.socket.send(msg)
elif self.socktype == socket.SOCK_DGRAM:
self.socket.sendto(msg, self.address)
else:
self.socket.sendall(msg)
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
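# Illustrative sketch (not part of the original module): the PRI prefix written
# by emit() comes from encodePriority(); e.g. facility LOG_USER (1) with
# priority "warning" (4) gives (1 << 3) | 4 == 12, so the message starts with
# '<12>'. The Unix socket address below is an assumption.
def _example_syslog_usage():
    handler = SysLogHandler(address="/dev/log", facility=SysLogHandler.LOG_LOCAL0)
    logger = logging.getLogger("example.syslog")
    logger.addHandler(handler)
    logger.warning("disk space low")    # sent with PRI (16 << 3) | 4 == 132
    return logger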
class SMTPHandler(logging.Handler):
"""
A handler class which sends an SMTP email for each logging event.
"""
def __init__(self, mailhost, fromaddr, toaddrs, subject,
credentials=None, secure=None, timeout=5.0):
"""
Initialize the handler.
Initialize the instance with the from and to addresses and subject
line of the email. To specify a non-standard SMTP port, use the
(host, port) tuple format for the mailhost argument. To specify
authentication credentials, supply a (username, password) tuple
for the credentials argument. To specify the use of a secure
protocol (TLS), pass in a tuple for the secure argument. This will
only be used when authentication credentials are supplied. The tuple
will be either an empty tuple, or a single-value tuple with the name
of a keyfile, or a 2-value tuple with the names of the keyfile and
certificate file. (This tuple is passed to the `starttls` method).
A timeout in seconds can be specified for the SMTP connection (the
        default is 5 seconds).
"""
logging.Handler.__init__(self)
if isinstance(mailhost, tuple):
self.mailhost, self.mailport = mailhost
else:
self.mailhost, self.mailport = mailhost, None
if isinstance(credentials, tuple):
self.username, self.password = credentials
else:
self.username = None
self.fromaddr = fromaddr
if isinstance(toaddrs, str):
toaddrs = [toaddrs]
self.toaddrs = toaddrs
self.subject = subject
self.secure = secure
self.timeout = timeout
def getSubject(self, record):
"""
Determine the subject for the email.
If you want to specify a subject line which is record-dependent,
override this method.
"""
return self.subject
def emit(self, record):
"""
Emit a record.
Format the record and send it to the specified addressees.
"""
try:
import smtplib
from email.utils import formatdate
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
msg = self.format(record)
msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
self.fromaddr,
",".join(self.toaddrs),
self.getSubject(record),
formatdate(), msg)
if self.username:
if self.secure is not None:
smtp.ehlo()
smtp.starttls(*self.secure)
smtp.ehlo()
smtp.login(self.username, self.password)
smtp.sendmail(self.fromaddr, self.toaddrs, msg)
smtp.quit()
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
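# Illustrative sketch (not part of the original module): mail ERROR records via
# an authenticated STARTTLS connection. Host, port, addresses and credentials
# are placeholder assumptions.
def _example_smtp_usage():
    handler = SMTPHandler(
        mailhost=("smtp.example.com", 587),
        fromaddr="[email protected]",
        toaddrs=["[email protected]"],
        subject="Application error",
        credentials=("alerts", "secret"),
        secure=(),              # empty tuple: STARTTLS without keyfile/certfile
        timeout=10.0,
    )
    handler.setLevel(logging.ERROR)
    logging.getLogger("example.smtp").addHandler(handler)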
class NTEventLogHandler(logging.Handler):
"""
A handler class which sends events to the NT Event Log. Adds a
registry entry for the specified application name. If no dllname is
provided, win32service.pyd (which contains some basic message
placeholders) is used. Note that use of these placeholders will make
your event logs big, as the entire message source is held in the log.
If you want slimmer logs, you have to pass in the name of your own DLL
which contains the message definitions you want to use in the event log.
"""
def __init__(self, appname, dllname=None, logtype="Application"):
logging.Handler.__init__(self)
try:
import win32evtlogutil, win32evtlog
self.appname = appname
self._welu = win32evtlogutil
if not dllname:
dllname = os.path.split(self._welu.__file__)
dllname = os.path.split(dllname[0])
dllname = os.path.join(dllname[0], r'win32service.pyd')
self.dllname = dllname
self.logtype = logtype
self._welu.AddSourceToRegistry(appname, dllname, logtype)
self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
self.typemap = {
logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
}
except ImportError:
print("The Python Win32 extensions for NT (service, event "\
"logging) appear not to be available.")
self._welu = None
def getMessageID(self, record):
"""
Return the message ID for the event record. If you are using your
own messages, you could do this by having the msg passed to the
logger being an ID rather than a formatting string. Then, in here,
you could use a dictionary lookup to get the message ID. This
version returns 1, which is the base message ID in win32service.pyd.
"""
return 1
def getEventCategory(self, record):
"""
Return the event category for the record.
Override this if you want to specify your own categories. This version
returns 0.
"""
return 0
def getEventType(self, record):
"""
Return the event type for the record.
Override this if you want to specify your own types. This version does
a mapping using the handler's typemap attribute, which is set up in
__init__() to a dictionary which contains mappings for DEBUG, INFO,
WARNING, ERROR and CRITICAL. If you are using your own levels you will
either need to override this method or place a suitable dictionary in
the handler's typemap attribute.
"""
return self.typemap.get(record.levelno, self.deftype)
def emit(self, record):
"""
Emit a record.
Determine the message ID, event category and event type. Then
log the message in the NT event log.
"""
if self._welu:
try:
id = self.getMessageID(record)
cat = self.getEventCategory(record)
type = self.getEventType(record)
msg = self.format(record)
self._welu.ReportEvent(self.appname, id, cat, type, [msg])
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
def close(self):
"""
Clean up this handler.
You can remove the application name from the registry as a
source of event log entries. However, if you do this, you will
not be able to see the events as you intended in the Event Log
Viewer - it needs to be able to access the registry to get the
DLL name.
"""
#self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
logging.Handler.close(self)
class HTTPHandler(logging.Handler):
"""
A class which sends records to a Web server, using either GET or
POST semantics.
"""
def __init__(self, host, url, method="GET", secure=False, credentials=None):
"""
Initialize the instance with the host, the request URL, and the method
("GET" or "POST")
"""
logging.Handler.__init__(self)
method = method.upper()
if method not in ["GET", "POST"]:
raise ValueError("method must be GET or POST")
self.host = host
self.url = url
self.method = method
self.secure = secure
self.credentials = credentials
def mapLogRecord(self, record):
"""
Default implementation of mapping the log record into a dict
that is sent as the CGI data. Overwrite in your class.
Contributed by Franz Glasner.
"""
return record.__dict__
def emit(self, record):
"""
Emit a record.
Send the record to the Web server as a percent-encoded dictionary
"""
try:
import http.client, urllib.parse
host = self.host
if self.secure:
h = http.client.HTTPSConnection(host)
else:
h = http.client.HTTPConnection(host)
url = self.url
data = urllib.parse.urlencode(self.mapLogRecord(record))
if self.method == "GET":
if (url.find('?') >= 0):
sep = '&'
else:
sep = '?'
url = url + "%c%s" % (sep, data)
h.putrequest(self.method, url)
# support multiple hosts on one IP address...
# need to strip optional :port from host, if present
i = host.find(":")
if i >= 0:
host = host[:i]
h.putheader("Host", host)
if self.method == "POST":
h.putheader("Content-type",
"application/x-www-form-urlencoded")
h.putheader("Content-length", str(len(data)))
if self.credentials:
import base64
                # Basic auth header: build "user:password", base64-encode it and
                # decode back to str so it can be used as a header value.
                s = ('%s:%s' % self.credentials).encode('utf-8')
                s = 'Basic ' + base64.b64encode(s).strip().decode('ascii')
h.putheader('Authorization', s)
h.endheaders()
if self.method == "POST":
h.send(data.encode('utf-8'))
h.getresponse() #can't do anything with the result
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
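# Illustrative sketch (not part of the original module): POST each record's
# attribute dictionary (see mapLogRecord above) to a web endpoint. The host and
# URL are assumptions.
def _example_http_usage():
    handler = HTTPHandler(host="logs.example.com:9022", url="/log", method="POST")
    logging.getLogger("example.http").addHandler(handler)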
class BufferingHandler(logging.Handler):
"""
A handler class which buffers logging records in memory. Whenever each
record is added to the buffer, a check is made to see if the buffer should
be flushed. If it should, then flush() is expected to do what's needed.
"""
def __init__(self, capacity):
"""
Initialize the handler with the buffer size.
"""
logging.Handler.__init__(self)
self.capacity = capacity
self.buffer = []
def shouldFlush(self, record):
"""
Should the handler flush its buffer?
Returns true if the buffer is up to capacity. This method can be
overridden to implement custom flushing strategies.
"""
return (len(self.buffer) >= self.capacity)
def emit(self, record):
"""
Emit a record.
Append the record. If shouldFlush() tells us to, call flush() to process
the buffer.
"""
self.buffer.append(record)
if self.shouldFlush(record):
self.flush()
def flush(self):
"""
Override to implement custom flushing behaviour.
This version just zaps the buffer to empty.
"""
self.acquire()
try:
self.buffer = []
finally:
self.release()
def close(self):
"""
Close the handler.
This version just flushes and chains to the parent class' close().
"""
self.flush()
logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
"""
A handler class which buffers logging records in memory, periodically
flushing them to a target handler. Flushing occurs whenever the buffer
is full, or when an event of a certain severity or greater is seen.
"""
def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
"""
Initialize the handler with the buffer size, the level at which
flushing should occur and an optional target.
Note that without a target being set either here or via setTarget(),
a MemoryHandler is no use to anyone!
"""
BufferingHandler.__init__(self, capacity)
self.flushLevel = flushLevel
self.target = target
def shouldFlush(self, record):
"""
Check for buffer full or a record at the flushLevel or higher.
"""
return (len(self.buffer) >= self.capacity) or \
(record.levelno >= self.flushLevel)
def setTarget(self, target):
"""
Set the target handler for this handler.
"""
self.target = target
def flush(self):
"""
For a MemoryHandler, flushing means just sending the buffered
records to the target, if there is one. Override if you want
different behaviour.
The record buffer is also cleared by this operation.
"""
self.acquire()
try:
if self.target:
for record in self.buffer:
self.target.handle(record)
self.buffer = []
finally:
self.release()
def close(self):
"""
Flush, set the target to None and lose the buffer.
"""
self.flush()
self.acquire()
try:
self.target = None
BufferingHandler.close(self)
finally:
self.release()
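# Illustrative sketch (not part of the original module): buffer DEBUG records in
# memory and only write them to disk once an ERROR (or worse) arrives, giving
# context around failures without constant I/O. The file name is an assumption.
def _example_memory_buffer_usage():
    target = logging.FileHandler("debug-context.log")
    buffered = MemoryHandler(capacity=200, flushLevel=logging.ERROR, target=target)
    logger = logging.getLogger("example.memory")
    logger.setLevel(logging.DEBUG)
    logger.addHandler(buffered)
    return logger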
class QueueHandler(logging.Handler):
"""
This handler sends events to a queue. Typically, it would be used together
with a multiprocessing Queue to centralise logging to file in one process
(in a multi-process application), so as to avoid file write contention
between processes.
This code is new in Python 3.2, but this class can be copy pasted into
user code for use with earlier Python versions.
"""
def __init__(self, queue):
"""
Initialise an instance, using the passed queue.
"""
logging.Handler.__init__(self)
self.queue = queue
def enqueue(self, record):
"""
Enqueue a record.
The base implementation uses put_nowait. You may want to override
this method if you want to use blocking, timeouts or custom queue
implementations.
"""
self.queue.put_nowait(record)
def prepare(self, record):
"""
Prepares a record for queuing. The object returned by this method is
enqueued.
The base implementation formats the record to merge the message
and arguments, and removes unpickleable items from the record
in-place.
You might want to override this method if you want to convert
the record to a dict or JSON string, or send a modified copy
of the record while leaving the original intact.
"""
# The format operation gets traceback text into record.exc_text
# (if there's exception data), and also puts the message into
# record.message. We can then use this to replace the original
# msg + args, as these might be unpickleable. We also zap the
# exc_info attribute, as it's no longer needed and, if not None,
# will typically not be pickleable.
self.format(record)
record.msg = record.message
record.args = None
record.exc_info = None
return record
def emit(self, record):
"""
Emit a record.
Writes the LogRecord to the queue, preparing it for pickling first.
"""
try:
self.enqueue(self.prepare(record))
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
if threading:
class QueueListener(object):
"""
This class implements an internal threaded listener which watches for
LogRecords being added to a queue, removes them and passes them to a
list of handlers for processing.
"""
_sentinel = None
def __init__(self, queue, *handlers):
"""
Initialise an instance with the specified queue and
handlers.
"""
self.queue = queue
self.handlers = handlers
self._stop = threading.Event()
self._thread = None
def dequeue(self, block):
"""
Dequeue a record and return it, optionally blocking.
The base implementation uses get. You may want to override this method
if you want to use timeouts or work with custom queue implementations.
"""
return self.queue.get(block)
def start(self):
"""
Start the listener.
This starts up a background thread to monitor the queue for
LogRecords to process.
"""
self._thread = t = threading.Thread(target=self._monitor)
t.setDaemon(True)
t.start()
        def prepare(self, record):
"""
Prepare a record for handling.
This method just returns the passed-in record. You may want to
override this method if you need to do any custom marshalling or
manipulation of the record before passing it to the handlers.
"""
return record
def handle(self, record):
"""
Handle a record.
This just loops through the handlers offering them the record
to handle.
"""
record = self.prepare(record)
for handler in self.handlers:
handler.handle(record)
def _monitor(self):
"""
Monitor the queue for records, and ask the handler
to deal with them.
This method runs on a separate, internal thread.
The thread will terminate if it sees a sentinel object in the queue.
"""
q = self.queue
has_task_done = hasattr(q, 'task_done')
while not self._stop.isSet():
try:
record = self.dequeue(True)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
pass
# There might still be records in the queue.
while True:
try:
record = self.dequeue(False)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
break
def enqueue_sentinel(self):
"""
This is used to enqueue the sentinel record.
The base implementation uses put_nowait. You may want to override this
method if you want to use timeouts or work with custom queue
implementations.
"""
self.queue.put_nowait(self._sentinel)
def stop(self):
"""
Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
"""
self._stop.set()
self.enqueue_sentinel()
self._thread.join()
self._thread = None
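# Illustrative sketch (not part of the original module, and assuming threading
# is available): wire a QueueHandler to a QueueListener so that formatting and
# I/O happen on the listener's background thread rather than in the producing
# code. The file name is an assumption.
def _example_queue_usage():
    q = queue.Queue(-1)                         # unbounded queue
    listener = QueueListener(q, logging.FileHandler("queued.log"))
    listener.start()
    logger = logging.getLogger("example.queue")
    logger.addHandler(QueueHandler(q))
    logger.error("handled on the listener thread")
    listener.stop()                             # drains the queue and joins the thread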
| agpl-3.0 |
nicobustillos/odoo | addons/hr_holidays/wizard/hr_holidays_summary_employees.py | 337 | 2152 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class hr_holidays_summary_employee(osv.osv_memory):
_name = 'hr.holidays.summary.employee'
_description = 'HR Leaves Summary Report By Employee'
_columns = {
'date_from': fields.date('From', required=True),
'emp': fields.many2many('hr.employee', 'summary_emp_rel', 'sum_id', 'emp_id', 'Employee(s)'),
'holiday_type': fields.selection([('Approved','Approved'),('Confirmed','Confirmed'),('both','Both Approved and Confirmed')], 'Select Leave Type', required=True)
}
_defaults = {
'date_from': lambda *a: time.strftime('%Y-%m-01'),
'holiday_type': 'Approved',
}
def print_report(self, cr, uid, ids, context=None):
data = self.read(cr, uid, ids, context=context)[0]
data['emp'] = context['active_ids']
datas = {
'ids': [],
'model': 'hr.employee',
'form': data
}
return {
'type': 'ir.actions.report.xml',
'report_name': 'holidays.summary',
'datas': datas,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |