repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
cchurch/ansible | test/units/modules/storage/netapp/test_na_ontap_software_update.py | 43 | 5876 | # (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit tests for ONTAP Ansible module: na_ontap_software_update '''
from __future__ import print_function
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch, Mock
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_software_update \
import NetAppONTAPSoftwareUpdate as my_module # module under test
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
"""prepare arguments so that they will be picked up during module creation"""
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
class AnsibleExitJson(Exception):
"""Exception class to be raised by module.exit_json and caught by the test case"""
pass
class AnsibleFailJson(Exception):
"""Exception class to be raised by module.fail_json and caught by the test case"""
pass
def exit_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over exit_json; package return data into an exception"""
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over fail_json; package return data into an exception"""
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
''' mock server connection to ONTAP host '''
def __init__(self, kind=None, parm1=None, parm2=None):
''' save arguments '''
self.type = kind
self.parm1 = parm1
self.parm2 = parm2
self.xml_in = None
self.xml_out = None
def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
''' mock invoke_successfully returning xml data '''
self.xml_in = xml
if self.type == 'software_update':
xml = self.build_software_update_info(self.parm1, self.parm2)
self.xml_out = xml
return xml
def autosupport_log(self):
''' mock autosupport log'''
return None
@staticmethod
def build_software_update_info(status, node):
''' build xml data for software-update-info '''
xml = netapp_utils.zapi.NaElement('xml')
data = {
'num-records': 1,
'attributes-list': {'cluster-image-info': {'node-id': node}},
'progress-status': status,
'attributes': {'ndu-progress-info': {'overall-status': 'completed',
'completed-node-count': '0'}},
}
xml.translate_struct(data)
print(xml.to_string())
return xml
class TestMyModule(unittest.TestCase):
''' a group of related Unit Tests '''
def setUp(self):
self.mock_module_helper = patch.multiple(basic.AnsibleModule,
exit_json=exit_json,
fail_json=fail_json)
self.mock_module_helper.start()
self.addCleanup(self.mock_module_helper.stop)
self.server = MockONTAPConnection()
self.use_vsim = False
def set_default_args(self):
if self.use_vsim:
hostname = '10.10.10.10'
username = 'admin'
password = 'admin'
node = 'vsim1'
package_version = 'Fattire__9.3.0'
package_url = 'abc.com'
else:
hostname = 'hostname'
username = 'username'
password = 'password'
node = 'abc'
package_version = 'test'
package_url = 'abc.com'
return dict({
'hostname': hostname,
'username': username,
'password': password,
'nodes': node,
'package_version': package_version,
'package_url': package_url,
'https': 'true'
})
def test_module_fail_when_required_args_missing(self):
''' required arguments are reported as errors '''
with pytest.raises(AnsibleFailJson) as exc:
set_module_args({})
my_module()
print('Info: %s' % exc.value.args[0]['msg'])
def test_ensure_image_get_called(self):
''' a more interesting test '''
set_module_args(self.set_default_args())
my_obj = my_module()
my_obj.server = self.server
cluster_image_get = my_obj.cluster_image_get()
print('Info: test_software_update_get: %s' % repr(cluster_image_get))
assert cluster_image_get is None
def test_ensure_apply_for_update_called(self):
''' updating software and checking idempotency '''
module_args = {}
module_args.update(self.set_default_args())
module_args.update({'package_url': 'abc.com'})
set_module_args(module_args)
my_obj = my_module()
my_obj.autosupport_log = Mock(return_value=None)
if not self.use_vsim:
my_obj.server = self.server
with pytest.raises(AnsibleExitJson) as exc:
my_obj.apply()
print('Info: test_software_update_apply: %s' % repr(exc.value))
assert not exc.value.args[0]['changed']
if not self.use_vsim:
my_obj.server = MockONTAPConnection('software_update', 'async_pkg_get_phase_complete', 'abc')
with pytest.raises(AnsibleExitJson) as exc:
my_obj.apply()
print('Info: test_software_update_apply: %s' % repr(exc.value))
assert exc.value.args[0]['changed']
| gpl-3.0 |
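The test file above relies on a standard Ansible unit-test pattern: arguments are injected through basic._ANSIBLE_ARGS and exit_json/fail_json are patched so the module raises instead of exiting. A minimal sketch of that pattern, with illustrative argument values (a real run of this module also needs nodes, package_version and package_url):

def test_module_reports_change():
    set_module_args({'hostname': 'h', 'username': 'u', 'password': 'p'})  # illustrative args only
    with patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json):
        with pytest.raises(AnsibleExitJson) as exc:
            my_module().apply()              # module raises instead of calling sys.exit
    assert exc.value.args[0]['changed']      # packaged return data travels on the exception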
aviaryan/open-event-orga-server | migrations/versions/30ca70296a1c_.py | 11 | 8271 | """empty message
Revision ID: 30ca70296a1c
Revises: None
Create Date: 2015-12-20 17:54:59.544763
"""
# revision identifiers, used by Alembic.
revision = '30ca70296a1c'
down_revision = None
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('events',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('email', sa.String(), nullable=True),
sa.Column('color', sqlalchemy_utils.types.color.ColorType(length=20), nullable=True),
sa.Column('logo', sa.String(), nullable=True),
sa.Column('start_time', sa.DateTime(), nullable=False),
sa.Column('end_time', sa.DateTime(), nullable=False),
sa.Column('latitude', sa.Float(), nullable=True),
sa.Column('longitude', sa.Float(), nullable=True),
sa.Column('location_name', sa.String(), nullable=True),
sa.Column('slogan', sa.String(), nullable=True),
sa.Column('url', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('format',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=True),
sa.Column('label_en', sa.String(), nullable=False),
sa.Column('event_id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('language',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=True),
sa.Column('label_en', sa.String(), nullable=True),
sa.Column('label_de', sa.String(), nullable=True),
sa.Column('event_id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('level',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('label_en', sa.String(), nullable=True),
sa.Column('event_id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('nickname', sa.String(length=100), nullable=True),
sa.Column('login', sa.String(length=80), nullable=True),
sa.Column('email', sa.String(length=120), nullable=True),
sa.Column('password', sa.String(length=128), nullable=True),
sa.Column('reset_password', sa.String(length=128), nullable=True),
sa.Column('salt', sa.String(length=128), nullable=True),
sa.Column('role', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('login')
)
op.create_table('versions',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('event_id', sa.Integer(), nullable=True),
sa.Column('event_ver', sa.Integer(), nullable=False),
sa.Column('session_ver', sa.Integer(), nullable=False),
sa.Column('speakers_ver', sa.Integer(), nullable=False),
sa.Column('tracks_ver', sa.Integer(), nullable=False),
sa.Column('sponsors_ver', sa.Integer(), nullable=False),
sa.Column('microlocations_ver', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('eventsusers',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('event_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('editor', sa.Boolean(), nullable=True),
sa.Column('admin', sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(['event_id'], ['events.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('files',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('path', sa.String(), nullable=False),
sa.Column('owner_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['owner_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('microlocation',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('latitude', sa.Float(), nullable=True),
sa.Column('longitude', sa.Float(), nullable=True),
sa.Column('floor', sa.Integer(), nullable=True),
sa.Column('room', sa.String(), nullable=True),
sa.Column('event_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['event_id'], ['events.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('speaker',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('photo', sa.String(), nullable=True),
sa.Column('biography', sa.Text(), nullable=True),
sa.Column('email', sa.String(), nullable=False),
sa.Column('web', sa.String(), nullable=True),
sa.Column('twitter', sa.String(), nullable=True),
sa.Column('facebook', sa.String(), nullable=True),
sa.Column('github', sa.String(), nullable=True),
sa.Column('linkedin', sa.String(), nullable=True),
sa.Column('organisation', sa.String(), nullable=False),
sa.Column('position', sa.String(), nullable=True),
sa.Column('country', sa.String(), nullable=False),
sa.Column('event_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['event_id'], ['events.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('sponsors',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('url', sa.String(), nullable=True),
sa.Column('logo', sa.String(), nullable=True),
sa.Column('event_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['event_id'], ['events.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('tracks',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('description', sa.Text(), nullable=False),
sa.Column('track_image_url', sa.Text(), nullable=True),
sa.Column('event_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['event_id'], ['events.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('session',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(), nullable=False),
sa.Column('subtitle', sa.String(), nullable=True),
sa.Column('abstract', sa.Text(), nullable=True),
sa.Column('description', sa.Text(), nullable=False),
sa.Column('start_time', sa.DateTime(), nullable=False),
sa.Column('end_time', sa.DateTime(), nullable=False),
sa.Column('track_id', sa.Integer(), nullable=True),
sa.Column('level_id', sa.Integer(), nullable=True),
sa.Column('format_id', sa.Integer(), nullable=True),
sa.Column('language_id', sa.Integer(), nullable=True),
sa.Column('microlocation_id', sa.Integer(), nullable=True),
sa.Column('event_id', sa.Integer(), nullable=True),
sa.Column('is_accepted', sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(['event_id'], ['events.id'], ),
sa.ForeignKeyConstraint(['format_id'], ['format.id'], ),
sa.ForeignKeyConstraint(['language_id'], ['language.id'], ),
sa.ForeignKeyConstraint(['level_id'], ['level.id'], ),
sa.ForeignKeyConstraint(['microlocation_id'], ['microlocation.id'], ),
sa.ForeignKeyConstraint(['track_id'], ['tracks.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('speakers_sessions',
sa.Column('speaker_id', sa.Integer(), nullable=True),
sa.Column('session_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['session_id'], ['session.id'], ),
sa.ForeignKeyConstraint(['speaker_id'], ['speaker.id'], )
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('speakers_sessions')
op.drop_table('session')
op.drop_table('tracks')
op.drop_table('sponsors')
op.drop_table('speaker')
op.drop_table('microlocation')
op.drop_table('files')
op.drop_table('eventsusers')
op.drop_table('versions')
op.drop_table('user')
op.drop_table('level')
op.drop_table('language')
op.drop_table('format')
op.drop_table('events')
### end Alembic commands ###
| gpl-3.0 |
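The revision above is normally applied through Alembic's command interface; a short sketch using the programmatic API, assuming a standard alembic.ini next to the migrations directory:

from alembic.config import Config
from alembic import command

cfg = Config("alembic.ini")            # assumed configuration file location
command.upgrade(cfg, "30ca70296a1c")   # runs upgrade(), creating the tables above
command.downgrade(cfg, "base")         # runs downgrade(), dropping them again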
krishna-pandey-git/django | django/contrib/auth/__init__.py | 387 | 7508 | import inspect
import re
from django.apps import apps as django_apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.middleware.csrf import rotate_token
from django.utils.crypto import constant_time_compare
from django.utils.module_loading import import_string
from django.utils.translation import LANGUAGE_SESSION_KEY
from .signals import user_logged_in, user_logged_out, user_login_failed
SESSION_KEY = '_auth_user_id'
BACKEND_SESSION_KEY = '_auth_user_backend'
HASH_SESSION_KEY = '_auth_user_hash'
REDIRECT_FIELD_NAME = 'next'
def load_backend(path):
return import_string(path)()
def _get_backends(return_tuples=False):
backends = []
for backend_path in settings.AUTHENTICATION_BACKENDS:
backend = load_backend(backend_path)
backends.append((backend, backend_path) if return_tuples else backend)
if not backends:
raise ImproperlyConfigured(
'No authentication backends have been defined. Does '
'AUTHENTICATION_BACKENDS contain anything?'
)
return backends
def get_backends():
return _get_backends(return_tuples=False)
def _clean_credentials(credentials):
"""
Cleans a dictionary of credentials of potentially sensitive info before
sending to less secure functions.
Not comprehensive - intended for user_login_failed signal
"""
SENSITIVE_CREDENTIALS = re.compile('api|token|key|secret|password|signature', re.I)
CLEANSED_SUBSTITUTE = '********************'
for key in credentials:
if SENSITIVE_CREDENTIALS.search(key):
credentials[key] = CLEANSED_SUBSTITUTE
return credentials
def _get_user_session_key(request):
# This value in the session is always serialized to a string, so we need
# to convert it back to Python whenever we access it.
return get_user_model()._meta.pk.to_python(request.session[SESSION_KEY])
def authenticate(**credentials):
"""
If the given credentials are valid, return a User object.
"""
for backend, backend_path in _get_backends(return_tuples=True):
try:
inspect.getcallargs(backend.authenticate, **credentials)
except TypeError:
# This backend doesn't accept these credentials as arguments. Try the next one.
continue
try:
user = backend.authenticate(**credentials)
except PermissionDenied:
# This backend says to stop in our tracks - this user should not be allowed in at all.
return None
if user is None:
continue
# Annotate the user object with the path of the backend.
user.backend = backend_path
return user
# The credentials supplied are invalid to all backends, fire signal
user_login_failed.send(sender=__name__,
credentials=_clean_credentials(credentials))
def login(request, user):
"""
Persist a user id and a backend in the request. This way a user doesn't
have to reauthenticate on every request. Note that data set during
the anonymous session is retained when the user logs in.
"""
session_auth_hash = ''
if user is None:
user = request.user
if hasattr(user, 'get_session_auth_hash'):
session_auth_hash = user.get_session_auth_hash()
if SESSION_KEY in request.session:
if _get_user_session_key(request) != user.pk or (
session_auth_hash and
request.session.get(HASH_SESSION_KEY) != session_auth_hash):
# To avoid reusing another user's session, create a new, empty
# session if the existing session corresponds to a different
# authenticated user.
request.session.flush()
else:
request.session.cycle_key()
request.session[SESSION_KEY] = user._meta.pk.value_to_string(user)
request.session[BACKEND_SESSION_KEY] = user.backend
request.session[HASH_SESSION_KEY] = session_auth_hash
if hasattr(request, 'user'):
request.user = user
rotate_token(request)
user_logged_in.send(sender=user.__class__, request=request, user=user)
def logout(request):
"""
Removes the authenticated user's ID from the request and flushes their
session data.
"""
# Dispatch the signal before the user is logged out so the receivers have a
# chance to find out *who* logged out.
user = getattr(request, 'user', None)
if hasattr(user, 'is_authenticated') and not user.is_authenticated():
user = None
user_logged_out.send(sender=user.__class__, request=request, user=user)
# remember language choice saved to session
language = request.session.get(LANGUAGE_SESSION_KEY)
request.session.flush()
if language is not None:
request.session[LANGUAGE_SESSION_KEY] = language
if hasattr(request, 'user'):
from django.contrib.auth.models import AnonymousUser
request.user = AnonymousUser()
def get_user_model():
"""
Returns the User model that is active in this project.
"""
try:
return django_apps.get_model(settings.AUTH_USER_MODEL)
except ValueError:
raise ImproperlyConfigured("AUTH_USER_MODEL must be of the form 'app_label.model_name'")
except LookupError:
raise ImproperlyConfigured(
"AUTH_USER_MODEL refers to model '%s' that has not been installed" % settings.AUTH_USER_MODEL
)
def get_user(request):
"""
Returns the user model instance associated with the given request session.
If no user is retrieved an instance of `AnonymousUser` is returned.
"""
from .models import AnonymousUser
user = None
try:
user_id = _get_user_session_key(request)
backend_path = request.session[BACKEND_SESSION_KEY]
except KeyError:
pass
else:
if backend_path in settings.AUTHENTICATION_BACKENDS:
backend = load_backend(backend_path)
user = backend.get_user(user_id)
# Verify the session
if ('django.contrib.auth.middleware.SessionAuthenticationMiddleware'
in settings.MIDDLEWARE_CLASSES and hasattr(user, 'get_session_auth_hash')):
session_hash = request.session.get(HASH_SESSION_KEY)
session_hash_verified = session_hash and constant_time_compare(
session_hash,
user.get_session_auth_hash()
)
if not session_hash_verified:
request.session.flush()
user = None
return user or AnonymousUser()
def get_permission_codename(action, opts):
"""
Returns the codename of the permission for the specified action.
"""
return '%s_%s' % (action, opts.model_name)
def update_session_auth_hash(request, user):
"""
Updating a user's password logs out all sessions for the user if
django.contrib.auth.middleware.SessionAuthenticationMiddleware is enabled.
This function takes the current request and the updated user object from
which the new session hash will be derived and updates the session hash
appropriately to prevent a password change from logging out the session
from which the password was changed.
"""
if hasattr(user, 'get_session_auth_hash') and request.user == user:
request.session[HASH_SESSION_KEY] = user.get_session_auth_hash()
default_app_config = 'django.contrib.auth.apps.AuthConfig'
| bsd-3-clause |
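The module above is usually consumed from view code; a minimal sketch of the intended authenticate/login/logout sequence (the view functions themselves are illustrative, not part of Django):

from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse

def login_view(request):
    user = authenticate(username=request.POST['username'],
                        password=request.POST['password'])
    if user is None:
        return HttpResponse(status=401)
    login(request, user)      # stores SESSION_KEY, BACKEND_SESSION_KEY and HASH_SESSION_KEY
    return HttpResponse('ok')

def logout_view(request):
    logout(request)           # flushes the session, keeping only the language choice
    return HttpResponse('ok')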
thresholdsoftware/asylum-v2.0 | openerp/addons/auth_crypt/__init__.py | 435 | 1050 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import auth_crypt
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
romain-intel/bcc | src/cc/frontends/p4/compiler/ebpfType.py | 10 | 1117 | # Copyright (c) Barefoot Networks, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
from compilationException import CompilationException
class EbpfType(object):
__doc__ = "Base class for representing a P4 type"
def __init__(self, hlirType):
self.hlirType = hlirType
# Methods to override
def serialize(self, serializer):
# the type itself
raise CompilationException(True, "Method must be overridden")
def declare(self, serializer, identifier, asPointer):
# declaration of an identifier with this type
# asPointer is a boolean;
# if true, the identifier is declared as a pointer
raise CompilationException(True, "Method must be overridden")
def emitInitializer(self, serializer):
# A default initializer suitable for this type
raise CompilationException(True, "Method must be overridden")
def declareArray(self, serializer, identifier, size):
# Declare an identifier with an array type with the specified size
raise CompilationException(True, "Method must be overridden")
| apache-2.0 |
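EbpfType is an abstract base: concrete P4 types override the four methods that raise CompilationException. A hypothetical subclass sketch; the type name and the serializer.append() call are assumptions for illustration only:

class EbpfBoolType(EbpfType):
    """Hypothetical concrete type; serializer.append() is assumed for illustration."""
    def serialize(self, serializer):
        serializer.append("u8")                      # the C representation of the type
    def declare(self, serializer, identifier, asPointer):
        self.serialize(serializer)
        serializer.append(" *" + identifier if asPointer else " " + identifier)
    def emitInitializer(self, serializer):
        serializer.append("0")
    def declareArray(self, serializer, identifier, size):
        self.declare(serializer, identifier, False)
        serializer.append("[%d]" % size)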
watonyweng/horizon | openstack_dashboard/dashboards/identity/projects/workflows.py | 17 | 37664 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from openstack_auth import utils as auth_utils
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.api import base
from openstack_dashboard.api import cinder
from openstack_dashboard.api import keystone
from openstack_dashboard.api import nova
from openstack_dashboard.usage import quotas
INDEX_URL = "horizon:identity:projects:index"
ADD_USER_URL = "horizon:identity:projects:create_user"
PROJECT_GROUP_ENABLED = keystone.VERSIONS.active >= 3
PROJECT_USER_MEMBER_SLUG = "update_members"
PROJECT_GROUP_MEMBER_SLUG = "update_group_members"
COMMON_HORIZONTAL_TEMPLATE = "identity/projects/_common_horizontal_form.html"
class ProjectQuotaAction(workflows.Action):
ifcb_label = _("Injected File Content (Bytes)")
metadata_items = forms.IntegerField(min_value=-1,
label=_("Metadata Items"))
cores = forms.IntegerField(min_value=-1, label=_("VCPUs"))
instances = forms.IntegerField(min_value=-1, label=_("Instances"))
injected_files = forms.IntegerField(min_value=-1,
label=_("Injected Files"))
injected_file_content_bytes = forms.IntegerField(min_value=-1,
label=ifcb_label)
volumes = forms.IntegerField(min_value=-1, label=_("Volumes"))
snapshots = forms.IntegerField(min_value=-1, label=_("Volume Snapshots"))
gigabytes = forms.IntegerField(
min_value=-1, label=_("Total Size of Volumes and Snapshots (GB)"))
ram = forms.IntegerField(min_value=-1, label=_("RAM (MB)"))
floating_ips = forms.IntegerField(min_value=-1, label=_("Floating IPs"))
fixed_ips = forms.IntegerField(min_value=-1, label=_("Fixed IPs"))
security_groups = forms.IntegerField(min_value=-1,
label=_("Security Groups"))
security_group_rules = forms.IntegerField(min_value=-1,
label=_("Security Group Rules"))
# Neutron
security_group = forms.IntegerField(min_value=-1,
label=_("Security Groups"))
security_group_rule = forms.IntegerField(min_value=-1,
label=_("Security Group Rules"))
floatingip = forms.IntegerField(min_value=-1, label=_("Floating IPs"))
network = forms.IntegerField(min_value=-1, label=_("Networks"))
port = forms.IntegerField(min_value=-1, label=_("Ports"))
router = forms.IntegerField(min_value=-1, label=_("Routers"))
subnet = forms.IntegerField(min_value=-1, label=_("Subnets"))
def __init__(self, request, *args, **kwargs):
super(ProjectQuotaAction, self).__init__(request,
*args,
**kwargs)
disabled_quotas = quotas.get_disabled_quotas(request)
for field in disabled_quotas:
if field in self.fields:
self.fields[field].required = False
self.fields[field].widget = forms.HiddenInput()
class UpdateProjectQuotaAction(ProjectQuotaAction):
def clean(self):
cleaned_data = super(UpdateProjectQuotaAction, self).clean()
usages = quotas.tenant_quota_usages(
self.request, tenant_id=self.initial['project_id'])
# Validate the quota values before updating quotas.
bad_values = []
for key, value in cleaned_data.items():
used = usages[key].get('used', 0)
if value is not None and value >= 0 and used > value:
bad_values.append(_('%(used)s %(key)s used') %
{'used': used,
'key': quotas.QUOTA_NAMES.get(key, key)})
if bad_values:
value_str = ", ".join(bad_values)
msg = (_('Quota value(s) cannot be less than the current usage '
'value(s): %s.') %
value_str)
raise forms.ValidationError(msg)
return cleaned_data
class Meta(object):
name = _("Quota")
slug = 'update_quotas'
help_text = _("Set maximum quotas for the project.")
class CreateProjectQuotaAction(ProjectQuotaAction):
class Meta(object):
name = _("Quota")
slug = 'create_quotas'
help_text = _("Set maximum quotas for the project.")
class UpdateProjectQuota(workflows.Step):
action_class = UpdateProjectQuotaAction
template_name = COMMON_HORIZONTAL_TEMPLATE
depends_on = ("project_id",)
contributes = quotas.QUOTA_FIELDS
class CreateProjectQuota(workflows.Step):
action_class = CreateProjectQuotaAction
template_name = COMMON_HORIZONTAL_TEMPLATE
depends_on = ("project_id",)
contributes = quotas.QUOTA_FIELDS
class CreateProjectInfoAction(workflows.Action):
# Hide the domain_id and domain_name by default
domain_id = forms.CharField(label=_("Domain ID"),
required=False,
widget=forms.HiddenInput())
domain_name = forms.CharField(label=_("Domain Name"),
required=False,
widget=forms.HiddenInput())
name = forms.CharField(label=_("Name"),
max_length=64)
description = forms.CharField(widget=forms.widgets.Textarea(
attrs={'rows': 4}),
label=_("Description"),
required=False)
enabled = forms.BooleanField(label=_("Enabled"),
required=False,
initial=True)
def __init__(self, request, *args, **kwargs):
super(CreateProjectInfoAction, self).__init__(request,
*args,
**kwargs)
# For keystone V3, display the two fields in read-only
if keystone.VERSIONS.active >= 3:
readonlyInput = forms.TextInput(attrs={'readonly': 'readonly'})
self.fields["domain_id"].widget = readonlyInput
self.fields["domain_name"].widget = readonlyInput
class Meta(object):
name = _("Project Information")
help_text = _("Create a project to organize users.")
class CreateProjectInfo(workflows.Step):
action_class = CreateProjectInfoAction
template_name = COMMON_HORIZONTAL_TEMPLATE
contributes = ("domain_id",
"domain_name",
"project_id",
"name",
"description",
"enabled")
class UpdateProjectMembersAction(workflows.MembershipAction):
def __init__(self, request, *args, **kwargs):
super(UpdateProjectMembersAction, self).__init__(request,
*args,
**kwargs)
err_msg = _('Unable to retrieve user list. Please try again later.')
# Use the domain_id from the project
domain_id = self.initial.get("domain_id", None)
project_id = ''
if 'project_id' in self.initial:
project_id = self.initial['project_id']
# Get the default role
try:
default_role = api.keystone.get_default_role(self.request)
# Default role is necessary to add members to a project
if default_role is None:
default = getattr(settings,
"OPENSTACK_KEYSTONE_DEFAULT_ROLE", None)
msg = (_('Could not find default role "%s" in Keystone') %
default)
raise exceptions.NotFound(msg)
except Exception:
exceptions.handle(self.request,
err_msg,
redirect=reverse(INDEX_URL))
default_role_name = self.get_default_role_field_name()
self.fields[default_role_name] = forms.CharField(required=False)
self.fields[default_role_name].initial = default_role.id
# Get list of available users
all_users = []
try:
all_users = api.keystone.user_list(request,
domain=domain_id)
except Exception:
exceptions.handle(request, err_msg)
users_list = [(user.id, user.name) for user in all_users]
# Get list of roles
role_list = []
try:
role_list = api.keystone.role_list(request)
except Exception:
exceptions.handle(request,
err_msg,
redirect=reverse(INDEX_URL))
for role in role_list:
field_name = self.get_member_field_name(role.id)
label = role.name
self.fields[field_name] = forms.MultipleChoiceField(required=False,
label=label)
self.fields[field_name].choices = users_list
self.fields[field_name].initial = []
# Figure out users & roles
if project_id:
try:
users_roles = api.keystone.get_project_users_roles(request,
project_id)
except Exception:
exceptions.handle(request,
err_msg,
redirect=reverse(INDEX_URL))
for user_id in users_roles:
roles_ids = users_roles[user_id]
for role_id in roles_ids:
field_name = self.get_member_field_name(role_id)
self.fields[field_name].initial.append(user_id)
class Meta(object):
name = _("Project Members")
slug = PROJECT_USER_MEMBER_SLUG
class UpdateProjectMembers(workflows.UpdateMembersStep):
action_class = UpdateProjectMembersAction
available_list_title = _("All Users")
members_list_title = _("Project Members")
no_available_text = _("No users found.")
no_members_text = _("No users.")
def contribute(self, data, context):
if data:
try:
roles = api.keystone.role_list(self.workflow.request)
except Exception:
exceptions.handle(self.workflow.request,
_('Unable to retrieve user list.'))
post = self.workflow.request.POST
for role in roles:
field = self.get_member_field_name(role.id)
context[field] = post.getlist(field)
return context
class UpdateProjectGroupsAction(workflows.MembershipAction):
def __init__(self, request, *args, **kwargs):
super(UpdateProjectGroupsAction, self).__init__(request,
*args,
**kwargs)
err_msg = _('Unable to retrieve group list. Please try again later.')
# Use the domain_id from the project
domain_id = self.initial.get("domain_id", None)
project_id = ''
if 'project_id' in self.initial:
project_id = self.initial['project_id']
# Get the default role
try:
default_role = api.keystone.get_default_role(self.request)
# Default role is necessary to add members to a project
if default_role is None:
default = getattr(settings,
"OPENSTACK_KEYSTONE_DEFAULT_ROLE", None)
msg = (_('Could not find default role "%s" in Keystone') %
default)
raise exceptions.NotFound(msg)
except Exception:
exceptions.handle(self.request,
err_msg,
redirect=reverse(INDEX_URL))
default_role_name = self.get_default_role_field_name()
self.fields[default_role_name] = forms.CharField(required=False)
self.fields[default_role_name].initial = default_role.id
# Get list of available groups
all_groups = []
try:
all_groups = api.keystone.group_list(request,
domain=domain_id)
except Exception:
exceptions.handle(request, err_msg)
groups_list = [(group.id, group.name) for group in all_groups]
# Get list of roles
role_list = []
try:
role_list = api.keystone.role_list(request)
except Exception:
exceptions.handle(request,
err_msg,
redirect=reverse(INDEX_URL))
for role in role_list:
field_name = self.get_member_field_name(role.id)
label = role.name
self.fields[field_name] = forms.MultipleChoiceField(required=False,
label=label)
self.fields[field_name].choices = groups_list
self.fields[field_name].initial = []
# Figure out groups & roles
if project_id:
try:
groups_roles = api.keystone.get_project_groups_roles(
request, project_id)
except Exception:
exceptions.handle(request,
err_msg,
redirect=reverse(INDEX_URL))
for group_id in groups_roles:
roles_ids = groups_roles[group_id]
for role_id in roles_ids:
field_name = self.get_member_field_name(role_id)
self.fields[field_name].initial.append(group_id)
class Meta(object):
name = _("Project Groups")
slug = PROJECT_GROUP_MEMBER_SLUG
class UpdateProjectGroups(workflows.UpdateMembersStep):
action_class = UpdateProjectGroupsAction
available_list_title = _("All Groups")
members_list_title = _("Project Groups")
no_available_text = _("No groups found.")
no_members_text = _("No groups.")
def contribute(self, data, context):
if data:
try:
roles = api.keystone.role_list(self.workflow.request)
except Exception:
exceptions.handle(self.workflow.request,
_('Unable to retrieve role list.'))
post = self.workflow.request.POST
for role in roles:
field = self.get_member_field_name(role.id)
context[field] = post.getlist(field)
return context
class CommonQuotaWorkflow(workflows.Workflow):
def _update_project_quota(self, request, data, project_id):
# Update the project quota.
nova_data = dict(
[(key, data[key]) for key in quotas.NOVA_QUOTA_FIELDS])
nova.tenant_quota_update(request, project_id, **nova_data)
if base.is_service_enabled(request, 'volume'):
cinder_data = dict([(key, data[key]) for key in
quotas.CINDER_QUOTA_FIELDS])
cinder.tenant_quota_update(request,
project_id,
**cinder_data)
if api.base.is_service_enabled(request, 'network') and \
api.neutron.is_quotas_extension_supported(request):
neutron_data = {}
disabled_quotas = quotas.get_disabled_quotas(request)
for key in quotas.NEUTRON_QUOTA_FIELDS:
if key not in disabled_quotas:
neutron_data[key] = data[key]
api.neutron.tenant_quota_update(request,
project_id,
**neutron_data)
class CreateProject(CommonQuotaWorkflow):
slug = "create_project"
name = _("Create Project")
finalize_button_name = _("Create Project")
success_message = _('Created new project "%s".')
failure_message = _('Unable to create project "%s".')
success_url = "horizon:identity:projects:index"
default_steps = (CreateProjectInfo,
UpdateProjectMembers,
CreateProjectQuota)
def __init__(self, request=None, context_seed=None, entry_point=None,
*args, **kwargs):
if PROJECT_GROUP_ENABLED:
self.default_steps = (CreateProjectInfo,
UpdateProjectMembers,
UpdateProjectGroups,
CreateProjectQuota)
super(CreateProject, self).__init__(request=request,
context_seed=context_seed,
entry_point=entry_point,
*args,
**kwargs)
def format_status_message(self, message):
if "%s" in message:
return message % self.context.get('name', 'unknown project')
else:
return message
def _create_project(self, request, data):
# create the project
domain_id = data['domain_id']
try:
desc = data['description']
self.object = api.keystone.tenant_create(request,
name=data['name'],
description=desc,
enabled=data['enabled'],
domain=domain_id)
return self.object
except exceptions.Conflict:
msg = _('Project name "%s" is already used.') % data['name']
self.failure_message = msg
return
except Exception:
exceptions.handle(request, ignore=True)
return
def _update_project_members(self, request, data, project_id):
# update project members
users_to_add = 0
try:
available_roles = api.keystone.role_list(request)
member_step = self.get_step(PROJECT_USER_MEMBER_SLUG)
# count how many users are to be added
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
role_list = data[field_name]
users_to_add += len(role_list)
# add new users to project
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
role_list = data[field_name]
users_added = 0
for user in role_list:
api.keystone.add_tenant_user_role(request,
project=project_id,
user=user,
role=role.id)
users_added += 1
users_to_add -= users_added
except Exception:
if PROJECT_GROUP_ENABLED:
group_msg = _(", add project groups")
else:
group_msg = ""
exceptions.handle(request,
_('Failed to add %(users_to_add)s project '
'members%(group_msg)s and set project quotas.')
% {'users_to_add': users_to_add,
'group_msg': group_msg})
finally:
auth_utils.remove_project_cache(request.user.token.unscoped_token)
def _update_project_groups(self, request, data, project_id):
# update project groups
groups_to_add = 0
try:
available_roles = api.keystone.role_list(request)
member_step = self.get_step(PROJECT_GROUP_MEMBER_SLUG)
# count how many groups are to be added
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
role_list = data[field_name]
groups_to_add += len(role_list)
# add new groups to project
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
role_list = data[field_name]
groups_added = 0
for group in role_list:
api.keystone.add_group_role(request,
role=role.id,
group=group,
project=project_id)
groups_added += 1
groups_to_add -= groups_added
except Exception:
exceptions.handle(request,
_('Failed to add %s project groups '
'and update project quotas.')
% groups_to_add)
def _update_project_quota(self, request, data, project_id):
try:
super(CreateProject, self)._update_project_quota(
request, data, project_id)
except Exception:
exceptions.handle(request, _('Unable to set project quotas.'))
def handle(self, request, data):
project = self._create_project(request, data)
if not project:
return False
project_id = project.id
self._update_project_members(request, data, project_id)
if PROJECT_GROUP_ENABLED:
self._update_project_groups(request, data, project_id)
self._update_project_quota(request, data, project_id)
return True
class UpdateProjectInfoAction(CreateProjectInfoAction):
enabled = forms.BooleanField(required=False, label=_("Enabled"))
def __init__(self, request, initial, *args, **kwargs):
super(UpdateProjectInfoAction, self).__init__(
request, initial, *args, **kwargs)
if initial['project_id'] == request.user.project_id:
self.fields['enabled'].widget.attrs['disabled'] = True
self.fields['enabled'].help_text = _(
'You cannot disable your current project')
def clean(self):
cleaned_data = super(UpdateProjectInfoAction, self).clean()
# NOTE(tsufiev): in case the current project is being edited, its
# 'enabled' field is disabled to prevent changing the field value
# which is always `True` for the current project (because the user
# logged in it). Since Django treats disabled checkbox as providing
# `False` value even if its initial value is `True`, we need to
# restore the original `True` value of 'enabled' field here.
if self.fields['enabled'].widget.attrs.get('disabled', False):
cleaned_data['enabled'] = True
return cleaned_data
class Meta(object):
name = _("Project Information")
slug = 'update_info'
help_text = _("Edit the project details.")
class UpdateProjectInfo(workflows.Step):
action_class = UpdateProjectInfoAction
template_name = COMMON_HORIZONTAL_TEMPLATE
depends_on = ("project_id",)
contributes = ("domain_id",
"domain_name",
"name",
"description",
"enabled")
class UpdateProject(CommonQuotaWorkflow):
slug = "update_project"
name = _("Edit Project")
finalize_button_name = _("Save")
success_message = _('Modified project "%s".')
failure_message = _('Unable to modify project "%s".')
success_url = "horizon:identity:projects:index"
default_steps = (UpdateProjectInfo,
UpdateProjectMembers,
UpdateProjectQuota)
def __init__(self, request=None, context_seed=None, entry_point=None,
*args, **kwargs):
if PROJECT_GROUP_ENABLED:
self.default_steps = (UpdateProjectInfo,
UpdateProjectMembers,
UpdateProjectGroups,
UpdateProjectQuota)
super(UpdateProject, self).__init__(request=request,
context_seed=context_seed,
entry_point=entry_point,
*args,
**kwargs)
def format_status_message(self, message):
if "%s" in message:
return message % self.context.get('name', 'unknown project')
else:
return message
@memoized.memoized_method
def _get_available_roles(self, request):
return api.keystone.role_list(request)
def _update_project(self, request, data):
# update project info
try:
project_id = data['project_id']
return api.keystone.tenant_update(
request,
project_id,
name=data['name'],
description=data['description'],
enabled=data['enabled'])
except exceptions.Conflict:
msg = _('Project name "%s" is already used.') % data['name']
self.failure_message = msg
return
except Exception:
exceptions.handle(request, ignore=True)
return
def _add_roles_to_users(self, request, data, project_id, user_id,
role_ids, available_roles):
member_step = self.get_step(PROJECT_USER_MEMBER_SLUG)
current_role_ids = list(role_ids)
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
# Check if the user is in the list of users with this role.
if user_id in data[field_name]:
# Add it if necessary
if role.id not in current_role_ids:
# user role has changed
api.keystone.add_tenant_user_role(
request,
project=project_id,
user=user_id,
role=role.id)
else:
# User role is unchanged, so remove it from the
# remaining roles list to avoid removing it later.
index = current_role_ids.index(role.id)
current_role_ids.pop(index)
return current_role_ids
def _remove_roles_from_user(self, request, project_id, user_id,
current_role_ids):
for id_to_delete in current_role_ids:
api.keystone.remove_tenant_user_role(
request,
project=project_id,
user=user_id,
role=id_to_delete)
def _is_removing_self_admin_role(self, request, project_id, user_id,
available_roles, current_role_ids):
is_current_user = user_id == request.user.id
is_current_project = project_id == request.user.tenant_id
available_admin_role_ids = [role.id for role in available_roles
if role.name.lower() == 'admin']
admin_roles = [role for role in current_role_ids
if role in available_admin_role_ids]
if len(admin_roles):
removing_admin = any([role in current_role_ids
for role in admin_roles])
else:
removing_admin = False
if is_current_user and is_current_project and removing_admin:
# Cannot remove "admin" role on current(admin) project
msg = _('You cannot revoke your administrative privileges '
'from the project you are currently logged into. '
'Please switch to another project with '
'administrative privileges or remove the '
'administrative role manually via the CLI.')
messages.warning(request, msg)
return True
else:
return False
def _update_project_members(self, request, data, project_id):
# update project members
users_to_modify = 0
# Project-user member step
member_step = self.get_step(PROJECT_USER_MEMBER_SLUG)
try:
# Get our role options
available_roles = self._get_available_roles(request)
# Get the users currently associated with this project so we
# can diff against it.
users_roles = api.keystone.get_project_users_roles(
request, project=project_id)
users_to_modify = len(users_roles)
for user_id in users_roles.keys():
# Check if there have been any changes in the roles of
# Existing project members.
current_role_ids = list(users_roles[user_id])
modified_role_ids = self._add_roles_to_users(
request, data, project_id, user_id,
current_role_ids, available_roles)
# Prevent admins from doing stupid things to themselves.
removing_admin = self._is_removing_self_admin_role(
request, project_id, user_id, available_roles,
modified_role_ids)
# Otherwise go through and revoke any removed roles.
if not removing_admin:
self._remove_roles_from_user(request, project_id, user_id,
modified_role_ids)
users_to_modify -= 1
# Grant new roles on the project.
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
# Count how many users may be added for exception handling.
users_to_modify += len(data[field_name])
for role in available_roles:
users_added = 0
field_name = member_step.get_member_field_name(role.id)
for user_id in data[field_name]:
if user_id not in users_roles:
api.keystone.add_tenant_user_role(request,
project=project_id,
user=user_id,
role=role.id)
users_added += 1
users_to_modify -= users_added
return True
except Exception:
if PROJECT_GROUP_ENABLED:
group_msg = _(", update project groups")
else:
group_msg = ""
exceptions.handle(request,
_('Failed to modify %(users_to_modify)s'
' project members%(group_msg)s and '
'update project quotas.')
% {'users_to_modify': users_to_modify,
'group_msg': group_msg})
return False
finally:
auth_utils.remove_project_cache(request.user.token.unscoped_token)
def _update_project_groups(self, request, data, project_id, domain_id):
# update project groups
groups_to_modify = 0
member_step = self.get_step(PROJECT_GROUP_MEMBER_SLUG)
try:
available_roles = self._get_available_roles(request)
# Get the groups currently associated with this project so we
# can diff against it.
project_groups = api.keystone.group_list(request,
domain=domain_id,
project=project_id)
groups_to_modify = len(project_groups)
for group in project_groups:
# Check if there have been any changes in the roles of
# Existing project members.
current_roles = api.keystone.roles_for_group(
self.request,
group=group.id,
project=project_id)
current_role_ids = [role.id for role in current_roles]
for role in available_roles:
# Check if the group is in the list of groups with
# this role.
field_name = member_step.get_member_field_name(role.id)
if group.id in data[field_name]:
# Add it if necessary
if role.id not in current_role_ids:
# group role has changed
api.keystone.add_group_role(
request,
role=role.id,
group=group.id,
project=project_id)
else:
# Group role is unchanged, so remove it from
# the remaining roles list to avoid removing it
# later.
index = current_role_ids.index(role.id)
current_role_ids.pop(index)
# Revoke any removed roles.
for id_to_delete in current_role_ids:
api.keystone.remove_group_role(request,
role=id_to_delete,
group=group.id,
project=project_id)
groups_to_modify -= 1
# Grant new roles on the project.
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
# Count how many groups may be added for error handling.
groups_to_modify += len(data[field_name])
for role in available_roles:
groups_added = 0
field_name = member_step.get_member_field_name(role.id)
for group_id in data[field_name]:
if not filter(lambda x: group_id == x.id,
project_groups):
api.keystone.add_group_role(request,
role=role.id,
group=group_id,
project=project_id)
groups_added += 1
groups_to_modify -= groups_added
return True
except Exception:
exceptions.handle(request,
_('Failed to modify %s project '
'members, update project groups '
'and update project quotas.')
% groups_to_modify)
return False
def _update_project_quota(self, request, data, project_id):
try:
super(UpdateProject, self)._update_project_quota(
request, data, project_id)
return True
except Exception:
exceptions.handle(request, _('Modified project information and '
'members, but unable to modify '
'project quotas.'))
return False
def handle(self, request, data):
# FIXME(gabriel): This should be refactored to use Python's built-in
# sets and do this all in a single "roles to add" and "roles to remove"
# pass instead of the multi-pass thing happening now.
project = self._update_project(request, data)
if not project:
return False
project_id = data['project_id']
# Use the domain_id from the project if available
domain_id = getattr(project, "domain_id", '')
ret = self._update_project_members(request, data, project_id)
if not ret:
return False
if PROJECT_GROUP_ENABLED:
ret = self._update_project_groups(request, data,
project_id, domain_id)
if not ret:
return False
ret = self._update_project_quota(request, data, project_id)
if not ret:
return False
return True
| apache-2.0 |
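The project workflows above all follow the same Horizon composition: an Action declares form fields, a Step wraps the Action and lists what it contributes, and a Workflow assembles steps and implements handle(). A stripped-down sketch of that skeleton (class names and messages are illustrative):

class ExampleInfoAction(workflows.Action):
    name = forms.CharField(label=_("Name"), max_length=64)

    class Meta(object):
        name = _("Example Info")
        slug = 'example_info'

class ExampleInfo(workflows.Step):
    action_class = ExampleInfoAction
    contributes = ("name",)

class ExampleWorkflow(workflows.Workflow):
    slug = "example"
    name = _("Example")
    finalize_button_name = _("Save")
    success_message = _('Saved "%s".')
    failure_message = _('Unable to save "%s".')
    success_url = INDEX_URL
    default_steps = (ExampleInfo,)

    def handle(self, request, data):
        return True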
Rashminadig/SDN | ryu/contrib/ncclient/operations/session.py | 82 | 1414 | # Copyright 2009 Shikhar Bhushan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"Session-related NETCONF operations"
from ncclient.xml_ import *
from rpc import RPC
class CloseSession(RPC):
"`close-session` RPC. The connection to NETCONF server is also closed."
def request(self):
"Request graceful termination of the NETCONF session, and also close the transport."
try:
return self._request(new_ele("close-session"))
finally:
self.session.close()
class KillSession(RPC):
"`kill-session` RPC."
def request(self, session_id):
"""Force the termination of a NETCONF session (not the current one!)
*session_id* is the session identifier of the NETCONF session to be terminated as a string
"""
node = new_ele("kill-session")
sub_ele(node, "session-id").text = session_id
return self._request(node)
| apache-2.0 |
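These RPCs are normally reached through ncclient's manager layer rather than instantiated directly; a hedged sketch, with placeholder connection details and on the assumption that the manager exposes kill_session() as an operation method:

from ncclient import manager

with manager.connect(host='192.0.2.1', port=830, username='admin',
                     password='admin', hostkey_verify=False) as m:
    m.kill_session('42')     # issues the kill-session RPC for another session id (string)
# leaving the block sends close-session and tears down the transport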
lgautier/mashing-pumpkins | src/tests/test_parallel.py | 1 | 6609 | import pytest
import mashingpumpkins.parallel
from mashingpumpkins._murmurhash3 import hasharray, DEFAULT_SEED
from mashingpumpkins.sequence import chunkpos_iter
from mashingpumpkins import minhashsketch
import random
def test_sketch_initializer():
# empty initializer
with pytest.raises(TypeError):
mashingpumpkins.parallel.Sketch.initializer()
nsize = 21
maxsize = 10
hashfun = lambda input,width,hashbuffer: None
seed = 0
cls = minhashsketch.MaxSketch
mashingpumpkins.parallel.Sketch.initializer(cls, nsize, maxsize, hashfun, seed)
hasattr(mashingpumpkins.parallel, 'sketch_constructor')
assert type(mashingpumpkins.parallel.sketch_constructor()) is cls
def test_sketch_map_sequence():
nsize = 21
maxsize = 10
hashfun = hasharray
seed = DEFAULT_SEED
cls = minhashsketch.MaxSketch
mashingpumpkins.parallel.Sketch.initializer(cls, nsize, maxsize, hashfun, seed)
random.seed(123)
sequence = b''.join(random.choice((b'A',b'T',b'G',b'C')) for x in range(250))
mhs = mashingpumpkins.parallel.Sketch.map_sequence(sequence)
assert mhs.nsize == nsize
assert mhs.maxsize == maxsize
assert mhs.nvisited == len(sequence)-nsize+1
def test_sketch_map_sequences():
nsize = 21
maxsize = 10
hashfun = hasharray
seed = DEFAULT_SEED
cls = minhashsketch.MaxSketch
mashingpumpkins.parallel.Sketch.initializer(cls, nsize, maxsize, hashfun, seed)
random.seed(123)
sequence = b''.join(random.choice((b'A',b'T',b'G',b'C')) for x in range(250))
sequences = (sequence[beg:end] for beg, end in chunkpos_iter(nsize, len(sequence), 100))
mhs = mashingpumpkins.parallel.Sketch.map_sequences(sequences)
assert mhs.nsize == nsize
assert mhs.maxsize == maxsize
assert mhs.nvisited == len(sequence)-nsize+1
def test_sketch_reduce_sketches():
nsize = 21
maxsize = 10
hashfun = hasharray
seed = DEFAULT_SEED
cls = minhashsketch.MaxSketch
mhs = cls(nsize, maxsize, hashfun, seed)
mhs_a = cls(nsize, maxsize, hashfun, seed)
random.seed(123)
sequence = b''.join(random.choice((b'A',b'T',b'G',b'C')) for x in range(250))
mhs.add(sequence)
mhs_a.add(sequence)
mhs_b = cls(nsize, maxsize, hashfun, seed)
random.seed(123)
sequence = b''.join(random.choice((b'A',b'T',b'G',b'C')) for x in range(250))
mhs.add(sequence)
mhs_b.add(sequence)
mhs_ab = mashingpumpkins.parallel.Sketch.reduce(mhs_a, mhs_b)
assert mhs.nsize == mhs_ab.nsize
assert mhs.maxsize == mhs_ab.maxsize
assert mhs.nvisited == mhs_ab.nvisited
assert len(set(mhs._heapmap) ^ set(mhs_ab._heapmap)) == 0
def test_sketchlist_initializer():
# empty initializer
with pytest.raises(TypeError):
mashingpumpkins.parallel.SketchList.initializer()
nsize = 21
maxsize = 10
hashfun = lambda input,width,hashbuffer: None
seed = 0
clslist = (minhashsketch.MaxSketch, minhashsketch.MinSketch)
# mistmatching lengths
with pytest.raises(ValueError):
mashingpumpkins.parallel.SketchList.initializer(clslist, [])
# automagic length adjustment for argslist
mashingpumpkins.parallel.SketchList.initializer(clslist, [(nsize, maxsize, hashfun, seed)])
hasattr(mashingpumpkins.parallel, 'sketchlist_constructor')
l = tuple(mashingpumpkins.parallel.sketchlist_constructor())
assert len(l) == len(clslist)
for elt, cls in zip(l, clslist):
assert type(elt) is cls
# automagic length adjustment for clslist
argslist = [(nsize, maxsize, hashfun, seed),
(nsize+1, maxsize, hashfun, seed)]
mashingpumpkins.parallel.SketchList.initializer(clslist[:1],
argslist)
hasattr(mashingpumpkins.parallel, 'sketchlist_constructor')
l = tuple(mashingpumpkins.parallel.sketchlist_constructor())
assert len(l) == 2
for elt, (nsize, maxsize, hashfun, seed) in zip(l, argslist):
assert elt.nsize == nsize
assert elt.maxsize == maxsize
assert elt._hashfun is hashfun
assert elt.seed == seed
def test_sketchlist_map_sequence():
nsize = 21
maxsize = 10
hashfun = hasharray
seed = DEFAULT_SEED
clslist = (minhashsketch.MaxSketch, minhashsketch.MinSketch)
mashingpumpkins.parallel.SketchList.initializer(clslist, [(nsize, maxsize, hashfun, seed)])
random.seed(123)
sequence = b''.join(random.choice((b'A',b'T',b'G',b'C')) for x in range(250))
mhslist = mashingpumpkins.parallel.SketchList.map_sequence(sequence)
for mhs in mhslist:
assert mhs.nsize == nsize
assert mhs.maxsize == maxsize
assert mhs.nvisited == len(sequence)-nsize+1
def test_sketchlist_map_sequences():
nsize = 21
maxsize = 10
hashfun = hasharray
seed = DEFAULT_SEED
cls = minhashsketch.MaxSketch
mashingpumpkins.parallel.Sketch.initializer(cls, nsize, maxsize, hashfun, seed)
random.seed(123)
sequence = b''.join(random.choice((b'A',b'T',b'G',b'C')) for x in range(250))
sequences = (sequence[beg:end] for beg, end in chunkpos_iter(nsize, len(sequence), 100))
mhs = mashingpumpkins.parallel.Sketch.map_sequences(sequences)
assert mhs.nsize == nsize
assert mhs.maxsize == maxsize
assert mhs.nvisited == len(sequence)-nsize+1
def test_sketchlist_reduce_sketches():
nsize = 21
maxsize = 10
hashfun = hasharray
seed = DEFAULT_SEED
clslist = (minhashsketch.MaxSketch, minhashsketch.MinSketch)
mashingpumpkins.parallel.SketchList.initializer(clslist, [(nsize, maxsize, hashfun, seed)])
mhslist = tuple(cls(nsize, maxsize, hashfun, seed) for cls in clslist)
mhslist_a = tuple(cls(nsize, maxsize, hashfun, seed) for cls in clslist)
random.seed(123)
sequence = b''.join(random.choice((b'A',b'T',b'G',b'C')) for x in range(250))
for mhs, mhs_a in zip(mhslist,mhslist_a):
mhs.add(sequence)
mhs_a.add(sequence)
mhslist_b = tuple(cls(nsize, maxsize, hashfun, seed) for cls in clslist)
random.seed(123)
sequence = b''.join(random.choice((b'A',b'T',b'G',b'C')) for x in range(250))
for mhs, mhs_b in zip(mhslist, mhslist_b):
mhs.add(sequence)
mhs_b.add(sequence)
mhslist_ab = mashingpumpkins.parallel.SketchList.reduce(mhslist_a, mhslist_b)
for mhs, mhs_ab in zip(mhslist, mhslist_ab):
assert mhs.nsize == mhs_ab.nsize
assert mhs.maxsize == mhs_ab.maxsize
assert mhs.nvisited == mhs_ab.nvisited
assert len(set(mhs._heapmap) ^ set(mhs_ab._heapmap)) == 0
| mit |
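The initializer/map_sequence/reduce trio exercised above is meant to be wired into a process pool; a sketch of that wiring, assuming the same constructor arguments as the tests (the pool layout and the placeholder sequences are illustrative):

from functools import reduce
from multiprocessing import Pool
from mashingpumpkins import minhashsketch
from mashingpumpkins.parallel import Sketch
from mashingpumpkins._murmurhash3 import hasharray, DEFAULT_SEED

sequences = [b'ATGC' * 100, b'GATTACA' * 60]               # placeholder input chunks
initargs = (minhashsketch.MaxSketch, 21, 1000, hasharray, DEFAULT_SEED)
with Pool(4, initializer=Sketch.initializer, initargs=initargs) as pool:
    sketches = pool.map(Sketch.map_sequence, sequences)     # one sketch per chunk
combined = reduce(Sketch.reduce, sketches)                  # merge into a single sketch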
Bitl/RBXLegacy-src | Cut/RBXLegacyDiscordBot/lib/youtube_dl/extractor/myvi.py | 36 | 2246 | # coding: utf-8
from __future__ import unicode_literals
import re
from .vimple import SprutoBaseIE
class MyviIE(SprutoBaseIE):
_VALID_URL = r'''(?x)
https?://
myvi\.(?:ru/player|tv)/
(?:
(?:
embed/html|
flash|
api/Video/Get
)/|
content/preloader\.swf\?.*\bid=
)
(?P<id>[\da-zA-Z_-]+)
'''
_TESTS = [{
'url': 'http://myvi.ru/player/embed/html/oOy4euHA6LVwNNAjhD9_Jq5Ha2Qf0rtVMVFMAZav8wObeRTZaCATzucDQIDph8hQU0',
'md5': '571bbdfba9f9ed229dc6d34cc0f335bf',
'info_dict': {
'id': 'f16b2bbd-cde8-481c-a981-7cd48605df43',
'ext': 'mp4',
'title': 'хозяин жизни',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 25,
},
}, {
'url': 'http://myvi.ru/player/content/preloader.swf?id=oOy4euHA6LVwNNAjhD9_Jq5Ha2Qf0rtVMVFMAZav8wOYf1WFpPfc_bWTKGVf_Zafr0',
'only_matching': True,
}, {
'url': 'http://myvi.ru/player/api/Video/Get/oOy4euHA6LVwNNAjhD9_Jq5Ha2Qf0rtVMVFMAZav8wObeRTZaCATzucDQIDph8hQU0',
'only_matching': True,
}, {
'url': 'http://myvi.tv/embed/html/oTGTNWdyz4Zwy_u1nraolwZ1odenTd9WkTnRfIL9y8VOgHYqOHApE575x4_xxS9Vn0?ap=0',
'only_matching': True,
}, {
'url': 'http://myvi.ru/player/flash/ocp2qZrHI-eZnHKQBK4cZV60hslH8LALnk0uBfKsB-Q4WnY26SeGoYPi8HWHxu0O30',
'only_matching': True,
}]
@classmethod
def _extract_url(cls, webpage):
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//myvi\.(?:ru/player|tv)/(?:embed/html|flash)/[^"]+)\1', webpage)
if mobj:
return mobj.group('url')
def _real_extract(self, url):
video_id = self._match_id(url)
spruto = self._download_json(
'http://myvi.ru/player/api/Video/Get/%s?sig' % video_id, video_id)['sprutoData']
return self._extract_spruto(spruto, video_id)
| gpl-3.0 |
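An extractor like this is selected by URL matching rather than called directly; a short sketch of the public youtube_dl API using the first test URL from the file:

from youtube_dl import YoutubeDL

with YoutubeDL({'quiet': True}) as ydl:
    info = ydl.extract_info(
        'http://myvi.ru/player/embed/html/oOy4euHA6LVwNNAjhD9_Jq5Ha2Qf0rtVMVFMAZav8wObeRTZaCATzucDQIDph8hQU0',
        download=False)       # resolve metadata without downloading
    print(info.get('title'))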
pkmital/CADL | session-2/libs/gif.py | 7 | 2381 | """Utility for creating a GIF.
Creative Applications of Deep Learning w/ Tensorflow.
Kadenze, Inc.
Copyright Parag K. Mital, June 2016.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
def build_gif(imgs, interval=0.1, dpi=72,
save_gif=True, saveto='animation.gif',
show_gif=False, cmap=None):
"""Take an array or list of images and create a GIF.
Parameters
----------
imgs : np.ndarray or list
List of images to create a GIF of
interval : float, optional
Spacing in seconds between successive images.
dpi : int, optional
Dots per inch.
save_gif : bool, optional
Whether or not to save the GIF.
saveto : str, optional
Filename of GIF to save.
show_gif : bool, optional
Whether or not to render the GIF using plt.
cmap : None, optional
Optional colormap to apply to the images.
Returns
-------
ani : matplotlib.animation.ArtistAnimation
The artist animation from matplotlib. Likely not useful.
"""
imgs = np.asarray(imgs)
h, w, *c = imgs[0].shape
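    # Size the figure (in inches) from the first frame's pixel dimensions and the
    # requested dpi, then strip all margins below so each frame fills the canvas.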
fig, ax = plt.subplots(figsize=(np.round(w / dpi), np.round(h / dpi)))
fig.subplots_adjust(bottom=0)
fig.subplots_adjust(top=1)
fig.subplots_adjust(right=1)
fig.subplots_adjust(left=0)
ax.set_axis_off()
if cmap is not None:
axs = list(map(lambda x: [
ax.imshow(x, cmap=cmap)], imgs))
else:
axs = list(map(lambda x: [
ax.imshow(x)], imgs))
ani = animation.ArtistAnimation(
fig, axs, interval=interval*1000, repeat_delay=0, blit=False)
if save_gif:
try:
ani.save(saveto, writer='imagemagick', dpi=dpi)
except:
print('You do not have imagemagick installed.\n\nOn OSX ' +
'you can install this by first installing homebrew: ' +
'http://brew.sh\nThen run: "brew install imagemagick".\n' +
'Windows users can obtain a binary installation here: ' +
'https://www.imagemagick.org/script/binary-releases.php\n' +
'And Linux users should be able to install imagemagick using ' +
'their package manager, e.g.: sudo apt-get install imagemagick.')
if show_gif:
plt.show()
return ani
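# Minimal usage sketch (not part of the original module): it assumes ImageMagick is
# available for the 'imagemagick' writer, and the frame data and filename below are
# placeholders.
if __name__ == '__main__':
    _demo_frames = [np.random.rand(64, 64) for _ in range(10)]
    build_gif(_demo_frames, interval=0.2, saveto='demo.gif',
              show_gif=False, cmap='gray')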
| apache-2.0 |
petercable/xray | xray/test/test_plot.py | 1 | 32894 | import inspect
import numpy as np
import pandas as pd
from xray import DataArray
import xray.plot as xplt
from xray.plot.plot import _infer_interval_breaks
from xray.plot.utils import (_determine_cmap_params,
_build_discrete_cmap,
_color_palette)
from . import TestCase, requires_matplotlib, incompatible_2_6
try:
import matplotlib as mpl
# Using a different backend makes Travis CI work.
mpl.use('Agg')
# Order of imports is important here.
import matplotlib.pyplot as plt
except ImportError:
pass
def text_in_fig():
'''
Return the set of all text in the figure
'''
alltxt = [t.get_text() for t in plt.gcf().findobj(mpl.text.Text)]
# Set comprehension not compatible with Python 2.6
return set(alltxt)
def substring_in_axes(substring, ax):
'''
Return True if a substring is found anywhere in an axes
'''
alltxt = set([t.get_text() for t in ax.findobj(mpl.text.Text)])
for txt in alltxt:
if substring in txt:
return True
return False
def easy_array(shape, start=0, stop=1):
'''
Make an array with desired shape using np.linspace
shape is a tuple like (2, 3)
'''
a = np.linspace(start, stop, num=np.prod(shape))
return a.reshape(shape)
@requires_matplotlib
class PlotTestCase(TestCase):
def tearDown(self):
# Remove all matplotlib figures
plt.close('all')
def pass_in_axis(self, plotmethod):
fig, axes = plt.subplots(ncols=2)
plotmethod(ax=axes[0])
self.assertTrue(axes[0].has_data())
def imshow_called(self, plotmethod):
plotmethod()
images = plt.gca().findobj(mpl.image.AxesImage)
return len(images) > 0
def contourf_called(self, plotmethod):
plotmethod()
paths = plt.gca().findobj(mpl.collections.PathCollection)
return len(paths) > 0
class TestPlot(PlotTestCase):
def setUp(self):
self.darray = DataArray(easy_array((2, 3, 4)))
def test1d(self):
self.darray[:, 0, 0].plot()
def test_2d_before_squeeze(self):
a = DataArray(easy_array((1, 5)))
a.plot()
def test2d_uniform_calls_imshow(self):
self.assertTrue(self.imshow_called(self.darray[:, :, 0].plot.imshow))
def test2d_nonuniform_calls_contourf(self):
a = self.darray[:, :, 0]
a.coords['dim_1'] = [2, 1, 89]
self.assertTrue(self.contourf_called(a.plot.contourf))
def test3d(self):
self.darray.plot()
def test_can_pass_in_axis(self):
self.pass_in_axis(self.darray.plot)
def test__infer_interval_breaks(self):
self.assertArrayEqual([-0.5, 0.5, 1.5], _infer_interval_breaks([0, 1]))
self.assertArrayEqual([-0.5, 0.5, 5.0, 9.5, 10.5],
_infer_interval_breaks([0, 1, 9, 10]))
self.assertArrayEqual(pd.date_range('20000101', periods=4) - np.timedelta64(12, 'h'),
_infer_interval_breaks(pd.date_range('20000101', periods=3)))
@incompatible_2_6
def test_datetime_dimension(self):
nrow = 3
ncol = 4
time = pd.date_range('2000-01-01', periods=nrow)
a = DataArray(easy_array((nrow, ncol)),
coords=[('time', time), ('y', range(ncol))])
a.plot()
ax = plt.gca()
self.assertTrue(ax.has_data())
def test_convenient_facetgrid(self):
a = easy_array((10, 15, 4))
d = DataArray(a, dims=['y', 'x', 'z'])
d.coords['z'] = list('abcd')
g = d.plot(x='x', y='y', col='z', col_wrap=2, cmap='cool')
self.assertArrayEqual(g.axes.shape, [2, 2])
for ax in g.axes.flat:
self.assertTrue(ax.has_data())
with self.assertRaisesRegexp(ValueError, '[Ff]acet'):
d.plot(x='x', y='y', col='z', ax=plt.gca())
with self.assertRaisesRegexp(ValueError, '[Ff]acet'):
d[0].plot(x='x', y='y', col='z', ax=plt.gca())
def test_subplot_kws(self):
a = easy_array((10, 15, 4))
d = DataArray(a, dims=['y', 'x', 'z'])
d.coords['z'] = list('abcd')
g = d.plot(x='x', y='y', col='z', col_wrap=2, cmap='cool',
subplot_kws=dict(axisbg='r'))
for ax in g.axes.flat:
self.assertEqual(ax.get_axis_bgcolor(), 'r')
def test_convenient_facetgrid_4d(self):
a = easy_array((10, 15, 2, 3))
d = DataArray(a, dims=['y', 'x', 'columns', 'rows'])
g = d.plot(x='x', y='y', col='columns', row='rows')
self.assertArrayEqual(g.axes.shape, [3, 2])
for ax in g.axes.flat:
self.assertTrue(ax.has_data())
with self.assertRaisesRegexp(ValueError, '[Ff]acet'):
d.plot(x='x', y='y', col='columns', ax=plt.gca())
class TestPlot1D(PlotTestCase):
def setUp(self):
d = [0, 1.1, 0, 2]
self.darray = DataArray(d, coords={'period': range(len(d))})
def test_xlabel_is_index_name(self):
self.darray.plot()
self.assertEqual('period', plt.gca().get_xlabel())
def test_no_label_name_on_y_axis(self):
self.darray.plot()
self.assertEqual('', plt.gca().get_ylabel())
def test_ylabel_is_data_name(self):
self.darray.name = 'temperature'
self.darray.plot()
self.assertEqual(self.darray.name, plt.gca().get_ylabel())
def test_wrong_dims_raises_valueerror(self):
twodims = DataArray(easy_array((2, 5)))
with self.assertRaises(ValueError):
twodims.plot.line()
def test_format_string(self):
self.darray.plot.line('ro')
def test_can_pass_in_axis(self):
self.pass_in_axis(self.darray.plot.line)
def test_nonnumeric_index_raises_typeerror(self):
a = DataArray([1, 2, 3], {'letter': ['a', 'b', 'c']})
with self.assertRaisesRegexp(TypeError, r'[Pp]lot'):
a.plot.line()
def test_primitive_returned(self):
p = self.darray.plot.line()
self.assertTrue(isinstance(p[0], mpl.lines.Line2D))
def test_plot_nans(self):
self.darray[1] = np.nan
self.darray.plot.line()
def test_x_ticks_are_rotated_for_time(self):
time = pd.date_range('2000-01-01', '2000-01-10')
a = DataArray(np.arange(len(time)), {'t': time})
a.plot.line()
rotation = plt.gca().get_xticklabels()[0].get_rotation()
self.assertFalse(rotation == 0)
def test_slice_in_title(self):
self.darray.coords['d'] = 10
self.darray.plot.line()
title = plt.gca().get_title()
self.assertEqual('d = 10', title)
class TestPlotHistogram(PlotTestCase):
def setUp(self):
self.darray = DataArray(easy_array((2, 3, 4)))
def test_3d_array(self):
self.darray.plot.hist()
def test_title_no_name(self):
self.darray.plot.hist()
self.assertEqual('', plt.gca().get_title())
def test_title_uses_name(self):
self.darray.name = 'testpoints'
self.darray.plot.hist()
self.assertIn(self.darray.name, plt.gca().get_title())
def test_ylabel_is_count(self):
self.darray.plot.hist()
self.assertEqual('Count', plt.gca().get_ylabel())
def test_can_pass_in_kwargs(self):
nbins = 5
self.darray.plot.hist(bins=nbins)
self.assertEqual(nbins, len(plt.gca().patches))
def test_can_pass_in_axis(self):
self.pass_in_axis(self.darray.plot.hist)
def test_primitive_returned(self):
h = self.darray.plot.hist()
self.assertTrue(isinstance(h[-1][0], mpl.patches.Rectangle))
def test_plot_nans(self):
self.darray[0, 0, 0] = np.nan
self.darray.plot.hist()
@requires_matplotlib
class TestDetermineCmapParams(TestCase):
def setUp(self):
self.data = np.linspace(0, 1, num=100)
def test_robust(self):
cmap_params = _determine_cmap_params(self.data, robust=True)
self.assertEqual(cmap_params['vmin'], np.percentile(self.data, 2))
self.assertEqual(cmap_params['vmax'], np.percentile(self.data, 98))
self.assertEqual(cmap_params['cmap'].name, 'viridis')
self.assertEqual(cmap_params['extend'], 'both')
self.assertIsNone(cmap_params['levels'])
self.assertIsNone(cmap_params['cnorm'])
def test_center(self):
cmap_params = _determine_cmap_params(self.data, center=0.5)
self.assertEqual(cmap_params['vmax'] - 0.5, 0.5 - cmap_params['vmin'])
self.assertEqual(cmap_params['cmap'], 'RdBu_r')
self.assertEqual(cmap_params['extend'], 'neither')
self.assertIsNone(cmap_params['levels'])
self.assertIsNone(cmap_params['cnorm'])
def test_integer_levels(self):
data = self.data + 1
cmap_params = _determine_cmap_params(data, levels=5, vmin=0, vmax=5,
cmap='Blues')
self.assertEqual(cmap_params['vmin'], cmap_params['levels'][0])
self.assertEqual(cmap_params['vmax'], cmap_params['levels'][-1])
self.assertEqual(cmap_params['cmap'].name, 'Blues')
self.assertEqual(cmap_params['extend'], 'neither')
self.assertEqual(cmap_params['cmap'].N, 5)
self.assertEqual(cmap_params['cnorm'].N, 6)
cmap_params = _determine_cmap_params(data, levels=5,
vmin=0.5, vmax=1.5)
self.assertEqual(cmap_params['cmap'].name, 'viridis')
self.assertEqual(cmap_params['extend'], 'max')
def test_list_levels(self):
data = self.data + 1
orig_levels = [0, 1, 2, 3, 4, 5]
# vmin and vmax should be ignored if levels are explicitly provided
cmap_params = _determine_cmap_params(data, levels=orig_levels,
vmin=0, vmax=3)
self.assertEqual(cmap_params['vmin'], 0)
self.assertEqual(cmap_params['vmax'], 5)
self.assertEqual(cmap_params['cmap'].N, 5)
self.assertEqual(cmap_params['cnorm'].N, 6)
for wrap_levels in [list, np.array, pd.Index, DataArray]:
cmap_params = _determine_cmap_params(
data, levels=wrap_levels(orig_levels))
self.assertArrayEqual(cmap_params['levels'], orig_levels)
@requires_matplotlib
class TestDiscreteColorMap(TestCase):
def setUp(self):
x = np.arange(start=0, stop=10, step=2)
y = np.arange(start=9, stop=-7, step=-3)
xy = np.dstack(np.meshgrid(x, y))
distance = np.linalg.norm(xy, axis=2)
self.darray = DataArray(distance, list(zip(('y', 'x'), (y, x))))
self.data_min = distance.min()
self.data_max = distance.max()
def test_recover_from_seaborn_jet_exception(self):
pal = _color_palette('jet', 4)
self.assertTrue(type(pal) == np.ndarray)
self.assertEqual(len(pal), 4)
def test_build_discrete_cmap(self):
for (cmap, levels, extend, filled) in [('jet', [0, 1], 'both', False),
('hot', [-4, 4], 'max', True)]:
ncmap, cnorm = _build_discrete_cmap(cmap, levels, extend, filled)
self.assertEqual(ncmap.N, len(levels) - 1)
self.assertEqual(len(ncmap.colors), len(levels) - 1)
self.assertEqual(cnorm.N, len(levels))
self.assertArrayEqual(cnorm.boundaries, levels)
self.assertEqual(max(levels), cnorm.vmax)
self.assertEqual(min(levels), cnorm.vmin)
if filled:
self.assertEqual(ncmap.colorbar_extend, extend)
else:
self.assertEqual(ncmap.colorbar_extend, 'neither')
def test_discrete_colormap_list_of_levels(self):
for extend, levels in [('max', [-1, 2, 4, 8, 10]),
('both', [2, 5, 10, 11]),
('neither', [0, 5, 10, 15]),
('min', [2, 5, 10, 15])]:
for kind in ['imshow', 'pcolormesh', 'contourf', 'contour']:
primitive = getattr(self.darray.plot, kind)(levels=levels)
self.assertArrayEqual(levels, primitive.norm.boundaries)
self.assertEqual(max(levels), primitive.norm.vmax)
self.assertEqual(min(levels), primitive.norm.vmin)
if kind != 'contour':
self.assertEqual(extend, primitive.cmap.colorbar_extend)
else:
self.assertEqual('neither', primitive.cmap.colorbar_extend)
self.assertEqual(len(levels) - 1, len(primitive.cmap.colors))
def test_discrete_colormap_int_levels(self):
for extend, levels, vmin, vmax in [('neither', 7, None, None),
('neither', 7, None, 20),
('both', 7, 4, 8),
('min', 10, 4, 15)]:
for kind in ['imshow', 'pcolormesh', 'contourf', 'contour']:
primitive = getattr(self.darray.plot, kind)(levels=levels,
vmin=vmin,
vmax=vmax)
self.assertGreaterEqual(levels,
len(primitive.norm.boundaries) - 1)
if vmax is None:
self.assertGreaterEqual(primitive.norm.vmax, self.data_max)
else:
self.assertGreaterEqual(primitive.norm.vmax, vmax)
if vmin is None:
self.assertLessEqual(primitive.norm.vmin, self.data_min)
else:
self.assertLessEqual(primitive.norm.vmin, vmin)
if kind != 'contour':
self.assertEqual(extend, primitive.cmap.colorbar_extend)
else:
self.assertEqual('neither', primitive.cmap.colorbar_extend)
self.assertGreaterEqual(levels, len(primitive.cmap.colors))
def test_discrete_colormap_list_levels_and_vmin_or_vmax(self):
levels = [0, 5, 10, 15]
primitive = self.darray.plot(levels=levels, vmin=-3, vmax=20)
self.assertEqual(primitive.norm.vmax, max(levels))
self.assertEqual(primitive.norm.vmin, min(levels))
class Common2dMixin:
"""
Common tests for 2d plotting go here.
    These tests assume that each subclass defines a staticmethod ``plotfunc``
    whose name matches the DataArray plotting method under test.
"""
def setUp(self):
da = DataArray(easy_array(
(10, 15), start=-1), dims=['y', 'x'])
# add 2d coords
ds = da.to_dataset(name='testvar')
x, y = np.meshgrid(da.x.values, da.y.values)
ds['x2d'] = DataArray(x, dims=['y', 'x'])
ds['y2d'] = DataArray(y, dims=['y', 'x'])
ds.set_coords(['x2d', 'y2d'], inplace=True)
# set darray and plot method
self.darray = ds.testvar
self.plotmethod = getattr(self.darray.plot, self.plotfunc.__name__)
def test_label_names(self):
self.plotmethod()
self.assertEqual('x', plt.gca().get_xlabel())
self.assertEqual('y', plt.gca().get_ylabel())
def test_1d_raises_valueerror(self):
with self.assertRaisesRegexp(ValueError, r'DataArray must be 2d'):
self.plotfunc(self.darray[0, :])
def test_3d_raises_valueerror(self):
a = DataArray(easy_array((2, 3, 4)))
with self.assertRaisesRegexp(ValueError, r'DataArray must be 2d'):
self.plotfunc(a)
def test_nonnumeric_index_raises_typeerror(self):
a = DataArray(easy_array((3, 2)),
coords=[['a', 'b', 'c'], ['d', 'e']])
with self.assertRaisesRegexp(TypeError, r'[Pp]lot'):
self.plotfunc(a)
def test_can_pass_in_axis(self):
self.pass_in_axis(self.plotmethod)
def test_xyincrease_false_changes_axes(self):
self.plotmethod(xincrease=False, yincrease=False)
xlim = plt.gca().get_xlim()
ylim = plt.gca().get_ylim()
diffs = xlim[0] - 14, xlim[1] - 0, ylim[0] - 9, ylim[1] - 0
self.assertTrue(all(abs(x) < 1 for x in diffs))
def test_xyincrease_true_changes_axes(self):
self.plotmethod(xincrease=True, yincrease=True)
xlim = plt.gca().get_xlim()
ylim = plt.gca().get_ylim()
diffs = xlim[0] - 0, xlim[1] - 14, ylim[0] - 0, ylim[1] - 9
self.assertTrue(all(abs(x) < 1 for x in diffs))
def test_plot_nans(self):
x1 = self.darray[:5]
x2 = self.darray.copy()
x2[5:] = np.nan
clim1 = self.plotfunc(x1).get_clim()
clim2 = self.plotfunc(x2).get_clim()
self.assertEqual(clim1, clim2)
def test_viridis_cmap(self):
cmap_name = self.plotmethod(cmap='viridis').get_cmap().name
self.assertEqual('viridis', cmap_name)
def test_default_cmap(self):
cmap_name = self.plotmethod().get_cmap().name
self.assertEqual('RdBu_r', cmap_name)
cmap_name = self.plotfunc(abs(self.darray)).get_cmap().name
self.assertEqual('viridis', cmap_name)
def test_seaborn_palette_as_cmap(self):
try:
import seaborn
cmap_name = self.plotmethod(
levels=2, cmap='husl').get_cmap().name
self.assertEqual('husl', cmap_name)
except ImportError:
pass
def test_can_change_default_cmap(self):
cmap_name = self.plotmethod(cmap='Blues').get_cmap().name
self.assertEqual('Blues', cmap_name)
def test_diverging_color_limits(self):
artist = self.plotmethod()
vmin, vmax = artist.get_clim()
self.assertAlmostEqual(-vmin, vmax)
def test_xy_strings(self):
self.plotmethod('y', 'x')
ax = plt.gca()
self.assertEqual('y', ax.get_xlabel())
self.assertEqual('x', ax.get_ylabel())
def test_positional_coord_string(self):
with self.assertRaisesRegexp(ValueError, 'cannot supply only one'):
self.plotmethod('y')
with self.assertRaisesRegexp(ValueError, 'cannot supply only one'):
self.plotmethod(y='x')
def test_bad_x_string_exception(self):
with self.assertRaisesRegexp(ValueError, 'x and y must be coordinate'):
self.plotmethod('not_a_real_dim', 'y')
self.darray.coords['z'] = 100
with self.assertRaisesRegexp(ValueError, 'cannot supply only one'):
self.plotmethod('z')
def test_coord_strings(self):
# 1d coords (same as dims)
self.assertIn('x', self.darray.coords)
self.assertIn('y', self.darray.coords)
self.plotmethod(y='y', x='x')
def test_default_title(self):
a = DataArray(easy_array((4, 3, 2)), dims=['a', 'b', 'c'])
a.coords['d'] = u'foo'
self.plotfunc(a.isel(c=1))
title = plt.gca().get_title()
self.assertEqual('c = 1, d = foo', title)
def test_colorbar_label(self):
self.darray.name = 'testvar'
self.plotmethod()
self.assertIn(self.darray.name, text_in_fig())
def test_no_labels(self):
self.darray.name = 'testvar'
self.plotmethod(add_labels=False)
alltxt = text_in_fig()
for string in ['x', 'y', 'testvar']:
self.assertNotIn(string, alltxt)
def test_verbose_facetgrid(self):
a = easy_array((10, 15, 3))
d = DataArray(a, dims=['y', 'x', 'z'])
g = xplt.FacetGrid(d, col='z')
g.map_dataarray(self.plotfunc, 'x', 'y')
for ax in g.axes.flat:
self.assertTrue(ax.has_data())
@incompatible_2_6
def test_2d_function_and_method_signature_same(self):
func_sig = inspect.getcallargs(self.plotfunc, self.darray)
method_sig = inspect.getcallargs(self.plotmethod)
del method_sig['_PlotMethods_obj']
del func_sig['darray']
self.assertEqual(func_sig, method_sig)
def test_convenient_facetgrid(self):
a = easy_array((10, 15, 4))
d = DataArray(a, dims=['y', 'x', 'z'])
g = self.plotfunc(d, x='x', y='y', col='z', col_wrap=2)
self.assertArrayEqual(g.axes.shape, [2, 2])
for (y, x), ax in np.ndenumerate(g.axes):
self.assertTrue(ax.has_data())
if x == 0:
self.assertEqual('y', ax.get_ylabel())
else:
self.assertEqual('', ax.get_ylabel())
if y == 1:
self.assertEqual('x', ax.get_xlabel())
else:
self.assertEqual('', ax.get_xlabel())
        # Inferring labels
g = self.plotfunc(d, col='z', col_wrap=2)
self.assertArrayEqual(g.axes.shape, [2, 2])
for (y, x), ax in np.ndenumerate(g.axes):
self.assertTrue(ax.has_data())
if x == 0:
self.assertEqual('y', ax.get_ylabel())
else:
self.assertEqual('', ax.get_ylabel())
if y == 1:
self.assertEqual('x', ax.get_xlabel())
else:
self.assertEqual('', ax.get_xlabel())
def test_convenient_facetgrid_4d(self):
a = easy_array((10, 15, 2, 3))
d = DataArray(a, dims=['y', 'x', 'columns', 'rows'])
g = self.plotfunc(d, x='x', y='y', col='columns', row='rows')
self.assertArrayEqual(g.axes.shape, [3, 2])
for ax in g.axes.flat:
self.assertTrue(ax.has_data())
def test_facetgrid_cmap(self):
# Regression test for GH592
data = (np.random.random(size=(20, 25, 12)) + np.linspace(-3, 3, 12))
d = DataArray(data, dims=['x', 'y', 'time'])
fg = d.plot.pcolormesh(col='time')
# check that all color limits are the same
self.assertTrue(len(set(m.get_clim() for m in fg._mappables)) == 1)
# check that all colormaps are the same
self.assertTrue(len(set(m.get_cmap().name for m in fg._mappables)) == 1)
class TestContourf(Common2dMixin, PlotTestCase):
plotfunc = staticmethod(xplt.contourf)
def test_contourf_called(self):
# Having both statements ensures the test works properly
self.assertFalse(self.contourf_called(self.darray.plot.imshow))
self.assertTrue(self.contourf_called(self.darray.plot.contourf))
def test_primitive_artist_returned(self):
artist = self.plotmethod()
self.assertTrue(isinstance(artist, mpl.contour.QuadContourSet))
def test_extend(self):
artist = self.plotmethod()
self.assertEqual(artist.extend, 'neither')
self.darray[0, 0] = -100
self.darray[-1, -1] = 100
artist = self.plotmethod(robust=True)
self.assertEqual(artist.extend, 'both')
self.darray[0, 0] = 0
self.darray[-1, -1] = 0
artist = self.plotmethod(vmin=-0, vmax=10)
self.assertEqual(artist.extend, 'min')
artist = self.plotmethod(vmin=-10, vmax=0)
self.assertEqual(artist.extend, 'max')
def test_2d_coord_names(self):
self.plotmethod(x='x2d', y='y2d')
# make sure labels came out ok
ax = plt.gca()
self.assertEqual('x2d', ax.get_xlabel())
self.assertEqual('y2d', ax.get_ylabel())
def test_levels(self):
artist = self.plotmethod(levels=[-0.5, -0.4, 0.1])
self.assertEqual(artist.extend, 'both')
artist = self.plotmethod(levels=3)
self.assertEqual(artist.extend, 'neither')
class TestContour(Common2dMixin, PlotTestCase):
plotfunc = staticmethod(xplt.contour)
def test_colors(self):
        # matplotlib cmap.colors gives an RGBA ndarray;
        # when seaborn is used, we get an RGB tuple instead
def _color_as_tuple(c):
return tuple(c[:3])
artist = self.plotmethod(colors='k')
self.assertEqual(
_color_as_tuple(artist.cmap.colors[0]),
(0.0, 0.0, 0.0))
artist = self.plotmethod(colors=['k', 'b'])
self.assertEqual(
_color_as_tuple(artist.cmap.colors[1]),
(0.0, 0.0, 1.0))
def test_cmap_and_color_both(self):
with self.assertRaises(ValueError):
self.plotmethod(colors='k', cmap='RdBu')
def list_of_colors_in_cmap_deprecated(self):
with self.assertRaises(Exception):
self.plotmethod(cmap=['k', 'b'])
def test_2d_coord_names(self):
self.plotmethod(x='x2d', y='y2d')
# make sure labels came out ok
ax = plt.gca()
self.assertEqual('x2d', ax.get_xlabel())
self.assertEqual('y2d', ax.get_ylabel())
class TestPcolormesh(Common2dMixin, PlotTestCase):
plotfunc = staticmethod(xplt.pcolormesh)
def test_primitive_artist_returned(self):
artist = self.plotmethod()
self.assertTrue(isinstance(artist, mpl.collections.QuadMesh))
def test_everything_plotted(self):
artist = self.plotmethod()
self.assertEqual(artist.get_array().size, self.darray.size)
def test_2d_coord_names(self):
self.plotmethod(x='x2d', y='y2d')
# make sure labels came out ok
ax = plt.gca()
self.assertEqual('x2d', ax.get_xlabel())
self.assertEqual('y2d', ax.get_ylabel())
class TestImshow(Common2dMixin, PlotTestCase):
plotfunc = staticmethod(xplt.imshow)
def test_imshow_called(self):
# Having both statements ensures the test works properly
self.assertFalse(self.imshow_called(self.darray.plot.contourf))
self.assertTrue(self.imshow_called(self.darray.plot.imshow))
def test_xy_pixel_centered(self):
self.darray.plot.imshow(yincrease=False)
self.assertTrue(np.allclose([-0.5, 14.5], plt.gca().get_xlim()))
self.assertTrue(np.allclose([9.5, -0.5], plt.gca().get_ylim()))
def test_default_aspect_is_auto(self):
self.darray.plot.imshow()
self.assertEqual('auto', plt.gca().get_aspect())
def test_can_change_aspect(self):
self.darray.plot.imshow(aspect='equal')
self.assertEqual('equal', plt.gca().get_aspect())
def test_primitive_artist_returned(self):
artist = self.plotmethod()
self.assertTrue(isinstance(artist, mpl.image.AxesImage))
def test_seaborn_palette_needs_levels(self):
try:
import seaborn
with self.assertRaises(ValueError):
self.plotmethod(cmap='husl')
except ImportError:
pass
def test_2d_coord_names(self):
with self.assertRaisesRegexp(ValueError, 'requires 1D coordinates'):
self.plotmethod(x='x2d', y='y2d')
class TestFacetGrid(PlotTestCase):
def setUp(self):
d = easy_array((10, 15, 3))
self.darray = DataArray(d, dims=['y', 'x', 'z'],
coords={'z': ['a', 'b', 'c']})
self.g = xplt.FacetGrid(self.darray, col='z')
def test_no_args(self):
self.g.map_dataarray(xplt.contourf, 'x', 'y')
# Don't want colorbar labeled with 'None'
alltxt = text_in_fig()
self.assertNotIn('None', alltxt)
for ax in self.g.axes.flat:
self.assertTrue(ax.has_data())
# default font size should be small
fontsize = ax.title.get_size()
self.assertLessEqual(fontsize, 12)
def test_names_appear_somewhere(self):
self.darray.name = 'testvar'
self.g.map_dataarray(xplt.contourf, 'x', 'y')
for k, ax in zip('abc', self.g.axes.flat):
self.assertEqual('z = {0}'.format(k), ax.get_title())
alltxt = text_in_fig()
self.assertIn(self.darray.name, alltxt)
for label in ['x', 'y']:
self.assertIn(label, alltxt)
def test_text_not_super_long(self):
self.darray.coords['z'] = [100 * letter for letter in 'abc']
g = xplt.FacetGrid(self.darray, col='z')
g.map_dataarray(xplt.contour, 'x', 'y')
alltxt = text_in_fig()
maxlen = max(len(txt) for txt in alltxt)
self.assertLess(maxlen, 50)
t0 = g.axes[0, 0].get_title()
self.assertTrue(t0.endswith('...'))
def test_colorbar(self):
vmin = self.darray.values.min()
vmax = self.darray.values.max()
expected = np.array((vmin, vmax))
self.g.map_dataarray(xplt.imshow, 'x', 'y')
for image in plt.gcf().findobj(mpl.image.AxesImage):
clim = np.array(image.get_clim())
self.assertTrue(np.allclose(expected, clim))
# There's only one colorbar
cbar = plt.gcf().findobj(mpl.collections.QuadMesh)
self.assertEqual(1, len(cbar))
def test_empty_cell(self):
g = xplt.FacetGrid(self.darray, col='z', col_wrap=2)
g.map_dataarray(xplt.imshow, 'x', 'y')
bottomright = g.axes[-1, -1]
self.assertFalse(bottomright.has_data())
self.assertFalse(bottomright.get_visible())
def test_norow_nocol_error(self):
with self.assertRaisesRegexp(ValueError, r'[Rr]ow'):
xplt.FacetGrid(self.darray)
def test_groups(self):
self.g.map_dataarray(xplt.imshow, 'x', 'y')
upperleft_dict = self.g.name_dicts[0, 0]
upperleft_array = self.darray.loc[upperleft_dict]
z0 = self.darray.isel(z=0)
self.assertDataArrayEqual(upperleft_array, z0)
def test_float_index(self):
self.darray.coords['z'] = [0.1, 0.2, 0.4]
g = xplt.FacetGrid(self.darray, col='z')
g.map_dataarray(xplt.imshow, 'x', 'y')
def test_nonunique_index_error(self):
self.darray.coords['z'] = [0.1, 0.2, 0.2]
with self.assertRaisesRegexp(ValueError, r'[Uu]nique'):
xplt.FacetGrid(self.darray, col='z')
def test_robust(self):
z = np.zeros((20, 20, 2))
darray = DataArray(z, dims=['y', 'x', 'z'])
darray[:, :, 1] = 1
darray[2, 0, 0] = -1000
darray[3, 0, 0] = 1000
g = xplt.FacetGrid(darray, col='z')
g.map_dataarray(xplt.imshow, 'x', 'y', robust=True)
# Color limits should be 0, 1
# The largest number displayed in the figure should be less than 21
numbers = set()
alltxt = text_in_fig()
for txt in alltxt:
try:
numbers.add(float(txt))
except ValueError:
pass
largest = max(abs(x) for x in numbers)
self.assertLess(largest, 21)
def test_can_set_vmin_vmax(self):
vmin, vmax = 50.0, 1000.0
expected = np.array((vmin, vmax))
self.g.map_dataarray(xplt.imshow, 'x', 'y', vmin=vmin, vmax=vmax)
for image in plt.gcf().findobj(mpl.image.AxesImage):
clim = np.array(image.get_clim())
self.assertTrue(np.allclose(expected, clim))
def test_figure_size(self):
self.assertArrayEqual(self.g.fig.get_size_inches(), (10, 3))
g = xplt.FacetGrid(self.darray, col='z', size=6)
self.assertArrayEqual(g.fig.get_size_inches(), (19, 6))
g = self.darray.plot.imshow(col='z', size=6)
self.assertArrayEqual(g.fig.get_size_inches(), (19, 6))
g = xplt.FacetGrid(self.darray, col='z', size=4, aspect=0.5)
self.assertArrayEqual(g.fig.get_size_inches(), (7, 4))
def test_num_ticks(self):
nticks = 100
maxticks = nticks + 1
self.g.map_dataarray(xplt.imshow, 'x', 'y')
self.g.set_ticks(max_xticks=nticks, max_yticks=nticks)
for ax in self.g.axes.flat:
xticks = len(ax.get_xticks())
yticks = len(ax.get_yticks())
self.assertLessEqual(xticks, maxticks)
self.assertLessEqual(yticks, maxticks)
self.assertGreaterEqual(xticks, nticks / 2.0)
self.assertGreaterEqual(yticks, nticks / 2.0)
def test_map(self):
self.g.map(plt.contourf, 'x', 'y', Ellipsis)
self.g.map(lambda: None)
class TestFacetGrid4d(PlotTestCase):
def setUp(self):
a = easy_array((10, 15, 3, 2))
darray = DataArray(a, dims=['y', 'x', 'col', 'row'])
darray.coords['col'] = np.array(['col' + str(x) for x in
darray.coords['col'].values])
darray.coords['row'] = np.array(['row' + str(x) for x in
darray.coords['row'].values])
self.darray = darray
def test_default_labels(self):
g = xplt.FacetGrid(self.darray, col='col', row='row')
self.assertEqual((2, 3), g.axes.shape)
g.map_dataarray(xplt.imshow, 'x', 'y')
# Rightmost column should be labeled
for label, ax in zip(self.darray.coords['row'].values, g.axes[:, -1]):
self.assertTrue(substring_in_axes(label, ax))
# Top row should be labeled
for label, ax in zip(self.darray.coords['col'].values, g.axes[0, :]):
self.assertTrue(substring_in_axes(label, ax))
| apache-2.0 |
unreal666/outwiker | plugins/source/source/pygments/lexers/erlang.py | 6 | 18938 | # -*- coding: utf-8 -*-
"""
pygments.lexers.erlang
~~~~~~~~~~~~~~~~~~~~~~
Lexers for Erlang.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, words, do_insertions, \
include, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
__all__ = ['ErlangLexer', 'ErlangShellLexer', 'ElixirConsoleLexer',
'ElixirLexer']
line_re = re.compile('.*?\n')
class ErlangLexer(RegexLexer):
"""
For the Erlang functional programming language.
Blame Jeremy Thurgood (http://jerith.za.net/).
.. versionadded:: 0.9
"""
name = 'Erlang'
aliases = ['erlang']
filenames = ['*.erl', '*.hrl', '*.es', '*.escript']
mimetypes = ['text/x-erlang']
keywords = (
'after', 'begin', 'case', 'catch', 'cond', 'end', 'fun', 'if',
'let', 'of', 'query', 'receive', 'try', 'when',
)
builtins = ( # See erlang(3) man page
'abs', 'append_element', 'apply', 'atom_to_list', 'binary_to_list',
'bitstring_to_list', 'binary_to_term', 'bit_size', 'bump_reductions',
'byte_size', 'cancel_timer', 'check_process_code', 'delete_module',
'demonitor', 'disconnect_node', 'display', 'element', 'erase', 'exit',
'float', 'float_to_list', 'fun_info', 'fun_to_list',
'function_exported', 'garbage_collect', 'get', 'get_keys',
'group_leader', 'hash', 'hd', 'integer_to_list', 'iolist_to_binary',
'iolist_size', 'is_atom', 'is_binary', 'is_bitstring', 'is_boolean',
'is_builtin', 'is_float', 'is_function', 'is_integer', 'is_list',
'is_number', 'is_pid', 'is_port', 'is_process_alive', 'is_record',
'is_reference', 'is_tuple', 'length', 'link', 'list_to_atom',
'list_to_binary', 'list_to_bitstring', 'list_to_existing_atom',
'list_to_float', 'list_to_integer', 'list_to_pid', 'list_to_tuple',
'load_module', 'localtime_to_universaltime', 'make_tuple', 'md5',
'md5_final', 'md5_update', 'memory', 'module_loaded', 'monitor',
'monitor_node', 'node', 'nodes', 'open_port', 'phash', 'phash2',
'pid_to_list', 'port_close', 'port_command', 'port_connect',
'port_control', 'port_call', 'port_info', 'port_to_list',
'process_display', 'process_flag', 'process_info', 'purge_module',
'put', 'read_timer', 'ref_to_list', 'register', 'resume_process',
'round', 'send', 'send_after', 'send_nosuspend', 'set_cookie',
'setelement', 'size', 'spawn', 'spawn_link', 'spawn_monitor',
'spawn_opt', 'split_binary', 'start_timer', 'statistics',
'suspend_process', 'system_flag', 'system_info', 'system_monitor',
'system_profile', 'term_to_binary', 'tl', 'trace', 'trace_delivered',
'trace_info', 'trace_pattern', 'trunc', 'tuple_size', 'tuple_to_list',
'universaltime_to_localtime', 'unlink', 'unregister', 'whereis'
)
operators = r'(\+\+?|--?|\*|/|<|>|/=|=:=|=/=|=<|>=|==?|<-|!|\?)'
word_operators = (
'and', 'andalso', 'band', 'bnot', 'bor', 'bsl', 'bsr', 'bxor',
'div', 'not', 'or', 'orelse', 'rem', 'xor'
)
atom_re = r"(?:[a-z]\w*|'[^\n']*[^\\]')"
variable_re = r'(?:[A-Z_]\w*)'
esc_char_re = r'[bdefnrstv\'"\\]'
esc_octal_re = r'[0-7][0-7]?[0-7]?'
esc_hex_re = r'(?:x[0-9a-fA-F]{2}|x\{[0-9a-fA-F]+\})'
esc_ctrl_re = r'\^[a-zA-Z]'
escape_re = r'(?:\\(?:'+esc_char_re+r'|'+esc_octal_re+r'|'+esc_hex_re+r'|'+esc_ctrl_re+r'))'
macro_re = r'(?:'+variable_re+r'|'+atom_re+r')'
base_re = r'(?:[2-9]|[12][0-9]|3[0-6])'
tokens = {
'root': [
(r'\s+', Text),
(r'%.*\n', Comment),
(words(keywords, suffix=r'\b'), Keyword),
(words(builtins, suffix=r'\b'), Name.Builtin),
(words(word_operators, suffix=r'\b'), Operator.Word),
(r'^-', Punctuation, 'directive'),
(operators, Operator),
(r'"', String, 'string'),
(r'<<', Name.Label),
(r'>>', Name.Label),
('(' + atom_re + ')(:)', bygroups(Name.Namespace, Punctuation)),
('(?:^|(?<=:))(' + atom_re + r')(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[+-]?' + base_re + r'#[0-9a-zA-Z]+', Number.Integer),
(r'[+-]?\d+', Number.Integer),
(r'[+-]?\d+.\d+', Number.Float),
(r'[]\[:_@\".{}()|;,]', Punctuation),
(variable_re, Name.Variable),
(atom_re, Name),
(r'\?'+macro_re, Name.Constant),
(r'\$(?:'+escape_re+r'|\\[ %]|[^\\])', String.Char),
(r'#'+atom_re+r'(:?\.'+atom_re+r')?', Name.Label),
# Erlang script shebang
(r'\A#!.+\n', Comment.Hashbang),
# EEP 43: Maps
# http://www.erlang.org/eeps/eep-0043.html
(r'#\{', Punctuation, 'map_key'),
],
'string': [
(escape_re, String.Escape),
(r'"', String, '#pop'),
(r'~[0-9.*]*[~#+BPWXb-ginpswx]', String.Interpol),
(r'[^"\\~]+', String),
(r'~', String),
],
'directive': [
(r'(define)(\s*)(\()('+macro_re+r')',
bygroups(Name.Entity, Text, Punctuation, Name.Constant), '#pop'),
(r'(record)(\s*)(\()('+macro_re+r')',
bygroups(Name.Entity, Text, Punctuation, Name.Label), '#pop'),
(atom_re, Name.Entity, '#pop'),
],
'map_key': [
include('root'),
(r'=>', Punctuation, 'map_val'),
(r':=', Punctuation, 'map_val'),
(r'\}', Punctuation, '#pop'),
],
'map_val': [
include('root'),
(r',', Punctuation, '#pop'),
(r'(?=\})', Punctuation, '#pop'),
],
}
class ErlangShellLexer(Lexer):
"""
Shell sessions in erl (for Erlang code).
.. versionadded:: 1.1
"""
name = 'Erlang erl session'
aliases = ['erl']
filenames = ['*.erl-sh']
mimetypes = ['text/x-erl-shellsession']
_prompt_re = re.compile(r'\d+>(?=\s|\Z)')
def get_tokens_unprocessed(self, text):
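        # Split the transcript into prompt/code lines and plain output: numbered
        # prompts are emitted as Generic.Prompt, the buffered code is re-lexed with
        # ErlangLexer, and lines starting with '*' are treated as tracebacks.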
erlexer = ErlangLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
m = self._prompt_re.match(line)
if m is not None:
end = m.end()
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:end])]))
curcode += line[end:]
else:
if curcode:
for item in do_insertions(insertions,
erlexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
if line.startswith('*'):
yield match.start(), Generic.Traceback, line
else:
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(insertions,
erlexer.get_tokens_unprocessed(curcode)):
yield item
def gen_elixir_string_rules(name, symbol, token):
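    # Build the lexer state for one string delimiter: plain characters, escape
    # sequences, the closing delimiter (which pops the state) and #{...} interpolation.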
states = {}
states['string_' + name] = [
(r'[^#%s\\]+' % (symbol,), token),
include('escapes'),
(r'\\.', token),
(r'(%s)' % (symbol,), bygroups(token), "#pop"),
include('interpol')
]
return states
def gen_elixir_sigstr_rules(term, token, interpol=True):
if interpol:
return [
(r'[^#%s\\]+' % (term,), token),
include('escapes'),
(r'\\.', token),
(r'%s[a-zA-Z]*' % (term,), token, '#pop'),
include('interpol')
]
else:
return [
(r'[^%s\\]+' % (term,), token),
(r'\\.', token),
(r'%s[a-zA-Z]*' % (term,), token, '#pop'),
]
class ElixirLexer(RegexLexer):
"""
For the `Elixir language <http://elixir-lang.org>`_.
.. versionadded:: 1.5
"""
name = 'Elixir'
aliases = ['elixir', 'ex', 'exs']
filenames = ['*.ex', '*.exs']
mimetypes = ['text/x-elixir']
KEYWORD = ('fn', 'do', 'end', 'after', 'else', 'rescue', 'catch')
KEYWORD_OPERATOR = ('not', 'and', 'or', 'when', 'in')
BUILTIN = (
'case', 'cond', 'for', 'if', 'unless', 'try', 'receive', 'raise',
'quote', 'unquote', 'unquote_splicing', 'throw', 'super',
)
BUILTIN_DECLARATION = (
'def', 'defp', 'defmodule', 'defprotocol', 'defmacro', 'defmacrop',
'defdelegate', 'defexception', 'defstruct', 'defimpl', 'defcallback',
)
BUILTIN_NAMESPACE = ('import', 'require', 'use', 'alias')
CONSTANT = ('nil', 'true', 'false')
PSEUDO_VAR = ('_', '__MODULE__', '__DIR__', '__ENV__', '__CALLER__')
OPERATORS3 = (
'<<<', '>>>', '|||', '&&&', '^^^', '~~~', '===', '!==',
'~>>', '<~>', '|~>', '<|>',
)
OPERATORS2 = (
'==', '!=', '<=', '>=', '&&', '||', '<>', '++', '--', '|>', '=~',
'->', '<-', '|', '.', '=', '~>', '<~',
)
OPERATORS1 = ('<', '>', '+', '-', '*', '/', '!', '^', '&')
PUNCTUATION = (
'\\\\', '<<', '>>', '=>', '(', ')', ':', ';', ',', '[', ']',
)
def get_tokens_unprocessed(self, text):
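        # Post-process generic Name tokens produced by the regex rules, re-classifying
        # them as keywords, builtins, declarations, namespaces, constants or
        # pseudo-variables based on the class-level word lists above.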
for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
if token is Name:
if value in self.KEYWORD:
yield index, Keyword, value
elif value in self.KEYWORD_OPERATOR:
yield index, Operator.Word, value
elif value in self.BUILTIN:
yield index, Keyword, value
elif value in self.BUILTIN_DECLARATION:
yield index, Keyword.Declaration, value
elif value in self.BUILTIN_NAMESPACE:
yield index, Keyword.Namespace, value
elif value in self.CONSTANT:
yield index, Name.Constant, value
elif value in self.PSEUDO_VAR:
yield index, Name.Builtin.Pseudo, value
else:
yield index, token, value
else:
yield index, token, value
def gen_elixir_sigil_rules():
# all valid sigil terminators (excluding heredocs)
terminators = [
(r'\{', r'\}', 'cb'),
(r'\[', r'\]', 'sb'),
(r'\(', r'\)', 'pa'),
(r'<', r'>', 'ab'),
(r'/', r'/', 'slas'),
(r'\|', r'\|', 'pipe'),
('"', '"', 'quot'),
("'", "'", 'apos'),
]
# heredocs have slightly different rules
triquotes = [(r'"""', 'triquot'), (r"'''", 'triapos')]
token = String.Other
states = {'sigils': []}
for term, name in triquotes:
states['sigils'] += [
(r'(~[a-z])(%s)' % (term,), bygroups(token, String.Heredoc),
(name + '-end', name + '-intp')),
(r'(~[A-Z])(%s)' % (term,), bygroups(token, String.Heredoc),
(name + '-end', name + '-no-intp')),
]
states[name + '-end'] = [
(r'[a-zA-Z]+', token, '#pop'),
default('#pop'),
]
states[name + '-intp'] = [
(r'^\s*' + term, String.Heredoc, '#pop'),
include('heredoc_interpol'),
]
states[name + '-no-intp'] = [
(r'^\s*' + term, String.Heredoc, '#pop'),
include('heredoc_no_interpol'),
]
for lterm, rterm, name in terminators:
states['sigils'] += [
(r'~[a-z]' + lterm, token, name + '-intp'),
(r'~[A-Z]' + lterm, token, name + '-no-intp'),
]
states[name + '-intp'] = gen_elixir_sigstr_rules(rterm, token)
states[name + '-no-intp'] = \
gen_elixir_sigstr_rules(rterm, token, interpol=False)
return states
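    # The operator regexes below are joined from the tuples above; in the 'root'
    # state three-character operators are tried before two- and one-character ones,
    # so shorter prefixes do not shadow longer operators.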
op3_re = "|".join(re.escape(s) for s in OPERATORS3)
op2_re = "|".join(re.escape(s) for s in OPERATORS2)
op1_re = "|".join(re.escape(s) for s in OPERATORS1)
ops_re = r'(?:%s|%s|%s)' % (op3_re, op2_re, op1_re)
punctuation_re = "|".join(re.escape(s) for s in PUNCTUATION)
alnum = r'\w'
name_re = r'(?:\.\.\.|[a-z_]%s*[!?]?)' % alnum
modname_re = r'[A-Z]%(alnum)s*(?:\.[A-Z]%(alnum)s*)*' % {'alnum': alnum}
complex_name_re = r'(?:%s|%s|%s)' % (name_re, modname_re, ops_re)
special_atom_re = r'(?:\.\.\.|<<>>|%\{\}|%|\{\})'
long_hex_char_re = r'(\\x\{)([\da-fA-F]+)(\})'
hex_char_re = r'(\\x[\da-fA-F]{1,2})'
escape_char_re = r'(\\[abdefnrstv])'
tokens = {
'root': [
(r'\s+', Text),
(r'#.*$', Comment.Single),
# Various kinds of characters
(r'(\?)' + long_hex_char_re,
bygroups(String.Char,
String.Escape, Number.Hex, String.Escape)),
(r'(\?)' + hex_char_re,
bygroups(String.Char, String.Escape)),
(r'(\?)' + escape_char_re,
bygroups(String.Char, String.Escape)),
(r'\?\\?.', String.Char),
# '::' has to go before atoms
(r':::', String.Symbol),
(r'::', Operator),
# atoms
(r':' + special_atom_re, String.Symbol),
(r':' + complex_name_re, String.Symbol),
(r':"', String.Symbol, 'string_double_atom'),
(r":'", String.Symbol, 'string_single_atom'),
# [keywords: ...]
(r'(%s|%s)(:)(?=\s|\n)' % (special_atom_re, complex_name_re),
bygroups(String.Symbol, Punctuation)),
# @attributes
(r'@' + name_re, Name.Attribute),
# identifiers
(name_re, Name),
(r'(%%?)(%s)' % (modname_re,), bygroups(Punctuation, Name.Class)),
# operators and punctuation
(op3_re, Operator),
(op2_re, Operator),
(punctuation_re, Punctuation),
(r'&\d', Name.Entity), # anon func arguments
(op1_re, Operator),
# numbers
(r'0b[01]+', Number.Bin),
(r'0o[0-7]+', Number.Oct),
(r'0x[\da-fA-F]+', Number.Hex),
(r'\d(_?\d)*\.\d(_?\d)*([eE][-+]?\d(_?\d)*)?', Number.Float),
(r'\d(_?\d)*', Number.Integer),
# strings and heredocs
(r'"""\s*', String.Heredoc, 'heredoc_double'),
(r"'''\s*$", String.Heredoc, 'heredoc_single'),
(r'"', String.Double, 'string_double'),
(r"'", String.Single, 'string_single'),
include('sigils'),
(r'%\{', Punctuation, 'map_key'),
(r'\{', Punctuation, 'tuple'),
],
'heredoc_double': [
(r'^\s*"""', String.Heredoc, '#pop'),
include('heredoc_interpol'),
],
'heredoc_single': [
(r"^\s*'''", String.Heredoc, '#pop'),
include('heredoc_interpol'),
],
'heredoc_interpol': [
(r'[^#\\\n]+', String.Heredoc),
include('escapes'),
(r'\\.', String.Heredoc),
(r'\n+', String.Heredoc),
include('interpol'),
],
'heredoc_no_interpol': [
(r'[^\\\n]+', String.Heredoc),
(r'\\.', String.Heredoc),
(r'\n+', String.Heredoc),
],
'escapes': [
(long_hex_char_re,
bygroups(String.Escape, Number.Hex, String.Escape)),
(hex_char_re, String.Escape),
(escape_char_re, String.Escape),
],
'interpol': [
(r'#\{', String.Interpol, 'interpol_string'),
],
'interpol_string': [
(r'\}', String.Interpol, "#pop"),
include('root')
],
'map_key': [
include('root'),
(r':', Punctuation, 'map_val'),
(r'=>', Punctuation, 'map_val'),
(r'\}', Punctuation, '#pop'),
],
'map_val': [
include('root'),
(r',', Punctuation, '#pop'),
(r'(?=\})', Punctuation, '#pop'),
],
'tuple': [
include('root'),
(r'\}', Punctuation, '#pop'),
],
}
tokens.update(gen_elixir_string_rules('double', '"', String.Double))
tokens.update(gen_elixir_string_rules('single', "'", String.Single))
tokens.update(gen_elixir_string_rules('double_atom', '"', String.Symbol))
tokens.update(gen_elixir_string_rules('single_atom', "'", String.Symbol))
tokens.update(gen_elixir_sigil_rules())
class ElixirConsoleLexer(Lexer):
"""
For Elixir interactive console (iex) output like:
.. sourcecode:: iex
iex> [head | tail] = [1,2,3]
[1,2,3]
iex> head
1
iex> tail
[2,3]
iex> [head | tail]
[1,2,3]
iex> length [head | tail]
3
.. versionadded:: 1.5
"""
name = 'Elixir iex session'
aliases = ['iex']
mimetypes = ['text/x-elixir-shellsession']
_prompt_re = re.compile(r'(iex|\.{3})(\(\d+\))?> ')
def get_tokens_unprocessed(self, text):
exlexer = ElixirLexer(**self.options)
curcode = ''
in_error = False
insertions = []
for match in line_re.finditer(text):
line = match.group()
if line.startswith(u'** '):
in_error = True
insertions.append((len(curcode),
[(0, Generic.Error, line[:-1])]))
curcode += line[-1:]
else:
m = self._prompt_re.match(line)
if m is not None:
in_error = False
end = m.end()
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:end])]))
curcode += line[end:]
else:
if curcode:
for item in do_insertions(
insertions, exlexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
token = Generic.Error if in_error else Generic.Output
yield match.start(), token, line
if curcode:
for item in do_insertions(
insertions, exlexer.get_tokens_unprocessed(curcode)):
yield item
| gpl-3.0 |
chafique-delli/OpenUpgrade | addons/l10n_be/__init__.py | 430 | 1060 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
google-code-export/pychess | testing/bitboard.py | 21 | 1674 | import unittest
import random
import operator
from functools import reduce
from pychess.Utils.lutils.bitboard import *
class BitboardTestCase(unittest.TestCase):
def setUp (self):
self.positionSets = []
        # Random position sets, ten for each length in range(64); the empty set
        # (length 0) is included as well.
for i in range(10):
for length in range(64):
if length:
positions = random.sample(range(64), length)
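                    # Position 0 maps to the most significant bit (1 << 63); the
                    # bitboard helpers under test are assumed to use the same
                    # MSB-first convention.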
board = reduce(operator.or_, (1<<(63-i) for i in positions))
self.positionSets.append( (positions, board) )
else:
self.positionSets.append( ([], 0) )
def test1(self):
"""Testing setbit and clearbit"""
for positions,board in self.positionSets:
b = 0
for pos in positions:
b = setBit(b, pos)
self.assertEqual(b, board)
for pos in positions:
b = clearBit(b, pos)
self.assertEqual(b, 0)
def test2(self):
"""Testing firstbit and lastbit"""
for positions,board in self.positionSets:
if positions:
positions.sort()
self.assertEqual(positions[0], firstBit(board))
self.assertEqual(positions[-1], lastBit(board))
def test3(self):
"""Testing iterbits"""
for positions,board in self.positionSets:
positions.sort()
itered = sorted(iterBits(board))
self.assertEqual(positions, itered)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
jhawkesworth/ansible | lib/ansible/modules/network/f5/bigip_device_auth_ldap.py | 38 | 26870 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_device_auth_ldap
short_description: Manage LDAP device authentication settings on BIG-IP
description:
- Manage LDAP device authentication settings on BIG-IP.
version_added: 2.8
options:
servers:
description:
- Specifies the LDAP servers that the system must use to obtain
authentication information. You must specify a server when you
create an LDAP configuration object.
type: list
port:
description:
- Specifies the port that the system uses for access to the remote host server.
- When configuring LDAP device authentication for the first time, if this parameter
is not specified, the default port is C(389).
type: int
remote_directory_tree:
description:
- Specifies the file location (tree) of the user authentication database on the
server.
type: str
scope:
description:
- Specifies the level of the remote Active Directory or LDAP directory that the
system should search for the user authentication.
type: str
choices:
- sub
- one
- base
bind_dn:
description:
- Specifies the distinguished name for the Active Directory or LDAP server user
ID.
- The BIG-IP client authentication module does not support Active Directory or
LDAP servers that do not perform bind referral when authenticating referred
accounts.
- Therefore, if you plan to use Active Directory or LDAP as your authentication
source and want to use referred accounts, make sure your servers perform bind
referral.
type: str
bind_password:
description:
- Specifies a password for the Active Directory or LDAP server user ID.
type: str
user_template:
description:
- Specifies the distinguished name of the user who is logging on.
- You specify the template as a variable that the system replaces with user-specific
information during the logon attempt.
      - For example, you could specify a user template such as C(%s@siterequest.com) or
        C(uid=%s,ou=people,dc=siterequest,dc=com).
- When a user attempts to log on, the system replaces C(%s) with the name the user
specified in the Basic Authentication dialog box, and passes that as the
distinguished name for the bind operation.
- The system passes the associated password as the password for the bind operation.
- This field can contain only one C(%s) and cannot contain any other format
specifiers.
type: str
check_member_attr:
description:
- Checks the user's member attribute in the remote LDAP or AD group.
type: bool
ssl:
description:
- Specifies whether the system uses an SSL port to communicate with the LDAP server.
type: str
choices:
- "yes"
- "no"
- start-tls
ca_cert:
description:
- Specifies the name of an SSL certificate from a certificate authority (CA).
- To remove this value, use the reserved value C(none).
type: str
aliases: [ ssl_ca_cert ]
client_key:
description:
- Specifies the name of an SSL client key.
- To remove this value, use the reserved value C(none).
type: str
aliases: [ ssl_client_key ]
client_cert:
description:
- Specifies the name of an SSL client certificate.
- To remove this value, use the reserved value C(none).
type: str
aliases: [ ssl_client_cert ]
validate_certs:
description:
- Specifies whether the system checks an SSL peer, as a result of which the
system requires and verifies the server certificate.
type: bool
aliases: [ ssl_check_peer ]
login_ldap_attr:
description:
- Specifies the LDAP directory attribute containing the local user name that is
associated with the selected directory entry.
- When configuring LDAP device authentication for the first time, if this parameter
        is not specified, the default value is C(samaccountname).
type: str
fallback_to_local:
description:
- Specifies that the system uses the Local authentication method if the remote
authentication method is not available.
type: bool
state:
description:
- When C(present), ensures the device authentication method exists.
- When C(absent), ensures the device authentication method does not exist.
type: str
choices:
- present
- absent
default: present
update_password:
description:
- C(always) will always update the C(bind_password).
- C(on_create) will only set the C(bind_password) for newly created authentication
mechanisms.
type: str
choices:
- always
- on_create
default: always
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create an LDAP authentication object
bigip_device_auth_ldap:
name: foo
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
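# A second, purely illustrative example (not from the original module docs); the
# server addresses, DNs, credentials and template below are placeholders.
- name: Create an LDAP authentication object with explicit servers and a user template
  bigip_device_auth_ldap:
    servers:
      - 10.10.10.10
      - 10.10.10.11
    port: 389
    remote_directory_tree: "CN=Users,DC=example,DC=local"
    bind_dn: "CN=ldapreader,CN=Users,DC=example,DC=local"
    bind_password: secret
    user_template: "%s@example.local"
    fallback_to_local: yes
    state: present
    provider:
      password: secret
      server: lb.mydomain.com
      user: admin
  delegate_to: localhost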
'''
RETURN = r'''
servers:
description: LDAP servers used by the system to obtain authentication information.
returned: changed
type: list
sample: ['192.168.1.1', '192.168.1.2']
port:
description: The port that the system uses for access to the remote LDAP server.
returned: changed
type: int
sample: 389
remote_directory_tree:
description: File location (tree) of the user authentication database on the server.
returned: changed
type: str
sample: "CN=Users,DC=FOOBAR,DC=LOCAL"
scope:
description: The level of the remote Active Directory or LDAP directory searched for user authentication.
returned: changed
type: str
sample: base
bind_dn:
description: The distinguished name for the Active Directory or LDAP server user ID.
returned: changed
type: str
sample: "[email protected]"
user_template:
description: The distinguished name of the user who is logging on.
returned: changed
type: str
sample: "uid=%s,ou=people,dc=foobar,dc=local"
check_member_attr:
description: The user's member attribute in the remote LDAP or AD group.
returned: changed
type: bool
sample: yes
ssl:
description: Specifies whether the system uses an SSL port to communicate with the LDAP server.
returned: changed
type: str
sample: start-tls
ca_cert:
description: The name of an SSL certificate from a certificate authority.
returned: changed
type: str
sample: My-Trusted-CA-Bundle.crt
client_key:
description: The name of an SSL client key.
returned: changed
type: str
sample: MyKey.key
client_cert:
description: The name of an SSL client certificate.
returned: changed
type: str
sample: MyCert.crt
validate_certs:
description: Indicates if the system checks an SSL peer.
returned: changed
type: bool
sample: yes
login_ldap_attr:
description: The LDAP directory attribute containing the local user name associated with the selected directory entry.
returned: changed
type: str
sample: samaccountname
fallback_to_local:
description: Specifies that the system uses the Local authentication method as fallback
returned: changed
type: bool
sample: yes
'''
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import flatten_boolean
from library.module_utils.network.f5.compare import cmp_str_with_none
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import flatten_boolean
from ansible.module_utils.network.f5.compare import cmp_str_with_none
class Parameters(AnsibleF5Parameters):
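    # api_map translates the camelCase attribute names used by the BIG-IP REST API
    # into this module's snake_case parameter names; api_attributes lists what is
    # sent to the device, while returnables and updatables drive reporting and
    # change detection.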
api_map = {
'bindDn': 'bind_dn',
'bindPw': 'bind_password',
'userTemplate': 'user_template',
'fallback': 'fallback_to_local',
'loginAttribute': 'login_ldap_attr',
'sslCheckPeer': 'validate_certs',
'sslClientCert': 'client_cert',
'sslClientKey': 'client_key',
'sslCaCertFile': 'ca_cert',
'checkRolesGroup': 'check_member_attr',
'searchBaseDn': 'remote_directory_tree',
}
api_attributes = [
'bindDn',
'bindPw',
'checkRolesGroup',
'loginAttribute',
'port',
'scope',
'searchBaseDn',
'servers',
'ssl',
'sslCaCertFile',
'sslCheckPeer',
'sslClientCert',
'sslClientKey',
'userTemplate',
]
returnables = [
'bind_dn',
'bind_password',
'check_member_attr',
'fallback_to_local',
'login_ldap_attr',
'port',
'remote_directory_tree',
'scope',
'servers',
'ssl',
'ca_cert',
'validate_certs',
'client_cert',
'client_key',
'user_template',
]
updatables = [
'bind_dn',
'bind_password',
'check_member_attr',
'fallback_to_local',
'login_ldap_attr',
'port',
'remote_directory_tree',
'scope',
'servers',
'ssl',
'ssl_ca_cert',
'ssl_check_peer',
'ssl_client_cert',
'ssl_client_key',
'user_template',
]
@property
def ssl_ca_cert(self):
if self._values['ssl_ca_cert'] is None:
return None
elif self._values['ssl_ca_cert'] in ['none', '']:
return ''
return fq_name(self.partition, self._values['ssl_ca_cert'])
@property
def ssl_client_key(self):
if self._values['ssl_client_key'] is None:
return None
elif self._values['ssl_client_key'] in ['none', '']:
return ''
return fq_name(self.partition, self._values['ssl_client_key'])
@property
def ssl_client_cert(self):
if self._values['ssl_client_cert'] is None:
return None
elif self._values['ssl_client_cert'] in ['none', '']:
return ''
return fq_name(self.partition, self._values['ssl_client_cert'])
@property
def ssl_check_peer(self):
return flatten_boolean(self._values['ssl_check_peer'])
@property
def fallback_to_local(self):
return flatten_boolean(self._values['fallback_to_local'])
@property
def check_member_attr(self):
return flatten_boolean(self._values['check_member_attr'])
@property
def login_ldap_attr(self):
if self._values['login_ldap_attr'] is None:
return None
elif self._values['login_ldap_attr'] in ['none', '']:
return ''
return self._values['login_ldap_attr']
@property
def user_template(self):
if self._values['user_template'] is None:
return None
elif self._values['user_template'] in ['none', '']:
return ''
return self._values['user_template']
@property
def ssl(self):
if self._values['ssl'] is None:
return None
elif self._values['ssl'] == 'start-tls':
return 'start-tls'
return flatten_boolean(self._values['ssl'])
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
pass
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def ssl_check_peer(self):
if self._values['ssl_check_peer'] is None:
return None
elif self._values['ssl_check_peer'] == 'yes':
return 'enabled'
return 'disabled'
@property
def fallback_to_local(self):
if self._values['fallback_to_local'] is None:
return None
elif self._values['fallback_to_local'] == 'yes':
return 'true'
return 'false'
@property
def check_member_attr(self):
if self._values['check_member_attr'] is None:
return None
elif self._values['check_member_attr'] == 'yes':
return 'enabled'
return 'disabled'
@property
def ssl(self):
if self._values['ssl'] is None:
return None
elif self._values['ssl'] == 'start-tls':
return 'start-tls'
elif self._values['ssl'] == 'yes':
return 'enabled'
return 'disabled'
class ReportableChanges(Changes):
@property
def bind_password(self):
return None
@property
def ssl_check_peer(self):
return flatten_boolean(self._values['ssl_check_peer'])
@property
def check_member_attr(self):
return flatten_boolean(self._values['check_member_attr'])
@property
def ssl(self):
if self._values['ssl'] is None:
return None
elif self._values['ssl'] == 'start-tls':
return 'start-tls'
return flatten_boolean(self._values['ssl'])
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def login_ldap_attr(self):
return cmp_str_with_none(self.want.login_ldap_attr, self.have.login_ldap_attr)
@property
def user_template(self):
return cmp_str_with_none(self.want.user_template, self.have.user_template)
@property
def ssl_ca_cert(self):
return cmp_str_with_none(self.want.ssl_ca_cert, self.have.ssl_ca_cert)
@property
def ssl_client_key(self):
return cmp_str_with_none(self.want.ssl_client_key, self.have.ssl_client_key)
@property
def ssl_client_cert(self):
return cmp_str_with_none(self.want.ssl_client_cert, self.have.ssl_client_cert)
@property
def bind_password(self):
if self.want.bind_password != self.have.bind_password and self.want.update_password == 'always':
return self.want.bind_password
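# ModuleManager drives the module run: it builds the desired state (want),
# reads the current device state (have), computes the difference, and issues
# the REST calls needed to reconcile the two.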
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def update_auth_source_on_device(self, source):
"""Set the system auth source.
Configuring the authentication source is only one step in the process of setting
up an auth source. The other step is to inform the system of the auth source
you want to use.
This method is used for situations where
* The ``use_for_auth`` parameter is set to ``yes``
* The ``use_for_auth`` parameter is set to ``no``
* The ``state`` parameter is set to ``absent``
        When ``state`` is ``absent``, before you can delete the LDAP configuration,
        you must set the system auth to "something else". The system ships with a system
        auth called "local", so this is the logical "something else" to use.
        When ``use_for_auth`` is ``no``, the same situation applies as described for
        ``state`` being ``absent`` above.
        When ``use_for_auth`` is ``yes``, this method will set the current system auth
        state to LDAP.
Arguments:
source (string): The source that you want to set on the device.
"""
params = dict(
type=source
)
uri = 'https://{0}:{1}/mgmt/tm/auth/source/'.format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def update_fallback_on_device(self, fallback):
params = dict(
fallback=fallback
)
uri = 'https://{0}:{1}/mgmt/tm/auth/source/'.format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
if self.exists():
return self.remove()
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
if self.want.fallback_to_local == 'yes':
self.update_fallback_on_device('true')
elif self.want.fallback_to_local == 'no':
self.update_fallback_on_device('false')
return True
def remove(self):
if self.module.check_mode:
return True
self.update_auth_source_on_device('local')
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
if self.want.fallback_to_local == 'yes':
self.update_fallback_on_device('true')
elif self.want.fallback_to_local == 'no':
self.update_fallback_on_device('false')
return True
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/auth/ldap/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name('Common', 'system-auth')
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def create_on_device(self):
params = self.changes.api_params()
params['name'] = 'system-auth'
params['partition'] = 'Common'
uri = "https://{0}:{1}/mgmt/tm/auth/ldap/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 409]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return True
def update_on_device(self):
params = self.changes.api_params()
if not params:
return
uri = "https://{0}:{1}/mgmt/tm/auth/ldap/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name('Common', 'system-auth')
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/auth/ldap/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name('Common', 'system-auth')
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/auth/ldap/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name('Common', 'system-auth')
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
result = ApiParameters(params=response)
uri = 'https://{0}:{1}/mgmt/tm/auth/source/'.format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
result.update({'fallback': response['fallback']})
return result
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
servers=dict(type='list'),
port=dict(type='int'),
remote_directory_tree=dict(),
scope=dict(
choices=['sub', 'one', 'base']
),
bind_dn=dict(),
bind_password=dict(no_log=True),
user_template=dict(),
check_member_attr=dict(type='bool'),
ssl=dict(
choices=['yes', 'no', 'start-tls']
),
ca_cert=dict(aliases=['ssl_ca_cert']),
client_key=dict(aliases=['ssl_client_key']),
client_cert=dict(aliases=['ssl_client_cert']),
validate_certs=dict(type='bool', aliases=['ssl_check_peer']),
login_ldap_attr=dict(),
fallback_to_local=dict(type='bool'),
update_password=dict(
default='always',
choices=['always', 'on_create']
),
state=dict(default='present', choices=['absent', 'present']),
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 |
alissonperez/django-onmydesk | onmydesk/tests/test_commands.py | 1 | 3564 | from datetime import date
from django.test import TestCase
from io import StringIO
from unittest import mock
from django.core import management
from onmydesk.models import Report, Scheduler
class SchedulerProcessTestCase(TestCase):
def setUp(self):
# Cleaning up reports
Report.objects.all().delete()
def my_output_file_handler(filepath):
return filepath
self._patch('onmydesk.models.output_file_handler', my_output_file_handler)
self._mock_report_import_function()
def _mock_report_import_function(self):
self.report_class = mock.MagicMock()
self.report_class.name = 'My Report'
self._patch('onmydesk.models.my_import', return_value=self.report_class)
def _patch(self, *args, **kwargs):
patcher = mock.patch(*args, **kwargs)
thing = patcher.start()
self.addCleanup(patcher.stop)
return thing
def test_call_must_not_out_errors(self):
scheduler = Scheduler(report='my_report_class',
periodicity=Scheduler.PER_MON_SUN)
scheduler.save()
errout = StringIO()
management.call_command('scheduler_process', stderr=errout)
self.assertEqual(len(errout.getvalue()), 0, errout.getvalue())
def test_call_must_call_process_from_scheduler(self):
with mock.patch('onmydesk.models.Scheduler.process') as process_mocked:
scheduler = Scheduler(report='my_report_class',
periodicity=Scheduler.PER_MON_SUN)
scheduler.save()
management.call_command('scheduler_process')
self.assertTrue(process_mocked.called)
def test_call_must_have_first_and_last_message_correct(self):
scheduler = Scheduler(report='my_report_class',
periodicity=Scheduler.PER_MON_SUN)
scheduler.save()
out = StringIO()
management.call_command('scheduler_process', stdout=out)
first_line, *_, last_line, blank_line = out.getvalue().split('\n')
first_message = 'Starting scheduler process'
last_message = 'Scheduler #{} processed'.format(scheduler.id)
self.assertEqual(first_line, first_message)
self.assertEqual(last_line, last_message)
def test_call_must_create_correct_report(self):
scheduler = Scheduler(report='my_report_class',
periodicity=Scheduler.PER_MON_SUN)
scheduler.save()
self.assertEqual(Report.objects.all().count(), 0)
out = StringIO()
management.call_command('scheduler_process', stdout=out)
self.assertEqual(Report.objects.all().count(), 1)
repo = Report.objects.all().first()
self.assertEqual(repo.report, 'my_report_class')
def test_call_must_not_process_schedulers_from_other_day(self):
# Forcing a date on sunday
sunday_date = date(2016, 5, 15)
date_mocked = mock.MagicMock()
date_mocked.today.return_value = sunday_date
        with mock.patch('onmydesk.management.commands.scheduler_process.date',
                        new=date_mocked):
# Creating a report scheduled to monday
scheduler = Scheduler(report='my_report_class',
periodicity=Scheduler.PER_MON)
scheduler.save()
self.assertEqual(Report.objects.all().count(), 0)
management.call_command('scheduler_process')
self.assertEqual(Report.objects.all().count(), 0)
| mit |
EgideCorp/Cover | db.py | 1 | 1710 | import logging
import aiomysql
DB = "follow_the_white_rabbit"
class Db:
def __init__(self, connection):
self.conn = connection
self.logger = logging.getLogger(self.__module__)
@classmethod
async def create_db(cls, loop, user, password, host="127.0.0.1", port=3306):
conn = await aiomysql.connect(host=host, port=port, user=user, password=password, db=DB, loop=loop)
cur = await conn.cursor()
await cur.execute("SHOW TABLES LIKE 'pastes'")
r = await cur.fetchone()
if r is None:
logging.getLogger("Db").info("Tables not found.Creating tables...")
await cur.execute("CREATE TABLE pastes (id INT NOT NULL AUTO_INCREMENT, paste_id VARCHAR(255), expiration VARCHAR(255), vector TEXT, cipher TEXT, PRIMARY KEY (id));")
await conn.commit()
logging.getLogger("Db").info("Done.")
else:
logging.getLogger("Db").info("Tables found.")
await cur.close()
return cls(conn)
async def insert_paste(self, paste_id, paste_data):
cur = await self.conn.cursor()
query = "INSERT INTO pastes(paste_id,expiration,vector,cipher) VALUES('{}','{}','{}','{}');".format(
paste_id,
paste_data['expiration'],
paste_data['vector'],
paste_data['cipher'])
await cur.execute(query)
await self.conn.commit()
await cur.close()
async def retrieve_paste(self, paste_id):
cur = await self.conn.cursor(aiomysql.DictCursor)
await cur.execute("SELECT * FROM pastes WHERE paste_id=%s;", paste_id)
result = await cur.fetchone()
await cur.close()
return result
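# Usage sketch (illustrative only; assumes a reachable MySQL server and example
# credentials/values, none of which are part of this module):
#
#   import asyncio
#
#   async def demo(loop):
#       db = await Db.create_db(loop, user='root', password='secret')
#       await db.insert_paste('abc123', {'expiration': '1d',
#                                        'vector': '<iv>', 'cipher': '<data>'})
#       return await db.retrieve_paste('abc123')
#
#   loop = asyncio.get_event_loop()
#   row = loop.run_until_complete(demo(loop))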
| gpl-3.0 |
slayher/android_kernel_samsung_zerofltetmo | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
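# Example invocation (illustrative): perf script -s syscall-counts-by-pid.py firefox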
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
Cenditel/cenditel.comunidades.cynin | src/ubify.cyninv2theme/ubify/cyninv2theme/portlets/spacemembersportlet.py | 5 | 10508 | ###############################################################################
#cyn.in is an open source Collaborative Knowledge Management Appliance that
#enables teams to seamlessly work together on files, documents and content in
#a secure central environment.
#
#cyn.in v2 an open source appliance is distributed under the GPL v3 license
#along with commercial support options.
#
#cyn.in is a Cynapse Invention.
#
#Copyright (C) 2008 Cynapse India Pvt. Ltd.
#
#This program is free software: you can redistribute it and/or modify it under
#the terms of the GNU General Public License as published by the Free Software
#Foundation, either version 3 of the License, or any later version and observe
#the Additional Terms applicable to this program and must display appropriate
#legal notices. In accordance with Section 7(b) of the GNU General Public
#License version 3, these Appropriate Legal Notices must retain the display of
#the "Powered by cyn.in" AND "A Cynapse Invention" logos. You should have
#received a copy of the detailed Additional Terms License with this program.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
#Public License for more details.
#
#You should have received a copy of the GNU General Public License along with
#this program. If not, see <http://www.gnu.org/licenses/>.
#
#You can contact Cynapse at [email protected] with any problems with cyn.in.
#For any queries regarding the licensing, please send your mails to
# [email protected]
#
#You can also contact Cynapse at:
#802, Building No. 1,
#Dheeraj Sagar, Malad(W)
#Mumbai-400064, India
###############################################################################
from zope import schema
from zope.component import getMultiAdapter
from zope.formlib import form
from zope.interface import implements
from plone.app.portlets.portlets import base
from plone.memoize import ram
from plone.memoize.compress import xhtml_compress
from plone.memoize.instance import memoize
from plone.portlets.interfaces import IPortletDataProvider
from plone.app.portlets.cache import render_cachekey
from Acquisition import aq_inner
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from ubify.policy import CyninMessageFactory as _
class ISpaceMembersPortlet(IPortletDataProvider):
displaytitle = schema.TextLine(title=_(u'Title'),required=False)
count = schema.Int(title=_(u'Number of items to display'),
description=_(u'How many items to list.'),
required=True,
default=5)
role = schema.Choice(title=_(u"Select Role"),
description=_(u"Choose a role of members to be displayed for this Space"),
vocabulary="ubify.cyninv2theme.portlets.ListRoles")
class Assignment(base.Assignment):
implements(ISpaceMembersPortlet)
def __init__(self, count=5, role='Member',displaytitle=''):
self.count = count
        self.resultscount = 0
self.role = role
self.displaytitle = displaytitle
@property
def title(self):
return _(u"Space Members in role " + self.role)
#def _render_cachekey(fun, self):
# if self.anonymous:
# raise ram.DontCache()
# return render_cachekey(fun, self)
class Renderer(base.Renderer):
render = ViewPageTemplateFile('spacemembersportlet.pt')
def __init__(self, *args):
base.Renderer.__init__(self, *args)
context = aq_inner(self.context)
portal_state = getMultiAdapter((context, self.request), name=u'plone_portal_state')
self.anonymous = portal_state.anonymous()
self.portal_url = portal_state.portal_url()
self.typesToShow = 'Document'
plone_tools = getMultiAdapter((context, self.request), name=u'plone_tools')
self.catalog = plone_tools.catalog()
self.moreurl = self.portal_url + "/".join(context.getPhysicalPath()) + "/@@spacemembers?membertype=" + self.data.role
#@ram.cache(_render_cachekey)
#def render(self):
# return xhtml_compress(self._template())
@property
def available(self):
return not self.anonymous and len(self._data())
def results(self):
limit = self.data.count
return self._data()[:limit]
def portlettitle(self):
from vocabularies import roleslist
roleTitle = self.data.displaytitle
found = False
if roleTitle is None:
roleTitle = ""
if roleTitle == "":
for r in roleslist:
if r['id'].lower() == self.data.role.lower():
roleTitle = r['name'] + "s"
found = True
if found == True:
break
return roleTitle
def generate_dict_list(self,listObjects,context):
user_role_map = []
for user in listObjects:
roleslist = context.get_local_roles_for_userid(user)
if len(roleslist) > 0:
user_role_map.append(dict(useritem = user,roles = roleslist ))
else:
roles_groupslist = []
aclusers = context.portal_membership.acl_users
objUser = aclusers.getUserById(user)
listGroups = objUser.getGroups()
for gr in listGroups:
templist = context.get_local_roles_for_userid(gr)
for lst in templist:
if lst not in roleslist:
roles_groupslist.append(lst)
roles_groupslist.sort()
user_role_map.append(dict(useritem = user,roles = roles_groupslist ))
return user_role_map
def checkIfGroupGetUsers(self,listobjects,context):
listdummy = []
pm = context.portal_membership
aclusers = pm.acl_users
for obj in listobjects:
if aclusers.getUserById(obj) is None:
                # no user exists with this id, so treat it as a group and expand its members
                objGroup = aclusers.getGroupById(obj)
                if objGroup is not None:
listtemp = objGroup.listAssignedPrincipals(obj)
for tempobj in listtemp:
if len(tempobj) > 0 and aclusers.getUserById(tempobj[0]):
listdummy.append(tempobj[0])
else:
listdummy.append(obj)
return listdummy
def resultscount(self):
return self.data.resultscount - self.data.count
def concatroles(self,roleslist):
strVal = ""
for obrole in roleslist:
strVal = strVal + obrole + ', '
strVal = str(strVal)
strVal = strVal.rstrip(', ')
return strVal
@memoize
def _data(self):
context = aq_inner(self.context)
objresults = []
role_to_search = self.data.role
listReaders = context.users_with_local_role('Reader')
listReaders = self.checkIfGroupGetUsers(listReaders,context)
listContributors = context.users_with_local_role('Contributor')
listContributors = self.checkIfGroupGetUsers(listContributors,context)
listReviewers = context.users_with_local_role('Reviewer')
listReviewers = self.checkIfGroupGetUsers(listReviewers,context)
listEditors = context.users_with_local_role('Editor')
listEditors = self.checkIfGroupGetUsers(listEditors,context)
if role_to_search == 'Reader':
for user in listReaders:
if user not in listContributors and user not in listReviewers and user not in listEditors:
objresults.append(user)
elif role_to_search == 'Contributor':
for user in listContributors:
if user not in listReviewers and user not in listEditors:
objresults.append(user)
elif role_to_search == 'Reviewer':
for user in listReviewers:
if user not in listEditors:
objresults.append(user)
elif role_to_search == 'Editor':
objresults = listEditors
        else:  # Special case: all members are listed, with Editors (managers) first
listmembers = []
listtemp = []
for userEditor in listEditors:
listtemp.append(userEditor)
listtemp.sort()
for userReviewer in listReviewers:
if userReviewer not in listmembers and userReviewer not in listtemp:
listmembers.append(userReviewer)
for userContributor in listContributors:
if userContributor not in listmembers and userContributor not in listtemp:
listmembers.append(userContributor)
for userReader in listReaders:
if userReader not in listmembers and userReader not in listtemp:
listmembers.append(userReader)
listmembers.sort()
listtemp.extend(listmembers)
objresults.extend(listtemp)
self.data.resultscount = len(objresults)
return self.generate_dict_list(objresults,context)
objresults.sort()
self.data.resultscount = len(objresults)
return self.generate_dict_list(objresults,context)
class AddForm(base.AddForm):
form_fields = form.Fields(ISpaceMembersPortlet)
label = _(u"Add Space Members Portlet")
description = _(u"This portlet displays members for current space.")
def create(self, data):
return Assignment(count=data.get('count', 5),role=data.get('role','Reader'))
class EditForm(base.EditForm):
form_fields = form.Fields(ISpaceMembersPortlet)
label = _(u"Edit Space Members Portlet")
description = _(u"This portlet displays members for current space.") | gpl-3.0 |
lsinfo/odoo | addons/marketing_campaign/__init__.py | 380 | 1087 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import marketing_campaign
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
xuegang/gpdb | src/test/tinc/ext/unittest2/test/test_setups.py | 111 | 16845 | import sys
from cStringIO import StringIO
import unittest2
from unittest2.test.support import resultFactory
class TestSetups(unittest2.TestCase):
def getRunner(self):
return unittest2.TextTestRunner(resultclass=resultFactory,
stream=StringIO())
def runTests(self, *cases):
suite = unittest2.TestSuite()
for case in cases:
tests = unittest2.defaultTestLoader.loadTestsFromTestCase(case)
suite.addTests(tests)
runner = self.getRunner()
# creating a nested suite exposes some potential bugs
realSuite = unittest2.TestSuite()
realSuite.addTest(suite)
# adding empty suites to the end exposes potential bugs
suite.addTest(unittest2.TestSuite())
realSuite.addTest(unittest2.TestSuite())
return runner.run(realSuite)
def test_setup_class(self):
class Test(unittest2.TestCase):
setUpCalled = 0
@classmethod
def setUpClass(cls):
Test.setUpCalled += 1
unittest2.TestCase.setUpClass()
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test)
self.assertEqual(Test.setUpCalled, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_teardown_class(self):
class Test(unittest2.TestCase):
tearDownCalled = 0
@classmethod
def tearDownClass(cls):
Test.tearDownCalled += 1
unittest2.TestCase.tearDownClass()
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test)
self.assertEqual(Test.tearDownCalled, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_teardown_class_two_classes(self):
class Test(unittest2.TestCase):
tearDownCalled = 0
@classmethod
def tearDownClass(cls):
Test.tearDownCalled += 1
unittest2.TestCase.tearDownClass()
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest2.TestCase):
tearDownCalled = 0
@classmethod
def tearDownClass(cls):
Test2.tearDownCalled += 1
unittest2.TestCase.tearDownClass()
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test, Test2)
self.assertEqual(Test.tearDownCalled, 1)
self.assertEqual(Test2.tearDownCalled, 1)
self.assertEqual(result.testsRun, 4)
self.assertEqual(len(result.errors), 0)
def test_error_in_setupclass(self):
class BrokenTest(unittest2.TestCase):
@classmethod
def setUpClass(cls):
raise TypeError('foo')
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(BrokenTest)
self.assertEqual(result.testsRun, 0)
self.assertEqual(len(result.errors), 1)
error, _ = result.errors[0]
self.assertEqual(str(error),
'setUpClass (%s.BrokenTest)' % __name__)
def test_error_in_teardown_class(self):
class Test(unittest2.TestCase):
tornDown = 0
@classmethod
def tearDownClass(cls):
Test.tornDown += 1
raise TypeError('foo')
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest2.TestCase):
tornDown = 0
@classmethod
def tearDownClass(cls):
Test2.tornDown += 1
raise TypeError('foo')
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test, Test2)
self.assertEqual(result.testsRun, 4)
self.assertEqual(len(result.errors), 2)
self.assertEqual(Test.tornDown, 1)
self.assertEqual(Test2.tornDown, 1)
error, _ = result.errors[0]
self.assertEqual(str(error),
'tearDownClass (%s.Test)' % __name__)
def test_class_not_torndown_when_setup_fails(self):
class Test(unittest2.TestCase):
tornDown = False
@classmethod
def setUpClass(cls):
raise TypeError
@classmethod
def tearDownClass(cls):
Test.tornDown = True
raise TypeError('foo')
def test_one(self):
pass
self.runTests(Test)
self.assertFalse(Test.tornDown)
def test_class_not_setup_or_torndown_when_skipped(self):
class Test(unittest2.TestCase):
classSetUp = False
tornDown = False
@classmethod
def setUpClass(cls):
Test.classSetUp = True
@classmethod
def tearDownClass(cls):
Test.tornDown = True
def test_one(self):
pass
Test = unittest2.skip("hop")(Test)
self.runTests(Test)
self.assertFalse(Test.classSetUp)
self.assertFalse(Test.tornDown)
def test_setup_teardown_order_with_pathological_suite(self):
results = []
class Module1(object):
@staticmethod
def setUpModule():
results.append('Module1.setUpModule')
@staticmethod
def tearDownModule():
results.append('Module1.tearDownModule')
class Module2(object):
@staticmethod
def setUpModule():
results.append('Module2.setUpModule')
@staticmethod
def tearDownModule():
results.append('Module2.tearDownModule')
class Test1(unittest2.TestCase):
@classmethod
def setUpClass(cls):
results.append('setup 1')
@classmethod
def tearDownClass(cls):
results.append('teardown 1')
def testOne(self):
results.append('Test1.testOne')
def testTwo(self):
results.append('Test1.testTwo')
class Test2(unittest2.TestCase):
@classmethod
def setUpClass(cls):
results.append('setup 2')
@classmethod
def tearDownClass(cls):
results.append('teardown 2')
def testOne(self):
results.append('Test2.testOne')
def testTwo(self):
results.append('Test2.testTwo')
class Test3(unittest2.TestCase):
@classmethod
def setUpClass(cls):
results.append('setup 3')
@classmethod
def tearDownClass(cls):
results.append('teardown 3')
def testOne(self):
results.append('Test3.testOne')
def testTwo(self):
results.append('Test3.testTwo')
Test1.__module__ = Test2.__module__ = 'Module'
Test3.__module__ = 'Module2'
sys.modules['Module'] = Module1
sys.modules['Module2'] = Module2
first = unittest2.TestSuite((Test1('testOne'),))
second = unittest2.TestSuite((Test1('testTwo'),))
third = unittest2.TestSuite((Test2('testOne'),))
fourth = unittest2.TestSuite((Test2('testTwo'),))
fifth = unittest2.TestSuite((Test3('testOne'),))
sixth = unittest2.TestSuite((Test3('testTwo'),))
suite = unittest2.TestSuite((first, second, third, fourth, fifth, sixth))
runner = self.getRunner()
result = runner.run(suite)
self.assertEqual(result.testsRun, 6)
self.assertEqual(len(result.errors), 0)
self.assertEqual(results,
['Module1.setUpModule', 'setup 1',
'Test1.testOne', 'Test1.testTwo', 'teardown 1',
'setup 2', 'Test2.testOne', 'Test2.testTwo',
'teardown 2', 'Module1.tearDownModule',
'Module2.setUpModule', 'setup 3',
'Test3.testOne', 'Test3.testTwo',
'teardown 3', 'Module2.tearDownModule'])
def test_setup_module(self):
class Module(object):
moduleSetup = 0
@staticmethod
def setUpModule():
Module.moduleSetup += 1
class Test(unittest2.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test)
self.assertEqual(Module.moduleSetup, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_error_in_setup_module(self):
class Module(object):
moduleSetup = 0
moduleTornDown = 0
@staticmethod
def setUpModule():
Module.moduleSetup += 1
raise TypeError('foo')
@staticmethod
def tearDownModule():
Module.moduleTornDown += 1
class Test(unittest2.TestCase):
classSetUp = False
classTornDown = False
@classmethod
def setUpClass(cls):
Test.classSetUp = True
@classmethod
def tearDownClass(cls):
Test.classTornDown = True
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest2.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
Test2.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test, Test2)
self.assertEqual(Module.moduleSetup, 1)
self.assertEqual(Module.moduleTornDown, 0)
self.assertEqual(result.testsRun, 0)
self.assertFalse(Test.classSetUp)
self.assertFalse(Test.classTornDown)
self.assertEqual(len(result.errors), 1)
error, _ = result.errors[0]
self.assertEqual(str(error), 'setUpModule (Module)')
def test_testcase_with_missing_module(self):
class Test(unittest2.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
sys.modules.pop('Module', None)
result = self.runTests(Test)
self.assertEqual(result.testsRun, 2)
def test_teardown_module(self):
class Module(object):
moduleTornDown = 0
@staticmethod
def tearDownModule():
Module.moduleTornDown += 1
class Test(unittest2.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test)
self.assertEqual(Module.moduleTornDown, 1)
self.assertEqual(result.testsRun, 2)
self.assertEqual(len(result.errors), 0)
def test_error_in_teardown_module(self):
class Module(object):
moduleTornDown = 0
@staticmethod
def tearDownModule():
Module.moduleTornDown += 1
raise TypeError('foo')
class Test(unittest2.TestCase):
classSetUp = False
classTornDown = False
@classmethod
def setUpClass(cls):
Test.classSetUp = True
@classmethod
def tearDownClass(cls):
Test.classTornDown = True
def test_one(self):
pass
def test_two(self):
pass
class Test2(unittest2.TestCase):
def test_one(self):
pass
def test_two(self):
pass
Test.__module__ = 'Module'
Test2.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test, Test2)
self.assertEqual(Module.moduleTornDown, 1)
self.assertEqual(result.testsRun, 4)
self.assertTrue(Test.classSetUp)
self.assertTrue(Test.classTornDown)
self.assertEqual(len(result.errors), 1)
error, _ = result.errors[0]
self.assertEqual(str(error), 'tearDownModule (Module)')
def test_skiptest_in_setupclass(self):
class Test(unittest2.TestCase):
@classmethod
def setUpClass(cls):
raise unittest2.SkipTest('foo')
def test_one(self):
pass
def test_two(self):
pass
result = self.runTests(Test)
self.assertEqual(result.testsRun, 0)
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.skipped), 1)
skipped = result.skipped[0][0]
self.assertEqual(str(skipped), 'setUpClass (%s.Test)' % __name__)
def test_skiptest_in_setupmodule(self):
class Test(unittest2.TestCase):
def test_one(self):
pass
def test_two(self):
pass
class Module(object):
@staticmethod
def setUpModule():
raise unittest2.SkipTest('foo')
Test.__module__ = 'Module'
sys.modules['Module'] = Module
result = self.runTests(Test)
self.assertEqual(result.testsRun, 0)
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.skipped), 1)
skipped = result.skipped[0][0]
self.assertEqual(str(skipped), 'setUpModule (Module)')
def test_suite_debug_executes_setups_and_teardowns(self):
ordering = []
class Module(object):
@staticmethod
def setUpModule():
ordering.append('setUpModule')
@staticmethod
def tearDownModule():
ordering.append('tearDownModule')
class Test(unittest2.TestCase):
@classmethod
def setUpClass(cls):
ordering.append('setUpClass')
@classmethod
def tearDownClass(cls):
ordering.append('tearDownClass')
def test_something(self):
ordering.append('test_something')
Test.__module__ = 'Module'
sys.modules['Module'] = Module
suite = unittest2.defaultTestLoader.loadTestsFromTestCase(Test)
suite.debug()
expectedOrder = ['setUpModule', 'setUpClass', 'test_something', 'tearDownClass', 'tearDownModule']
self.assertEqual(ordering, expectedOrder)
def test_suite_debug_propagates_exceptions(self):
class Module(object):
@staticmethod
def setUpModule():
if phase == 0:
raise Exception('setUpModule')
@staticmethod
def tearDownModule():
if phase == 1:
raise Exception('tearDownModule')
class Test(unittest2.TestCase):
@classmethod
def setUpClass(cls):
if phase == 2:
raise Exception('setUpClass')
@classmethod
def tearDownClass(cls):
if phase == 3:
raise Exception('tearDownClass')
def test_something(self):
if phase == 4:
raise Exception('test_something')
Test.__module__ = 'Module'
sys.modules['Module'] = Module
_suite = unittest2.defaultTestLoader.loadTestsFromTestCase(Test)
suite = unittest2.TestSuite()
# nesting a suite again exposes a bug in the initial implementation
suite.addTest(_suite)
messages = ('setUpModule', 'tearDownModule', 'setUpClass', 'tearDownClass', 'test_something')
for phase, msg in enumerate(messages):
self.assertRaisesRegexp(Exception, msg, suite.debug)
| apache-2.0 |
philgyford/django-spectator | tests/core/test_templatetags.py | 1 | 9503 | from distutils.version import StrictVersion
from unittest.mock import Mock, patch
from django import get_version
from django.http import QueryDict
from django.test import TestCase
from .. import make_date
from spectator.core.apps import Apps
from spectator.core.templatetags.spectator_core import (
domain_urlize,
get_enabled_apps,
get_item,
change_object_link_card,
most_read_creators,
most_read_creators_card,
most_visited_venues,
most_visited_venues_card,
query_string,
)
from spectator.core.factories import IndividualCreatorFactory
from spectator.events.factories import MiscEventFactory, VenueFactory
from spectator.reading.factories import (
PublicationFactory,
PublicationRoleFactory,
ReadingFactory,
)
class GetEnabledAppsTestCase(TestCase):
@patch.object(Apps, "all")
def test_results(self, patched_all):
# all() will return an app that is not installed:
patched_all.return_value = ["events", "reading", "NOPE"]
# So 'NOPE' shouldn't be returned here:
enabled_apps = get_enabled_apps()
self.assertEqual(2, len(enabled_apps))
self.assertEqual(enabled_apps[0], "events")
self.assertEqual(enabled_apps[1], "reading")
class GetItemTestCase(TestCase):
def test_key(self):
dict = {"a": 1}
self.assertEqual(get_item(dict, "a"), 1)
def test_key_none(self):
dict = {"a": 1}
self.assertIsNone(get_item(dict, "b"))
class DomainUrlizeTestCase(TestCase):
def test_domain_urlize(self):
self.assertEqual(
domain_urlize("http://www.example.org/foo/"),
'<a href="http://www.example.org/foo/" rel="nofollow">example.org</a>',
)
class ChangeObjectLinkCardTestCase(TestCase):
def test_output_can_change(self):
creator = IndividualCreatorFactory(pk=5)
perms = ["spectator.can_edit_creator"]
result = change_object_link_card(creator, perms)
self.assertTrue(result["display_link"])
if get_version() < StrictVersion("1.9.0"):
self.assertEqual(result["change_url"], "/admin/spectator_core/creator/5/")
else:
self.assertEqual(
result["change_url"], "/admin/spectator_core/creator/5/change/"
)
class QueryStringTestCase(TestCase):
def test_adds_arg(self):
"It adds your key/value to the existing GET string."
context = {"request": Mock(GET=QueryDict("a=1"))}
self.assertIn(
query_string(context, "foo", "bar"), ["foo=bar&a=1", "a=1&foo=bar"]
)
def test_replaces_arg(self):
"It replaces an existing GET arg with what you supply."
context = {"request": Mock(GET=QueryDict("a=1"))}
self.assertEqual(query_string(context, "a", "bar"), "a=bar")
def test_handles_missing_request(self):
"If there's no request object, it doesn't complain."
context = {}
self.assertEqual(query_string(context, "foo", "bar"), "foo=bar")
def test_urlencodes(self):
"It URL-encodes the returned string."
context = {"request": Mock(GET=QueryDict("a=1"))}
self.assertIn(
query_string(context, "foo", "bar&bar"),
["foo=bar%26bar&a=1", "a=1&foo=bar%26bar"],
)
class MostReadCreatorsTestCase(TestCase):
def test_returns_queryset(self):
"It should return 10 items by default."
d = make_date("2017-02-15")
for i in range(11):
c = IndividualCreatorFactory()
pub = PublicationFactory()
PublicationRoleFactory(publication=pub, creator=c, role_name="")
ReadingFactory(publication=pub, start_date=d, end_date=d, is_finished=True)
creators = most_read_creators()
self.assertEqual(len(creators), 10)
def test_num(self):
"It should return `num` items."
d = make_date("2017-02-15")
for i in range(4):
c = IndividualCreatorFactory()
pub = PublicationFactory()
PublicationRoleFactory(publication=pub, creator=c, role_name="")
ReadingFactory(publication=pub, start_date=d, end_date=d, is_finished=True)
creators = most_read_creators(num=3)
self.assertEqual(len(creators), 3)
def test_finished(self):
"It should only return finished readings"
d = make_date("2017-02-15")
# A finished reading
c1 = IndividualCreatorFactory()
pub1 = PublicationFactory()
PublicationRoleFactory(publication=pub1, creator=c1, role_name="")
ReadingFactory(publication=pub1, start_date=d, end_date=d, is_finished=True)
# An unfinished reading
c2 = IndividualCreatorFactory()
pub2 = PublicationFactory()
PublicationRoleFactory(publication=pub2, creator=c2, role_name="")
ReadingFactory(publication=pub2, start_date=d, end_date=d, is_finished=False)
creators = most_read_creators()
self.assertEqual(len(creators), 1)
self.assertEqual(creators[0], c1)
class MostReadCreatorsCardTestCase(TestCase):
def test_returns_correct_data(self):
d = make_date("2017-02-15")
for i in range(2, 13):
c = IndividualCreatorFactory()
pub = PublicationFactory()
PublicationRoleFactory(publication=pub, creator=c, role_name="")
# It'll cut off any with only 1 reading, so:
ReadingFactory.create_batch(
i, publication=pub, start_date=d, end_date=d, is_finished=True
)
data = most_read_creators_card()
self.assertIn("card_title", data)
self.assertIn("score_attr", data)
self.assertIn("object_list", data)
self.assertEqual(data["card_title"], "Most read authors")
self.assertEqual(data["score_attr"], "num_readings")
self.assertEqual(len(data["object_list"]), 10)
def test_num(self):
"It should return `num` items."
d = make_date("2017-02-15")
for i in range(2, 6):
c = IndividualCreatorFactory()
pub = PublicationFactory()
PublicationRoleFactory(publication=pub, creator=c, role_name="")
# It'll cut off any with only 1 reading, so:
ReadingFactory.create_batch(
i, publication=pub, start_date=d, end_date=d, is_finished=True
)
data = most_read_creators_card(num=3)
self.assertIn("object_list", data)
self.assertEqual(len(data["object_list"]), 3)
def test_finished(self):
"It should only return finished readings"
d = make_date("2017-02-15")
# A finished reading
c1 = IndividualCreatorFactory()
pub1 = PublicationFactory()
PublicationRoleFactory(publication=pub1, creator=c1, role_name="")
# It'll cut off any with only 1 reading, so:
ReadingFactory.create_batch(
3, publication=pub1, start_date=d, end_date=d, is_finished=True
)
# Another finished reading (so there's a chart)
c2 = IndividualCreatorFactory()
pub2 = PublicationFactory()
PublicationRoleFactory(publication=pub2, creator=c2, role_name="")
# It'll cut off any with only 1 reading, so:
ReadingFactory.create_batch(
2, publication=pub2, start_date=d, end_date=d, is_finished=True
)
# An unfinished reading
c3 = IndividualCreatorFactory()
pub3 = PublicationFactory()
PublicationRoleFactory(publication=pub3, creator=c3, role_name="")
# It'll cut off any with only 1 reading, so:
ReadingFactory.create_batch(
2, publication=pub3, start_date=d, end_date=d, is_finished=False
)
data = most_read_creators_card()
self.assertIn("object_list", data)
self.assertEqual(len(data["object_list"]), 2)
self.assertEqual(data["object_list"][0], c1)
self.assertEqual(data["object_list"][1], c2)
class MostVisitedVenuesTestCase(TestCase):
def test_returns_queryset(self):
"It should return 10 items by default."
for i in range(11):
MiscEventFactory(venue=VenueFactory())
venues = most_visited_venues()
self.assertEqual(len(venues), 10)
def test_num(self):
"It should return `num` items."
for i in range(4):
MiscEventFactory(venue=VenueFactory())
venues = most_visited_venues(num=3)
self.assertEqual(len(venues), 3)
class MostVisitedVenuesCardTestCase(TestCase):
def test_returns_correct_data(self):
for i in range(2, 13):
            # It'll cut off any with only 1 visit, so:
MiscEventFactory.create_batch(i, venue=VenueFactory())
data = most_visited_venues_card()
self.assertIn("card_title", data)
self.assertIn("score_attr", data)
self.assertIn("object_list", data)
self.assertEqual(data["card_title"], "Most visited venues")
self.assertEqual(data["score_attr"], "num_visits")
self.assertEqual(len(data["object_list"]), 10)
def test_num(self):
"It should return `num` items."
for i in range(2, 6):
            # It'll cut off any with only 1 visit, so:
MiscEventFactory.create_batch(i, venue=VenueFactory())
data = most_visited_venues_card(num=3)
self.assertIn("object_list", data)
self.assertEqual(len(data["object_list"]), 3)
| mit |
drpaneas/linuxed.gr | lib/python2.7/site-packages/pelican/tools/pelican_themes.py | 31 | 7823 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import six
import argparse
import os
import shutil
import sys
try:
import pelican
except ImportError:
    # err() is defined later in this file, so report the failure directly here.
    sys.stderr.write('Cannot import pelican.\nYou must install Pelican in order to run this script.\n')
    sys.exit(-1)
global _THEMES_PATH
_THEMES_PATH = os.path.join(
os.path.dirname(
os.path.abspath(
pelican.__file__
)
),
'themes'
)
__version__ = '0.2'
_BUILTIN_THEMES = ['simple', 'notmyidea']
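# Typical invocations (illustrative):
#   pelican-themes --list
#   pelican-themes --install ./my-theme --verbose
#   pelican-themes --symlink ./my-theme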
def err(msg, die=None):
"""Print an error message and exits if an exit code is given"""
sys.stderr.write(msg + '\n')
if die:
sys.exit((die if type(die) is int else 1))
def main():
"""Main function"""
parser = argparse.ArgumentParser(description="""Install themes for Pelican""")
excl= parser.add_mutually_exclusive_group()
excl.add_argument('-l', '--list', dest='action', action="store_const", const='list',
help="Show the themes already installed and exit")
excl.add_argument('-p', '--path', dest='action', action="store_const", const='path',
help="Show the themes path and exit")
excl.add_argument('-V', '--version', action='version', version='pelican-themes v{0}'.format(__version__),
help='Print the version of this script')
parser.add_argument('-i', '--install', dest='to_install', nargs='+', metavar="theme path",
help='The themes to install')
parser.add_argument('-r', '--remove', dest='to_remove', nargs='+', metavar="theme name",
help='The themes to remove')
parser.add_argument('-U', '--upgrade', dest='to_upgrade', nargs='+',
metavar="theme path", help='The themes to upgrade')
parser.add_argument('-s', '--symlink', dest='to_symlink', nargs='+', metavar="theme path",
help="Same as `--install', but create a symbolic link instead of copying the theme. Useful for theme development")
parser.add_argument('-c', '--clean', dest='clean', action="store_true",
help="Remove the broken symbolic links of the theme path")
parser.add_argument('-v', '--verbose', dest='verbose', action="store_true",
help="Verbose output")
args = parser.parse_args()
to_install = args.to_install or args.to_upgrade
to_sym = args.to_symlink or args.clean
if args.action:
        if args.action == 'list':
            list_themes(args.verbose)
        elif args.action == 'path':
print(_THEMES_PATH)
elif to_install or args.to_remove or to_sym:
if args.to_remove:
if args.verbose:
print('Removing themes...')
for i in args.to_remove:
remove(i, v=args.verbose)
if args.to_install:
if args.verbose:
print('Installing themes...')
for i in args.to_install:
install(i, v=args.verbose)
if args.to_upgrade:
if args.verbose:
print('Upgrading themes...')
for i in args.to_upgrade:
install(i, v=args.verbose, u=True)
if args.to_symlink:
if args.verbose:
print('Linking themes...')
for i in args.to_symlink:
symlink(i, v=args.verbose)
if args.clean:
if args.verbose:
print('Cleaning the themes directory...')
clean(v=args.verbose)
else:
print('No argument given... exiting.')
def themes():
"""Returns the list of the themes"""
for i in os.listdir(_THEMES_PATH):
e = os.path.join(_THEMES_PATH, i)
if os.path.isdir(e):
if os.path.islink(e):
yield (e, os.readlink(e))
else:
yield (e, None)
def list_themes(v=False):
"""Display the list of the themes"""
for t, l in themes():
if not v:
t = os.path.basename(t)
if l:
if v:
print(t + (" (symbolic link to `" + l + "')"))
else:
print(t + '@')
else:
print(t)
def remove(theme_name, v=False):
"""Removes a theme"""
theme_name = theme_name.replace('/','')
target = os.path.join(_THEMES_PATH, theme_name)
if theme_name in _BUILTIN_THEMES:
err(theme_name + ' is a builtin theme.\nYou cannot remove a builtin theme with this script, remove it by hand if you want.')
elif os.path.islink(target):
if v:
print('Removing link `' + target + "'")
os.remove(target)
elif os.path.isdir(target):
if v:
print('Removing directory `' + target + "'")
shutil.rmtree(target)
elif os.path.exists(target):
err(target + ' : not a valid theme')
else:
err(target + ' : no such file or directory')
def install(path, v=False, u=False):
"""Installs a theme"""
if not os.path.exists(path):
err(path + ' : no such file or directory')
elif not os.path.isdir(path):
err(path + ' : not a directory')
else:
theme_name = os.path.basename(os.path.normpath(path))
theme_path = os.path.join(_THEMES_PATH, theme_name)
exists = os.path.exists(theme_path)
if exists and not u:
err(path + ' : already exists')
elif exists and u:
remove(theme_name, v)
install(path, v)
else:
if v:
print("Copying `{p}' to `{t}' ...".format(p=path, t=theme_path))
try:
shutil.copytree(path, theme_path)
try:
if os.name == 'posix':
for root, dirs, files in os.walk(theme_path):
for d in dirs:
dname = os.path.join(root, d)
os.chmod(dname, 493) # 0o755
for f in files:
fname = os.path.join(root, f)
os.chmod(fname, 420) # 0o644
except OSError as e:
err("Cannot change permissions of files or directory in `{r}':\n{e}".format(r=theme_path, e=str(e)), die=False)
except Exception as e:
err("Cannot copy `{p}' to `{t}':\n{e}".format(p=path, t=theme_path, e=str(e)))
def symlink(path, v=False):
"""Symbolically link a theme"""
if not os.path.exists(path):
err(path + ' : no such file or directory')
elif not os.path.isdir(path):
err(path + ' : not a directory')
else:
theme_name = os.path.basename(os.path.normpath(path))
theme_path = os.path.join(_THEMES_PATH, theme_name)
if os.path.exists(theme_path):
err(path + ' : already exists')
else:
if v:
print("Linking `{p}' to `{t}' ...".format(p=path, t=theme_path))
try:
os.symlink(path, theme_path)
except Exception as e:
err("Cannot link `{p}' to `{t}':\n{e}".format(p=path, t=theme_path, e=str(e)))
def is_broken_link(path):
"""Returns True if the path given as is a broken symlink"""
path = os.readlink(path)
return not os.path.exists(path)
def clean(v=False):
"""Removes the broken symbolic links"""
c=0
for path in os.listdir(_THEMES_PATH):
path = os.path.join(_THEMES_PATH, path)
if os.path.islink(path):
if is_broken_link(path):
if v:
print('Removing {0}'.format(path))
try:
os.remove(path)
except OSError as e:
print('Error: cannot remove {0}'.format(path))
else:
c+=1
print("\nRemoved {0} broken links".format(c))
| mit |
chen2aaron/SnirteneCodes | PythonCookbookPractise/chapter8/delegating_attribute_access.py | 1 | 2524 | # 8.15. Delegating Attribute Access
class A:
def spam(self, x):
pass
def foo(self):
pass
class B1:
"""简单的代理"""
def __init__(self):
self._a = A()
def spam(self, x):
# Delegate to the internal self._a instance
return self._a.spam(x)
def foo(self):
# Delegate to the internal self._a instance
return self._a.foo()
def bar(self):
pass
class B2:
"""使用__getattr__的代理,代理方法比较多时候"""
def __init__(self):
self._a = A()
def bar(self):
pass
# Expose all of the methods defined on class A
def __getattr__(self, name):
"""这个方法在访问的attribute不存在的时候被调用
the __getattr__() method is actually a fallback method
that only gets called when an attribute is not found"""
return getattr(self._a, name)
b = B2()
# A proxy class that wraps around another object, but
# exposes its public attributes
class Proxy:
def __init__(self, obj):
self._obj = obj
# Delegate attribute lookup to internal obj
def __getattr__(self, name):
print('getattr:', name)
return getattr(self._obj, name)
# Delegate attribute assignment
def __setattr__(self, name, value):
if name.startswith('_'):
super().__setattr__(name, value)
else:
print('setattr:', name, value)
setattr(self._obj, name, value)
# Delegate attribute deletion
def __delattr__(self, name):
if name.startswith('_'):
super().__delattr__(name)
else:
print('delattr:', name)
delattr(self._obj, name)
class Spam:
def __init__(self, x):
self.x = x
def bar(self, y):
print('Spam.bar:', self.x, y)
s = Spam(2)
p = Proxy(s)
print(p.x)
p.bar(4)
p.x = 33
p.foo = 3
p._dog = 5
print(p.__dict__)
class ListLike:
"""__getattr__对于双下划线开始和结尾的方法是不能用的,需要一个个去重定义"""
def __init__(self):
self._items = []
def __getattr__(self, name):
return getattr(self._items, name)
# Added special methods to support certain list operations
def __len__(self):
return len(self._items)
def __getitem__(self, index):
return self._items[index]
def __setitem__(self, index, value):
self._items[index] = value
def __delitem__(self, index):
del self._items[index]
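# Quick check of the delegation (illustrative):
#   items = ListLike()
#   items.append(1)    # resolved through __getattr__ and delegated to the list
#   items.sort()       # likewise delegated
#   len(items)         # works only because __len__ is defined explicitly above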
| gpl-2.0 |
cfossace/crits | crits/indicators/migrate.py | 13 | 3846 | from crits.core.crits_mongoengine import EmbeddedCampaign
def migrate_indicator(self):
"""
Migrate to the latest schema version.
"""
migrate_2_to_3(self)
def migrate_2_to_3(self):
"""
Migrate from schema 2 to 3.
"""
if self.schema_version < 2:
migrate_1_to_2(self)
if self.schema_version == 2:
from crits.core.core_migrate import migrate_analysis_results
migrate_analysis_results(self)
self.schema_version = 3
self.save()
self.reload()
def migrate_1_to_2(self):
"""
Migrate from schema 1 to 2.
"""
if self.schema_version < 1:
migrate_0_to_1(self)
if self.schema_version == 1:
old_analysis = getattr(self.unsupported_attrs, 'old_analysis', None)
self.activity = []
self.campaign = []
if old_analysis:
# activity
if 'activity' in old_analysis:
for a in old_analysis['activity']:
(analyst, description) = ('', '')
(date, start_date, end_date) = (None, None, None)
if 'analyst' in a:
analyst = a['analyst']
if 'description' in a:
description = a['description']
if 'date' in a:
date = a['date']
if 'start_date' in a:
start_date = a['start_date']
if 'end_date' in a:
end_date = a['end_date']
self.add_activity(
analyst=analyst,
start_date=start_date,
end_date=end_date,
date=date,
description=description
)
# campaign
if 'campaign' in old_analysis:
for c in old_analysis['campaign']:
(analyst, description) = ('', '')
(date, confidence, name) = (None, 'low', '')
                    if 'analyst' not in c:
                        c['analyst'] = analyst
                    if 'description' not in c:
                        c['description'] = description
                    if 'date' not in c:
                        c['date'] = date
                    if 'confidence' not in c:
                        c['confidence'] = confidence
                    if 'name' not in c:
                        c['name'] = name
ec = EmbeddedCampaign(
analyst=c['analyst'],
description=c['description'],
date=c['date'],
confidence=c['confidence'],
name=c['name']
)
self.add_campaign(ec)
# confidence
if 'confidence' in old_analysis:
confidence = old_analysis['confidence']
(analyst, rating) = ('', 'unknown')
if 'analyst' in confidence:
analyst = confidence['analyst']
if 'rating' in confidence:
rating = confidence['rating']
self.set_confidence(analyst=analyst, rating=rating)
# impact
if 'impact' in old_analysis:
impact = old_analysis['impact']
(analyst, rating) = ('', 'unknown')
if 'analyst' in impact:
analyst = impact['analyst']
if 'rating' in impact:
rating = impact['rating']
self.set_impact(analyst=analyst, rating=rating)
self.schema_version = 2
def migrate_0_to_1(self):
"""
Migrate from schema 0 to 1.
"""
if self.schema_version < 1:
self.schema_version = 1
| mit |
lem9/weblate | weblate/wladmin/tests.py | 1 | 4790 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2017 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
import os
from django.conf import settings
from django.core.urlresolvers import reverse
from weblate.trans.tests.test_views import FixtureTestCase
from weblate.trans.util import add_configuration_error
from weblate.trans.tests.utils import get_test_file
from weblate.trans.data import check_data_writable
from weblate.utils.unittest import tempdir_setting
class AdminTest(FixtureTestCase):
"""Test for customized admin interface."""
def setUp(self):
super(AdminTest, self).setUp()
self.user.is_staff = True
self.user.is_superuser = True
self.user.save()
def test_index(self):
response = self.client.get(reverse('admin:index'))
self.assertContains(response, 'SSH')
def test_ssh(self):
response = self.client.get(reverse('admin:ssh'))
self.assertContains(response, 'SSH keys')
@tempdir_setting('DATA_DIR')
def test_ssh_generate(self):
check_data_writable()
response = self.client.get(reverse('admin:ssh'))
self.assertContains(response, 'Generate SSH key')
response = self.client.post(
reverse('admin:ssh'),
{'action': 'generate'}
)
self.assertContains(response, 'Created new SSH key')
@tempdir_setting('DATA_DIR')
def test_ssh_add(self):
check_data_writable()
try:
oldpath = os.environ['PATH']
os.environ['PATH'] = ':'.join(
(get_test_file(''), os.environ['PATH'])
)
            # Verify there is a button for adding
response = self.client.get(reverse('admin:ssh'))
self.assertContains(response, 'Add host key')
# Add the key
response = self.client.post(
reverse('admin:ssh'),
{'action': 'add-host', 'host': 'github.com'}
)
self.assertContains(response, 'Added host key for github.com')
finally:
os.environ['PATH'] = oldpath
# Check the file contains it
hostsfile = os.path.join(settings.DATA_DIR, 'ssh', 'known_hosts')
with open(hostsfile) as handle:
self.assertIn('github.com', handle.read())
    def test_performance(self):
response = self.client.get(reverse('admin:performance'))
self.assertContains(response, 'Django caching')
def test_error(self):
add_configuration_error('Test error', 'FOOOOOOOOOOOOOO')
response = self.client.get(reverse('admin:performance'))
self.assertContains(response, 'FOOOOOOOOOOOOOO')
def test_report(self):
response = self.client.get(reverse('admin:report'))
self.assertContains(response, 'On branch master')
def test_create_project(self):
response = self.client.get(reverse('admin:trans_project_add'))
self.assertContains(response, 'Required fields are marked as bold')
def test_create_subproject(self):
response = self.client.get(reverse('admin:trans_subproject_add'))
self.assertContains(
response, 'Importing a new translation can take some time'
)
def test_subproject(self):
"""Test for custom subproject actions."""
self.assert_custom_admin(
reverse('admin:trans_subproject_changelist')
)
def test_project(self):
"""Test for custom project actions."""
self.assert_custom_admin(
reverse('admin:trans_project_changelist')
)
def assert_custom_admin(self, url):
"""Test for (sub)project custom admin."""
response = self.client.get(url)
self.assertContains(
response, 'Update VCS repository'
)
for action in 'force_commit', 'update_checks', 'update_from_git':
response = self.client.post(
url,
{
'_selected_action': '1',
'action': action,
}
)
self.assertRedirects(response, url)
| gpl-3.0 |
NoyaInRain/tornado | maint/vm/windows/bootstrap.py | 8 | 3262 | #!/usr/bin/env python
r"""Installs files needed for tornado testing on windows.
These instructions are compatible with the VMs provided by http://modern.ie.
The bootstrapping script works on the WinXP/IE6 and Win8/IE10 configurations,
although tornado's tests do not pass on XP.
1) Install virtualbox guest additions (from the device menu in virtualbox)
2) Set up a shared folder to the root of your tornado repo. It must be a
read-write mount to use tox, although the tests can be run directly
in a read-only mount. This will probably assign drive letter E:.
3) Install Python 2.7 from python.org.
4) Run this script by double-clicking it, or running
"c:\python27\python.exe bootstrap.py" in a shell.
To run the tests by hand, cd to e:\ and run
c:\python27\python.exe -m tornado.test.runtests
To run the tests with tox, cd to e:\maint\vm\windows and run
c:\python27\scripts\tox
To run under cygwin (which must be installed separately), run
cd /cygdrive/e; python -m tornado.test.runtests
"""
import os
import subprocess
import sys
import urllib
TMPDIR = r'c:\tornado_bootstrap'
PYTHON_VERSIONS = [
(r'c:\python27\python.exe', 'http://www.python.org/ftp/python/2.7.3/python-2.7.3.msi'),
(r'c:\python36\python.exe', 'http://www.python.org/ftp/python/3.6.0/python-3.6.0.msi'),
]
SCRIPTS_DIR = r'c:\python27\scripts'
EASY_INSTALL = os.path.join(SCRIPTS_DIR, 'easy_install.exe')
PY_PACKAGES = ['tox', 'virtualenv', 'pip']
def download_to_cache(url, local_name=None):
if local_name is None:
local_name = url.split('/')[-1]
filename = os.path.join(TMPDIR, local_name)
if not os.path.exists(filename):
data = urllib.urlopen(url).read()
with open(filename, 'wb') as f:
f.write(data)
return filename
def main():
if not os.path.exists(TMPDIR):
os.mkdir(TMPDIR)
os.chdir(TMPDIR)
for exe, url in PYTHON_VERSIONS:
if os.path.exists(exe):
print("%s already exists, skipping" % exe)
continue
print("Installing %s" % url)
filename = download_to_cache(url)
# http://blog.jaraco.com/2012/01/how-i-install-python-on-windows.html
subprocess.check_call(['msiexec', '/i', filename,
'ALLUSERS=1', '/passive'])
if not os.path.exists(EASY_INSTALL):
filename = download_to_cache('http://python-distribute.org/distribute_setup.py')
subprocess.check_call([sys.executable, filename])
subprocess.check_call([EASY_INSTALL] + PY_PACKAGES)
# cygwin's setup.exe doesn't like being run from a script (looks
# UAC-related). If it did, something like this might install it.
# (install python, python-setuptools, python3, and easy_install
# unittest2 (cygwin's python 2 is 2.6))
#filename = download_to_cache('http://cygwin.com/setup.exe')
#CYGTMPDIR = os.path.join(TMPDIR, 'cygwin')
#if not os.path.exists(CYGTMPDIR):
# os.mkdir(CYGTMPDIR)
## http://www.jbmurphy.com/2011/06/16/powershell-script-to-install-cygwin/
#CYGWIN_ARGS = [filename, '-q', '-l', CYGTMPDIR,
# '-s', 'http://mirror.nyi.net/cygwin/', '-R', r'c:\cygwin']
#subprocess.check_call(CYGWIN_ARGS)
if __name__ == '__main__':
main()
| apache-2.0 |
Teagan42/home-assistant | homeassistant/util/logging.py | 5 | 7075 | """Logging utilities."""
import asyncio
from asyncio.events import AbstractEventLoop
from functools import partial, wraps
import inspect
import logging
import threading
import traceback
from typing import Any, Callable, Coroutine, Optional
class HideSensitiveDataFilter(logging.Filter):
"""Filter API password calls."""
def __init__(self, text: str) -> None:
"""Initialize sensitive data filter."""
super().__init__()
self.text = text
def filter(self, record: logging.LogRecord) -> bool:
"""Hide sensitive data in messages."""
record.msg = record.msg.replace(self.text, "*******")
return True
# pylint: disable=invalid-name
class AsyncHandler:
"""Logging handler wrapper to add an async layer."""
def __init__(self, loop: AbstractEventLoop, handler: logging.Handler) -> None:
"""Initialize async logging handler wrapper."""
self.handler = handler
self.loop = loop
self._queue: asyncio.Queue = asyncio.Queue(loop=loop)
self._thread = threading.Thread(target=self._process)
# Delegate from handler
self.setLevel = handler.setLevel
self.setFormatter = handler.setFormatter
self.addFilter = handler.addFilter
self.removeFilter = handler.removeFilter
self.filter = handler.filter
self.flush = handler.flush
self.handle = handler.handle
self.handleError = handler.handleError
self.format = handler.format
self._thread.start()
def close(self) -> None:
"""Wrap close to handler."""
self.emit(None)
async def async_close(self, blocking: bool = False) -> None:
"""Close the handler.
When blocking=True, will wait till closed.
"""
await self._queue.put(None)
if blocking:
while self._thread.is_alive():
await asyncio.sleep(0)
def emit(self, record: Optional[logging.LogRecord]) -> None:
"""Process a record."""
ident = self.loop.__dict__.get("_thread_ident")
# inside eventloop
if ident is not None and ident == threading.get_ident():
self._queue.put_nowait(record)
# from a thread/executor
else:
self.loop.call_soon_threadsafe(self._queue.put_nowait, record)
def __repr__(self) -> str:
"""Return the string names."""
return str(self.handler)
def _process(self) -> None:
"""Process log in a thread."""
while True:
record = asyncio.run_coroutine_threadsafe(
self._queue.get(), self.loop
).result()
if record is None:
self.handler.close()
return
self.handler.emit(record)
def createLock(self) -> None:
"""Ignore lock stuff."""
pass
def acquire(self) -> None:
"""Ignore lock stuff."""
pass
def release(self) -> None:
"""Ignore lock stuff."""
pass
@property
def level(self) -> int:
"""Wrap property level to handler."""
return self.handler.level
@property
def formatter(self) -> Optional[logging.Formatter]:
"""Wrap property formatter to handler."""
return self.handler.formatter
@property
def name(self) -> str:
"""Wrap property set_name to handler."""
return self.handler.get_name() # type: ignore
@name.setter
def name(self, name: str) -> None:
"""Wrap property get_name to handler."""
self.handler.set_name(name) # type: ignore
def catch_log_exception(
func: Callable[..., Any], format_err: Callable[..., Any], *args: Any
) -> Callable[[], None]:
"""Decorate a callback to catch and log exceptions."""
def log_exception(*args: Any) -> None:
module = inspect.getmodule(inspect.stack()[1][0])
if module is not None:
module_name = module.__name__
else:
# If Python is unable to access the sources files, the call stack frame
# will be missing information, so let's guard.
# https://github.com/home-assistant/home-assistant/issues/24982
module_name = __name__
# Do not print the wrapper in the traceback
frames = len(inspect.trace()) - 1
exc_msg = traceback.format_exc(-frames)
friendly_msg = format_err(*args)
logging.getLogger(module_name).error("%s\n%s", friendly_msg, exc_msg)
# Check for partials to properly determine if coroutine function
check_func = func
while isinstance(check_func, partial):
check_func = check_func.func
wrapper_func = None
if asyncio.iscoroutinefunction(check_func):
@wraps(func)
async def async_wrapper(*args: Any) -> None:
"""Catch and log exception."""
try:
await func(*args)
except Exception: # pylint: disable=broad-except
log_exception(*args)
wrapper_func = async_wrapper
else:
@wraps(func)
def wrapper(*args: Any) -> None:
"""Catch and log exception."""
try:
func(*args)
except Exception: # pylint: disable=broad-except
log_exception(*args)
wrapper_func = wrapper
return wrapper_func
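# A hedged usage sketch (the callback and message below are assumptions, not
# part of this module): wrapping a callback so that any exception it raises is
# logged with a friendly message instead of propagating to the caller.
#
#     def handle_event(event):
#         ...
#
#     safe_handler = catch_log_exception(
#         handle_event, lambda *args: "Error while handling {}".format(args)
#     )
#     safe_handler(event)  # exceptions are caught and logged, not re-raised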
def catch_log_coro_exception(
target: Coroutine[Any, Any, Any], format_err: Callable[..., Any], *args: Any
) -> Coroutine[Any, Any, Any]:
"""Decorate a coroutine to catch and log exceptions."""
async def coro_wrapper(*args: Any) -> Any:
"""Catch and log exception."""
try:
return await target
except Exception: # pylint: disable=broad-except
module = inspect.getmodule(inspect.stack()[1][0])
if module is not None:
module_name = module.__name__
else:
# If Python is unable to access the sources files, the frame
# will be missing information, so let's guard.
# https://github.com/home-assistant/home-assistant/issues/24982
module_name = __name__
# Do not print the wrapper in the traceback
frames = len(inspect.trace()) - 1
exc_msg = traceback.format_exc(-frames)
friendly_msg = format_err(*args)
logging.getLogger(module_name).error("%s\n%s", friendly_msg, exc_msg)
return None
return coro_wrapper()
def async_create_catching_coro(target: Coroutine) -> Coroutine:
"""Wrap a coroutine to catch and log exceptions.
The exception will be logged together with a stacktrace of where the
coroutine was wrapped.
target: target coroutine.
"""
trace = traceback.extract_stack()
wrapped_target = catch_log_coro_exception(
target,
lambda *args: "Exception in {} called from\n {}".format(
target.__name__, # type: ignore
"".join(traceback.format_list(trace[:-1])),
),
)
return wrapped_target
| apache-2.0 |
StevenAston/donkbot | plugins/bf.py | 11 | 2474 | '''brainfuck interpreter adapted from (public domain) code at
http://brainfuck.sourceforge.net/brain.py'''
import re
import random
from util import hook
BUFFER_SIZE = 5000
MAX_STEPS = 1000000
@hook.command
def bf(inp):
".bf <prog> -- executes brainfuck program <prog>"""
program = re.sub('[^][<>+-.,]', '', inp)
# create a dict of brackets pairs, for speed later on
brackets = {}
open_brackets = []
for pos in range(len(program)):
if program[pos] == '[':
open_brackets.append(pos)
elif program[pos] == ']':
if len(open_brackets) > 0:
brackets[pos] = open_brackets[-1]
brackets[open_brackets[-1]] = pos
open_brackets.pop()
else:
return 'unbalanced brackets'
if len(open_brackets) != 0:
return 'unbalanced brackets'
# now we can start interpreting
ip = 0 # instruction pointer
mp = 0 # memory pointer
steps = 0
memory = [0] * BUFFER_SIZE # initial memory area
rightmost = 0
output = "" # we'll save the output here
# the main program loop:
while ip < len(program):
c = program[ip]
if c == '+':
memory[mp] = (memory[mp] + 1) % 256
elif c == '-':
memory[mp] = (memory[mp] - 1) % 256
elif c == '>':
mp += 1
if mp > rightmost:
rightmost = mp
if mp >= len(memory):
# no restriction on memory growth!
memory.extend([0] * BUFFER_SIZE)
elif c == '<':
            mp = (mp - 1) % len(memory)  # wrap the memory pointer around
elif c == '.':
output += chr(memory[mp])
if len(output) > 500:
break
elif c == ',':
memory[mp] = random.randint(1, 255)
elif c == '[':
if memory[mp] == 0:
ip = brackets[ip]
elif c == ']':
if memory[mp] != 0:
ip = brackets[ip]
ip += 1
steps += 1
if steps > MAX_STEPS:
if output == '':
output = '(no output)'
output += '[exceeded %d iterations]' % MAX_STEPS
break
stripped_output = re.sub(r'[\x00-\x1F]', '', output)
if stripped_output == '':
if output != '':
return 'no printable output'
return 'no output'
return stripped_output[:430].decode('utf8', 'ignore')
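# A hedged usage sketch (not part of the original plugin): assuming the
# @hook.command decorator leaves bf() directly callable with a string, this
# classic program should return the letter 'A' -- eight passes of the inner
# loop build 64 in the second cell, then a final '+' makes it 65.
#
#     bf('++++++++[>++++++++<-]>+.')   # expected result: 'A'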
| unlicense |
shifter/rekall | rekall-core/rekall/plugins/darwin/WKdm.py | 8 | 11479 | # Rekall Memory Forensics
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""A WKdm decompressor.
This code is very closely based on the C implementation by
Paul Wilson -- [email protected]
and
Scott F. Kaplan -- [email protected]
from September 1997.
"""
__author__ = "Andreas Moser <[email protected]>"
import itertools
import math
import struct
import time
DICTIONARY_SIZE = 16
TAGS_AREA_OFFSET = 4
TAGS_AREA_SIZE = 64
NUM_LOW_BITS = 10
LOW_BITS_MASK = 0x3FF
ZERO_TAG = 0x0
PARTIAL_TAG = 0x1
MISS_TAG = 0x2
EXACT_TAG = 0x3
# Set up the dictionary before performing compression or
# decompression. Each element is loaded with some value, the
# high-bits version of that value, and a next pointer.
# these are the constants for the hash function lookup table.
# Only zero maps to zero. The rest of the table is the result
# of appending 17 randomizations of 1 to 14. Generated by a Scheme
# script in hash.scm.
HASH_LOOKUP_TABLE_CONTENTS = [
0, 13, 2, 14, 4, 3, 7, 5, 1, 9, 12, 6, 11, 10, 8, 15,
2, 3, 7, 5, 1, 15, 4, 9, 6, 12, 11, 8, 13, 14, 10, 3,
2, 12, 4, 13, 15, 7, 14, 8, 5, 6, 9, 10, 11, 1, 2, 10,
15, 8, 5, 11, 1, 9, 13, 6, 4, 14, 12, 3, 7, 4, 2, 10,
9, 7, 8, 3, 1, 11, 13, 5, 6, 12, 15, 14, 10, 12, 2, 8,
7, 9, 1, 11, 5, 14, 15, 6, 13, 4, 3, 3, 1, 12, 5, 2,
13, 4, 15, 6, 9, 11, 7, 14, 10, 8, 9, 5, 6, 15, 10, 11,
13, 4, 8, 1, 12, 2, 7, 14, 3, 7, 8, 10, 13, 9, 4, 5,
12, 2, 1, 15, 6, 14, 11, 3, 2, 9, 6, 7, 4, 15, 5, 14,
8, 10, 12, 3, 1, 11, 13, 11, 10, 3, 14, 2, 9, 6, 15, 7,
12, 1, 8, 5, 4, 13, 15, 3, 6, 9, 2, 1, 4, 14, 12, 11,
10, 13, 8, 5, 7, 8, 3, 9, 7, 6, 14, 10, 4, 13, 11, 1,
5, 15, 2, 12, 12, 13, 3, 5, 8, 11, 9, 7, 1, 10, 6, 2,
14, 15, 4, 9, 8, 2, 10, 1, 13, 6, 11, 5, 3, 7, 12, 14,
4, 15, 1, 13, 15, 12, 5, 4, 14, 11, 6, 2, 10, 3, 8, 7,
9, 6, 8, 3, 1, 5, 4, 15, 9, 7, 2, 13, 10, 12, 11, 14
]
# /***********************************************************************
# * THE PACKING ROUTINES
# */
def WK_pack_2bits(source_buf):
res = []
it = itertools.izip(*([iter(source_buf)] * 16))
for (in1, in2, in3, in4, in5, in6, in7, in8,
in9, in10, in11, in12, in13, in14, in15, in16) in it:
res.extend([
in1 + (in5 << 2) + (in9 << 4) + (in13 << 6),
in2 + (in6 << 2) + (in10 << 4) + (in14 << 6),
in3 + (in7 << 2) + (in11 << 4) + (in15 << 6),
in4 + (in8 << 2) + (in12 << 4) + (in16 << 6)
])
return res
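# Worked example (illustrative only): the sixteen 2-bit tags
# [0, 1, 2, 3] * 4 pack into the four bytes [0, 85, 170, 255], since e.g. the
# second byte is 1 + (1 << 2) + (1 << 4) + (1 << 6) = 85 (0x55).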
# /* WK_pack_4bits()
# * Pack an even number of words holding 4-bit patterns in the low bits
# * of each byte into half as many words.
# * note: pad out the input with zeroes to an even number of words!
# */
def WK_pack_4bits(source_buf):
res = []
it = itertools.izip(*([iter(source_buf)] * 8))
for in1, in2, in3, in4, in5, in6, in7, in8 in it:
res.extend([
in1 + (in5 << 4),
in2 + (in6 << 4),
in3 + (in7 << 4),
in4 + (in8 << 4)
])
return res
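# Worked example (illustrative only): the eight 4-bit values
# [1, 2, 3, 4, 5, 6, 7, 8] pack into [1 + (5 << 4), 2 + (6 << 4),
# 3 + (7 << 4), 4 + (8 << 4)] = [0x51, 0x62, 0x73, 0x84], i.e. each output
# byte holds one value in its low nibble and another in its high nibble.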
# /* pack_3_tenbits()
# * Pack a sequence of three ten bit items into one word.
# * note: pad out the input with zeroes to an even number of words!
# */
def WK_pack_3_tenbits(source_buf):
packed_input = []
for in1, in2, in3 in itertools.izip(*([iter(source_buf)] * 3)):
packed_input.append(in1 | (in2 << 10) | (in3 << 20))
return packed_input
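# Worked example (illustrative only): the three 10-bit values [1, 2, 3]
# pack into the single word 1 | (2 << 10) | (3 << 20)
# = 1 + 2048 + 3145728 = 3147777.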
# /***************************************************************************
# * THE UNPACKING ROUTINES should GO HERE
# */
# /* WK_unpack_2bits takes any number of words containing 16 two-bit values
# * and unpacks them into four times as many words containg those
# * two bit values as bytes (with the low two bits of each byte holding
# * the actual value.
# */
and3_sh0 = []
and3_sh2 = []
and3_sh4 = []
and3_sh6 = []
and_f = []
sh4_and_f = []
for i in xrange(256):
and3_sh0.append((i >> 0) & 3)
and3_sh2.append((i >> 2) & 3)
and3_sh4.append((i >> 4) & 3)
and3_sh6.append((i >> 6) & 3)
and_f.append(i & 0xf)
sh4_and_f.append((i >> 4) & 0xf)
def WK_unpack_2bits(input_buf):
output = []
for in1, in2, in3, in4 in itertools.izip(*([iter(input_buf)] * 4)):
output.extend([
and3_sh0[in1], and3_sh0[in2], and3_sh0[in3], and3_sh0[in4],
and3_sh2[in1], and3_sh2[in2], and3_sh2[in3], and3_sh2[in4],
and3_sh4[in1], and3_sh4[in2], and3_sh4[in3], and3_sh4[in4],
and3_sh6[in1], and3_sh6[in2], and3_sh6[in3], and3_sh6[in4]
])
return output
# /* unpack four bits consumes any number of words (between input_buf
# * and input_end) holding 8 4-bit values per word, and unpacks them
# * into twice as many words, with each value in a separate byte.
# * (The four-bit values occupy the low halves of the bytes in the
# * result).
# */
def WK_unpack_4bits(input_buf):
output = []
for in1, in2, in3, in4 in itertools.izip(*([iter(input_buf)] * 4)):
output.extend([
and_f[in1],
and_f[in2],
and_f[in3],
and_f[in4],
sh4_and_f[in1],
sh4_and_f[in2],
sh4_and_f[in3],
sh4_and_f[in4]])
return output
# /* unpack_3_tenbits unpacks three 10-bit items from (the low 30 bits of)
# * a 32-bit word
# */
def WK_unpack_3_tenbits(input_buf):
output = []
for in1, in2, in3, in4 in itertools.izip(*([iter(input_buf)] * 4)):
output.extend([
in1 & 0x3FF, (in1 >> 10) & 0x3FF, (in1 >> 20) & 0x3FF,
in2 & 0x3FF, (in2 >> 10) & 0x3FF, (in2 >> 20) & 0x3FF,
in3 & 0x3FF, (in3 >> 10) & 0x3FF, (in3 >> 20) & 0x3FF,
in4 & 0x3FF, (in4 >> 10) & 0x3FF, (in4 >> 20) & 0x3FF
])
return output
def WKdm_compress(src_buf):
dictionary = []
for _ in xrange(DICTIONARY_SIZE):
dictionary.append((1, 0))
hashLookupTable = HASH_LOOKUP_TABLE_CONTENTS
tempTagsArray = []
tempQPosArray = []
# Holds words.
tempLowBitsArray = []
# Holds words.
full_patterns = []
input_words = struct.unpack("I" * (len(src_buf) / 4), src_buf)
for input_word in input_words:
# Equivalent to >> 10.
input_high_bits = input_word / 1024
dict_location = hashLookupTable[input_high_bits % 256]
dict_word, dict_high = dictionary[dict_location]
if (input_word == dict_word):
tempTagsArray.append(EXACT_TAG)
tempQPosArray.append(dict_location)
elif (input_word == 0):
tempTagsArray.append(ZERO_TAG)
else:
if input_high_bits == dict_high:
tempTagsArray.append(PARTIAL_TAG)
tempQPosArray.append(dict_location)
tempLowBitsArray.append((input_word % 1024))
else:
tempTagsArray.append(MISS_TAG)
full_patterns.append(input_word)
dictionary[dict_location] = (input_word, input_high_bits)
qpos_start = len(full_patterns) + TAGS_AREA_OFFSET + (len(src_buf) / 64)
packed_tags = WK_pack_2bits(tempTagsArray)
num_bytes_to_pack = len(tempQPosArray)
num_packed_words = math.ceil(num_bytes_to_pack / 8.0)
num_source_bytes = int(num_packed_words * 8)
tempQPosArray += [0] * (num_source_bytes - len(tempQPosArray))
packed_qp = WK_pack_4bits(tempQPosArray)
low_start = qpos_start + int(num_packed_words)
num_packed_words = len(tempLowBitsArray) / 3
# Align to 3 tenbits.
while len(tempLowBitsArray) % 3:
tempLowBitsArray.append(0)
packed_low = WK_pack_3_tenbits(tempLowBitsArray)
low_end = low_start + len(packed_low)
header = [0, qpos_start, low_start, low_end]
return struct.pack(
"IIII" + # header
"B" * len(packed_tags) +
"I" * len(full_patterns) +
"B" * len(packed_qp) +
"I" * len(packed_low),
* (header + packed_tags + full_patterns + packed_qp + packed_low))
def WKdm_decompress_apple(src_buf):
qpos_start, low_start, low_end = struct.unpack("III", src_buf[:12])
return _WKdm_decompress(src_buf, qpos_start, low_start, low_end, 12)
def WKdm_decompress(src_buf):
qpos_start, low_start, low_end = struct.unpack("III", src_buf[4:16])
return _WKdm_decompress(src_buf, qpos_start, low_start, low_end, 16)
def _WKdm_decompress(src_buf, qpos_start, low_start, low_end, header_size):
if max(qpos_start, low_start, low_end) > len(src_buf):
return None
if qpos_start > low_start or low_start > low_end:
return None
dictionary = [1] * DICTIONARY_SIZE
hashLookupTable = HASH_LOOKUP_TABLE_CONTENTS
tags_str = src_buf[header_size : header_size + 256]
tags_array = WK_unpack_2bits(struct.unpack("B" * len(tags_str), tags_str))
qpos_str = src_buf[qpos_start * 4:low_start * 4]
tempQPosArray = WK_unpack_4bits(
struct.unpack("B" * len(qpos_str), qpos_str))
lowbits_str = src_buf[low_start * 4:low_end * 4]
num_lowbits_bytes = len(lowbits_str)
num_lowbits_words = num_lowbits_bytes / 4
num_packed_lowbits = num_lowbits_words * 3
rem = len(lowbits_str) % 16
if rem:
lowbits_str += "\x00" * (16 - rem)
packed_lowbits = struct.unpack("I" * (len(lowbits_str) / 4), lowbits_str)
tempLowBitsArray = WK_unpack_3_tenbits(packed_lowbits)[:num_packed_lowbits]
patterns_str = src_buf[256 + header_size:qpos_start * 4]
full_patterns = struct.unpack("I" * (len(patterns_str) / 4), patterns_str)
p_tempQPosArray = iter(tempQPosArray)
p_tempLowBitsArray = iter(tempLowBitsArray)
p_full_patterns = iter(full_patterns)
output = []
for tag in tags_array:
if tag == ZERO_TAG:
output.append(0)
elif tag == EXACT_TAG:
output.append(dictionary[p_tempQPosArray.next()])
elif tag == PARTIAL_TAG:
dict_idx = p_tempQPosArray.next()
temp = ((dictionary[dict_idx] / 1024) * 1024)
temp += p_tempLowBitsArray.next()
dictionary[dict_idx] = temp
output.append(temp)
elif tag == MISS_TAG:
missed_word = p_full_patterns.next()
dict_idx = hashLookupTable[(missed_word / 1024) % 256]
dictionary[dict_idx] = missed_word
output.append(missed_word)
for p in [p_tempQPosArray, p_tempLowBitsArray, p_full_patterns]:
for leftover in p:
if leftover != 0:
# Something went wrong, we have leftover data to decompress.
return None
return struct.pack("I" * len(output), *output)
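# A minimal round-trip sketch (an illustration added here, not part of the
# original module): a page of zero words takes the ZERO_TAG path throughout,
# so compressing and then decompressing it should reproduce the input exactly.
if __name__ == '__main__':
    _page = struct.pack("I" * 1024, *([0] * 1024))  # one 4 KiB page of zeroes
    _packed = WKdm_compress(_page)
    assert WKdm_decompress(_packed) == _page
    print "WKdm round-trip ok: %d -> %d bytes" % (len(_page), len(_packed))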
| gpl-2.0 |
ColinIanKing/autotest | client/partition_unittest.py | 6 | 6045 | #!/usr/bin/python
"""Tests for autotest.client.partition."""
__author__ = '[email protected] (Gregory P. Smith)'
import os, sys, unittest
from cStringIO import StringIO
try:
import autotest.common as common
except ImportError:
import common
from autotest.client.shared.test_utils import mock
from autotest.client import partition
class FsOptions_common(object):
def test_constructor(self):
self.assertRaises(ValueError, partition.FsOptions, '', '', '', '')
self.assertRaises(ValueError, partition.FsOptions, 'ext2', '', '', '')
obj = partition.FsOptions('ext2', 'ext2_vanilla', '', '')
obj = partition.FsOptions(fstype='ext2', fs_tag='ext2_vanilla')
obj = partition.FsOptions('fs', 'shortie', 'mkfs opts', 'mount opts')
self.assertEqual('fs', obj.fstype)
self.assertEqual('shortie', obj.fs_tag)
self.assertEqual('mkfs opts', obj.mkfs_flags)
self.assertEqual('mount opts', obj.mount_options)
def test__str__(self):
str_obj = str(partition.FsOptions('abc', 'def', 'ghi', 'jkl'))
self.assert_('FsOptions' in str_obj)
self.assert_('abc' in str_obj)
self.assert_('def' in str_obj)
self.assert_('ghi' in str_obj)
self.assert_('jkl' in str_obj)
# Test data used in GetPartitionTest below.
SAMPLE_SWAPS = """
Filename Type Size Used Priority
/dev/hdc2 partition 9863868 0 -1
"""
SAMPLE_PARTITIONS_HDC_ONLY = """
major minor #blocks name
8 16 390711384 hdc
8 18 530113 hdc2
8 19 390178687 hdc3
"""
# note: a bootable /dev/hdc4 line was manually added to this output to test
# parsing when the Boot flag exists.
SAMPLE_FDISK = "/sbin/fdisk -l -u '/dev/hdc'"
SAMPLE_FDISK_OUTPUT = """
Disk /dev/hdc: 400.0 GB, 400088457216 bytes
255 heads, 63 sectors/track, 48641 cylinders, total 781422768 sectors
Units = sectors of 1 * 512 = 512 bytes
Device Boot Start End Blocks Id System
/dev/hdc2 63 1060289 530113+ 82 Linux swap / Solaris
/dev/hdc3 1060290 781417664 390178687+ 83 Linux
/dev/hdc4 * faketest FAKETEST 232323+ 83 Linux
"""
class get_partition_list_common(object):
def setUp(self):
self.god = mock.mock_god()
self.god.stub_function(os, 'popen')
def tearDown(self):
self.god.unstub_all()
def test_is_linux_fs_type(self):
for unused in xrange(4):
os.popen.expect_call(SAMPLE_FDISK).and_return(
StringIO(SAMPLE_FDISK_OUTPUT))
self.assertFalse(partition.is_linux_fs_type('/dev/hdc1'))
self.assertFalse(partition.is_linux_fs_type('/dev/hdc2'))
self.assertTrue(partition.is_linux_fs_type('/dev/hdc3'))
self.assertTrue(partition.is_linux_fs_type('/dev/hdc4'))
self.god.check_playback()
def test_get_partition_list(self):
def fake_open(filename):
"""Fake open() to pass to get_partition_list as __open."""
if filename == '/proc/swaps':
return StringIO(SAMPLE_SWAPS)
elif filename == '/proc/partitions':
return StringIO(SAMPLE_PARTITIONS_HDC_ONLY)
else:
self.assertFalse("Unexpected open() call: %s" % filename)
job = 'FakeJob'
# Test a filter func that denies all.
parts = partition.get_partition_list(job, filter_func=lambda x: False,
open_func=fake_open)
self.assertEqual([], parts)
self.god.check_playback()
# Test normal operation.
self.god.stub_function(partition, 'partition')
partition.partition.expect_call(job, '/dev/hdc3').and_return('3')
parts = partition.get_partition_list(job, open_func=fake_open)
self.assertEqual(['3'], parts)
self.god.check_playback()
# Test exclude_swap can be disabled.
partition.partition.expect_call(job, '/dev/hdc2').and_return('2')
partition.partition.expect_call(job, '/dev/hdc3').and_return('3')
parts = partition.get_partition_list(job, exclude_swap=False,
open_func=fake_open)
self.assertEqual(['2', '3'], parts)
self.god.check_playback()
# Test that min_blocks works.
partition.partition.expect_call(job, '/dev/hdc3').and_return('3')
parts = partition.get_partition_list(job, min_blocks=600000,
exclude_swap=False,
open_func=fake_open)
self.assertEqual(['3'], parts)
self.god.check_playback()
# we want to run the unit test suite once strictly on the non site specific
# version of partition (ie on base_partition.py) and once on the version
# that would result after the site specific overrides take place in order
# to check that the overrides to not break expected functionality of the
# non site specific code
class FSOptions_base_test(FsOptions_common, unittest.TestCase):
def setUp(self):
sys.modules['autotest.client.site_partition'] = None
reload(partition)
class get_partition_list_base_test(get_partition_list_common, unittest.TestCase):
def setUp(self):
sys.modules['autotest.client.site_partition'] = None
reload(partition)
get_partition_list_common.setUp(self)
class FSOptions_test(FsOptions_common, unittest.TestCase):
def setUp(self):
if 'autotest.client.site_partition' in sys.modules:
del sys.modules['autotest.client.site_partition']
reload(partition)
class get_partition_list_test(get_partition_list_common, unittest.TestCase):
def setUp(self):
if 'autotest.client.site_partition' in sys.modules:
del sys.modules['autotest.client.site_partition']
reload(partition)
get_partition_list_common.setUp(self)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
bop/hybrid | lib/python2.6/site-packages/setuptools/command/rotate.py | 285 | 2062 | import distutils, os
from setuptools import Command
from setuptools.compat import basestring
from distutils.util import convert_path
from distutils import log
from distutils.errors import *
class rotate(Command):
"""Delete older distributions"""
description = "delete older distributions, keeping N newest files"
user_options = [
('match=', 'm', "patterns to match (required)"),
('dist-dir=', 'd', "directory where the distributions are"),
('keep=', 'k', "number of matching distributions to keep"),
]
boolean_options = []
def initialize_options(self):
self.match = None
self.dist_dir = None
self.keep = None
def finalize_options(self):
if self.match is None:
raise DistutilsOptionError(
"Must specify one or more (comma-separated) match patterns "
"(e.g. '.zip' or '.egg')"
)
if self.keep is None:
raise DistutilsOptionError("Must specify number of files to keep")
try:
self.keep = int(self.keep)
except ValueError:
raise DistutilsOptionError("--keep must be an integer")
if isinstance(self.match, basestring):
self.match = [
convert_path(p.strip()) for p in self.match.split(',')
]
self.set_undefined_options('bdist',('dist_dir', 'dist_dir'))
def run(self):
self.run_command("egg_info")
from glob import glob
for pattern in self.match:
pattern = self.distribution.get_name()+'*'+pattern
files = glob(os.path.join(self.dist_dir,pattern))
files = [(os.path.getmtime(f),f) for f in files]
files.sort()
files.reverse()
log.info("%d file(s) matching %s", len(files), pattern)
files = files[self.keep:]
for (t,f) in files:
log.info("Deleting %s", f)
if not self.dry_run:
os.unlink(f)
| gpl-2.0 |
Kubuxu/cjdns | node_build/dependencies/libuv/build/gyp/test/mac/gyptest-ldflags.py | 100 | 2078 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that filenames passed to various linker flags are converted into
build-directory relative paths correctly.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
# The xcode-ninja generator handles gypfiles which are not at the
# project root incorrectly.
# cf. https://code.google.com/p/gyp/issues/detail?id=460
if test.format == 'xcode-ninja':
test.skip_test()
CHDIR = 'ldflags'
test.run_gyp('subdirectory/test.gyp', chdir=CHDIR)
test.build('subdirectory/test.gyp', test.ALL, chdir=CHDIR)
test.pass_test()
# These flags from `man ld` could show up in OTHER_LDFLAGS and need path
# translation.
#
# Done:
# -exported_symbols_list filename
# -unexported_symbols_list file
# -reexported_symbols_list file
# -sectcreate segname sectname file
#
# Will be done on demand:
# -weak_library path_to_library
# -reexport_library path_to_library
# -lazy_library path_to_library
# -upward_library path_to_library
# -syslibroot rootdir
# -framework name[,suffix]
# -weak_framework name[,suffix]
# -reexport_framework name[,suffix]
# -lazy_framework name[,suffix]
# -upward_framework name[,suffix]
# -force_load path_to_archive
# -filelist file[,dirname]
# -dtrace file
# -order_file file # should use ORDER_FILE
# -exported_symbols_order file
# -bundle_loader executable # should use BUNDLE_LOADER
# -alias_list filename
# -seg_addr_table filename
# -dylib_file install_name:file_name
# -interposable_list filename
# -object_path_lto filename
#
#
# obsolete:
# -sectorder segname sectname orderfile
# -seg_addr_table_filename path
#
#
# ??:
# -map map_file_path
# -sub_library library_name
# -sub_umbrella framework_name
| gpl-3.0 |
vasyvas/deepdive | examples/chunking/udf/ext_features.py | 15 | 1120 | #! /usr/bin/env python
import fileinput
import json
import itertools
import sys
def tostr(s):
# In TSV extractor, '\N' is NULL in psql, 'NULL' is NULL in mysql
return '' if s is None or s in ['\N', 'NULL'] else str(s)
# for each word
for row in sys.stdin:
# obj = json.loads(row)
word_id, word1, pos1, word2, pos2 = row.rstrip().split('\t')
features = set()
# sys.stderr.write(str(obj))
# features
w1_word = 'word=' + tostr(word1)
w1_pos = 'pos=' + tostr(pos1)
# if 'w2.word' in obj.keys():
if word2 != 'NULL' and word2 != '\N':
# w2_word = 'prev_word=' + tostr(word2)
w2_pos = 'prev_pos=' + tostr(pos2)
else:
# w2_word = 'prev_word='
w2_pos = 'prev_pos='
#w3_word = 'next_word=' + tostr(obj["words_raw.w3.word"])
#w3_pos = 'next_pos=' + tostr(obj["words_raw.w3.pos"])
features.add(w1_word)
features.add(w1_pos)
features.add(w2_pos)
for f in features:
print '\t'.join([word_id, f, '\N'])
| apache-2.0 |
UpYou/relay | my_gnuradio/gr/benchmark_filters.py | 17 | 2672 | #!/usr/bin/env python
#
# Copyright 2005,2006,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import sys
import time
import random
from optparse import OptionParser
from gnuradio import gr
from gnuradio.eng_option import eng_option
def make_random_complex_tuple(L):
result = []
for x in range(L):
result.append(complex(random.uniform(-1000,1000),
random.uniform(-1000,1000)))
return tuple(result)
def benchmark(name, creator, dec, ntaps, total_test_size, block_size):
block_size = 32768
tb = gr.top_block()
taps = make_random_complex_tuple(ntaps)
src = gr.vector_source_c(make_random_complex_tuple(block_size), True)
head = gr.head(gr.sizeof_gr_complex, int(total_test_size))
op = creator(dec, taps)
dst = gr.null_sink(gr.sizeof_gr_complex)
tb.connect(src, head, op, dst)
start = time.time()
tb.run()
stop = time.time()
delta = stop - start
print "%16s: taps: %4d input: %4g, time: %6.3f taps/sec: %10.4g" % (
name, ntaps, total_test_size, delta, ntaps*total_test_size/delta)
def main():
parser = OptionParser(option_class=eng_option)
parser.add_option("-n", "--ntaps", type="int", default=256)
parser.add_option("-t", "--total-input-size", type="eng_float", default=40e6)
parser.add_option("-b", "--block-size", type="intx", default=50000)
parser.add_option("-d", "--decimation", type="int", default=1)
(options, args) = parser.parse_args()
if len(args) != 0:
parser.print_help()
sys.exit(1)
ntaps = options.ntaps
total_input_size = options.total_input_size
block_size = options.block_size
dec = options.decimation
benchmark("gr.fir_filter_ccc", gr.fir_filter_ccc,
dec, ntaps, total_input_size, block_size)
benchmark("gr.fft_filter_ccc", gr.fft_filter_ccc,
dec, ntaps, total_input_size, block_size)
if __name__ == '__main__':
main()
| gpl-3.0 |
titasakgm/dsimapcloud | js/tools/shrinksafe.py | 293 | 1498 | #!/usr/bin/env python
#
# Script to provide a wrapper around the ShrinkSafe "web service"
# <http://shrinksafe.dojotoolkit.org/>
#
#
# We use this script for two reasons:
#
# * This avoids having to install and configure Java and the standalone
# ShrinkSafe utility.
#
# * The current ShrinkSafe standalone utility was broken when we last
# used it.
#
import sys
import urllib
import urllib2
URL_SHRINK_SAFE = "http://shrinksafe.dojotoolkit.org/shrinksafe.php"
# This would normally be dynamically generated:
BOUNDARY_MARKER = "---------------------------72288400411964641492083565382"
if __name__ == "__main__":
## Grab the source code
try:
sourceFilename = sys.argv[1]
except:
print "Usage: %s (<source filename>|-)" % sys.argv[0]
raise SystemExit
if sourceFilename == "-":
sourceCode = sys.stdin.read()
sourceFilename = "stdin.js"
else:
sourceCode = open(sourceFilename).read()
## Create the request replicating posting of the form from the web page
request = urllib2.Request(url=URL_SHRINK_SAFE)
request.add_header("Content-Type",
"multipart/form-data; boundary=%s" % BOUNDARY_MARKER)
request.add_data("""
--%s
Content-Disposition: form-data; name="shrinkfile[]"; filename="%s"
Content-Type: application/x-javascript
%s
""" % (BOUNDARY_MARKER, sourceFilename, sourceCode))
## Deliver the result
print urllib2.urlopen(request).read(),
| gpl-3.0 |
pmisik/buildbot | master/buildbot/util/backoff.py | 2 | 2713 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import time
from twisted.internet import defer
from buildbot.util import asyncSleep
class BackoffTimeoutExceededError(Exception):
pass
class ExponentialBackoffEngine:
def __init__(self, start_seconds, multiplier, max_wait_seconds):
if start_seconds < 0:
raise ValueError('start_seconds cannot be negative')
if multiplier < 0:
raise ValueError('multiplier cannot be negative')
if max_wait_seconds < 0:
raise ValueError('max_wait_seconds cannot be negative')
self.start_seconds = start_seconds
self.multiplier = multiplier
self.max_wait_seconds = max_wait_seconds
self.on_success()
def on_success(self):
self.current_total_wait_seconds = 0
self.current_wait_seconds = self.start_seconds
def wait_on_failure(self):
raise NotImplementedError()
def calculate_wait_on_failure_seconds(self):
if self.current_total_wait_seconds >= self.max_wait_seconds:
raise BackoffTimeoutExceededError()
seconds = self.current_wait_seconds
self.current_wait_seconds *= self.multiplier
if self.current_total_wait_seconds + seconds < self.max_wait_seconds:
self.current_total_wait_seconds += seconds
else:
seconds = self.max_wait_seconds - self.current_total_wait_seconds
self.current_total_wait_seconds = self.max_wait_seconds
return seconds
class ExponentialBackoffEngineSync(ExponentialBackoffEngine):
def wait_on_failure(self):
seconds = self.calculate_wait_on_failure_seconds()
time.sleep(seconds)
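# A hedged worked example (not from the original module): with
# start_seconds=1, multiplier=2 and max_wait_seconds=10, successive failures
# sleep 1s, 2s, 4s and then 3s (the remainder of the 10s budget); a fifth
# failure raises BackoffTimeoutExceededError.
#
#     engine = ExponentialBackoffEngineSync(1, 2, 10)
#     engine.wait_on_failure()   # sleeps 1s  (total 1s)
#     engine.wait_on_failure()   # sleeps 2s  (total 3s)
#     engine.wait_on_failure()   # sleeps 4s  (total 7s)
#     engine.wait_on_failure()   # sleeps 3s  (total 10s)
#     engine.wait_on_failure()   # raises BackoffTimeoutExceededError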
class ExponentialBackoffEngineAsync(ExponentialBackoffEngine):
def __init__(self, reactor, *args, **kwargs):
super().__init__(*args, **kwargs)
self.reactor = reactor
@defer.inlineCallbacks
def wait_on_failure(self):
seconds = self.calculate_wait_on_failure_seconds()
yield asyncSleep(seconds, reactor=self.reactor)
| gpl-2.0 |
tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/unittest/test/test_runner.py | 37 | 8609 | import unittest
from cStringIO import StringIO
import pickle
from unittest.test.support import (LoggingResult,
ResultWithNoStartTestRunStopTestRun)
class TestCleanUp(unittest.TestCase):
def testCleanUp(self):
class TestableTest(unittest.TestCase):
def testNothing(self):
pass
test = TestableTest('testNothing')
self.assertEqual(test._cleanups, [])
cleanups = []
def cleanup1(*args, **kwargs):
cleanups.append((1, args, kwargs))
def cleanup2(*args, **kwargs):
cleanups.append((2, args, kwargs))
test.addCleanup(cleanup1, 1, 2, 3, four='hello', five='goodbye')
test.addCleanup(cleanup2)
self.assertEqual(test._cleanups,
[(cleanup1, (1, 2, 3), dict(four='hello', five='goodbye')),
(cleanup2, (), {})])
result = test.doCleanups()
self.assertTrue(result)
self.assertEqual(cleanups, [(2, (), {}), (1, (1, 2, 3),
dict(four='hello', five='goodbye'))])
def testCleanUpWithErrors(self):
class TestableTest(unittest.TestCase):
def testNothing(self):
pass
class MockResult(object):
errors = []
def addError(self, test, exc_info):
self.errors.append((test, exc_info))
result = MockResult()
test = TestableTest('testNothing')
test._resultForDoCleanups = result
exc1 = Exception('foo')
exc2 = Exception('bar')
def cleanup1():
raise exc1
def cleanup2():
raise exc2
test.addCleanup(cleanup1)
test.addCleanup(cleanup2)
self.assertFalse(test.doCleanups())
(test1, (Type1, instance1, _)), (test2, (Type2, instance2, _)) = reversed(MockResult.errors)
self.assertEqual((test1, Type1, instance1), (test, Exception, exc1))
self.assertEqual((test2, Type2, instance2), (test, Exception, exc2))
def testCleanupInRun(self):
blowUp = False
ordering = []
class TestableTest(unittest.TestCase):
def setUp(self):
ordering.append('setUp')
if blowUp:
raise Exception('foo')
def testNothing(self):
ordering.append('test')
def tearDown(self):
ordering.append('tearDown')
test = TestableTest('testNothing')
def cleanup1():
ordering.append('cleanup1')
def cleanup2():
ordering.append('cleanup2')
test.addCleanup(cleanup1)
test.addCleanup(cleanup2)
def success(some_test):
self.assertEqual(some_test, test)
ordering.append('success')
result = unittest.TestResult()
result.addSuccess = success
test.run(result)
self.assertEqual(ordering, ['setUp', 'test', 'tearDown',
'cleanup2', 'cleanup1', 'success'])
blowUp = True
ordering = []
test = TestableTest('testNothing')
test.addCleanup(cleanup1)
test.run(result)
self.assertEqual(ordering, ['setUp', 'cleanup1'])
def testTestCaseDebugExecutesCleanups(self):
ordering = []
class TestableTest(unittest.TestCase):
def setUp(self):
ordering.append('setUp')
self.addCleanup(cleanup1)
def testNothing(self):
ordering.append('test')
def tearDown(self):
ordering.append('tearDown')
test = TestableTest('testNothing')
def cleanup1():
ordering.append('cleanup1')
test.addCleanup(cleanup2)
def cleanup2():
ordering.append('cleanup2')
test.debug()
self.assertEqual(ordering, ['setUp', 'test', 'tearDown', 'cleanup1', 'cleanup2'])
class Test_TextTestRunner(unittest.TestCase):
"""Tests for TextTestRunner."""
def test_init(self):
runner = unittest.TextTestRunner()
self.assertFalse(runner.failfast)
self.assertFalse(runner.buffer)
self.assertEqual(runner.verbosity, 1)
self.assertTrue(runner.descriptions)
self.assertEqual(runner.resultclass, unittest.TextTestResult)
def test_multiple_inheritance(self):
class AResult(unittest.TestResult):
def __init__(self, stream, descriptions, verbosity):
super(AResult, self).__init__(stream, descriptions, verbosity)
class ATextResult(unittest.TextTestResult, AResult):
pass
# This used to raise an exception due to TextTestResult not passing
# on arguments in its __init__ super call
ATextResult(None, None, 1)
def testBufferAndFailfast(self):
class Test(unittest.TestCase):
def testFoo(self):
pass
result = unittest.TestResult()
runner = unittest.TextTestRunner(stream=StringIO(), failfast=True,
buffer=True)
# Use our result object
runner._makeResult = lambda: result
runner.run(Test('testFoo'))
self.assertTrue(result.failfast)
self.assertTrue(result.buffer)
def testRunnerRegistersResult(self):
class Test(unittest.TestCase):
def testFoo(self):
pass
originalRegisterResult = unittest.runner.registerResult
def cleanup():
unittest.runner.registerResult = originalRegisterResult
self.addCleanup(cleanup)
result = unittest.TestResult()
runner = unittest.TextTestRunner(stream=StringIO())
# Use our result object
runner._makeResult = lambda: result
self.wasRegistered = 0
def fakeRegisterResult(thisResult):
self.wasRegistered += 1
self.assertEqual(thisResult, result)
unittest.runner.registerResult = fakeRegisterResult
runner.run(unittest.TestSuite())
self.assertEqual(self.wasRegistered, 1)
def test_works_with_result_without_startTestRun_stopTestRun(self):
class OldTextResult(ResultWithNoStartTestRunStopTestRun):
separator2 = ''
def printErrors(self):
pass
class Runner(unittest.TextTestRunner):
def __init__(self):
super(Runner, self).__init__(StringIO())
def _makeResult(self):
return OldTextResult()
runner = Runner()
runner.run(unittest.TestSuite())
def test_startTestRun_stopTestRun_called(self):
class LoggingTextResult(LoggingResult):
separator2 = ''
def printErrors(self):
pass
class LoggingRunner(unittest.TextTestRunner):
def __init__(self, events):
super(LoggingRunner, self).__init__(StringIO())
self._events = events
def _makeResult(self):
return LoggingTextResult(self._events)
events = []
runner = LoggingRunner(events)
runner.run(unittest.TestSuite())
expected = ['startTestRun', 'stopTestRun']
self.assertEqual(events, expected)
def test_pickle_unpickle(self):
# Issue #7197: a TextTestRunner should be (un)pickleable. This is
# required by test_multiprocessing under Windows (in verbose mode).
from StringIO import StringIO as PickleableIO
# cStringIO objects are not pickleable, but StringIO objects are.
stream = PickleableIO("foo")
runner = unittest.TextTestRunner(stream)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
s = pickle.dumps(runner, protocol=protocol)
obj = pickle.loads(s)
# StringIO objects never compare equal, a cheap test instead.
self.assertEqual(obj.stream.getvalue(), stream.getvalue())
def test_resultclass(self):
def MockResultClass(*args):
return args
STREAM = object()
DESCRIPTIONS = object()
VERBOSITY = object()
runner = unittest.TextTestRunner(STREAM, DESCRIPTIONS, VERBOSITY,
resultclass=MockResultClass)
self.assertEqual(runner.resultclass, MockResultClass)
expectedresult = (runner.stream, DESCRIPTIONS, VERBOSITY)
self.assertEqual(runner._makeResult(), expectedresult)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
Lingotek/filesystem-connector | python3/ltk/actions/status_action.py | 2 | 8811 | from ltk.actions.action import *
from tabulate import tabulate
class StatusAction(Action):
def __init__(self, path):
Action.__init__(self, path)
self.uploadWaitTime = 300
def get_status(self, **kwargs):
try:
doc_name = None
detailed = False
if 'detailed' in kwargs and kwargs['detailed']:
detailed = True
if 'doc_name' in kwargs:
doc_name = kwargs['doc_name']
if 'all' in kwargs and kwargs['all']:
self._get_all_status(detailed)
return
else:
doc_ids = self._get_doc_ids(doc_name)
if not doc_ids:
print("No documents to report")
return
for doc_id in doc_ids:
doc_id = self.get_latest_document_version(doc_id) or doc_id
self._get_status_of_doc(doc_id, detailed)
except requests.exceptions.ConnectionError:
logger.warning("Could not connect to Lingotek")
exit()
except ValueError:
logger.warning("Could not connect to Lingotek")
exit()
# Python 3
#except json.decoder.JSONDecodeError:
#logger.warning("Could not connect to Lingotek")
#exit()
# End Python 3
except Exception as e:
log_error(self.error_file_name, e)
logger.warning("Error on requesting status: "+str(e))
def _get_doc_ids(self, doc_name):
if doc_name is not None:
entry = self.doc_manager.get_doc_by_prop('name', doc_name)
try:
doc_ids = [entry['id']]
except TypeError:
raise exceptions.ResourceNotFound("Document name specified for status doesn't exist: {0}".format(doc_name))
else:
doc_ids = self.doc_manager.get_doc_ids()
return doc_ids
def _get_all_status(self, detailed):
response = self.api.list_documents(self.project_id)
if response.status_code == 204:
print("No documents to report")
return
elif response.status_code != 200:
if check_response(response):
raise_error(response.json(), "Failed to get status of documents", True)
else:
raise_error("", "Failed to get status of documents", True)
else:
for entry in response.json()['entities']:
#title = entry['entities'][0]['properties']['title']
#progress = entry['entities'][0]['properties']['progress']
#self._print_status(title, progress)
#if detailed:
# self._print_detailed_status(entry['properties']['id'], title)
self._get_status_of_doc(entry['properties']['id'], detailed)
def _get_status_of_doc(self, doc_id, detailed):
doc_id = self.get_latest_document_version(doc_id) or doc_id
response = self.api.document_status(doc_id)
if response.status_code != 200:
entry = self.doc_manager.get_doc_by_prop('id', doc_id)
if entry:
error_message = "Failed to get status of document "+entry['file_name']
else:
error_message = "Failed to get status of document "+str(doc_id)
if check_response(response):
raise_error(response.json(), error_message, True, doc_id)
else:
self._get_process(entry)
else:
title = response.json()['properties']['title']
progress = response.json()['properties']['progress']
statustext = response.json()['properties']['status'].upper()
self._print_status(title, doc_id, progress, statustext)
if detailed:
self._print_detailed_status(doc_id, title)
def _get_process(self, entry):
if 'process_id' not in entry:
error_message = "Check Lingotek TMS to see if \'"+entry['file_name']+"\' has been deleted or was not properly imported"
raise_error("", "Not Found: "+error_message, True, entry['id'])
return
process_id = entry['process_id']
if process_id == 'imported':#documents added with ltk import -t don't come with a process id, so they get set to 'imported'
error_message = "Check Lingotek TMS to see if \'"+entry['file_name']+"\' has been deleted"
raise_error("", "Not Found: "+error_message, True, entry['id'])
return
response = self.api.get_process(process_id)
if response.status_code == 404:
# The process doesn't exist for some reason
self._failed_entry(entry['id'], entry['name'])
else:
status = response.json()['properties']['status']
progress = response.json()['properties']['progress']
if status.lower() == 'in_progress':
# Process is currently in progress. Replaces need for upload wait time since now we can get the
# current document process progress.
print('Uploading document {0}: {1}% complete'.format(entry['name'], progress))
elif status.lower() == 'completed':
# Process is completed and the document was uploaded to TMS, but there was an error in getting the document status
# Seems to happen when the document is deleted from within TMS
print('Document {0} was imported, but could not be found within TMS. You may need to run ltk clean to update the local database'.format(entry['name']))
else:
# Process has a failed status
self._failed_entry(entry['id'], entry['name'])
def _failed_entry(self, doc_id, name):
error_message = "\'"+name+"\' failed to import properly"
raise_error("", "Not Found: "+error_message, True, doc_id)
self.doc_manager.remove_element(doc_id)
# Process has failed status/does not exist, so document info is
# deleted from the local database
def _print_status(self, title, doc_id, progress, statustext):
print ('{0} ({1}): {2}% ({3})'.format(title, doc_id, progress, statustext))
# print title + ': ' + str(progress) + '%'
# for each doc id, also call /document/id/translation and get % of each locale
def _print_detailed_status(self, doc_id, doc_name):
doc_id = self.get_latest_document_version(doc_id) or doc_id
response = self.api.document_translation_status(doc_id)
if response.status_code != 200:
raise_error(response.json(), 'Failed to get detailed status of document', True, doc_id, doc_name)
try:
# print(response.json())
if 'entities' in response.json():
for entry in response.json()['entities']:
curr_locale = entry['properties']['locale_code']
curr_progress = entry['properties']['percent_complete']
curr_statustext = entry['properties']['status']
# print ('\tlocale: {0} \t percent complete: {1}%'.format(curr_locale, curr_progress))
if 'entities' in entry:
for entity in entry['entities']:
if entity['rel'][0] == 'phases':
if 'entities' in entity:
table = []
for phase in entity['entities']:
phase_name = phase['properties']['name']
phase_order = phase['properties']['order']
phase_percent_complete = phase['properties']['percent_completed']
phase_status = phase['properties']['status']
table.append({"Phase": str(phase_order), "Name": phase_name, "Status": phase_status, "Phase Percent Complete": str(phase_percent_complete) + '%'})
table.sort(key=lambda x: x['Phase'])
print('\n')
print('Locale: {0} \t Total Percent Complete: {1}% ({2})\n'.format(curr_locale, curr_progress, curr_statustext))
# print('Locale: {0} \n'.format(curr_locale))
print(tabulate(table, headers="keys"))
# detailed_status[doc_id] = (curr_locale, curr_progress)
except KeyError as e:
log_error(self.error_file_name, e)
print("Error listing translations")
return
# return detailed_status
| mit |
bhargavz/py-twitter-sentiment-analysis | twitter/FriendsFollowers.py | 1 | 11654 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# FILE: FriendsFollowers.py
#
# Object to request friends and followers of the specified user. This request
# requires cursoring. This really requires throttling because of the number of
# friends/followers are very large. Therefore, throttling this on by default.
#
# Copyright by Author. All rights reserved. Not for reuse without
# express permissions.
#
import sys, time, json, logging
from sochi.twitter.Login import Login
from sochi.twitter.TwitterBase import TwitterBase
from sochi.twitter.auth_settings import *
class FriendsFollowers(TwitterBase):
def __init__(self,
name="FriendsFollowers",
logger=None,
args=(),
kwargs={}):
TwitterBase.__init__(self, name=name, logger=logger,
args=args, kwargs=kwargs)
self.friends_url = "https://api.twitter.com/1.1/friends/ids.json"
self.followers_url = "https://api.twitter.com/1.1/followers/ids.json"
self.cursor_forward = True
self.next_cursor = None
self.prev_cursor = None
self.set_request_type_as_friends()
##
# Sets the domain to the friend search
#
def set_request_type_as_friends(self):
if( not self.querying ):
self.clear_request_params()
self.set_request_domain(self.friends_url)
self.set_rate_limit_resource("friends","ids")
self._set_cursor()
# should *almost always* throttle these friends/followers queries
self.set_throttling(tr=True)
##
# Sets the domain to the friend search
#
def set_request_type_as_followers(self):
if( not self.querying ):
self.clear_request_params()
self.set_request_domain(self.followers_url)
self.set_rate_limit_resource("followers","ids")
self._set_cursor()
# should *almost always* throttle these friends/followers queries
self.set_throttling(tr=True)
##
# Set the user (username/screen name) whose friends/followers will
# be returned
#
def set_username(self, un=None):
if( not self.querying ):
# if setting the username, then unset the user_id
self.set_request_param(kw="screen_name",val=un)
self.set_request_param(kw="user_id",val=None)
self._set_cursor()
##
# Set the user (username/screen name) whose friends/followers will
# be returned
#
def set_screen_name(self, sc=None):
self.set_username(un=sc)
##
# Set the user (user_id) whose friends/followers will be returned
#
def set_user_id(self, uid=None):
if( not self.querying ):
# if setting the user_id, then unset the screen_name
self.set_request_param(kw="user_id",val=str(uid))
self.set_request_param(kw="screen_name",val=None)
self._set_cursor()
##
# Set the count, the number of ids to be returned, current default
# for twitter is 5000 ids per request
#
def set_count(self, c=5000):
if( not self.querying ):
self.set_request_param(kw="count",val=str(c))
##
# Sets the cursor for the current request
#
def _set_cursor(self, cursor="-1"):
if( cursor ):
self.set_request_param(kw="cursor",val=str(cursor))
else:
self.set_request_param(kw="cursor",val=None)
##
#
#
def make_request(self):
# this code is not reentrant, don't make the request twice
if( self.querying ):
return
self.querying = True
self.warning_or_error = False
self.last_warning_message = {}
try:
self.next_cursor = -1
self.prev_cursor = -1
if( self.cursor_forward ):
self._set_cursor(cursor=self.next_cursor)
cursor_end = self.next_cursor
else:
self._set_cursor(cursor=self.prev_cursor)
cursor_end = self.prev_cursor
while( cursor_end ):
self.set_request(domain=self.get_request_domain(),
method="GET",
params=self.get_request_params())
request_results = self._make_request(request=self._request_data)
if( request_results or request_results.text ):
try:
js = request_results.json()
#print "IN make_request() cursor=%d"%(next_cursor)
#print json.dumps(js, sort_keys=True, indent=4)
self.put_message(m=js)
if( "error" in js ):
self.next_cursor = 0
self.prev_cursor = 0
else:
if( "next_cursor" in js ):
self.next_cursor = js['next_cursor']
else:
self.next_cursor = 0
if( "previous_cursor" in js ):
self.prev_cursor = js['previous_cursor']
else:
self.prev_cursor = 0
if( self.cursor_forward ):
self._set_cursor(cursor=self.next_cursor)
cursor_end = self.next_cursor
else:
self._set_cursor(cursor=self.prev_cursor)
cursor_end = self.prev_cursor
except ValueError, e:
mesg = "JSON ValueError: "+str(e)
self.logger.info(mesg)
js = None
cursor_end = 0
else:
cursor_end = 0
self.querying = False
except:
self.querying = False
raise
return
def parse_params(argv):
auth = None
user = None
uname = None
uid = None
count = 0
followers = True
logging = False
json = False
limits = False
pc = 1
while( pc < len(argv) ):
param = argv[pc]
if( param == "-auth"):
pc += 1
auth = argv[pc]
if( param == "-user"):
pc += 1
user = argv[pc]
if( param == "-n"):
pc += 1
uname = argv[pc]
if( param == "-name"):
pc += 1
uname = argv[pc]
if( param == "-id"):
pc += 1
uid = argv[pc]
if( param == "-uid"):
pc += 1
uid = argv[pc]
if( param == "-count"):
pc += 1
count = int(argv[pc])
if( param == "-friends"):
followers = False
if( param == "-followers"):
followers = True
if( param == "-log"):
logging = True
if( param == "-json"):
json = True
if( param == "-limits"):
limits = True
pc += 1
return {'auth':auth, 'user':user,
'followers':followers, 'uid':uid, 'uname':uname, 'count':count,
'logging':logging, 'json':json, 'limits':limits }
#python FriendsFollowers.py -auth INFX547Test01 -user infxtweets -friends -name aplusk
#python FriendsFollowers.py -auth INFX547Test01 -user infxtweets -friends -name apluskTV
#python FriendsFollowers.py -auth INFX547Test01 -user infxtweets -friends -name NatGeo
#python FriendsFollowers.py -auth INFX547Test01 -user infxtweets -friends -name timoreilly
#python FriendsFollowers.py -auth INFX547Test01 -user infxtweets -followers -name dwmcphd -count 5
#python FriendsFollowers.py -auth INFX547Test01 -user infxtweets -followers -name timoreilly
def usage(argv):
print "USAGE: python %s -auth <appname> -user <auth_user> [-friends | -followers] -n <username> | -id <userid> [-count <count_per_request>] [-json]"%(argv[0])
sys.exit(0)
def main(argv):
if len(argv) < 6:
usage(argv)
p = parse_params(argv)
print p
twit = FriendsFollowers()
twit.set_user_agent(agent="random")
twit.set_throttling(True)
if( p['logging'] ):
log_fname = twit.get_preferred_logname()
fmt='[%(asctime)s][%(module)s:%(funcName)s():%(lineno)d] %(levelname)s:%(message)s'
logging.basicConfig(filename=log_fname,format=fmt,level=logging.INFO)
log = logging.getLogger("twit_tools")
if( p['followers'] ):
print "Requesting FOLLOWERS"
twit.set_request_type_as_followers()
else:
print "Requesting FRIENDS"
twit.set_request_type_as_friends()
lg = None
if( not p['auth'] or not p['user'] ):
print "Must have authenticating User and Application!"
usage(argv)
return
if( p['auth'] ):
app = p['auth']
app_keys = TWITTER_APP_OAUTH_PAIR(app=p['auth'])
app_token_fname = TWITTER_APP_TOKEN_FNAME(app=p['auth'])
lg = Login( name="FriendsFollowersLoginObj",
app_name=p['auth'],
app_user=p['user'],
token_fname=app_token_fname)
#lg.set_debug(True)
## Key and secret for specified application
lg.set_consumer_key(consumer_key=app_keys['consumer_key'])
lg.set_consumer_secret(consumer_secret=app_keys['consumer_secret'])
lg.login()
twit.set_auth_obj(obj=lg)
if( p['count']>0 ):
print "Requesting %d IDs per request"%(p['count'])
twit.set_count(p['count'])
if( p['uname'] ):
print "Requesting user:",p['uname']
twit.set_username(p['uname'])
elif( p['uid'] ):
print "Requesting UID:",p['uid']
twit.set_user_id(long(p['uid']))
else:
print "Must supply a username or user id"
return
twit.start_thread()
twit.start_request()
# The request is being made by an asynchronous thread, we need
# to wait until that thread is done before we can see the result.
#
# This convenience routine must be called by a different thread.
# In our case here, we're in the "__main__" thread which can make
# this call and safely wait until the twit thread is done.
twit.wait_request()
if( twit.messages()==0 ):
print "No results from query."
m = None
count = 0
total = 0
while( twit.messages()>0 or twit.query_in_process() ):
m = twit.get_message()
if( m ):
count += 1
#print m
if( p['limits'] ):
print "Limits:",twit.get_rate_limit(),twit._throttling()
if( ("errors" in m) and m['errors'] ):
error = m['errors'][0]
print "\tError %d: %s"%(error['code'],error['message'])
else:
id_list = m['ids']
total = total + len(id_list)
if( p['json'] ):
print json.dumps(m, sort_keys=True, indent=4)
else:
print "Messages: %d"%(count)
print id_list
print "IDs: %d Total IDs: %d"%(len(id_list),total)
if( twit.had_warning() ):
print "WARNING:",twit.get_last_warning()
if( twit.had_error() ):
print "ERROR:",twit.get_last_error()
twit.terminate_thread()
return
if __name__ == '__main__':
main(sys.argv)
| mit |
crosswalk-project/chromium-crosswalk-efl | chrome/common/extensions/docs/server2/servlet.py | 34 | 4476 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class RequestHeaders(object):
'''A custom dictionary implementation for headers which ignores the case
of header names, since different HTTP libraries seem to mangle them.
'''
def __init__(self, dict_):
if isinstance(dict_, RequestHeaders):
self._dict = dict_
else:
self._dict = dict((k.lower(), v) for k, v in dict_.iteritems())
def get(self, key, default=None):
return self._dict.get(key.lower(), default)
def __repr__(self):
return repr(self._dict)
def __str__(self):
return repr(self._dict)
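# Example usage (a minimal sketch; the header names and values are made up):
#   headers = RequestHeaders({'Content-Type': 'text/html'})
#   headers.get('content-type')    # -> 'text/html'
#   headers.get('CONTENT-TYPE')    # -> 'text/html'
#   headers.get('X-Missing', '')   # -> ''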
class Request(object):
'''Request data.
'''
def __init__(self, path, host, headers, arguments={}):
self.path = path.lstrip('/')
self.host = host.rstrip('/')
self.headers = RequestHeaders(headers)
self.arguments = arguments
@staticmethod
def ForTest(path, host=None, headers=None, arguments=None):
return Request(path,
host or 'http://developer.chrome.com',
headers or {},
arguments or {})
def __repr__(self):
return 'Request(path=%s, host=%s, headers=%s)' % (
self.path, self.host, self.headers)
def __str__(self):
return repr(self)
class _ContentBuilder(object):
'''Builds the response content.
'''
def __init__(self):
self._buf = []
def Append(self, content):
if isinstance(content, unicode):
content = content.encode('utf-8', 'replace')
self._buf.append(content)
def ToString(self):
self._Collapse()
return self._buf[0]
def __str__(self):
return self.ToString()
def __len__(self):
return len(self.ToString())
def _Collapse(self):
self._buf = [''.join(self._buf)]
class Response(object):
'''The response from Get().
'''
def __init__(self, content=None, headers=None, status=None):
self.content = _ContentBuilder()
if content is not None:
self.content.Append(content)
self.headers = {}
if headers is not None:
self.headers.update(headers)
self.status = status
@staticmethod
def Ok(content, headers=None):
'''Returns an OK (200) response.
'''
return Response(content=content, headers=headers, status=200)
@staticmethod
def Redirect(url, permanent=False):
'''Returns a redirect (301 or 302) response.
'''
status = 301 if permanent else 302
return Response(headers={'Location': url}, status=status)
@staticmethod
def NotFound(content, headers=None):
'''Returns a not found (404) response.
'''
return Response(content=content, headers=headers, status=404)
@staticmethod
def NotModified(content, headers=None):
return Response(content=content, headers=headers, status=304)
@staticmethod
def InternalError(content, headers=None):
'''Returns an internal error (500) response.
'''
return Response(content=content, headers=headers, status=500)
@staticmethod
def ThrottledError(content, headers=None):
'''Returns an HTTP throttle error (429) response.
'''
return Response(content=content, headers=headers, status=429)
def Append(self, content):
'''Appends |content| to the response content.
'''
self.content.Append(content)
def AddHeader(self, key, value):
'''Adds a header to the response.
'''
self.headers[key] = value
def AddHeaders(self, headers):
'''Adds several headers to the response.
'''
self.headers.update(headers)
def SetStatus(self, status):
self.status = status
def GetRedirect(self):
if self.headers.get('Location') is None:
return (None, None)
return (self.headers.get('Location'), self.status == 301)
def IsNotFound(self):
return self.status == 404
def __eq__(self, other):
return (isinstance(other, self.__class__) and
str(other.content) == str(self.content) and
other.headers == self.headers and
other.status == self.status)
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return 'Response(content=%s bytes, status=%s, headers=%s)' % (
len(self.content), self.status, self.headers)
def __str__(self):
return repr(self)
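# Example usage (a minimal sketch; the URL is illustrative only):
#   response = Response.Redirect('https://example.com/new', permanent=False)
#   response.status           # -> 302
#   response.GetRedirect()    # -> ('https://example.com/new', False)
#   Response.Ok('hello').IsNotFound()   # -> False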
class Servlet(object):
def __init__(self, request):
self._request = request
def Get(self):
'''Returns a Response.
'''
raise NotImplementedError()
| bsd-3-clause |
maxkoryukov/headphones | lib/unidecode/x0b3.py | 253 | 4741 | data = (
'dae', # 0x00
'daeg', # 0x01
'daegg', # 0x02
'daegs', # 0x03
'daen', # 0x04
'daenj', # 0x05
'daenh', # 0x06
'daed', # 0x07
'dael', # 0x08
'daelg', # 0x09
'daelm', # 0x0a
'daelb', # 0x0b
'daels', # 0x0c
'daelt', # 0x0d
'daelp', # 0x0e
'daelh', # 0x0f
'daem', # 0x10
'daeb', # 0x11
'daebs', # 0x12
'daes', # 0x13
'daess', # 0x14
'daeng', # 0x15
'daej', # 0x16
'daec', # 0x17
'daek', # 0x18
'daet', # 0x19
'daep', # 0x1a
'daeh', # 0x1b
'dya', # 0x1c
'dyag', # 0x1d
'dyagg', # 0x1e
'dyags', # 0x1f
'dyan', # 0x20
'dyanj', # 0x21
'dyanh', # 0x22
'dyad', # 0x23
'dyal', # 0x24
'dyalg', # 0x25
'dyalm', # 0x26
'dyalb', # 0x27
'dyals', # 0x28
'dyalt', # 0x29
'dyalp', # 0x2a
'dyalh', # 0x2b
'dyam', # 0x2c
'dyab', # 0x2d
'dyabs', # 0x2e
'dyas', # 0x2f
'dyass', # 0x30
'dyang', # 0x31
'dyaj', # 0x32
'dyac', # 0x33
'dyak', # 0x34
'dyat', # 0x35
'dyap', # 0x36
'dyah', # 0x37
'dyae', # 0x38
'dyaeg', # 0x39
'dyaegg', # 0x3a
'dyaegs', # 0x3b
'dyaen', # 0x3c
'dyaenj', # 0x3d
'dyaenh', # 0x3e
'dyaed', # 0x3f
'dyael', # 0x40
'dyaelg', # 0x41
'dyaelm', # 0x42
'dyaelb', # 0x43
'dyaels', # 0x44
'dyaelt', # 0x45
'dyaelp', # 0x46
'dyaelh', # 0x47
'dyaem', # 0x48
'dyaeb', # 0x49
'dyaebs', # 0x4a
'dyaes', # 0x4b
'dyaess', # 0x4c
'dyaeng', # 0x4d
'dyaej', # 0x4e
'dyaec', # 0x4f
'dyaek', # 0x50
'dyaet', # 0x51
'dyaep', # 0x52
'dyaeh', # 0x53
'deo', # 0x54
'deog', # 0x55
'deogg', # 0x56
'deogs', # 0x57
'deon', # 0x58
'deonj', # 0x59
'deonh', # 0x5a
'deod', # 0x5b
'deol', # 0x5c
'deolg', # 0x5d
'deolm', # 0x5e
'deolb', # 0x5f
'deols', # 0x60
'deolt', # 0x61
'deolp', # 0x62
'deolh', # 0x63
'deom', # 0x64
'deob', # 0x65
'deobs', # 0x66
'deos', # 0x67
'deoss', # 0x68
'deong', # 0x69
'deoj', # 0x6a
'deoc', # 0x6b
'deok', # 0x6c
'deot', # 0x6d
'deop', # 0x6e
'deoh', # 0x6f
'de', # 0x70
'deg', # 0x71
'degg', # 0x72
'degs', # 0x73
'den', # 0x74
'denj', # 0x75
'denh', # 0x76
'ded', # 0x77
'del', # 0x78
'delg', # 0x79
'delm', # 0x7a
'delb', # 0x7b
'dels', # 0x7c
'delt', # 0x7d
'delp', # 0x7e
'delh', # 0x7f
'dem', # 0x80
'deb', # 0x81
'debs', # 0x82
'des', # 0x83
'dess', # 0x84
'deng', # 0x85
'dej', # 0x86
'dec', # 0x87
'dek', # 0x88
'det', # 0x89
'dep', # 0x8a
'deh', # 0x8b
'dyeo', # 0x8c
'dyeog', # 0x8d
'dyeogg', # 0x8e
'dyeogs', # 0x8f
'dyeon', # 0x90
'dyeonj', # 0x91
'dyeonh', # 0x92
'dyeod', # 0x93
'dyeol', # 0x94
'dyeolg', # 0x95
'dyeolm', # 0x96
'dyeolb', # 0x97
'dyeols', # 0x98
'dyeolt', # 0x99
'dyeolp', # 0x9a
'dyeolh', # 0x9b
'dyeom', # 0x9c
'dyeob', # 0x9d
'dyeobs', # 0x9e
'dyeos', # 0x9f
'dyeoss', # 0xa0
'dyeong', # 0xa1
'dyeoj', # 0xa2
'dyeoc', # 0xa3
'dyeok', # 0xa4
'dyeot', # 0xa5
'dyeop', # 0xa6
'dyeoh', # 0xa7
'dye', # 0xa8
'dyeg', # 0xa9
'dyegg', # 0xaa
'dyegs', # 0xab
'dyen', # 0xac
'dyenj', # 0xad
'dyenh', # 0xae
'dyed', # 0xaf
'dyel', # 0xb0
'dyelg', # 0xb1
'dyelm', # 0xb2
'dyelb', # 0xb3
'dyels', # 0xb4
'dyelt', # 0xb5
'dyelp', # 0xb6
'dyelh', # 0xb7
'dyem', # 0xb8
'dyeb', # 0xb9
'dyebs', # 0xba
'dyes', # 0xbb
'dyess', # 0xbc
'dyeng', # 0xbd
'dyej', # 0xbe
'dyec', # 0xbf
'dyek', # 0xc0
'dyet', # 0xc1
'dyep', # 0xc2
'dyeh', # 0xc3
'do', # 0xc4
'dog', # 0xc5
'dogg', # 0xc6
'dogs', # 0xc7
'don', # 0xc8
'donj', # 0xc9
'donh', # 0xca
'dod', # 0xcb
'dol', # 0xcc
'dolg', # 0xcd
'dolm', # 0xce
'dolb', # 0xcf
'dols', # 0xd0
'dolt', # 0xd1
'dolp', # 0xd2
'dolh', # 0xd3
'dom', # 0xd4
'dob', # 0xd5
'dobs', # 0xd6
'dos', # 0xd7
'doss', # 0xd8
'dong', # 0xd9
'doj', # 0xda
'doc', # 0xdb
'dok', # 0xdc
'dot', # 0xdd
'dop', # 0xde
'doh', # 0xdf
'dwa', # 0xe0
'dwag', # 0xe1
'dwagg', # 0xe2
'dwags', # 0xe3
'dwan', # 0xe4
'dwanj', # 0xe5
'dwanh', # 0xe6
'dwad', # 0xe7
'dwal', # 0xe8
'dwalg', # 0xe9
'dwalm', # 0xea
'dwalb', # 0xeb
'dwals', # 0xec
'dwalt', # 0xed
'dwalp', # 0xee
'dwalh', # 0xef
'dwam', # 0xf0
'dwab', # 0xf1
'dwabs', # 0xf2
'dwas', # 0xf3
'dwass', # 0xf4
'dwang', # 0xf5
'dwaj', # 0xf6
'dwac', # 0xf7
'dwak', # 0xf8
'dwat', # 0xf9
'dwap', # 0xfa
'dwah', # 0xfb
'dwae', # 0xfc
'dwaeg', # 0xfd
'dwaegg', # 0xfe
'dwaegs', # 0xff
)
| gpl-3.0 |
zakandrewking/cobrapy | cobra/util/version_info.py | 1 | 1926 | # -*- coding: utf-8 -*-
# Adapted from:
# https://github.com/pandas-dev/pandas/blob/master/pandas/util/_print_versions.py
# which is published under a BSD license.
from __future__ import absolute_import, print_function
from builtins import dict
import platform
import pip
__all__ = ("show_versions",)
SYS_ORDER = [
"OS",
"OS-release",
"Python"
]
PKG_ORDER = [
"pip",
"setuptools",
"cobra",
"future",
"swiglpk",
"optlang",
"ruamel.yaml",
"pandas",
"numpy",
"tabulate",
"python-libsbml",
"lxml",
"scipy",
"matplotlib",
"palettable",
"pymatbridge"
]
def get_sys_info():
"""Returns system information as a dict."""
blob = dict()
blob["OS"] = platform.system()
blob["OS-release"] = platform.release()
blob["Python"] = platform.python_version()
return blob
def get_pkg_info():
"""Returns Python package information as a dict."""
# TODO: Basically copying the requirements from setup.py is brittle;
# we should come up with a better way in the future, for example,
# using requirements files that can be read in.
dependencies = frozenset(PKG_ORDER)
blob = dict()
for dist in pip.get_installed_distributions():
if dist.project_name in dependencies:
blob[dist.project_name] = dist.version
return blob
def show_versions():
"""Print the formatted information to standard out."""
info = get_sys_info()
info.update(get_pkg_info())
format_str = "{:<%d} {:>%d}" % (max(map(len, info)),
max(map(len, info.values())))
print("\nSystem Information")
print("==================")
for name in SYS_ORDER:
print(format_str.format(name, info[name]))
print("\nPackage Versions")
print("================")
for name in PKG_ORDER:
if name in info:
print(format_str.format(name, info[name]))
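# Example output of show_versions() (a rough sketch; the version numbers are
# hypothetical and depend on the local environment):
#
#   System Information
#   ==================
#   OS             Linux
#   OS-release     4.4.0
#   Python         2.7.12
#
#   Package Versions
#   ================
#   pip            9.0.1
#   cobra          0.8.2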
| lgpl-2.1 |
alanjw/GreenOpenERP-Win-X86 | openerp/addons/base/module/wizard/base_module_update.py | 109 | 2185 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class base_module_update(osv.osv_memory):
""" Update Module """
_name = "base.module.update"
_description = "Update Module"
_columns = {
'update': fields.integer('Number of modules updated', readonly=True),
'add': fields.integer('Number of modules added', readonly=True),
'state':fields.selection([('init','init'),('done','done')], 'Status', readonly=True),
}
_defaults = {
'state': 'init',
}
def update_module(self, cr, uid, ids, context=None):
module_obj = self.pool.get('ir.module.module')
update, add = module_obj.update_list(cr, uid,)
self.write(cr, uid, ids, {'update': update, 'add': add, 'state': 'done'}, context=context)
return False
def action_module_open(self, cr, uid, ids, context):
res = {
'domain': str([]),
'name': 'Modules',
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'ir.module.module',
'view_id': False,
'type': 'ir.actions.act_window',
}
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dalegregory/odoo | addons/account_anglo_saxon/product.py | 384 | 3035 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class product_category(osv.osv):
_inherit = "product.category"
_columns = {
'property_account_creditor_price_difference_categ': fields.property(
type='many2one',
relation='account.account',
string="Price Difference Account",
help="This account will be used to value price difference between purchase price and cost price."),
#Redefine fields to change help text for anglo saxon methodology.
'property_account_income_categ': fields.property(
type='many2one',
relation='account.account',
string="Income Account",
help="This account will be used to value outgoing stock using sale price."),
'property_account_expense_categ': fields.property(
type='many2one',
relation='account.account',
string="Expense Account",
help="This account will be used to value outgoing stock using cost price."),
}
class product_template(osv.osv):
_inherit = "product.template"
_columns = {
'property_account_creditor_price_difference': fields.property(
type='many2one',
relation='account.account',
string="Price Difference Account",
help="This account will be used to value price difference between purchase price and cost price."),
#Redefine fields to change help text for anglo saxon methodology.
'property_account_income': fields.property(
type='many2one',
relation='account.account',
string="Income Account",
help="This account will be used to value outgoing stock using sale price."),
'property_account_expense': fields.property(
type='many2one',
relation='account.account',
string="Expense Account",
help="This account will be used to value outgoing stock using cost price."),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
asm0dey/Flexget | flexget/plugins/urlrewrite_torrentleech.py | 5 | 6624 | from __future__ import unicode_literals, division, absolute_import
import re
import urllib
import logging
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.plugins.plugin_urlrewriting import UrlRewritingError
from flexget.utils import requests
from flexget.utils.soup import get_soup
from flexget.utils.search import torrent_availability, normalize_unicode
log = logging.getLogger('torrentleech')
CATEGORIES = {
'all': 0,
# Movies
'Cam': 8,
'TS': 9,
'R5': 10,
'DVDRip': 11,
'DVDR': 12,
'HD': 13,
'BDRip': 14,
'Movie Boxsets': 15,
'Documentaries': 29,
# TV
'Episodes': 26,
'TV Boxsets': 27,
'Episodes HD': 32
}
class UrlRewriteTorrentleech(object):
"""
Torrentleech urlrewriter and search plugin.
torrentleech:
rss_key: xxxxxxxxx (required)
username: xxxxxxxx (required)
password: xxxxxxxx (required)
category: HD
Category is any combination of: all, Cam, TS, R5,
DVDRip, DVDR, HD, BDRip, Movie Boxsets, Documentaries,
Episodes, TV Boxsets, Episodes HD
"""
schema = {
'type': 'object',
'properties': {
'rss_key': {'type': 'string'},
'username': {'type': 'string'},
'password': {'type': 'string'},
'category': one_or_more({
'oneOf': [
{'type': 'integer'},
{'type': 'string', 'enum': list(CATEGORIES)},
]}),
},
'required': ['rss_key', 'username', 'password'],
'additionalProperties': False
}
# urlrewriter API
def url_rewritable(self, task, entry):
url = entry['url']
if url.endswith('.torrent'):
return False
if url.startswith('http://torrentleech.org/'):
return True
return False
# urlrewriter API
def url_rewrite(self, task, entry):
if 'url' not in entry:
log.error("Didn't actually get a URL...")
else:
log.debug("Got the URL: %s" % entry['url'])
if entry['url'].startswith('http://torrentleech.org/torrents/browse/index/query/'):
# use search
results = self.search(entry)
if not results:
raise UrlRewritingError("No search results found")
# TODO: Search doesn't enforce close match to title, be more picky
entry['url'] = results[0]['url']
@plugin.internet(log)
def search(self, entry, config=None):
"""
Search for name from torrentleech.
"""
rss_key = config['rss_key']
# build the form request:
data = {'username': config['username'], 'password': config['password'], 'remember_me': 'on', 'submit': 'submit'}
# POST the login form:
login = requests.post('http://torrentleech.org/', data=data)
if not isinstance(config, dict):
config = {}
# sort = SORT.get(config.get('sort_by', 'seeds'))
# if config.get('sort_reverse'):
# sort += 1
categories = config.get('category', 'all')
# Make sure categories is a list
if not isinstance(categories, list):
categories = [categories]
# If there are any text categories, turn them into their id number
categories = [c if isinstance(c, int) else CATEGORIES[c] for c in categories]
filter_url = '/categories/%s' % ','.join(str(c) for c in categories)
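# For example (sketch): the category 'HD' maps to id 13, so filter_url would
# be '/categories/13'; ['Episodes', 'Episodes HD'] would give '/categories/26,32'.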
entries = set()
for search_string in entry.get('search_strings', [entry['title']]):
query = normalize_unicode(search_string)
# urllib.quote will crash if the unicode string has non ascii characters, so encode in utf-8 beforehand
url = ('http://torrentleech.org/torrents/browse/index/query/' +
urllib.quote(query.encode('utf-8')) + filter_url)
log.debug('Using %s as torrentleech search url' % url)
page = requests.get(url, cookies=login.cookies).content
soup = get_soup(page)
for tr in soup.find_all("tr", ["even", "odd"]):
# within each even or odd row, find the torrent names
link = tr.find("a", attrs={'href': re.compile('/torrent/\d+')})
log.debug('link phase: %s' % link.contents[0])
entry = Entry()
# extracts the contents of the <a>titlename/<a> tag
entry['title'] = link.contents[0]
# find download link
torrent_url = tr.find("a", attrs={'href': re.compile('/download/\d+/.*')}).get('href')
# parse link and split along /download/12345 and /name.torrent
download_url = re.search('(/download/\d+)/(.+\.torrent)', torrent_url)
# change link to rss and splice in rss_key
torrent_url = 'http://torrentleech.org/rss' + download_url.group(1) + '/' + rss_key + '/' + download_url.group(2)
log.debug('RSS-ified download link: %s' % torrent_url)
entry['url'] = torrent_url
# use tr object for seeders/leechers
seeders, leechers = tr.find_all('td', ["seeders", "leechers"])
entry['torrent_seeds'] = int(seeders.contents[0])
entry['torrent_leeches'] = int(leechers.contents[0])
entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches'])
# use tr object for size
size = tr.find("td", text=re.compile('([\.\d]+) ([GMK]?)B')).contents[0]
size = re.search('([\.\d]+) ([GMK]?)B', size)
if size:
if size.group(2) == 'G':
entry['content_size'] = int(float(size.group(1)) * 1000 ** 3 / 1024 ** 2)
elif size.group(2) == 'M':
entry['content_size'] = int(float(size.group(1)) * 1000 ** 2 / 1024 ** 2)
elif size.group(2) == 'K':
entry['content_size'] = int(float(size.group(1)) * 1000 / 1024 ** 2)
else:
entry['content_size'] = int(float(size.group(1)) / 1024 ** 2)
entries.add(entry)
return sorted(entries, reverse=True, key=lambda x: x.get('search_sort'))
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteTorrentleech, 'torrentleech', groups=['urlrewriter', 'search'], api_ver=2)
| mit |
TeslaProject/external_chromium_org | ppapi/generators/idl_thunk.py | 44 | 20936 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Generator for C++ style thunks """
import glob
import os
import re
import sys
from idl_log import ErrOut, InfoOut, WarnOut
from idl_node import IDLAttribute, IDLNode
from idl_ast import IDLAst
from idl_option import GetOption, Option, ParseOptions
from idl_outfile import IDLOutFile
from idl_parser import ParseFiles
from idl_c_proto import CGen, GetNodeComments, CommentLines, Comment
from idl_generator import Generator, GeneratorByFile
Option('thunkroot', 'Base directory of output',
default=os.path.join('..', 'thunk'))
class TGenError(Exception):
def __init__(self, msg):
self.value = msg
def __str__(self):
return repr(self.value)
class ThunkBodyMetadata(object):
"""Metadata about thunk body. Used for selecting which headers to emit."""
def __init__(self):
self._apis = set()
self._builtin_includes = set()
self._includes = set()
def AddApi(self, api):
self._apis.add(api)
def Apis(self):
return self._apis
def AddInclude(self, include):
self._includes.add(include)
def Includes(self):
return self._includes
def AddBuiltinInclude(self, include):
self._builtin_includes.add(include)
def BuiltinIncludes(self):
return self._builtin_includes
def _GetBaseFileName(filenode):
"""Returns the base name for output files, given the filenode.
Examples:
'dev/ppb_find_dev.h' -> 'ppb_find_dev'
'trusted/ppb_buffer_trusted.h' -> 'ppb_buffer_trusted'
"""
path, name = os.path.split(filenode.GetProperty('NAME'))
name = os.path.splitext(name)[0]
return name
def _GetHeaderFileName(filenode):
"""Returns the name for the header for this file."""
path, name = os.path.split(filenode.GetProperty('NAME'))
name = os.path.splitext(name)[0]
if path:
header = "ppapi/c/%s/%s.h" % (path, name)
else:
header = "ppapi/c/%s.h" % name
return header
def _GetThunkFileName(filenode, relpath):
"""Returns the thunk file name."""
path = os.path.split(filenode.GetProperty('NAME'))[0]
name = _GetBaseFileName(filenode)
# We don't reattach the path for thunk.
if relpath: name = os.path.join(relpath, name)
name = '%s%s' % (name, '_thunk.cc')
return name
def _StripFileName(filenode):
"""Strips path and dev, trusted, and private suffixes from the file name."""
api_basename = _GetBaseFileName(filenode)
if api_basename.endswith('_dev'):
api_basename = api_basename[:-len('_dev')]
if api_basename.endswith('_trusted'):
api_basename = api_basename[:-len('_trusted')]
if api_basename.endswith('_private'):
api_basename = api_basename[:-len('_private')]
return api_basename
def _StripApiName(api_name):
"""Strips Dev, Private, and Trusted suffixes from the API name."""
if api_name.endswith('Trusted'):
api_name = api_name[:-len('Trusted')]
if api_name.endswith('_Dev'):
api_name = api_name[:-len('_Dev')]
if api_name.endswith('_Private'):
api_name = api_name[:-len('_Private')]
return api_name
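# For example (a sketch of the intended stripping, with hypothetical names):
#   _StripApiName('PPB_Foo_Dev')      -> 'PPB_Foo'
#   _StripApiName('PPB_Foo_Private')  -> 'PPB_Foo'
#   _StripApiName('PPB_FooTrusted')   -> 'PPB_Foo'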
def _MakeEnterLine(filenode, interface, member, arg, handle_errors, callback,
meta):
"""Returns an EnterInstance/EnterResource string for a function."""
api_name = _StripApiName(interface.GetName()) + '_API'
if member.GetProperty('api'): # Override API name.
manually_provided_api = True
# TODO(teravest): Automatically guess the API header file.
api_name = member.GetProperty('api')
else:
manually_provided_api = False
if arg[0] == 'PP_Instance':
if callback is None:
arg_string = arg[1]
else:
arg_string = '%s, %s' % (arg[1], callback)
if interface.GetProperty('singleton') or member.GetProperty('singleton'):
if not manually_provided_api:
meta.AddApi('ppapi/thunk/%s_api.h' % _StripFileName(filenode))
return 'EnterInstanceAPI<%s> enter(%s);' % (api_name, arg_string)
else:
return 'EnterInstance enter(%s);' % arg_string
elif arg[0] == 'PP_Resource':
enter_type = 'EnterResource<%s>' % api_name
if not manually_provided_api:
meta.AddApi('ppapi/thunk/%s_api.h' % _StripFileName(filenode))
if callback is None:
return '%s enter(%s, %s);' % (enter_type, arg[1],
str(handle_errors).lower())
else:
return '%s enter(%s, %s, %s);' % (enter_type, arg[1],
callback,
str(handle_errors).lower())
else:
raise TGenError("Unknown type for _MakeEnterLine: %s" % arg[0])
def _GetShortName(interface, filter_suffixes):
"""Return a shorter interface name that matches Is* and Create* functions."""
parts = interface.GetName().split('_')[1:]
tail = parts[len(parts) - 1]
if tail in filter_suffixes:
parts = parts[:-1]
return ''.join(parts)
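# For example (sketch with a hypothetical interface name): for an interface
# named 'PPB_FileRef_Dev' with filter_suffixes=['Dev', 'Private'], the parts
# after the leading 'PPB' are ['FileRef', 'Dev']; the 'Dev' tail is dropped,
# giving the short name 'FileRef' (so the type check becomes 'IsFileRef').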
def _IsTypeCheck(interface, node, args):
"""Returns true if node represents a type-checking function."""
if len(args) == 0 or args[0][0] != 'PP_Resource':
return False
return node.GetName() == 'Is%s' % _GetShortName(interface, ['Dev', 'Private'])
def _GetCreateFuncName(interface):
"""Returns the creation function name for an interface."""
return 'Create%s' % _GetShortName(interface, ['Dev'])
def _GetDefaultFailureValue(t):
"""Returns the default failure value for a given type.
Returns None if no default failure value exists for the type.
"""
values = {
'PP_Bool': 'PP_FALSE',
'PP_Resource': '0',
'struct PP_Var': 'PP_MakeUndefined()',
'float': '0.0f',
'int32_t': 'enter.retval()',
'uint16_t': '0',
'uint32_t': '0',
'uint64_t': '0',
'void*': 'NULL'
}
if t in values:
return values[t]
return None
def _MakeCreateMemberBody(interface, member, args):
"""Returns the body of a Create() function.
Args:
interface - IDLNode for the interface
member - IDLNode for member function
args - List of arguments for the Create() function
"""
if args[0][0] == 'PP_Resource':
body = 'Resource* object =\n'
body += ' PpapiGlobals::Get()->GetResourceTracker()->'
body += 'GetResource(%s);\n' % args[0][1]
body += 'if (!object)\n'
body += ' return 0;\n'
body += 'EnterResourceCreation enter(object->pp_instance());\n'
elif args[0][0] == 'PP_Instance':
body = 'EnterResourceCreation enter(%s);\n' % args[0][1]
else:
raise TGenError('Unknown arg type for Create(): %s' % args[0][0])
body += 'if (enter.failed())\n'
body += ' return 0;\n'
arg_list = ', '.join([a[1] for a in args])
if member.GetProperty('create_func'):
create_func = member.GetProperty('create_func')
else:
create_func = _GetCreateFuncName(interface)
body += 'return enter.functions()->%s(%s);' % (create_func,
arg_list)
return body
def _GetOutputParams(member, release):
"""Returns output parameters (and their types) for a member function.
Args:
member - IDLNode for the member function
release - Release to get output parameters for
Returns:
A list of name strings for all output parameters of the member
function.
"""
out_params = []
callnode = member.GetOneOf('Callspec')
if callnode:
cgen = CGen()
for param in callnode.GetListOf('Param'):
mode = cgen.GetParamMode(param)
if mode == 'out':
# We use the 'store' mode when getting the parameter type, since we
# need to call sizeof() for memset().
_, pname, _, _ = cgen.GetComponents(param, release, 'store')
out_params.append(pname)
return out_params
def _MakeNormalMemberBody(filenode, release, node, member, rtype, args,
include_version, meta):
"""Returns the body of a typical function.
Args:
filenode - IDLNode for the file
release - release to generate body for
node - IDLNode for the interface
member - IDLNode for the member function
rtype - Return type for the member function
args - List of 4-tuple arguments for the member function
include_version - whether to include the version in the invocation
meta - ThunkBodyMetadata for header hints
"""
if len(args) == 0:
# Calling into the "Shared" code for the interface seems like a reasonable
# heuristic when we don't have any arguments; some thunk code follows this
# convention today.
meta.AddApi('ppapi/shared_impl/%s_shared.h' % _StripFileName(filenode))
return 'return %s::%s();' % (_StripApiName(node.GetName()) + '_Shared',
member.GetName())
is_callback_func = args[len(args) - 1][0] == 'struct PP_CompletionCallback'
if is_callback_func:
call_args = args[:-1] + [('', 'enter.callback()', '', '')]
meta.AddInclude('ppapi/c/pp_completion_callback.h')
else:
call_args = args
if args[0][0] == 'PP_Instance':
call_arglist = ', '.join(a[1] for a in call_args)
function_container = 'functions'
elif args[0][0] == 'PP_Resource':
call_arglist = ', '.join(a[1] for a in call_args[1:])
function_container = 'object'
else:
# Calling into the "Shared" code for the interface seems like a reasonable
# heuristic when the first argument isn't a PP_Instance or a PP_Resource;
# some thunk code follows this convention today.
meta.AddApi('ppapi/shared_impl/%s_shared.h' % _StripFileName(filenode))
return 'return %s::%s(%s);' % (_StripApiName(node.GetName()) + '_Shared',
member.GetName(),
', '.join(a[1] for a in args))
function_name = member.GetName()
if include_version:
version = node.GetVersion(release).replace('.', '_')
function_name += version
invocation = 'enter.%s()->%s(%s)' % (function_container,
function_name,
call_arglist)
handle_errors = not (member.GetProperty('report_errors') == 'False')
out_params = _GetOutputParams(member, release)
if is_callback_func:
body = '%s\n' % _MakeEnterLine(filenode, node, member, args[0],
handle_errors, args[len(args) - 1][1], meta)
failure_value = member.GetProperty('on_failure')
if failure_value is None:
failure_value = 'enter.retval()'
failure_return = 'return %s;' % failure_value
success_return = 'return enter.SetResult(%s);' % invocation
elif rtype == 'void':
body = '%s\n' % _MakeEnterLine(filenode, node, member, args[0],
handle_errors, None, meta)
failure_return = 'return;'
success_return = '%s;' % invocation # We don't return anything for void.
else:
body = '%s\n' % _MakeEnterLine(filenode, node, member, args[0],
handle_errors, None, meta)
failure_value = member.GetProperty('on_failure')
if failure_value is None:
failure_value = _GetDefaultFailureValue(rtype)
if failure_value is None:
raise TGenError('There is no default value for rtype %s. '
'Maybe you should provide an on_failure attribute '
'in the IDL file.' % rtype)
failure_return = 'return %s;' % failure_value
success_return = 'return %s;' % invocation
if member.GetProperty('always_set_output_parameters'):
body += 'if (enter.failed()) {\n'
for param in out_params:
body += ' memset(%s, 0, sizeof(*%s));\n' % (param, param)
body += ' %s\n' % failure_return
body += '}\n'
body += '%s' % success_return
meta.AddBuiltinInclude('string.h')
else:
body += 'if (enter.failed())\n'
body += ' %s\n' % failure_return
body += '%s' % success_return
return body
def DefineMember(filenode, node, member, release, include_version, meta):
"""Returns a definition for a member function of an interface.
Args:
filenode - IDLNode for the file
node - IDLNode for the interface
member - IDLNode for the member function
release - release to generate
include_version - include the version in emitted function name.
meta - ThunkMetadata for header hints
Returns:
A string with the member definition.
"""
cgen = CGen()
rtype, name, arrays, args = cgen.GetComponents(member, release, 'return')
log_body = '\"%s::%s()\";' % (node.GetName(), member.GetName())
if len(log_body) > 69: # Prevent lines over 80 characters.
body = 'VLOG(4) <<\n'
body += ' %s\n' % log_body
else:
body = 'VLOG(4) << %s\n' % log_body
if _IsTypeCheck(node, member, args):
body += '%s\n' % _MakeEnterLine(filenode, node, member, args[0], False,
None, meta)
body += 'return PP_FromBool(enter.succeeded());'
elif member.GetName() == 'Create' or member.GetName() == 'CreateTrusted':
body += _MakeCreateMemberBody(node, member, args)
else:
body += _MakeNormalMemberBody(filenode, release, node, member, rtype, args,
include_version, meta)
signature = cgen.GetSignature(member, release, 'return', func_as_ptr=False,
include_version=include_version)
return '%s\n%s\n}' % (cgen.Indent('%s {' % signature, tabs=0),
cgen.Indent(body, tabs=1))
def _IsNewestMember(member, members, releases):
"""Returns true if member is the newest node with its name in members.
Currently, every node in the AST only has one version. This means that we
will have two sibling nodes with the same name to represent different
versions.
See http://crbug.com/157017 .
Special handling is required for nodes which share their name with others,
but aren't the newest version in the IDL.
Args:
member - The member which is checked if it's newest
members - The list of members to inspect
releases - The set of releases to check for versions in.
"""
build_list = member.GetUniqueReleases(releases)
release = build_list[0] # Pick the oldest release.
same_name_siblings = filter(
lambda n: str(n) == str(member) and n != member, members)
for s in same_name_siblings:
sibling_build_list = s.GetUniqueReleases(releases)
sibling_release = sibling_build_list[0]
if sibling_release > release:
return False
return True
class TGen(GeneratorByFile):
def __init__(self):
Generator.__init__(self, 'Thunk', 'tgen', 'Generate the C++ thunk.')
def GenerateFile(self, filenode, releases, options):
savename = _GetThunkFileName(filenode, GetOption('thunkroot'))
my_min, my_max = filenode.GetMinMax(releases)
if my_min > releases[-1] or my_max < releases[0]:
if os.path.isfile(savename):
print "Removing stale %s for this range." % filenode.GetName()
os.remove(os.path.realpath(savename))
return False
do_generate = filenode.GetProperty('generate_thunk')
if not do_generate:
return False
thunk_out = IDLOutFile(savename)
body, meta = self.GenerateBody(thunk_out, filenode, releases, options)
# TODO(teravest): How do we handle repeated values?
if filenode.GetProperty('thunk_include'):
meta.AddInclude(filenode.GetProperty('thunk_include'))
self.WriteHead(thunk_out, filenode, releases, options, meta)
thunk_out.Write('\n\n'.join(body))
self.WriteTail(thunk_out, filenode, releases, options)
return thunk_out.Close()
def WriteHead(self, out, filenode, releases, options, meta):
__pychecker__ = 'unusednames=options'
cgen = CGen()
cright_node = filenode.GetChildren()[0]
assert(cright_node.IsA('Copyright'))
out.Write('%s\n' % cgen.Copyright(cright_node, cpp_style=True))
# Wrap the From ... modified ... comment if it would be >80 characters.
from_text = 'From %s' % (
filenode.GetProperty('NAME').replace(os.sep,'/'))
modified_text = 'modified %s.' % (
filenode.GetProperty('DATETIME'))
if len(from_text) + len(modified_text) < 74:
out.Write('// %s %s\n\n' % (from_text, modified_text))
else:
out.Write('// %s,\n// %s\n\n' % (from_text, modified_text))
if meta.BuiltinIncludes():
for include in sorted(meta.BuiltinIncludes()):
out.Write('#include <%s>\n' % include)
out.Write('\n')
# TODO(teravest): Don't emit includes we don't need.
includes = ['ppapi/c/pp_errors.h',
'ppapi/shared_impl/tracked_callback.h',
'ppapi/thunk/enter.h',
'ppapi/thunk/ppapi_thunk_export.h']
includes.append(_GetHeaderFileName(filenode))
for api in meta.Apis():
includes.append('%s' % api.lower())
for i in meta.Includes():
includes.append(i)
for include in sorted(includes):
out.Write('#include "%s"\n' % include)
out.Write('\n')
out.Write('namespace ppapi {\n')
out.Write('namespace thunk {\n')
out.Write('\n')
out.Write('namespace {\n')
out.Write('\n')
def GenerateBody(self, out, filenode, releases, options):
"""Generates a member function lines to be written and metadata.
Returns a tuple of (body, meta) where:
body - a list of lines with member function bodies
meta - a ThunkMetadata instance for hinting which headers are needed.
"""
__pychecker__ = 'unusednames=options'
out_members = []
meta = ThunkBodyMetadata()
for node in filenode.GetListOf('Interface'):
# Skip if this node is not in this release
if not node.InReleases(releases):
print "Skipping %s" % node
continue
# Generate Member functions
if node.IsA('Interface'):
members = node.GetListOf('Member')
for child in members:
build_list = child.GetUniqueReleases(releases)
# We have to filter out releases this node isn't in.
build_list = filter(lambda r: child.InReleases([r]), build_list)
if len(build_list) == 0:
continue
release = build_list[-1]
include_version = not _IsNewestMember(child, members, releases)
member = DefineMember(filenode, node, child, release, include_version,
meta)
if not member:
continue
out_members.append(member)
return (out_members, meta)
def WriteTail(self, out, filenode, releases, options):
__pychecker__ = 'unusednames=options'
cgen = CGen()
version_list = []
out.Write('\n\n')
for node in filenode.GetListOf('Interface'):
build_list = node.GetUniqueReleases(releases)
for build in build_list:
version = node.GetVersion(build).replace('.', '_')
thunk_name = 'g_' + node.GetName().lower() + '_thunk_' + \
version
thunk_type = '_'.join((node.GetName(), version))
version_list.append((thunk_type, thunk_name))
declare_line = 'const %s %s = {' % (thunk_type, thunk_name)
if len(declare_line) > 80:
declare_line = 'const %s\n %s = {' % (thunk_type, thunk_name)
out.Write('%s\n' % declare_line)
generated_functions = []
members = node.GetListOf('Member')
for child in members:
rtype, name, arrays, args = cgen.GetComponents(
child, build, 'return')
if child.InReleases([build]):
if not _IsNewestMember(child, members, releases):
version = child.GetVersion(
child.first_release[build]).replace('.', '_')
name += '_' + version
generated_functions.append(name)
out.Write(',\n'.join([' &%s' % f for f in generated_functions]))
out.Write('\n};\n\n')
out.Write('} // namespace\n')
out.Write('\n')
for thunk_type, thunk_name in version_list:
thunk_decl = ('PPAPI_THUNK_EXPORT const %s* Get%s_Thunk() {\n' %
(thunk_type, thunk_type))
if len(thunk_decl) > 80:
thunk_decl = ('PPAPI_THUNK_EXPORT const %s*\n Get%s_Thunk() {\n' %
(thunk_type, thunk_type))
out.Write(thunk_decl)
out.Write(' return &%s;\n' % thunk_name)
out.Write('}\n')
out.Write('\n')
out.Write('} // namespace thunk\n')
out.Write('} // namespace ppapi\n')
tgen = TGen()
def Main(args):
# Default invocation will verify the golden files are unchanged.
failed = 0
if not args:
args = ['--wnone', '--diff', '--test', '--thunkroot=.']
ParseOptions(args)
idldir = os.path.split(sys.argv[0])[0]
idldir = os.path.join(idldir, 'test_thunk', '*.idl')
filenames = glob.glob(idldir)
ast = ParseFiles(filenames)
if tgen.GenerateRange(ast, ['M13', 'M14', 'M15'], {}):
print "Golden file for M13-M15 failed."
failed = 1
else:
print "Golden file for M13-M15 passed."
return failed
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| bsd-3-clause |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.2/Lib/encodings/mac_centeuro.py | 257 | 14102 | """ Python Character Mapping Codec mac_centeuro generated from 'MAPPINGS/VENDORS/APPLE/CENTEURO.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-centeuro',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> CONTROL CHARACTER
'\x01' # 0x01 -> CONTROL CHARACTER
'\x02' # 0x02 -> CONTROL CHARACTER
'\x03' # 0x03 -> CONTROL CHARACTER
'\x04' # 0x04 -> CONTROL CHARACTER
'\x05' # 0x05 -> CONTROL CHARACTER
'\x06' # 0x06 -> CONTROL CHARACTER
'\x07' # 0x07 -> CONTROL CHARACTER
'\x08' # 0x08 -> CONTROL CHARACTER
'\t' # 0x09 -> CONTROL CHARACTER
'\n' # 0x0A -> CONTROL CHARACTER
'\x0b' # 0x0B -> CONTROL CHARACTER
'\x0c' # 0x0C -> CONTROL CHARACTER
'\r' # 0x0D -> CONTROL CHARACTER
'\x0e' # 0x0E -> CONTROL CHARACTER
'\x0f' # 0x0F -> CONTROL CHARACTER
'\x10' # 0x10 -> CONTROL CHARACTER
'\x11' # 0x11 -> CONTROL CHARACTER
'\x12' # 0x12 -> CONTROL CHARACTER
'\x13' # 0x13 -> CONTROL CHARACTER
'\x14' # 0x14 -> CONTROL CHARACTER
'\x15' # 0x15 -> CONTROL CHARACTER
'\x16' # 0x16 -> CONTROL CHARACTER
'\x17' # 0x17 -> CONTROL CHARACTER
'\x18' # 0x18 -> CONTROL CHARACTER
'\x19' # 0x19 -> CONTROL CHARACTER
'\x1a' # 0x1A -> CONTROL CHARACTER
'\x1b' # 0x1B -> CONTROL CHARACTER
'\x1c' # 0x1C -> CONTROL CHARACTER
'\x1d' # 0x1D -> CONTROL CHARACTER
'\x1e' # 0x1E -> CONTROL CHARACTER
'\x1f' # 0x1F -> CONTROL CHARACTER
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> CONTROL CHARACTER
'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\u0100' # 0x81 -> LATIN CAPITAL LETTER A WITH MACRON
'\u0101' # 0x82 -> LATIN SMALL LETTER A WITH MACRON
'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0104' # 0x84 -> LATIN CAPITAL LETTER A WITH OGONEK
'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
'\u0105' # 0x88 -> LATIN SMALL LETTER A WITH OGONEK
'\u010c' # 0x89 -> LATIN CAPITAL LETTER C WITH CARON
'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
'\u010d' # 0x8B -> LATIN SMALL LETTER C WITH CARON
'\u0106' # 0x8C -> LATIN CAPITAL LETTER C WITH ACUTE
'\u0107' # 0x8D -> LATIN SMALL LETTER C WITH ACUTE
'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
'\u0179' # 0x8F -> LATIN CAPITAL LETTER Z WITH ACUTE
'\u017a' # 0x90 -> LATIN SMALL LETTER Z WITH ACUTE
'\u010e' # 0x91 -> LATIN CAPITAL LETTER D WITH CARON
'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
'\u010f' # 0x93 -> LATIN SMALL LETTER D WITH CARON
'\u0112' # 0x94 -> LATIN CAPITAL LETTER E WITH MACRON
'\u0113' # 0x95 -> LATIN SMALL LETTER E WITH MACRON
'\u0116' # 0x96 -> LATIN CAPITAL LETTER E WITH DOT ABOVE
'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
'\u0117' # 0x98 -> LATIN SMALL LETTER E WITH DOT ABOVE
'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
'\u011a' # 0x9D -> LATIN CAPITAL LETTER E WITH CARON
'\u011b' # 0x9E -> LATIN SMALL LETTER E WITH CARON
'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
'\u2020' # 0xA0 -> DAGGER
'\xb0' # 0xA1 -> DEGREE SIGN
'\u0118' # 0xA2 -> LATIN CAPITAL LETTER E WITH OGONEK
'\xa3' # 0xA3 -> POUND SIGN
'\xa7' # 0xA4 -> SECTION SIGN
'\u2022' # 0xA5 -> BULLET
'\xb6' # 0xA6 -> PILCROW SIGN
'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
'\xae' # 0xA8 -> REGISTERED SIGN
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u2122' # 0xAA -> TRADE MARK SIGN
'\u0119' # 0xAB -> LATIN SMALL LETTER E WITH OGONEK
'\xa8' # 0xAC -> DIAERESIS
'\u2260' # 0xAD -> NOT EQUAL TO
'\u0123' # 0xAE -> LATIN SMALL LETTER G WITH CEDILLA
'\u012e' # 0xAF -> LATIN CAPITAL LETTER I WITH OGONEK
'\u012f' # 0xB0 -> LATIN SMALL LETTER I WITH OGONEK
'\u012a' # 0xB1 -> LATIN CAPITAL LETTER I WITH MACRON
'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
'\u0136' # 0xB5 -> LATIN CAPITAL LETTER K WITH CEDILLA
'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
'\u2211' # 0xB7 -> N-ARY SUMMATION
'\u0142' # 0xB8 -> LATIN SMALL LETTER L WITH STROKE
'\u013b' # 0xB9 -> LATIN CAPITAL LETTER L WITH CEDILLA
'\u013c' # 0xBA -> LATIN SMALL LETTER L WITH CEDILLA
'\u013d' # 0xBB -> LATIN CAPITAL LETTER L WITH CARON
'\u013e' # 0xBC -> LATIN SMALL LETTER L WITH CARON
'\u0139' # 0xBD -> LATIN CAPITAL LETTER L WITH ACUTE
'\u013a' # 0xBE -> LATIN SMALL LETTER L WITH ACUTE
'\u0145' # 0xBF -> LATIN CAPITAL LETTER N WITH CEDILLA
'\u0146' # 0xC0 -> LATIN SMALL LETTER N WITH CEDILLA
'\u0143' # 0xC1 -> LATIN CAPITAL LETTER N WITH ACUTE
'\xac' # 0xC2 -> NOT SIGN
'\u221a' # 0xC3 -> SQUARE ROOT
'\u0144' # 0xC4 -> LATIN SMALL LETTER N WITH ACUTE
'\u0147' # 0xC5 -> LATIN CAPITAL LETTER N WITH CARON
'\u2206' # 0xC6 -> INCREMENT
'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
'\xa0' # 0xCA -> NO-BREAK SPACE
'\u0148' # 0xCB -> LATIN SMALL LETTER N WITH CARON
'\u0150' # 0xCC -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
'\u0151' # 0xCE -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
'\u014c' # 0xCF -> LATIN CAPITAL LETTER O WITH MACRON
'\u2013' # 0xD0 -> EN DASH
'\u2014' # 0xD1 -> EM DASH
'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
'\xf7' # 0xD6 -> DIVISION SIGN
'\u25ca' # 0xD7 -> LOZENGE
'\u014d' # 0xD8 -> LATIN SMALL LETTER O WITH MACRON
'\u0154' # 0xD9 -> LATIN CAPITAL LETTER R WITH ACUTE
'\u0155' # 0xDA -> LATIN SMALL LETTER R WITH ACUTE
'\u0158' # 0xDB -> LATIN CAPITAL LETTER R WITH CARON
'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\u0159' # 0xDE -> LATIN SMALL LETTER R WITH CARON
'\u0156' # 0xDF -> LATIN CAPITAL LETTER R WITH CEDILLA
'\u0157' # 0xE0 -> LATIN SMALL LETTER R WITH CEDILLA
'\u0160' # 0xE1 -> LATIN CAPITAL LETTER S WITH CARON
'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
'\u0161' # 0xE4 -> LATIN SMALL LETTER S WITH CARON
'\u015a' # 0xE5 -> LATIN CAPITAL LETTER S WITH ACUTE
'\u015b' # 0xE6 -> LATIN SMALL LETTER S WITH ACUTE
'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
'\u0164' # 0xE8 -> LATIN CAPITAL LETTER T WITH CARON
'\u0165' # 0xE9 -> LATIN SMALL LETTER T WITH CARON
'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
'\u017d' # 0xEB -> LATIN CAPITAL LETTER Z WITH CARON
'\u017e' # 0xEC -> LATIN SMALL LETTER Z WITH CARON
'\u016a' # 0xED -> LATIN CAPITAL LETTER U WITH MACRON
'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\u016b' # 0xF0 -> LATIN SMALL LETTER U WITH MACRON
'\u016e' # 0xF1 -> LATIN CAPITAL LETTER U WITH RING ABOVE
'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
'\u016f' # 0xF3 -> LATIN SMALL LETTER U WITH RING ABOVE
'\u0170' # 0xF4 -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
'\u0171' # 0xF5 -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
'\u0172' # 0xF6 -> LATIN CAPITAL LETTER U WITH OGONEK
'\u0173' # 0xF7 -> LATIN SMALL LETTER U WITH OGONEK
'\xdd' # 0xF8 -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xfd' # 0xF9 -> LATIN SMALL LETTER Y WITH ACUTE
'\u0137' # 0xFA -> LATIN SMALL LETTER K WITH CEDILLA
'\u017b' # 0xFB -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\u0141' # 0xFC -> LATIN CAPITAL LETTER L WITH STROKE
'\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\u0122' # 0xFE -> LATIN CAPITAL LETTER G WITH CEDILLA
'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
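# Example round trip (a sketch; assumes this module is reachable through the
# standard encodings search path, as it is in the stock library):
#   'Tychy\u0144'.encode('mac_centeuro')   # -> b'Tychy\xc4'
#   b'Tychy\xc4'.decode('mac_centeuro')    # -> 'Tychy\u0144'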
| mit |
SimtterCom/gyp | test/builddir/gyptest-all.py | 185 | 2706 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify the settings that cause a set of programs to be created in
a specific build directory, and that no intermediate built files
get created outside of that build directory hierarchy even when
referred to with deeply-nested ../../.. paths.
"""
import TestGyp
# TODO(mmoss): Make only supports (theoretically) a single, global build
# directory (through GYP_GENERATOR_FLAGS 'output_dir'), rather than
# gyp-file-specific settings (e.g. the stuff in builddir.gypi) that the other
# generators support, so this doesn't work yet for make.
# TODO(mmoss): Make also has the issue that the top-level Makefile is written to
# the "--depth" location, which is one level above 'src', but then this test
# moves 'src' somewhere else, leaving the Makefile behind, so make can't find
# its sources. I'm not sure if make is wrong for writing outside the current
# directory, or if the test is wrong for assuming everything generated is under
# the current directory.
# Android, Ninja, and CMake do not support setting the build directory.
test = TestGyp.TestGyp(formats=['!make', '!ninja', '!android', '!cmake'])
test.run_gyp('prog1.gyp', '--depth=..', chdir='src')
if test.format == 'msvs':
if test.uses_msbuild:
test.must_contain('src/prog1.vcxproj',
'<OutDir>..\\builddir\\Default\\</OutDir>')
else:
test.must_contain('src/prog1.vcproj',
'OutputDirectory="..\\builddir\\Default\\"')
test.relocate('src', 'relocate/src')
test.subdir('relocate/builddir')
# Make sure that all the built ../../etc. files only get put under builddir,
# by making all of relocate read-only and then making only builddir writable.
test.writable('relocate', False)
test.writable('relocate/builddir', True)
# Suppress the test infrastructure's setting SYMROOT on the command line.
test.build('prog1.gyp', test.ALL, SYMROOT=None, chdir='relocate/src')
expect1 = """\
Hello from prog1.c
Hello from func1.c
"""
expect2 = """\
Hello from subdir2/prog2.c
Hello from func2.c
"""
expect3 = """\
Hello from subdir2/subdir3/prog3.c
Hello from func3.c
"""
expect4 = """\
Hello from subdir2/subdir3/subdir4/prog4.c
Hello from func4.c
"""
expect5 = """\
Hello from subdir2/subdir3/subdir4/subdir5/prog5.c
Hello from func5.c
"""
def run_builddir(prog, expect):
dir = 'relocate/builddir/Default/'
test.run(program=test.workpath(dir + prog), stdout=expect)
run_builddir('prog1', expect1)
run_builddir('prog2', expect2)
run_builddir('prog3', expect3)
run_builddir('prog4', expect4)
run_builddir('prog5', expect5)
test.pass_test()
| bsd-3-clause |
DMLoy/ECommerceBasic | lib/python2.7/site-packages/django/utils/feedgenerator.py | 73 | 15643 | """
Syndication feed generation library -- used for generating RSS, etc.
Sample usage:
>>> from django.utils import feedgenerator
>>> feed = feedgenerator.Rss201rev2Feed(
... title="Poynter E-Media Tidbits",
... link="http://www.poynter.org/column.asp?id=31",
... description="A group Weblog by the sharpest minds in online media/journalism/publishing.",
... language="en",
... )
>>> feed.add_item(
... title="Hello",
... link="http://www.holovaty.com/test/",
... description="Testing."
... )
>>> with open('test.rss', 'w') as fp:
... feed.write(fp, 'utf-8')
For definitions of the different versions of RSS, see:
http://web.archive.org/web/20110718035220/http://diveintomark.org/archives/2004/02/04/incompatible-rss
"""
from __future__ import unicode_literals
import datetime
try:
from urllib.parse import urlparse
except ImportError: # Python 2
from urlparse import urlparse
from django.utils.xmlutils import SimplerXMLGenerator
from django.utils.encoding import force_text, iri_to_uri
from django.utils import datetime_safe
from django.utils import six
from django.utils.six import StringIO
from django.utils.timezone import is_aware
def rfc2822_date(date):
    # We can't use strftime() because it produces locale-dependent results, so
# we have to map english month and day names manually
months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec',)
days = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
# Support datetime objects older than 1900
date = datetime_safe.new_datetime(date)
# We do this ourselves to be timezone aware, email.Utils is not tz aware.
dow = days[date.weekday()]
month = months[date.month - 1]
time_str = date.strftime('%s, %%d %s %%Y %%H:%%M:%%S ' % (dow, month))
if not six.PY3: # strftime returns a byte string in Python 2
time_str = time_str.decode('utf-8')
if is_aware(date):
offset = date.tzinfo.utcoffset(date)
timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
hour, minute = divmod(timezone, 60)
return time_str + '%+03d%02d' % (hour, minute)
else:
return time_str + '-0000'
def rfc3339_date(date):
# Support datetime objects older than 1900
date = datetime_safe.new_datetime(date)
time_str = date.strftime('%Y-%m-%dT%H:%M:%S')
if not six.PY3: # strftime returns a byte string in Python 2
time_str = time_str.decode('utf-8')
if is_aware(date):
offset = date.tzinfo.utcoffset(date)
timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
hour, minute = divmod(timezone, 60)
return time_str + '%+03d:%02d' % (hour, minute)
else:
return time_str + 'Z'
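# Illustrative sketch (not part of the original module): the string shapes the two
# helpers above produce for a naive (timezone-unaware), hypothetical datetime.
def _example_rfc_dates(): # pragma: no cover
    import datetime
    d = datetime.datetime(2013, 1, 2, 3, 4, 5)
    assert rfc2822_date(d) == 'Wed, 02 Jan 2013 03:04:05 -0000'
    assert rfc3339_date(d) == '2013-01-02T03:04:05Z'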
def get_tag_uri(url, date):
"""
Creates a TagURI.
See http://web.archive.org/web/20110514113830/http://diveintomark.org/archives/2004/05/28/howto-atom-id
"""
bits = urlparse(url)
d = ''
if date is not None:
d = ',%s' % datetime_safe.new_datetime(date).strftime('%Y-%m-%d')
return 'tag:%s%s:%s/%s' % (bits.hostname, d, bits.path, bits.fragment)
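# Illustrative sketch (not part of the original module): the tag URI built from a
# hypothetical URL and date.
def _example_get_tag_uri(): # pragma: no cover
    import datetime
    uri = get_tag_uri('http://example.com/posts/1#intro', datetime.datetime(2013, 1, 2))
    assert uri == 'tag:example.com,2013-01-02:/posts/1/intro'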
class SyndicationFeed(object):
"Base class for all syndication feeds. Subclasses should provide write()"
def __init__(self, title, link, description, language=None, author_email=None,
author_name=None, author_link=None, subtitle=None, categories=None,
feed_url=None, feed_copyright=None, feed_guid=None, ttl=None, **kwargs):
to_unicode = lambda s: force_text(s, strings_only=True)
if categories:
categories = [force_text(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_text(ttl)
self.feed = {
'title': to_unicode(title),
'link': iri_to_uri(link),
'description': to_unicode(description),
'language': to_unicode(language),
'author_email': to_unicode(author_email),
'author_name': to_unicode(author_name),
'author_link': iri_to_uri(author_link),
'subtitle': to_unicode(subtitle),
'categories': categories or (),
'feed_url': iri_to_uri(feed_url),
'feed_copyright': to_unicode(feed_copyright),
'id': feed_guid or link,
'ttl': ttl,
}
self.feed.update(kwargs)
self.items = []
def add_item(self, title, link, description, author_email=None,
author_name=None, author_link=None, pubdate=None, comments=None,
unique_id=None, enclosure=None, categories=(), item_copyright=None,
ttl=None, **kwargs):
"""
Adds an item to the feed. All args are expected to be Python Unicode
objects except pubdate, which is a datetime.datetime object, and
enclosure, which is an instance of the Enclosure class.
"""
to_unicode = lambda s: force_text(s, strings_only=True)
if categories:
categories = [to_unicode(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_text(ttl)
item = {
'title': to_unicode(title),
'link': iri_to_uri(link),
'description': to_unicode(description),
'author_email': to_unicode(author_email),
'author_name': to_unicode(author_name),
'author_link': iri_to_uri(author_link),
'pubdate': pubdate,
'comments': to_unicode(comments),
'unique_id': to_unicode(unique_id),
'enclosure': enclosure,
'categories': categories or (),
'item_copyright': to_unicode(item_copyright),
'ttl': ttl,
}
item.update(kwargs)
self.items.append(item)
def num_items(self):
return len(self.items)
def root_attributes(self):
"""
Return extra attributes to place on the root (i.e. feed/channel) element.
Called from write().
"""
return {}
def add_root_elements(self, handler):
"""
Add elements in the root (i.e. feed/channel) element. Called
from write().
"""
pass
def item_attributes(self, item):
"""
Return extra attributes to place on each item (i.e. item/entry) element.
"""
return {}
def add_item_elements(self, handler, item):
"""
Add elements on each item (i.e. item/entry) element.
"""
pass
def write(self, outfile, encoding):
"""
Outputs the feed in the given encoding to outfile, which is a file-like
object. Subclasses should override this.
"""
raise NotImplementedError
def writeString(self, encoding):
"""
Returns the feed in the given encoding as a string.
"""
s = StringIO()
self.write(s, encoding)
return s.getvalue()
def latest_post_date(self):
"""
Returns the latest item's pubdate. If none of them have a pubdate,
this returns the current date/time.
"""
updates = [i['pubdate'] for i in self.items if i['pubdate'] is not None]
if len(updates) > 0:
updates.sort()
return updates[-1]
else:
return datetime.datetime.now()
class Enclosure(object):
"Represents an RSS enclosure"
def __init__(self, url, length, mime_type):
"All args are expected to be Python Unicode objects"
self.length, self.mime_type = length, mime_type
self.url = iri_to_uri(url)
class RssFeed(SyndicationFeed):
mime_type = 'application/rss+xml; charset=utf-8'
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement("rss", self.rss_attributes())
handler.startElement("channel", self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
self.endChannelElement(handler)
handler.endElement("rss")
def rss_attributes(self):
return {"version": self._version,
"xmlns:atom": "http://www.w3.org/2005/Atom"}
def write_items(self, handler):
for item in self.items:
handler.startElement('item', self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement("item")
def add_root_elements(self, handler):
handler.addQuickElement("title", self.feed['title'])
handler.addQuickElement("link", self.feed['link'])
handler.addQuickElement("description", self.feed['description'])
if self.feed['feed_url'] is not None:
handler.addQuickElement("atom:link", None,
{"rel": "self", "href": self.feed['feed_url']})
if self.feed['language'] is not None:
handler.addQuickElement("language", self.feed['language'])
for cat in self.feed['categories']:
handler.addQuickElement("category", cat)
if self.feed['feed_copyright'] is not None:
handler.addQuickElement("copyright", self.feed['feed_copyright'])
handler.addQuickElement("lastBuildDate", rfc2822_date(self.latest_post_date()))
if self.feed['ttl'] is not None:
handler.addQuickElement("ttl", self.feed['ttl'])
def endChannelElement(self, handler):
handler.endElement("channel")
class RssUserland091Feed(RssFeed):
_version = "0.91"
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", item['link'])
if item['description'] is not None:
handler.addQuickElement("description", item['description'])
class Rss201rev2Feed(RssFeed):
# Spec: http://blogs.law.harvard.edu/tech/rss
_version = "2.0"
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", item['link'])
if item['description'] is not None:
handler.addQuickElement("description", item['description'])
# Author information.
if item["author_name"] and item["author_email"]:
handler.addQuickElement("author", "%s (%s)" % \
(item['author_email'], item['author_name']))
elif item["author_email"]:
handler.addQuickElement("author", item["author_email"])
elif item["author_name"]:
handler.addQuickElement("dc:creator", item["author_name"], {"xmlns:dc": "http://purl.org/dc/elements/1.1/"})
if item['pubdate'] is not None:
handler.addQuickElement("pubDate", rfc2822_date(item['pubdate']))
if item['comments'] is not None:
handler.addQuickElement("comments", item['comments'])
if item['unique_id'] is not None:
handler.addQuickElement("guid", item['unique_id'])
if item['ttl'] is not None:
handler.addQuickElement("ttl", item['ttl'])
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement("enclosure", '',
{"url": item['enclosure'].url, "length": item['enclosure'].length,
"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement("category", cat)
class Atom1Feed(SyndicationFeed):
# Spec: http://atompub.org/2005/07/11/draft-ietf-atompub-format-10.html
mime_type = 'application/atom+xml; charset=utf-8'
ns = "http://www.w3.org/2005/Atom"
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement('feed', self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
handler.endElement("feed")
def root_attributes(self):
if self.feed['language'] is not None:
return {"xmlns": self.ns, "xml:lang": self.feed['language']}
else:
return {"xmlns": self.ns}
def add_root_elements(self, handler):
handler.addQuickElement("title", self.feed['title'])
handler.addQuickElement("link", "", {"rel": "alternate", "href": self.feed['link']})
if self.feed['feed_url'] is not None:
handler.addQuickElement("link", "", {"rel": "self", "href": self.feed['feed_url']})
handler.addQuickElement("id", self.feed['id'])
handler.addQuickElement("updated", rfc3339_date(self.latest_post_date()))
if self.feed['author_name'] is not None:
handler.startElement("author", {})
handler.addQuickElement("name", self.feed['author_name'])
if self.feed['author_email'] is not None:
handler.addQuickElement("email", self.feed['author_email'])
if self.feed['author_link'] is not None:
handler.addQuickElement("uri", self.feed['author_link'])
handler.endElement("author")
if self.feed['subtitle'] is not None:
handler.addQuickElement("subtitle", self.feed['subtitle'])
for cat in self.feed['categories']:
handler.addQuickElement("category", "", {"term": cat})
if self.feed['feed_copyright'] is not None:
handler.addQuickElement("rights", self.feed['feed_copyright'])
def write_items(self, handler):
for item in self.items:
handler.startElement("entry", self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement("entry")
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", "", {"href": item['link'], "rel": "alternate"})
if item['pubdate'] is not None:
handler.addQuickElement("updated", rfc3339_date(item['pubdate']))
# Author information.
if item['author_name'] is not None:
handler.startElement("author", {})
handler.addQuickElement("name", item['author_name'])
if item['author_email'] is not None:
handler.addQuickElement("email", item['author_email'])
if item['author_link'] is not None:
handler.addQuickElement("uri", item['author_link'])
handler.endElement("author")
# Unique ID.
if item['unique_id'] is not None:
unique_id = item['unique_id']
else:
unique_id = get_tag_uri(item['link'], item['pubdate'])
handler.addQuickElement("id", unique_id)
# Summary.
if item['description'] is not None:
handler.addQuickElement("summary", item['description'], {"type": "html"})
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement("link", '',
{"rel": "enclosure",
"href": item['enclosure'].url,
"length": item['enclosure'].length,
"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement("category", "", {"term": cat})
# Rights.
if item['item_copyright'] is not None:
handler.addQuickElement("rights", item['item_copyright'])
# This isolates the decision of what the system default is, so calling code can
# do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed".
DefaultFeed = Rss201rev2Feed
| mit |
qliu/mhcdashboard | mhcdashboard/mhcdashboardapp/migrations/0023_auto_20150302_1022.py | 2 | 1169 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('mhcdashboardapp', '0022_auto_20150302_1019'),
]
operations = [
migrations.AddField(
model_name='organizationactivity',
name='q1_comment',
field=models.CharField(max_length=500, null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='organizationactivity',
name='q2_comment',
field=models.CharField(max_length=500, null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='organizationactivity',
name='q3_comment',
field=models.CharField(max_length=500, null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='organizationactivity',
name='q4_comment',
field=models.CharField(max_length=500, null=True, blank=True),
preserve_default=True,
),
]
| gpl-2.0 |
jack6215/kafka | kafka-patch-review.py | 71 | 8273 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import sys
import os
import time
import datetime
import tempfile
import commands
import getpass
from jira.client import JIRA
def get_jira_config():
# read the config file
home=jira_home=os.getenv('HOME')
home=home.rstrip('/')
if not (os.path.isfile(home + '/jira.ini')):
jira_user=raw_input('JIRA user :')
jira_pass=getpass.getpass('JIRA password :')
jira_config = {'user':jira_user, 'password':jira_pass}
return jira_config
else:
jira_config = dict(line.strip().split('=') for line in open(home + '/jira.ini'))
return jira_config
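# Illustrative sketch (not part of the original script): the ~/jira.ini file read above
# is expected to hold one key=value pair per line, for example:
#
#   user=jdoe
#   password=secret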
def get_jira(jira_config):
options = {
'server': 'https://issues.apache.org/jira'
}
jira = JIRA(options=options,basic_auth=(jira_config['user'], jira_config['password']))
# (Force) verify the auth was really done
jira_session=jira.session()
if (jira_session is None):
raise Exception("Failed to login to the JIRA instance")
return jira
def cmd_exists(cmd):
status, result = commands.getstatusoutput(cmd)
return status
def main():
''' main(), shut up, pylint '''
popt = argparse.ArgumentParser(description='Kafka patch review tool')
popt.add_argument('-b', '--branch', action='store', dest='branch', required=True, help='Tracking branch to create diff against')
popt.add_argument('-j', '--jira', action='store', dest='jira', required=True, help='JIRA corresponding to the reviewboard')
popt.add_argument('-s', '--summary', action='store', dest='summary', required=False, help='Summary for the reviewboard')
popt.add_argument('-d', '--description', action='store', dest='description', required=False, help='Description for reviewboard')
popt.add_argument('-r', '--rb', action='store', dest='reviewboard', required=False, help='Review board that needs to be updated')
popt.add_argument('-t', '--testing-done', action='store', dest='testing', required=False, help='Text for the Testing Done section of the reviewboard')
popt.add_argument('-db', '--debug', action='store_true', required=False, help='Enable debug mode')
opt = popt.parse_args()
post_review_tool = None
if (cmd_exists("post-review") == 0):
post_review_tool = "post-review"
elif (cmd_exists("rbt") == 0):
post_review_tool = "rbt post"
else:
print "please install RBTools"
sys.exit(1)
patch_file=tempfile.gettempdir() + "/" + opt.jira + ".patch"
if opt.reviewboard:
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H:%M:%S')
patch_file=tempfile.gettempdir() + "/" + opt.jira + '_' + st + '.patch'
# first check if rebase is needed
git_branch_hash="git rev-parse " + opt.branch
p_now=os.popen(git_branch_hash)
branch_now=p_now.read()
p_now.close()
git_common_ancestor="git merge-base " + opt.branch + " HEAD"
p_then=os.popen(git_common_ancestor)
branch_then=p_then.read()
p_then.close()
if branch_now != branch_then:
print 'ERROR: Your current working branch is from an older version of ' + opt.branch + '. Please rebase first by using git pull --rebase'
sys.exit(1)
git_configure_reviewboard="git config reviewboard.url https://reviews.apache.org"
print "Configuring reviewboard url to https://reviews.apache.org"
p=os.popen(git_configure_reviewboard)
p.close()
git_remote_update="git remote update"
print "Updating your remote branches to pull the latest changes"
p=os.popen(git_remote_update)
p.close()
# Get JIRA configuration and login to JIRA to ensure the credentials work, before publishing the patch to the review board
print "Verifying JIRA connection configurations"
try:
jira_config=get_jira_config()
jira=get_jira(jira_config)
except:
print "Failed to login to the JIRA instance", sys.exc_info()[0], sys.exc_info()[1]
sys.exit(1)
git_command="git format-patch " + opt.branch + " --stdout > " + patch_file
if opt.debug:
print git_command
p=os.popen(git_command)
p.close()
print 'Getting latest patch attached to the JIRA'
tmp_dir = tempfile.mkdtemp()
get_latest_patch_command="python ./dev-utils/test-patch.py --get-latest-patch --defect " + opt.jira + " --output " + tmp_dir + " > /dev/null 2>&1"
p=os.popen(get_latest_patch_command)
p.close()
previous_patch=tmp_dir + "/" + opt.jira + ".patch"
diff_file=tmp_dir + "/" + opt.jira + ".diff"
if os.path.isfile(previous_patch) and os.stat(previous_patch).st_size > 0:
print 'Creating diff with previous version of patch uploaded to JIRA'
diff_command = "diff " + previous_patch+ " " + patch_file + " > " + diff_file
try:
p=os.popen(diff_command)
sys.stdout.flush()
p.close()
except:
pass
print 'Diff with previous version of patch uploaded to JIRA is saved to ' + diff_file
    print 'Checking if there are changes that need to be pushed'
if os.stat(diff_file).st_size == 0:
print 'No changes found on top of changes uploaded to JIRA'
print 'Aborting'
sys.exit(1)
rb_command= post_review_tool + " --publish --tracking-branch " + opt.branch + " --target-groups=kafka --bugs-closed=" + opt.jira
if opt.debug:
rb_command=rb_command + " --debug"
summary="Patch for " + opt.jira
if opt.summary:
summary=opt.summary
rb_command=rb_command + " --summary \"" + summary + "\""
if opt.description:
rb_command=rb_command + " --description \"" + opt.description + "\""
if opt.reviewboard:
rb_command=rb_command + " -r " + opt.reviewboard
if opt.testing:
rb_command=rb_command + " --testing-done=" + opt.testing
if opt.debug:
print rb_command
p=os.popen(rb_command)
rb_url=""
for line in p:
print line
if line.startswith('http'):
rb_url = line
elif line.startswith("There don't seem to be any diffs"):
print 'ERROR: Your reviewboard was not created/updated since there was no diff to upload. The reasons that can cause this issue are 1) Your diff is not checked into your local branch. Please check in the diff to the local branch and retry 2) You are not specifying the local branch name as part of the --branch option. Please specify the remote branch name obtained from git branch -r'
p.close()
sys.exit(1)
elif line.startswith("Your review request still exists, but the diff is not attached") and not opt.debug:
print 'ERROR: Your reviewboard was not created/updated. Please run the script with the --debug option to troubleshoot the problem'
p.close()
sys.exit(1)
if p.close() != None:
print 'ERROR: reviewboard update failed. Exiting.'
sys.exit(1)
if opt.debug:
print 'rb url=',rb_url
print 'Creating diff against', opt.branch, 'and uploading patch to JIRA',opt.jira
issue = jira.issue(opt.jira)
attachment=open(patch_file)
jira.add_attachment(issue,attachment)
attachment.close()
comment="Created reviewboard "
if not opt.reviewboard:
print 'Created a new reviewboard',rb_url,
else:
print 'Updated reviewboard',rb_url
comment="Updated reviewboard "
comment = comment + rb_url + ' against branch ' + opt.branch
jira.add_comment(opt.jira, comment)
#update the JIRA status to PATCH AVAILABLE
transitions = jira.transitions(issue)
transitionsMap ={}
for t in transitions:
transitionsMap[t['name']] = t['id']
if('Submit Patch' in transitionsMap):
jira.transition_issue(issue, transitionsMap['Submit Patch'] , assignee={'name': jira_config['user']} )
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 |
alfa-addon/addon | plugin.video.alfa/lib/cloudscraper/interpreters/jsunfuck.py | 3 | 3603 | MAPPING = {
'a': '(false+"")[1]',
'b': '([]["entries"]()+"")[2]',
'c': '([]["fill"]+"")[3]',
'd': '(undefined+"")[2]',
'e': '(true+"")[3]',
'f': '(false+"")[0]',
'g': '(false+[0]+String)[20]',
'h': '(+(101))["to"+String["name"]](21)[1]',
'i': '([false]+undefined)[10]',
'j': '([]["entries"]()+"")[3]',
'k': '(+(20))["to"+String["name"]](21)',
'l': '(false+"")[2]',
'm': '(Number+"")[11]',
'n': '(undefined+"")[1]',
'o': '(true+[]["fill"])[10]',
'p': '(+(211))["to"+String["name"]](31)[1]',
'q': '(+(212))["to"+String["name"]](31)[1]',
'r': '(true+"")[1]',
's': '(false+"")[3]',
't': '(true+"")[0]',
'u': '(undefined+"")[0]',
'v': '(+(31))["to"+String["name"]](32)',
'w': '(+(32))["to"+String["name"]](33)',
'x': '(+(101))["to"+String["name"]](34)[1]',
'y': '(NaN+[Infinity])[10]',
'z': '(+(35))["to"+String["name"]](36)',
'A': '(+[]+Array)[10]',
'B': '(+[]+Boolean)[10]',
'C': 'Function("return escape")()(("")["italics"]())[2]',
'D': 'Function("return escape")()([]["fill"])["slice"]("-1")',
'E': '(RegExp+"")[12]',
'F': '(+[]+Function)[10]',
'G': '(false+Function("return Date")()())[30]',
'I': '(Infinity+"")[0]',
'M': '(true+Function("return Date")()())[30]',
'N': '(NaN+"")[0]',
'O': '(NaN+Function("return{}")())[11]',
'R': '(+[]+RegExp)[10]',
'S': '(+[]+String)[10]',
'T': '(NaN+Function("return Date")()())[30]',
'U': '(NaN+Function("return{}")()["to"+String["name"]]["call"]())[11]',
' ': '(NaN+[]["fill"])[11]',
'"': '("")["fontcolor"]()[12]',
'%': 'Function("return escape")()([]["fill"])[21]',
'&': '("")["link"](0+")[10]',
'(': '(undefined+[]["fill"])[22]',
')': '([0]+false+[]["fill"])[20]',
'+': '(+(+!+[]+(!+[]+[])[!+[]+!+[]+!+[]]+[+!+[]]+[+[]]+[+[]])+[])[2]',
',': '([]["slice"]["call"](false+"")+"")[1]',
'-': '(+(.+[0000000001])+"")[2]',
'.': '(+(+!+[]+[+!+[]]+(!![]+[])[!+[]+!+[]+!+[]]+[!+[]+!+[]]+[+[]])+[])[+!+[]]',
'/': '(false+[0])["italics"]()[10]',
':': '(RegExp()+"")[3]',
';': '("")["link"](")[14]',
'<': '("")["italics"]()[0]',
'=': '("")["fontcolor"]()[11]',
'>': '("")["italics"]()[2]',
'?': '(RegExp()+"")[2]',
'[': '([]["entries"]()+"")[0]',
']': '([]["entries"]()+"")[22]',
'{': '(true+[]["fill"])[20]',
'}': '([]["fill"]+"")["slice"]("-1")'
}
SIMPLE = {
'false': '![]',
'true': '!![]',
'undefined': '[][[]]',
'NaN': '+[![]]',
'Infinity': '+(+!+[]+(!+[]+[])[!+[]+!+[]+!+[]]+[+!+[]]+[+[]]+[+[]]+[+[]])' # +"1e1000"
}
CONSTRUCTORS = {
'Array': '[]',
'Number': '(+[])',
'String': '([]+[])',
'Boolean': '(![])',
'Function': '[]["fill"]',
'RegExp': 'Function("return/"+false+"/")()'
}
def jsunfuck(jsfuckString):
for key in sorted(MAPPING, key=lambda k: len(MAPPING[k]), reverse=True):
if MAPPING.get(key) in jsfuckString:
jsfuckString = jsfuckString.replace(MAPPING.get(key), '"{}"'.format(key))
for key in sorted(SIMPLE, key=lambda k: len(SIMPLE[k]), reverse=True):
if SIMPLE.get(key) in jsfuckString:
jsfuckString = jsfuckString.replace(SIMPLE.get(key), '{}'.format(key))
# for key in sorted(CONSTRUCTORS, key=lambda k: len(CONSTRUCTORS[k]), reverse=True):
# if CONSTRUCTORS.get(key) in jsfuckString:
# jsfuckString = jsfuckString.replace(CONSTRUCTORS.get(key), '{}'.format(key))
return jsfuckString
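# Illustrative sketch (not part of the original module): two small, hypothetical inputs
# and the plain text jsunfuck() recovers from them.
def _example_jsunfuck():
    assert jsunfuck('(false+"")[1]') == '"a"'  # replaced via a MAPPING entry
    assert jsunfuck('![]') == 'false'          # replaced via a SIMPLE entry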
| gpl-3.0 |
ML-KULeuven/socceraction | tests/spadl/test_opta.py | 1 | 8597 | import os
import pandas as pd
import pytest
import socceraction.spadl.config as spadlcfg
from socceraction.spadl import opta as opta
from socceraction.spadl.base import SPADLSchema
from socceraction.spadl.opta import (
OptaCompetitionSchema,
OptaEventSchema,
OptaGameSchema,
OptaPlayerSchema,
OptaTeamSchema,
)
class TestJSONOptaLoader:
def setup_method(self):
data_dir = os.path.join(os.path.dirname(__file__), os.pardir, 'data', 'opta')
self.loader = opta.OptaLoader(
root=data_dir,
parser='json',
feeds={
'f1': 'tournament-{season_id}-{competition_id}.json',
'f9': 'match-{season_id}-{competition_id}-{game_id}.json',
'f24': 'match-{season_id}-{competition_id}-{game_id}.json',
},
)
def test_competitions(self):
df_competitions = self.loader.competitions()
assert len(df_competitions) > 0
OptaCompetitionSchema.validate(df_competitions)
def test_games(self):
df_games = self.loader.games(8, 2017)
assert len(df_games) == 1
OptaGameSchema.validate(df_games)
def test_teams(self):
df_teams = self.loader.teams(918893)
assert len(df_teams) == 2
OptaTeamSchema.validate(df_teams)
def test_players(self):
df_players = self.loader.players(918893)
assert len(df_players) == 27
OptaPlayerSchema.validate(df_players)
def test_events(self):
df_events = self.loader.events(918893)
assert len(df_events) > 0
OptaEventSchema.validate(df_events)
class TestXMLOptaLoader:
def setup_method(self):
data_dir = os.path.join(os.path.dirname(__file__), os.pardir, 'data', 'opta')
self.loader = opta.OptaLoader(
root=data_dir,
parser='xml',
feeds={
'f7': 'f7-{competition_id}-{season_id}-{game_id}-matchresults.xml',
'f24': 'f24-{competition_id}-{season_id}-{game_id}-eventdetails.xml',
},
)
def test_competitions(self):
df_competitions = self.loader.competitions()
assert len(df_competitions) > 0
OptaCompetitionSchema.validate(df_competitions)
def test_games(self):
df_games = self.loader.games(23, 2018)
assert len(df_games) == 1
OptaGameSchema.validate(df_games)
def test_teams(self):
df_teams = self.loader.teams(1009316)
assert len(df_teams) == 2
OptaTeamSchema.validate(df_teams)
def test_players(self):
df_players = self.loader.players(1009316)
assert len(df_players) == 36
OptaPlayerSchema.validate(df_players)
def test_events(self):
df_events = self.loader.events(1009316)
assert len(df_events) > 0
OptaEventSchema.validate(df_events)
class TestWhoscoredLoader:
def setup_method(self):
data_dir = os.path.join(os.path.dirname(__file__), os.pardir, 'data', 'whoscored')
self.loader = opta.OptaLoader(
root=data_dir,
parser='whoscored',
feeds={'whoscored': '{game_id}.json'},
)
def test_competitions(self):
df_competitions = self.loader.competitions()
assert len(df_competitions) == 0
def test_games(self):
df_games = self.loader.games(23, 2018)
assert len(df_games) == 1
OptaGameSchema.validate(df_games)
def test_teams(self):
df_teams = self.loader.teams(1005916)
assert len(df_teams) == 2
OptaTeamSchema.validate(df_teams)
def test_players(self):
df_players = self.loader.players(1005916)
assert len(df_players) == 44
OptaPlayerSchema.validate(df_players)
def test_events(self):
df_events = self.loader.events(1005916)
assert len(df_events) > 0
OptaEventSchema.validate(df_events)
class TestSpadlConvertor:
def setup_method(self):
data_dir = os.path.join(os.path.dirname(__file__), os.pardir, 'data', 'opta')
loader = opta.OptaLoader(
root=data_dir,
parser='xml',
feeds={
'f7': 'f7-{competition_id}-{season_id}-{game_id}-matchresults.xml',
'f24': 'f24-{competition_id}-{season_id}-{game_id}-eventdetails.xml',
},
)
self.events = loader.events(1009316)
def test_convert_to_actions(self):
df_actions = opta.convert_to_actions(self.events, 174)
assert len(df_actions) > 0
SPADLSchema.validate(df_actions)
assert (df_actions.game_id == 1009316).all()
assert ((df_actions.team_id == 174) | (df_actions.team_id == 957)).all()
def test_convert_goalkick(self):
event = pd.DataFrame(
[
{
'game_id': 318175,
'event_id': 1619686768,
'type_id': 1,
'period_id': 1,
'minute': 2,
'second': 14,
'timestamp': '2010-01-27 19:47:14',
'player_id': 8786,
'team_id': 157,
'outcome': False,
'start_x': 5.0,
'start_y': 37.0,
'end_x': 73.0,
'end_y': 18.7,
'assist': False,
'keypass': False,
'qualifiers': {56: 'Right', 141: '18.7', 124: True, 140: '73.0', 1: True},
'type_name': 'pass',
}
]
)
action = opta.convert_to_actions(event, 0).iloc[0]
assert action['type_id'] == spadlcfg.actiontypes.index('goalkick')
def test_convert_own_goal(self):
event = pd.DataFrame(
[
{
'game_id': 318175,
'event_id': 1619686768,
'type_id': 16,
'period_id': 1,
'minute': 2,
'second': 14,
'timestamp': '2010-01-27 19:47:14',
'player_id': 8786,
'team_id': 157,
'outcome': 1,
'start_x': 5.0,
'start_y': 37.0,
'end_x': 73.0,
'end_y': 18.7,
'assist': False,
'keypass': False,
'qualifiers': {28: True},
'type_name': 'goal',
}
]
)
action = opta.convert_to_actions(event, 0).iloc[0]
assert action['type_id'] == spadlcfg.actiontypes.index('bad_touch')
assert action['result_id'] == spadlcfg.results.index('owngoal')
def test_extract_lineups_f7xml():
data_dir = os.path.join(os.path.dirname(__file__), os.pardir, 'data', 'opta')
parser = opta._F7XMLParser(os.path.join(data_dir, 'f7-23-2018-1009316-matchresults.xml'))
lineups = parser.extract_lineups()
for _, lineup in lineups.items():
# each team should have 11 starters
assert sum([p['is_starter'] for p in lineup['players'].values()]) == 11
# the summed match time of all players should equal the total time available
assert sum([p['minutes_played'] for p in lineup['players'].values()]) == 11 * 96
def test_extract_lineups_f9json():
data_dir = os.path.join(os.path.dirname(__file__), os.pardir, 'data', 'opta')
parser = opta._F9JSONParser(os.path.join(data_dir, 'match-2017-8-918893.json'))
lineups = parser.extract_lineups()
for _, lineup in lineups.items():
print([p['minutes_played'] for p in lineup['players'].values()])
# each team should have 11 starters
assert sum([p['is_starter'] for p in lineup['players'].values()]) == 11
# the summed match time of all players should equal the total time available
assert sum([p['minutes_played'] for p in lineup['players'].values()]) == 11 * 96
def test_extract_ids_from_path():
glob_pattern = '{competition_id}-{season_id}/{game_id}.json'
ffp = 'blah/blah/blah/1-2021/1234.json'
ids = opta._extract_ids_from_path(ffp, glob_pattern)
assert ids['competition_id'] == 1
assert ids['season_id'] == 2021
assert ids['game_id'] == 1234
def test_extract_ids_from_path_with_incorrect_pattern():
glob_pattern = '{competition_id}-{season_id}/{game_id}.json'
ffp = 'blah/blah/blah/1-2021/g1234.json'
with pytest.raises(ValueError):
opta._extract_ids_from_path(ffp, glob_pattern)
| mit |
Endika/odoo | addons/hr_timesheet_invoice/report/__init__.py | 433 | 1136 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_analytic_profit
import report_analytic
import hr_timesheet_invoice_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
amyhoo/django-oscar-alipay | alipay/conf.py | 2 | 5941 | # -*- coding:utf-8 -*-
#######################################################################################
# Meaning of the Alipay (Taobao) interface parameters
#######################################################################################
from django.conf import settings
# Partner ID: a 16-digit number starting with 2088
ALIPAY_PARTNER = ''
# Alipay gateway
ALIPAY_GATEWAY = 'https://mapi.alipay.com/gateway.do'
ALIPAY_WAP_GATEWAY = 'http://wappaygw.alipay.com/service/rest.htm'
# Notification verification gateway
NOTIFY_GATEWAY_URL = 'https://mapi.alipay.com/gateway.do?service=notify_verify&partner=%s&notify_id=%s'
# COD: Cash On Delivery
# Seller information
ALIPAY_SELL_EMAIL=None
ALIPAY_SELL_ID=''
# Character set
ALIPAY_INPUT_CHARSET = 'utf-8'
ALIPAY_SIGN_TYPE = 'MD5'
# Transport mode: choose https if your server supports SSL, otherwise http
ALIPAY_TRANSPORT='https'
# Security key: a 32-character string of digits and letters
ALIPAY_KEY = ''
# Page to redirect to after payment (synchronous notification); must be a full http:// URL without custom query parameters such as ?id=123
ALIPAY_RETURN_URL=''
# Page for asynchronous server notification during the transaction; must be a full http:// URL without custom query parameters such as ?id=123
ALIPAY_NOTIFY_URL=''
ALIPAY_SHOW_URL=''
ALIPAY_DATE_FORMAT = ('%Y-%m-%d %H:%M:%S',)
# Alipay service (interface) definitions
SERVICE = (
'create_direct_pay_by_user', # instant payment to account
'create_partner_trade_by_buyer', # escrow (secured) transaction
'send_goods_confirm_by_platform', # confirm shipment
'trade_create_by_buyer', # standard dual-function interface
'alipay.mobile.qrcode.manage', # QR code management
)
PAYMENT_TYPE = (
('商品购买','1'), # goods purchase
('服务购买','2'), # service purchase
('网络拍卖','3'), # online auction
('捐赠','4'), # donation
('邮费补偿','5'), # postage compensation
('奖金','6'), # bonus / prize
('基金购买','7'), # fund purchase
('机票购买','8'), # air ticket purchase
)
PAYMETHOD = (
'creditPay', # 'credit payment' # requires credit payment to be enabled
'directPay', # 'direct payment' # balance payment; the defaultbank parameter must not be set
'bankPay', # 'bank payment directly' # requires the pure-gateway product and the defaultbank parameter
'cash', # 'paid by cash'
'cartoon', # 'paid by bank card thourgh alipay gateway'
)
LOGISTICS_TYPE = (
'POST', # ordinary mail
'EXPRESS', # other express courier
'EMS', # EMS
)
LOGISTICS_PAYMENT = (
'BUYER_PAY', # buyer pays the shipping fee
'SELLER_PAY', # seller pays the shipping fee
'BUYER_PAY_AFTER_RECEIVE', # buyer pays on delivery; shipping is shown but not included in the total
)
# Trade status values
TRADE_STATUS = (
'WAIT_BUYER_PAY', # waiting for the buyer to pay
'WAIT_SELLER_SEND_GOODS', # buyer has paid, waiting for the seller to ship
'WAIT_BUYER_CONFIRM_GOODS', # seller has shipped, waiting for the buyer to confirm receipt
'TRADE_FINISHED', # buyer has confirmed receipt, trade finished
'TRADE_CLOSED', # trade closed midway (ended without completing successfully)
'COD_WAIT_SELLER_SEND_GOODS', # waiting for the seller to ship (cash on delivery)
'COD_WAIT_BUYER_PAY', # waiting for the buyer to sign for the goods and pay (cash on delivery)
'COD_WAIT_SYS_PAY_SELLER', # goods signed for; waiting for the system to pay the seller (cash on delivery)
)
# Basic parameters; every value marked None is filled in (or removed) when the business logic runs
BASIC_PARAMS={
# basic parameters
'_input_charset': ALIPAY_INPUT_CHARSET,
'partner': ALIPAY_PARTNER,
'payment_type': dict(PAYMENT_TYPE)['商品购买'],
'sign_type':ALIPAY_SIGN_TYPE, # signing method
'sign':None,
# Seller parameters: exactly one of seller_id, seller_account_name and seller_email is required
'seller_id':ALIPAY_SELL_ID,
'seller_account_name':None,
'seller_email':ALIPAY_SELL_EMAIL,
# parameters required for the request
'key':ALIPAY_KEY,
}
# Business (order) information
BIZ_PARAMS={
# service (interface) type
'service':None,
# notification URLs
'notify_url':None,
'return_url':None,
'show_url':None,# merchant website URL
# order
'out_trade_no':None,# must match the unique order number in your site's order system
'subject':None,# order name; shown as the "product name" in the Alipay checkout and in the trade-management listing
'body':None,# order description/details/notes; shown as the "product description" in the Alipay checkout
'total_fee':None,# order total; shown as the "amount due" in the Alipay checkout
'quantity':None,# quantity
'price':None,# price
'discount':None,# discount
# logistics
'logistics_type':'EXPRESS',# other express courier
'logistics_payment':'BUYER_PAY',# borne by the buyer
'logistics_fee':None,
}
EXTEND_PARAMS={
# Extended parameters - anti-phishing
'anti_phishing_key':None,
'exter_invoke_ip': None,
# Extended parameters - custom parameters
'buyer_email':None,
'extra_common_param':None,
# Extended parameters - profit sharing
'royalty_type':None,
'royalty_parameters':None,
}
# Instant-payment parameters
INSTANT_PARAMS={
# parameters specific to instant payment
'paymethod' : 'directPay', # default payment method, one of four values: bankPay (online banking); cartoon (bank card); directPay (balance); CASH (offline outlet)
'defaultbank' : None, # default online-banking code; see http://club.alipay.com/read.php?tid=8681379 for the list of codes
}
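# Illustrative sketch (not part of the original module): how these parameter dicts are
# typically merged and signed. The MD5 scheme below (drop empty and sign-related
# fields, sort the keys, join them as k=v pairs with '&', append ALIPAY_KEY and take
# the hex digest) is the conventional legacy Alipay signature and is an assumption
# here, not something defined in this file; the order values are hypothetical.
def _example_build_signed_params():
    import hashlib
    params = BASIC_PARAMS.copy()
    params.update(BIZ_PARAMS)
    params.update({'service': 'create_direct_pay_by_user',
                   'out_trade_no': 'ORDER-0001',  # hypothetical order number
                   'subject': 'demo', 'total_fee': '1.00'})
    key = params.pop('key', ALIPAY_KEY)
    items = sorted((k, v) for k, v in params.items()
                   if v and k not in ('sign', 'sign_type'))
    src = '&'.join('%s=%s' % (k, v) for k, v in items) + key
    params['sign'] = hashlib.md5(src.encode('utf-8')).hexdigest()
    return params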
# Parameters for the confirm-shipment interface
LOG_PARAMS={
# basic parameters
'service':'send_goods_confirm_by_platform',
'partner':ALIPAY_PARTNER,
'input_charset':ALIPAY_INPUT_CHARSET,
'sign':None,
'sign_type':ALIPAY_SIGN_TYPE,
# business parameters
'trade_no':None,
'logistics_name':None,
'transport_type':None,
} | bsd-3-clause |
ganeti-github-testing/ganeti-test-1 | lib/utils/process.py | 3 | 31604 | #
#
# Copyright (C) 2006, 2007, 2010, 2011, 2012 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utility functions for processes.
"""
import os
import sys
import subprocess
import errno
import select
import logging
import signal
import resource
from cStringIO import StringIO
from ganeti import errors
from ganeti import constants
from ganeti import compat
from ganeti.utils import retry as utils_retry
from ganeti.utils import wrapper as utils_wrapper
from ganeti.utils import text as utils_text
from ganeti.utils import io as utils_io
from ganeti.utils import algo as utils_algo
#: when set to True, L{RunCmd} is disabled
_no_fork = False
(_TIMEOUT_NONE,
_TIMEOUT_TERM,
_TIMEOUT_KILL) = range(3)
def DisableFork():
"""Disables the use of fork(2).
"""
global _no_fork # pylint: disable=W0603
_no_fork = True
class RunResult(object):
"""Holds the result of running external programs.
@type exit_code: int
@ivar exit_code: the exit code of the program, or None (if the program
didn't exit())
@type signal: int or None
@ivar signal: the signal that caused the program to finish, or None
(if the program wasn't terminated by a signal)
@type stdout: str
@ivar stdout: the standard output of the program
@type stderr: str
@ivar stderr: the standard error of the program
@type failed: boolean
@ivar failed: True in case the program was
terminated by a signal or exited with a non-zero exit code
@type failed_by_timeout: boolean
@ivar failed_by_timeout: True in case the program was
terminated by timeout
@ivar fail_reason: a string detailing the termination reason
"""
__slots__ = ["exit_code", "signal", "stdout", "stderr",
"failed", "failed_by_timeout", "fail_reason", "cmd"]
def __init__(self, exit_code, signal_, stdout, stderr, cmd, timeout_action,
timeout):
self.cmd = cmd
self.exit_code = exit_code
self.signal = signal_
self.stdout = stdout
self.stderr = stderr
self.failed = (signal_ is not None or exit_code != 0)
self.failed_by_timeout = timeout_action != _TIMEOUT_NONE
fail_msgs = []
if self.signal is not None:
fail_msgs.append("terminated by signal %s" % self.signal)
elif self.exit_code is not None:
fail_msgs.append("exited with exit code %s" % self.exit_code)
else:
fail_msgs.append("unable to determine termination reason")
if timeout_action == _TIMEOUT_TERM:
fail_msgs.append("terminated after timeout of %.2f seconds" % timeout)
elif timeout_action == _TIMEOUT_KILL:
fail_msgs.append(("force termination after timeout of %.2f seconds"
" and linger for another %.2f seconds") %
(timeout, constants.CHILD_LINGER_TIMEOUT))
if fail_msgs and self.failed:
self.fail_reason = utils_text.CommaJoin(fail_msgs)
else:
self.fail_reason = None
if self.failed:
logging.debug("Command '%s' failed (%s); output: %s",
self.cmd, self.fail_reason, self.output)
def _GetOutput(self):
"""Returns the combined stdout and stderr for easier usage.
"""
return self.stdout + self.stderr
output = property(_GetOutput, None, None, "Return full output")
def _BuildCmdEnvironment(env, reset):
"""Builds the environment for an external program.
"""
if reset:
cmd_env = {}
else:
cmd_env = os.environ.copy()
cmd_env["LC_ALL"] = "C"
if env is not None:
cmd_env.update(env)
return cmd_env
def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False,
interactive=False, timeout=None, noclose_fds=None,
input_fd=None, postfork_fn=None):
"""Execute a (shell) command.
The command should not read from its standard input, as it will be
closed.
@type cmd: string or list
@param cmd: Command to run
@type env: dict
@param env: Additional environment variables
@type output: str
@param output: if desired, the output of the command can be
saved in a file instead of the RunResult instance; this
parameter denotes the file name (if not None)
@type cwd: string
@param cwd: if specified, will be used as the working
directory for the command; the default will be /
@type reset_env: boolean
@param reset_env: whether to reset or keep the default os environment
@type interactive: boolean
@param interactive: whether we pipe stdin, stdout and stderr
(default behaviour) or run the command interactive
@type timeout: int
@param timeout: If not None, timeout in seconds until child process gets
killed
@type noclose_fds: list
@param noclose_fds: list of additional (fd >=3) file descriptors to leave
open for the child process
@type input_fd: C{file}-like object or numeric file descriptor
@param input_fd: File descriptor for process' standard input
@type postfork_fn: Callable receiving PID as parameter
@param postfork_fn: Callback run after fork but before timeout
@rtype: L{RunResult}
@return: RunResult instance
@raise errors.ProgrammerError: if we call this when forks are disabled
"""
if _no_fork:
raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")
if output and interactive:
raise errors.ProgrammerError("Parameters 'output' and 'interactive' can"
" not be provided at the same time")
if not (output is None or input_fd is None):
# The current logic in "_RunCmdFile", which is used when output is defined,
# does not support input files (not hard to implement, though)
raise errors.ProgrammerError("Parameters 'output' and 'input_fd' can"
" not be used at the same time")
if isinstance(cmd, basestring):
strcmd = cmd
shell = True
else:
cmd = [str(val) for val in cmd]
strcmd = utils_text.ShellQuoteArgs(cmd)
shell = False
if output:
logging.info("RunCmd %s, output file '%s'", strcmd, output)
else:
logging.info("RunCmd %s", strcmd)
cmd_env = _BuildCmdEnvironment(env, reset_env)
try:
if output is None:
out, err, status, timeout_action = _RunCmdPipe(cmd, cmd_env, shell, cwd,
interactive, timeout,
noclose_fds, input_fd,
postfork_fn=postfork_fn)
else:
if postfork_fn:
raise errors.ProgrammerError("postfork_fn is not supported if output"
" should be captured")
assert input_fd is None
timeout_action = _TIMEOUT_NONE
status = _RunCmdFile(cmd, cmd_env, shell, output, cwd, noclose_fds)
out = err = ""
except OSError, err:
if err.errno == errno.ENOENT:
raise errors.OpExecError("Can't execute '%s': not found (%s)" %
(strcmd, err))
else:
raise
if status >= 0:
exitcode = status
signal_ = None
else:
exitcode = None
signal_ = -status
return RunResult(exitcode, signal_, out, err, strcmd, timeout_action, timeout)
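# Illustrative sketch (not part of the original module): a typical invocation; any
# short, always-succeeding command works, "true" is used here as a stand-in.
def _ExampleRunCmd(): # pragma: no cover
  result = RunCmd(["true"])
  assert not result.failed and result.exit_code == 0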
def SetupDaemonEnv(cwd="/", umask=077):
"""Setup a daemon's environment.
This should be called between the first and second fork, due to
setsid usage.
@param cwd: the directory to which to chdir
@param umask: the umask to setup
"""
os.chdir(cwd)
os.umask(umask)
os.setsid()
def SetupDaemonFDs(output_file, output_fd):
"""Setups up a daemon's file descriptors.
@param output_file: if not None, the file to which to redirect
stdout/stderr
@param output_fd: if not None, the file descriptor for stdout/stderr
"""
# check that at most one is defined
assert [output_file, output_fd].count(None) >= 1
# Open /dev/null (read-only, only for stdin)
devnull_fd = os.open(os.devnull, os.O_RDONLY)
output_close = True
if output_fd is not None:
output_close = False
elif output_file is not None:
# Open output file
try:
output_fd = os.open(output_file,
os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0600)
except EnvironmentError, err:
raise Exception("Opening output file failed: %s" % err)
else:
output_fd = os.open(os.devnull, os.O_WRONLY)
# Redirect standard I/O
os.dup2(devnull_fd, 0)
os.dup2(output_fd, 1)
os.dup2(output_fd, 2)
if devnull_fd > 2:
utils_wrapper.CloseFdNoError(devnull_fd)
if output_close and output_fd > 2:
utils_wrapper.CloseFdNoError(output_fd)
def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
pidfile=None):
"""Start a daemon process after forking twice.
@type cmd: string or list
@param cmd: Command to run
@type env: dict
@param env: Additional environment variables
@type cwd: string
@param cwd: Working directory for the program
@type output: string
@param output: Path to file in which to save the output
@type output_fd: int
@param output_fd: File descriptor for output
@type pidfile: string
@param pidfile: Process ID file
@rtype: int
@return: Daemon process ID
@raise errors.ProgrammerError: if we call this when forks are disabled
"""
if _no_fork:
raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
" disabled")
if output and not (bool(output) ^ (output_fd is not None)):
raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
" specified")
if isinstance(cmd, basestring):
cmd = ["/bin/sh", "-c", cmd]
strcmd = utils_text.ShellQuoteArgs(cmd)
if output:
logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
else:
logging.debug("StartDaemon %s", strcmd)
cmd_env = _BuildCmdEnvironment(env, False)
# Create pipe for sending PID back
(pidpipe_read, pidpipe_write) = os.pipe()
try:
try:
# Create pipe for sending error messages
(errpipe_read, errpipe_write) = os.pipe()
try:
try:
# First fork
pid = os.fork()
if pid == 0:
try:
# Child process, won't return
_StartDaemonChild(errpipe_read, errpipe_write,
pidpipe_read, pidpipe_write,
cmd, cmd_env, cwd,
output, output_fd, pidfile)
finally:
# Well, maybe child process failed
os._exit(1) # pylint: disable=W0212
finally:
utils_wrapper.CloseFdNoError(errpipe_write)
# Wait for daemon to be started (or an error message to
# arrive) and read up to 100 KB as an error message
errormsg = utils_wrapper.RetryOnSignal(os.read, errpipe_read,
100 * 1024)
finally:
utils_wrapper.CloseFdNoError(errpipe_read)
finally:
utils_wrapper.CloseFdNoError(pidpipe_write)
# Read up to 128 bytes for PID
pidtext = utils_wrapper.RetryOnSignal(os.read, pidpipe_read, 128)
finally:
utils_wrapper.CloseFdNoError(pidpipe_read)
# Try to avoid zombies by waiting for child process
try:
os.waitpid(pid, 0)
except OSError:
pass
if errormsg:
raise errors.OpExecError("Error when starting daemon process: %r" %
errormsg)
try:
return int(pidtext)
except (ValueError, TypeError), err:
raise errors.OpExecError("Error while trying to parse PID %r: %s" %
(pidtext, err))
def _StartDaemonChild(errpipe_read, errpipe_write,
pidpipe_read, pidpipe_write,
args, env, cwd,
output, fd_output, pidfile):
"""Child process for starting daemon.
"""
try:
# Close parent's side
utils_wrapper.CloseFdNoError(errpipe_read)
utils_wrapper.CloseFdNoError(pidpipe_read)
# First child process
SetupDaemonEnv()
# And fork for the second time
pid = os.fork()
if pid != 0:
# Exit first child process
os._exit(0) # pylint: disable=W0212
# Make sure pipe is closed on execv* (and thereby notifies
# original process)
utils_wrapper.SetCloseOnExecFlag(errpipe_write, True)
# List of file descriptors to be left open
noclose_fds = [errpipe_write]
# Open PID file
if pidfile:
fd_pidfile = utils_io.WritePidFile(pidfile)
# Keeping the file open to hold the lock
noclose_fds.append(fd_pidfile)
utils_wrapper.SetCloseOnExecFlag(fd_pidfile, False)
else:
fd_pidfile = None
SetupDaemonFDs(output, fd_output)
# Send daemon PID to parent
utils_wrapper.RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))
# Close all file descriptors except stdio and error message pipe
CloseFDs(noclose_fds=noclose_fds)
# Change working directory
os.chdir(cwd)
if env is None:
os.execvp(args[0], args)
else:
os.execvpe(args[0], args, env)
except: # pylint: disable=W0702
try:
# Report errors to original process
WriteErrorToFD(errpipe_write, str(sys.exc_info()[1]))
except: # pylint: disable=W0702
# Ignore errors in error handling
pass
os._exit(1) # pylint: disable=W0212
def WriteErrorToFD(fd, err):
"""Possibly write an error message to a fd.
@type fd: None or int (file descriptor)
@param fd: if not None, the error will be written to this fd
@param err: string, the error message
"""
if fd is None:
return
if not err:
err = "<unknown error>"
utils_wrapper.RetryOnSignal(os.write, fd, err)
def _CheckIfAlive(child):
"""Raises L{utils_retry.RetryAgain} if child is still alive.
@raises utils_retry.RetryAgain: If child is still alive
"""
if child.poll() is None:
raise utils_retry.RetryAgain()
def _WaitForProcess(child, timeout):
"""Waits for the child to terminate or until we reach timeout.
"""
try:
utils_retry.Retry(_CheckIfAlive, (1.0, 1.2, 5.0), max(0, timeout),
args=[child])
except utils_retry.RetryTimeout:
pass
def _RunCmdPipe(cmd, env, via_shell, cwd, interactive, timeout, noclose_fds,
input_fd, postfork_fn=None,
_linger_timeout=constants.CHILD_LINGER_TIMEOUT):
"""Run a command and return its output.
@type cmd: string or list
@param cmd: Command to run
@type env: dict
@param env: The environment to use
@type via_shell: bool
@param via_shell: if we should run via the shell
@type cwd: string
@param cwd: the working directory for the program
@type interactive: boolean
@param interactive: Run command interactive (without piping)
@type timeout: int
  @param timeout: Timeout after which the program gets terminated
@type noclose_fds: list
@param noclose_fds: list of additional (fd >=3) file descriptors to leave
open for the child process
@type input_fd: C{file}-like object or numeric file descriptor
@param input_fd: File descriptor for process' standard input
@type postfork_fn: Callable receiving PID as parameter
@param postfork_fn: Function run after fork but before timeout
@rtype: tuple
@return: (out, err, status)
"""
poller = select.poll()
if interactive:
stderr = None
stdout = None
else:
stderr = subprocess.PIPE
stdout = subprocess.PIPE
if input_fd:
stdin = input_fd
elif interactive:
stdin = None
else:
stdin = subprocess.PIPE
if noclose_fds:
preexec_fn = lambda: CloseFDs(noclose_fds)
close_fds = False
else:
preexec_fn = None
close_fds = True
child = subprocess.Popen(cmd, shell=via_shell,
stderr=stderr,
stdout=stdout,
stdin=stdin,
close_fds=close_fds, env=env,
cwd=cwd,
preexec_fn=preexec_fn)
if postfork_fn:
postfork_fn(child.pid)
out = StringIO()
err = StringIO()
linger_timeout = None
if timeout is None:
poll_timeout = None
else:
poll_timeout = utils_algo.RunningTimeout(timeout, True).Remaining
msg_timeout = ("Command %s (%d) run into execution timeout, terminating" %
(cmd, child.pid))
msg_linger = ("Command %s (%d) run into linger timeout, killing" %
(cmd, child.pid))
timeout_action = _TIMEOUT_NONE
# subprocess: "If the stdin argument is PIPE, this attribute is a file object
# that provides input to the child process. Otherwise, it is None."
assert (stdin == subprocess.PIPE) ^ (child.stdin is None), \
"subprocess' stdin did not behave as documented"
if not interactive:
if child.stdin is not None:
child.stdin.close()
poller.register(child.stdout, select.POLLIN)
poller.register(child.stderr, select.POLLIN)
fdmap = {
child.stdout.fileno(): (out, child.stdout),
child.stderr.fileno(): (err, child.stderr),
}
for fd in fdmap:
utils_wrapper.SetNonblockFlag(fd, True)
while fdmap:
if poll_timeout:
pt = poll_timeout() * 1000
if pt < 0:
if linger_timeout is None:
logging.warning(msg_timeout)
if child.poll() is None:
timeout_action = _TIMEOUT_TERM
utils_wrapper.IgnoreProcessNotFound(os.kill, child.pid,
signal.SIGTERM)
linger_timeout = \
utils_algo.RunningTimeout(_linger_timeout, True).Remaining
pt = linger_timeout() * 1000
if pt < 0:
break
else:
pt = None
pollresult = utils_wrapper.RetryOnSignal(poller.poll, pt)
for fd, event in pollresult:
if event & select.POLLIN or event & select.POLLPRI:
data = fdmap[fd][1].read()
# no data from read signifies EOF (the same as POLLHUP)
if not data:
poller.unregister(fd)
del fdmap[fd]
continue
fdmap[fd][0].write(data)
if (event & select.POLLNVAL or event & select.POLLHUP or
event & select.POLLERR):
poller.unregister(fd)
del fdmap[fd]
if timeout is not None:
assert callable(poll_timeout)
# We have no I/O left but it might still run
if child.poll() is None:
_WaitForProcess(child, poll_timeout())
# Terminate if still alive after timeout
if child.poll() is None:
if linger_timeout is None:
logging.warning(msg_timeout)
timeout_action = _TIMEOUT_TERM
utils_wrapper.IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
lt = _linger_timeout
else:
lt = linger_timeout()
_WaitForProcess(child, lt)
# Okay, still alive after timeout and linger timeout? Kill it!
if child.poll() is None:
timeout_action = _TIMEOUT_KILL
logging.warning(msg_linger)
utils_wrapper.IgnoreProcessNotFound(os.kill, child.pid, signal.SIGKILL)
out = out.getvalue()
err = err.getvalue()
status = child.wait()
return out, err, status, timeout_action
def _RunCmdFile(cmd, env, via_shell, output, cwd, noclose_fds):
"""Run a command and save its output to a file.
@type cmd: string or list
@param cmd: Command to run
@type env: dict
@param env: The environment to use
@type via_shell: bool
@param via_shell: if we should run via the shell
@type output: str
@param output: the filename in which to save the output
@type cwd: string
@param cwd: the working directory for the program
@type noclose_fds: list
@param noclose_fds: list of additional (fd >=3) file descriptors to leave
open for the child process
@rtype: int
@return: the exit status
"""
fh = open(output, "a")
if noclose_fds:
preexec_fn = lambda: CloseFDs(noclose_fds + [fh.fileno()])
close_fds = False
else:
preexec_fn = None
close_fds = True
try:
child = subprocess.Popen(cmd, shell=via_shell,
stderr=subprocess.STDOUT,
stdout=fh,
stdin=subprocess.PIPE,
close_fds=close_fds, env=env,
cwd=cwd,
preexec_fn=preexec_fn)
child.stdin.close()
status = child.wait()
finally:
fh.close()
return status
def RunParts(dir_name, env=None, reset_env=False):
"""Run Scripts or programs in a directory
@type dir_name: string
@param dir_name: absolute path to a directory
@type env: dict
@param env: The environment to use
@type reset_env: boolean
@param reset_env: whether to reset or keep the default os environment
@rtype: list of tuples
@return: list of (name, (one of RUNDIR_STATUS), RunResult)
"""
rr = []
try:
dir_contents = utils_io.ListVisibleFiles(dir_name)
except OSError, err:
logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
return rr
for relname in sorted(dir_contents):
fname = utils_io.PathJoin(dir_name, relname)
if not (constants.EXT_PLUGIN_MASK.match(relname) is not None and
utils_wrapper.IsExecutable(fname)):
rr.append((relname, constants.RUNPARTS_SKIP, None))
else:
try:
result = RunCmd([fname], env=env, reset_env=reset_env)
except Exception, err: # pylint: disable=W0703
rr.append((relname, constants.RUNPARTS_ERR, str(err)))
else:
rr.append((relname, constants.RUNPARTS_RUN, result))
return rr
def _GetProcStatusPath(pid):
"""Returns the path for a PID's proc status file.
@type pid: int
@param pid: Process ID
@rtype: string
"""
return "/proc/%d/status" % pid
def IsProcessAlive(pid):
"""Check if a given pid exists on the system.
@note: zombie status is not handled, so zombie processes
will be returned as alive
@type pid: int
@param pid: the process ID to check
@rtype: boolean
@return: True if the process exists
"""
def _TryStat(name):
try:
os.stat(name)
return True
except EnvironmentError, err:
if err.errno in (errno.ENOENT, errno.ENOTDIR):
return False
elif err.errno == errno.EINVAL:
raise utils_retry.RetryAgain(err)
raise
assert isinstance(pid, int), "pid must be an integer"
if pid <= 0:
return False
# /proc in a multiprocessor environment can have strange behaviors.
# Retry the os.stat a few times until we get a good result.
try:
return utils_retry.Retry(_TryStat, (0.01, 1.5, 0.1), 0.5,
args=[_GetProcStatusPath(pid)])
except utils_retry.RetryTimeout, err:
err.RaiseInner()
def IsDaemonAlive(name):
"""Determines whether a daemon is alive
@type name: string
@param name: daemon name
@rtype: boolean
@return: True if daemon is running, False otherwise
"""
return IsProcessAlive(utils_io.ReadPidFile(utils_io.DaemonPidFileName(name)))
def _ParseSigsetT(sigset):
"""Parse a rendered sigset_t value.
This is the opposite of the Linux kernel's fs/proc/array.c:render_sigset_t
function.
@type sigset: string
@param sigset: Rendered signal set from /proc/$pid/status
@rtype: set
@return: Set of all enabled signal numbers
"""
result = set()
signum = 0
for ch in reversed(sigset):
chv = int(ch, 16)
# The following could be done in a loop, but it's easier to read and
# understand in the unrolled form
if chv & 1:
result.add(signum + 1)
if chv & 2:
result.add(signum + 2)
if chv & 4:
result.add(signum + 3)
if chv & 8:
result.add(signum + 4)
signum += 4
return result
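# A short worked example for _ParseSigsetT (values are illustrative, not read from
# a real /proc entry): the rendered mask is parsed right to left, one hex digit per
# four signal numbers, so
#   _ParseSigsetT("0000000000000003") == set([1, 2])          # SIGHUP, SIGINT
#   _ParseSigsetT("0000000000014003") == set([1, 2, 15, 17])  # + SIGTERM, SIGCHLD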
def _GetProcStatusField(pstatus, field):
"""Retrieves a field from the contents of a proc status file.
@type pstatus: string
@param pstatus: Contents of /proc/$pid/status
@type field: string
@param field: Name of field whose value should be returned
@rtype: string
"""
for line in pstatus.splitlines():
parts = line.split(":", 1)
if len(parts) < 2 or parts[0] != field:
continue
return parts[1].strip()
return None
def IsProcessHandlingSignal(pid, signum, status_path=None):
"""Checks whether a process is handling a signal.
@type pid: int
@param pid: Process ID
@type signum: int
@param signum: Signal number
@rtype: bool
"""
if status_path is None:
status_path = _GetProcStatusPath(pid)
try:
proc_status = utils_io.ReadFile(status_path)
except EnvironmentError, err:
# In at least one case, reading /proc/$pid/status failed with ESRCH.
if err.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL, errno.ESRCH):
return False
raise
sigcgt = _GetProcStatusField(proc_status, "SigCgt")
if sigcgt is None:
raise RuntimeError("%s is missing 'SigCgt' field" % status_path)
# Now check whether signal is handled
return signum in _ParseSigsetT(sigcgt)
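# Illustrative use (PID and outcome assumed): this only reports signals for which
# the target process has installed its own handler, judging by the SigCgt mask in
# /proc/<pid>/status, e.g. IsProcessHandlingSignal(1234, signal.SIGHUP) is False
# if PID 1234 left SIGHUP at its default disposition.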
def Daemonize(logfile):
"""Daemonize the current process.
This detaches the current process from the controlling terminal and
runs it in the background as a daemon.
@type logfile: str
@param logfile: the logfile to which we should redirect stdout/stderr
@rtype: tuple; (int, callable)
@return: File descriptor of pipe(2) which must be closed to notify parent
process and a callable to reopen log files
"""
# pylint: disable=W0212
# yes, we really want os._exit
# TODO: do another attempt to merge Daemonize and StartDaemon, or at
# least abstract the pipe functionality between them
# Create pipe for sending error messages
(rpipe, wpipe) = os.pipe()
# this might fail
pid = os.fork()
if (pid == 0): # The first child.
SetupDaemonEnv()
# this might fail
pid = os.fork() # Fork a second child.
if (pid == 0): # The second child.
utils_wrapper.CloseFdNoError(rpipe)
else:
# exit() or _exit()? See below.
os._exit(0) # Exit parent (the first child) of the second child.
else:
utils_wrapper.CloseFdNoError(wpipe)
# Wait for daemon to be started (or an error message to
# arrive) and read up to 100 KB as an error message
errormsg = utils_wrapper.RetryOnSignal(os.read, rpipe, 100 * 1024)
if errormsg:
sys.stderr.write("Error when starting daemon process: %r\n" % errormsg)
rcode = 1
else:
rcode = 0
os._exit(rcode) # Exit parent of the first child.
reopen_fn = compat.partial(SetupDaemonFDs, logfile, None)
# Open logs for the first time
reopen_fn()
return (wpipe, reopen_fn)
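# Illustrative follow-up in the daemonized process (hypothetical start-up code, not
# part of this module): write an error message to the returned pipe fd if
# initialisation fails, or close it without writing on success; either unblocks the
# os.read() in the original parent, which then exits with code 1 or 0 respectively.
#
#   wpipe, reopen_fn = Daemonize("/var/log/mydaemon.log")
#   try:
#     _SetupMyDaemon()                  # hypothetical initialisation
#   except Exception, err:
#     os.write(wpipe, str(err))
#     os._exit(1)
#   utils_wrapper.CloseFdNoError(wpipe)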
def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
waitpid=False):
"""Kill a process given by its pid.
@type pid: int
@param pid: The PID to terminate.
@type signal_: int
@param signal_: The signal to send, by default SIGTERM
@type timeout: int
@param timeout: The timeout after which, if the process is still alive,
a SIGKILL will be sent. If not positive, no such checking
will be done
@type waitpid: boolean
@param waitpid: If true, we should waitpid on this process after
sending signals, since it's our own child and otherwise it
would remain as zombie
"""
def _helper(pid, signal_, wait):
"""Simple helper to encapsulate the kill/waitpid sequence"""
if utils_wrapper.IgnoreProcessNotFound(os.kill, pid, signal_) and wait:
try:
os.waitpid(pid, os.WNOHANG)
except OSError:
pass
if pid <= 0:
# kill with pid=0 == suicide
raise errors.ProgrammerError("Invalid pid given '%s'" % pid)
if not IsProcessAlive(pid):
return
_helper(pid, signal_, waitpid)
if timeout <= 0:
return
def _CheckProcess():
if not IsProcessAlive(pid):
return
try:
(result_pid, _) = os.waitpid(pid, os.WNOHANG)
except OSError:
raise utils_retry.RetryAgain()
if result_pid > 0:
return
raise utils_retry.RetryAgain()
try:
# Wait up to $timeout seconds
utils_retry.Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
except utils_retry.RetryTimeout:
pass
if IsProcessAlive(pid):
# Kill process if it's still alive
_helper(pid, signal.SIGKILL, waitpid)
def RunInSeparateProcess(fn, *args):
"""Runs a function in a separate process.
Note: Only boolean return values are supported.
@type fn: callable
@param fn: Function to be called
@rtype: bool
@return: Function's result
"""
pid = os.fork()
if pid == 0:
# Child process
try:
# In case the function uses temporary files
utils_wrapper.ResetTempfileModule()
# Call function
result = int(bool(fn(*args)))
assert result in (0, 1)
except: # pylint: disable=W0702
logging.exception("Error while calling function in separate process")
# 0 and 1 are reserved for the return value
result = 33
os._exit(result) # pylint: disable=W0212
# Parent process
# Avoid zombies and check exit code
(_, status) = os.waitpid(pid, 0)
if os.WIFSIGNALED(status):
exitcode = None
signum = os.WTERMSIG(status)
else:
exitcode = os.WEXITSTATUS(status)
signum = None
if not (exitcode in (0, 1) and signum is None):
raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
(exitcode, signum))
return bool(exitcode)
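# Illustrative call (helper name assumed): the child's boolean result travels back
# through the exit code, which is why only True/False return values are supported.
#
#   def _PathExists(path):             # hypothetical helper returning a boolean
#     return os.path.exists(path)
#   ok = RunInSeparateProcess(_PathExists, "/var/run/something.pid")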
def CloseFDs(noclose_fds=None):
"""Close file descriptors.
This closes all file descriptors above 2 (i.e. except
stdin/out/err).
@type noclose_fds: list or None
@param noclose_fds: if given, it denotes a list of file descriptor
that should not be closed
"""
# Default maximum for the number of available file descriptors.
if 'SC_OPEN_MAX' in os.sysconf_names:
try:
MAXFD = os.sysconf('SC_OPEN_MAX')
if MAXFD < 0:
MAXFD = 1024
except OSError:
MAXFD = 1024
else:
MAXFD = 1024
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if (maxfd == resource.RLIM_INFINITY):
maxfd = MAXFD
# Iterate through and close all file descriptors (except the standard ones)
for fd in range(3, maxfd):
if noclose_fds and fd in noclose_fds:
continue
utils_wrapper.CloseFdNoError(fd)
| bsd-2-clause |
igors10099/ilona | p2pool/bitcoin/p2p.py | 36 | 5880 | '''
Implementation of Bitcoin's p2p protocol
'''
import random
import sys
import time
from twisted.internet import protocol, task
import p2pool
from . import data as bitcoin_data
from p2pool.util import deferral, p2protocol, pack, variable
class Protocol(p2protocol.Protocol):
def __init__(self, net):
p2protocol.Protocol.__init__(self, net.P2P_PREFIX, 1000000)
def connectionMade(self):
self.send_version(
version=32200,
services=1,
time=int(time.time()),
addr_to=dict(
services=1,
address=self.transport.getPeer().host,
port=self.transport.getPeer().port,
),
addr_from=dict(
services=1,
address=self.transport.getHost().host,
port=self.transport.getHost().port,
),
nonce=random.randrange(2**64),
sub_version_num='/P2Pool:%s/' % (p2pool.__version__,),
start_height=0,
)
message_version = pack.ComposedType([
('version', pack.IntType(32)),
('services', pack.IntType(64)),
('time', pack.IntType(64)),
('addr_to', bitcoin_data.address_type),
('addr_from', bitcoin_data.address_type),
('nonce', pack.IntType(64)),
('sub_version_num', pack.VarStrType()),
('start_height', pack.IntType(32)),
])
def handle_version(self, version, services, time, addr_to, addr_from, nonce, sub_version_num, start_height):
self.send_verack()
message_verack = pack.ComposedType([])
def handle_verack(self):
self.get_block = deferral.ReplyMatcher(lambda hash: self.send_getdata(requests=[dict(type='block', hash=hash)]))
self.get_block_header = deferral.ReplyMatcher(lambda hash: self.send_getheaders(version=1, have=[], last=hash))
if hasattr(self.factory, 'resetDelay'):
self.factory.resetDelay()
if hasattr(self.factory, 'gotConnection'):
self.factory.gotConnection(self)
self.pinger = task.LoopingCall(self.send_ping)
self.pinger.start(30)
message_inv = pack.ComposedType([
('invs', pack.ListType(pack.ComposedType([
('type', pack.EnumType(pack.IntType(32), {1: 'tx', 2: 'block'})),
('hash', pack.IntType(256)),
]))),
])
def handle_inv(self, invs):
for inv in invs:
if inv['type'] == 'tx':
self.send_getdata(requests=[inv])
elif inv['type'] == 'block':
self.factory.new_block.happened(inv['hash'])
else:
print 'Unknown inv type', inv
message_getdata = pack.ComposedType([
('requests', pack.ListType(pack.ComposedType([
('type', pack.EnumType(pack.IntType(32), {1: 'tx', 2: 'block'})),
('hash', pack.IntType(256)),
]))),
])
message_getblocks = pack.ComposedType([
('version', pack.IntType(32)),
('have', pack.ListType(pack.IntType(256))),
('last', pack.PossiblyNoneType(0, pack.IntType(256))),
])
message_getheaders = pack.ComposedType([
('version', pack.IntType(32)),
('have', pack.ListType(pack.IntType(256))),
('last', pack.PossiblyNoneType(0, pack.IntType(256))),
])
message_getaddr = pack.ComposedType([])
message_addr = pack.ComposedType([
('addrs', pack.ListType(pack.ComposedType([
('timestamp', pack.IntType(32)),
('address', bitcoin_data.address_type),
]))),
])
def handle_addr(self, addrs):
for addr in addrs:
pass
message_tx = pack.ComposedType([
('tx', bitcoin_data.tx_type),
])
def handle_tx(self, tx):
self.factory.new_tx.happened(tx)
message_block = pack.ComposedType([
('block', bitcoin_data.block_type),
])
def handle_block(self, block):
block_hash = bitcoin_data.hash256(bitcoin_data.block_header_type.pack(block['header']))
self.get_block.got_response(block_hash, block)
self.get_block_header.got_response(block_hash, block['header'])
message_headers = pack.ComposedType([
('headers', pack.ListType(bitcoin_data.block_type)),
])
def handle_headers(self, headers):
for header in headers:
header = header['header']
self.get_block_header.got_response(bitcoin_data.hash256(bitcoin_data.block_header_type.pack(header)), header)
self.factory.new_headers.happened([header['header'] for header in headers])
message_ping = pack.ComposedType([])
def handle_ping(self):
pass
message_alert = pack.ComposedType([
('message', pack.VarStrType()),
('signature', pack.VarStrType()),
])
def handle_alert(self, message, signature):
pass # print 'ALERT:', (message, signature)
def connectionLost(self, reason):
if hasattr(self.factory, 'gotConnection'):
self.factory.gotConnection(None)
if hasattr(self, 'pinger'):
self.pinger.stop()
if p2pool.DEBUG:
print >>sys.stderr, 'Bitcoin connection lost. Reason:', reason.getErrorMessage()
class ClientFactory(protocol.ReconnectingClientFactory):
protocol = Protocol
maxDelay = 1
def __init__(self, net):
self.net = net
self.conn = variable.Variable(None)
self.new_block = variable.Event()
self.new_tx = variable.Event()
self.new_headers = variable.Event()
def buildProtocol(self, addr):
p = self.protocol(self.net)
p.factory = self
return p
def gotConnection(self, conn):
self.conn.set(conn)
def getProtocol(self):
return self.conn.get_not_none()
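# Illustrative usage sketch (the networks import and net name are assumptions, not
# part of this module): connect to a local bitcoind and let the Protocol above
# perform the version/verack handshake and report inv announcements.
#
#   from twisted.internet import reactor
#   from p2pool.bitcoin import networks
#   factory = ClientFactory(networks.nets['bitcoin'])
#   reactor.connectTCP('127.0.0.1', 8333, factory)
#   reactor.run()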
| gpl-3.0 |
reflectometry/direfl | direfl/api/sld_profile.py | 1 | 8500 | import cmath
from math import pi, ceil
import numpy as np
from numpy import sin, cos
from scipy.interpolate import interp1d
"""
References:
[Majkrzak2003] C. F. Majkrzak, N. F. Berk: Physica B 336 (2003) 27-38
Phase sensitive reflectometry and the unambiguous determination
of scattering length density profiles
"""
def interpolate(x, fx):
return interp1d(x, fx, bounds_error=False, fill_value=0)
def refr_idx(q, sld):
"""
Calculates the refractive index with given SLD [\AA^{-2}] and wavevector transfer q [
    \AA^{-1}]. The units can be arbitrarily chosen, but they must satisfy that sld/q**2 is
    dimensionless. The arguments should not be scaled by any constants.
For example
q = 0.01
sld = 1e-6
    The refractive index is complex if q < q_c (q_c being the critical edge) and it is
    purely real if q >= q_c.
"""
return cmath.sqrt(1 - 16 * pi * sld / (q ** 2))
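# Worked example for the docstring values above: with sld = 1e-6 the critical edge
# is q_c = sqrt(16*pi*sld) ~= 7.09e-3, so
#   refr_idx(0.01, 1e-6)  ~= (0.705+0j)   # q > q_c, effectively real
#   refr_idx(0.005, 1e-6) ~= 1.005j       # q < q_c, purely imaginary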
def reflection_matrix(q, sld, thickness, as_matrix=False):
"""
Calculates a reflection matrix used for calculating the reflectivity of
a slab of material (sld, thickness) for the wave vector transfer q.
See C.F. Majkrzak, N. F. Berk: Physical Review B Vol. 52 Nr 15, 1995:
Exact determination of the phase in neutron reflectometry, Equation (1)
    If as_matrix is True, a 2x2 matrix will be returned; if not, the matrix
    entries are returned as a, b, c, d
"""
n = refr_idx(q, sld)
theta = 0.5 * q * n * thickness
a, b, c, d = cos(theta), 1 / n * sin(theta), -n * sin(theta), cos(theta)
if as_matrix:
return np.array([[a, b], [c, d]])
return a, b, c, d
class SLDProfile(object):
def __init__(self):
pass
def as_matrix(self, q):
"""
        Returns the matrix coefficients in the Abeles formalism.
Returns w, x, y, z corresponding to the matrix [[w, x], [y, z]]
"""
return 0, 0, 0, 0
class ConstantSLDProfile(SLDProfile):
def __init__(self, sld, thickness, sigma=0):
self._sld = float(sld)
self._d = float(thickness)
self._r = float(sigma)
if self._r > 0:
raise NotImplementedError("Roughness not implemented yet")
def as_matrix(self, q):
return reflection_matrix(q, self._sld, self._d)
class ConcatSLDProfile(SLDProfile):
"""
The first element in sld_profiles is closest to the substrate
"""
def __init__(self, sld_profiles, reverse=False):
self._slds = sld_profiles
self._reverse = reverse
def as_matrix(self, q):
m = len(self._slds) * [None]
for i in range(0, len(self._slds)):
a, b, c, d = self._slds[i].as_matrix(q)
m[i] = np.array([[a, b], [c, d]])
if self._reverse:
m = list(reversed(m))
m = np.linalg.multi_dot(m)
return m[0][0], m[0][1], m[1][0], m[1][1]
class FunctionSLDProfile(SLDProfile):
def __init__(self, function, support, dx=0.1):
self._f = function
self._supp = support
self._dx = dx
self._xspace = np.linspace(support[0], support[1],
ceil((support[1] - support[0]) * 1 / dx))
self._feval = [self._f(x) for x in self._xspace]
self._m = [ConstantSLDProfile(fx, dx) for fx in self._feval]
self._concat = ConcatSLDProfile(self._m, reverse=False)
def as_matrix(self, q):
return self._concat.as_matrix(q)
class SlabsSLDProfile(SLDProfile):
def __init__(self, z, rho):
self._z = z
self._rho = rho
@classmethod
def from_sample(cls, sample, dz=0.1, dA=1e-4, probe=None):
from refl1d.probe import NeutronProbe
from refl1d.profile import Microslabs
if probe is None:
# The values T and L do not matter for 'just' building the SLD profile
probe = NeutronProbe(T=[1.0], L=[1.0])
slabs = Microslabs(1, dz)
sample.render(probe, slabs)
slabs.finalize(True, dA)
# ignore the imaginary part, this should be zero anyway
z, rho, irho = slabs.smooth_profile(dz)
if any(irho >= 1e-2):
raise RuntimeWarning("Sample contains absorptive SLD (imag >= 1e-2). "
"Reconstruction techniques do not support this.")
# refl1d likes to use SLD * 1e6
return cls(z, rho * 1e-6)
@classmethod
def from_slabs(cls, thickness, sld, roughness, precision=1):
        # Prefer the from_sample method, since it is easier to understand. This
        # method is just a fallback if you don't want the overhead of building
        # the refl1d Stacks only to feed the data in here.
        # WARNING: from_slabs and from_sample do not create the same slab profile;
        # the profiles are shifted relative to each other (presumably by
        # 3*roughness[0]).
from refl1d.profile import build_profile
w = thickness
sld = sld
        # This means the first layer is the substrate and we only have to include
        # the roughness effect. To do so, select a proper thickness (> 0) such
        # that the convolution with the gaussian kernel is sufficiently approximated.
if w[0] == 0:
            # refl1d usually uses 3 sigma here. Why 3? At 3 sigma the gaussian
            # smoothing is nearly zero, so the 'substrate' layer is big enough to
            # be approximated by this. Larger multiples (>= 5) would be more
            # accurate, but they need more computation.
w[0] = 3 * roughness[0]
z = np.linspace(0, sum(w) + roughness[-1] * 5, int(precision * sum(w)) + 1)
offsets = np.cumsum(w)
rho = build_profile(z, offsets, roughness, sld)
return cls(z, rho)
def thickness(self):
return max(self._z) - min(self._z)
def plot_profile(self, offset=0, reverse=False):
import pylab
rho = self._rho
if reverse:
rho = list(reversed(self._rho))
pylab.plot(self._z + offset, rho)
def as_matrix(self, q):
from functools import reduce
# len(dz) = len(self._z) - 1
dz = np.diff(self._z)
m = len(dz) * [None]
for idx in range(0, len(dz)):
m[idx] = reflection_matrix(q, self._rho[idx], dz[idx], as_matrix=True)
        # There is still some optimization potential here.
        # What happens here: m1 * m2 * m3 * m4 * m5 ... is multiplied sequentially.
        # It might be faster to group the products pairwise, i.e.
        # (m1 * m2) * (m3 * m4) * ... and redo the grouping in the next step,
        # which reduces the multiplication depth to O(log n) compared to O(n) for
        # the sequential product.
        # BUT: to pay off, this has to be done in C code, not in a python
        # implementation.
m = reduce(np.dot, m)
return m[0][0], m[0][1], m[1][0], m[1][1]
class Reflectivity(object):
def __init__(self, sld_profile, fronting, backing):
assert isinstance(sld_profile, SLDProfile)
self._sld = sld_profile
self._f, self._b = fronting, backing
# The input should be of the magnitude 1e-6 ... 1e-5
if any(abs(np.array([fronting, backing])) >= 1e-1):
raise RuntimeWarning("Given fronting/backing SLD values are too high")
def reflection(self, q_space, as_function=True):
r = np.ones(len(q_space), dtype=complex)
for idx, q in enumerate(q_space):
if abs(q) < 1e-10:
continue
# See [Majkrzak2003] equation (17)
f, h = refr_idx(q, self._f), refr_idx(q, self._b)
A, B, C, D = self._sld.as_matrix(q)
r[idx] = (f * h * B + C + 1j * (f * D - h * A)) / \
(f * h * B - C + 1j * (f * D + h * A))
if as_function:
return self.to_function(r, q_space, square=False)
else:
return r
@staticmethod
def to_function(r, q_space, square=False):
real = interpolate(q_space, r.real)
imag = interpolate(q_space, r.imag)
if square:
return lambda q: real(q)**2 + imag(q)**2
else:
return lambda q: real(q) + 1j * imag(q)
def reflectivity(self, q_space):
r = self.reflection(q_space)
return lambda q: abs(r(q)) ** 2
def plot(self, q_space):
import pylab
R = self.reflectivity(q_space)
pylab.plot(q_space, R(q_space))
return R
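# Illustrative usage sketch (layer values are assumptions): reflectivity of a
# single 100 AA slab with SLD 4e-6 between a vacuum fronting and a silicon-like
# backing (SLD ~ 2.07e-6).
#
#   profile = ConstantSLDProfile(sld=4e-6, thickness=100)
#   refl = Reflectivity(profile, fronting=0.0, backing=2.07e-6)
#   q = np.linspace(0.005, 0.2, 500)
#   R = refl.reflectivity(q)    # callable; R(q) evaluates |r(q)|^2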
| mit |
wataro/midso | rule/cpplint.py | 163 | 241895 | #!/usr/bin/env python
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It also does not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import copy
import getopt
import math # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--root=subdir]
[--linelength=digits]
<file> [file] ...
The style guidelines this tries to follow are those in
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the
extensions with the --extensions flag.
Flags:
output=vs7
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
root=subdir
The root directory used for deriving header guard CPP variable.
By default, the header guard CPP variable is calculated as the relative
path to the directory that contains .git, .hg, or .svn. When this flag
is specified, the relative path is calculated from the specified
directory. If the specified directory does not exist, this flag is
ignored.
Examples:
Assuming that src/.git exists, the header guard CPP variables for
src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
--root=chrome/browser => UI_BROWSER_H_
linelength=digits
This is the allowed line length for the project. The default value is
80 characters.
Examples:
--linelength=120
extensions=extension,extension,...
The allowed file extensions that cpplint will check
Examples:
--extensions=hpp,cpp
cpplint.py supports per-directory configurations specified in CPPLINT.cfg
files. CPPLINT.cfg file can contain a number of key=value pairs.
Currently the following options are supported:
set noparent
filter=+filter1,-filter2,...
exclude_files=regex
linelength=80
"set noparent" option prevents cpplint from traversing directory tree
upwards looking for more .cfg files in parent directories. This option
is usually placed in the top-level project directory.
  The "filter" option is similar in function to the --filter flag. It specifies
  message filters in addition to the |_DEFAULT_FILTERS| and those specified
  through the --filter command-line flag.
  "exclude_files" allows you to specify a regular expression to be matched against
  a file name. If the expression matches, the file is skipped and not run
  through the linter.
  "linelength" allows you to specify the allowed line length for the project.
CPPLINT.cfg has an effect on files in the same directory and all
sub-directories, unless overridden by a nested configuration file.
Example file:
filter=-build/include_order,+build/include_alpha
exclude_files=.*\.cc
The above example disables build/include_order warning and enables
build/include_alpha as well as excludes all .cc from being
processed by linter, in the current directory (where the .cfg
file is located) and all sub-directories.
"""
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
_ERROR_CATEGORIES = [
'build/class',
'build/c++11',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_alpha',
'build/include_order',
'build/include_what_you_use',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'legal/copyright',
'readability/alt_tokens',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/function',
'readability/inheritance',
'readability/multiline_comment',
'readability/multiline_string',
'readability/namespace',
'readability/nolint',
'readability/nul',
'readability/strings',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
'runtime/indentation_namespace',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/string',
'runtime/threadsafe_fn',
'runtime/vlog',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
'whitespace/empty_conditional_body',
'whitespace/empty_loop_body',
'whitespace/end_of_line',
'whitespace/ending_newline',
'whitespace/forcolon',
'whitespace/indent',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo',
]
# These error categories are no longer enforced by cpplint, but for backwards-
# compatibility they may still appear in NOLINT comments.
_LEGACY_ERROR_CATEGORIES = [
'readability/streams',
]
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add here categories that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = ['-build/include_alpha']
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# C++ headers
_CPP_HEADERS = frozenset([
# Legacy
'algobase.h',
'algo.h',
'alloc.h',
'builtinbuf.h',
'bvector.h',
'complex.h',
'defalloc.h',
'deque.h',
'editbuf.h',
'fstream.h',
'function.h',
'hash_map',
'hash_map.h',
'hash_set',
'hash_set.h',
'hashtable.h',
'heap.h',
'indstream.h',
'iomanip.h',
'iostream.h',
'istream.h',
'iterator.h',
'list.h',
'map.h',
'multimap.h',
'multiset.h',
'ostream.h',
'pair.h',
'parsestream.h',
'pfstream.h',
'procbuf.h',
'pthread_alloc',
'pthread_alloc.h',
'rope',
'rope.h',
'ropeimpl.h',
'set.h',
'slist',
'slist.h',
'stack.h',
'stdiostream.h',
'stl_alloc.h',
'stl_relops.h',
'streambuf.h',
'stream.h',
'strfile.h',
'strstream.h',
'tempbuf.h',
'tree.h',
'type_traits.h',
'vector.h',
# 17.6.1.2 C++ library headers
'algorithm',
'array',
'atomic',
'bitset',
'chrono',
'codecvt',
'complex',
'condition_variable',
'deque',
'exception',
'forward_list',
'fstream',
'functional',
'future',
'initializer_list',
'iomanip',
'ios',
'iosfwd',
'iostream',
'istream',
'iterator',
'limits',
'list',
'locale',
'map',
'memory',
'mutex',
'new',
'numeric',
'ostream',
'queue',
'random',
'ratio',
'regex',
'set',
'sstream',
'stack',
'stdexcept',
'streambuf',
'string',
'strstream',
'system_error',
'thread',
'tuple',
'typeindex',
'typeinfo',
'type_traits',
'unordered_map',
'unordered_set',
'utility',
'valarray',
'vector',
# 17.6.1.2 C++ headers for C library facilities
'cassert',
'ccomplex',
'cctype',
'cerrno',
'cfenv',
'cfloat',
'cinttypes',
'ciso646',
'climits',
'clocale',
'cmath',
'csetjmp',
'csignal',
'cstdalign',
'cstdarg',
'cstdbool',
'cstddef',
'cstdint',
'cstdio',
'cstdlib',
'cstring',
'ctgmath',
'ctime',
'cuchar',
'cwchar',
'cwctype',
])
# These headers are excluded from [build/include] and [build/include_order]
# checks:
# - Anything not following google file name conventions (containing an
# uppercase character, such as Python.h or nsStringAPI.h, for example).
# - Lua headers.
_THIRD_PARTY_HEADERS_PATTERN = re.compile(
r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
# Assertion macros. These are defined in base/logging.h and
# testing/base/gunit.h. Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE_M', 'EXPECT_TRUE',
'ASSERT_TRUE_M', 'ASSERT_TRUE',
'EXPECT_FALSE_M', 'EXPECT_FALSE',
'ASSERT_FALSE_M', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
_CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
# Alternative tokens and their replacements. For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
#
# Digraphs (such as '%:') are not included here since it's a mess to
# match those on a word boundary.
_ALT_TOKEN_REPLACEMENT = {
'and': '&&',
'bitor': '|',
'or': '||',
'xor': '^',
'compl': '~',
'bitand': '&',
'and_eq': '&=',
'or_eq': '|=',
'xor_eq': '^=',
'not': '!',
'not_eq': '!='
}
# Compile regular expression that matches all the above keywords. The "[ =()]"
# bit is meant to avoid matching these keywords outside of boolean expressions.
#
# False positives include C-style multi-line comments and multi-line strings
# but those have always been troublesome for cpplint.
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
# These constants define the current inline assembly state
_NO_ASM = 0 # Outside of inline assembly block
_INSIDE_ASM = 1 # Inside inline assembly block
_END_ASM = 2 # Last line of inline assembly block
_BLOCK_ASM = 3 # The whole block is an inline assembly block
# Match start of assembly blocks
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
r'(?:\s+(volatile|__volatile__))?'
r'\s*[{(]')
_regexp_compile_cache = {}
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
# The root directory used for deriving header guard CPP variable.
# This is set by --root flag.
_root = None
# The allowed line length of files.
# This is set by --linelength flag.
_line_length = 80
# The allowed extensions for file names
# This is set by --extensions flag.
_valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh'])
def ParseNolintSuppressions(filename, raw_line, linenum, error):
"""Updates the global list of error-suppressions.
Parses any NOLINT comments on the current line, updating the global
error_suppressions store. Reports an error if the NOLINT comment
was malformed.
Args:
filename: str, the name of the input file.
raw_line: str, the line of input text, with comments.
linenum: int, the number of the current line.
error: function, an error handler.
"""
matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
if matched:
if matched.group(1):
suppressed_line = linenum + 1
else:
suppressed_line = linenum
category = matched.group(2)
if category in (None, '(*)'): # => "suppress all"
_error_suppressions.setdefault(None, set()).add(suppressed_line)
else:
if category.startswith('(') and category.endswith(')'):
category = category[1:-1]
if category in _ERROR_CATEGORIES:
_error_suppressions.setdefault(category, set()).add(suppressed_line)
elif category not in _LEGACY_ERROR_CATEGORIES:
error(filename, linenum, 'readability/nolint', 5,
'Unknown NOLINT error category: %s' % category)
def ResetNolintSuppressions():
"""Resets the set of NOLINT suppressions to empty."""
_error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
"""Returns true if the specified error category is suppressed on this line.
Consults the global error_suppressions map populated by
ParseNolintSuppressions/ResetNolintSuppressions.
Args:
category: str, the category of the error.
linenum: int, the current line number.
Returns:
bool, True iff the error should be suppressed due to a NOLINT comment.
"""
return (linenum in _error_suppressions.get(category, set()) or
linenum in _error_suppressions.get(None, set()))
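# Example of the suppression comments handled above (illustrative C++ lines):
#   int x = 0;  // NOLINT(runtime/int)          <- suppresses only that category
#   int y = 0;  // NOLINT                       <- suppresses all categories here
#   // NOLINTNEXTLINE(whitespace/line_length)   <- suppresses on the next line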
def Match(pattern, s):
"""Matches the string with the pattern, caching the compiled regexp."""
# The regexp compilation caching is inlined in both Match and Search for
# performance reasons; factoring it out into a separate function turns out
# to be noticeably expensive.
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].match(s)
def ReplaceAll(pattern, rep, s):
"""Replaces instances of pattern in a string with a replacement.
The compiled regex is kept in a cache shared by Match and Search.
Args:
pattern: regex pattern
rep: replacement text
s: search string
Returns:
string with replacements made (or original string if no replacements)
"""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].sub(rep, s)
def Search(pattern, s):
"""Searches the string for the pattern, caching the compiled regexp."""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].search(s)
class _IncludeState(object):
"""Tracks line numbers for includes, and the order in which includes appear.
include_list contains list of lists of (header, line number) pairs.
It's a lists of lists rather than just one flat list to make it
easier to update across preprocessor boundaries.
Call CheckNextIncludeOrder() once for each header in the file, passing
in the type constants defined above. Calls in an illegal order will
raise an _IncludeError with an appropriate error message.
"""
# self._section will move monotonically through this set. If it ever
# needs to move backwards, CheckNextIncludeOrder will raise an error.
_INITIAL_SECTION = 0
_MY_H_SECTION = 1
_C_SECTION = 2
_CPP_SECTION = 3
_OTHER_H_SECTION = 4
_TYPE_NAMES = {
_C_SYS_HEADER: 'C system header',
_CPP_SYS_HEADER: 'C++ system header',
_LIKELY_MY_HEADER: 'header this file implements',
_POSSIBLE_MY_HEADER: 'header this file may implement',
_OTHER_HEADER: 'other header',
}
_SECTION_NAMES = {
_INITIAL_SECTION: "... nothing. (This can't be an error.)",
_MY_H_SECTION: 'a header this file implements',
_C_SECTION: 'C system header',
_CPP_SECTION: 'C++ system header',
_OTHER_H_SECTION: 'other header',
}
def __init__(self):
self.include_list = [[]]
self.ResetSection('')
def FindHeader(self, header):
"""Check if a header has already been included.
Args:
header: header to check.
Returns:
Line number of previous occurrence, or -1 if the header has not
been seen before.
"""
for section_list in self.include_list:
for f in section_list:
if f[0] == header:
return f[1]
return -1
def ResetSection(self, directive):
"""Reset section checking for preprocessor directive.
Args:
directive: preprocessor directive (e.g. "if", "else").
"""
# The name of the current section.
self._section = self._INITIAL_SECTION
# The path of last found header.
self._last_header = ''
# Update list of includes. Note that we never pop from the
# include list.
if directive in ('if', 'ifdef', 'ifndef'):
self.include_list.append([])
elif directive in ('else', 'elif'):
self.include_list[-1] = []
def SetLastHeader(self, header_path):
self._last_header = header_path
def CanonicalizeAlphabeticalOrder(self, header_path):
"""Returns a path canonicalized for alphabetical comparison.
- replaces "-" with "_" so they both cmp the same.
- removes '-inl' since we don't require them to be after the main header.
- lowercase everything, just in case.
Args:
header_path: Path to be canonicalized.
Returns:
Canonicalized path.
"""
return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
"""Check if a header is in alphabetical order with the previous header.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
header_path: Canonicalized header to be checked.
Returns:
Returns true if the header is in alphabetical order.
"""
# If previous section is different from current section, _last_header will
# be reset to empty string, so it's always less than current header.
#
# If previous line was a blank line, assume that the headers are
# intentionally sorted the way they are.
if (self._last_header > header_path and
Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])):
return False
return True
def CheckNextIncludeOrder(self, header_type):
"""Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong.
"""
error_message = ('Found %s after %s' %
(self._TYPE_NAMES[header_type],
self._SECTION_NAMES[self._section]))
last_section = self._section
if header_type == _C_SYS_HEADER:
if self._section <= self._C_SECTION:
self._section = self._C_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _CPP_SYS_HEADER:
if self._section <= self._CPP_SECTION:
self._section = self._CPP_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _LIKELY_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
self._section = self._OTHER_H_SECTION
elif header_type == _POSSIBLE_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
# This will always be the fallback because we're not sure
# enough that the header is associated with this file.
self._section = self._OTHER_H_SECTION
else:
assert header_type == _OTHER_HEADER
self._section = self._OTHER_H_SECTION
if last_section != self._section:
self._last_header = ''
return ''
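# Illustrative include order accepted by CheckNextIncludeOrder for a file foo.cc
# (file and header names assumed): own header first, then C system headers, then
# C++ system headers, then everything else.
#   #include "foo.h"         // _LIKELY_MY_HEADER  -> _MY_H_SECTION
#   #include <sys/types.h>   // _C_SYS_HEADER      -> _C_SECTION
#   #include <vector>        // _CPP_SYS_HEADER    -> _CPP_SECTION
#   #include "bar/util.h"    // _OTHER_HEADER      -> _OTHER_H_SECTION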
class _CppLintState(object):
  """Maintains module-wide state."""
def __init__(self):
self.verbose_level = 1 # global setting.
self.error_count = 0 # global count of reported errors
# filters to apply when emitting error messages
self.filters = _DEFAULT_FILTERS[:]
# backup of filter list. Used to restore the state after each file.
self._filters_backup = self.filters[:]
self.counting = 'total' # In what way are we counting errors?
self.errors_by_category = {} # string to int dict storing error counts
# output format:
# "emacs" - format that emacs can parse (default)
# "vs7" - format that Microsoft Visual Studio 7 can parse
self.output_format = 'emacs'
def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format
def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
def SetCountingStyle(self, counting_style):
"""Sets the module's counting options."""
self.counting = counting_style
def SetFilters(self, filters):
"""Sets the error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "+whitespace/indent").
Each filter should start with + or -; else we die.
Raises:
ValueError: The comma-separated filters did not all start with '+' or '-'.
E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
"""
# Default filters always have less priority than the flag ones.
self.filters = _DEFAULT_FILTERS[:]
self.AddFilters(filters)
def AddFilters(self, filters):
""" Adds more filters to the existing list of error-message filters. """
for filt in filters.split(','):
clean_filt = filt.strip()
if clean_filt:
self.filters.append(clean_filt)
for filt in self.filters:
if not (filt.startswith('+') or filt.startswith('-')):
raise ValueError('Every filter in --filters must start with + or -'
' (%s does not)' % filt)
def BackupFilters(self):
""" Saves the current filter list to backup storage."""
self._filters_backup = self.filters[:]
def RestoreFilters(self):
""" Restores filters previously backed up."""
self.filters = self._filters_backup[:]
def ResetErrorCounts(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
self.errors_by_category = {}
def IncrementErrorCount(self, category):
"""Bumps the module's error statistic."""
self.error_count += 1
if self.counting in ('toplevel', 'detailed'):
if self.counting != 'detailed':
category = category.split('/')[0]
if category not in self.errors_by_category:
self.errors_by_category[category] = 0
self.errors_by_category[category] += 1
def PrintErrorCounts(self):
"""Print a summary of errors by category, and the total."""
for category, count in self.errors_by_category.iteritems():
sys.stderr.write('Category \'%s\' errors found: %d\n' %
(category, count))
sys.stderr.write('Total errors found: %d\n' % self.error_count)
_cpplint_state = _CppLintState()
def _OutputFormat():
"""Gets the module's output format."""
return _cpplint_state.output_format
def _SetOutputFormat(output_format):
"""Sets the module's output format."""
_cpplint_state.SetOutputFormat(output_format)
def _VerboseLevel():
"""Returns the module's verbosity setting."""
return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
"""Sets the module's verbosity, and returns the previous setting."""
return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
"""Sets the module's counting options."""
_cpplint_state.SetCountingStyle(level)
def _Filters():
"""Returns the module's list of output filters, as a list."""
return _cpplint_state.filters
def _SetFilters(filters):
"""Sets the module's error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.SetFilters(filters)
def _AddFilters(filters):
"""Adds more filter overrides.
Unlike _SetFilters, this function does not reset the current list of filters
available.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.AddFilters(filters)
def _BackupFilters():
""" Saves the current filter list to backup storage."""
_cpplint_state.BackupFilters()
def _RestoreFilters():
""" Restores filters previously backed up."""
_cpplint_state.RestoreFilters()
class _FunctionState(object):
"""Tracks current function name and the number of lines in its body."""
_NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
_TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
def __init__(self):
self.in_a_function = False
self.lines_in_function = 0
self.current_function = ''
def Begin(self, function_name):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
"""
self.in_a_function = True
self.lines_in_function = 0
self.current_function = function_name
def Count(self):
"""Count line in current function body."""
if self.in_a_function:
self.lines_in_function += 1
def Check(self, error, filename, linenum):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
"""
if Match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2**_VerboseLevel()
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
# 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
if error_level > 5:
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger))
def End(self):
"""Stop analyzing function body."""
self.in_a_function = False
class _IncludeError(Exception):
"""Indicates a problem with the include order in a file."""
pass
class FileInfo(object):
"""Provides utility functions for filenames.
FileInfo provides easy access to the components of a file's path
relative to the project root.
"""
def __init__(self, filename):
self._filename = filename
def FullName(self):
"""Make Windows paths like Unix."""
return os.path.abspath(self._filename).replace('\\', '/')
def RepositoryName(self):
"""FullName after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
the name so that we get header guards that don't include things like
"C:\Documents and Settings\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors.
"""
fullname = self.FullName()
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
if os.path.exists(os.path.join(project_dir, ".svn")):
# If there's a .svn file in the current directory, we recursively look
# up the directory tree for the top of the SVN checkout
root_dir = project_dir
one_up_dir = os.path.dirname(root_dir)
while os.path.exists(os.path.join(one_up_dir, ".svn")):
root_dir = os.path.dirname(root_dir)
one_up_dir = os.path.dirname(one_up_dir)
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
# searching up from the current path.
root_dir = os.path.dirname(fullname)
while (root_dir != os.path.dirname(root_dir) and
not os.path.exists(os.path.join(root_dir, ".git")) and
not os.path.exists(os.path.join(root_dir, ".hg")) and
not os.path.exists(os.path.join(root_dir, ".svn"))):
root_dir = os.path.dirname(root_dir)
if (os.path.exists(os.path.join(root_dir, ".git")) or
os.path.exists(os.path.join(root_dir, ".hg")) or
os.path.exists(os.path.join(root_dir, ".svn"))):
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Don't know what to do; header guard warnings may be wrong...
return fullname
def Split(self):
"""Splits the file into the directory, basename, and extension.
For 'chrome/browser/browser.cc', Split() would
return ('chrome/browser', 'browser', '.cc')
Returns:
A tuple of (directory, basename, extension).
"""
googlename = self.RepositoryName()
project, rest = os.path.split(googlename)
return (project,) + os.path.splitext(rest)
def BaseName(self):
"""File base name - text after the final slash, before the final period."""
return self.Split()[1]
def Extension(self):
"""File extension - text following the final period."""
return self.Split()[2]
def NoExtension(self):
"""File has no source file extension."""
return '/'.join(self.Split()[0:2])
def IsSource(self):
"""File has a source file extension."""
return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
def _ShouldPrintError(category, confidence, linenum):
"""If confidence >= verbose, category passes filter and is not suppressed."""
# There are three ways we might decide not to print an error message:
# a "NOLINT(category)" comment appears in the source,
# the verbosity level isn't high enough, or the filters filter it out.
if IsErrorSuppressedByNolint(category, linenum):
return False
if confidence < _cpplint_state.verbose_level:
return False
is_filtered = False
for one_filter in _Filters():
if one_filter.startswith('-'):
if category.startswith(one_filter[1:]):
is_filtered = True
elif one_filter.startswith('+'):
if category.startswith(one_filter[1:]):
is_filtered = False
else:
assert False # should have been checked for in SetFilter.
if is_filtered:
return False
return True
def Error(filename, linenum, category, confidence, message):
"""Logs the fact we've found a lint error.
We log where the error was found, and also our confidence in the error,
that is, how certain we are this is a legitimate style regression, and
not a misidentification or a use that's sometimes justified.
False positives can be suppressed by the use of
"cpplint(category)" comments on the offending line. These are
parsed into _error_suppressions.
Args:
filename: The name of the file containing the error.
linenum: The number of the line containing the error.
category: A string used to describe the "category" this bug
falls under: "whitespace", say, or "runtime". Categories
may have a hierarchy separated by slashes: "whitespace/indent".
confidence: A number from 1-5 representing a confidence score for
the error, with 5 meaning that we are certain of the problem,
and 1 meaning that it could be a legitimate construct.
message: The error message.
"""
if _ShouldPrintError(category, confidence, linenum):
_cpplint_state.IncrementErrorCount(category)
if _cpplint_state.output_format == 'vs7':
sys.stderr.write('%s(%s): %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
elif _cpplint_state.output_format == 'eclipse':
sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
else:
sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Match a single C style comment on the same line.
_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
# Matches multi-line C style comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of removing surrounding spaces so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on left side but only if there's a non-character
# on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
_RE_PATTERN_C_COMMENTS + r'\s+|' +
r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
_RE_PATTERN_C_COMMENTS + r')')
def IsCppString(line):
  """Does the line terminate such that the next symbol is inside a string constant?
This function does not consider single-line nor multi-line comments.
Args:
line: is a partial line of code starting from the 0..n.
Returns:
True, if next character appended to 'line' is inside a
string constant.
"""
line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
def CleanseRawStrings(raw_lines):
"""Removes C++11 raw strings from lines.
Before:
static const char kData[] = R"(
multi-line string
)";
After:
static const char kData[] = ""
(replaced by blank line)
"";
Args:
raw_lines: list of raw lines.
Returns:
list of lines with C++11 raw strings replaced by empty strings.
"""
delimiter = None
lines_without_raw_strings = []
for line in raw_lines:
if delimiter:
# Inside a raw string, look for the end
end = line.find(delimiter)
if end >= 0:
# Found the end of the string, match leading space for this
# line and resume copying the original lines, and also insert
# a "" on the last line.
leading_space = Match(r'^(\s*)\S', line)
line = leading_space.group(1) + '""' + line[end + len(delimiter):]
delimiter = None
else:
# Haven't found the end yet, append a blank line.
line = '""'
# Look for beginning of a raw string, and replace them with
# empty strings. This is done in a loop to handle multiple raw
# strings on the same line.
while delimiter is None:
# Look for beginning of a raw string.
# See 2.14.15 [lex.string] for syntax.
matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
if matched:
delimiter = ')' + matched.group(2) + '"'
end = matched.group(3).find(delimiter)
if end >= 0:
# Raw string ended on same line
line = (matched.group(1) + '""' +
matched.group(3)[end + len(delimiter):])
delimiter = None
else:
# Start of a multi-line raw string
line = matched.group(1) + '""'
else:
break
lines_without_raw_strings.append(line)
# TODO(unknown): if delimiter is not None here, we might want to
# emit a warning for unterminated string.
return lines_without_raw_strings
def FindNextMultiLineCommentStart(lines, lineix):
"""Find the beginning marker for a multiline comment."""
while lineix < len(lines):
if lines[lineix].strip().startswith('/*'):
# Only return this marker if the comment goes beyond this line
if lines[lineix].strip().find('*/', 2) < 0:
return lineix
lineix += 1
return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
"""We are inside a comment, find the end marker."""
while lineix < len(lines):
if lines[lineix].strip().endswith('*/'):
return lineix
lineix += 1
return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
"""Clears a range of lines for multi-line comments."""
# Having // dummy comments makes the lines non-empty, so we will not get
# unnecessary blank line warnings later in the code.
for i in range(begin, end):
lines[i] = '/**/'
def RemoveMultiLineComments(filename, lines, error):
"""Removes multiline (c-style) comments from lines."""
lineix = 0
while lineix < len(lines):
lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
if lineix_begin >= len(lines):
return
lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
if lineix_end >= len(lines):
error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
'Could not find end of multi-line comment')
return
RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
lineix = lineix_end + 1
def CleanseComments(line):
"""Removes //-comments and single-line C-style /* */ comments.
Args:
line: A line of C++ source.
Returns:
The line with single-line comments removed.
"""
commentpos = line.find('//')
if commentpos != -1 and not IsCppString(line[:commentpos]):
line = line[:commentpos].rstrip()
# get rid of /* ... */
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
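# Editor's illustrative sketch (not part of the original cpplint sources):
# CleanseComments removes //-comments and one-line /* */ comments, leaving
# the rest of the line intact. Expected values assume the implementation
# above.
def _example_cleanse_comments():
  # 'int x = 1;  // trailing'  ->  'int x = 1;'
  # 'f(/* arg */ 42);'         ->  'f(42);'
  return [CleanseComments('int x = 1;  // trailing'),
          CleanseComments('f(/* arg */ 42);')]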
class CleansedLines(object):
"""Holds 4 copies of all lines with different preprocessing applied to them.
1) elided member contains lines without strings and comments.
2) lines member contains lines without comments.
3) raw_lines member contains all the lines without processing.
  4) lines_without_raw_strings member is the same as raw_lines, but with C++11
     raw strings removed.
All these members are of <type 'list'>, and of the same length.
"""
def __init__(self, lines):
self.elided = []
self.lines = []
self.raw_lines = lines
self.num_lines = len(lines)
self.lines_without_raw_strings = CleanseRawStrings(lines)
for linenum in range(len(self.lines_without_raw_strings)):
self.lines.append(CleanseComments(
self.lines_without_raw_strings[linenum]))
elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
self.elided.append(CleanseComments(elided))
def NumLines(self):
"""Returns the number of lines represented."""
return self.num_lines
@staticmethod
def _CollapseStrings(elided):
"""Collapses strings and chars on a line to simple "" or '' blocks.
    We nix strings first so we're not fooled by text like '"http://"'.
Args:
elided: The line being processed.
Returns:
The line with collapsed strings.
"""
if _RE_PATTERN_INCLUDE.match(elided):
return elided
# Remove escaped characters first to make quote/single quote collapsing
# basic. Things that look like escaped characters shouldn't occur
# outside of strings and chars.
elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
# Replace quoted strings and digit separators. Both single quotes
# and double quotes are processed in the same loop, otherwise
# nested quotes wouldn't work.
collapsed = ''
while True:
# Find the first quote character
match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
if not match:
collapsed += elided
break
head, quote, tail = match.groups()
if quote == '"':
# Collapse double quoted strings
second_quote = tail.find('"')
if second_quote >= 0:
collapsed += head + '""'
elided = tail[second_quote + 1:]
else:
# Unmatched double quote, don't bother processing the rest
# of the line since this is probably a multiline string.
collapsed += elided
break
else:
# Found single quote, check nearby text to eliminate digit separators.
#
# There is no special handling for floating point here, because
# the integer/fractional/exponent parts would all be parsed
# correctly as long as there are digits on both sides of the
# separator. So we are fine as long as we don't see something
# like "0.'3" (gcc 4.9.0 will not allow this literal).
if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
collapsed += head + match_literal.group(1).replace("'", '')
elided = match_literal.group(2)
else:
second_quote = tail.find('\'')
if second_quote >= 0:
collapsed += head + "''"
elided = tail[second_quote + 1:]
else:
# Unmatched single quote
collapsed += elided
break
return collapsed
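# Editor's illustrative sketch (not part of the original cpplint sources):
# how the different views kept by CleansedLines differ for one input line.
# Expected values assume the class implementation above.
def _example_cleansed_lines():
  cl = CleansedLines(['printf("%d", i);  // show i'])
  # cl.raw_lines[0] -> 'printf("%d", i);  // show i'   (untouched)
  # cl.lines[0]     -> 'printf("%d", i);'              (comment removed)
  # cl.elided[0]    -> 'printf("", i);'                (string collapsed too)
  return cl.raw_lines[0], cl.lines[0], cl.elided[0]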
def FindEndOfExpressionInLine(line, startpos, stack):
"""Find the position just after the end of current parenthesized expression.
Args:
line: a CleansedLines line.
startpos: start searching at this position.
stack: nesting stack at startpos.
Returns:
On finding matching end: (index just after matching end, None)
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at end of this line)
"""
for i in xrange(startpos, len(line)):
char = line[i]
if char in '([{':
# Found start of parenthesized expression, push to expression stack
stack.append(char)
elif char == '<':
# Found potential start of template argument list
if i > 0 and line[i - 1] == '<':
# Left shift operator
if stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
# operator<, don't add to stack
continue
else:
# Tentative start of template argument list
stack.append('<')
elif char in ')]}':
# Found end of parenthesized expression.
#
# If we are currently expecting a matching '>', the pending '<'
# must have been an operator. Remove them from expression stack.
while stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
if ((stack[-1] == '(' and char == ')') or
(stack[-1] == '[' and char == ']') or
(stack[-1] == '{' and char == '}')):
stack.pop()
if not stack:
return (i + 1, None)
else:
# Mismatched parentheses
return (-1, None)
elif char == '>':
# Found potential end of template argument list.
# Ignore "->" and operator functions
if (i > 0 and
(line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
continue
# Pop the stack if there is a matching '<'. Otherwise, ignore
# this '>' since it must be an operator.
if stack:
if stack[-1] == '<':
stack.pop()
if not stack:
return (i + 1, None)
elif char == ';':
      # Found something that looks like the end of a statement. If we are
      # currently expecting a '>', the matching '<' must have been an
      # operator, since template argument lists should not contain statements.
while stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
# Did not find end of expression or unbalanced parentheses on this line
return (-1, stack)
def CloseExpression(clean_lines, linenum, pos):
"""If input points to ( or { or [ or <, finds the position that closes it.
If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
linenum/pos that correspond to the closing of the expression.
TODO(unknown): cpplint spends a fair bit of time matching parentheses.
Ideally we would want to index all opening and closing parentheses once
and have CloseExpression be just a simple lookup, but due to preprocessor
tricks, this is not so easy.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *past* the closing brace, or
(line, len(lines), -1) if we never find a close. Note we ignore
strings and comments when matching; and the line we return is the
'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):
return (line, clean_lines.NumLines(), -1)
# Check first line
(end_pos, stack) = FindEndOfExpressionInLine(line, pos, [])
if end_pos > -1:
return (line, linenum, end_pos)
# Continue scanning forward
while stack and linenum < clean_lines.NumLines() - 1:
linenum += 1
line = clean_lines.elided[linenum]
(end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack)
if end_pos > -1:
return (line, linenum, end_pos)
# Did not find end of expression before end of file, give up
return (line, clean_lines.NumLines(), -1)
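# Editor's illustrative sketch (not part of the original cpplint sources):
# CloseExpression walks forward from an opening bracket to the position just
# past its match, possibly on a later line. Expected values assume the
# implementation above.
def _example_close_expression():
  cl = CleansedLines(['foo(bar,', '    baz);'])
  line, linenum, pos = CloseExpression(cl, 0, 3)  # the '(' on line 0
  # Expected: linenum == 1 and pos == 8, i.e. just past the ')' on line 1.
  return line, linenum, pos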
def FindStartOfExpressionInLine(line, endpos, stack):
"""Find position at the matching start of current expression.
This is almost the reverse of FindEndOfExpressionInLine, but note
that the input position and returned position differs by 1.
Args:
line: a CleansedLines line.
endpos: start searching at this position.
stack: nesting stack at endpos.
Returns:
On finding matching start: (index at matching start, None)
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at beginning of this line)
"""
i = endpos
while i >= 0:
char = line[i]
if char in ')]}':
# Found end of expression, push to expression stack
stack.append(char)
elif char == '>':
# Found potential end of template argument list.
#
# Ignore it if it's a "->" or ">=" or "operator>"
if (i > 0 and
(line[i - 1] == '-' or
Match(r'\s>=\s', line[i - 1:]) or
Search(r'\boperator\s*$', line[0:i]))):
i -= 1
else:
stack.append('>')
elif char == '<':
# Found potential start of template argument list
if i > 0 and line[i - 1] == '<':
# Left shift operator
i -= 1
else:
# If there is a matching '>', we can pop the expression stack.
# Otherwise, ignore this '<' since it must be an operator.
if stack and stack[-1] == '>':
stack.pop()
if not stack:
return (i, None)
elif char in '([{':
# Found start of expression.
#
# If there are any unmatched '>' on the stack, they must be
# operators. Remove those.
while stack and stack[-1] == '>':
stack.pop()
if not stack:
return (-1, None)
if ((char == '(' and stack[-1] == ')') or
(char == '[' and stack[-1] == ']') or
(char == '{' and stack[-1] == '}')):
stack.pop()
if not stack:
return (i, None)
else:
# Mismatched parentheses
return (-1, None)
elif char == ';':
      # Found something that looks like the end of a statement. If we are
      # currently expecting a '<', the matching '>' must have been an
      # operator, since template argument lists should not contain statements.
while stack and stack[-1] == '>':
stack.pop()
if not stack:
return (-1, None)
i -= 1
return (-1, stack)
def ReverseCloseExpression(clean_lines, linenum, pos):
"""If input points to ) or } or ] or >, finds the position that opens it.
If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
linenum/pos that correspond to the opening of the expression.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *at* the opening brace, or
(line, 0, -1) if we never find the matching opening brace. Note
we ignore strings and comments when matching; and the line we
return is the 'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
if line[pos] not in ')}]>':
return (line, 0, -1)
# Check last line
(start_pos, stack) = FindStartOfExpressionInLine(line, pos, [])
if start_pos > -1:
return (line, linenum, start_pos)
# Continue scanning backward
while stack and linenum > 0:
linenum -= 1
line = clean_lines.elided[linenum]
(start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack)
if start_pos > -1:
return (line, linenum, start_pos)
# Did not find start of expression before beginning of file, give up
return (line, 0, -1)
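# Editor's illustrative sketch (not part of the original cpplint sources):
# ReverseCloseExpression is the mirror image: given a closing bracket it
# returns the position *at* the matching opener. Expected values assume the
# implementation above.
def _example_reverse_close_expression():
  cl = CleansedLines(['foo(bar,', '    baz);'])
  line, linenum, pos = ReverseCloseExpression(cl, 1, 7)  # the ')' on line 1
  # Expected: linenum == 0 and pos == 3, i.e. the '(' on line 0.
  return line, linenum, pos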
def CheckForCopyright(filename, lines, error):
"""Logs an error if no Copyright message appears at the top of the file."""
# We'll say it should occur by line 10. Don't forget there's a
# dummy line at the front.
for line in xrange(1, min(len(lines), 11)):
if re.search(r'Copyright', lines[line], re.I): break
else: # means no copyright line was found
error(filename, 0, 'legal/copyright', 5,
'No copyright message found. '
'You should have a line: "Copyright [year] <Copyright Owner>"')
def GetIndentLevel(line):
"""Return the number of leading spaces in line.
Args:
line: A string to check.
Returns:
An integer count of leading spaces, possibly zero.
"""
indent = Match(r'^( *)\S', line)
if indent:
return len(indent.group(1))
else:
return 0
def GetHeaderGuardCPPVariable(filename):
"""Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file.
"""
# Restores original filename in case that cpplint is invoked from Emacs's
# flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
# Replace 'c++' with 'cpp'.
filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')
fileinfo = FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
if _root:
file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'
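# Editor's illustrative sketch (not part of the original cpplint sources):
# the guard name is the repository-relative path, upper-cased, with
# non-alphanumeric characters replaced by underscores and a trailing '_'.
def _example_header_guard_variable():
  # Assuming FileInfo(...).RepositoryName() returns the path unchanged (i.e.
  # the file sits at the repository root) and _root is unset, this would
  # yield 'CHROME_BROWSER_UI_BROWSER_H_'.
  return GetHeaderGuardCPPVariable('chrome/browser/ui/browser.h')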
def CheckForHeaderGuard(filename, clean_lines, error):
"""Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. For other
headers, checks that the full pathname is used.
Args:
filename: The name of the C++ header file.
clean_lines: A CleansedLines instance containing the file.
error: The function to call with any errors found.
"""
# Don't check for header guards if there are error suppression
# comments somewhere in this file.
#
# Because this is silencing a warning for a nonexistent line, we
# only support the very specific NOLINT(build/header_guard) syntax,
# and not the general NOLINT or NOLINT(*) syntax.
raw_lines = clean_lines.lines_without_raw_strings
for i in raw_lines:
if Search(r'//\s*NOLINT\(build/header_guard\)', i):
return
cppvar = GetHeaderGuardCPPVariable(filename)
ifndef = ''
ifndef_linenum = 0
define = ''
endif = ''
endif_linenum = 0
for linenum, line in enumerate(raw_lines):
linesplit = line.split()
if len(linesplit) >= 2:
# find the first occurrence of #ifndef and #define, save arg
if not ifndef and linesplit[0] == '#ifndef':
# set ifndef to the header guard presented on the #ifndef line.
ifndef = linesplit[1]
ifndef_linenum = linenum
if not define and linesplit[0] == '#define':
define = linesplit[1]
# find the last occurrence of #endif, save entire line
if line.startswith('#endif'):
endif = line
endif_linenum = linenum
if not ifndef or not define or ifndef != define:
error(filename, 0, 'build/header_guard', 5,
'No #ifndef header guard found, suggested CPP variable is: %s' %
cppvar)
return
# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
# for backward compatibility.
if ifndef != cppvar:
error_level = 0
if ifndef != cppvar + '_':
error_level = 5
ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,
error)
error(filename, ifndef_linenum, 'build/header_guard', error_level,
'#ifndef header guard has wrong style, please use: %s' % cppvar)
# Check for "//" comments on endif line.
ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
error)
match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
if match:
if match.group(1) == '_':
# Issue low severity warning for deprecated double trailing underscore
error(filename, endif_linenum, 'build/header_guard', 0,
'#endif line should be "#endif // %s"' % cppvar)
return
# Didn't find the corresponding "//" comment. If this file does not
# contain any "//" comments at all, it could be that the compiler
# only wants "/**/" comments, look for those instead.
no_single_line_comments = True
for i in xrange(1, len(raw_lines) - 1):
line = raw_lines[i]
if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line):
no_single_line_comments = False
break
if no_single_line_comments:
match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
if match:
if match.group(1) == '_':
# Low severity warning for double trailing underscore
error(filename, endif_linenum, 'build/header_guard', 0,
'#endif line should be "#endif /* %s */"' % cppvar)
return
# Didn't find anything
error(filename, endif_linenum, 'build/header_guard', 5,
'#endif line should be "#endif // %s"' % cppvar)
def CheckHeaderFileIncluded(filename, include_state, error):
"""Logs an error if a .cc file does not include its header."""
# Do not check test files
if filename.endswith('_test.cc') or filename.endswith('_unittest.cc'):
return
fileinfo = FileInfo(filename)
headerfile = filename[0:len(filename) - 2] + 'h'
if not os.path.exists(headerfile):
return
headername = FileInfo(headerfile).RepositoryName()
first_include = 0
for section_list in include_state.include_list:
for f in section_list:
if headername in f[0] or f[0] in headername:
return
if not first_include:
first_include = f[1]
error(filename, first_include, 'build/include', 5,
'%s should include its header file %s' % (fileinfo.RepositoryName(),
headername))
def CheckForBadCharacters(filename, lines, error):
"""Logs an error for each line containing bad characters.
Two kinds of bad characters:
1. Unicode replacement characters: These indicate that either the file
contained invalid UTF-8 (likely) or Unicode replacement characters (which
it shouldn't). Note that it's possible for this to throw off line
numbering if the invalid UTF-8 occurred adjacent to a newline.
2. NUL bytes. These are problematic for some tools.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
for linenum, line in enumerate(lines):
if u'\ufffd' in line:
error(filename, linenum, 'readability/utf8', 5,
'Line contains invalid UTF-8 (or Unicode replacement character).')
if '\0' in line:
error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
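# Editor's illustrative sketch (not part of the original cpplint sources):
# CheckForBadCharacters reports one error per offending line, using 0-based
# line numbers as passed in. Expected values assume the implementation above.
def _example_bad_characters():
  findings = []
  def _error(filename, linenum, category, confidence, message):
    findings.append((linenum, category))
  CheckForBadCharacters('demo.cc',
                        [u'ok line', u'bad \ufffd line', 'nul \0 byte'],
                        _error)
  # Expected: [(1, 'readability/utf8'), (2, 'readability/nul')]
  return findings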
def CheckForNewlineAtEOF(filename, lines, error):
"""Logs an error if there is no newline char at the end of the file.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
# The array lines() was created by adding two newlines to the
# original file (go figure), then splitting on \n.
# To verify that the file ends in \n, we just have to make sure the
# last-but-two element of lines() exists and is empty.
if len(lines) < 3 or lines[-2]:
error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
"""Logs an error if we see /* ... */ or "..." that extend past one line.
/* ... */ comments are legit inside macros, for one line.
Otherwise, we prefer // comments, so it's ok to warn about the
other. Likewise, it's ok for strings to extend across multiple
lines, as long as a line continuation character (backslash)
terminates each line. Although not currently prohibited by the C++
style guide, it's ugly and unnecessary. We don't do well with either
in this lint program, so we warn about both.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remove all \\ (escaped backslashes) from the line. They are OK, and the
# second (escaped) slash may trigger later \" detection erroneously.
line = line.replace('\\\\', '')
if line.count('/*') > line.count('*/'):
error(filename, linenum, 'readability/multiline_comment', 5,
'Complex multi-line /*...*/-style comment found. '
'Lint may give bogus warnings. '
'Consider replacing these with //-style comments, '
'with #if 0...#endif, '
'or with more clearly structured multi-line comments.')
if (line.count('"') - line.count('\\"')) % 2:
error(filename, linenum, 'readability/multiline_string', 5,
'Multi-line string ("...") found. This lint script doesn\'t '
'do well with such strings, and may give bogus warnings. '
'Use C++11 raw strings or concatenation instead.')
# (non-threadsafe name, thread-safe alternative, validation pattern)
#
# The validation pattern is used to eliminate false positives such as:
# _rand(); // false positive due to substring match.
# ->rand(); // some member function rand().
# ACMRandom rand(seed); // some variable named rand.
# ISAACRandom rand(); // another variable named rand.
#
# Basically we require the return value of these functions to be used
# in some expression context on the same line by matching on some
# operator before the function name. This eliminates constructors and
# member function calls.
_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
_THREADING_LIST = (
('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
('strtok(', 'strtok_r(',
_UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
)
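# Editor's illustrative sketch (not part of the original cpplint sources):
# the validation patterns above only fire when the call result is used in an
# expression context, which is what filters out constructors and members.
def _example_threadsafe_pattern():
  pattern = _UNSAFE_FUNC_PREFIX + r'rand\(\)'
  # 'int x = rand();'       -> match    ('= rand()' is an expression use)
  # 'ACMRandom rand(seed);' -> no match (looks like a variable declaration)
  return [bool(Search(pattern, 'int x = rand();')),
          bool(Search(pattern, 'ACMRandom rand(seed);'))]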
def CheckPosixThreading(filename, clean_lines, linenum, error):
"""Checks for calls to thread-unsafe functions.
  Much code was originally written without consideration of
  multi-threading. Also, engineers often rely on their old experience;
  they learned POSIX before threading extensions were added. These
tests guide the engineers to use thread-safe functions (when using
posix directly).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST:
# Additional pattern matching check to confirm that this is the
# function we are looking for
if Search(pattern, line):
error(filename, linenum, 'runtime/threadsafe_fn', 2,
'Consider using ' + multithread_safe_func +
'...) instead of ' + single_thread_func +
'...) for improved thread safety.')
def CheckVlogArguments(filename, clean_lines, linenum, error):
"""Checks that VLOG() is only used for defining a logging level.
For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
VLOG(FATAL) are not.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
error(filename, linenum, 'runtime/vlog', 5,
'VLOG() should be used with numeric verbosity level. '
'Use LOG() if you want symbolic severity levels.')
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
r'^\s*\*\w+(\+\+|--);')
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
"""Checks for invalid increment *count++.
For example following function:
void increment_counter(int* count) {
*count++;
}
  is invalid, because it effectively does count++, moving the pointer, and
  should be replaced with ++*count, (*count)++ or *count += 1.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if _RE_PATTERN_INVALID_INCREMENT.match(line):
error(filename, linenum, 'runtime/invalid_increment', 5,
'Changing pointer instead of value (or unused value of operator*).')
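# Editor's illustrative sketch (not part of the original cpplint sources):
# which statements the pattern above flags. Expected values assume the
# regular expression as defined.
def _example_invalid_increment():
  # '  *count++;'   -> flagged (increments the pointer, not the value)
  # '  (*count)++;' -> not flagged
  return [bool(_RE_PATTERN_INVALID_INCREMENT.match('  *count++;')),
          bool(_RE_PATTERN_INVALID_INCREMENT.match('  (*count)++;'))]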
def IsMacroDefinition(clean_lines, linenum):
if Search(r'^#define', clean_lines[linenum]):
return True
if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]):
return True
return False
def IsForwardClassDeclaration(clean_lines, linenum):
return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum])
class _BlockInfo(object):
"""Stores information about a generic block of code."""
def __init__(self, seen_open_brace):
self.seen_open_brace = seen_open_brace
self.open_parentheses = 0
self.inline_asm = _NO_ASM
self.check_namespace_indentation = False
def CheckBegin(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text up to the opening brace.
This is mostly for checking the text after the class identifier
and the "{", usually where the base class is specified. For other
blocks, there isn't much to check, so we always pass.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text after the closing brace.
This is mostly used for checking end of namespace comments.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def IsBlockInfo(self):
"""Returns true if this block is a _BlockInfo.
This is convenient for verifying that an object is an instance of
a _BlockInfo, but not an instance of any of the derived classes.
Returns:
True for this class, False for derived classes.
"""
return self.__class__ == _BlockInfo
class _ExternCInfo(_BlockInfo):
"""Stores information about an 'extern "C"' block."""
def __init__(self):
_BlockInfo.__init__(self, True)
class _ClassInfo(_BlockInfo):
"""Stores information about a class."""
def __init__(self, name, class_or_struct, clean_lines, linenum):
_BlockInfo.__init__(self, False)
self.name = name
self.starting_linenum = linenum
self.is_derived = False
self.check_namespace_indentation = True
if class_or_struct == 'struct':
self.access = 'public'
self.is_struct = True
else:
self.access = 'private'
self.is_struct = False
# Remember initial indentation level for this class. Using raw_lines here
# instead of elided to account for leading comments.
self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])
# Try to find the end of the class. This will be confused by things like:
# class A {
# } *x = { ...
#
# But it's still good enough for CheckSectionSpacing.
self.last_line = 0
depth = 0
for i in range(linenum, clean_lines.NumLines()):
line = clean_lines.elided[i]
depth += line.count('{') - line.count('}')
if not depth:
self.last_line = i
break
def CheckBegin(self, filename, clean_lines, linenum, error):
# Look for a bare ':'
if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
self.is_derived = True
def CheckEnd(self, filename, clean_lines, linenum, error):
# If there is a DISALLOW macro, it should appear near the end of
# the class.
seen_last_thing_in_class = False
for i in xrange(linenum - 1, self.starting_linenum, -1):
match = Search(
r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' +
self.name + r'\)',
clean_lines.elided[i])
if match:
if seen_last_thing_in_class:
error(filename, i, 'readability/constructors', 3,
match.group(1) + ' should be the last thing in the class')
break
if not Match(r'^\s*$', clean_lines.elided[i]):
seen_last_thing_in_class = True
# Check that closing brace is aligned with beginning of the class.
# Only do this if the closing brace is indented by only whitespaces.
# This means we will not check single-line class definitions.
indent = Match(r'^( *)\}', clean_lines.elided[linenum])
if indent and len(indent.group(1)) != self.class_indent:
if self.is_struct:
parent = 'struct ' + self.name
else:
parent = 'class ' + self.name
error(filename, linenum, 'whitespace/indent', 3,
'Closing brace should be aligned with beginning of %s' % parent)
class _NamespaceInfo(_BlockInfo):
"""Stores information about a namespace."""
def __init__(self, name, linenum):
_BlockInfo.__init__(self, False)
self.name = name or ''
self.starting_linenum = linenum
self.check_namespace_indentation = True
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Check end of namespace comments."""
line = clean_lines.raw_lines[linenum]
    # Check how many lines are enclosed in this namespace. Don't issue
# warning for missing namespace comments if there aren't enough
# lines. However, do apply checks if there is already an end of
# namespace comment and it's incorrect.
#
# TODO(unknown): We always want to check end of namespace comments
# if a namespace is large, but sometimes we also want to apply the
# check if a short namespace contained nontrivial things (something
# other than forward declarations). There is currently no logic on
# deciding what these nontrivial things are, so this check is
# triggered by namespace size only, which works most of the time.
if (linenum - self.starting_linenum < 10
and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
return
# Look for matching comment at end of namespace.
#
# Note that we accept C style "/* */" comments for terminating
    # namespaces, so that code that terminates namespaces inside
# preprocessor macros can be cpplint clean.
#
# We also accept stuff like "// end of namespace <name>." with the
# period at the end.
#
# Besides these, we don't accept anything else, otherwise we might
# get false negatives when existing comment is a substring of the
# expected namespace.
if self.name:
# Named namespace
if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
r'[\*/\.\\\s]*$'),
line):
error(filename, linenum, 'readability/namespace', 5,
'Namespace should be terminated with "// namespace %s"' %
self.name)
else:
# Anonymous namespace
if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
# If "// namespace anonymous" or "// anonymous namespace (more text)",
# mention "// anonymous namespace" as an acceptable form
if Match(r'}.*\b(namespace anonymous|anonymous namespace)\b', line):
error(filename, linenum, 'readability/namespace', 5,
'Anonymous namespace should be terminated with "// namespace"'
' or "// anonymous namespace"')
else:
error(filename, linenum, 'readability/namespace', 5,
'Anonymous namespace should be terminated with "// namespace"')
class _PreprocessorInfo(object):
"""Stores checkpoints of nesting stacks when #if/#else is seen."""
def __init__(self, stack_before_if):
# The entire nesting stack before #if
self.stack_before_if = stack_before_if
# The entire nesting stack up to #else
self.stack_before_else = []
# Whether we have already seen #else or #elif
self.seen_else = False
class NestingState(object):
"""Holds states related to parsing braces."""
def __init__(self):
# Stack for tracking all braces. An object is pushed whenever we
# see a "{", and popped when we see a "}". Only 3 types of
# objects are possible:
# - _ClassInfo: a class or struct.
# - _NamespaceInfo: a namespace.
# - _BlockInfo: some other type of block.
self.stack = []
# Top of the previous stack before each Update().
#
    # Because the nesting_stack is updated at the end of each line, we
    # had to do some convoluted checks to find out what the current
    # scope is at the beginning of the line. This check is simplified by
# saving the previous top of nesting stack.
#
# We could save the full stack, but we only need the top. Copying
# the full nesting stack would slow down cpplint by ~10%.
self.previous_stack_top = []
# Stack of _PreprocessorInfo objects.
self.pp_stack = []
def SeenOpenBrace(self):
"""Check if we have seen the opening brace for the innermost block.
Returns:
True if we have seen the opening brace, False if the innermost
block is still expecting an opening brace.
"""
return (not self.stack) or self.stack[-1].seen_open_brace
def InNamespaceBody(self):
"""Check if we are currently one level inside a namespace body.
Returns:
True if top of the stack is a namespace block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
def InExternC(self):
"""Check if we are currently one level inside an 'extern "C"' block.
Returns:
True if top of the stack is an extern block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _ExternCInfo)
def InClassDeclaration(self):
"""Check if we are currently one level inside a class or struct declaration.
Returns:
True if top of the stack is a class/struct, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _ClassInfo)
def InAsmBlock(self):
"""Check if we are currently one level inside an inline ASM block.
Returns:
True if the top of the stack is a block containing inline ASM.
"""
return self.stack and self.stack[-1].inline_asm != _NO_ASM
def InTemplateArgumentList(self, clean_lines, linenum, pos):
"""Check if current position is inside template argument list.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: position just after the suspected template argument.
Returns:
True if (linenum, pos) is inside template arguments.
"""
while linenum < clean_lines.NumLines():
# Find the earliest character that might indicate a template argument
line = clean_lines.elided[linenum]
match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
if not match:
linenum += 1
pos = 0
continue
token = match.group(1)
pos += len(match.group(0))
# These things do not look like template argument list:
# class Suspect {
# class Suspect x; }
if token in ('{', '}', ';'): return False
# These things look like template argument list:
# template <class Suspect>
# template <class Suspect = default_value>
# template <class Suspect[]>
# template <class Suspect...>
if token in ('>', '=', '[', ']', '.'): return True
# Check if token is an unmatched '<'.
# If not, move on to the next character.
if token != '<':
pos += 1
if pos >= len(line):
linenum += 1
pos = 0
continue
# We can't be sure if we just find a single '<', and need to
# find the matching '>'.
(_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
if end_pos < 0:
# Not sure if template argument list or syntax error in file
return False
linenum = end_line
pos = end_pos
return False
def UpdatePreprocessor(self, line):
"""Update preprocessor stack.
We need to handle preprocessors due to classes like this:
#ifdef SWIG
struct ResultDetailsPageElementExtensionPoint {
#else
struct ResultDetailsPageElementExtensionPoint : public Extension {
#endif
We make the following assumptions (good enough for most files):
- Preprocessor condition evaluates to true from #if up to first
#else/#elif/#endif.
- Preprocessor condition evaluates to false from #else/#elif up
to #endif. We still perform lint checks on these lines, but
these do not affect nesting stack.
Args:
line: current line to check.
"""
if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
# Beginning of #if block, save the nesting stack here. The saved
# stack will allow us to restore the parsing state in the #else case.
self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
elif Match(r'^\s*#\s*(else|elif)\b', line):
# Beginning of #else block
if self.pp_stack:
if not self.pp_stack[-1].seen_else:
# This is the first #else or #elif block. Remember the
# whole nesting stack up to this point. This is what we
# keep after the #endif.
self.pp_stack[-1].seen_else = True
self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
# Restore the stack to how it was before the #if
self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
else:
# TODO(unknown): unexpected #else, issue warning?
pass
elif Match(r'^\s*#\s*endif\b', line):
# End of #if or #else blocks.
if self.pp_stack:
# If we saw an #else, we will need to restore the nesting
# stack to its former state before the #else, otherwise we
# will just continue from where we left off.
if self.pp_stack[-1].seen_else:
# Here we can just use a shallow copy since we are the last
# reference to it.
self.stack = self.pp_stack[-1].stack_before_else
# Drop the corresponding #if
self.pp_stack.pop()
else:
# TODO(unknown): unexpected #endif, issue warning?
pass
# TODO(unknown): Update() is too long, but we will refactor later.
def Update(self, filename, clean_lines, linenum, error):
"""Update nesting state with current line.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remember top of the previous nesting stack.
#
# The stack is always pushed/popped and not modified in place, so
# we can just do a shallow copy instead of copy.deepcopy. Using
# deepcopy would slow down cpplint by ~28%.
if self.stack:
self.previous_stack_top = self.stack[-1]
else:
self.previous_stack_top = None
# Update pp_stack
self.UpdatePreprocessor(line)
# Count parentheses. This is to avoid adding struct arguments to
# the nesting stack.
if self.stack:
inner_block = self.stack[-1]
depth_change = line.count('(') - line.count(')')
inner_block.open_parentheses += depth_change
# Also check if we are starting or ending an inline assembly block.
if inner_block.inline_asm in (_NO_ASM, _END_ASM):
if (depth_change != 0 and
inner_block.open_parentheses == 1 and
_MATCH_ASM.match(line)):
# Enter assembly block
inner_block.inline_asm = _INSIDE_ASM
else:
# Not entering assembly block. If previous line was _END_ASM,
# we will now shift to _NO_ASM state.
inner_block.inline_asm = _NO_ASM
elif (inner_block.inline_asm == _INSIDE_ASM and
inner_block.open_parentheses == 0):
# Exit assembly block
inner_block.inline_asm = _END_ASM
# Consume namespace declaration at the beginning of the line. Do
# this in a loop so that we catch same line declarations like this:
# namespace proto2 { namespace bridge { class MessageSet; } }
while True:
      # Match start of namespace. The "\b\s*" below catches namespace
      # declarations even if they aren't followed by whitespace; this is
      # so that we don't confuse our namespace checker. The missing
      # spaces will be flagged by CheckSpacing.
namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
if not namespace_decl_match:
break
new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
self.stack.append(new_namespace)
line = namespace_decl_match.group(2)
if line.find('{') != -1:
new_namespace.seen_open_brace = True
line = line[line.find('{') + 1:]
# Look for a class declaration in whatever is left of the line
# after parsing namespaces. The regexp accounts for decorated classes
# such as in:
# class LOCKABLE API Object {
# };
class_decl_match = Match(
r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?'
r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
r'(.*)$', line)
if (class_decl_match and
(not self.stack or self.stack[-1].open_parentheses == 0)):
# We do not want to accept classes that are actually template arguments:
# template <class Ignore1,
# class Ignore2 = Default<Args>,
# template <Args> class Ignore3>
# void Function() {};
#
# To avoid template argument cases, we scan forward and look for
# an unmatched '>'. If we see one, assume we are inside a
# template argument list.
end_declaration = len(class_decl_match.group(1))
if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
self.stack.append(_ClassInfo(
class_decl_match.group(3), class_decl_match.group(2),
clean_lines, linenum))
line = class_decl_match.group(4)
# If we have not yet seen the opening brace for the innermost block,
# run checks here.
if not self.SeenOpenBrace():
self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
# Update access control if we are inside a class/struct
if self.stack and isinstance(self.stack[-1], _ClassInfo):
classinfo = self.stack[-1]
access_match = Match(
r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
r':(?:[^:]|$)',
line)
if access_match:
classinfo.access = access_match.group(2)
# Check that access keywords are indented +1 space. Skip this
# check if the keywords are not preceded by whitespaces.
indent = access_match.group(1)
if (len(indent) != classinfo.class_indent + 1 and
Match(r'^\s*$', indent)):
if classinfo.is_struct:
parent = 'struct ' + classinfo.name
else:
parent = 'class ' + classinfo.name
slots = ''
if access_match.group(3):
slots = access_match.group(3)
error(filename, linenum, 'whitespace/indent', 3,
'%s%s: should be indented +1 space inside %s' % (
access_match.group(2), slots, parent))
# Consume braces or semicolons from what's left of the line
while True:
# Match first brace, semicolon, or closed parenthesis.
matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
if not matched:
break
token = matched.group(1)
if token == '{':
        # If namespace or class hasn't seen an opening brace yet, mark
# namespace/class head as complete. Push a new block onto the
# stack otherwise.
if not self.SeenOpenBrace():
self.stack[-1].seen_open_brace = True
elif Match(r'^extern\s*"[^"]*"\s*\{', line):
self.stack.append(_ExternCInfo())
else:
self.stack.append(_BlockInfo(True))
if _MATCH_ASM.match(line):
self.stack[-1].inline_asm = _BLOCK_ASM
elif token == ';' or token == ')':
# If we haven't seen an opening brace yet, but we already saw
# a semicolon, this is probably a forward declaration. Pop
# the stack for these.
#
# Similarly, if we haven't seen an opening brace yet, but we
# already saw a closing parenthesis, then these are probably
# function arguments with extra "class" or "struct" keywords.
        # Also pop the stack for these.
if not self.SeenOpenBrace():
self.stack.pop()
else: # token == '}'
# Perform end of block checks and pop the stack.
if self.stack:
self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
self.stack.pop()
line = matched.group(2)
def InnermostClass(self):
"""Get class info on the top of the stack.
Returns:
A _ClassInfo object if we are inside a class, or None otherwise.
"""
for i in range(len(self.stack), 0, -1):
classinfo = self.stack[i - 1]
if isinstance(classinfo, _ClassInfo):
return classinfo
return None
def CheckCompletedBlocks(self, filename, error):
"""Checks that all classes and namespaces have been completely parsed.
Call this when all lines in a file have been processed.
Args:
filename: The name of the current file.
error: The function to call with any errors found.
"""
# Note: This test can result in false positives if #ifdef constructs
# get in the way of brace matching. See the testBuildClass test in
# cpplint_unittest.py for an example of this.
for obj in self.stack:
if isinstance(obj, _ClassInfo):
error(filename, obj.starting_linenum, 'build/class', 5,
'Failed to find complete declaration of class %s' %
obj.name)
elif isinstance(obj, _NamespaceInfo):
error(filename, obj.starting_linenum, 'build/namespaces', 5,
'Failed to find complete declaration of namespace %s' %
obj.name)
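# Editor's illustrative sketch (not part of the original cpplint sources):
# feeding lines through NestingState.Update and observing the innermost
# block on the stack after each line. Expected values assume the class
# implementation above.
def _example_nesting_state():
  cl = CleansedLines(['namespace foo {',
                      'class Bar {',
                      ' public:',
                      '};',
                      '}  // namespace foo'])
  state = NestingState()
  kinds = []
  for linenum in range(cl.NumLines()):
    state.Update('demo.cc', cl, linenum, lambda *args: None)
    kinds.append(type(state.stack[-1]).__name__ if state.stack else None)
  # Expected: ['_NamespaceInfo', '_ClassInfo', '_ClassInfo',
  #            '_NamespaceInfo', None]
  return kinds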
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
nesting_state, error):
r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if Search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if Search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage class (static, extern, typedef, etc) should be first.')
if Match(r'\s*#\s*endif\s*[^/\s]+', line):
error(filename, linenum, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(filename, linenum, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
line):
error(filename, linenum, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
# TODO(unknown): Could it be expanded safely to arbitrary references,
# without triggering too many false positives? The first
# attempt triggered 5 warnings for mostly benign code in the regtest, hence
# the restriction.
# Here's the original regexp, for the reference:
# type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
# r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
error(filename, linenum, 'runtime/member_string_references', 2,
'const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.')
# Everything else in this function operates on class declarations.
# Return early if the top of the nesting stack is not a class, or if
# the class head is not completed yet.
classinfo = nesting_state.InnermostClass()
if not classinfo or not classinfo.seen_open_brace:
return
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style. Also look for
# non-single-argument constructors which are also technically valid, but
# strongly suggest something is wrong.
explicit_constructor_match = Match(
r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*'
r'\(((?:[^()]|\([^()]*\))*)\)'
% re.escape(base_classname),
line)
if explicit_constructor_match:
is_marked_explicit = explicit_constructor_match.group(1)
if not explicit_constructor_match.group(2):
constructor_args = []
else:
constructor_args = explicit_constructor_match.group(2).split(',')
# collapse arguments so that commas in template parameter lists and function
# argument parameter lists don't split arguments in two
i = 0
while i < len(constructor_args):
constructor_arg = constructor_args[i]
while (constructor_arg.count('<') > constructor_arg.count('>') or
constructor_arg.count('(') > constructor_arg.count(')')):
constructor_arg += ',' + constructor_args[i + 1]
del constructor_args[i + 1]
constructor_args[i] = constructor_arg
i += 1
defaulted_args = [arg for arg in constructor_args if '=' in arg]
noarg_constructor = (not constructor_args or # empty arg list
# 'void' arg specifier
(len(constructor_args) == 1 and
constructor_args[0].strip() == 'void'))
onearg_constructor = ((len(constructor_args) == 1 and # exactly one arg
not noarg_constructor) or
# all but at most one arg defaulted
(len(constructor_args) >= 1 and
not noarg_constructor and
len(defaulted_args) >= len(constructor_args) - 1))
initializer_list_constructor = bool(
onearg_constructor and
Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
copy_constructor = bool(
onearg_constructor and
Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
% re.escape(base_classname), constructor_args[0].strip()))
if (not is_marked_explicit and
onearg_constructor and
not initializer_list_constructor and
not copy_constructor):
if defaulted_args:
error(filename, linenum, 'runtime/explicit', 5,
'Constructors callable with one argument '
'should be marked explicit.')
else:
error(filename, linenum, 'runtime/explicit', 5,
'Single-parameter constructors should be marked explicit.')
elif is_marked_explicit and not onearg_constructor:
if noarg_constructor:
error(filename, linenum, 'runtime/explicit', 5,
'Zero-parameter constructors should not be marked explicit.')
else:
error(filename, linenum, 'runtime/explicit', 0,
'Constructors that require multiple arguments '
'should not be marked explicit.')
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
"""Checks for the correctness of various spacing around function calls.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Since function calls often occur inside if/for/while/switch
# expressions - which have their own, more liberal conventions - we
# first see if we should be looking inside such an expression for a
# function call, to which we can apply more strict standards.
fncall = line # if there's no control flow construct, look at whole line
for pattern in (r'\bif\s*\((.*)\)\s*{',
r'\bfor\s*\((.*)\)\s*{',
r'\bwhile\s*\((.*)\)\s*[{;]',
r'\bswitch\s*\((.*)\)\s*{'):
match = Search(pattern, line)
if match:
fncall = match.group(1) # look inside the parens for function calls
break
# Except in if/for/while/switch, there should never be space
# immediately inside parens (eg "f( 3, 4 )"). We make an exception
# for nested parens ( (a+b) + c ). Likewise, there should never be
# a space before a ( when it's a function argument. I assume it's a
# function argument when the char before the whitespace is legal in
# a function name (alnum + _) and we're not starting a macro. Also ignore
  # pointers and references to arrays and functions because they're too tricky:
# we use a very simple way to recognize these:
# " (something)(maybe-something)" or
# " (something)(maybe-something," or
# " (something)[something]"
# Note that we assume the contents of [] to be short enough that
# they'll never need to wrap.
if ( # Ignore control structures.
not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
fncall) and
# Ignore pointers/references to functions.
not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
# Ignore pointers/references to arrays.
not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
error(filename, linenum, 'whitespace/parens', 4,
'Extra space after ( in function call')
elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Extra space after (')
if (Search(r'\w\s+\(', fncall) and
not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and
not Search(r'\bcase\s+\(', fncall)):
# TODO(unknown): Space after an operator function seem to be a common
# error, silence those for now by restricting them to highest verbosity.
if Search(r'\boperator_*\b', line):
error(filename, linenum, 'whitespace/parens', 0,
'Extra space before ( in function call')
else:
error(filename, linenum, 'whitespace/parens', 4,
'Extra space before ( in function call')
# If the ) is followed only by a newline or a { + newline, assume it's
# part of a control statement (if/while/etc), and don't complain
if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
# If the closing parenthesis is preceded by only whitespaces,
# try to give a more descriptive error message.
if Search(r'^\s+\)', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Closing ) should be moved to the previous line')
else:
error(filename, linenum, 'whitespace/parens', 2,
'Extra space before )')
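# Editor's illustrative sketch (not part of the original cpplint sources):
# collecting the messages CheckSpacingForFunctionCall would emit for one
# badly spaced call. The expectations assume the implementation above.
def _example_spacing_for_function_call():
  cl = CleansedLines(['f ( 3, 4 );'])
  messages = []
  def _error(filename, linenum, category, confidence, message):
    messages.append(message)
  CheckSpacingForFunctionCall('demo.cc', cl, 0, _error)
  # Expected: three whitespace/parens messages, complaining about the space
  # after '(', the space before '(', and the space before ')'.
  return messages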
def IsBlankLine(line):
"""Returns true if the given line is blank.
We consider a line to be blank if the line is empty or consists of
only white spaces.
Args:
line: A line of a string.
Returns:
True, if the given line is blank.
"""
return not line or line.isspace()
def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
error):
is_namespace_indent_item = (
len(nesting_state.stack) > 1 and
nesting_state.stack[-1].check_namespace_indentation and
isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and
nesting_state.previous_stack_top == nesting_state.stack[-2])
if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
clean_lines.elided, line):
CheckItemIndentationInNamespace(filename, clean_lines.elided,
line, error)
def CheckForFunctionLengths(filename, clean_lines, linenum,
function_state, error):
"""Reports for long function bodies.
For an overview why this is done, see:
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Only checks unindented functions, so class members are unchecked.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Blank/comment lines are not counted so as to avoid encouraging the removal
of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[linenum]
joined_line = ''
starting_func = False
regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
match_result = Match(regexp, line)
if match_result:
# If the name is all caps and underscores, figure it's a macro and
# ignore it, unless it's TEST or TEST_F.
function_name = match_result.group(1).split()[-1]
if function_name == 'TEST' or function_name == 'TEST_F' or (
not Match(r'[A-Z_]+$', function_name)):
starting_func = True
if starting_func:
body_found = False
for start_linenum in xrange(linenum, clean_lines.NumLines()):
start_line = lines[start_linenum]
joined_line += ' ' + start_line.lstrip()
if Search(r'(;|})', start_line): # Declarations and trivial functions
body_found = True
break # ... ignore
elif Search(r'{', start_line):
body_found = True
function = Search(r'((\w|:)*)\(', line).group(1)
if Match(r'TEST', function): # Handle TEST... macros
parameter_regexp = Search(r'(\(.*\))', joined_line)
if parameter_regexp: # Ignore bad syntax
function += parameter_regexp.group(1)
else:
function += '()'
function_state.Begin(function)
break
if not body_found:
# No body for the function (or evidence of a non-function) was found.
error(filename, linenum, 'readability/fn_size', 5,
'Lint failed to find start of function body.')
elif Match(r'^\}\s*$', line): # function end
function_state.Check(error, filename, linenum)
function_state.End()
elif not Match(r'^\s*$', line):
function_state.Count() # Count non-blank/non-comment lines.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
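# Editor's illustrative sketch (not part of the original cpplint sources):
# what the TODO pattern above captures for a well-formed TODO comment.
def _example_todo_pattern():
  m = _RE_PATTERN_TODO.match('// TODO(alice): refactor')
  # Expected groups: leading whitespace ' ', username '(alice)', and a
  # single space after the colon.
  return m.group(1), m.group(2), m.group(3)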
def CheckComment(line, filename, linenum, next_line_start, error):
"""Checks for common mistakes in comments.
Args:
line: The line in question.
filename: The name of the current file.
linenum: The number of the line to check.
next_line_start: The first non-whitespace column of the next line.
error: The function to call with any errors found.
"""
commentpos = line.find('//')
if commentpos != -1:
# Check if the // may be in quotes. If so, ignore it
# Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
if (line.count('"', 0, commentpos) -
line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes
# Allow one space for new scopes, two spaces otherwise:
if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
((commentpos >= 1 and
line[commentpos-1] not in string.whitespace) or
(commentpos >= 2 and
line[commentpos-2] not in string.whitespace))):
error(filename, linenum, 'whitespace/comments', 2,
'At least two spaces is best between code and comments')
# Checks for common mistakes in TODO comments.
comment = line[commentpos:]
match = _RE_PATTERN_TODO.match(comment)
if match:
# One whitespace is correct; zero whitespace is handled elsewhere.
leading_whitespace = match.group(1)
if len(leading_whitespace) > 1:
error(filename, linenum, 'whitespace/todo', 2,
'Too many spaces before TODO')
username = match.group(2)
if not username:
error(filename, linenum, 'readability/todo', 2,
'Missing username in TODO; it should look like '
'"// TODO(my_username): Stuff."')
middle_whitespace = match.group(3)
# Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
if middle_whitespace != ' ' and middle_whitespace != '':
error(filename, linenum, 'whitespace/todo', 2,
'TODO(my_username) should be followed by a space')
# If the comment contains an alphanumeric character, there
# should be a space somewhere between it and the // unless
# it's a /// or //! Doxygen comment.
if (Match(r'//[^ ]*\w', comment) and
not Match(r'(///|//\!)(\s+|$)', comment)):
error(filename, linenum, 'whitespace/comments', 4,
'Should have a space between // and comment')
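# Illustrative behavior of CheckComment (example only): for the line
# 'int x = 0;// note' the 'whitespace/comments' warning fires because the
# character just before '//' is ';' rather than whitespace, while
# 'int x = 0;  // note' passes both the spacing and the TODO checks.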
def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
"""Checks for improper use of DISALLOW* macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
if not matched:
return
if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
if nesting_state.stack[-1].access != 'private':
error(filename, linenum, 'readability/constructors', 3,
'%s must be in the private: section' % matched.group(1))
else:
# Found DISALLOW* macro outside a class declaration, or perhaps it
# was used inside a function when it should have been part of the
# class declaration. We could issue a warning here, but it
# probably resulted in a compiler error already.
pass
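# Illustrative behavior of CheckAccess (example only): a snippet such as
#   class Foo {
#    public:
#     DISALLOW_COPY_AND_ASSIGN(Foo);
#   };
# is reported as 'readability/constructors', because the macro is seen while
# the innermost _ClassInfo on the nesting stack still has 'public' access.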
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't add a blank line
after public/protected/private, don't have too many blank lines in a row.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings.
raw = clean_lines.lines_without_raw_strings
line = raw[linenum]
# Before nixing comments, check if the line is blank for no good
# reason. This includes the first line after a block is opened, and
# blank lines at the end of a function (ie, right before a line like '}'
#
# Skip all the blank line checks if we are immediately inside a
# namespace body. In other words, don't issue blank line warnings
# for this block:
# namespace {
#
# }
#
# A warning about missing end of namespace comments will be issued instead.
#
# Also skip blank line checks for 'extern "C"' blocks, which are formatted
# like namespaces.
if (IsBlankLine(line) and
not nesting_state.InNamespaceBody() and
not nesting_state.InExternC()):
elided = clean_lines.elided
prev_line = elided[linenum - 1]
prevbrace = prev_line.rfind('{')
# TODO(unknown): Don't complain if line before blank line, and line after,
# both start with alnums and are indented the same amount.
# This ignores whitespace at the start of a namespace block
# because those are not usually indented.
if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
# OK, we have a blank line at the start of a code block. Before we
# complain, we check if it is an exception to the rule: The previous
# non-empty line has the parameters of a function header that are indented
      # 4 spaces (because they did not fit in an 80 column line when placed on
# the same line as the function name). We also check for the case where
# the previous line is indented 6 spaces, which may happen when the
      # initializers of a constructor do not fit into an 80 column line.
exception = False
if Match(r' {6}\w', prev_line): # Initializer list?
# We are looking for the opening column of initializer list, which
# should be indented 4 spaces to cause 6 space indentation afterwards.
search_position = linenum-2
while (search_position >= 0
and Match(r' {6}\w', elided[search_position])):
search_position -= 1
exception = (search_position >= 0
and elided[search_position][:5] == ' :')
else:
# Search for the function arguments or an initializer list. We use a
# simple heuristic here: If the line is indented 4 spaces; and we have a
# closing paren, without the opening paren, followed by an opening brace
# or colon (for initializer lists) we assume that it is the last line of
# a function header. If we have a colon indented 4 spaces, it is an
# initializer list.
exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
prev_line)
or Match(r' {4}:', prev_line))
if not exception:
error(filename, linenum, 'whitespace/blank_line', 2,
'Redundant blank line at the start of a code block '
'should be deleted.')
# Ignore blank lines at the end of a block in a long if-else
# chain, like this:
# if (condition1) {
# // Something followed by a blank line
#
# } else if (condition2) {
# // Something else
# }
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
if (next_line
and Match(r'\s*}', next_line)
and next_line.find('} else ') == -1):
error(filename, linenum, 'whitespace/blank_line', 3,
'Redundant blank line at the end of a code block '
'should be deleted.')
matched = Match(r'\s*(public|protected|private):', prev_line)
if matched:
error(filename, linenum, 'whitespace/blank_line', 3,
'Do not leave a blank line after "%s:"' % matched.group(1))
# Next, check comments
next_line_start = 0
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
next_line_start = len(next_line) - len(next_line.lstrip())
CheckComment(line, filename, linenum, next_line_start, error)
# get rid of comments and strings
line = clean_lines.elided[linenum]
# You shouldn't have spaces before your brackets, except maybe after
# 'delete []' or 'return []() {};'
if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line):
error(filename, linenum, 'whitespace/braces', 5,
'Extra space before [')
# In range-based for, we wanted spaces before and after the colon, but
# not around "::" tokens that might appear.
if (Search(r'for *\(.*[^:]:[^: ]', line) or
Search(r'for *\(.*[^: ]:[^:]', line)):
error(filename, linenum, 'whitespace/forcolon', 2,
'Missing space around colon in range-based for loop')
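# Illustrative behavior of the range-based for check above (examples only):
#   'for (int x: values) {}'   -> 'whitespace/forcolon' (no space before ':')
#   'for (int x : values) {}'  -> no warning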
def CheckOperatorSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing around operators.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Don't try to do spacing checks for operator methods. Do this by
# replacing the troublesome characters with something else,
# preserving column position for all other characters.
#
# The replacement is done repeatedly to avoid false positives from
# operators that call operators.
while True:
match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
if match:
line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
else:
break
# We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
# Otherwise not. Note we only check for non-spaces on *both* sides;
# sometimes people put non-spaces on one side when aligning ='s among
# many lines (not that this is behavior that I approve of...)
if ((Search(r'[\w.]=', line) or
Search(r'=[\w.]', line))
and not Search(r'\b(if|while|for) ', line)
# Operators taken from [lex.operators] in C++11 standard.
and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
and not Search(r'operator=', line)):
error(filename, linenum, 'whitespace/operators', 4,
'Missing spaces around =')
# It's ok not to have spaces around binary operators like + - * /, but if
# there's too little whitespace, we get concerned. It's hard to tell,
# though, so we punt on this one for now. TODO.
# You should always have whitespace around binary operators.
#
# Check <= and >= first to avoid false positives with < and >, then
# check non-include lines for spacing around < and >.
#
  # If the operator is followed by a comma, assume it's being used in a
# macro context and don't do any checks. This avoids false
# positives.
#
# Note that && is not included here. Those are checked separately
# in CheckRValueReference
match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around %s' % match.group(1))
elif not Match(r'#.*include', line):
# Look for < that is not surrounded by spaces. This is only
# triggered if both sides are missing spaces, even though
    # technically we should flag it if at least one side is missing a
# space. This is done to avoid some false positives with shifts.
match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
if match:
(_, _, end_pos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if end_pos <= -1:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <')
# Look for > that is not surrounded by spaces. Similar to the
# above, we only trigger if both sides are missing spaces to avoid
# false positives with shifts.
match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
if match:
(_, _, start_pos) = ReverseCloseExpression(
clean_lines, linenum, len(match.group(1)))
if start_pos <= -1:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >')
# We allow no-spaces around << when used like this: 10<<20, but
# not otherwise (particularly, not when used as streams)
#
# We also allow operators following an opening parenthesis, since
# those tend to be macros that deal with operators.
match = Search(r'(operator|[^\s(<])(?:L|UL|ULL|l|ul|ull)?<<([^\s,=<])', line)
if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and
not (match.group(1) == 'operator' and match.group(2) == ';')):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <<')
# We allow no-spaces around >> for almost anything. This is because
# C++11 allows ">>" to close nested templates, which accounts for
# most cases when ">>" is not followed by a space.
#
# We still warn on ">>" followed by alpha character, because that is
# likely due to ">>" being used for right shifts, e.g.:
# value >> alpha
#
# When ">>" is used to close templates, the alphanumeric letter that
# follows would be part of an identifier, and there should still be
# a space separating the template type and the identifier.
# type<type<type>> alpha
match = Search(r'>>[a-zA-Z_]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >>')
# There shouldn't be space around unary operators
match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
if match:
error(filename, linenum, 'whitespace/operators', 4,
'Extra space for operator %s' % match.group(1))
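# Illustrative behavior of CheckOperatorSpacing (examples only):
#   'if (a>=b) return;'  -> 'whitespace/operators' (missing spaces around >=)
#   'cout<<x;'           -> 'whitespace/operators' (missing spaces around <<)
#   '1<<20'              -> no warning, both operands are numeric literals
#   'i ++;'              -> 'whitespace/operators' (extra space before ++)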
def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing around parentheses.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# No spaces after an if, while, switch, or for
match = Search(r' (if\(|for\(|while\(|switch\()', line)
if match:
error(filename, linenum, 'whitespace/parens', 5,
'Missing space before ( in %s' % match.group(1))
# For if/for/while/switch, the left and right parens should be
# consistent about how many spaces are inside the parens, and
# there should either be zero or one spaces inside the parens.
# We don't want: "if ( foo)" or "if ( foo )".
# Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
match = Search(r'\b(if|for|while|switch)\s*'
r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
line)
if match:
if len(match.group(2)) != len(match.group(4)):
if not (match.group(3) == ';' and
len(match.group(2)) == 1 + len(match.group(4)) or
not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
error(filename, linenum, 'whitespace/parens', 5,
'Mismatching spaces inside () in %s' % match.group(1))
if len(match.group(2)) not in [0, 1]:
error(filename, linenum, 'whitespace/parens', 5,
'Should have zero or one spaces inside ( and ) in %s' %
match.group(1))
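# Illustrative behavior of CheckParenthesisSpacing (examples only):
#   '  if(foo) {'            -> 'whitespace/parens' (missing space before '(')
#   '  if ( foo) {'          -> 'whitespace/parens' (mismatching spaces inside ())
#   '  for ( ; i < n; ++i) {' -> no warning, thanks to the explicit exception
#                                for for-loops with an empty init-statement.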
def CheckCommaSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing near commas and semicolons.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
raw = clean_lines.lines_without_raw_strings
line = clean_lines.elided[linenum]
# You should always have a space after a comma (either as fn arg or operator)
#
# This does not apply when the non-space character following the
# comma is another comma, since the only time when that happens is
# for empty macro arguments.
#
# We run this check in two passes: first pass on elided lines to
# verify that lines contain missing whitespaces, second pass on raw
# lines to confirm that those missing whitespaces are not due to
# elided comments.
if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and
Search(r',[^,\s]', raw[linenum])):
error(filename, linenum, 'whitespace/comma', 3,
'Missing space after ,')
# You should always have a space after a semicolon
  # except for a few corner cases
  # TODO(unknown): clarify whether 'if (1) { return 1;}' requires one more
# space after ;
if Search(r';[^\s};\\)/]', line):
error(filename, linenum, 'whitespace/semicolon', 3,
'Missing space after ;')
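# Illustrative behavior of CheckCommaSpacing (examples only):
#   'f(a,b);'   -> 'whitespace/comma' (no space after the comma)
#   'f(a, b);'  -> no warning
#   'if (x) { return 1;}'  -> no 'whitespace/semicolon' warning here, since the
#                             ';' is followed by '}' (see the TODO above).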
def CheckBracesSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing near braces.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Except after an opening paren, or after another opening brace (in case of
# an initializer list, for instance), you should have spaces before your
# braces. And since you should never have braces at the beginning of a line,
# this is an easy test.
match = Match(r'^(.*[^ ({>]){', line)
if match:
# Try a bit harder to check for brace initialization. This
# happens in one of the following forms:
# Constructor() : initializer_list_{} { ... }
# Constructor{}.MemberFunction()
# Type variable{};
# FunctionCall(type{}, ...);
# LastArgument(..., type{});
# LOG(INFO) << type{} << " ...";
# map_of_type[{...}] = ...;
# ternary = expr ? new type{} : nullptr;
# OuterTemplate<InnerTemplateConstructor<Type>{}>
#
# We check for the character following the closing brace, and
# silence the warning if it's one of those listed above, i.e.
# "{.;,)<>]:".
#
# To account for nested initializer list, we allow any number of
# closing braces up to "{;,)<". We can't simply silence the
# warning on first sight of closing brace, because that would
# cause false negatives for things that are not initializer lists.
# Silence this: But not this:
# Outer{ if (...) {
# Inner{...} if (...){ // Missing space before {
# }; }
#
# There is a false negative with this approach if people inserted
# spurious semicolons, e.g. "if (cond){};", but we will catch the
# spurious semicolon with a separate check.
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
trailing_text = ''
if endpos > -1:
trailing_text = endline[endpos:]
for offset in xrange(endlinenum + 1,
min(endlinenum + 3, clean_lines.NumLines() - 1)):
trailing_text += clean_lines.elided[offset]
if not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before {')
# Make sure '} else {' has spaces.
if Search(r'}else', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before else')
# You shouldn't have a space before a semicolon at the end of the line.
# There's a special case for "for" since the style guide allows space before
# the semicolon there.
if Search(r':\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use {} instead.')
elif Search(r'^\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
'use {} instead.')
elif (Search(r'\s+;\s*$', line) and
not Search(r'\bfor\b', line)):
error(filename, linenum, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
'statement, use {} instead.')
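# Illustrative behavior of CheckBracesSpacing (examples only):
#   'if (condition){'        -> 'whitespace/braces' (missing space before '{'),
#                               as in the "But not this:" column above
#   '}else {'                -> 'whitespace/braces' (missing space before else)
#   '  ;'                    -> 'whitespace/semicolon' (line contains only ';')
#   'std::vector<int> v{};'  -> no brace warning: the character after the
#                               matching '}' is ';', which silences the check.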
def IsDecltype(clean_lines, linenum, column):
"""Check if the token ending on (linenum, column) is decltype().
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: the number of the line to check.
column: end column of the token to check.
Returns:
True if this token is decltype() expression, False otherwise.
"""
(text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)
if start_col < 0:
return False
if Search(r'\bdecltype\s*$', text[0:start_col]):
return True
return False
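# Illustrative behavior of IsDecltype (example only): for the line
# 'decltype(x) y = x;', calling IsDecltype with the column of the ')' that
# closes 'decltype(' returns True, because the text before the matching '('
# ends in 'decltype'.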
def IsTemplateParameterList(clean_lines, linenum, column):
"""Check if the token ending on (linenum, column) is the end of template<>.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: the number of the line to check.
column: end column of the token to check.
Returns:
True if this token is end of a template parameter list, False otherwise.
"""
(_, startline, startpos) = ReverseCloseExpression(
clean_lines, linenum, column)
if (startpos > -1 and
Search(r'\btemplate\s*$', clean_lines.elided[startline][0:startpos])):
return True
return False
def IsRValueType(typenames, clean_lines, nesting_state, linenum, column):
"""Check if the token ending on (linenum, column) is a type.
Assumes that text to the right of the column is "&&" or a function
name.
Args:
typenames: set of type names from template-argument-list.
clean_lines: A CleansedLines instance containing the file.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
linenum: the number of the line to check.
column: end column of the token to check.
Returns:
True if this token is a type, False if we are not sure.
"""
prefix = clean_lines.elided[linenum][0:column]
# Get one word to the left. If we failed to do so, this is most
# likely not a type, since it's unlikely that the type name and "&&"
# would be split across multiple lines.
match = Match(r'^(.*)(\b\w+|[>*)&])\s*$', prefix)
if not match:
return False
# Check text following the token. If it's "&&>" or "&&," or "&&...", it's
  # most likely an rvalue reference used inside a template.
suffix = clean_lines.elided[linenum][column:]
if Match(r'&&\s*(?:[>,]|\.\.\.)', suffix):
return True
# Check for known types and end of templates:
# int&& variable
# vector<int>&& variable
#
# Because this function is called recursively, we also need to
# recognize pointer and reference types:
# int* Function()
# int& Function()
if (match.group(2) in typenames or
match.group(2) in ['char', 'char16_t', 'char32_t', 'wchar_t', 'bool',
'short', 'int', 'long', 'signed', 'unsigned',
'float', 'double', 'void', 'auto', '>', '*', '&']):
return True
# If we see a close parenthesis, look for decltype on the other side.
# decltype would unambiguously identify a type, anything else is
# probably a parenthesized expression and not a type.
if match.group(2) == ')':
return IsDecltype(
clean_lines, linenum, len(match.group(1)) + len(match.group(2)) - 1)
# Check for casts and cv-qualifiers.
# match.group(1) remainder
# -------------- ---------
# const_cast< type&&
# const type&&
# type const&&
if Search(r'\b(?:const_cast\s*<|static_cast\s*<|dynamic_cast\s*<|'
r'reinterpret_cast\s*<|\w+\s)\s*$',
match.group(1)):
return True
# Look for a preceding symbol that might help differentiate the context.
# These are the cases that would be ambiguous:
# match.group(1) remainder
# -------------- ---------
# Call ( expression &&
# Declaration ( type&&
# sizeof ( type&&
# if ( expression &&
# while ( expression &&
# for ( type&&
# for( ; expression &&
# statement ; type&&
# block { type&&
# constructor { expression &&
start = linenum
line = match.group(1)
match_symbol = None
while start >= 0:
# We want to skip over identifiers and commas to get to a symbol.
# Commas are skipped so that we can find the opening parenthesis
# for function parameter lists.
match_symbol = Match(r'^(.*)([^\w\s,])[\w\s,]*$', line)
if match_symbol:
break
start -= 1
line = clean_lines.elided[start]
if not match_symbol:
# Probably the first statement in the file is an rvalue reference
return True
if match_symbol.group(2) == '}':
    # Found closing brace, probably an indication of this:
# block{} type&&
return True
if match_symbol.group(2) == ';':
# Found semicolon, probably one of these:
# for(; expression &&
# statement; type&&
# Look for the previous 'for(' in the previous lines.
before_text = match_symbol.group(1)
for i in xrange(start - 1, max(start - 6, 0), -1):
before_text = clean_lines.elided[i] + before_text
if Search(r'for\s*\([^{};]*$', before_text):
# This is the condition inside a for-loop
return False
# Did not find a for-init-statement before this semicolon, so this
# is probably a new statement and not a condition.
return True
if match_symbol.group(2) == '{':
# Found opening brace, probably one of these:
# block{ type&& = ... ; }
# constructor{ expression && expression }
# Look for a closing brace or a semicolon. If we see a semicolon
    # first, this is probably an rvalue reference.
line = clean_lines.elided[start][0:len(match_symbol.group(1)) + 1]
end = start
depth = 1
while True:
for ch in line:
if ch == ';':
return True
elif ch == '{':
depth += 1
elif ch == '}':
depth -= 1
if depth == 0:
return False
end += 1
if end >= clean_lines.NumLines():
break
line = clean_lines.elided[end]
# Incomplete program?
return False
if match_symbol.group(2) == '(':
# Opening parenthesis. Need to check what's to the left of the
# parenthesis. Look back one extra line for additional context.
before_text = match_symbol.group(1)
if linenum > 1:
before_text = clean_lines.elided[linenum - 1] + before_text
# Patterns that are likely to be types:
# [](type&&
# for (type&&
# sizeof(type&&
# operator=(type&&
#
if Search(r'(?:\]|\bfor|\bsizeof|\boperator\s*\S+\s*)\s*$', before_text):
return True
# Patterns that are likely to be expressions:
# if (expression &&
# while (expression &&
# : initializer(expression &&
# , initializer(expression &&
# ( FunctionCall(expression &&
# + FunctionCall(expression &&
# + (expression &&
#
# The last '+' represents operators such as '+' and '-'.
if Search(r'(?:\bif|\bwhile|[-+=%^(<!?:,&*]\s*)$', before_text):
return False
# Something else. Check that tokens to the left look like
# return_type function_name
match_func = Match(r'^(.*\S.*)\s+\w(?:\w|::)*(?:<[^<>]*>)?\s*$',
match_symbol.group(1))
if match_func:
# Check for constructors, which don't have return types.
if Search(r'\b(?:explicit|inline)$', match_func.group(1)):
return True
implicit_constructor = Match(r'\s*(\w+)\((?:const\s+)?(\w+)', prefix)
if (implicit_constructor and
implicit_constructor.group(1) == implicit_constructor.group(2)):
return True
return IsRValueType(typenames, clean_lines, nesting_state, linenum,
len(match_func.group(1)))
# Nothing before the function name. If this is inside a block scope,
# this is probably a function call.
return not (nesting_state.previous_stack_top and
nesting_state.previous_stack_top.IsBlockInfo())
if match_symbol.group(2) == '>':
# Possibly a closing bracket, check that what's on the other side
# looks like the start of a template.
return IsTemplateParameterList(
clean_lines, start, len(match_symbol.group(1)))
# Some other symbol, usually something like "a=b&&c". This is most
# likely not a type.
return False
def IsDeletedOrDefault(clean_lines, linenum):
"""Check if current constructor or operator is deleted or default.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if this is a deleted or default constructor.
"""
open_paren = clean_lines.elided[linenum].find('(')
if open_paren < 0:
return False
(close_line, _, close_paren) = CloseExpression(
clean_lines, linenum, open_paren)
if close_paren < 0:
return False
return Match(r'\s*=\s*(?:delete|default)\b', close_line[close_paren:])
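# Illustrative behavior of IsDeletedOrDefault (examples only):
#   'Foo(Foo&& other) = delete;'             -> True
#   'Foo& operator=(const Foo&) = default;'  -> True
#   'Foo(Foo&& other);'                      -> False (no '= delete/default')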
def IsRValueAllowed(clean_lines, linenum, typenames):
"""Check if RValue reference is allowed on a particular line.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
typenames: set of type names from template-argument-list.
Returns:
True if line is within the region where RValue references are allowed.
"""
# Allow region marked by PUSH/POP macros
for i in xrange(linenum, 0, -1):
line = clean_lines.elided[i]
if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line):
if not line.endswith('PUSH'):
return False
for j in xrange(linenum, clean_lines.NumLines(), 1):
line = clean_lines.elided[j]
if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line):
return line.endswith('POP')
# Allow operator=
line = clean_lines.elided[linenum]
if Search(r'\boperator\s*=\s*\(', line):
return IsDeletedOrDefault(clean_lines, linenum)
# Allow constructors
match = Match(r'\s*(?:[\w<>]+::)*([\w<>]+)\s*::\s*([\w<>]+)\s*\(', line)
if match and match.group(1) == match.group(2):
return IsDeletedOrDefault(clean_lines, linenum)
if Search(r'\b(?:explicit|inline)\s+[\w<>]+\s*\(', line):
return IsDeletedOrDefault(clean_lines, linenum)
if Match(r'\s*[\w<>]+\s*\(', line):
previous_line = 'ReturnType'
if linenum > 0:
previous_line = clean_lines.elided[linenum - 1]
if Match(r'^\s*$', previous_line) or Search(r'[{}:;]\s*$', previous_line):
return IsDeletedOrDefault(clean_lines, linenum)
# Reject types not mentioned in template-argument-list
while line:
match = Match(r'^.*?(\w+)\s*&&(.*)$', line)
if not match:
break
if match.group(1) not in typenames:
return False
line = match.group(2)
# All RValue types that were in template-argument-list should have
# been removed by now. Those were allowed, assuming that they will
# be forwarded.
#
# If there are no remaining RValue types left (i.e. types that were
# not found in template-argument-list), flag those as not allowed.
return line.find('&&') < 0
def GetTemplateArgs(clean_lines, linenum):
"""Find list of template arguments associated with this function declaration.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: Line number containing the start of the function declaration,
usually one line after the end of the template-argument-list.
Returns:
Set of type names, or empty set if this does not appear to have
any template parameters.
"""
# Find start of function
func_line = linenum
while func_line > 0:
line = clean_lines.elided[func_line]
if Match(r'^\s*$', line):
return set()
if line.find('(') >= 0:
break
func_line -= 1
if func_line == 0:
return set()
# Collapse template-argument-list into a single string
argument_list = ''
match = Match(r'^(\s*template\s*)<', clean_lines.elided[func_line])
if match:
# template-argument-list on the same line as function name
start_col = len(match.group(1))
_, end_line, end_col = CloseExpression(clean_lines, func_line, start_col)
if end_col > -1 and end_line == func_line:
start_col += 1 # Skip the opening bracket
argument_list = clean_lines.elided[func_line][start_col:end_col]
elif func_line > 1:
# template-argument-list one line before function name
match = Match(r'^(.*)>\s*$', clean_lines.elided[func_line - 1])
if match:
end_col = len(match.group(1))
_, start_line, start_col = ReverseCloseExpression(
clean_lines, func_line - 1, end_col)
if start_col > -1:
start_col += 1 # Skip the opening bracket
while start_line < func_line - 1:
argument_list += clean_lines.elided[start_line][start_col:]
start_col = 0
start_line += 1
argument_list += clean_lines.elided[func_line - 1][start_col:end_col]
if not argument_list:
return set()
# Extract type names
typenames = set()
while True:
match = Match(r'^[,\s]*(?:typename|class)(?:\.\.\.)?\s+(\w+)(.*)$',
argument_list)
if not match:
break
typenames.add(match.group(1))
argument_list = match.group(2)
return typenames
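# Illustrative behavior of GetTemplateArgs (example only): given a
# hypothetical declaration
#   template <typename T, class U>
#   void Swap(T&& a, U&& b);
# calling GetTemplateArgs with the line number of the 'void Swap...' line
# returns {'T', 'U'}; a declaration with no template header returns set().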
def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error):
"""Check for rvalue references.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Find lines missing spaces around &&.
# TODO(unknown): currently we don't check for rvalue references
# with spaces surrounding the && to avoid false positives with
# boolean expressions.
line = clean_lines.elided[linenum]
match = Match(r'^(.*\S)&&', line)
if not match:
match = Match(r'(.*)&&\S', line)
if (not match) or '(&&)' in line or Search(r'\boperator\s*$', match.group(1)):
return
# Either poorly formed && or an rvalue reference, check the context
# to get a more accurate error message. Mostly we want to determine
# if what's to the left of "&&" is a type or not.
typenames = GetTemplateArgs(clean_lines, linenum)
and_pos = len(match.group(1))
if IsRValueType(typenames, clean_lines, nesting_state, linenum, and_pos):
if not IsRValueAllowed(clean_lines, linenum, typenames):
error(filename, linenum, 'build/c++11', 3,
'RValue references are an unapproved C++ feature.')
else:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around &&')
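# Illustrative behavior of CheckRValueReference (examples only): a declaration
# such as 'void Take(int&& value);' is reported as 'build/c++11' when it is
# not inside a GOOGLE_ALLOW_RVALUE_REFERENCES_PUSH/POP region and is not a
# deleted/defaulted member, since 'int' is recognized as a type; inside a
# function body, 'if (a&&b) {' is instead reported as 'whitespace/operators',
# because the text to the left of '&&' does not look like a type.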
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
"""Checks for additional blank line issues related to sections.
Currently the only thing checked here is blank line before protected/private.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
class_info: A _ClassInfo objects.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Skip checks if the class is small, where small means 25 lines or less.
# 25 lines seems like a good cutoff since that's the usual height of
# terminals, and any class that can't fit in one screen can't really
# be considered "small".
#
# Also skip checks if we are on the first line. This accounts for
# classes that look like
# class Foo { public: ... };
#
# If we didn't find the end of the class, last_line would be zero,
# and the check will be skipped by the first condition.
if (class_info.last_line - class_info.starting_linenum <= 24 or
linenum <= class_info.starting_linenum):
return
matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
if matched:
# Issue warning if the line before public/protected/private was
# not a blank line, but don't do this if the previous line contains
# "class" or "struct". This can happen two ways:
# - We are at the beginning of the class.
# - We are forward-declaring an inner class that is semantically
# private, but needed to be public for implementation reasons.
# Also ignores cases where the previous line ends with a backslash as can be
# common when defining classes in C macros.
prev_line = clean_lines.lines[linenum - 1]
if (not IsBlankLine(prev_line) and
not Search(r'\b(class|struct)\b', prev_line) and
not Search(r'\\$', prev_line)):
# Try a bit harder to find the beginning of the class. This is to
# account for multi-line base-specifier lists, e.g.:
# class Derived
# : public Base {
end_class_head = class_info.starting_linenum
for i in range(class_info.starting_linenum, linenum):
if Search(r'\{\s*$', clean_lines.lines[i]):
end_class_head = i
break
if end_class_head < linenum - 1:
error(filename, linenum, 'whitespace/blank_line', 3,
'"%s:" should be preceded by a blank line' % matched.group(1))
def GetPreviousNonBlankLine(clean_lines, linenum):
"""Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
linenum: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line.
"""
prevlinenum = linenum - 1
while prevlinenum >= 0:
prevline = clean_lines.elided[prevlinenum]
if not IsBlankLine(prevline): # if not a blank line...
return (prevline, prevlinenum)
prevlinenum -= 1
return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
"""Looks for misplaced braces (e.g. at the end of line).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
if Match(r'\s*{\s*$', line):
# We allow an open brace to start a line in the case where someone is using
# braces in a block to explicitly create a new scope, which is commonly used
# to control the lifetime of stack-allocated variables. Braces are also
# used for brace initializers inside function calls. We don't detect this
# perfectly: we just don't complain if the last non-whitespace character on
# the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
# previous line starts a preprocessor block.
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if (not Search(r'[,;:}{(]\s*$', prevline) and
not Match(r'\s*#', prevline)):
error(filename, linenum, 'whitespace/braces', 4,
'{ should almost always be at the end of the previous line')
# An else clause should be on the same line as the preceding closing brace.
if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if Match(r'\s*}\s*$', prevline):
error(filename, linenum, 'whitespace/newline', 4,
'An else should appear on the same line as the preceding }')
# If braces come on one side of an else, they should be on both.
# However, we have to worry about "else if" that spans multiple lines!
if Search(r'else if\s*\(', line): # could be multi-line if
brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
# find the ( after the if
pos = line.find('else if')
pos = line.find('(', pos)
if pos > 0:
(endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
brace_on_right = endline[endpos:].find('{') != -1
if brace_on_left != brace_on_right: # must be brace after if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
# Likewise, an else should never have the else clause on the same line
if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
error(filename, linenum, 'whitespace/newline', 4,
'Else clause should never be on same line as else (use 2 lines)')
# In the same way, a do/while should never be on one line
if Match(r'\s*do [^\s{]', line):
error(filename, linenum, 'whitespace/newline', 4,
'do/while clauses should not be on a single line')
# Check single-line if/else bodies. The style guide says 'curly braces are not
# required for single-line statements'. We additionally allow multi-line,
# single statements, but we reject anything with more than one semicolon in
# it. This means that the first semicolon after the if should be at the end of
# its line, and the line after that should have an indent level equal to or
# lower than the if. We also check for ambiguous if/else nesting without
# braces.
if_else_match = Search(r'\b(if\s*\(|else\b)', line)
if if_else_match and not Match(r'\s*#', line):
if_indent = GetIndentLevel(line)
endline, endlinenum, endpos = line, linenum, if_else_match.end()
if_match = Search(r'\bif\s*\(', line)
if if_match:
# This could be a multiline if condition, so find the end first.
pos = if_match.end() - 1
(endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
# Check for an opening brace, either directly after the if or on the next
# line. If found, this isn't a single-statement conditional.
if (not Match(r'\s*{', endline[endpos:])
and not (Match(r'\s*$', endline[endpos:])
and endlinenum < (len(clean_lines.elided) - 1)
and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
while (endlinenum < len(clean_lines.elided)
and ';' not in clean_lines.elided[endlinenum][endpos:]):
endlinenum += 1
endpos = 0
if endlinenum < len(clean_lines.elided):
endline = clean_lines.elided[endlinenum]
# We allow a mix of whitespace and closing braces (e.g. for one-liner
# methods) and a single \ after the semicolon (for macros)
endpos = endline.find(';')
if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
# Semicolon isn't the last character, there's something trailing.
# Output a warning if the semicolon is not contained inside
# a lambda expression.
if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
endline):
error(filename, linenum, 'readability/braces', 4,
'If/else bodies with multiple statements require braces')
elif endlinenum < len(clean_lines.elided) - 1:
# Make sure the next line is dedented
next_line = clean_lines.elided[endlinenum + 1]
next_indent = GetIndentLevel(next_line)
# With ambiguous nested if statements, this will error out on the
# if that *doesn't* match the else, regardless of whether it's the
# inner one or outer one.
if (if_match and Match(r'\s*else\b', next_line)
and next_indent != if_indent):
error(filename, linenum, 'readability/braces', 4,
'Else clause should be indented at the same level as if. '
'Ambiguous nested if/else chains require braces.')
elif next_indent > if_indent:
error(filename, linenum, 'readability/braces', 4,
'If/else bodies with multiple statements require braces')
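# Illustrative behavior of CheckBraces (examples only):
#   '{' on its own line directly after 'x = 1;'  -> no warning (explicit scope)
#   '{' on its own line directly after 'if (x)'  -> 'whitespace/braces'
#   'else {' on the line after a bare '}'        -> 'whitespace/newline'
#                                                   (and 'readability/braces')
#   'do x--; while (x);'                         -> 'whitespace/newline'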
def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
"""Looks for redundant trailing semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Block bodies should not be followed by a semicolon. Due to C++11
# brace initialization, there are more places where semicolons are
# required than not, so we use a whitelist approach to check these
# rather than a blacklist. These are the places where "};" should
# be replaced by just "}":
# 1. Some flavor of block following closing parenthesis:
# for (;;) {};
# while (...) {};
# switch (...) {};
# Function(...) {};
# if (...) {};
# if (...) else if (...) {};
#
# 2. else block:
# if (...) else {};
#
# 3. const member function:
# Function(...) const {};
#
# 4. Block following some statement:
# x = 42;
# {};
#
# 5. Block at the beginning of a function:
# Function(...) {
# {};
# }
#
# Note that naively checking for the preceding "{" will also match
# braces inside multi-dimensional arrays, but this is fine since
# that expression will not contain semicolons.
#
# 6. Block following another block:
# while (true) {}
# {};
#
# 7. End of namespaces:
# namespace {};
#
  #     These semicolons seem far more common than other kinds of
# redundant semicolons, possibly due to people converting classes
# to namespaces. For now we do not warn for this case.
#
# Try matching case 1 first.
match = Match(r'^(.*\)\s*)\{', line)
if match:
# Matched closing parenthesis (case 1). Check the token before the
# matching opening parenthesis, and don't warn if it looks like a
# macro. This avoids these false positives:
# - macro that defines a base class
# - multi-line macro that defines a base class
# - macro that defines the whole class-head
#
# But we still issue warnings for macros that we know are safe to
# warn, specifically:
# - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
# - TYPED_TEST
# - INTERFACE_DEF
# - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
#
# We implement a whitelist of safe macros instead of a blacklist of
# unsafe macros, even though the latter appears less frequently in
# google code and would have been easier to implement. This is because
# the downside for getting the whitelist wrong means some extra
# semicolons, while the downside for getting the blacklist wrong
# would result in compile errors.
#
# In addition to macros, we also don't want to warn on
# - Compound literals
# - Lambdas
# - alignas specifier with anonymous structs:
closing_brace_pos = match.group(1).rfind(')')
opening_parenthesis = ReverseCloseExpression(
clean_lines, linenum, closing_brace_pos)
if opening_parenthesis[2] > -1:
line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
macro = Search(r'\b([A-Z_]+)\s*$', line_prefix)
func = Match(r'^(.*\])\s*$', line_prefix)
if ((macro and
macro.group(1) not in (
'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
(func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or
Search(r'\s+=\s*$', line_prefix)):
match = None
if (match and
opening_parenthesis[1] > 1 and
Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
# Multi-line lambda-expression
match = None
else:
# Try matching cases 2-3.
match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
if not match:
# Try matching cases 4-6. These are always matched on separate lines.
#
# Note that we can't simply concatenate the previous line to the
# current line and do a single match, otherwise we may output
# duplicate warnings for the blank line case:
# if (cond) {
# // blank line
# }
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if prevline and Search(r'[;{}]\s*$', prevline):
match = Match(r'^(\s*)\{', line)
# Check matching closing brace
if match:
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
# Current {} pair is eligible for semicolon check, and we have found
# the redundant semicolon, output warning here.
#
# Note: because we are scanning forward for opening braces, and
# outputting warnings for the matching closing brace, if there are
# nested blocks with trailing semicolons, we will get the error
# messages in reversed order.
error(filename, endlinenum, 'readability/braces', 4,
"You don't need a ; after a }")
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
"""Look for empty loop/conditional body with only a single semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Search for loop keywords at the beginning of the line. Because only
# whitespaces are allowed before the keywords, this will also ignore most
# do-while-loops, since those lines should start with closing brace.
#
# We also check "if" blocks here, since an empty conditional block
# is likely an error.
line = clean_lines.elided[linenum]
matched = Match(r'\s*(for|while|if)\s*\(', line)
if matched:
# Find the end of the conditional expression
(end_line, end_linenum, end_pos) = CloseExpression(
clean_lines, linenum, line.find('('))
# Output warning if what follows the condition expression is a semicolon.
# No warning for all other cases, including whitespace or newline, since we
# have a separate check for semicolons preceded by whitespace.
if end_pos >= 0 and Match(r';', end_line[end_pos:]):
if matched.group(1) == 'if':
error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
'Empty conditional bodies should use {}')
else:
error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
'Empty loop bodies should use {} or continue')
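# Illustrative behavior of CheckEmptyBlockBody (examples only):
#   'while (Poll());'    -> 'whitespace/empty_loop_body'
#   'if (ready);'        -> 'whitespace/empty_conditional_body'
#   '} while (Poll());'  -> no warning; the line starts with '}' rather than a
#                           loop keyword, so the do-while idiom is skipped.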
def FindCheckMacro(line):
"""Find a replaceable CHECK-like macro.
Args:
line: line to search on.
Returns:
(macro name, start position), or (None, -1) if no replaceable
macro is found.
"""
for macro in _CHECK_MACROS:
i = line.find(macro)
if i >= 0:
# Find opening parenthesis. Do a regular expression match here
# to make sure that we are matching the expected CHECK macro, as
# opposed to some other macro that happens to contain the CHECK
# substring.
matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
if not matched:
continue
return (macro, len(matched.group(1)))
return (None, -1)
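# Illustrative behavior of FindCheckMacro (examples only; _CHECK_MACROS is
# defined earlier in this file):
#   '  CHECK(x == 42);'  -> ('CHECK', offset of the opening parenthesis)
#   'MY_CHECKSUM(x);'    -> (None, -1); 'CHECK' appears only inside another
#                           identifier, so the word-boundary match fails.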
def CheckCheck(filename, clean_lines, linenum, error):
"""Checks the use of CHECK and EXPECT macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Decide the set of replacement macros that should be suggested
lines = clean_lines.elided
(check_macro, start_pos) = FindCheckMacro(lines[linenum])
if not check_macro:
return
# Find end of the boolean expression by matching parentheses
(last_line, end_line, end_pos) = CloseExpression(
clean_lines, linenum, start_pos)
if end_pos < 0:
return
# If the check macro is followed by something other than a
# semicolon, assume users will log their own custom error messages
# and don't suggest any replacements.
if not Match(r'\s*;', last_line[end_pos:]):
return
if linenum == end_line:
expression = lines[linenum][start_pos + 1:end_pos - 1]
else:
expression = lines[linenum][start_pos + 1:]
for i in xrange(linenum + 1, end_line):
expression += lines[i]
expression += last_line[0:end_pos - 1]
# Parse expression so that we can take parentheses into account.
# This avoids false positives for inputs like "CHECK((a < 4) == b)",
# which is not replaceable by CHECK_LE.
lhs = ''
rhs = ''
operator = None
while expression:
matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
r'==|!=|>=|>|<=|<|\()(.*)$', expression)
if matched:
token = matched.group(1)
if token == '(':
# Parenthesized operand
expression = matched.group(2)
(end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
if end < 0:
return # Unmatched parenthesis
lhs += '(' + expression[0:end]
expression = expression[end:]
elif token in ('&&', '||'):
# Logical and/or operators. This means the expression
# contains more than one term, for example:
# CHECK(42 < a && a < b);
#
# These are not replaceable with CHECK_LE, so bail out early.
return
elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
# Non-relational operator
lhs += token
expression = matched.group(2)
else:
# Relational operator
operator = token
rhs = matched.group(2)
break
else:
# Unparenthesized operand. Instead of appending to lhs one character
# at a time, we do another regular expression match to consume several
# characters at once if possible. Trivial benchmark shows that this
# is more efficient when the operands are longer than a single
# character, which is generally the case.
matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
if not matched:
matched = Match(r'^(\s*\S)(.*)$', expression)
if not matched:
break
lhs += matched.group(1)
expression = matched.group(2)
# Only apply checks if we got all parts of the boolean expression
if not (lhs and operator and rhs):
return
  # Check that rhs does not contain logical operators.  We already know
# that lhs is fine since the loop above parses out && and ||.
if rhs.find('&&') > -1 or rhs.find('||') > -1:
return
# At least one of the operands must be a constant literal. This is
# to avoid suggesting replacements for unprintable things like
# CHECK(variable != iterator)
#
# The following pattern matches decimal, hex integers, strings, and
# characters (in that order).
lhs = lhs.strip()
rhs = rhs.strip()
match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
if Match(match_constant, lhs) or Match(match_constant, rhs):
# Note: since we know both lhs and rhs, we can provide a more
# descriptive error message like:
# Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
# Instead of:
# Consider using CHECK_EQ instead of CHECK(a == b)
#
# We are still keeping the less descriptive message because if lhs
# or rhs gets long, the error message might become unreadable.
error(filename, linenum, 'readability/check', 2,
'Consider using %s instead of %s(a %s b)' % (
_CHECK_REPLACEMENT[check_macro][operator],
check_macro, operator))
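# Illustrative behavior of CheckCheck (examples only):
#   'CHECK(x == 42);'          -> suggests CHECK_EQ (one operand is a literal)
#   'CHECK(a == b);'           -> no suggestion (neither operand is a literal)
#   'CHECK(42 < a && a < b);'  -> no suggestion (more than one term)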
def CheckAltTokens(filename, clean_lines, linenum, error):
"""Check alternative keywords being used in boolean expressions.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Avoid preprocessor lines
if Match(r'^\s*#', line):
return
# Last ditch effort to avoid multi-line comments. This will not help
# if the comment started before the current line or ended after the
# current line, but it catches most of the false positives. At least,
# it provides a way to workaround this warning for people who use
# multi-line comments in preprocessor macros.
#
# TODO(unknown): remove this once cpplint has better support for
# multi-line comments.
if line.find('/*') >= 0 or line.find('*/') >= 0:
return
for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
error(filename, linenum, 'readability/alt_tokens', 2,
'Use operator %s instead of %s' % (
_ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
def GetLineWidth(line):
"""Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters.
"""
if isinstance(line, unicode):
width = 0
for uc in unicodedata.normalize('NFC', line):
if unicodedata.east_asian_width(uc) in ('W', 'F'):
width += 2
elif not unicodedata.combining(uc):
width += 1
return width
else:
return len(line)
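# Illustrative behavior of GetLineWidth (examples only): GetLineWidth('hello')
# is 5, while a unicode string of two fullwidth CJK characters such as
# u'\u4f60\u597d' has width 4, since each character occupies two columns.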
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings.
raw_lines = clean_lines.lines_without_raw_strings
line = raw_lines[linenum]
if line.find('\t') != -1:
error(filename, linenum, 'whitespace/tab', 1,
'Tab found; better to use spaces')
# One or three blank spaces at the beginning of the line is weird; it's
# hard to reconcile that with 2-space indents.
# NOTE: here are the conditions rob pike used for his tests. Mine aren't
# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
# if(RLENGTH > 20) complain = 0;
# if(match($0, " +(error|private|public|protected):")) complain = 0;
# if(match(prev, "&& *$")) complain = 0;
# if(match(prev, "\\|\\| *$")) complain = 0;
# if(match(prev, "[\",=><] *$")) complain = 0;
# if(match($0, " <<")) complain = 0;
# if(match(prev, " +for \\(")) complain = 0;
# if(prevodd && match(prevprev, " +for \\(")) complain = 0;
scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$'
classinfo = nesting_state.InnermostClass()
initial_spaces = 0
cleansed_line = clean_lines.elided[linenum]
while initial_spaces < len(line) and line[initial_spaces] == ' ':
initial_spaces += 1
if line and line[-1].isspace():
error(filename, linenum, 'whitespace/end_of_line', 4,
'Line ends in whitespace. Consider deleting these extra spaces.')
# There are certain situations we allow one space, notably for
# section labels, and also lines containing multi-line raw strings.
elif ((initial_spaces == 1 or initial_spaces == 3) and
not Match(scope_or_label_pattern, cleansed_line) and
not (clean_lines.raw_lines[linenum] != line and
Match(r'^\s*""', line))):
error(filename, linenum, 'whitespace/indent', 3,
'Weird number of spaces at line-start. '
'Are you using a 2-space indent?')
# Check if the line is a header guard.
is_header_guard = False
if file_extension == 'h':
cppvar = GetHeaderGuardCPPVariable(filename)
if (line.startswith('#ifndef %s' % cppvar) or
line.startswith('#define %s' % cppvar) or
line.startswith('#endif // %s' % cppvar)):
is_header_guard = True
# #include lines and header guards can be long, since there's no clean way to
# split them.
#
# URLs can be long too. It's possible to split these, but it makes them
# harder to cut&paste.
#
# The "$Id:...$" comment may also get very long without it being the
  # developer's fault.
if (not line.startswith('#include') and not is_header_guard and
not Match(r'^\s*//.*http(s?)://\S*$', line) and
not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
line_width = GetLineWidth(line)
extended_length = int((_line_length * 1.25))
if line_width > extended_length:
error(filename, linenum, 'whitespace/line_length', 4,
'Lines should very rarely be longer than %i characters' %
extended_length)
elif line_width > _line_length:
error(filename, linenum, 'whitespace/line_length', 2,
'Lines should be <= %i characters long' % _line_length)
if (cleansed_line.count(';') > 1 and
# for loops are allowed two ;'s (and may run over two lines).
cleansed_line.find('for') == -1 and
(GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
# It's ok to have many commands in a switch case that fits in 1 line
not ((cleansed_line.find('case ') != -1 or
cleansed_line.find('default:') != -1) and
cleansed_line.find('break;') != -1)):
error(filename, linenum, 'whitespace/newline', 0,
'More than one command on the same line')
# Some more style checks
CheckBraces(filename, clean_lines, linenum, error)
CheckTrailingSemicolon(filename, clean_lines, linenum, error)
CheckEmptyBlockBody(filename, clean_lines, linenum, error)
CheckAccess(filename, clean_lines, linenum, nesting_state, error)
CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckOperatorSpacing(filename, clean_lines, linenum, error)
CheckParenthesisSpacing(filename, clean_lines, linenum, error)
CheckCommaSpacing(filename, clean_lines, linenum, error)
CheckBracesSpacing(filename, clean_lines, linenum, error)
CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
CheckRValueReference(filename, clean_lines, linenum, nesting_state, error)
CheckCheck(filename, clean_lines, linenum, error)
CheckAltTokens(filename, clean_lines, linenum, error)
classinfo = nesting_state.InnermostClass()
if classinfo:
CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _IsTestFilename(filename):
"""Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise.
"""
if (filename.endswith('_test.cc') or
filename.endswith('_unittest.cc') or
filename.endswith('_regtest.cc')):
return True
else:
return False
def _ClassifyInclude(fileinfo, include, is_system):
"""Figures out what kind of header 'include' is.
Args:
fileinfo: The current file cpplint is running over. A FileInfo instance.
include: The path to a #included file.
is_system: True if the #include used <> rather than "".
Returns:
One of the _XXX_HEADER constants.
For example:
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
_C_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
_CPP_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
_LIKELY_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
... 'bar/foo_other_ext.h', False)
_POSSIBLE_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
_OTHER_HEADER
"""
# This is a list of all standard c++ header files, except
# those already checked for above.
is_cpp_h = include in _CPP_HEADERS
if is_system:
if is_cpp_h:
return _CPP_SYS_HEADER
else:
return _C_SYS_HEADER
# If the target file and the include we're checking share a
# basename when we drop common extensions, and the include
# lives in the same directory, then it's likely to be owned by the target file.
target_dir, target_base = (
os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
if target_base == include_base and (
include_dir == target_dir or
include_dir == os.path.normpath(target_dir + '/../public')):
return _LIKELY_MY_HEADER
# If the target and include share some initial basename
# component, it's possible the target is implementing the
# include, so it's allowed to be first, but we'll never
# complain if it's not there.
target_first_component = _RE_FIRST_COMPONENT.match(target_base)
include_first_component = _RE_FIRST_COMPONENT.match(include_base)
if (target_first_component and include_first_component and
target_first_component.group(0) ==
include_first_component.group(0)):
return _POSSIBLE_MY_HEADER
return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
"""Check rules that are applicable to #include lines.
Strings on #include lines are NOT removed from elided line, to make
certain tasks easier. However, to prevent false positives, checks
applicable to #include lines in CheckLanguage must be put here.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
fileinfo = FileInfo(filename)
line = clean_lines.lines[linenum]
# "include" should use the new style "foo/bar.h" instead of just "bar.h"
# Only do this check if the included header follows google naming
# conventions. If not, assume that it's a 3rd party API that
# requires special include conventions.
#
# We also make an exception for Lua headers, which follow google
# naming convention but not the include convention.
match = Match(r'#include\s*"([^/]+\.h)"', line)
if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
error(filename, linenum, 'build/include', 4,
'Include the directory when naming .h files')
# We shouldn't include a file more than once. Actually, there are a
# handful of instances where doing so is okay, but in general it's
# not.
match = _RE_PATTERN_INCLUDE.search(line)
if match:
include = match.group(2)
is_system = (match.group(1) == '<')
duplicate_line = include_state.FindHeader(include)
if duplicate_line >= 0:
error(filename, linenum, 'build/include', 4,
'"%s" already included at %s:%s' %
(include, filename, duplicate_line))
elif (include.endswith('.cc') and
os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)):
error(filename, linenum, 'build/include', 4,
'Do not include .cc files from other packages')
elif not _THIRD_PARTY_HEADERS_PATTERN.match(include):
include_state.include_list[-1].append((include, linenum))
# We want to ensure that headers appear in the right order:
# 1) for foo.cc, foo.h (preferred location)
# 2) c system files
# 3) cpp system files
# 4) for foo.cc, foo.h (deprecated location)
# 5) other google headers
#
# We classify each include statement as one of those 5 types
# using a number of techniques. The include_state object keeps
# track of the highest type seen, and complains if we see a
# lower type after that.
error_message = include_state.CheckNextIncludeOrder(
_ClassifyInclude(fileinfo, include, is_system))
if error_message:
error(filename, linenum, 'build/include_order', 4,
'%s. Should be: %s.h, c system, c++ system, other.' %
(error_message, fileinfo.BaseName()))
canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
if not include_state.IsInAlphabeticalOrder(
clean_lines, linenum, canonical_include):
error(filename, linenum, 'build/include_alpha', 4,
'Include "%s" not in alphabetical order' % include)
include_state.SetLastHeader(canonical_include)
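# Illustrative sketch of the ordering enforced above, using made-up headers
# for a hypothetical file foo/foo.cc (not drawn from any real project):
#
#   #include "foo/foo.h"    // 1) the file's own header
#   #include <stdio.h>      // 2) C system header
#   #include <string>       // 3) C++ system header
#   #include "bar/util.h"   // 5) other project header
#
# Listing <string> before <stdio.h>, or "bar/util.h" before the system
# headers, would be reported as a build/include_order error.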
def _GetTextInside(text, start_pattern):
r"""Retrieves all the text between matching open and close parentheses.
Given a string of lines and a regular expression string, retrieve all the text
following the expression and between opening punctuation symbols like
(, [, or {, and the matching close-punctuation symbol. This properly handles
nested occurrences of the punctuation, so for text like
printf(a(), b(c()));
a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
start_pattern must match a string that has an opening punctuation symbol at the end.
Args:
text: The lines to extract text. Its comments and strings must be elided.
It can be single line and can span multiple lines.
start_pattern: The regexp string indicating where to start extracting
the text.
Returns:
The extracted text.
None if either the opening string or ending punctuation could not be found.
"""
# TODO(unknown): Audit cpplint.py to see what places could be profitably
# rewritten to use _GetTextInside (and use inferior regexp matching today).
# Give opening punctuations to get the matching close-punctuations.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
closing_punctuation = set(matching_punctuation.itervalues())
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
if not match: # start_pattern not found in text.
return None
start_position = match.end(0)
assert start_position > 0, (
'start_pattern must end with an opening punctuation.')
assert text[start_position - 1] in matching_punctuation, (
'start_pattern must end with an opening punctuation.')
# Stack of closing punctuations we expect to have in text after position.
punctuation_stack = [matching_punctuation[text[start_position - 1]]]
position = start_position
while punctuation_stack and position < len(text):
if text[position] == punctuation_stack[-1]:
punctuation_stack.pop()
elif text[position] in closing_punctuation:
# A closing punctuation without matching opening punctuations.
return None
elif text[position] in matching_punctuation:
punctuation_stack.append(matching_punctuation[text[position]])
position += 1
if punctuation_stack:
# Opening punctuations left without matching close-punctuations.
return None
# punctuations match.
return text[start_position:position - 1]
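# A small illustration of _GetTextInside on a hypothetical input (shown as a
# comment only; nothing here is executed):
#   _GetTextInside('memset(buf, 0, sizeof(buf)); Other();', r'memset\(')
# returns 'buf, 0, sizeof(buf)': the nested sizeof() parentheses are balanced
# by the stack above, and None would be returned if the closing ')' for
# memset( were missing.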
# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
# < (?: < (?: < [^<>]*
# >
# | [^<>] )*
# >
# | [^<>] )*
# >
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]*
_RE_PATTERN_TYPE = (
r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
r'(?:\w|'
r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
r'::)+')
# A call-by-reference parameter ends with '& identifier'.
_RE_PATTERN_REF_PARAM = re.compile(
r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
_RE_PATTERN_CONST_REF_PARAM = (
r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
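# Hypothetical declarator fragments, to show what the patterns above are
# intended to match (illustrative only):
#   'string &name,'        matches _RE_PATTERN_REF_PARAM (non-const reference)
#   'vector<int> &out)'    matches _RE_PATTERN_REF_PARAM (one template level)
#   'const string &name'   also satisfies _RE_PATTERN_CONST_REF_PARAM, so
#                          CheckForNonConstReference below will not flag it.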
def CheckLanguage(filename, clean_lines, linenum, file_extension,
include_state, nesting_state, error):
"""Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# If the line is empty or consists entirely of a comment, no need to
# check it.
line = clean_lines.elided[linenum]
if not line:
return
match = _RE_PATTERN_INCLUDE.search(line)
if match:
CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
return
# Reset include state across preprocessor directives. This is meant
# to silence warnings for conditional includes.
match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
if match:
include_state.ResetSection(match.group(1))
# Make Windows paths like Unix.
fullname = os.path.abspath(filename).replace('\\', '/')
# Perform other checks now that we are sure that this is not an include line
CheckCasts(filename, clean_lines, linenum, error)
CheckGlobalStatic(filename, clean_lines, linenum, error)
CheckPrintf(filename, clean_lines, linenum, error)
if file_extension == 'h':
# TODO(unknown): check that 1-arg constructors are explicit.
# How to tell it's a constructor?
# (handled in CheckForNonStandardConstructs for now)
# TODO(unknown): check that classes declare or disable copy/assign
# (level 1 error)
pass
# Check if people are using the verboten C basic types. The only exception
# we regularly allow is "unsigned short port" for port.
if Search(r'\bshort port\b', line):
if not Search(r'\bunsigned short port\b', line):
error(filename, linenum, 'runtime/int', 4,
'Use "unsigned short" for ports, not "short"')
else:
match = Search(r'\b(short|long(?! +double)|long long)\b', line)
if match:
error(filename, linenum, 'runtime/int', 4,
'Use int16/int64/etc, rather than the C type %s' % match.group(1))
# Check if some verboten operator overloading is going on
# TODO(unknown): catch out-of-line unary operator&:
# class X {};
# int operator&(const X& x) { return 42; } // unary operator&
# The trick is it's hard to tell apart from binary operator&:
# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
if Search(r'\boperator\s*&\s*\(\s*\)', line):
error(filename, linenum, 'runtime/operator', 4,
'Unary operator& is dangerous. Do not use it.')
# Check for suspicious usage of "if" like
# } if (a == b) {
if Search(r'\}\s*if\s*\(', line):
error(filename, linenum, 'readability/braces', 4,
'Did you mean "else if"? If not, start a new line for "if".')
# Check for potential format string bugs like printf(foo).
# We constrain the pattern not to pick things like DocidForPrintf(foo).
# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
# TODO(unknown): Catch the following case. Need to change the calling
# convention of the whole function to process multiple line to handle it.
# printf(
# boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
if printf_args:
match = Match(r'([\w.\->()]+)$', printf_args)
if match and match.group(1) != '__VA_ARGS__':
function_name = re.search(r'\b((?:string)?printf)\s*\(',
line, re.I).group(1)
error(filename, linenum, 'runtime/printf', 4,
'Potential format string bug. Do %s("%%s", %s) instead.'
% (function_name, match.group(1)))
# Check for potential memset bugs like memset(buf, sizeof(buf), 0).
match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
error(filename, linenum, 'runtime/memset', 4,
'Did you mean "memset(%s, 0, %s)"?'
% (match.group(1), match.group(2)))
if Search(r'\busing namespace\b', line):
error(filename, linenum, 'build/namespaces', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
# Detect variable-length arrays.
match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
match.group(3).find(']') == -1):
# Split the size using space and arithmetic operators as delimiters.
# If any of the resulting tokens are not compile time constants then
# report the error.
tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
is_const = True
skip_next = False
for tok in tokens:
if skip_next:
skip_next = False
continue
if Search(r'sizeof\(.+\)', tok): continue
if Search(r'arraysize\(\w+\)', tok): continue
tok = tok.lstrip('(')
tok = tok.rstrip(')')
if not tok: continue
if Match(r'\d+', tok): continue
if Match(r'0[xX][0-9a-fA-F]+', tok): continue
if Match(r'k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
# A catch all for tricky sizeof cases, including 'sizeof expression',
# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
# requires skipping the next token because we split on ' ' and '*'.
if tok.startswith('sizeof'):
skip_next = True
continue
is_const = False
break
if not is_const:
error(filename, linenum, 'runtime/arrays', 1,
'Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size.")
# Check for use of unnamed namespaces in header files. Registration
# macros are typically OK, so we allow use of "namespace {" on lines
# that end with backslashes.
if (file_extension == 'h'
and Search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
error(filename, linenum, 'build/namespaces', 4,
'Do not use unnamed namespaces in header files. See '
'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.')
def CheckGlobalStatic(filename, clean_lines, linenum, error):
"""Check for unsafe global or static objects.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Match two lines at a time to support multiline declarations
if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
line += clean_lines.elided[linenum + 1].strip()
# Check for people declaring static/global STL strings at the top level.
# This is dangerous because the C++ language does not guarantee that
# globals with constructors are initialized before the first access.
match = Match(
r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
line)
# Remove false positives:
# - String pointers (as opposed to values).
# string *pointer
# const string *pointer
# string const *pointer
# string *const pointer
#
# - Functions and template specializations.
# string Function<Type>(...
# string Class<Type>::Method(...
#
# - Operators. These are matched separately because operator names
# cross non-word boundaries, and trying to match both operators
# and functions at the same time would decrease accuracy of
# matching identifiers.
# string Class::operator*()
if (match and
not Search(r'\bstring\b(\s+const)?\s*\*\s*(const\s+)?\w', line) and
not Search(r'\boperator\W', line) and
not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(3))):
error(filename, linenum, 'runtime/string', 4,
'For a static/global string constant, use a C style string instead: '
'"%schar %s[]".' %
(match.group(1), match.group(2)))
if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
error(filename, linenum, 'runtime/init', 4,
'You seem to be initializing a member variable with itself.')
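# Hypothetical lines showing the intent of CheckGlobalStatic (illustrative
# only): 'static const string kName = "foo";' would be flagged with a
# suggestion to write 'static const char kName[]' instead, while
# 'string Class::Method() {' is filtered out because match.group(3) starts
# with a parenthesized parameter list.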
def CheckPrintf(filename, clean_lines, linenum, error):
"""Check for printf related issues.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# When snprintf is used, the second argument shouldn't be a literal.
match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
if match and match.group(2) != '0':
# If 2nd arg is zero, snprintf is used to calculate size.
error(filename, linenum, 'runtime/printf', 3,
'If you can, use sizeof(%s) instead of %s as the 2nd arg '
'to snprintf.' % (match.group(1), match.group(2)))
# Check if some verboten C functions are being used.
if Search(r'\bsprintf\s*\(', line):
error(filename, linenum, 'runtime/printf', 5,
'Never use sprintf. Use snprintf instead.')
match = Search(r'\b(strcpy|strcat)\s*\(', line)
if match:
error(filename, linenum, 'runtime/printf', 4,
'Almost always, snprintf is better than %s' % match.group(1))
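# Hypothetical calls and the rough outcome of CheckPrintf above (illustrative
# only, not verbatim linter output):
#   snprintf(buf, 10, "%s", s);   -> suggest sizeof(buf) instead of 10
#   snprintf(buf, 0, "%s", s);    -> allowed; 0 means "compute the size"
#   sprintf(buf, "%s", s);        -> level-5 runtime/printf error
#   strcpy(dst, src);             -> "snprintf is better than strcpy"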
def IsDerivedFunction(clean_lines, linenum):
"""Check if current line contains an inherited function.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line contains a function with "override"
virt-specifier.
"""
# Scan back a few lines for start of current function
for i in xrange(linenum, max(-1, linenum - 10), -1):
match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
if match:
# Look for "override" after the matching closing parenthesis
line, _, closing_paren = CloseExpression(
clean_lines, i, len(match.group(1)))
return (closing_paren >= 0 and
Search(r'\boverride\b', line[closing_paren:]))
return False
def IsOutOfLineMethodDefinition(clean_lines, linenum):
"""Check if current line contains an out-of-line method definition.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line contains an out-of-line method definition.
"""
# Scan back a few lines for start of current function
for i in xrange(linenum, max(-1, linenum - 10), -1):
if Match(r'^([^()]*\w+)\(', clean_lines.elided[i]):
return Match(r'^[^()]*\w+::\w+\(', clean_lines.elided[i]) is not None
return False
def IsInitializerList(clean_lines, linenum):
"""Check if current line is inside constructor initializer list.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line appears to be inside constructor initializer
list, False otherwise.
"""
for i in xrange(linenum, 1, -1):
line = clean_lines.elided[i]
if i == linenum:
remove_function_body = Match(r'^(.*)\{\s*$', line)
if remove_function_body:
line = remove_function_body.group(1)
if Search(r'\s:\s*\w+[({]', line):
# A lone colon tends to indicate the start of a constructor
# initializer list. It could also be a ternary operator, which
# also tends to appear in constructor initializer lists as
# opposed to parameter lists.
return True
if Search(r'\}\s*,\s*$', line):
# A closing brace followed by a comma is probably the end of a
# brace-initialized member in constructor initializer list.
return True
if Search(r'[{};]\s*$', line):
# Found one of the following:
# - A closing brace or semicolon, probably the end of the previous
# function.
# - An opening brace, probably the start of current class or namespace.
#
# Current line is probably not inside an initializer list since
# we saw one of those things without seeing the starting colon.
return False
# Got to the beginning of the file without seeing the start of
# constructor initializer list.
return False
def CheckForNonConstReference(filename, clean_lines, linenum,
nesting_state, error):
"""Check for non-const references.
Separate from CheckLanguage since it scans backwards from current
line, instead of scanning forward.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Do nothing if there is no '&' on current line.
line = clean_lines.elided[linenum]
if '&' not in line:
return
# If a function is inherited, current function doesn't have much of
# a choice, so any non-const references should not be blamed on
# derived function.
if IsDerivedFunction(clean_lines, linenum):
return
# Don't warn on out-of-line method definitions, as we would warn on the
# in-line declaration, if it isn't marked with 'override'.
if IsOutOfLineMethodDefinition(clean_lines, linenum):
return
# Long type names may be broken across multiple lines, usually in one
# of these forms:
# LongType
# ::LongTypeContinued &identifier
# LongType::
# LongTypeContinued &identifier
# LongType<
# ...>::LongTypeContinued &identifier
#
# If we detected a type split across two lines, join the previous
# line to current line so that we can match const references
# accordingly.
#
# Note that this only scans back one line, since scanning back
# arbitrary number of lines would be expensive. If you have a type
# that spans more than 2 lines, please use a typedef.
if linenum > 1:
previous = None
if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
# previous_line\n + ::current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
clean_lines.elided[linenum - 1])
elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
# previous_line::\n + current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
clean_lines.elided[linenum - 1])
if previous:
line = previous.group(1) + line.lstrip()
else:
# Check for templated parameter that is split across multiple lines
endpos = line.rfind('>')
if endpos > -1:
(_, startline, startpos) = ReverseCloseExpression(
clean_lines, linenum, endpos)
if startpos > -1 and startline < linenum:
# Found the matching < on an earlier line, collect all
# pieces up to current line.
line = ''
for i in xrange(startline, linenum + 1):
line += clean_lines.elided[i].strip()
# Check for non-const references in function parameters. A single '&' may
# be found in the following places:
# inside expression: binary & for bitwise AND
# inside expression: unary & for taking the address of something
# inside declarators: reference parameter
# We will exclude the first two cases by checking that we are not inside a
# function body, including one that was just introduced by a trailing '{'.
# TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
if (nesting_state.previous_stack_top and
not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
# Not at toplevel, not within a class, and not within a namespace
return
# Avoid initializer lists. We only need to scan back from the
# current line for something that starts with ':'.
#
# We don't need to check the current line, since the '&' would
# appear inside the second set of parentheses on the current line as
# opposed to the first set.
if linenum > 0:
for i in xrange(linenum - 1, max(0, linenum - 10), -1):
previous_line = clean_lines.elided[i]
if not Search(r'[),]\s*$', previous_line):
break
if Match(r'^\s*:\s+\S', previous_line):
return
# Avoid preprocessors
if Search(r'\\\s*$', line):
return
# Avoid constructor initializer lists
if IsInitializerList(clean_lines, linenum):
return
# We allow non-const references in a few standard places, like functions
# called "swap()" or iostream operators like "<<" or ">>". Do not check
# those function parameters.
#
# We also accept & in static_assert, which looks like a function but
# it's actually a declaration expression.
whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
r'operator\s*[<>][<>]|'
r'static_assert|COMPILE_ASSERT'
r')\s*\(')
if Search(whitelisted_functions, line):
return
elif not Search(r'\S+\([^)]*$', line):
# Don't see a whitelisted function on this line. Actually we
# didn't see any function name on this line, so this is likely a
# multi-line parameter list. Try a bit harder to catch this case.
for i in xrange(2):
if (linenum > i and
Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
return
decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body
for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter):
error(filename, linenum, 'runtime/references', 2,
'Is this a non-const reference? '
'If so, make const or use a pointer: ' +
ReplaceAll(' *<', '<', parameter))
def CheckCasts(filename, clean_lines, linenum, error):
"""Various cast related checks.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Check to see if they're using a conversion function cast.
# I just try to capture the most common basic types, though there are more.
# Parameterless conversion functions, such as bool(), are allowed as they are
# probably a member operator declaration or default constructor.
match = Search(
r'(\bnew\s+|\S<\s*(?:const\s+)?)?\b'
r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
r'(\([^)].*)', line)
expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
if match and not expecting_function:
matched_type = match.group(2)
# matched_new_or_template is used to silence two false positives:
# - New operators
# - Template arguments with function types
#
# For template arguments, we match on types immediately following
# an opening bracket without any spaces. This is a fast way to
# silence the common case where the function type is the first
# template argument. False negative with less-than comparison is
# avoided because those operators are usually followed by a space.
#
# function<double(double)> // bracket + no space = false positive
# value < double(42) // bracket + space = true positive
matched_new_or_template = match.group(1)
# Avoid arrays by looking for brackets that come after the closing
# parenthesis.
if Match(r'\([^()]+\)\s*\[', match.group(3)):
return
# Other things to ignore:
# - Function pointers
# - Casts to pointer types
# - Placement new
# - Alias declarations
matched_funcptr = match.group(3)
if (matched_new_or_template is None and
not (matched_funcptr and
(Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
matched_funcptr) or
matched_funcptr.startswith('(*)'))) and
not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
not Search(r'new\(\S+\)\s*' + matched_type, line)):
error(filename, linenum, 'readability/casting', 4,
'Using deprecated casting style. '
'Use static_cast<%s>(...) instead' %
matched_type)
if not expecting_function:
CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
# This doesn't catch all cases. Consider (const char * const)"hello".
#
# (char *) "foo" should always be a const_cast (reinterpret_cast won't
# compile).
if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
r'\((char\s?\*+\s?)\)\s*"', error):
pass
else:
# Check pointer casts for other than string constants
CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
r'\((\w+\s?\*+\s?)\)', error)
# In addition, we look for people taking the address of a cast. This
# is dangerous -- casts can assign to temporaries, so the pointer doesn't
# point where you think.
#
# Some non-identifier character is required before the '&' for the
# expression to be recognized as a cast. These are casts:
# expression = &static_cast<int*>(temporary());
# function(&(int*)(temporary()));
#
# This is not a cast:
# reference_type&(int* function_param);
match = Search(
r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|'
r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
if match:
# Try a better error message when the & is bound to something
# dereferenced by the casted pointer, as opposed to the casted
# pointer itself.
parenthesis_error = False
match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
if match:
_, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
_, y2, x2 = CloseExpression(clean_lines, y1, x1)
if x2 >= 0:
extended_line = clean_lines.elided[y2][x2:]
if y2 < clean_lines.NumLines() - 1:
extended_line += clean_lines.elided[y2 + 1]
if Match(r'\s*(?:->|\[)', extended_line):
parenthesis_error = True
if parenthesis_error:
error(filename, linenum, 'readability/casting', 4,
('Are you taking an address of something dereferenced '
'from a cast? Wrapping the dereferenced expression in '
'parentheses will make the binding more obvious'))
else:
error(filename, linenum, 'runtime/casting', 4,
('Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'))
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
"""Checks for a C-style cast by looking for the pattern.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
cast_type: The string for the C++ cast to recommend. This is either
reinterpret_cast, static_cast, or const_cast, depending.
pattern: The regular expression used to find C-style casts.
error: The function to call with any errors found.
Returns:
True if an error was emitted.
False otherwise.
"""
line = clean_lines.elided[linenum]
match = Search(pattern, line)
if not match:
return False
# Exclude lines with keywords that tend to look like casts
context = line[0:match.start(1) - 1]
if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
return False
# Try expanding the current context to see if we are one level of
# parentheses inside a macro.
if linenum > 0:
for i in xrange(linenum - 1, max(0, linenum - 5), -1):
context = clean_lines.elided[i] + context
if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
return False
# operator++(int) and operator--(int)
if context.endswith(' operator++') or context.endswith(' operator--'):
return False
# A single unnamed argument for a function tends to look like an old
# style cast. If we see those, don't issue warnings for deprecated
# casts; instead, issue warnings for unnamed arguments where
# appropriate.
#
# These are things that we want warnings for, since the style guide
# explicitly requires all parameters to be named:
# Function(int);
# Function(int) {
# ConstMember(int) const;
# ConstMember(int) const {
# ExceptionMember(int) throw (...);
# ExceptionMember(int) throw (...) {
# PureVirtual(int) = 0;
# [](int) -> bool {
#
# These are functions of some sort, where the compiler would be fine
# if they had named parameters, but people often omit those
# identifiers to reduce clutter:
# (FunctionPointer)(int);
# (FunctionPointer)(int) = value;
# Function((function_pointer_arg)(int))
# Function((function_pointer_arg)(int), int param)
# <TemplateArgument(int)>;
# <(FunctionPointerTemplateArgument)(int)>;
remainder = line[match.end(0):]
if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)',
remainder):
# Looks like an unnamed parameter.
# Don't warn on any kind of template arguments.
if Match(r'^\s*>', remainder):
return False
# Don't warn on assignments to function pointers, but keep warnings for
# unnamed parameters to pure virtual functions. Note that this pattern
# will also pass on assignments of "0" to function pointers, but the
# preferred values for those would be "nullptr" or "NULL".
matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder)
if matched_zero and matched_zero.group(1) != '0':
return False
# Don't warn on function pointer declarations. For this we need
# to check what came before the "(type)" string.
if Match(r'.*\)\s*$', line[0:match.start(0)]):
return False
# Don't warn if the parameter is named with block comments, e.g.:
# Function(int /*unused_param*/);
raw_line = clean_lines.raw_lines[linenum]
if '/*' in raw_line:
return False
# Passed all filters, issue warning here.
error(filename, linenum, 'readability/function', 3,
'All parameters should be named in a function')
return True
# At this point, all that should be left is actual casts.
error(filename, linenum, 'readability/casting', 4,
'Using C-style cast. Use %s<%s>(...) instead' %
(cast_type, match.group(1)))
return True
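# Hypothetical lines and how the cast checks above treat them (illustrative
# only):
#   int n = (int)f;            -> readability/casting, suggest static_cast<int>
#   char* p = (char*)"hello";  -> readability/casting, suggest const_cast
#   void Function(int);        -> readability/function, unnamed parameter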
def ExpectingFunctionArgs(clean_lines, linenum):
"""Checks whether where function type arguments are expected.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if the line at 'linenum' is inside something that expects arguments
of function types.
"""
line = clean_lines.elided[linenum]
return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
(linenum >= 2 and
(Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
clean_lines.elided[linenum - 1]) or
Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
clean_lines.elided[linenum - 2]) or
Search(r'\bstd::m?function\s*\<\s*$',
clean_lines.elided[linenum - 1]))))
_HEADERS_CONTAINING_TEMPLATES = (
('<deque>', ('deque',)),
('<functional>', ('unary_function', 'binary_function',
'plus', 'minus', 'multiplies', 'divides', 'modulus',
'negate',
'equal_to', 'not_equal_to', 'greater', 'less',
'greater_equal', 'less_equal',
'logical_and', 'logical_or', 'logical_not',
'unary_negate', 'not1', 'binary_negate', 'not2',
'bind1st', 'bind2nd',
'pointer_to_unary_function',
'pointer_to_binary_function',
'ptr_fun',
'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
'mem_fun_ref_t',
'const_mem_fun_t', 'const_mem_fun1_t',
'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
'mem_fun_ref',
)),
('<limits>', ('numeric_limits',)),
('<list>', ('list',)),
('<map>', ('map', 'multimap',)),
('<memory>', ('allocator',)),
('<queue>', ('queue', 'priority_queue',)),
('<set>', ('set', 'multiset',)),
('<stack>', ('stack',)),
('<string>', ('char_traits', 'basic_string',)),
('<tuple>', ('tuple',)),
('<utility>', ('pair',)),
('<vector>', ('vector',)),
# gcc extensions.
# Note: std::hash is their hash, ::hash is our hash
('<hash_map>', ('hash_map', 'hash_multimap',)),
('<hash_set>', ('hash_set', 'hash_multiset',)),
('<slist>', ('slist',)),
)
_RE_PATTERN_STRING = re.compile(r'\bstring\b')
_re_pattern_algorithm_header = []
for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
'transform'):
# Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
# type::max().
_re_pattern_algorithm_header.append(
(re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
_template,
'<algorithm>'))
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
for _template in _templates:
_re_pattern_templates.append(
(re.compile(r'(\<|\b)' + _template + r'\s*\<'),
_template + '<>',
_header))
def FilesBelongToSameModule(filename_cc, filename_h):
"""Check if these two filenames belong to the same module.
The concept of a 'module' here is as follows:
foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
same 'module' if they are in the same directory.
some/path/public/xyzzy and some/path/internal/xyzzy are also considered
to belong to the same module here.
If the filename_cc contains a longer path than the filename_h, for example,
'/absolute/path/to/base/sysinfo.cc', and this file would include
'base/sysinfo.h', this function also produces the prefix needed to open the
header. This is used by the caller of this function to more robustly open the
header file. We don't have access to the real include paths in this context,
so we need this guesswork here.
Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
according to this implementation. Because of this, this function gives
some false positives. This should be sufficiently rare in practice.
Args:
filename_cc: is the path for the .cc file
filename_h: is the path for the header file
Returns:
Tuple with a bool and a string:
bool: True if filename_cc and filename_h belong to the same module.
string: the additional prefix needed to open the header file.
"""
if not filename_cc.endswith('.cc'):
return (False, '')
filename_cc = filename_cc[:-len('.cc')]
if filename_cc.endswith('_unittest'):
filename_cc = filename_cc[:-len('_unittest')]
elif filename_cc.endswith('_test'):
filename_cc = filename_cc[:-len('_test')]
filename_cc = filename_cc.replace('/public/', '/')
filename_cc = filename_cc.replace('/internal/', '/')
if not filename_h.endswith('.h'):
return (False, '')
filename_h = filename_h[:-len('.h')]
if filename_h.endswith('-inl'):
filename_h = filename_h[:-len('-inl')]
filename_h = filename_h.replace('/public/', '/')
filename_h = filename_h.replace('/internal/', '/')
files_belong_to_same_module = filename_cc.endswith(filename_h)
common_path = ''
if files_belong_to_same_module:
common_path = filename_cc[:-len(filename_h)]
return files_belong_to_same_module, common_path
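# Hypothetical example of the module matching above (illustrative only):
#   FilesBelongToSameModule('/abs/path/to/base/sysinfo.cc', 'base/sysinfo.h')
# returns (True, '/abs/path/to/'); the returned prefix is what
# CheckForIncludeWhatYouUse later prepends to open the header file.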
def UpdateIncludeState(filename, include_dict, io=codecs):
"""Fill up the include_dict with new includes found from the file.
Args:
filename: the name of the header to read.
include_dict: a dictionary in which the headers are inserted.
io: The io factory to use to read the file. Provided for testability.
Returns:
True if a header was successfully added. False otherwise.
"""
headerfile = None
try:
headerfile = io.open(filename, 'r', 'utf8', 'replace')
except IOError:
return False
linenum = 0
for line in headerfile:
linenum += 1
clean_line = CleanseComments(line)
match = _RE_PATTERN_INCLUDE.search(clean_line)
if match:
include = match.group(2)
include_dict.setdefault(include, linenum)
return True
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
io=codecs):
"""Reports for missing stl includes.
This function will output warnings to make sure you are including the headers
necessary for the stl containers and functions that you use. We only give one
reason to include a header. For example, if you use both equal_to<> and
less<> in a .h file, only one (the latter in the file) of these will be
reported as a reason to include <functional>.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
include_state: An _IncludeState instance.
error: The function to call with any errors found.
io: The IO factory to use to read the header file. Provided for unittest
injection.
"""
required = {} # A map of header name to linenumber and the template entity.
# Example of required: { '<functional>': (1219, 'less<>') }
for linenum in xrange(clean_lines.NumLines()):
line = clean_lines.elided[linenum]
if not line or line[0] == '#':
continue
# String is special -- it is a non-templatized type in STL.
matched = _RE_PATTERN_STRING.search(line)
if matched:
# Don't warn about strings in non-STL namespaces:
# (We check only the first match per line; good enough.)
prefix = line[:matched.start()]
if prefix.endswith('std::') or not prefix.endswith('::'):
required['<string>'] = (linenum, 'string')
for pattern, template, header in _re_pattern_algorithm_header:
if pattern.search(line):
required[header] = (linenum, template)
# The following check is just a speed-up; no semantics are changed.
if '<' not in line:  # Reduces the CPU time usage by skipping lines.
continue
for pattern, template, header in _re_pattern_templates:
if pattern.search(line):
required[header] = (linenum, template)
# The policy is that if you #include something in foo.h you don't need to
# include it again in foo.cc. Here, we will look at possible includes.
# Let's flatten the include_state include_list and copy it into a dictionary.
include_dict = dict([item for sublist in include_state.include_list
for item in sublist])
# Did we find the header for this file (if any) and successfully load it?
header_found = False
# Use the absolute path so that matching works properly.
abs_filename = FileInfo(filename).FullName()
# For Emacs's flymake.
# If cpplint is invoked from Emacs's flymake, a temporary file is generated
# by flymake and that file name might end with '_flymake.cc'. In that case,
# restore original file name here so that the corresponding header file can be
# found.
# e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
# instead of 'foo_flymake.h'
abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
# include_dict is modified during iteration, so we iterate over a copy of
# the keys.
header_keys = include_dict.keys()
for header in header_keys:
(same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
fullpath = common_path + header
if same_module and UpdateIncludeState(fullpath, include_dict, io):
header_found = True
# If we can't find the header file for a .cc, assume it's because we don't
# know where to look. In that case we'll give up as we're not sure they
# didn't include it in the .h file.
# TODO(unknown): Do a better job of finding .h files so we are confident that
# not having the .h file means there isn't one.
if filename.endswith('.cc') and not header_found:
return
# All the lines have been processed, report the errors found.
for required_header_unstripped in required:
template = required[required_header_unstripped][1]
if required_header_unstripped.strip('<>"') not in include_dict:
error(filename, required[required_header_unstripped][0],
'build/include_what_you_use', 4,
'Add #include ' + required_header_unstripped + ' for ' + template)
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
"""Check that make_pair's template arguments are deduced.
G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
specified explicitly, and such use isn't intended in any case.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
if match:
error(filename, linenum, 'build/explicit_make_pair',
4, # 4 = high confidence
'For C++11-compatibility, omit template arguments from make_pair'
' OR use pair directly OR if appropriate, construct a pair directly')
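# Illustrative example: a hypothetical 'return make_pair<int, int>(1, 2);'
# would be flagged by the check above, while 'return make_pair(1, 2);' (with
# deduced template arguments) would not.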
def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error):
"""Check that default lambda captures are not used.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# A lambda introducer specifies a default capture if it starts with "[="
# or if it starts with "[&" _not_ followed by an identifier.
match = Match(r'^(.*)\[\s*(?:=|&[^\w])', line)
if match:
# Found a potential error, check what comes after the lambda-introducer.
# If it's not open parenthesis (for lambda-declarator) or open brace
# (for compound-statement), it's not a lambda.
line, _, pos = CloseExpression(clean_lines, linenum, len(match.group(1)))
if pos >= 0 and Match(r'^\s*[{(]', line[pos:]):
error(filename, linenum, 'build/c++11',
4, # 4 = high confidence
'Default lambda captures are an unapproved C++ feature.')
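# Illustrative examples for the default-capture check above (hypothetical
# lines): 'auto f = [=](int x) { return x; };' and 'Run([&] { Work(); });'
# would both be flagged, while an explicit capture such as
# '[&total](int x) { total += x; }' passes.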
def CheckRedundantVirtual(filename, clean_lines, linenum, error):
"""Check if line contains a redundant "virtual" function-specifier.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Look for "virtual" on current line.
line = clean_lines.elided[linenum]
virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line)
if not virtual: return
# Ignore "virtual" keywords that are near access-specifiers. These
# are only used in class base-specifier and do not apply to member
# functions.
if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or
Match(r'^\s+(public|protected|private)\b', virtual.group(3))):
return
# Ignore the "virtual" keyword from virtual base classes. Usually
# there is a colon on the same line in these cases (virtual base
# classes are rare in google3 because multiple inheritance is rare).
if Match(r'^.*[^:]:[^:].*$', line): return
# Look for the next opening parenthesis. This is the start of the
# parameter list (possibly on the next line shortly after virtual).
# TODO(unknown): doesn't work if there are virtual functions with
# decltype() or other things that use parentheses, but csearch suggests
# that this is rare.
end_col = -1
end_line = -1
start_col = len(virtual.group(2))
for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):
line = clean_lines.elided[start_line][start_col:]
parameter_list = Match(r'^([^(]*)\(', line)
if parameter_list:
# Match parentheses to find the end of the parameter list
(_, end_line, end_col) = CloseExpression(
clean_lines, start_line, start_col + len(parameter_list.group(1)))
break
start_col = 0
if end_col < 0:
return # Couldn't find end of parameter list, give up
# Look for "override" or "final" after the parameter list
# (possibly on the next few lines).
for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):
line = clean_lines.elided[i][end_col:]
match = Search(r'\b(override|final)\b', line)
if match:
error(filename, linenum, 'readability/inheritance', 4,
('"virtual" is redundant since function is '
'already declared as "%s"' % match.group(1)))
# Set end_col to check whole lines after we are done with the
# first line.
end_col = 0
if Search(r'[^\w]\s*$', line):
break
def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
"""Check if line contains a redundant "override" or "final" virt-specifier.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Look for closing parenthesis nearby. We need one to confirm where
# the declarator ends and where the virt-specifier starts to avoid
# false positives.
line = clean_lines.elided[linenum]
declarator_end = line.rfind(')')
if declarator_end >= 0:
fragment = line[declarator_end:]
else:
if linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0:
fragment = line
else:
return
# Check that at most one of "override" or "final" is present, not both
if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment):
error(filename, linenum, 'readability/inheritance', 4,
('"override" is redundant since function is '
'already declared as "final"'))
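# Illustrative declarations for the two checks above (hypothetical lines):
#   virtual void Draw() override;   -> "virtual" reported as redundant
#   void Draw() final override;     -> "override" reported as redundant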
# Returns true if we are at a new block, and it is directly
# inside of a namespace.
def IsBlockInNameSpace(nesting_state, is_forward_declaration):
"""Checks that the new block is directly in a namespace.
Args:
nesting_state: The _NestingState object that contains info about our state.
is_forward_declaration: If the class is a forward declared class.
Returns:
Whether or not the new block is directly in a namespace.
"""
if is_forward_declaration:
if len(nesting_state.stack) >= 1 and (
isinstance(nesting_state.stack[-1], _NamespaceInfo)):
return True
else:
return False
return (len(nesting_state.stack) > 1 and
nesting_state.stack[-1].check_namespace_indentation and
isinstance(nesting_state.stack[-2], _NamespaceInfo))
def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
raw_lines_no_comments, linenum):
"""This method determines if we should apply our namespace indentation check.
Args:
nesting_state: The current nesting state.
is_namespace_indent_item: If we just put a new class on the stack, True.
If the top of the stack is not a class, or we did not recently
add the class, False.
raw_lines_no_comments: The lines without the comments.
linenum: The current line number we are processing.
Returns:
True if we should apply our namespace indentation check. Currently, it
only works for classes and namespaces inside of a namespace.
"""
is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments,
linenum)
if not (is_namespace_indent_item or is_forward_declaration):
return False
# If we are in a macro, we do not want to check the namespace indentation.
if IsMacroDefinition(raw_lines_no_comments, linenum):
return False
return IsBlockInNameSpace(nesting_state, is_forward_declaration)
# Call this method if the line is directly inside of a namespace.
# If the line above is blank (excluding comments) or the start of
# an inner namespace, it cannot be indented.
def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,
error):
line = raw_lines_no_comments[linenum]
if Match(r'^\s+', line):
error(filename, linenum, 'runtime/indentation_namespace', 4,
'Do not indent within a namespace')
def ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions=[]):
"""Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[line], line, error)
nesting_state.Update(filename, clean_lines, line, error)
CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
error)
if nesting_state.InAsmBlock(): return
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
CheckLanguage(filename, clean_lines, line, file_extension, include_state,
nesting_state, error)
CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
CheckForNonStandardConstructs(filename, clean_lines, line,
nesting_state, error)
CheckVlogArguments(filename, clean_lines, line, error)
CheckPosixThreading(filename, clean_lines, line, error)
CheckInvalidIncrement(filename, clean_lines, line, error)
CheckMakePairUsesDeduction(filename, clean_lines, line, error)
CheckDefaultLambdaCaptures(filename, clean_lines, line, error)
CheckRedundantVirtual(filename, clean_lines, line, error)
CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
for check_fn in extra_check_functions:
check_fn(filename, clean_lines, line, error)
def FlagCxx11Features(filename, clean_lines, linenum, error):
"""Flag those c++11 features that we only allow in certain places.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Flag unapproved C++11 headers.
include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
if include and include.group(1) in ('cfenv',
'condition_variable',
'fenv.h',
'future',
'mutex',
'thread',
'chrono',
'ratio',
'regex',
'system_error',
):
error(filename, linenum, 'build/c++11', 5,
('<%s> is an unapproved C++11 header.') % include.group(1))
# The only place where we need to worry about C++11 keywords and library
# features in preprocessor directives is in macro definitions.
if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return
# These are classes and free functions. The classes are always
# mentioned as std::*, but we only catch the free functions if
# they're not found by ADL. They're alphabetical by header.
for top_name in (
# type_traits
'alignment_of',
'aligned_union',
):
if Search(r'\bstd::%s\b' % top_name, line):
error(filename, linenum, 'build/c++11', 5,
('std::%s is an unapproved C++11 class or function. Send c-style '
'an example of where it would make your code more readable, and '
'they may let you use it.') % top_name)
def ProcessFileData(filename, file_extension, lines, error,
extra_check_functions=[]):
"""Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
lines = (['// marker so line numbers and indices both start at 1'] + lines +
['// marker so line numbers end in a known way'])
include_state = _IncludeState()
function_state = _FunctionState()
nesting_state = NestingState()
ResetNolintSuppressions()
CheckForCopyright(filename, lines, error)
RemoveMultiLineComments(filename, lines, error)
clean_lines = CleansedLines(lines)
if file_extension == 'h':
CheckForHeaderGuard(filename, clean_lines, error)
for line in xrange(clean_lines.NumLines()):
ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions)
FlagCxx11Features(filename, clean_lines, line, error)
nesting_state.CheckCompletedBlocks(filename, error)
CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
# Check that the .cc file has included its header if it exists.
if file_extension == 'cc':
CheckHeaderFileIncluded(filename, include_state, error)
# We check here rather than inside ProcessLine so that we see raw
# lines rather than "cleaned" lines.
CheckForBadCharacters(filename, lines, error)
CheckForNewlineAtEOF(filename, lines, error)
def ProcessConfigOverrides(filename):
""" Loads the configuration files and processes the config overrides.
Args:
filename: The name of the file being processed by the linter.
Returns:
False if the current |filename| should not be processed further, True otherwise.
"""
abs_filename = os.path.abspath(filename)
cfg_filters = []
keep_looking = True
while keep_looking:
abs_path, base_name = os.path.split(abs_filename)
if not base_name:
break # Reached the root directory.
cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
abs_filename = abs_path
if not os.path.isfile(cfg_file):
continue
try:
with open(cfg_file) as file_handle:
for line in file_handle:
line, _, _ = line.partition('#') # Remove comments.
if not line.strip():
continue
name, _, val = line.partition('=')
name = name.strip()
val = val.strip()
if name == 'set noparent':
keep_looking = False
elif name == 'filter':
cfg_filters.append(val)
elif name == 'exclude_files':
# When matching exclude_files pattern, use the base_name of
# the current file name or the directory name we are processing.
# For example, if we are checking for lint errors in /foo/bar/baz.cc
# and we found the .cfg file at /foo/CPPLINT.cfg, then the config
# file's "exclude_files" filter is meant to be checked against "bar"
# and not "baz" nor "bar/baz.cc".
if base_name:
pattern = re.compile(val)
if pattern.match(base_name):
sys.stderr.write('Ignoring "%s": file excluded by "%s". '
'File path component "%s" matches '
'pattern "%s"\n' %
(filename, cfg_file, base_name, val))
return False
elif name == 'linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
sys.stderr.write('Line length must be numeric.')
else:
sys.stderr.write(
'Invalid configuration option (%s) in file %s\n' %
(name, cfg_file))
except IOError:
sys.stderr.write(
"Skipping config file '%s': Can't open for reading\n" % cfg_file)
keep_looking = False
# Apply all the accumulated filters in reverse order (top-level directory
# config options having the least priority).
for filter in reversed(cfg_filters):
_AddFilters(filter)
return True
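# A hypothetical CPPLINT.cfg that the parser above would accept (illustrative
# only; the values are made up):
#
#   set noparent
#   filter=-build/include_order,+build/include_alpha
#   exclude_files=.*\.pb\.(h|cc)$
#   linelength=100
#
# Each line is split on the first '=', '#' starts a comment, and
# 'set noparent' stops the search in parent directories.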
def ProcessFile(filename, vlevel, extra_check_functions=[]):
"""Does google-lint on a single file.
Args:
filename: The name of the file to parse.
vlevel: The level of errors to report. Every error of confidence
>= verbose_level will be reported. 0 is a good default.
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
_SetVerboseLevel(vlevel)
_BackupFilters()
if not ProcessConfigOverrides(filename):
_RestoreFilters()
return
lf_lines = []
crlf_lines = []
try:
# Support the UNIX convention of using "-" for stdin. Note that
# we are not opening the file with universal newline support
# (which codecs doesn't support anyway), so the resulting lines do
# contain trailing '\r' characters if we are reading a file that
# has CRLF endings.
# If after the split a trailing '\r' is present, it is removed
# below.
if filename == '-':
lines = codecs.StreamReaderWriter(sys.stdin,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace').read().split('\n')
else:
lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
# Remove trailing '\r'.
# The -1 accounts for the extra trailing blank line we get from split()
for linenum in range(len(lines) - 1):
if lines[linenum].endswith('\r'):
lines[linenum] = lines[linenum].rstrip('\r')
crlf_lines.append(linenum + 1)
else:
lf_lines.append(linenum + 1)
except IOError:
sys.stderr.write(
"Skipping input '%s': Can't open for reading\n" % filename)
_RestoreFilters()
return
# Note, if no dot is found, this will give the entire filename as the ext.
file_extension = filename[filename.rfind('.') + 1:]
# When reading from stdin, the extension is unknown, so no cpplint tests
# should rely on the extension.
if filename != '-' and file_extension not in _valid_extensions:
sys.stderr.write('Ignoring %s; not a valid file name '
'(%s)\n' % (filename, ', '.join(_valid_extensions)))
else:
ProcessFileData(filename, file_extension, lines, Error,
extra_check_functions)
# If end-of-line sequences are a mix of LF and CR-LF, issue
# warnings on the lines with CR.
#
# Don't issue any warnings if all lines are uniformly LF or CR-LF,
# since critique can handle these just fine, and the style guide
# doesn't dictate a particular end of line sequence.
#
# We can't depend on os.linesep to determine what the desired
# end-of-line sequence should be, since that will return the
# server-side end-of-line sequence.
if lf_lines and crlf_lines:
# Warn on every line with CR. An alternative approach might be to
# check whether the file is mostly CRLF or just LF, and warn on the
# minority; we bias toward LF here since most tools prefer LF.
for linenum in crlf_lines:
Error(filename, linenum, 'whitespace/newline', 1,
'Unexpected \\r (^M) found; better to use only \\n')
sys.stderr.write('Done processing %s\n' % filename)
_RestoreFilters()
def PrintUsage(message):
"""Prints a brief usage string and exits, optionally with an error message.
Args:
message: The optional error message.
"""
sys.stderr.write(_USAGE)
if message:
sys.exit('\nFATAL ERROR: ' + message)
else:
sys.exit(1)
def PrintCategories():
"""Prints a list of all the error-categories used by error messages.
These are the categories used to filter messages via --filter.
"""
sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
sys.exit(0)
def ParseArguments(args):
"""Parses the command line arguments.
This may set the output format and verbosity level as side-effects.
Args:
args: The command line arguments.
Returns:
The list of filenames to lint.
"""
try:
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
'counting=',
'filter=',
'root=',
'linelength=',
'extensions='])
except getopt.GetoptError:
PrintUsage('Invalid arguments.')
verbosity = _VerboseLevel()
output_format = _OutputFormat()
filters = ''
counting_style = ''
for (opt, val) in opts:
if opt == '--help':
PrintUsage(None)
elif opt == '--output':
if val not in ('emacs', 'vs7', 'eclipse'):
PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
output_format = val
elif opt == '--verbose':
verbosity = int(val)
elif opt == '--filter':
filters = val
if not filters:
PrintCategories()
elif opt == '--counting':
if val not in ('total', 'toplevel', 'detailed'):
PrintUsage('Valid counting options are total, toplevel, and detailed')
counting_style = val
elif opt == '--root':
global _root
_root = val
elif opt == '--linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
PrintUsage('Line length must be digits.')
elif opt == '--extensions':
global _valid_extensions
try:
_valid_extensions = set(val.split(','))
except ValueError:
PrintUsage('Extensions must be a comma-separated list.')
if not filenames:
PrintUsage('No files were specified.')
_SetOutputFormat(output_format)
_SetVerboseLevel(verbosity)
_SetFilters(filters)
_SetCountingStyle(counting_style)
return filenames
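# Example invocation (illustrative; the script name and flag values are
# hypothetical, but the options match those parsed above):
#   python cpplint.py --filter=-whitespace/newline --counting=detailed \
#       --linelength=100 foo.cc foo.h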
def main():
filenames = ParseArguments(sys.argv[1:])
# Change stderr to write with replacement characters so we don't die
# if we try to print something containing non-ASCII characters.
sys.stderr = codecs.StreamReaderWriter(sys.stderr,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace')
_cpplint_state.ResetErrorCounts()
for filename in filenames:
ProcessFile(filename, _cpplint_state.verbose_level)
_cpplint_state.PrintErrorCounts()
sys.exit(_cpplint_state.error_count > 0)
if __name__ == '__main__':
main()
| mit |
jrversteegh/softsailor | deps/numpy-1.6.1/numpy/core/tests/test_getlimits.py | 24 | 1761 | """ Test functions for limits module.
"""
from numpy.testing import *
from numpy.core import finfo, iinfo
from numpy import half, single, double, longdouble
import numpy as np
##################################################
class TestPythonFloat(TestCase):
def test_singleton(self):
ftype = finfo(float)
ftype2 = finfo(float)
assert_equal(id(ftype),id(ftype2))
class TestHalf(TestCase):
def test_singleton(self):
ftype = finfo(half)
ftype2 = finfo(half)
assert_equal(id(ftype),id(ftype2))
class TestSingle(TestCase):
def test_singleton(self):
ftype = finfo(single)
ftype2 = finfo(single)
assert_equal(id(ftype),id(ftype2))
class TestDouble(TestCase):
def test_singleton(self):
ftype = finfo(double)
ftype2 = finfo(double)
assert_equal(id(ftype),id(ftype2))
class TestLongdouble(TestCase):
def test_singleton(self,level=2):
ftype = finfo(longdouble)
ftype2 = finfo(longdouble)
assert_equal(id(ftype),id(ftype2))
class TestIinfo(TestCase):
def test_basic(self):
dts = zip(['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8'],
[np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64])
for dt1, dt2 in dts:
assert_equal(iinfo(dt1).min, iinfo(dt2).min)
assert_equal(iinfo(dt1).max, iinfo(dt2).max)
self.assertRaises(ValueError, iinfo, 'f4')
def test_unsigned_max(self):
types = np.sctypes['uint']
for T in types:
assert_equal(iinfo(T).max, T(-1))
def test_instances():
iinfo(10)
finfo(3.0)
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 |
fluks/youtube-dl | youtube_dl/extractor/mitele.py | 22 | 2277 | from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urlparse,
)
from ..utils import (
get_element_by_attribute,
parse_duration,
strip_jsonp,
)
class MiTeleIE(InfoExtractor):
IE_NAME = 'mitele.es'
_VALID_URL = r'http://www\.mitele\.es/[^/]+/[^/]+/[^/]+/(?P<id>[^/]+)/'
_TEST = {
'url': 'http://www.mitele.es/programas-tv/diario-de/la-redaccion/programa-144/',
'md5': '6a75fe9d0d3275bead0cb683c616fddb',
'info_dict': {
'id': '0fce117d',
'ext': 'mp4',
'title': 'Programa 144 - Tor, la web invisible',
'description': 'md5:3b6fce7eaa41b2d97358726378d9369f',
'display_id': 'programa-144',
'duration': 2913,
},
}
def _real_extract(self, url):
episode = self._match_id(url)
webpage = self._download_webpage(url, episode)
embed_data_json = self._search_regex(
r'(?s)MSV\.embedData\[.*?\]\s*=\s*({.*?});', webpage, 'embed data',
).replace('\'', '"')
embed_data = json.loads(embed_data_json)
domain = embed_data['mediaUrl']
if not domain.startswith('http'):
# only happens in telecinco.es videos
domain = 'http://' + domain
info_url = compat_urlparse.urljoin(
domain,
compat_urllib_parse.unquote(embed_data['flashvars']['host'])
)
info_el = self._download_xml(info_url, episode).find('./video/info')
video_link = info_el.find('videoUrl/link').text
token_query = compat_urllib_parse.urlencode({'id': video_link})
token_info = self._download_json(
embed_data['flashvars']['ov_tk'] + '?' + token_query,
episode,
transform_source=strip_jsonp
)
return {
'id': embed_data['videoId'],
'display_id': episode,
'title': info_el.find('title').text,
'url': token_info['tokenizedUrl'],
'description': get_element_by_attribute('class', 'text', webpage),
'thumbnail': info_el.find('thumb').text,
'duration': parse_duration(info_el.find('duration').text),
}
| unlicense |
spectralDNS/shenfun | demo/Gray_Scott_fractal.py | 1 | 4395 | r"""
Solve Gray-Scott's equations on (-1, 1)x(-1, 1) with periodic bcs.
The equations to solve are
u_t = -e1*(-div(grad(u)))**(alpha1/2) + b*(1-u) - u*v**2 (1)
v_t = -e2*(-div(grad(v)))**(alpha2/2) - (b+kappa)*v + u*v**2 (2)
Using Fourier basis F and a vector tensor product space for u and v
The tensor product space is FF = F \otimes F, and the vector space is
W = FF \times FF.
The constant diffusion coefficients are e1 and e2. Furthermore, b and
kappa are two model constants. The parameters alpha1 and alpha2 represent
coefficients for fractional derivatives on the Laplacian.
The variational problem reads: Find uv = (u, v) in W such that
(qr, uv_t) = (qr, (e1, e2)*(-div(grad(uv)))**((alpha1, alpha2)/2)) \\
+ b*(q, 1-u) -(b+kappa)*(r, v) - (q, u*v**2) + (r, u*v**2)
for all qr = (q, r) in W
Initial conditions are given as
u(t=0) = 1 for abs(x) > 0.04 and 0.50 for abs(x) < 0.04
v(t=0) = 0 for abs(x) > 0.04 and 0.25 for abs(x) < 0.04
and for stability they are approximated using error functions.
"""
from sympy import symbols
from sympy.functions import erf
import numpy as np
import matplotlib.pyplot as plt
from mpi4py_fft import generate_xdmf
from shenfun import inner, div, grad, TestFunction, TrialFunction, Function, \
HDF5File, ETDRK4, TensorProductSpace, VectorSpace, FunctionSpace, Array, \
comm
# Use sympy to set up initial condition
x, y = symbols("x,y", real=True)
# Initial conditions
a = 0.0001
u0 = 0.5*(1-((0.5*(erf((x-0.04)/a)+1) - 0.5*(erf((x+0.04)/a)+1))*(0.5*(erf((y-0.04)/a)+1) - 0.5*(erf((y+0.04)/a)+1))))+0.5
v0 = 0.25*(0.5*(erf((x-0.04)/a)+1) - 0.5*(erf((x+0.04)/a)+1))*(0.5*(erf((y-0.04)/a)+1) - 0.5*(erf((y+0.04)/a)+1))
# Size of discretization
N = (200, 200)
K0 = FunctionSpace(N[0], 'F', dtype='D', domain=(-1., 1.))
K1 = FunctionSpace(N[1], 'F', dtype='d', domain=(-1., 1.))
T = TensorProductSpace(comm, (K0, K1))
u = TrialFunction(T)
v = TestFunction(T)
# For nonlinear term we can use the 3/2-rule with padding
Tp = T.get_dealiased((1.5, 1.5))
# Turn off padding by uncommenting the line below:
#Tp = T
# Create vector spaces and a test function for the regular vector space
TV = VectorSpace(T)
TVp = VectorSpace(Tp)
vv = TestFunction(TV)
uu = TrialFunction(TV)
# Declare solution arrays and work arrays
UV = Array(TV, buffer=(u0, v0))
UVp = Array(TVp)
U, V = UV # views into vector components
UV_hat = Function(TV)
w0 = Function(TV) # Work array spectral space
w1 = Array(TVp) # Work array physical space
e1 = 0.00002
e2 = 0.00001
b0 = 0.03
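# e1 and e2 are the (fractional) diffusion coefficients and b0 is the feed
# rate b from the equations in the module docstring; kappa and the fractional
# orders alpha1/alpha2 are supplied via the parameter dict in __main__ below.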
#initialize
UV_hat = UV.forward(UV_hat)
def LinearRHS(self, u, alpha1, alpha2, **params):
L = inner(vv, (e1, e2)*div(grad(u)))
L = np.array([-(-L[0].scale)**(alpha1/2),
-(-L[1].scale)**(alpha2/2)])
return L
def NonlinearRHS(self, uv, uv_hat, rhs, kappa, **params):
global b0, UVp, w0, w1, TVp
rhs.fill(0)
UVp = TVp.backward(uv_hat, UVp) # 3/2-rule dealiasing for nonlinear term
w1[0] = b0*(1-UVp[0]) - UVp[0]*UVp[1]**2
w1[1] = -(b0+kappa)*UVp[1] + UVp[0]*UVp[1]**2
w0 = TVp.forward(w1, w0)
rhs += w0
return rhs
plt.figure()
X = T.local_mesh(True)
image = plt.contourf(X[0], X[1], U.real, 100)
plt.draw()
plt.pause(1)
uv0 = np.zeros_like(UV)
def update(self, uv, uv_hat, t, tstep, **params):
if tstep % params['plot_step'] == 0 and params['plot_step'] > 0:
uv = uv_hat.backward(uv)
image.ax.clear()
image.ax.contourf(X[0], X[1], uv[0].real, 100)
plt.pause(1e-6)
print(np.linalg.norm(uv[0]-uv0[0]),
np.linalg.norm(uv[1]-uv0[1]),
np.linalg.norm(uv[0]),
np.linalg.norm(uv[1]))
uv0[:] = uv
if tstep % params['write_tstep'][0] == 0:
uv = uv_hat.backward(uv)
params['file'].write(tstep, params['write_tstep'][1], as_scalar=True)
if __name__ == '__main__':
file0 = HDF5File("Gray_Scott_{}.h5".format(N[0]), mode='w')
par = {'plot_step': 200,
'write_tstep': (200, {'uv': [UV]}),
'file': file0,
'kappa': 0.061,
'alpha1': 1.5,
'alpha2': 1.9}
dt = 10.
end_time = 1000000
integrator = ETDRK4(TV, L=LinearRHS, N=NonlinearRHS, update=update, **par)
integrator.setup(dt)
UV_hat = integrator.solve(UV, UV_hat, dt, (0, end_time))
generate_xdmf("Gray_Scott_{}.h5".format(N[0]))
| bsd-2-clause |
Hehwang/Leetcode-Python | code/593 Valid Square.py | 1 | 1345 | class Solution:
def validSquare(self, p1, p2, p3, p4):
"""
:type p1: List[int]
:type p2: List[int]
:type p3: List[int]
:type p4: List[int]
:rtype: bool
"""
list1=[p2,p3,p4]
length_list=[]
vector_list=[]
for i in range(3):
a=list1[i][0]
b=list1[i][1]
if a==p1[0] and b==p1[1]:
return False
length=(a-p1[0])**2+(b-p1[1])**2
vector=(a-p1[0],b-p1[1])
length_list.append(length)
vector_list.append(vector)
max_length=max(length_list)
max_index=length_list.index(max_length)
max_vector=vector_list[max_index]
others=list(range(max_index))+list(range(max_index+1,3))
for i in others:
if length_list[i]*2!=max_length:
return False
cross=vector_list[i][0]*vector_list[max_index][0]+vector_list[i][1]*vector_list[max_index][1]
tmp=(vector_list[i][0]**2+vector_list[i][1]**2)*(vector_list[max_index][0]**2+vector_list[max_index][1]**2)
if cross<0 or cross**2*2!=tmp:
return False
vec1,vec2=vector_list[others[0]],vector_list[others[1]]
if vec1[0]*vec2[0]+vec1[1]*vec2[1]!=0:
return False
return True | mit |
AXAz0r/apex-sigma | sigma/plugins/moderation/permissions/unpermitrole.py | 2 | 3448 | import discord
from sigma.core.permission import check_admin
from .nodes.permission_data import get_all_perms, generate_cmd_data
from sigma.core.rolecheck import matching_role
async def unpermitrole(cmd, message, args):
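# Argument format (inferred from the parsing below): args[0] is a "mode:name"
# pair where mode is 'c' for a command or 'm' for a module, and args[1] is the
# role whose permission exception should be removed.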
if args:
if len(args) >= 2:
if not check_admin(message.author, message.channel):
response = discord.Embed(title='⛔ Unpermitted. Server Admin Only.', color=0xDB0000)
else:
target_name = args[1]
target = matching_role(message.guild, target_name)
if target:
error_response = discord.Embed(color=0xDB0000, title='❗ Bad Input')
try:
perm_mode, cmd_name = args[0].split(':')
except ValueError:
await message.channel.send(embed=error_response)
return
cmd_name = cmd_name.lower()
perm_mode = perm_mode.lower()
if perm_mode == 'c':
exception_group = 'CommandExceptions'
check_group = cmd.bot.plugin_manager.commands
check_alts = True
elif perm_mode == 'm':
exception_group = 'ModuleExceptions'
check_group = cmd.bot.module_list
check_alts = False
else:
await message.channel.send(embed=error_response)
return
if check_alts:
if cmd_name in cmd.bot.alts:
cmd_name = cmd.bot.alts[cmd_name]
if cmd_name in check_group:
perms = get_all_perms(cmd.db, message)
cmd_exc = perms[exception_group]
if cmd_name in perms[exception_group]:
inner_exc = cmd_exc[cmd_name]
else:
inner_exc = generate_cmd_data(cmd_name)[cmd_name]
exc_usrs = inner_exc['Roles']
if target.id in exc_usrs:
exc_usrs.remove(target.id)
inner_exc.update({'Roles': exc_usrs})
cmd_exc.update({cmd_name: inner_exc})
perms.update({exception_group: cmd_exc})
cmd.db.update_one('Permissions', {'ServerID': message.guild.id}, {'$set': perms})
response = discord.Embed(color=0x66CC66,
title=f'✅ `{target.name}` can no longer use `{cmd_name}`.')
else:
response = discord.Embed(color=0xFF9900,
title=f'⚠ {target.name} is not able to use `{cmd_name}`')
else:
response = discord.Embed(color=0x696969, title='🔍 Command/Module Not Found')
else:
response = discord.Embed(color=0x696969, title=f'🔍 No {target_name} Role Found')
else:
response = discord.Embed(color=0xDB0000, title='❗ Not Enough Arguments')
else:
response = discord.Embed(color=0xDB0000, title='❗ No Arguments Given')
await message.channel.send(embed=response)
| gpl-3.0 |
mcanthony/rethinkdb | scripts/generate_serialize_macros.py | 29 | 9043 | #!/usr/bin/env python
# Copyright 2010-2014 RethinkDB, all rights reserved.
import sys
"""This script is used to generate the RDB_MAKE_SERIALIZABLE_*() and
RDB_MAKE_ME_SERIALIZABLE_*() macro definitions. Because there are so
many variations, and because they are so similar, it's easier to just
have a Python script to generate them.
This script is meant to be run as follows (assuming you are in the
"rethinkdb/src" directory):
$ ../scripts/generate_serialize_macros.py > rpc/serialize_macros.hpp
"""
def generate_make_serializable_macro(nfields):
fields = "".join(", field%d" % (i + 1) for i in xrange(nfields))
zeroarg = ("UNUSED " if nfields == 0 else "")
print "#define RDB_MAKE_SERIALIZABLE_%d(type_t%s) \\" % \
(nfields, fields)
print " template <cluster_version_t W> \\"
print " void serialize(%swrite_message_t *wm, %sconst type_t &thing) { \\" % (zeroarg, zeroarg)
for i in xrange(nfields):
print " serialize<W>(wm, thing.field%d); \\" % (i + 1)
print " } \\"
print " template <cluster_version_t W> \\"
print " archive_result_t deserialize(%sread_stream_t *s, %stype_t *thing) { \\" % (zeroarg, zeroarg)
print " archive_result_t res = archive_result_t::SUCCESS; \\"
for i in xrange(nfields):
print " res = deserialize<W>(s, deserialize_deref(thing->field%d)); \\" % (i + 1)
print " if (bad(res)) { return res; } \\"
print " return res; \\"
print " } \\"
print " extern int dont_use_RDB_MAKE_SERIALIZABLE_within_a_class_body"
print
print "#define RDB_MAKE_SERIALIZABLE_%d_FOR_CLUSTER(type_t%s) \\" % \
(nfields, fields)
print " template <> \\"
print " void serialize<cluster_version_t::CLUSTER>( \\"
print " %swrite_message_t *wm, %sconst type_t &thing) { \\" % (zeroarg, zeroarg)
for i in xrange(nfields):
print " serialize<cluster_version_t::CLUSTER>(wm, thing.field%d); \\" % (i + 1)
print " } \\"
print " template <> \\"
print " archive_result_t deserialize<cluster_version_t::CLUSTER>( \\"
print " %sread_stream_t *s, %stype_t *thing) { \\" % (zeroarg, zeroarg)
print " archive_result_t res = archive_result_t::SUCCESS; \\"
for i in xrange(nfields):
print " res = deserialize<cluster_version_t::CLUSTER>( \\"
print " s, deserialize_deref(thing->field%d)); \\" % (i + 1)
print " if (bad(res)) { return res; } \\"
print " return res; \\"
print " } \\"
print " extern int dont_use_RDB_MAKE_SERIALIZABLE_FOR_CLUSTER_within_a_class_body"
print
# See the note in the comment below.
print "#define RDB_IMPL_SERIALIZABLE_%d(type_t%s) RDB_MAKE_SERIALIZABLE_%d(type_t%s); \\" % (nfields, fields, nfields, fields)
print
print "#define RDB_IMPL_SERIALIZABLE_%d_FOR_CLUSTER(type_t%s) \\" % (nfields, fields)
print " RDB_MAKE_SERIALIZABLE_%d_FOR_CLUSTER(type_t%s); \\" % (nfields, fields)
print " INSTANTIATE_SERIALIZABLE_FOR_CLUSTER(type_t);"
print
print "#define RDB_IMPL_SERIALIZABLE_%d_SINCE_v1_13(type_t%s) \\" % (nfields, fields)
print " RDB_IMPL_SERIALIZABLE_%d(type_t%s); \\" % (nfields, fields)
print " INSTANTIATE_SERIALIZABLE_SINCE_v1_13(type_t)"
print
print "#define RDB_IMPL_SERIALIZABLE_%d_SINCE_v1_16(type_t%s) \\" % (nfields, fields)
print " RDB_IMPL_SERIALIZABLE_%d(type_t%s); \\" % (nfields, fields)
print " INSTANTIATE_SERIALIZABLE_SINCE_v1_16(type_t)"
print
print "#define RDB_IMPL_SERIALIZABLE_%d_SINCE_v2_1(type_t%s) \\" % (nfields, fields)
print " RDB_IMPL_SERIALIZABLE_%d(type_t%s); \\" % (nfields, fields)
print " INSTANTIATE_SERIALIZABLE_SINCE_v2_1(type_t)"
print "#define RDB_MAKE_ME_SERIALIZABLE_%d(type_t%s) \\" % \
(nfields, fields)
print " template <cluster_version_t W> \\"
print " friend void serialize(%swrite_message_t *wm, %sconst type_t &thing) { \\" % (zeroarg, zeroarg)
for i in xrange(nfields):
print " serialize<W>(wm, thing.field%d); \\" % (i + 1)
print " } \\"
print " template <cluster_version_t W> \\"
print " friend archive_result_t deserialize(%sread_stream_t *s, %stype_t *thing) { \\" % (zeroarg, zeroarg)
print " archive_result_t res = archive_result_t::SUCCESS; \\"
for i in xrange(nfields):
print " res = deserialize<W>(s, deserialize_deref(thing->field%d)); \\" % (i + 1)
print " if (bad(res)) { return res; } \\"
print " return res; \\"
print " }"
if __name__ == "__main__":
print "// Copyright 2010-2014 RethinkDB, all rights reserved."
print "#ifndef RPC_SERIALIZE_MACROS_HPP_"
print "#define RPC_SERIALIZE_MACROS_HPP_"
print
print "/* This file is automatically generated by '%s'." % " ".join(sys.argv)
print "Please modify '%s' instead of modifying this file.*/" % sys.argv[0]
print
print "#include <type_traits>"
print
print "#include \"containers/archive/archive.hpp\""
print "#include \"containers/archive/versioned.hpp\""
print "#include \"errors.hpp\""
print "#include \"version.hpp\""
print
print """
/* The purpose of these macros is to make it easier to serialize and
unserialize data types that consist of a simple series of fields, each
of which is serializable. Suppose we have a type "struct point_t {
int32_t x, y; }" that we want to be able to serialize. To make it
serializable automatically, either write
RDB_MAKE_SERIALIZABLE_2(point_t, x, y) at the global scope, or write
RDB_MAKE_ME_SERIALIZABLE_2(point_t, x, y) within the body of the
point_t type.
The _FOR_CLUSTER variants of the macros exist to indicate that a type
can only be serialized for use within the cluster, thus should not be
serialized to disk.
The _SINCE_v1_13 variants of the macros exist to make the conversion to
versioned serialization easier. They must only be used for types whose
serialization format has not changed since version 1.13.0.
Once the format changes, you can still use the macros without
the _SINCE_v1_13 suffix and instantiate the serialize() and deserialize()
functions explicitly for a certain version.
We use dummy "extern int" declarations to force a compile error in
macros that should not be used inside of class bodies. */
""".strip()
print "namespace helper {"
print
print "/* When a `static_assert` is used within a templated class or function,"
print " * but does not depend on any template parameters the C++ compiler is free"
print " * to evaluate the assert even before instantiating that template. This"
print " * helper class allows a `static_assert(false, ...)` to depend on the"
print " * `cluster_version_t` template parameter."
print " * Also see http://stackoverflow.com/a/14637534. */"
print "template <cluster_version_t W>"
print "struct always_false"
print " : std::false_type { };"
print
print "} // namespace helper"
print
print "#define RDB_DECLARE_SERIALIZABLE(type_t) \\"
print " template <cluster_version_t W> \\"
print " void serialize(write_message_t *, const type_t &); \\"
print " template <cluster_version_t W> \\"
print " archive_result_t deserialize(read_stream_t *s, type_t *thing); \\"
print " extern int dont_use_RDB_DECLARE_SERIALIZABLE_within_a_class_body"
print
print "#define RDB_DECLARE_SERIALIZABLE_FOR_CLUSTER(type_t) \\"
print " template <cluster_version_t W> \\"
print " void serialize(write_message_t *, const type_t &) { \\"
print " static_assert(helper::always_false<W>::value, \\"
print " \"This type is only serializable for cluster.\"); \\"
print " unreachable(); \\"
print " } \\"
print " template <> \\"
print " void serialize<cluster_version_t::CLUSTER>( \\"
print " write_message_t *, const type_t &); \\"
print " template <cluster_version_t W> \\"
print " archive_result_t deserialize(read_stream_t *, type_t *) { \\"
print " static_assert(helper::always_false<W>::value, \\"
print " \"This type is only deserializable for cluster.\"); \\"
print " unreachable(); \\"
print " } \\"
print " template <> \\"
print " archive_result_t deserialize<cluster_version_t::CLUSTER>( \\"
print " read_stream_t *s, type_t *thing)"
print
print "#define RDB_DECLARE_ME_SERIALIZABLE(type_t) \\"
print " template <cluster_version_t W> \\"
print " friend void serialize(write_message_t *, const type_t &); \\"
print " template <cluster_version_t W> \\"
print " friend archive_result_t deserialize(read_stream_t *s, type_t *thing)"
for nfields in xrange(20):
generate_make_serializable_macro(nfields)
print
print "#endif // RPC_SERIALIZE_MACROS_HPP_"
| agpl-3.0 |
b-dollery/testing | v2/ansible/playbook/handler.py | 13 | 1256 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from v2.errors import AnsibleError
from v2.inventory import Host
from v2.playbook import Task
class Handler(Task):
def __init__(self):
pass
def flag_for_host(self, host):
assert isinstance(host, Host)
pass
def has_triggered(self):
return self._triggered
def set_triggered(self, triggered):
assert isinstance(triggered, bool)
self._triggered = triggered
| gpl-3.0 |
terrychenism/shadowsocks | shadowsocks/encrypt.py | 990 | 5180 | #!/usr/bin/env python
#
# Copyright 2012-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
from shadowsocks import common
from shadowsocks.crypto import rc4_md5, openssl, sodium, table
method_supported = {}
method_supported.update(rc4_md5.ciphers)
method_supported.update(openssl.ciphers)
method_supported.update(sodium.ciphers)
method_supported.update(table.ciphers)
def random_string(length):
return os.urandom(length)
cached_keys = {}
def try_cipher(key, method=None):
Encryptor(key, method)
def EVP_BytesToKey(password, key_len, iv_len):
# equivalent to OpenSSL's EVP_BytesToKey() with count 1
# so that we make the same key and iv as nodejs version
cached_key = '%s-%d-%d' % (password, key_len, iv_len)
r = cached_keys.get(cached_key, None)
if r:
return r
m = []
i = 0
while len(b''.join(m)) < (key_len + iv_len):
md5 = hashlib.md5()
data = password
if i > 0:
data = m[i - 1] + password
md5.update(data)
m.append(md5.digest())
i += 1
ms = b''.join(m)
key = ms[:key_len]
iv = ms[key_len:key_len + iv_len]
cached_keys[cached_key] = (key, iv)
return key, iv
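# Example (illustrative): deriving key material for AES-256-CFB, which uses a
# 32-byte key and a 16-byte IV, from a password:
#   key, iv = EVP_BytesToKey(b'password', 32, 16)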
class Encryptor(object):
def __init__(self, key, method):
self.key = key
self.method = method
self.iv = None
self.iv_sent = False
self.cipher_iv = b''
self.decipher = None
method = method.lower()
self._method_info = self.get_method_info(method)
if self._method_info:
self.cipher = self.get_cipher(key, method, 1,
random_string(self._method_info[1]))
else:
logging.error('method %s not supported' % method)
sys.exit(1)
def get_method_info(self, method):
method = method.lower()
m = method_supported.get(method)
return m
def iv_len(self):
return len(self.cipher_iv)
def get_cipher(self, password, method, op, iv):
password = common.to_bytes(password)
m = self._method_info
if m[0] > 0:
key, iv_ = EVP_BytesToKey(password, m[0], m[1])
else:
# key_length == 0 indicates we should use the key directly
key, iv = password, b''
iv = iv[:m[1]]
if op == 1:
# this iv is for cipher not decipher
self.cipher_iv = iv[:m[1]]
return m[2](method, key, iv, op)
def encrypt(self, buf):
if len(buf) == 0:
return buf
if self.iv_sent:
return self.cipher.update(buf)
else:
self.iv_sent = True
return self.cipher_iv + self.cipher.update(buf)
def decrypt(self, buf):
if len(buf) == 0:
return buf
if self.decipher is None:
decipher_iv_len = self._method_info[1]
decipher_iv = buf[:decipher_iv_len]
self.decipher = self.get_cipher(self.key, self.method, 0,
iv=decipher_iv)
buf = buf[decipher_iv_len:]
if len(buf) == 0:
return buf
return self.decipher.update(buf)
def encrypt_all(password, method, op, data):
result = []
method = method.lower()
(key_len, iv_len, m) = method_supported[method]
if key_len > 0:
key, _ = EVP_BytesToKey(password, key_len, iv_len)
else:
key = password
if op:
iv = random_string(iv_len)
result.append(iv)
else:
iv = data[:iv_len]
data = data[iv_len:]
cipher = m(method, key, iv, op)
result.append(cipher.update(data))
return b''.join(result)
CIPHERS_TO_TEST = [
'aes-128-cfb',
'aes-256-cfb',
'rc4-md5',
'salsa20',
'chacha20',
'table',
]
def test_encryptor():
from os import urandom
plain = urandom(10240)
for method in CIPHERS_TO_TEST:
logging.warn(method)
encryptor = Encryptor(b'key', method)
decryptor = Encryptor(b'key', method)
cipher = encryptor.encrypt(plain)
plain2 = decryptor.decrypt(cipher)
assert plain == plain2
def test_encrypt_all():
from os import urandom
plain = urandom(10240)
for method in CIPHERS_TO_TEST:
logging.warn(method)
cipher = encrypt_all(b'key', method, 1, plain)
plain2 = encrypt_all(b'key', method, 0, cipher)
assert plain == plain2
if __name__ == '__main__':
test_encrypt_all()
test_encryptor()
| apache-2.0 |
strogo/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Tool/hplink.py | 61 | 2390 | """SCons.Tool.hplink
Tool-specific initialization for the HP linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/hplink.py 5134 2010/08/16 23:02:40 bdeegan"
import os
import os.path
import SCons.Util
import link
ccLinker = None
# search for the aCC compiler and linker front end
try:
dirs = os.listdir('/opt')
except (IOError, OSError):
# Not being able to read the directory because it doesn't exist
# (IOError) or isn't readable (OSError) is okay.
dirs = []
for dir in dirs:
linker = '/opt/' + dir + '/bin/aCC'
if os.path.exists(linker):
ccLinker = linker
break
def generate(env):
"""
Add Builders and construction variables for Visual Age linker to
an Environment.
"""
link.generate(env)
env['LINKFLAGS'] = SCons.Util.CLVar('-Wl,+s -Wl,+vnocompatwarnings')
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -b')
env['SHLIBSUFFIX'] = '.sl'
def exists(env):
return ccLinker
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
rex-xxx/mt6572_x201 | external/webkit/Tools/TestResultServer/handlers/testfilehandler.py | 15 | 8734 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import urllib
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from model.jsonresults import JsonResults
from model.testfile import TestFile
PARAM_MASTER = "master"
PARAM_BUILDER = "builder"
PARAM_DIR = "dir"
PARAM_FILE = "file"
PARAM_NAME = "name"
PARAM_KEY = "key"
PARAM_TEST_TYPE = "testtype"
PARAM_INCREMENTAL = "incremental"
PARAM_TEST_LIST_JSON = "testlistjson"
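# Example request (illustrative), matching the parameters above:
#   GET /testfile?master=<master>&builder=<builder>&testtype=<testtype>&name=results.json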
class DeleteFile(webapp.RequestHandler):
"""Delete test file for a given builder and name from datastore."""
def get(self):
key = self.request.get(PARAM_KEY)
master = self.request.get(PARAM_MASTER)
builder = self.request.get(PARAM_BUILDER)
test_type = self.request.get(PARAM_TEST_TYPE)
name = self.request.get(PARAM_NAME)
logging.debug(
"Deleting File, master: %s, builder: %s, test_type: %s, name: %s, key: %s.",
master, builder, test_type, name, key)
TestFile.delete_file(key, master, builder, test_type, name, 100)
# Display file list after deleting the file.
self.redirect("/testfile?master=%s&builder=%s&testtype=%s&name=%s"
% (master, builder, test_type, name))
class GetFile(webapp.RequestHandler):
"""Get file content or list of files for given builder and name."""
def _get_file_list(self, master, builder, test_type, name):
"""Get and display a list of files that matches builder and file name.
Args:
builder: builder name
test_type: type of the test
name: file name
"""
files = TestFile.get_files(
master, builder, test_type, name, load_data=False, limit=100)
if not files:
logging.info("File not found, master: %s, builder: %s, test_type: %s, name: %s.",
master, builder, test_type, name)
self.response.out.write("File not found")
return
template_values = {
"admin": users.is_current_user_admin(),
"master": master,
"builder": builder,
"test_type": test_type,
"name": name,
"files": files,
}
self.response.out.write(template.render("templates/showfilelist.html",
template_values))
def _get_file_content(self, master, builder, test_type, name):
"""Return content of the file that matches builder and file name.
Args:
builder: builder name
test_type: type of the test
name: file name
"""
files = TestFile.get_files(
master, builder, test_type, name, load_data=True, limit=1)
if not files:
logging.info("File not found, master %s, builder: %s, test_type: %s, name: %s.",
master, builder, test_type, name)
return None
return files[0].data
def _get_test_list_json(self, master, builder, test_type):
"""Return json file with test name list only, do not include test
results and other non-test-data .
Args:
builder: builder name.
test_type: type of test results.
"""
json = self._get_file_content(master, builder, test_type, "results.json")
if not json:
return None
return JsonResults.get_test_list(builder, json)
def get(self):
master = self.request.get(PARAM_MASTER)
builder = self.request.get(PARAM_BUILDER)
test_type = self.request.get(PARAM_TEST_TYPE)
name = self.request.get(PARAM_NAME)
dir = self.request.get(PARAM_DIR)
test_list_json = self.request.get(PARAM_TEST_LIST_JSON)
logging.debug(
"Getting files, master %s, builder: %s, test_type: %s, name: %s.",
master, builder, test_type, name)
# If parameter "dir" is specified or there is no builder or filename
# specified in the request, return list of files, otherwise, return
# file content.
if dir or not builder or not name:
return self._get_file_list(master, builder, test_type, name)
if name == "results.json" and test_list_json:
json = self._get_test_list_json(master, builder, test_type)
else:
json = self._get_file_content(master, builder, test_type, name)
if json:
self.response.headers["Content-Type"] = "text/plain; charset=utf-8"
self.response.out.write(json)
else:
self.error(404)
class Upload(webapp.RequestHandler):
"""Upload test results file to datastore."""
def post(self):
file_params = self.request.POST.getall(PARAM_FILE)
if not file_params:
self.response.out.write("FAIL: missing upload file field.")
return
builder = self.request.get(PARAM_BUILDER)
if not builder:
self.response.out.write("FAIL: missing builder parameter.")
return
master = self.request.get(PARAM_MASTER)
test_type = self.request.get(PARAM_TEST_TYPE)
incremental = self.request.get(PARAM_INCREMENTAL)
logging.debug(
"Processing upload request, master: %s, builder: %s, test_type: %s.",
master, builder, test_type)
# There are two possible types of each file_params in the request:
# one file item or a list of file items.
# Normalize file_params to a file item list.
files = []
logging.debug("test: %s, type:%s", file_params, type(file_params))
for item in file_params:
if not isinstance(item, list) and not isinstance(item, tuple):
item = [item]
files.extend(item)
errors = []
for file in files:
filename = file.filename.lower()
if ((incremental and filename == "results.json") or
(filename == "incremental_results.json")):
# Merge incremental json results.
update_succeeded = JsonResults.update(master, builder, test_type, file.value)
else:
update_succeeded = TestFile.update(
master, builder, test_type, file.filename, file.value)
if not update_succeeded:
errors.append(
"Upload failed, master: %s, builder: %s, test_type: %s, name: %s." %
(master, builder, test_type, file.filename))
if errors:
messages = "FAIL: " + "; ".join(errors)
logging.warning(messages)
self.response.set_status(500, messages)
self.response.out.write("FAIL")
else:
self.response.set_status(200)
self.response.out.write("OK")
class UploadForm(webapp.RequestHandler):
"""Show a form so user can upload a file."""
def get(self):
template_values = {
"upload_url": "/testfile/upload",
}
self.response.out.write(template.render("templates/uploadform.html",
template_values))
| gpl-2.0 |
smart-m3/sib-daemon | python/wilbur_m3.py | 1 | 20587 |
# Copyright (c) 2009, Nokia Corporation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Nokia nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# wilbur.py
#
# Author: Ora Lassila mailto:[email protected]
# Copyright (c) 2001-2008 Nokia. All Rights Reserved.
#
import piglet
import iso8601
import re
import os
class DB(object):
def __init__(self, dbfile, seed=False):
self.dbfile = dbfile
self.nodeCache = {}
self.memberProps = []
self.home = os.getenv("PIGLET_HOME", os.getenv("PWD", "/tmp"))
self.qe = self.makeQueryEngine()
self.db = self.m3_opendb(dbfile)
self.type = self['rdf:type']
self.subprop = self['rdfs:subPropertyOf']
self.subclass = self['rdfs:subClassOf']
self.resource = self['rdfs:Resource']
self.sa = self['owl:sameAs']
self.reasoner = 0 # self['piglet:Reasoner']
self.literalParser = LiteralParser(self)
self.bootstrap()
(sources, namespaces, triples) = self.seedData()
if seed:
for source in set(sources):
self.load(source, True, True)
for (prefix, uri) in set(namespaces):
self.addNamespace(prefix, uri)
for (s, p, o, temp) in set(triples):
self.add(s, p, o, 0, temp)
self.postProcess(0)
def m3_opendb(self, dbfile):
if '/' == self.home[-1]:
rval = piglet.open(self.home+dbfile)
else:
rval = piglet.open(self.home+'/'+dbfile)
if rval != None:
return rval
else:
raise Error("Could not open database")
def bootstrap(self): pass
def postProcess(self, source): pass
def m3_get_db(self):
return self.db
def makeQueryEngine(self):
return WQL(self)
def seedData(self):
if self.home:
if self.home[-1] != '/':
rval = (["file://%s/website/piglet.rdf" % self.home,
"http://www.w3.org/1999/02/22-rdf-syntax-ns#",
"http://www.w3.org/2000/01/rdf-schema#",
"http://www.w3.org/2002/07/owl#"],
[],
[])
else:
rval = (["file://%swebsite/piglet.rdf" % self.home,
"http://www.w3.org/1999/02/22-rdf-syntax-ns#",
"http://www.w3.org/2000/01/rdf-schema#",
"http://www.w3.org/2002/07/owl#"],
[],
[])
return rval
else:
raise Error("No PIGLET_HOME environment variable defined")
def newMemberProp(self, i):
prop = self.node("http://www.w3.org/1999/02/22-rdf-syntax-ns#_%d" % (i))
self.memberProps.append(prop)
return prop
def getMemberProp(self, i):
n = len(self.memberProps) + 1
if i < n:
return self.memberProps[i-1]
elif i == n:
return self.newMemberProp(i)
else:
raise Error("Out-of-sequence member index %d" % (i))
def __getitem__(self, qname):
n = self.nodeCache.get(qname)
if not n:
n = self.node(self.db.expand(qname))
self.nodeCache[qname] = n
return n
def values(self, node, path, reasoner=False):
if isinstance(path, int):
return [o for (s, p, o) in self.query(node, path, 0)]
elif isinstance(path, SpecialPathNode):
return path.values(self, node, reasoner)
elif isinstance(path, list):
return self.values(node, self.qe.fsa(path), reasoner)
elif isinstance(path, PathFSA):
collector = Collector(self, path)
collector.walk(node)
return list(collector.results)
elif isinstance(path, str):
return self.values(node, self.qe.canonicalize(path), reasoner)
else:
raise UnsupportedPath(path)
def related(self, source, path, sink, reasoner=False):
if isinstance(path, int):
return self.db.count(source, path, sink, 0)
elif isinstance(path, SpecialPathNode):
return path.related(self, source, sink, reasoner)
elif isinstance(path, list):
return self.related(source, self.qe.fsa(path), sink, reasoner)
elif isinstance(path, PathFSA):
return Reacher(self, path, sink).walk(source)
elif isinstance(path, str):
return self.related(source, self.qe.canonicalize(path), sink, reasoner)
else:
raise UnsupportedPath(path)
def query(self, s, p, o, source=0):
return self.db.query(s, p, o, source)
def sources(self, s, p, o):
return self.db.sources(s, p, o)
def count(self, s, p, o, source=0):
return self.db.count(s, p, o, source)
def add(self, s, p, o, source=0, temporary=False):
return self.db.add(s, p, o, source, 1 if temporary else 0)
def delete(self, s, p, o, source=0, temporary=False):
return self.db.delete(s, p, o, source, 1 if temporary else 0)
def load(self, source, verbose=True, seed=False):
if isinstance(source, str):
source = self.node(source)
return self.db.load(source, 0, 1 if verbose else 0)
def info(self, node):
return self.db.info(node)
def node(self, uri):
return self.db.node(uri)
def literal(self, contents, dt=0, language=""):
return self.db.literal(contents, dt, language)
def augmentLiteral(self, node, datatype):
return self.db.augmentLiteral(node, datatype)
def expand_m3(self, qname):
tmp = self.db.expand_m3(qname)
if tmp:
return tmp
else:
return qname
def expand(self, qname):
return self.db.expand(qname)
def abbreviate(self, uri):
return self.db.abbreviate(uri)
def addNamespace(self, prefix, uri):
return self.db.addNamespace(prefix, uri)
def delNamespace(self, prefix):
return self.db.delNamespace(prefix)
def match(self, pattern):
return self.db.match(pattern)
def literalValue(self, node, datatype=None):
if node < 0:
return self.literalParser.parseLiteral(node, datatype)
else:
raise Error("Node %s is not a literal" % self.info(node))
def translate(self, node, db):
if node > 0:
return db.node(self.info(node))
elif node < 0:
(string, dt, lang) = self.info(node)
return db.literal(string, dt, lang)
else:
return 0
class WQL:
specialTokens = ['any', 'members', 'self', 'p-of-o', 'p-of-s']
def __init__(self, db):
self.db = db
self.fsaCache = {}
def fsa(self, path):
p = self.canonicalize(path)
r = repr(p)
f = self.fsaCache.get(r)
if f == None:
f = PathFSA(p)
self.fsaCache[r] = f
return f
def canonicalize(self, path):
if isinstance(path, list):
(op, arg, args) = (path[0], path[1], path[2:])
if op == 'rep*':
return [op, self.canonicalize(arg)]
elif op == 'rep+':
return self.canonicalize(['seq', arg, ['rep*', arg]])
elif op == 'inv':
return self.canonicalize(self.invert(arg))
elif op == 'value':
return DefaultValue(arg)
elif op == 'norewrite':
return self.canonicalize(arg)
elif op == 'filter':
return StringFilter(arg)
elif op in ['seq', 'seq+', 'or']:
if args:
# incomplete!
if len(args) > 1:
return self.canonicalize([op, arg, [op] + args])
else:
return [op, self.canonicalize(arg), self.canonicalize(args[0])]
else:
return self.canonicalize(arg)
elif path in self.specialTokens:
return SpecialToken(path)
elif path == 'all':
return 0
elif not isinstance(path, str):
return path
else:
raise UnsupportedPath(path)
def isDefaultValue(self, path):
return (isinstance(path, DefaultValue)
or (isinstance(path, list) and path[0] == 'value'))
def invert(self, path):
if isinstance(path, list):
return ([path[0]] +
[self.invert(i) for i in path[:0:-1] if not self.isDefaultValue(i)])
elif isinstance(path, InverseNode):
return path.node
elif path == 'self':
return 'self'
else:
return InverseNode(self.canonicalize(path))
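# Example (illustrative sketch of a WQL path expression): given a DB instance
# db, a path such as
#   path = ['seq', db['rdf:type'], ['rep*', db['rdfs:subClassOf']]]
# can be handed to db.values(node, path); canonicalize() rewrites it and fsa()
# compiles it into a PathFSA that the Walker classes below traverse.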
class Error(Exception, object): pass
class UnsupportedPath(Error):
def __init__(self, path):
super(UnsupportedPath, self).__init__("Unsupported path '%s'" % (path))
class UnknownToken(Error):
def __init__(self, token):
super(UnknownToken, self).__init__("Unknown token '%s'" % (token))
class SpecialPathNode:
def __init__(self, node): self.node = node
def __repr__(self): return "%s(%s)" % (self.__class__, self.node)
def values(self, db, node, reasoner=False):
raise Error("No 'values' method defined for %s" % (self))
def related(self, db, source, sink, reasoner=False):
raise Error("No 'related' method defined for %s" % (self))
class InverseNode(SpecialPathNode):
def values(self, db, node, reasoner=False):
path = self.node
if isinstance(path, SpecialToken):
path = path.node
if path == 'any':
return [s for (s, p, o) in db.query(0, 0, node)]
elif path == 'members':
results = set()
for p in self.memberProps(db):
v = db.values(node, InverseNode(p), False)
if v:
results.add(v)
else:
break
return list(results)
elif path == 'p-of-s':
return [s for (s, p, o) in db.query(0, node, 0)]
elif path == 'p-of-o':
return [o for (s, p, o) in db.query(0, node, 0)]
else:
raise UnknownToken(path)
else:
return [s for (s, p, o) in db.query(0, path, node)]
def related(self, db, source, sink, reasoner=False):
return db.related(sink, self.node, source, reasoner)
class DefaultValue(SpecialPathNode):
def values(self, db, node, reasoner=False):
return [self.node]
def related(self, db, source, sink, reasoner=False):
return (self.node == sink)
class SpecialToken(SpecialPathNode):
def memberProps(self, db, start=1):
while True:
yield db.getMemberProp(start)
start += 1
def values(self, db, node, reasoner=False):
path = self.node
if path == 'any':
return [o for (s, p, o) in db.query(node, 0, 0)]
elif path == 'members':
results = []
for p in self.memberProps(db):
v = db.values(node, p, False)
if v:
results += v
else:
break
return results
elif path == 'self':
return [node]
elif path == 'p-of-s':
return [p for (s, p, o) in db.query(node, 0, 0)]
elif path == 'p-of-o':
return [p for (s, p, o) in db.query(0, 0, node)]
else:
raise UnknownToken(path)
def related(self, db, source, sink, reasoner=False):
path = self.node
if path == 'any':
return db.db.count(source, path, sink, 0)
elif path == 'members':
for p in self.memberProps(db):
if db.related(source, p, sink, False):
return True
return False
elif path == 'self':
return (source == sink)
elif path == 'p-of-s':
return db.db.count(source, sink, 0, 0)
elif path == 'p-of-o':
return db.db.count(0, sink, source, 0)
else:
raise UnknownToken(path)
class PathFilter(SpecialPathNode):
def match(self, db, node): raise Error("No 'match' method defined for %s" % (self))
def values(self, db, node, reasoner=False):
return (node if self.match(db, node) else [])
class StringFilter(PathFilter):
def __init__(self, node):
super(StringFilter, self).__init__(node)
self.re = re.compile(node)
def match(self, db, node):
str = db.info(node)
return self.re.search(str if node > 0 else str[0])
class Walker(object):
def __init__(self, db, fsa):
self.db = db
self.fsa = fsa
self.states = {}
def visited(self, node, i):
if not node in self.states:
self.states[node] = []
return False
else:
return i in self.states[node]
def walk(self, node, i=0):
if not self.visited(node, i):
self.states[node].append(i)
state = self.fsa.fsa[i]
if state.terminal and self.collect(node):
return True
if isinstance(node, int):
for tr in state.transitions:
for val in self.db.values(node, tr.input, False):
if self.walk(val, tr.index):
return True
return False
class Collector(Walker):
def __init__(self, db, fsa):
super(Collector, self).__init__(db, fsa)
self.results = set()
def collect(self, node):
self.results.add(node)
return False
class Reacher(Walker):
def __init__(self, db, fsa, sink):
super(Reacher, self).__init__(db, fsa)
self.sink = sink
def collect(self, node):
return (node == self.sink)
class PathFSA:
def __init__(self, path):
self.expr = path
self.inputs = []
self.states = []
self.fsa = self.construct()
def __repr__(self): return "<fsa %s>" % self.expr
def decorate(self, x):
if not isinstance(x, list):
node = set([self.PathNode(x)])
if not x in self.inputs:
self.inputs = [x] + self.inputs
return (node, node, False)
else:
op = x[0]
(first, last, null) = self.decorate(x[1])
if op == 'seq':
(first2, last2, null2) = self.decorate(x[2])
self.addFollowers(last, first2)
return ((first | first2) if null else first,
(last | last2) if null2 else last2,
null and null2)
elif op == 'seq+':
(first2, last2, null2) = self.decorate(x[2])
self.addFollowers(last, first2)
return ((first | first2) if null else first, last | last2, null)
elif op == 'or':
(first2, last2, null2) = self.decorate(x[2])
return (first | first2, last | last2, null or null2)
elif op == 'rep*':
self.addFollowers(last, first)
return (first, last, True)
def addFollowers(self, f, t):
for i in f:
i.follows |= t
def addState(self, positions):
for item in self.states:
if item.positions == positions:
return self.states.index(item)
self.states.append(self.TempState(positions))
return len(self.states)-1
def construct(self):
self.addState(self.decorate(['seq', self.expr, None])[0])
i = 0
while (i < len(self.states)):
state = self.states[i]
for input in self.inputs:
positions = set()
for p in state.positions:
if p.link is input:
positions |= p.follows
if positions:
j = self.addState(positions)
state.transitions = [self.Transition(input, j)] + state.transitions
i += 1
result = []
for s in self.states:
terminal = False
for n in s.positions:
if n.link == None:
terminal = True
break
result.append(self.State(terminal, s.transitions[::-1]))
return result
class PathNode:
def __init__(self, link): (self.link, self.follows) = (link, set())
def __repr__(self): return "<node %s>" % (self.link)
class TempState:
def __init__(self, positions):
self.positions = positions
self.transitions = []
class Transition:
def __init__(self, input, index): (self.input, self.index) = (input, index)
def __repr__(self): return "<trans %s-->%s>" % (self.input, self.index)
class State:
def __init__(self, terminal, transitions):
self.terminal = terminal
self.transitions = transitions
def __repr__(self): return "<state %s, %s>" % (self.transitions, self.terminal)
class LiteralParser:
def __init__(self, db):
self.db = db
dts = {'xsd:string': lambda self, c: c,
'xsd:boolean': lambda self, c: self.parseBoolean(c),
'xsd:float': lambda self, c: float(c),
'xsd:double': lambda self, c: float(c),
'xsd:dateTime': lambda self, c: self.parseDateTime(c),
'xsd:date': lambda self, c: self.parseDateTime(c, False),
'xsd:normalizedString': lambda self, c: self.parseNormalizedString(c),
'xsd:integer': lambda self, c: int(c),
'xsd:int': lambda self, c: int(c) }
self.datatypes = {}
for dt in dts:
self.datatypes[self.db[dt]] = dts[dt]
self.iso8601 = iso8601.iso8601()
def parseLiteral(self, literal, datatype):
(contents, dt, lang) = self.db.info(literal)
return (self.datatypes.get(datatype or dt, lambda s, c: c))(self, contents)
def parseBoolean(self, contents):
if contents == "1" or contents == "true":
return True
elif contents == "0" or contents == "false":
return False
else:
raise Error("Illegal xsd:boolean value '%'" % (contents))
def parseNormalizedString(self, contents):
# must write this, someday...
return contents
def parseDateTime(self, contents, includeTime=True):
(date, hasTime) = self.iso8601.parse(contents)
if not date:
raise Error("Unable parse '%s' as a date" % contents)
elif includeTime != hasTime:
raise Error("Parsed time '%s' does not match datatype" % contents)
else:
return date
| gpl-2.0 |
ibab/tensorflow | tensorflow/contrib/linear_optimizer/python/kernel_tests/sdca_ops_test.py | 8 | 30879 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SdcaModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import uuid
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.platform import googletest
_MAX_ITERATIONS = 100
def make_example_proto(feature_dict, target, value=1.0):
e = tf.train.Example()
features = e.features
features.feature['target'].float_list.value.append(target)
for key, values in feature_dict.items():
features.feature[key + '_indices'].int64_list.value.extend(values)
features.feature[key + '_values'].float_list.value.extend([value] *
len(values))
return e
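# Example (illustrative): make_example_proto({'age': [0], 'gender': [1]}, 1)
# yields an Example with sparse 'age'/'gender' index features, matching value
# features of 1.0, and a float 'target' feature of 1.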
def make_example_dict(example_protos, example_weights):
def parse_examples(example_protos):
features = {
'target': tf.FixedLenFeature(shape=[1],
dtype=tf.float32,
default_value=0),
'age_indices': tf.VarLenFeature(dtype=tf.int64),
'age_values': tf.VarLenFeature(dtype=tf.float32),
'gender_indices': tf.VarLenFeature(dtype=tf.int64),
'gender_values': tf.VarLenFeature(dtype=tf.float32)
}
return tf.parse_example(
[e.SerializeToString() for e in example_protos], features)
sparse_merge = lambda ids, values: tf.sparse_merge(ids, values, ids.shape[1])
parsed = parse_examples(example_protos)
sparse_features = [
sparse_merge(parsed['age_indices'], parsed['age_values']),
sparse_merge(parsed['gender_indices'], parsed['gender_values'])
]
return dict(sparse_features=sparse_features,
dense_features=[],
example_weights=example_weights,
example_labels=tf.reshape(parsed['target'], [-1]),
example_ids=['%d' % i for i in xrange(0, len(example_protos))])
def make_dense_examples_dict(dense_feature_values, weights, labels):
dense_feature_tensors = ([
tf.convert_to_tensor(values,
dtype=tf.float32) for values in dense_feature_values
])
return dict(sparse_features=[],
dense_features=dense_feature_tensors,
example_weights=weights,
example_labels=labels,
example_ids=['%d' % i for i in xrange(0, len(labels))])
def make_variable_dict(max_age, max_gender):
# TODO(sibyl-toe9oF2e): Figure out how to derive max_age & max_gender from
# examples_dict.
age_weights = tf.Variable(tf.zeros([max_age + 1], dtype=tf.float32))
gender_weights = tf.Variable(tf.zeros([max_gender + 1], dtype=tf.float32))
return dict(sparse_features_weights=[age_weights, gender_weights],
dense_features_weights=[])
def make_dense_variable_dict(num_dense_features, num_examples):
feature_weights = ([
tf.Variable(tf.zeros([1],
dtype=tf.float32))
for _ in xrange(0, num_dense_features)
])
return dict(sparse_features_weights=[],
dense_features_weights=feature_weights,
dual=tf.Variable(tf.zeros(
[num_examples],
dtype=tf.float32)),
primal_loss=tf.Variable(tf.zeros(
[],
dtype=tf.float64)))
def get_binary_predictions_for_logistic(predictions, cutoff=0.5):
return tf.cast(
tf.greater_equal(predictions, tf.ones_like(predictions) * cutoff),
dtype=tf.int32)
def get_binary_predictions_for_hinge(predictions):
return tf.cast(
tf.greater_equal(predictions, tf.zeros_like(predictions)),
dtype=tf.int32)
# Setup the single container shared across all tests. This is testing proper
# isolation across optimizers instantiated in each of the tests below.
CONTAINER = uuid.uuid4().hex
# Clear the shared container.
def tearDown():
# TODO(sibyl-Mooth6ku): Proper cleanup of Containers when possible.
pass
# TODO(sibyl-Mooth6ku): Add tests that exercise L1 and Shrinking.
# TODO(sibyl-vie3Poto): Refactor tests to avoid repetition of boilerplate code.
class SdcaOptimizerTest(TensorFlowTestCase):
"""Base SDCA optimizer test class for any loss type."""
def _single_threaded_test_session(self):
config = tf.ConfigProto(inter_op_parallelism_threads=1,
intra_op_parallelism_threads=1)
return self.test_session(use_gpu=False, config=config)
# The following tests, check that operations raise errors when certain
# preconditions on the input data are not satisfied. These errors are raised
# regardless of the loss type.
def testNoWeightedExamples(self):
# Setup test data with 1 positive, and 1 negative example.
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
]
# Zeroed out example weights.
example_weights = [0.0, 0.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(CONTAINER, examples, variables, options)
tf.initialize_all_variables().run()
self.assertAllClose([0.5, 0.5], lr.predictions(examples).eval())
lr.minimize().run()
self.assertAllClose([0.5, 0.5], lr.predictions(examples).eval())
with self.assertRaisesOpError(
'No examples found or all examples have zero weight.'):
lr.approximate_duality_gap().eval()
class SdcaWithLogisticLossTest(SdcaOptimizerTest):
"""SDCA optimizer test class for logistic loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(CONTAINER, examples, variables, options)
tf.initialize_all_variables().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
for _ in xrange(_MAX_ITERATIONS):
train_op.run()
# The high tolerance in unregularized_loss comparisons is due to the
# fact that it's possible to trade off unregularized_loss vs.
# regularization and still have a sum that is quite close to the
# optimal regularized_loss value. SDCA's duality gap only ensures that
# the regularized_loss is within 0.01 of optimal.
# 0.525457 is the optimal regularized_loss.
# 0.411608 is the unregularized_loss at that optimum.
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(0.01,
lr.approximate_duality_gap().eval(),
rtol=1e-2,
atol=1e-2)
def testSimpleNoL2(self):
# Same as test above (so comments from above apply) but without an L2.
# The algorithm should behave as if we have an L2 of 1 in optimization but
# 0 in regularized_loss.
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=0,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(CONTAINER, examples, variables, options)
tf.initialize_all_variables().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
for _ in xrange(_MAX_ITERATIONS):
train_op.run()
# There is neither L1 nor L2 loss, so regularized and unregularized losses
# should be exactly the same.
self.assertAllClose(0.40244, unregularized_loss.eval(), atol=0.01)
self.assertAllClose(0.40244, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(0.01,
lr.approximate_duality_gap().eval(),
rtol=1e-2,
atol=1e-2)
def testSomeUnweightedExamples(self):
# Setup test data with 4 examples, but should produce the same
# results as testSimple.
example_protos = [
# Will be used.
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
# Will be ignored.
make_example_proto(
{'age': [1],
'gender': [0]}, 0),
# Will be used.
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
# Will be ignored.
make_example_proto(
{'age': [1],
'gender': [0]}, 1),
]
example_weights = [1.0, 0.0, 1.0, 0.0]
with self._single_threaded_test_session():
# Only use examples 0 and 2
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(CONTAINER, examples, variables, options)
tf.initialize_all_variables().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in xrange(_MAX_ITERATIONS):
train_op.run()
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllClose([0, 1, 1, 1], predicted_labels.eval())
self.assertAllClose(0.01,
lr.approximate_duality_gap().eval(),
rtol=1e-2,
atol=1e-2)
def testFractionalExampleLabel(self):
# Setup test data with 1 positive, and 1 mostly-negative example.
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0.1),
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(CONTAINER, examples, variables, options)
tf.initialize_all_variables().run()
with self.assertRaisesOpError(
'Only labels of 0.0 or 1.0 are supported right now.'):
lr.minimize().run()
def testImbalanced(self):
# Setup test data with 1 positive, and 3 negative examples.
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
make_example_proto(
{'age': [2],
'gender': [0]}, 0),
make_example_proto(
{'age': [3],
'gender': [0]}, 0),
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
]
example_weights = [1.0, 1.0, 1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(3, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(CONTAINER, examples, variables, options)
tf.initialize_all_variables().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in xrange(_MAX_ITERATIONS):
train_op.run()
self.assertAllClose(0.226487 + 0.102902,
unregularized_loss.eval(),
atol=0.08)
self.assertAllClose(0.328394 + 0.131364, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 0, 0, 1], predicted_labels.eval())
self.assertAllClose(0.01,
lr.approximate_duality_gap().eval(),
rtol=1e-2,
atol=1e-2)
def testImbalancedWithExampleWeights(self):
# Setup test data with 1 positive, and 1 negative example.
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
]
example_weights = [3.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(CONTAINER, examples, variables, options)
tf.initialize_all_variables().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in xrange(_MAX_ITERATIONS):
train_op.run()
self.assertAllClose(0.284860, unregularized_loss.eval(), atol=0.08)
self.assertAllClose(0.408044, loss.eval(), atol=0.012)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(0.01,
lr.approximate_duality_gap().eval(),
rtol=1e-2,
atol=1e-2)
def testInstancesOfOneClassOnly(self):
# Setup test data with 1 positive (ignored), and 1 negative example.
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
make_example_proto(
{'age': [1],
'gender': [0]}, 1), # Shares gender with the instance above.
]
example_weights = [1.0, 0.0] # Second example "omitted" from training.
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(CONTAINER, examples, variables, options)
tf.initialize_all_variables().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in xrange(_MAX_ITERATIONS):
train_op.run()
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 0], predicted_labels.eval())
self.assertAllClose(0.01,
lr.approximate_duality_gap().eval(),
rtol=1e-2,
atol=1e-2)
# TODO(katsiaspis): add a test for the case when examples at the end of an
# epoch are repeated, since example id may be duplicated.
class SdcaWithLinearLossTest(SdcaOptimizerTest):
"""SDCA optimizer test class for linear (squared) loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, -10.0),
make_example_proto(
{'age': [1],
'gender': [1]}, 14.0),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(CONTAINER, examples, variables, options)
tf.initialize_all_variables().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in xrange(_MAX_ITERATIONS):
train_op.run()
# Predictions should be 2/3 of label due to minimizing regularized loss:
# (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2
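      # Rough sanity check of those constants (assuming the effective
      # per-example objective reduces to (label - 2w)^2 / 2 + w^2 for the
      # shared weight w): the derivative -2*(label - 2w) + 2w vanishes at
      # w = label / 3, so the prediction 2w is 2/3 of the label, i.e. -20/3
      # and 28/3 here.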
self.assertAllClose([-20.0 / 3.0, 28.0 / 3.0],
predictions.eval(),
rtol=0.005)
self.assertAllClose(0.01,
lr.approximate_duality_gap().eval(),
rtol=1e-2,
atol=1e-2)
def testL2Regularization(self):
# Setup test data
example_protos = [
# 2 identical examples
make_example_proto(
{'age': [0],
'gender': [0]}, -10.0),
make_example_proto(
{'age': [0],
'gender': [0]}, -10.0),
# 2 more identical examples
make_example_proto(
{'age': [1],
'gender': [1]}, 14.0),
make_example_proto(
{'age': [1],
'gender': [1]}, 14.0),
]
example_weights = [1.0, 1.0, 1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=16,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(CONTAINER, examples, variables, options)
tf.initialize_all_variables().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in xrange(_MAX_ITERATIONS):
train_op.run()
# Predictions should be 1/5 of label due to minimizing regularized loss:
# (label - 2 * weight)^2 + L2 * 16 * weight^2
optimal1 = -10.0 / 5.0
optimal2 = 14.0 / 5.0
self.assertAllClose(
[optimal1, optimal1, optimal2, optimal2],
predictions.eval(),
rtol=0.01)
def testL1Regularization(self):
# Setup test data
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, -10.0),
make_example_proto(
{'age': [1],
'gender': [1]}, 14.0),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1.0,
symmetric_l1_regularization=4.0,
loss_type='squared_loss')
lr = SdcaModel(CONTAINER, examples, variables, options)
tf.initialize_all_variables().run()
prediction = lr.predictions(examples)
loss = lr.regularized_loss(examples)
train_op = lr.minimize()
for _ in xrange(_MAX_ITERATIONS):
train_op.run()
      # Predictions should be -4.0, 20/3 due to minimizing regularized loss:
# (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2 + L1 * 4 * weight
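      # Rough sanity check (assuming an effective objective of
      # (label - 2w)^2 / 2 + w^2 + 8*|w| per example): for label -10 the
      # optimality condition 6w + 12 = 0 gives w = -2 and a prediction of -4;
      # for label 14, 6w - 20 = 0 gives w = 10/3 and a prediction of 20/3.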
self.assertAllClose([-4.0, 20.0 / 3.0], prediction.eval(), rtol=0.08)
# Loss should be the sum of the regularized loss value from above per
# example after plugging in the optimal weights.
self.assertAllClose(308.0 / 6.0, loss.eval(), atol=0.01)
def testFeatureValues(self):
# Setup test data
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, -10.0, -2.0),
make_example_proto(
{'age': [1],
'gender': [1]}, 14.0, 2.0),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(CONTAINER, examples, variables, options)
tf.initialize_all_variables().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in xrange(_MAX_ITERATIONS):
train_op.run()
# Predictions should be 8/9 of label due to minimizing regularized loss:
# (label - 2 * 2 * weight)^2 / 2 + L2 * 2 * weight^2
self.assertAllClose([-10.0 * 8 / 9, 14.0 * 8 / 9],
predictions.eval(),
rtol=0.07)
def testDenseFeatures(self):
with self._single_threaded_test_session():
examples = make_dense_examples_dict(
dense_feature_values=[[-2.0, 0.0], [0.0, 2.0]],
weights=[1.0, 1.0],
labels=[-10.0, 14.0])
variables = make_dense_variable_dict(2, 2)
options = dict(symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(CONTAINER, examples, variables, options)
tf.initialize_all_variables().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in xrange(_MAX_ITERATIONS):
train_op.run()
# Predictions should be 4/5 of label due to minimizing regularized loss:
# (label - 2 * weight)^2 / 2 + L2 * weight^2
self.assertAllClose([-10.0 * 4 / 5, 14.0 * 4 / 5],
predictions.eval(),
rtol=0.01)
loss = lr.regularized_loss(examples)
self.assertAllClose(148.0 / 10.0, loss.eval(), atol=0.01)
class SdcaWithHingeLossTest(SdcaOptimizerTest):
"""SDCA optimizer test class for hinge loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto(
{'age': [0],
'gender': [0]}, 0),
make_example_proto(
{'age': [1],
'gender': [1]}, 1),
]
example_weights = [1.0, 1.0]
with self.test_session(use_gpu=False):
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(CONTAINER, examples, variables, options)
tf.initialize_all_variables().run()
# Before minimization, the weights default to zero. There is no loss due
# to regularization, only unregularized loss which is 0.5 * (1+1) = 1.0.
predictions = model.predictions(examples)
self.assertAllClose([0.0, 0.0], predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(1.0, unregularized_loss.eval())
self.assertAllClose(1.0, regularized_loss.eval())
# After minimization, the model separates perfectly the data points. There
# are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender (say w3
# and w4). Solving the system w1 + w3 = 1.0, w2 + w4 = -1.0 and minimizing
# wrt to \|\vec{w}\|_2, gives w1=w3=1/2 and w2=w4=-1/2. This gives 0.0
# unregularized loss and 0.25 L2 loss.
train_op = model.minimize()
for _ in xrange(_MAX_ITERATIONS):
train_op.run()
binary_predictions = get_binary_predictions_for_hinge(predictions)
self.assertAllEqual([-1.0, 1.0], predictions.eval())
self.assertAllEqual([0, 1], binary_predictions.eval())
self.assertAllClose(0.0, unregularized_loss.eval())
self.assertAllClose(0.25, regularized_loss.eval(), atol=0.05)
def testDenseFeaturesPerfectlySeparable(self):
with self._single_threaded_test_session():
examples = make_dense_examples_dict(
dense_feature_values=[[1.0, 1.0], [1.0, -1.0]],
weights=[1.0, 1.0],
labels=[1.0, 0.0])
variables = make_dense_variable_dict(2, 2)
options = dict(symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(CONTAINER, examples, variables, options)
tf.initialize_all_variables().run()
predictions = model.predictions(examples)
binary_predictions = get_binary_predictions_for_hinge(predictions)
train_op = model.minimize()
for _ in xrange(_MAX_ITERATIONS):
train_op.run()
self.assertAllClose([1.0, -1.0], predictions.eval(), atol=0.05)
self.assertAllEqual([1, 0], binary_predictions.eval())
# (1.0, 1.0) and (1.0, -1.0) are perfectly separable by x-axis (that is,
# the SVM's functional margin >=1), so the unregularized loss is ~0.0.
# There is only loss due to l2-regularization. For these datapoints, it
# turns out that w_1~=0.0 and w_2~=1.0 which means that l2 loss is ~0.25.
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(0.0, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.25, regularized_loss.eval(), atol=0.02)
def testDenseFeaturesSeparableWithinMargins(self):
with self._single_threaded_test_session():
examples = make_dense_examples_dict(
dense_feature_values=[[1.0, 1.0], [0.5, -0.5]],
weights=[1.0, 1.0],
labels=[1.0, 0.0])
variables = make_dense_variable_dict(2, 2)
options = dict(symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(CONTAINER, examples, variables, options)
tf.initialize_all_variables().run()
predictions = model.predictions(examples)
binary_predictions = get_binary_predictions_for_hinge(predictions)
train_op = model.minimize()
for _ in xrange(_MAX_ITERATIONS):
train_op.run()
# (1.0, 0.5) and (1.0, -0.5) are separable by x-axis but the datapoints
# are within the margins so there is unregularized loss (1/2 per example).
# For these datapoints, optimal weights are w_1~=0.0 and w_2~=1.0 which
# gives an L2 loss of ~0.25.
self.assertAllClose([0.5, -0.5], predictions.eval(), rtol=0.05)
self.assertAllEqual([1, 0], binary_predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(0.5, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.75, regularized_loss.eval(), atol=0.02)
def testDenseFeaturesWeightedExamples(self):
with self._single_threaded_test_session():
examples = make_dense_examples_dict(
dense_feature_values=[[1.0, 1.0], [0.5, -0.5]],
weights=[3.0, 1.0],
labels=[1.0, 0.0])
variables = make_dense_variable_dict(2, 2)
options = dict(symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(CONTAINER, examples, variables, options)
tf.initialize_all_variables().run()
predictions = model.predictions(examples)
binary_predictions = get_binary_predictions_for_hinge(predictions)
train_op = model.minimize()
for _ in xrange(_MAX_ITERATIONS):
train_op.run()
# Point (1.0, 0.5) has higher weight than (1.0, -0.5) so the model will
# try to increase the margin from (1.0, 0.5). Due to regularization,
# (1.0, -0.5) will be within the margin. For these points and example
# weights, the optimal weights are w_1~=0.4 and w_2~=1.2 which give an L2
# loss of 0.5 * 0.25 * 0.25 * 1.6 = 0.2. The binary predictions will be
# correct, but the boundary will be much closer to the 2nd point than the
# first one.
self.assertAllClose([1.0, -0.2], predictions.eval(), atol=0.05)
self.assertAllEqual([1, 0], binary_predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(0.2, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.4, regularized_loss.eval(), atol=0.02)
if __name__ == '__main__':
googletest.main()
| apache-2.0 |
mitsuhiko/babel | scripts/make-release.py | 4 | 4149 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
make-release
~~~~~~~~~~~~
Helper script that performs a release. Does pretty much everything
automatically for us.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
import re
from datetime import datetime, date
from subprocess import Popen, PIPE
_date_clean_re = re.compile(r'(\d+)(st|nd|rd|th)')
def parse_changelog():
with open('CHANGES') as f:
lineiter = iter(f)
for line in lineiter:
match = re.search(r'^Version\s+(.*)', line.strip())
if match is None:
continue
version = match.group(1).strip()
if lineiter.next().count('-') != len(match.group(0)):
continue
while 1:
change_info = lineiter.next().strip()
if change_info:
break
match = re.search(r'released on (\w+\s+\d+\w+\s+\d+)'
r'(?:, codename (.*))?', change_info,
flags=re.IGNORECASE)
if match is None:
continue
datestr, codename = match.groups()
return version, parse_date(datestr), codename
def bump_version(version):
try:
parts = map(int, version.split('.'))
except ValueError:
fail('Current version is not numeric')
if parts[-1] != 0:
parts[-1] += 1
else:
parts[0] += 1
return '.'.join(map(str, parts))
def parse_date(string):
string = _date_clean_re.sub(r'\1', string)
return datetime.strptime(string, '%B %d %Y')
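# Quick illustration of the two helpers above (hypothetical inputs):
# bump_version('2.6.1') returns '2.6.2', and parse_date('July 3rd 2013') first
# strips the ordinal suffix via _date_clean_re, then parses 'July 3 2013' with
# strptime('%B %d %Y'), yielding datetime(2013, 7, 3, 0, 0).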
def set_filename_version(filename, version_number, pattern):
changed = []
def inject_version(match):
before, old, after = match.groups()
changed.append(True)
return before + version_number + after
with open(filename) as f:
contents = re.sub(r"^(\s*%s\s*=\s*')(.+?)(')" % pattern,
inject_version, f.read(),
flags=re.DOTALL | re.MULTILINE)
if not changed:
fail('Could not find %s in %s', pattern, filename)
with open(filename, 'w') as f:
f.write(contents)
def set_init_version(version):
info('Setting __init__.py version to %s', version)
set_filename_version('babel/__init__.py', version, '__version__')
def set_setup_version(version):
info('Setting setup.py version to %s', version)
set_filename_version('setup.py', version, 'version')
def build_and_upload():
Popen([sys.executable, 'setup.py', 'release', 'sdist', 'upload']).wait()
def fail(message, *args):
print >> sys.stderr, 'Error:', message % args
sys.exit(1)
def info(message, *args):
print >> sys.stderr, message % args
def get_git_tags():
return set(Popen(['git', 'tag'], stdout=PIPE).communicate()[0].splitlines())
def git_is_clean():
return Popen(['git', 'diff', '--quiet']).wait() == 0
def make_git_commit(message, *args):
message = message % args
Popen(['git', 'commit', '-am', message]).wait()
def make_git_tag(tag):
info('Tagging "%s"', tag)
Popen(['git', 'tag', tag]).wait()
def main():
os.chdir(os.path.join(os.path.dirname(__file__), '..'))
rv = parse_changelog()
if rv is None:
fail('Could not parse changelog')
version, release_date, codename = rv
dev_version = bump_version(version) + '-dev'
info('Releasing %s (codename %s, release date %s)',
version, codename, release_date.strftime('%d/%m/%Y'))
tags = get_git_tags()
if version in tags:
fail('Version "%s" is already tagged', version)
if release_date.date() != date.today():
        fail('Release date is not today (%s != %s)', release_date.date(), date.today())
if not git_is_clean():
fail('You have uncommitted changes in git')
set_init_version(version)
set_setup_version(version)
make_git_commit('Bump version number to %s', version)
make_git_tag(version)
build_and_upload()
set_init_version(dev_version)
set_setup_version(dev_version)
if __name__ == '__main__':
main()
| bsd-3-clause |
ryano144/intellij-community | python/lib/Lib/pydoc.py | 69 | 90393 | #!/usr/bin/env python
# -*- coding: Latin-1 -*-
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide online
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on a given port on the
local machine to generate documentation web pages.
For platforms without a command line, "pydoc -g" starts the HTTP server
and also pops up a little window for controlling it.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
http://www.python.org/doc/current/lib/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__author__ = "Ka-Ping Yee <[email protected]>"
__date__ = "26 February 2001"
__version__ = "$Revision: 54366 $"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import sys, imp, os, re, types, inspect, __builtin__, pkgutil
from repr import Repr
from string import expandtabs, find, join, lower, split, strip, rfind, rstrip
try:
from collections import deque
except ImportError:
# Python 2.3 compatibility
class deque(list):
def popleft(self):
return self.pop(0)
# --------------------------------------------------------- common routines
def pathdirs():
"""Convert sys.path into a list of absolute, existing, unique paths."""
dirs = []
normdirs = []
for dir in sys.path:
dir = os.path.abspath(dir or '.')
normdir = os.path.normcase(dir)
if normdir not in normdirs and os.path.isdir(dir):
dirs.append(dir)
normdirs.append(normdir)
return dirs
def getdoc(object):
"""Get the doc string or comments for an object."""
result = inspect.getdoc(object) or inspect.getcomments(object)
return result and re.sub('^ *\n', '', rstrip(result)) or ''
def splitdoc(doc):
"""Split a doc string into a synopsis line (if any) and the rest."""
lines = split(strip(doc), '\n')
if len(lines) == 1:
return lines[0], ''
elif len(lines) >= 2 and not rstrip(lines[1]):
return lines[0], join(lines[2:], '\n')
return '', join(lines, '\n')
def classname(object, modname):
"""Get a class name and qualify it with a module name if necessary."""
name = object.__name__
if object.__module__ != modname:
name = object.__module__ + '.' + name
return name
def isdata(object):
"""Check if an object is of a type that probably means it's data."""
return not (inspect.ismodule(object) or inspect.isclass(object) or
inspect.isroutine(object) or inspect.isframe(object) or
inspect.istraceback(object) or inspect.iscode(object))
def replace(text, *pairs):
"""Do a series of global replacements on a string."""
while pairs:
text = join(split(text, pairs[0]), pairs[1])
pairs = pairs[2:]
return text
def cram(text, maxlen):
"""Omit part of a string if needed to make it fit in a maximum length."""
if len(text) > maxlen:
pre = max(0, (maxlen-3)//2)
post = max(0, maxlen-3-pre)
return text[:pre] + '...' + text[len(text)-post:]
return text
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
def stripid(text):
"""Remove the hexadecimal id from a Python object representation."""
# The behaviour of %p is implementation-dependent in terms of case.
if _re_stripid.search(repr(Exception)):
return _re_stripid.sub(r'\1', text)
return text
def _is_some_method(obj):
return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj)
def allmethods(cl):
methods = {}
for key, value in inspect.getmembers(cl, _is_some_method):
methods[key] = 1
for base in cl.__bases__:
methods.update(allmethods(base)) # all your base are belong to us
for key in methods.keys():
methods[key] = getattr(cl, key)
return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
def visiblename(name, all=None):
"""Decide whether to show documentation on a variable."""
# Certain special names are redundant.
if name in ('__builtins__', '__doc__', '__file__', '__path__',
'__module__', '__name__', '__slots__'): return 0
# Private names are hidden, but special names are displayed.
if name.startswith('__') and name.endswith('__'): return 1
if all is not None:
# only document that which the programmer exported in __all__
return name in all
else:
return not name.startswith('_')
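# Illustrative calls (not from the original source): visiblename('__init__')
# is true because special __dunder__ names are always shown,
# visiblename('_cache') is false because private names are hidden, and
# visiblename('foo', all=['bar']) is false because only names listed in
# __all__ are documented when __all__ is given.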
def classify_class_attrs(object):
"""Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
def fixup((name, kind, cls, value)):
if inspect.isdatadescriptor(value):
kind = 'data descriptor'
return name, kind, cls, value
return map(fixup, inspect.classify_class_attrs(object))
# ----------------------------------------------------- module manipulation
def ispackage(path):
"""Guess whether a path refers to a package directory."""
if os.path.isdir(path):
for ext in ('.py', '.pyc', '.pyo', '$py.class'):
if os.path.isfile(os.path.join(path, '__init__' + ext)):
return True
return False
def source_synopsis(file):
line = file.readline()
while line[:1] == '#' or not strip(line):
line = file.readline()
if not line: break
line = strip(line)
if line[:4] == 'r"""': line = line[1:]
if line[:3] == '"""':
line = line[3:]
if line[-1:] == '\\': line = line[:-1]
while not strip(line):
line = file.readline()
if not line: break
result = strip(split(line, '"""')[0])
else: result = None
return result
def synopsis(filename, cache={}):
"""Get the one-line summary out of a module file."""
mtime = os.stat(filename).st_mtime
lastupdate, result = cache.get(filename, (0, None))
if lastupdate < mtime:
info = inspect.getmoduleinfo(filename)
try:
file = open(filename)
except IOError:
# module can't be opened, so skip it
return None
if info and 'b' in info[2]: # binary modules have to be imported
try: module = imp.load_module('__temp__', file, filename, info[1:])
except: return None
result = (module.__doc__ or '').splitlines()[0]
del sys.modules['__temp__']
else: # text modules can be directly examined
result = source_synopsis(file)
file.close()
cache[filename] = (mtime, result)
return result
class ErrorDuringImport(Exception):
"""Errors that occurred while trying to import something to document it."""
def __init__(self, filename, (exc, value, tb)):
self.filename = filename
self.exc = exc
self.value = value
self.tb = tb
def __str__(self):
exc = self.exc
if type(exc) is types.ClassType:
exc = exc.__name__
return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
"""Import a Python source file or compiled file given its path."""
magic = imp.get_magic()
file = open(path, 'r')
if file.read(len(magic)) == magic:
kind = imp.PY_COMPILED
else:
kind = imp.PY_SOURCE
file.close()
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
file = open(path, 'r')
try:
module = imp.load_module(name, file, path, (ext, 'r', kind))
except:
raise ErrorDuringImport(path, sys.exc_info())
file.close()
return module
def safeimport(path, forceload=0, cache={}):
"""Import a module; handle errors; return None if the module isn't found.
If the module *is* found but an exception occurs, it's wrapped in an
ErrorDuringImport exception and reraised. Unlike __import__, if a
package path is specified, the module at the end of the path is returned,
not the package at the beginning. If the optional 'forceload' argument
is 1, we reload the module from disk (unless it's a dynamic extension)."""
try:
# If forceload is 1 and the module has been previously loaded from
# disk, we always have to reload the module. Checking the file's
# mtime isn't good enough (e.g. the module could contain a class
# that inherits from another module that has changed).
if forceload and path in sys.modules:
if path not in sys.builtin_module_names:
# Avoid simply calling reload() because it leaves names in
# the currently loaded module lying around if they're not
# defined in the new source file. Instead, remove the
# module from sys.modules and re-import. Also remove any
# submodules because they won't appear in the newly loaded
# module's namespace if they're already in sys.modules.
subs = [m for m in sys.modules if m.startswith(path + '.')]
for key in [path] + subs:
# Prevent garbage collection.
cache[key] = sys.modules[key]
del sys.modules[key]
module = __import__(path)
except:
# Did the error occur before or after the module was found?
(exc, value, tb) = info = sys.exc_info()
if path in sys.modules:
# An error occurred while executing the imported module.
raise ErrorDuringImport(sys.modules[path].__file__, info)
elif exc is SyntaxError:
# A SyntaxError occurred before we could execute the module.
raise ErrorDuringImport(value.filename, info)
elif exc is ImportError and \
split(lower(str(value)))[:2] == ['no', 'module']:
# The module was not found.
return None
else:
# Some other error occurred during the importing process.
raise ErrorDuringImport(path, sys.exc_info())
for part in split(path, '.')[1:]:
try: module = getattr(module, part)
except AttributeError: return None
return module
# ---------------------------------------------------- formatter base class
class Doc:
def document(self, object, name=None, *args):
"""Generate documentation for an object."""
args = (object, name) + args
# 'try' clause is to attempt to handle the possibility that inspect
# identifies something in a way that pydoc itself has issues handling;
# think 'super' and how it is a descriptor (which raises the exception
# by lacking a __name__ attribute) and an instance.
if inspect.isgetsetdescriptor(object): return self.docdata(*args)
if inspect.ismemberdescriptor(object): return self.docdata(*args)
try:
if inspect.ismodule(object): return self.docmodule(*args)
if inspect.isclass(object): return self.docclass(*args)
if inspect.isroutine(object): return self.docroutine(*args)
except AttributeError:
pass
if isinstance(object, property): return self.docproperty(*args)
return self.docother(*args)
def fail(self, object, name=None, *args):
"""Raise an exception for unimplemented types."""
message = "don't know how to document object%s of type %s" % (
name and ' ' + repr(name), type(object).__name__)
raise TypeError, message
docmodule = docclass = docroutine = docother = docproperty = docdata = fail
def getdocloc(self, object):
"""Return the location of module docs or None"""
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
docloc = os.environ.get("PYTHONDOCS",
"http://www.python.org/doc/current/lib")
basedir = os.path.join(sys.exec_prefix, "lib",
"python"+sys.version[0:3])
if (isinstance(object, type(os)) and
(object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
'marshal', 'posix', 'signal', 'sys',
'thread', 'zipimport') or
(file.startswith(basedir) and
not file.startswith(os.path.join(basedir, 'site-packages'))))):
htmlfile = "module-%s.html" % object.__name__
if docloc.startswith("http://"):
docloc = "%s/%s" % (docloc.rstrip("/"), htmlfile)
else:
docloc = os.path.join(docloc, htmlfile)
else:
docloc = None
return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
"""Class for safely making an HTML representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def escape(self, text):
return replace(text, '&', '&', '<', '<', '>', '>')
def repr(self, object):
return Repr.repr(self, object)
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + join(split(type(x).__name__), '_')
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return self.escape(cram(stripid(repr(x)), self.maxother))
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
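            # Hypothetical example: for the string C:\temp (one real backslash),
            # repr() yields 'C:\\temp'; removing every escaped pair leaves no
            # bare backslash, so the value is shown as the raw string r'C:\temp'.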
return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
r'<font color="#c040c0">\1</font>',
self.escape(testrepr))
repr_str = repr_string
def repr_instance(self, x, level):
try:
return self.escape(cram(stripid(repr(x)), self.maxstring))
except:
return self.escape('<%s instance>' % x.__class__.__name__)
repr_unicode = repr_string
class HTMLDoc(Doc):
"""Formatter class for HTML documentation."""
# ------------------------------------------- HTML formatting utilities
_repr_instance = HTMLRepr()
repr = _repr_instance.repr
escape = _repr_instance.escape
def page(self, title, contents):
"""Format an HTML page."""
return '''
<!doctype html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents)
def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">&nbsp;<br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
    ''' % (bgcol, fgcol, title, fgcol, extras or '&nbsp;')
def section(self, title, fgcol, bgcol, contents, width=6,
                prelude='', marginalia=None, gap='&nbsp;'):
"""Format a section with a heading."""
if marginalia is None:
            marginalia = '<tt>' + '&nbsp;' * width + '</tt>'
result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
if prelude:
result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
else:
result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
return result + '\n<td width="100%%">%s</td></tr></table>' % contents
def bigsection(self, title, *args):
"""Format a section with a big heading."""
title = '<big><strong>%s</strong></big>' % title
return self.section(title, *args)
def preformat(self, text):
"""Format literal preformatted text."""
text = self.escape(expandtabs(text))
return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
                             ' ', '&nbsp;', '\n', '<br>\n')
def multicolumn(self, list, format, cols=4):
"""Format a list of items into a multi-column list."""
result = ''
rows = (len(list)+cols-1)/cols
for col in range(cols):
result = result + '<td width="%d%%" valign=top>' % (100/cols)
for i in range(rows*col, rows*col+rows):
if i < len(list):
result = result + format(list[i]) + '<br>\n'
result = result + '</td>'
return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
def grey(self, text): return '<font color="#909090">%s</font>' % text
def namelink(self, name, *dicts):
"""Make a link for an identifier, given name-to-URL mappings."""
for dict in dicts:
if name in dict:
return '<a href="%s">%s</a>' % (dict[name], name)
return name
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s.html#%s">%s</a>' % (
module.__name__, name, classname(object, modname))
return classname(object, modname)
def modulelink(self, object):
"""Make a link for a module."""
return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
def modpkglink(self, (name, path, ispackage, shadowed)):
"""Make a link for a module or package to display in an index."""
if shadowed:
return self.grey(name)
if path:
url = '%s.%s.html' % (path, name)
else:
url = '%s.html' % name
if ispackage:
text = '<strong>%s</strong> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
url = escape(all).replace('"', '"')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/peps/pep-%04d.html' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return join(results, '')
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None):
"""Produce HTML for a class tree as given by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + '<dt><font face="helvetica, arial">'
result = result + self.classlink(c, modname)
if bases and bases != (parent,):
parents = []
for base in bases:
parents.append(self.classlink(base, modname))
result = result + '(' + join(parents, ', ') + ')'
result = result + '\n</font></dt>'
elif type(entry) is type([]):
result = result + '<dd>\n%s</dd>\n' % self.formattree(
entry, modname, c)
return '<dl>\n%s</dl>\n' % result
def docmodule(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a module object."""
name = object.__name__ # ignore the passed-in name
try:
all = object.__all__
except AttributeError:
all = None
parts = split(name, '.')
links = []
for i in range(len(parts)-1):
links.append(
'<a href="%s.html"><font color="#ffffff">%s</font></a>' %
(join(parts[:i+1], '.'), parts[i]))
linkedname = join(links + parts[-1:], '.')
head = '<big><big><strong>%s</strong></big></big>' % linkedname
try:
path = inspect.getabsfile(object)
url = path
if sys.platform == 'win32':
import nturl2path
url = nturl2path.pathname2url(path)
filelink = '<a href="file:%s">%s</a>' % (url, path)
except TypeError:
filelink = '(built-in)'
info = []
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
info.append(self.escape(str(object.__date__)))
if info:
head = head + ' (%s)' % join(info, ', ')
docloc = self.getdocloc(object)
if docloc is not None:
docloc = '<br><a href="%(docloc)s">Module Docs</a>' % locals()
else:
docloc = ''
result = self.heading(
head, '#ffffff', '#7799ee',
'<a href=".">index</a><br>' + filelink + docloc)
modules = inspect.getmembers(object, inspect.ismodule)
classes, cdict = [], {}
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
(inspect.getmodule(value) or object) is object):
if visiblename(key, all):
classes.append((key, value))
cdict[key] = cdict[value] = '#' + key
for key, value in classes:
for base in value.__bases__:
key, modname = base.__name__, base.__module__
module = sys.modules.get(modname)
if modname != name and module and hasattr(module, key):
if getattr(module, key) is base:
if not key in cdict:
cdict[key] = cdict[base] = modname + '.html#' + key
funcs, fdict = [], {}
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all):
funcs.append((key, value))
fdict[key] = '#-' + key
if inspect.isfunction(value): fdict[value] = fdict[key]
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all):
data.append((key, value))
doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
if hasattr(object, '__path__'):
modpkgs = []
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs.append((modname, name, ispkg, 0))
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
result = result + self.bigsection(
'Package Contents', '#ffffff', '#aa55cc', contents)
elif modules:
contents = self.multicolumn(
modules, lambda (key, value), s=self: s.modulelink(value))
result = result + self.bigsection(
                'Modules', '#ffffff', '#aa55cc', contents)
if classes:
classlist = map(lambda (key, value): value, classes)
contents = [
self.formattree(inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Classes', '#ffffff', '#ee77aa', join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Functions', '#ffffff', '#eeaa77', join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.document(value, key))
result = result + self.bigsection(
'Data', '#ffffff', '#55aa55', join(contents, '<br>\n'))
if hasattr(object, '__author__'):
contents = self.markup(str(object.__author__), self.preformat)
result = result + self.bigsection(
'Author', '#ffffff', '#7799ee', contents)
if hasattr(object, '__credits__'):
contents = self.markup(str(object.__credits__), self.preformat)
result = result + self.bigsection(
'Credits', '#ffffff', '#7799ee', contents)
return result
def docclass(self, object, name=None, mod=None, funcs={}, classes={},
*ignored):
"""Produce HTML documentation for a class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
contents = []
push = contents.append
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('<hr>\n')
self.needone = 1
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
hr.maybe()
push('<dl><dt>Method resolution order:</dt>\n')
for base in mro:
push('<dd>%s</dd>\n' % self.classlink(base,
object.__module__))
push('</dl>\n')
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self.document(getattr(object, name), name, mod,
funcs, classes, mdict, object))
push('\n')
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
base = self.docother(getattr(object, name), name, mod)
if callable(value) or inspect.isdatadescriptor(value):
doc = getattr(value, "__doc__", None)
else:
doc = None
if doc is None:
push('<dl><dt>%s</dl>\n' % base)
else:
doc = self.markup(getdoc(value), self.preformat,
funcs, classes, mdict)
doc = '<dd><tt>%s</tt>' % doc
push('<dl><dt>%s%s</dl>\n' % (base, doc))
push('\n')
return attrs
attrs = filter(lambda (name, kind, cls, value): visiblename(name),
classify_class_attrs(object))
mdict = {}
for key, kind, homecls, value in attrs:
mdict[key] = anchor = '#' + name + '-' + key
value = getattr(object, key)
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
mdict[value] = anchor
except TypeError:
pass
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = 'defined here'
else:
tag = 'inherited from %s' % self.classlink(thisclass,
object.__module__)
tag += ':<br>\n'
# Sort attrs by name.
try:
attrs.sort(key=lambda t: t[0])
except TypeError:
attrs.sort(lambda t1, t2: cmp(t1[0], t2[0])) # 2.3 compat
# Pump out the attrs, segregated by kind.
attrs = spill('Methods %s' % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill('Class methods %s' % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill('Static methods %s' % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata('Data and other attributes %s' % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = ''.join(contents)
if name == realname:
title = '<a name="%s">class <strong>%s</strong></a>' % (
name, realname)
else:
title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
name, name, realname)
if bases:
parents = []
for base in bases:
parents.append(self.classlink(base, object.__module__))
title = title + '(%s)' % join(parents, ', ')
doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
        doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc
return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
def formatvalue(self, object):
"""Format an argument default value as text."""
return self.grey('=' + self.repr(object))
def docroutine(self, object, name=None, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
realname = object.__name__
name = name or realname
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + self.classlink(imclass, mod)
else:
if object.im_self is not None:
note = ' method of %s instance' % self.classlink(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % self.classlink(imclass,mod)
object = object.im_func
if name == realname:
title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
reallink = '<a href="#%s">%s</a>' % (
cl.__name__ + '-' + realname, realname)
skipdocs = 1
else:
reallink = realname
title = '<a name="%s"><strong>%s</strong></a> = %s' % (
anchor, name, reallink)
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
if realname == '<lambda>':
title = '<strong>%s</strong> <em>lambda</em> ' % name
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
if skipdocs:
return '<dl><dt>%s</dt></dl>\n' % decl
else:
doc = self.markup(
getdoc(object), self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push('<dl><dt><strong>%s</strong></dt>\n' % name)
if value.__doc__ is not None:
doc = self.markup(getdoc(value), self.preformat)
push('<dd><tt>%s</tt></dd>\n' % doc)
push('</dl>\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a property."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a data object."""
lhs = name and '<strong>%s</strong> = ' % name or ''
return lhs + self.repr(object)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def index(self, dir, shadowed=None):
"""Generate an HTML index for a directory of modules."""
modpkgs = []
if shadowed is None: shadowed = {}
for importer, name, ispkg in pkgutil.iter_modules([dir]):
modpkgs.append((name, '', ispkg, name in shadowed))
shadowed[name] = 1
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
"""Class for safely making a text representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + join(split(type(x).__name__), '_')
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return cram(stripid(repr(x)), self.maxother)
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + test + testrepr[0]
return testrepr
repr_str = repr_string
def repr_instance(self, x, level):
try:
return cram(stripid(repr(x)), self.maxstring)
except:
return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
"""Formatter class for text documentation."""
# ------------------------------------------- text formatting utilities
_repr_instance = TextRepr()
repr = _repr_instance.repr
def bold(self, text):
"""Format a string in bold by overstriking."""
return join(map(lambda ch: ch + '\b' + ch, text), '')
def indent(self, text, prefix=' '):
"""Indent text by prepending a given prefix to each line."""
if not text: return ''
lines = split(text, '\n')
lines = map(lambda line, prefix=prefix: prefix + line, lines)
if lines: lines[-1] = rstrip(lines[-1])
return join(lines, '\n')
def section(self, title, contents):
"""Format a section with a given heading."""
return self.bold(title) + '\n' + rstrip(self.indent(contents)) + '\n\n'
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None, prefix=''):
"""Render in text a class tree as returned by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + prefix + classname(c, modname)
if bases and bases != (parent,):
parents = map(lambda c, m=modname: classname(c, m), bases)
result = result + '(%s)' % join(parents, ', ')
result = result + '\n'
elif type(entry) is type([]):
result = result + self.formattree(
entry, modname, c, prefix + ' ')
return result
def docmodule(self, object, name=None, mod=None):
"""Produce text documentation for a given module object."""
name = object.__name__ # ignore the passed-in name
synop, desc = splitdoc(getdoc(object))
result = self.section('NAME', name + (synop and ' - ' + synop))
try:
all = object.__all__
except AttributeError:
all = None
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
result = result + self.section('FILE', file)
docloc = self.getdocloc(object)
if docloc is not None:
result = result + self.section('MODULE DOCS', docloc)
if desc:
result = result + self.section('DESCRIPTION', desc)
classes = []
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None
or (inspect.getmodule(value) or object) is object):
if visiblename(key, all):
classes.append((key, value))
funcs = []
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all):
funcs.append((key, value))
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all):
data.append((key, value))
if hasattr(object, '__path__'):
modpkgs = []
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
if ispkg:
modpkgs.append(modname + ' (package)')
else:
modpkgs.append(modname)
modpkgs.sort()
result = result + self.section(
'PACKAGE CONTENTS', join(modpkgs, '\n'))
if classes:
classlist = map(lambda (key, value): value, classes)
contents = [self.formattree(
inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name))
result = result + self.section('CLASSES', join(contents, '\n'))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name))
result = result + self.section('FUNCTIONS', join(contents, '\n'))
if data:
contents = []
for key, value in data:
contents.append(self.docother(value, key, name, maxlen=70))
result = result + self.section('DATA', join(contents, '\n'))
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
result = result + self.section('VERSION', version)
if hasattr(object, '__date__'):
result = result + self.section('DATE', str(object.__date__))
if hasattr(object, '__author__'):
result = result + self.section('AUTHOR', str(object.__author__))
if hasattr(object, '__credits__'):
result = result + self.section('CREDITS', str(object.__credits__))
return result
def docclass(self, object, name=None, mod=None):
"""Produce text documentation for a given class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
def makename(c, m=object.__module__):
return classname(c, m)
if name == realname:
title = 'class ' + self.bold(realname)
else:
title = self.bold(name) + ' = class ' + realname
if bases:
parents = map(makename, bases)
title = title + '(%s)' % join(parents, ', ')
doc = getdoc(object)
contents = doc and [doc + '\n'] or []
push = contents.append
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
push("Method resolution order:")
for base in mro:
push(' ' + makename(base))
push('')
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('-' * 70)
self.needone = 1
hr = HorizontalRule()
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self.document(getattr(object, name),
name, mod, object))
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
if callable(value) or inspect.isdatadescriptor(value):
doc = getdoc(value)
else:
doc = None
push(self.docother(getattr(object, name),
name, mod, maxlen=70, doc=doc) + '\n')
return attrs
attrs = filter(lambda (name, kind, cls, value): visiblename(name),
classify_class_attrs(object))
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = "defined here"
else:
tag = "inherited from %s" % classname(thisclass,
object.__module__)
filter(lambda t: not t[0].startswith('_'), attrs)
# Sort attrs by name.
attrs.sort()
# Pump out the attrs, segregated by kind.
attrs = spill("Methods %s:\n" % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill("Class methods %s:\n" % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill("Static methods %s:\n" % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = '\n'.join(contents)
if not contents:
return title + '\n'
return title + '\n' + self.indent(rstrip(contents), ' | ') + '\n'
def formatvalue(self, object):
"""Format an argument default value as text."""
return '=' + self.repr(object)
def docroutine(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a function or method object."""
realname = object.__name__
name = name or realname
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + classname(imclass, mod)
else:
if object.im_self is not None:
note = ' method of %s instance' % classname(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % classname(imclass,mod)
object = object.im_func
if name == realname:
title = self.bold(realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
skipdocs = 1
title = self.bold(name) + ' = ' + realname
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
if realname == '<lambda>':
title = self.bold(name) + ' lambda '
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + note
if skipdocs:
return decl + '\n'
else:
doc = getdoc(object) or ''
return decl + '\n' + (doc and rstrip(self.indent(doc)) + '\n')
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push(self.bold(name))
push('\n')
doc = getdoc(value) or ''
if doc:
push(self.indent(doc))
push('\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a property."""
return self._docdescriptor(name, object, mod)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
repr = self.repr(object)
if maxlen:
line = (name and name + ' = ' or '') + repr
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
if doc is not None:
line += '\n' + self.indent(str(doc))
return line
# --------------------------------------------------------- user interfaces
def pager(text):
"""The first time this is called, determine what kind of pager to use."""
global pager
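    # Rebind the module-level name "pager" to the concrete pager chosen by getpager(), so later calls skip this detection step.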
pager = getpager()
pager(text)
def getpager():
"""Decide what method to use for paging through text."""
if sys.platform.startswith('java'):
return plainpager
if type(sys.stdout) is not types.FileType:
return plainpager
if not sys.stdin.isatty() or not sys.stdout.isatty():
return plainpager
if 'PAGER' in os.environ:
if sys.platform == 'win32': # pipes completely broken in Windows
return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
elif os.environ.get('TERM') in ('dumb', 'emacs'):
return lambda text: pipepager(plain(text), os.environ['PAGER'])
else:
return lambda text: pipepager(text, os.environ['PAGER'])
if os.environ.get('TERM') in ('dumb', 'emacs'):
return plainpager
if sys.platform == 'win32' or sys.platform.startswith('os2'):
return lambda text: tempfilepager(plain(text), 'more <')
if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
return lambda text: pipepager(text, 'less')
import tempfile
(fd, filename) = tempfile.mkstemp()
os.close(fd)
try:
if hasattr(os, 'system') and os.system('more %s' % filename) == 0:
return lambda text: pipepager(text, 'more')
else:
return ttypager
finally:
os.unlink(filename)
def plain(text):
"""Remove boldface formatting from text."""
return re.sub('.\b', '', text)
def pipepager(text, cmd):
"""Page through text by feeding it to another program."""
pipe = os.popen(cmd, 'w')
try:
pipe.write(text)
pipe.close()
except IOError:
pass # Ignore broken pipes caused by quitting the pager program.
def tempfilepager(text, cmd):
"""Page through text by invoking a program on a temporary file."""
import tempfile
filename = tempfile.mktemp()
file = open(filename, 'w')
file.write(text)
file.close()
try:
os.system(cmd + ' ' + filename)
finally:
os.unlink(filename)
def ttypager(text):
"""Page through text on a text terminal."""
lines = split(plain(text), '\n')
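    # Prefer single-keystroke input (tty cbreak mode); otherwise read whole lines and use only their first character.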
try:
import tty
fd = sys.stdin.fileno()
old = tty.tcgetattr(fd)
tty.setcbreak(fd)
getchar = lambda: sys.stdin.read(1)
except (ImportError, AttributeError):
tty = None
getchar = lambda: sys.stdin.readline()[:-1][:1]
try:
        r = inc = int(os.environ.get('LINES', 25)) - 1  # LINES from the environment is a string
sys.stdout.write(join(lines[:inc], '\n') + '\n')
while lines[r:]:
sys.stdout.write('-- more --')
sys.stdout.flush()
c = getchar()
if c in ('q', 'Q'):
                sys.stdout.write('\r          \r')
break
elif c in ('\r', '\n'):
                sys.stdout.write('\r          \r' + lines[r] + '\n')
r = r + 1
continue
if c in ('b', 'B', '\x1b'):
r = r - inc - inc
if r < 0: r = 0
sys.stdout.write('\n' + join(lines[r:r+inc], '\n') + '\n')
r = r + inc
finally:
if tty:
tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
"""Simply print unformatted text. This is the ultimate fallback."""
sys.stdout.write(plain(text))
def describe(thing):
"""Produce a short description of the given thing."""
if inspect.ismodule(thing):
if thing.__name__ in sys.builtin_module_names:
return 'built-in module ' + thing.__name__
if hasattr(thing, '__path__'):
return 'package ' + thing.__name__
else:
return 'module ' + thing.__name__
if inspect.isbuiltin(thing):
return 'built-in function ' + thing.__name__
if inspect.isgetsetdescriptor(thing):
return 'getset descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.ismemberdescriptor(thing):
return 'member descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.isclass(thing):
return 'class ' + thing.__name__
if inspect.isfunction(thing):
return 'function ' + thing.__name__
if inspect.ismethod(thing):
return 'method ' + thing.__name__
if type(thing) is types.InstanceType:
return 'instance of ' + thing.__class__.__name__
return type(thing).__name__
def locate(path, forceload=0):
"""Locate an object by name or dotted path, importing as necessary."""
parts = [part for part in split(path, '.') if part]
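    # Import the longest importable prefix of the dotted path, then resolve the remaining parts with getattr.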
module, n = None, 0
while n < len(parts):
nextmodule = safeimport(join(parts[:n+1], '.'), forceload)
if nextmodule: module, n = nextmodule, n + 1
else: break
if module:
object = module
for part in parts[n:]:
try: object = getattr(object, part)
except AttributeError: return None
return object
else:
if hasattr(__builtin__, path):
return getattr(__builtin__, path)
# --------------------------------------- interactive interpreter interface
text = TextDoc()
html = HTMLDoc()
def resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, str):
object = locate(thing, forceload)
if not object:
raise ImportError, 'no Python documentation found for %r' % thing
return object, thing
else:
return thing, getattr(thing, '__name__', None)
def doc(thing, title='Python Library Documentation: %s', forceload=0):
"""Display text documentation, given an object or a path to an object."""
try:
object, name = resolve(thing, forceload)
desc = describe(object)
module = inspect.getmodule(object)
if name and '.' in name:
desc += ' in ' + name[:name.rfind('.')]
elif module and module is not object:
desc += ' in module ' + module.__name__
if not (inspect.ismodule(object) or
inspect.isclass(object) or
inspect.isroutine(object) or
inspect.isgetsetdescriptor(object) or
inspect.ismemberdescriptor(object) or
isinstance(object, property)):
# If the passed object is a piece of data or an instance,
# document its available methods instead of its value.
object = type(object)
desc += ' object'
pager(title % desc + '\n\n' + text.document(object, name))
except (ImportError, ErrorDuringImport), value:
print value
def writedoc(thing, forceload=0):
"""Write HTML documentation to a file in the current directory."""
try:
object, name = resolve(thing, forceload)
page = html.page(describe(object), html.document(object, name))
file = open(name + '.html', 'w')
file.write(page)
file.close()
print 'wrote', name + '.html'
except (ImportError, ErrorDuringImport), value:
print value
def writedocs(dir, pkgpath='', done=None):
"""Write out HTML documentation for all modules in a directory tree."""
if done is None: done = {}
for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
writedoc(modname)
return
class Helper:
keywords = {
'and': 'BOOLEAN',
'as': 'with',
'assert': ('ref/assert', ''),
'break': ('ref/break', 'while for'),
'class': ('ref/class', 'CLASSES SPECIALMETHODS'),
'continue': ('ref/continue', 'while for'),
'def': ('ref/function', ''),
'del': ('ref/del', 'BASICMETHODS'),
'elif': 'if',
'else': ('ref/if', 'while for'),
'except': 'try',
'exec': ('ref/exec', ''),
'finally': 'try',
'for': ('ref/for', 'break continue while'),
'from': 'import',
'global': ('ref/global', 'NAMESPACES'),
'if': ('ref/if', 'TRUTHVALUE'),
'import': ('ref/import', 'MODULES'),
'in': ('ref/comparisons', 'SEQUENCEMETHODS2'),
'is': 'COMPARISON',
'lambda': ('ref/lambdas', 'FUNCTIONS'),
'not': 'BOOLEAN',
'or': 'BOOLEAN',
'pass': ('ref/pass', ''),
'print': ('ref/print', ''),
'raise': ('ref/raise', 'EXCEPTIONS'),
'return': ('ref/return', 'FUNCTIONS'),
'try': ('ref/try', 'EXCEPTIONS'),
'while': ('ref/while', 'break continue if TRUTHVALUE'),
'with': ('ref/with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
'yield': ('ref/yield', ''),
}
topics = {
'TYPES': ('ref/types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS FUNCTIONS CLASSES MODULES FILES inspect'),
'STRINGS': ('ref/strings', 'str UNICODE SEQUENCES STRINGMETHODS FORMATTING TYPES'),
'STRINGMETHODS': ('lib/string-methods', 'STRINGS FORMATTING'),
'FORMATTING': ('lib/typesseq-strings', 'OPERATORS'),
'UNICODE': ('ref/strings', 'encodings unicode SEQUENCES STRINGMETHODS FORMATTING TYPES'),
'NUMBERS': ('ref/numbers', 'INTEGER FLOAT COMPLEX TYPES'),
'INTEGER': ('ref/integers', 'int range'),
'FLOAT': ('ref/floating', 'float math'),
'COMPLEX': ('ref/imaginary', 'complex cmath'),
'SEQUENCES': ('lib/typesseq', 'STRINGMETHODS FORMATTING xrange LISTS'),
'MAPPINGS': 'DICTIONARIES',
'FUNCTIONS': ('lib/typesfunctions', 'def TYPES'),
'METHODS': ('lib/typesmethods', 'class def CLASSES TYPES'),
'CODEOBJECTS': ('lib/bltin-code-objects', 'compile FUNCTIONS TYPES'),
'TYPEOBJECTS': ('lib/bltin-type-objects', 'types TYPES'),
'FRAMEOBJECTS': 'TYPES',
'TRACEBACKS': 'TYPES',
'NONE': ('lib/bltin-null-object', ''),
'ELLIPSIS': ('lib/bltin-ellipsis-object', 'SLICINGS'),
'FILES': ('lib/bltin-file-objects', ''),
'SPECIALATTRIBUTES': ('lib/specialattrs', ''),
'CLASSES': ('ref/types', 'class SPECIALMETHODS PRIVATENAMES'),
'MODULES': ('lib/typesmodules', 'import'),
'PACKAGES': 'import',
'EXPRESSIONS': ('ref/summary', 'lambda or and not in is BOOLEAN COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES LISTS DICTIONARIES BACKQUOTES'),
'OPERATORS': 'EXPRESSIONS',
'PRECEDENCE': 'EXPRESSIONS',
'OBJECTS': ('ref/objects', 'TYPES'),
'SPECIALMETHODS': ('ref/specialnames', 'BASICMETHODS ATTRIBUTEMETHODS CALLABLEMETHODS SEQUENCEMETHODS1 MAPPINGMETHODS SEQUENCEMETHODS2 NUMBERMETHODS CLASSES'),
'BASICMETHODS': ('ref/customization', 'cmp hash repr str SPECIALMETHODS'),
'ATTRIBUTEMETHODS': ('ref/attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
'CALLABLEMETHODS': ('ref/callable-types', 'CALLS SPECIALMETHODS'),
'SEQUENCEMETHODS1': ('ref/sequence-types', 'SEQUENCES SEQUENCEMETHODS2 SPECIALMETHODS'),
'SEQUENCEMETHODS2': ('ref/sequence-methods', 'SEQUENCES SEQUENCEMETHODS1 SPECIALMETHODS'),
'MAPPINGMETHODS': ('ref/sequence-types', 'MAPPINGS SPECIALMETHODS'),
'NUMBERMETHODS': ('ref/numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT SPECIALMETHODS'),
'EXECUTION': ('ref/execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
'NAMESPACES': ('ref/naming', 'global ASSIGNMENT DELETION DYNAMICFEATURES'),
'DYNAMICFEATURES': ('ref/dynamic-features', ''),
'SCOPING': 'NAMESPACES',
'FRAMES': 'NAMESPACES',
'EXCEPTIONS': ('ref/exceptions', 'try except finally raise'),
'COERCIONS': ('ref/coercion-rules','CONVERSIONS'),
'CONVERSIONS': ('ref/conversions', 'COERCIONS'),
'IDENTIFIERS': ('ref/identifiers', 'keywords SPECIALIDENTIFIERS'),
'SPECIALIDENTIFIERS': ('ref/id-classes', ''),
'PRIVATENAMES': ('ref/atom-identifiers', ''),
'LITERALS': ('ref/atom-literals', 'STRINGS BACKQUOTES NUMBERS TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'),
'TUPLES': 'SEQUENCES',
'TUPLELITERALS': ('ref/exprlists', 'TUPLES LITERALS'),
'LISTS': ('lib/typesseq-mutable', 'LISTLITERALS'),
'LISTLITERALS': ('ref/lists', 'LISTS LITERALS'),
'DICTIONARIES': ('lib/typesmapping', 'DICTIONARYLITERALS'),
'DICTIONARYLITERALS': ('ref/dict', 'DICTIONARIES LITERALS'),
'BACKQUOTES': ('ref/string-conversions', 'repr str STRINGS LITERALS'),
'ATTRIBUTES': ('ref/attribute-references', 'getattr hasattr setattr ATTRIBUTEMETHODS'),
'SUBSCRIPTS': ('ref/subscriptions', 'SEQUENCEMETHODS1'),
'SLICINGS': ('ref/slicings', 'SEQUENCEMETHODS2'),
'CALLS': ('ref/calls', 'EXPRESSIONS'),
'POWER': ('ref/power', 'EXPRESSIONS'),
'UNARY': ('ref/unary', 'EXPRESSIONS'),
'BINARY': ('ref/binary', 'EXPRESSIONS'),
'SHIFTING': ('ref/shifting', 'EXPRESSIONS'),
'BITWISE': ('ref/bitwise', 'EXPRESSIONS'),
'COMPARISON': ('ref/comparisons', 'EXPRESSIONS BASICMETHODS'),
'BOOLEAN': ('ref/Booleans', 'EXPRESSIONS TRUTHVALUE'),
'ASSERTION': 'assert',
'ASSIGNMENT': ('ref/assignment', 'AUGMENTEDASSIGNMENT'),
'AUGMENTEDASSIGNMENT': ('ref/augassign', 'NUMBERMETHODS'),
'DELETION': 'del',
'PRINTING': 'print',
'RETURNING': 'return',
'IMPORTING': 'import',
'CONDITIONAL': 'if',
'LOOPING': ('ref/compound', 'for while break continue'),
'TRUTHVALUE': ('lib/truth', 'if while and or not BASICMETHODS'),
'DEBUGGING': ('lib/module-pdb', 'pdb'),
'CONTEXTMANAGERS': ('ref/context-managers', 'with'),
}
def __init__(self, input, output):
self.input = input
self.output = output
self.docdir = None
if sys.executable is None:
execdir = os.getcwd()
else:
execdir = os.path.dirname(sys.executable)
homedir = os.environ.get('PYTHONHOME')
for dir in [os.environ.get('PYTHONDOCS'),
homedir and os.path.join(homedir, 'doc'),
os.path.join(execdir, 'doc'),
'/usr/doc/python-docs-' + split(sys.version)[0],
'/usr/doc/python-' + split(sys.version)[0],
'/usr/doc/python-docs-' + sys.version[:3],
'/usr/doc/python-' + sys.version[:3],
os.path.join(sys.prefix, 'Resources/English.lproj/Documentation')]:
if dir and os.path.isdir(os.path.join(dir, 'lib')):
self.docdir = dir
def __repr__(self):
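        # Evaluating "help" without calling it at the interactive prompt ends up here; the frame-name check is meant to detect that case and launch the interactive helper instead of printing a repr.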
if inspect.stack()[1][3] == '?':
self()
return ''
return '<pydoc.Helper instance>'
def __call__(self, request=None):
if request is not None:
self.help(request)
else:
self.intro()
self.interact()
self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')
def interact(self):
self.output.write('\n')
while True:
try:
request = self.getline('help> ')
if not request: break
except (KeyboardInterrupt, EOFError):
break
request = strip(replace(request, '"', '', "'", ''))
if lower(request) in ('q', 'quit'): break
self.help(request)
def getline(self, prompt):
"""Read one line, using raw_input when available."""
if self.input is sys.stdin:
return raw_input(prompt)
else:
self.output.write(prompt)
self.output.flush()
return self.input.readline()
def help(self, request):
if type(request) is type(''):
if request == 'help': self.intro()
elif request == 'keywords': self.listkeywords()
elif request == 'topics': self.listtopics()
elif request == 'modules': self.listmodules()
elif request[:8] == 'modules ':
self.listmodules(split(request)[1])
elif request in self.keywords: self.showtopic(request)
elif request in self.topics: self.showtopic(request)
elif request: doc(request, 'Help on %s:')
elif isinstance(request, Helper): self()
else: doc(request, 'Help on %s:')
self.output.write('\n')
def intro(self):
self.output.write('''
Welcome to Python %s! This is the online help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://www.python.org/doc/tut/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % sys.version[:3])
def list(self, items, columns=4, width=80):
items = items[:]
items.sort()
colw = width / columns
rows = (len(items) + columns - 1) / columns
for row in range(rows):
for col in range(columns):
i = col * rows + row
if i < len(items):
self.output.write(items[i])
if col < columns - 1:
self.output.write(' ' + ' ' * (colw-1 - len(items[i])))
self.output.write('\n')
def listkeywords(self):
self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
self.list(self.keywords.keys())
def listtopics(self):
self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
self.list(self.topics.keys())
def showtopic(self, topic):
if not self.docdir:
self.output.write('''
Sorry, topic and keyword documentation is not available because the Python
HTML documentation files could not be found. If you have installed them,
please set the environment variable PYTHONDOCS to indicate their location.
On the Microsoft Windows operating system, the files can be built by
running "hh -decompile . PythonNN.chm" in the C:\PythonNN\Doc> directory.
''')
return
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
self.output.write('no documentation found for %s\n' % repr(topic))
return
if type(target) is type(''):
return self.showtopic(target)
filename, xrefs = target
filename = self.docdir + '/' + filename + '.html'
try:
file = open(filename)
except:
self.output.write('could not read docs from %s\n' % filename)
return
divpat = re.compile('<div[^>]*navigat.*?</div.*?>', re.I | re.S)
addrpat = re.compile('<address.*?>.*?</address.*?>', re.I | re.S)
document = re.sub(addrpat, '', re.sub(divpat, '', file.read()))
file.close()
import htmllib, formatter, StringIO
buffer = StringIO.StringIO()
parser = htmllib.HTMLParser(
formatter.AbstractFormatter(formatter.DumbWriter(buffer)))
parser.start_table = parser.do_p
parser.end_table = lambda parser=parser: parser.do_p({})
parser.start_tr = parser.do_br
parser.start_td = parser.start_th = lambda a, b=buffer: b.write('\t')
parser.feed(document)
buffer = replace(buffer.getvalue(), '\xa0', ' ', '\n', '\n ')
pager(' ' + strip(buffer) + '\n')
if xrefs:
buffer = StringIO.StringIO()
formatter.DumbWriter(buffer).send_flowing_data(
'Related help topics: ' + join(split(xrefs), ', ') + '\n')
self.output.write('\n%s\n' % buffer.getvalue())
def listmodules(self, key=''):
if key:
self.output.write('''
Here is a list of matching modules. Enter any module name to get more help.
''')
apropos(key)
else:
self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
modules = {}
def callback(path, modname, desc, modules=modules):
if modname and modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
if find(modname, '.') < 0:
modules[modname] = 1
ModuleScanner().run(callback)
self.list(modules.keys())
self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
help = Helper(sys.stdin, sys.stdout)
class Scanner:
"""A generic tree iterator."""
def __init__(self, roots, children, descendp):
self.roots = roots[:]
self.state = []
self.children = children
self.descendp = descendp
def next(self):
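        # Depth-first traversal driven by an explicit stack of (node, remaining children) pairs; returns None when exhausted.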
if not self.state:
if not self.roots:
return None
root = self.roots.pop(0)
self.state = [(root, self.children(root))]
node, children = self.state[-1]
if not children:
self.state.pop()
return self.next()
child = children.pop(0)
if self.descendp(child):
self.state.append((child, self.children(child)))
return child
class ModuleScanner:
"""An interruptible scanner that searches module synopses."""
def run(self, callback, key=None, completer=None):
if key: key = lower(key)
self.quit = False
seen = {}
for modname in sys.builtin_module_names:
if modname != '__main__':
seen[modname] = 1
if key is None:
callback(None, modname, '')
else:
desc = split(__import__(modname).__doc__ or '', '\n')[0]
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(None, modname, desc)
for importer, modname, ispkg in pkgutil.walk_packages():
if self.quit:
break
if key is None:
callback(None, modname, '')
else:
loader = importer.find_module(modname)
if hasattr(loader,'get_source'):
import StringIO
desc = source_synopsis(
StringIO.StringIO(loader.get_source(modname))
) or ''
if hasattr(loader,'get_filename'):
path = loader.get_filename(modname)
else:
path = None
else:
module = loader.load_module(modname)
desc = (module.__doc__ or '').splitlines()[0]
path = getattr(module,'__file__',None)
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(path, modname, desc)
if completer:
completer()
def apropos(key):
"""Print all the one-line module summaries that contain a substring."""
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
print modname, desc and '- ' + desc
try: import warnings
except ImportError: pass
else: warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key)
# --------------------------------------------------- web browser interface
def serve(port, callback=None, completer=None):
import BaseHTTPServer, mimetools, select
# Patch up mimetools.Message so it doesn't break if rfc822 is reloaded.
class Message(mimetools.Message):
def __init__(self, fp, seekable=1):
Message = self.__class__
Message.__bases__[0].__bases__[0].__init__(self, fp, seekable)
self.encodingheader = self.getheader('content-transfer-encoding')
self.typeheader = self.getheader('content-type')
self.parsetype()
self.parseplist()
class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def send_document(self, title, contents):
try:
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(html.page(title, contents))
except IOError: pass
def do_GET(self):
path = self.path
if path[-5:] == '.html': path = path[:-5]
if path[:1] == '/': path = path[1:]
if path and path != '.':
try:
obj = locate(path, forceload=1)
except ErrorDuringImport, value:
self.send_document(path, html.escape(str(value)))
return
if obj:
self.send_document(describe(obj), html.document(obj, path))
else:
self.send_document(path,
'no Python documentation found for %s' % repr(path))
else:
heading = html.heading(
'<big><big><strong>Python: Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
names = filter(lambda x: x != '__main__',
sys.builtin_module_names)
contents = html.multicolumn(names, bltinlink)
indices = ['<p>' + html.bigsection(
'Built-in Modules', '#ffffff', '#ee77aa', contents)]
seen = {}
for dir in sys.path:
indices.append(html.index(dir, seen))
contents = heading + join(indices) + '''<p align=right>
<font color="#909090" face="helvetica, arial"><strong>
pydoc</strong> by Ka-Ping Yee <[email protected]></font>'''
self.send_document('Index of Modules', contents)
def log_message(self, *args): pass
class DocServer(BaseHTTPServer.HTTPServer):
def __init__(self, port, callback):
host = (sys.platform == 'mac') and '127.0.0.1' or 'localhost'
self.address = ('', port)
self.url = 'http://%s:%d/' % (host, port)
self.callback = callback
self.base.__init__(self, self.address, self.handler)
def serve_until_quit(self):
import sys
if sys.platform.startswith('java'):
from select import cpython_compatible_select as select
else:
from select import select
self.quit = False
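            # Poll with a 1-second select timeout so self.quit is re-checked between requests.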
while not self.quit:
rd, wr, ex = select([self.socket], [], [], 1)
if rd: self.handle_request()
def server_activate(self):
self.base.server_activate(self)
if self.callback: self.callback(self)
DocServer.base = BaseHTTPServer.HTTPServer
DocServer.handler = DocHandler
DocHandler.MessageClass = Message
try:
try:
DocServer(port, callback).serve_until_quit()
except (KeyboardInterrupt, select.error):
pass
finally:
if completer: completer()
# ----------------------------------------------------- graphical interface
def gui():
"""Graphical interface (starts web server and pops up a control window)."""
class GUI:
def __init__(self, window, port=7464):
self.window = window
self.server = None
self.scanner = None
import Tkinter
self.server_frm = Tkinter.Frame(window)
self.title_lbl = Tkinter.Label(self.server_frm,
text='Starting server...\n ')
self.open_btn = Tkinter.Button(self.server_frm,
text='open browser', command=self.open, state='disabled')
self.quit_btn = Tkinter.Button(self.server_frm,
text='quit serving', command=self.quit, state='disabled')
self.search_frm = Tkinter.Frame(window)
self.search_lbl = Tkinter.Label(self.search_frm, text='Search for')
self.search_ent = Tkinter.Entry(self.search_frm)
self.search_ent.bind('<Return>', self.search)
self.stop_btn = Tkinter.Button(self.search_frm,
text='stop', pady=0, command=self.stop, state='disabled')
if sys.platform == 'win32':
# Trying to hide and show this button crashes under Windows.
self.stop_btn.pack(side='right')
self.window.title('pydoc')
self.window.protocol('WM_DELETE_WINDOW', self.quit)
self.title_lbl.pack(side='top', fill='x')
self.open_btn.pack(side='left', fill='x', expand=1)
self.quit_btn.pack(side='right', fill='x', expand=1)
self.server_frm.pack(side='top', fill='x')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
self.search_frm.pack(side='top', fill='x')
self.search_ent.focus_set()
font = ('helvetica', sys.platform == 'win32' and 8 or 10)
self.result_lst = Tkinter.Listbox(window, font=font, height=6)
self.result_lst.bind('<Button-1>', self.select)
self.result_lst.bind('<Double-Button-1>', self.goto)
self.result_scr = Tkinter.Scrollbar(window,
orient='vertical', command=self.result_lst.yview)
self.result_lst.config(yscrollcommand=self.result_scr.set)
self.result_frm = Tkinter.Frame(window)
self.goto_btn = Tkinter.Button(self.result_frm,
text='go to selected', command=self.goto)
self.hide_btn = Tkinter.Button(self.result_frm,
text='hide results', command=self.hide)
self.goto_btn.pack(side='left', fill='x', expand=1)
self.hide_btn.pack(side='right', fill='x', expand=1)
self.window.update()
self.minwidth = self.window.winfo_width()
self.minheight = self.window.winfo_height()
self.bigminheight = (self.server_frm.winfo_reqheight() +
self.search_frm.winfo_reqheight() +
self.result_lst.winfo_reqheight() +
self.result_frm.winfo_reqheight())
self.bigwidth, self.bigheight = self.minwidth, self.bigminheight
self.expanded = 0
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.window.tk.willdispatch()
import threading
threading.Thread(
target=serve, args=(port, self.ready, self.quit)).start()
def ready(self, server):
self.server = server
self.title_lbl.config(
text='Python documentation server at\n' + server.url)
self.open_btn.config(state='normal')
self.quit_btn.config(state='normal')
def open(self, event=None, url=None):
url = url or self.server.url
try:
import webbrowser
webbrowser.open(url)
except ImportError: # pre-webbrowser.py compatibility
if sys.platform == 'win32':
os.system('start "%s"' % url)
elif sys.platform == 'mac':
try: import ic
except ImportError: pass
else: ic.launchurl(url)
else:
rc = os.system('netscape -remote "openURL(%s)" &' % url)
if rc: os.system('netscape "%s" &' % url)
def quit(self, event=None):
if self.server:
self.server.quit = 1
self.window.quit()
def search(self, event=None):
key = self.search_ent.get()
self.stop_btn.pack(side='right')
self.stop_btn.config(state='normal')
self.search_lbl.config(text='Searching for "%s"...' % key)
self.search_ent.forget()
self.search_lbl.pack(side='left')
self.result_lst.delete(0, 'end')
self.goto_btn.config(state='disabled')
self.expand()
import threading
if self.scanner:
self.scanner.quit = 1
self.scanner = ModuleScanner()
threading.Thread(target=self.scanner.run,
args=(self.update, key, self.done)).start()
def update(self, path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
self.result_lst.insert('end',
modname + ' - ' + (desc or '(no description)'))
def stop(self, event=None):
if self.scanner:
self.scanner.quit = 1
self.scanner = None
def done(self):
self.scanner = None
self.search_lbl.config(text='Search for')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
if sys.platform != 'win32': self.stop_btn.forget()
self.stop_btn.config(state='disabled')
def select(self, event=None):
self.goto_btn.config(state='normal')
def goto(self, event=None):
selection = self.result_lst.curselection()
if selection:
modname = split(self.result_lst.get(selection[0]))[0]
self.open(url=self.server.url + modname + '.html')
def collapse(self):
if not self.expanded: return
self.result_frm.forget()
self.result_scr.forget()
self.result_lst.forget()
self.bigwidth = self.window.winfo_width()
self.bigheight = self.window.winfo_height()
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.expanded = 0
def expand(self):
if self.expanded: return
self.result_frm.pack(side='bottom', fill='x')
self.result_scr.pack(side='right', fill='y')
self.result_lst.pack(side='top', fill='both', expand=1)
self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight))
self.window.wm_minsize(self.minwidth, self.bigminheight)
self.expanded = 1
def hide(self, event=None):
self.stop()
self.collapse()
import Tkinter
try:
root = Tkinter.Tk()
# Tk will crash if pythonw.exe has an XP .manifest
        # file and the root is not destroyed explicitly.
# If the problem is ever fixed in Tk, the explicit
# destroy can go.
try:
gui = GUI(root)
root.mainloop()
finally:
root.destroy()
except KeyboardInterrupt:
pass
# -------------------------------------------------- command-line interface
def ispath(x):
return isinstance(x, str) and find(x, os.sep) >= 0
def cli():
"""Command-line interface (looks at sys.argv to decide what to do)."""
import getopt
class BadUsage: pass
# Scripts don't get the current directory in their path by default.
scriptdir = os.path.dirname(sys.argv[0])
if scriptdir in sys.path:
sys.path.remove(scriptdir)
sys.path.insert(0, '.')
try:
opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w')
writing = 0
for opt, val in opts:
if opt == '-g':
gui()
return
if opt == '-k':
apropos(val)
return
if opt == '-p':
try:
port = int(val)
except ValueError:
raise BadUsage
def ready(server):
print 'pydoc server ready at %s' % server.url
def stopped():
print 'pydoc server stopped'
serve(port, ready, stopped)
return
if opt == '-w':
writing = 1
if not args: raise BadUsage
for arg in args:
if ispath(arg) and not os.path.exists(arg):
print 'file %r does not exist' % arg
break
try:
if ispath(arg) and os.path.isfile(arg):
arg = importfile(arg)
if writing:
if ispath(arg) and os.path.isdir(arg):
writedocs(arg)
else:
writedoc(arg)
else:
help.help(arg)
except ErrorDuringImport, value:
print value
except (getopt.error, BadUsage):
cmd = os.path.basename(sys.argv[0])
print """pydoc - the Python documentation tool
%s <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '%s', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.
%s -k <keyword>
Search for a keyword in the synopsis lines of all available modules.
%s -p <port>
Start an HTTP server on the given port on the local machine.
%s -g
Pop up a graphical interface for finding and serving documentation.
%s -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '%s', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
""" % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep)
if __name__ == '__main__': cli()
| apache-2.0 |
matmutant/sl4a | python/src/Lib/test/mapping_tests.py | 56 | 22395 | # tests common to dict and UserDict
import unittest
import UserDict
class BasicTestMappingProtocol(unittest.TestCase):
# This base class can be used to check that an object conforms to the
# mapping protocol
# Functions that can be useful to override to adapt to dictionary
# semantics
type2test = None # which class is being tested (overwrite in subclasses)
def _reference(self):
"""Return a dictionary of values which are invariant by storage
in the object under test."""
return {1:2, "key1":"value1", "key2":(1,2,3)}
def _empty_mapping(self):
"""Return an empty mapping object"""
return self.type2test()
def _full_mapping(self, data):
"""Return a mapping object with the value contained in data
dictionary"""
x = self._empty_mapping()
for key, value in data.items():
x[key] = value
return x
def __init__(self, *args, **kw):
unittest.TestCase.__init__(self, *args, **kw)
self.reference = self._reference().copy()
# A (key, value) pair not in the mapping
key, value = self.reference.popitem()
self.other = {key:value}
# A (key, value) pair in the mapping
key, value = self.reference.popitem()
self.inmapping = {key:value}
self.reference[key] = value
def test_read(self):
# Test for read only operations on mapping
p = self._empty_mapping()
p1 = dict(p) #workaround for singleton objects
d = self._full_mapping(self.reference)
if d is p:
p = p1
#Indexing
for key, value in self.reference.items():
self.assertEqual(d[key], value)
knownkey = self.other.keys()[0]
self.failUnlessRaises(KeyError, lambda:d[knownkey])
#len
self.assertEqual(len(p), 0)
self.assertEqual(len(d), len(self.reference))
#has_key
for k in self.reference:
self.assert_(d.has_key(k))
self.assert_(k in d)
for k in self.other:
self.failIf(d.has_key(k))
self.failIf(k in d)
#cmp
self.assertEqual(cmp(p,p), 0)
self.assertEqual(cmp(d,d), 0)
self.assertEqual(cmp(p,d), -1)
self.assertEqual(cmp(d,p), 1)
        #__nonzero__
if p: self.fail("Empty mapping must compare to False")
if not d: self.fail("Full mapping must compare to True")
# keys(), items(), iterkeys() ...
def check_iterandlist(iter, lst, ref):
self.assert_(hasattr(iter, 'next'))
self.assert_(hasattr(iter, '__iter__'))
x = list(iter)
self.assert_(set(x)==set(lst)==set(ref))
check_iterandlist(d.iterkeys(), d.keys(), self.reference.keys())
check_iterandlist(iter(d), d.keys(), self.reference.keys())
check_iterandlist(d.itervalues(), d.values(), self.reference.values())
check_iterandlist(d.iteritems(), d.items(), self.reference.items())
#get
key, value = d.iteritems().next()
knownkey, knownvalue = self.other.iteritems().next()
self.assertEqual(d.get(key, knownvalue), value)
self.assertEqual(d.get(knownkey, knownvalue), knownvalue)
self.failIf(knownkey in d)
def test_write(self):
# Test for write operations on mapping
p = self._empty_mapping()
#Indexing
for key, value in self.reference.items():
p[key] = value
self.assertEqual(p[key], value)
for key in self.reference.keys():
del p[key]
self.failUnlessRaises(KeyError, lambda:p[key])
p = self._empty_mapping()
#update
p.update(self.reference)
self.assertEqual(dict(p), self.reference)
items = p.items()
p = self._empty_mapping()
p.update(items)
self.assertEqual(dict(p), self.reference)
d = self._full_mapping(self.reference)
#setdefault
key, value = d.iteritems().next()
knownkey, knownvalue = self.other.iteritems().next()
self.assertEqual(d.setdefault(key, knownvalue), value)
self.assertEqual(d[key], value)
self.assertEqual(d.setdefault(knownkey, knownvalue), knownvalue)
self.assertEqual(d[knownkey], knownvalue)
#pop
self.assertEqual(d.pop(knownkey), knownvalue)
self.failIf(knownkey in d)
self.assertRaises(KeyError, d.pop, knownkey)
default = 909
d[knownkey] = knownvalue
self.assertEqual(d.pop(knownkey, default), knownvalue)
self.failIf(knownkey in d)
self.assertEqual(d.pop(knownkey, default), default)
#popitem
key, value = d.popitem()
self.failIf(key in d)
self.assertEqual(value, self.reference[key])
p=self._empty_mapping()
self.assertRaises(KeyError, p.popitem)
def test_constructor(self):
self.assertEqual(self._empty_mapping(), self._empty_mapping())
def test_bool(self):
self.assert_(not self._empty_mapping())
self.assert_(self.reference)
self.assert_(bool(self._empty_mapping()) is False)
self.assert_(bool(self.reference) is True)
def test_keys(self):
d = self._empty_mapping()
self.assertEqual(d.keys(), [])
d = self.reference
self.assert_(self.inmapping.keys()[0] in d.keys())
self.assert_(self.other.keys()[0] not in d.keys())
self.assertRaises(TypeError, d.keys, None)
def test_values(self):
d = self._empty_mapping()
self.assertEqual(d.values(), [])
self.assertRaises(TypeError, d.values, None)
def test_items(self):
d = self._empty_mapping()
self.assertEqual(d.items(), [])
self.assertRaises(TypeError, d.items, None)
def test_len(self):
d = self._empty_mapping()
self.assertEqual(len(d), 0)
def test_getitem(self):
d = self.reference
self.assertEqual(d[self.inmapping.keys()[0]], self.inmapping.values()[0])
self.assertRaises(TypeError, d.__getitem__)
def test_update(self):
# mapping argument
d = self._empty_mapping()
d.update(self.other)
self.assertEqual(d.items(), self.other.items())
# No argument
d = self._empty_mapping()
d.update()
self.assertEqual(d, self._empty_mapping())
# item sequence
d = self._empty_mapping()
d.update(self.other.items())
self.assertEqual(d.items(), self.other.items())
# Iterator
d = self._empty_mapping()
d.update(self.other.iteritems())
self.assertEqual(d.items(), self.other.items())
# FIXME: Doesn't work with UserDict
# self.assertRaises((TypeError, AttributeError), d.update, None)
self.assertRaises((TypeError, AttributeError), d.update, 42)
outerself = self
class SimpleUserDict:
def __init__(self):
self.d = outerself.reference
def keys(self):
return self.d.keys()
def __getitem__(self, i):
return self.d[i]
d.clear()
d.update(SimpleUserDict())
i1 = d.items()
i2 = self.reference.items()
i1.sort()
i2.sort()
self.assertEqual(i1, i2)
class Exc(Exception): pass
d = self._empty_mapping()
class FailingUserDict:
def keys(self):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
d.clear()
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = 1
def __iter__(self):
return self
def next(self):
if self.i:
self.i = 0
return 'a'
raise Exc
return BogonIter()
def __getitem__(self, key):
return key
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = ord('a')
def __iter__(self):
return self
def next(self):
if self.i <= ord('z'):
rtn = chr(self.i)
self.i += 1
return rtn
raise StopIteration
return BogonIter()
def __getitem__(self, key):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
d = self._empty_mapping()
class badseq(object):
def __iter__(self):
return self
def next(self):
raise Exc()
self.assertRaises(Exc, d.update, badseq())
self.assertRaises(ValueError, d.update, [(1, 2, 3)])
# no test_fromkeys or test_copy as both os.environ and selves don't support it
def test_get(self):
d = self._empty_mapping()
self.assert_(d.get(self.other.keys()[0]) is None)
self.assertEqual(d.get(self.other.keys()[0], 3), 3)
d = self.reference
self.assert_(d.get(self.other.keys()[0]) is None)
self.assertEqual(d.get(self.other.keys()[0], 3), 3)
self.assertEqual(d.get(self.inmapping.keys()[0]), self.inmapping.values()[0])
self.assertEqual(d.get(self.inmapping.keys()[0], 3), self.inmapping.values()[0])
self.assertRaises(TypeError, d.get)
self.assertRaises(TypeError, d.get, None, None, None)
def test_setdefault(self):
d = self._empty_mapping()
self.assertRaises(TypeError, d.setdefault)
def test_popitem(self):
d = self._empty_mapping()
self.assertRaises(KeyError, d.popitem)
self.assertRaises(TypeError, d.popitem, 42)
def test_pop(self):
d = self._empty_mapping()
k, v = self.inmapping.items()[0]
d[k] = v
self.assertRaises(KeyError, d.pop, self.other.keys()[0])
self.assertEqual(d.pop(k), v)
self.assertEqual(len(d), 0)
self.assertRaises(KeyError, d.pop, k)
class TestMappingProtocol(BasicTestMappingProtocol):
def test_constructor(self):
BasicTestMappingProtocol.test_constructor(self)
self.assert_(self._empty_mapping() is not self._empty_mapping())
self.assertEqual(self.type2test(x=1, y=2), {"x": 1, "y": 2})
def test_bool(self):
BasicTestMappingProtocol.test_bool(self)
self.assert_(not self._empty_mapping())
self.assert_(self._full_mapping({"x": "y"}))
self.assert_(bool(self._empty_mapping()) is False)
self.assert_(bool(self._full_mapping({"x": "y"})) is True)
def test_keys(self):
BasicTestMappingProtocol.test_keys(self)
d = self._empty_mapping()
self.assertEqual(d.keys(), [])
d = self._full_mapping({'a': 1, 'b': 2})
k = d.keys()
self.assert_('a' in k)
self.assert_('b' in k)
self.assert_('c' not in k)
def test_values(self):
BasicTestMappingProtocol.test_values(self)
d = self._full_mapping({1:2})
self.assertEqual(d.values(), [2])
def test_items(self):
BasicTestMappingProtocol.test_items(self)
d = self._full_mapping({1:2})
self.assertEqual(d.items(), [(1, 2)])
def test_has_key(self):
d = self._empty_mapping()
self.assert_(not d.has_key('a'))
d = self._full_mapping({'a': 1, 'b': 2})
k = d.keys()
k.sort()
self.assertEqual(k, ['a', 'b'])
self.assertRaises(TypeError, d.has_key)
def test_contains(self):
d = self._empty_mapping()
self.assert_(not ('a' in d))
self.assert_('a' not in d)
d = self._full_mapping({'a': 1, 'b': 2})
self.assert_('a' in d)
self.assert_('b' in d)
self.assert_('c' not in d)
self.assertRaises(TypeError, d.__contains__)
def test_len(self):
BasicTestMappingProtocol.test_len(self)
d = self._full_mapping({'a': 1, 'b': 2})
self.assertEqual(len(d), 2)
def test_getitem(self):
BasicTestMappingProtocol.test_getitem(self)
d = self._full_mapping({'a': 1, 'b': 2})
self.assertEqual(d['a'], 1)
self.assertEqual(d['b'], 2)
d['c'] = 3
d['a'] = 4
self.assertEqual(d['c'], 3)
self.assertEqual(d['a'], 4)
del d['b']
self.assertEqual(d, {'a': 4, 'c': 3})
self.assertRaises(TypeError, d.__getitem__)
def test_clear(self):
d = self._full_mapping({1:1, 2:2, 3:3})
d.clear()
self.assertEqual(d, {})
self.assertRaises(TypeError, d.clear, None)
def test_update(self):
BasicTestMappingProtocol.test_update(self)
# mapping argument
d = self._empty_mapping()
d.update({1:100})
d.update({2:20})
d.update({1:1, 2:2, 3:3})
self.assertEqual(d, {1:1, 2:2, 3:3})
# no argument
d.update()
self.assertEqual(d, {1:1, 2:2, 3:3})
# keyword arguments
d = self._empty_mapping()
d.update(x=100)
d.update(y=20)
d.update(x=1, y=2, z=3)
self.assertEqual(d, {"x":1, "y":2, "z":3})
# item sequence
d = self._empty_mapping()
d.update([("x", 100), ("y", 20)])
self.assertEqual(d, {"x":100, "y":20})
# Both item sequence and keyword arguments
d = self._empty_mapping()
d.update([("x", 100), ("y", 20)], x=1, y=2)
self.assertEqual(d, {"x":1, "y":2})
# iterator
d = self._full_mapping({1:3, 2:4})
d.update(self._full_mapping({1:2, 3:4, 5:6}).iteritems())
self.assertEqual(d, {1:2, 2:4, 3:4, 5:6})
class SimpleUserDict:
def __init__(self):
self.d = {1:1, 2:2, 3:3}
def keys(self):
return self.d.keys()
def __getitem__(self, i):
return self.d[i]
d.clear()
d.update(SimpleUserDict())
self.assertEqual(d, {1:1, 2:2, 3:3})
def test_fromkeys(self):
self.assertEqual(self.type2test.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
d = self._empty_mapping()
self.assert_(not(d.fromkeys('abc') is d))
self.assertEqual(d.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
self.assertEqual(d.fromkeys((4,5),0), {4:0, 5:0})
self.assertEqual(d.fromkeys([]), {})
def g():
yield 1
self.assertEqual(d.fromkeys(g()), {1:None})
self.assertRaises(TypeError, {}.fromkeys, 3)
class dictlike(self.type2test): pass
self.assertEqual(dictlike.fromkeys('a'), {'a':None})
self.assertEqual(dictlike().fromkeys('a'), {'a':None})
self.assert_(dictlike.fromkeys('a').__class__ is dictlike)
self.assert_(dictlike().fromkeys('a').__class__ is dictlike)
# FIXME: the following won't work with UserDict, because it's an old style class
# self.assert_(type(dictlike.fromkeys('a')) is dictlike)
class mydict(self.type2test):
def __new__(cls):
return UserDict.UserDict()
ud = mydict.fromkeys('ab')
self.assertEqual(ud, {'a':None, 'b':None})
# FIXME: the following won't work with UserDict, because it's an old style class
# self.assert_(isinstance(ud, UserDict.UserDict))
self.assertRaises(TypeError, dict.fromkeys)
class Exc(Exception): pass
class baddict1(self.type2test):
def __init__(self):
raise Exc()
self.assertRaises(Exc, baddict1.fromkeys, [1])
class BadSeq(object):
def __iter__(self):
return self
def next(self):
raise Exc()
self.assertRaises(Exc, self.type2test.fromkeys, BadSeq())
class baddict2(self.type2test):
def __setitem__(self, key, value):
raise Exc()
self.assertRaises(Exc, baddict2.fromkeys, [1])
def test_copy(self):
d = self._full_mapping({1:1, 2:2, 3:3})
self.assertEqual(d.copy(), {1:1, 2:2, 3:3})
d = self._empty_mapping()
self.assertEqual(d.copy(), d)
self.assert_(isinstance(d.copy(), d.__class__))
self.assertRaises(TypeError, d.copy, None)
def test_get(self):
BasicTestMappingProtocol.test_get(self)
d = self._empty_mapping()
self.assert_(d.get('c') is None)
self.assertEqual(d.get('c', 3), 3)
d = self._full_mapping({'a' : 1, 'b' : 2})
self.assert_(d.get('c') is None)
self.assertEqual(d.get('c', 3), 3)
self.assertEqual(d.get('a'), 1)
self.assertEqual(d.get('a', 3), 1)
def test_setdefault(self):
BasicTestMappingProtocol.test_setdefault(self)
d = self._empty_mapping()
self.assert_(d.setdefault('key0') is None)
d.setdefault('key0', [])
self.assert_(d.setdefault('key0') is None)
d.setdefault('key', []).append(3)
self.assertEqual(d['key'][0], 3)
d.setdefault('key', []).append(4)
self.assertEqual(len(d['key']), 2)
def test_popitem(self):
BasicTestMappingProtocol.test_popitem(self)
for copymode in -1, +1:
# -1: b has same structure as a
# +1: b is a.copy()
for log2size in range(12):
size = 2**log2size
a = self._empty_mapping()
b = self._empty_mapping()
for i in range(size):
a[repr(i)] = i
if copymode < 0:
b[repr(i)] = i
if copymode > 0:
b = a.copy()
for i in range(size):
ka, va = ta = a.popitem()
self.assertEqual(va, int(ka))
kb, vb = tb = b.popitem()
self.assertEqual(vb, int(kb))
self.assert_(not(copymode < 0 and ta != tb))
self.assert_(not a)
self.assert_(not b)
def test_pop(self):
BasicTestMappingProtocol.test_pop(self)
# Tests for pop with specified key
d = self._empty_mapping()
k, v = 'abc', 'def'
# verify longs/ints get same value when key > 32 bits (for 64-bit archs)
# see SF bug #689659
x = 4503599627370496L
y = 4503599627370496
h = self._full_mapping({x: 'anything', y: 'something else'})
self.assertEqual(h[x], h[y])
self.assertEqual(d.pop(k, v), v)
d[k] = v
self.assertEqual(d.pop(k, 1), v)
class TestHashMappingProtocol(TestMappingProtocol):
def test_getitem(self):
TestMappingProtocol.test_getitem(self)
class Exc(Exception): pass
class BadEq(object):
def __eq__(self, other):
raise Exc()
def __hash__(self):
return 24
d = self._empty_mapping()
d[BadEq()] = 42
self.assertRaises(KeyError, d.__getitem__, 23)
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
d = self._empty_mapping()
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.__getitem__, x)
def test_fromkeys(self):
TestMappingProtocol.test_fromkeys(self)
class mydict(self.type2test):
def __new__(cls):
return UserDict.UserDict()
ud = mydict.fromkeys('ab')
self.assertEqual(ud, {'a':None, 'b':None})
self.assert_(isinstance(ud, UserDict.UserDict))
def test_pop(self):
TestMappingProtocol.test_pop(self)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
d = self._empty_mapping()
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.pop, x)
def test_mutatingiteration(self):
d = self._empty_mapping()
d[1] = 1
try:
for i in d:
d[i+1] = 1
except RuntimeError:
pass
else:
self.fail("changing dict size during iteration doesn't raise Error")
def test_repr(self):
d = self._empty_mapping()
self.assertEqual(repr(d), '{}')
d[1] = 2
self.assertEqual(repr(d), '{1: 2}')
d = self._empty_mapping()
d[1] = d
self.assertEqual(repr(d), '{1: {...}}')
class Exc(Exception): pass
class BadRepr(object):
def __repr__(self):
raise Exc()
d = self._full_mapping({1: BadRepr()})
self.assertRaises(Exc, repr, d)
def test_le(self):
self.assert_(not (self._empty_mapping() < self._empty_mapping()))
self.assert_(not (self._full_mapping({1: 2}) < self._full_mapping({1L: 2L})))
class Exc(Exception): pass
class BadCmp(object):
def __eq__(self, other):
raise Exc()
def __hash__(self):
return 42
d1 = self._full_mapping({BadCmp(): 1})
d2 = self._full_mapping({1: 1})
try:
d1 < d2
except Exc:
pass
else:
self.fail("< didn't raise Exc")
def test_setdefault(self):
TestMappingProtocol.test_setdefault(self)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
d = self._empty_mapping()
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.setdefault, x, [])
| apache-2.0 |
udrg/kalibr | aslam_offline_calibration/kalibr/python/kalibr_common/ImageDatasetReader.py | 3 | 5820 | import cv_bridge
import cv2
import rosbag
import os
import numpy as np
import pylab as pl
import aslam_cv as acv
import sm
class BagImageDatasetReaderIterator(object):
def __init__(self, dataset, indices=None):
self.dataset = dataset
if indices is None:
self.indices = np.arange(dataset.numImages())
else:
self.indices = indices
self.iter = self.indices.__iter__()
def __iter__(self):
return self
def next(self):
idx = self.iter.next()
return self.dataset.getImage(idx)
class BagImageDatasetReader(object):
def __init__(self, bagfile, imagetopic, bag_from_to=None, perform_synchronization=False):
self.bagfile = bagfile
self.topic = imagetopic
self.perform_synchronization = perform_synchronization
self.bag = rosbag.Bag(bagfile)
self.uncompress = None
if imagetopic is None:
raise RuntimeError(
"Please pass in a topic name referring to the image stream in the bag file\n{0}".format(self.bag))
self.CVB = cv_bridge.CvBridge()
# Get the message indices
conx = self.bag._get_connections(topics=imagetopic)
indices = self.bag._get_indexes(conx)
try:
self.index = indices.next()
except:
raise RuntimeError("Could not find topic {0} in {1}.".format(imagetopic, self.bagfile))
self.indices = np.arange(len(self.index))
# sort the indices by header.stamp
self.indices = self.sortByTime(self.indices)
# go through the bag and remove the indices outside the timespan [bag_start_time, bag_end_time]
if bag_from_to:
self.indices = self.truncateIndicesFromTime(self.indices, bag_from_to)
    # sort the ros messages by the header time, not the bag message time
def sortByTime(self, indices):
self.timestamp_corrector = sm.DoubleTimestampCorrector()
timestamps = list()
for idx in self.indices:
topic, data, stamp = self.bag._read_message(self.index[idx].position)
timestamp = data.header.stamp.secs * 1e9 + data.header.stamp.nsecs
timestamps.append(timestamp)
if self.perform_synchronization:
self.timestamp_corrector.correctTimestamp(data.header.stamp.to_sec(),
stamp.to_sec())
sorted_tuples = sorted(zip(timestamps, indices))
sorted_indices = [tuple_value[1] for tuple_value in sorted_tuples]
return sorted_indices
def truncateIndicesFromTime(self, indices, bag_from_to):
# get the timestamps
timestamps = list()
for idx in self.indices:
topic, data, stamp = self.bag._read_message(self.index[idx].position)
timestamp = data.header.stamp.secs + data.header.stamp.nsecs / 1.0e9
timestamps.append(timestamp)
bagstart = min(timestamps)
baglength = max(timestamps) - bagstart
# some value checking
if bag_from_to[0] >= bag_from_to[1]:
raise RuntimeError("Bag start time must be bigger than end time.".format(bag_from_to[0]))
if bag_from_to[0] < 0.0:
sm.logWarn("Bag start time of {0} s is smaller 0".format(bag_from_to[0]))
if bag_from_to[1] > baglength:
sm.logWarn("Bag end time of {0} s is bigger than the total length of {1} s".format(
bag_from_to[1], baglength))
# find the valid timestamps
valid_indices = []
for idx, timestamp in enumerate(timestamps):
if timestamp >= (bagstart + bag_from_to[0]) and timestamp <= (bagstart + bag_from_to[1]):
valid_indices.append(idx)
sm.logWarn(
"BagImageDatasetReader: truncated {0} / {1} images.".format(len(indices) - len(valid_indices), len(indices)))
return valid_indices
def __iter__(self):
# Reset the bag reading
return self.readDataset()
def readDataset(self):
return BagImageDatasetReaderIterator(self, self.indices)
def readDatasetShuffle(self):
indices = self.indices
np.random.shuffle(indices)
return BagImageDatasetReaderIterator(self, indices)
def numImages(self):
return len(self.indices)
def getImage(self, idx):
topic, data, stamp = self.bag._read_message(self.index[idx].position)
if self.perform_synchronization:
timestamp = acv.Time(self.timestamp_corrector.getLocalTime(
data.header.stamp.to_sec()))
else:
timestamp = acv.Time(data.header.stamp.secs,
data.header.stamp.nsecs)
if data._type == 'mv_cameras/ImageSnappyMsg':
if self.uncompress is None:
from snappy import uncompress
self.uncompress = uncompress
img_data = np.reshape(self.uncompress(np.fromstring(
data.data, dtype='uint8')), (data.height, data.width), order="C")
elif data.encoding == "16UC1" or data.encoding == "mono16":
image_16u = np.array(self.CVB.imgmsg_to_cv2(data))
img_data = (image_16u / 256).astype("uint8")
elif data.encoding == "8UC1" or data.encoding == "mono8":
img_data = np.array(self.CVB.imgmsg_to_cv2(data))
elif data.encoding == "8UC3" or data.encoding == "bgr8":
img_data = np.array(self.CVB.imgmsg_to_cv2(data))
img_data = cv2.cvtColor(img_data, cv2.COLOR_BGR2GRAY)
elif data.encoding == "rgb8":
img_data = np.array(self.CVB.imgmsg_to_cv2(data))
img_data = cv2.cvtColor(img_data, cv2.COLOR_RGB2GRAY)
elif data.encoding == "8UC4" or data.encoding == "bgra8":
img_data = np.array(self.CVB.imgmsg_to_cv2(data))
img_data = cv2.cvtColor(img_data, cv2.COLOR_BGRA2GRAY)
elif data.encoding == "bayer_rggb8":
img_data = np.array(self.CVB.imgmsg_to_cv2(data))
img_data = cv2.cvtColor(img_data, cv2.COLOR_BAYER_BG2GRAY)
else:
            raise RuntimeError(
                "Unsupported Image format '{}' (Supported are: 16UC1 / mono16, 8UC1 / mono8, 8UC3 / rgb8 / bgr8, 8UC4 / bgra8, bayer_rggb8 and ImageSnappyMsg)".format(data.encoding))
return (timestamp, img_data)
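# A minimal usage sketch (illustrative only; the bag path and image topic
# below are assumptions, not part of this module):
#
#   reader = BagImageDatasetReader('data.bag', '/cam0/image_raw',
#                                  bag_from_to=[5.0, 50.0])
#   for timestamp, image in reader:
#       pass  # image is a greyscale uint8 numpy array, timestamp an acv.Time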
| bsd-3-clause |
muffinresearch/addons-server | scripts/xpitool.py | 25 | 1149 | #!/usr/bin/env python
import optparse
import os
import subprocess
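# Example invocations (illustrative):
#   xpitool.py -x addon-1.0.xpi        # unzips into a new addon-1.0/ directory next to the .xpi
#   xpitool.py -c /path/to/addon-1.0/  # recreates /path/to/addon-1.0.xpi from that directory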
def main():
p = optparse.OptionParser(
usage='%prog [options] [-x addon-1.0.xpi] [-c /path/to/addon-1.0/]')
p.add_option('-x', '--extract',
help='Extracts xpi into current directory',
action='store_true')
p.add_option('-c', '--recreate',
help='Zips an extracted xpi into current directory',
action='store_true')
(options, args) = p.parse_args()
if len(args) != 1:
p.error("Incorrect usage")
addon = os.path.abspath(args[0])
if options.extract:
d = os.path.splitext(addon)[0]
os.mkdir(d)
os.chdir(d)
subprocess.check_call(['unzip', addon])
print "Extracted to %s" % d
elif options.recreate:
xpi = "%s.xpi" % addon
if os.path.exists(xpi):
p.error("Refusing to overwrite %r" % xpi)
os.chdir(addon)
subprocess.check_call(['zip', '-r', xpi] + os.listdir(os.getcwd()))
print "Created %s" % xpi
else:
p.error("Incorrect usage")
if __name__ == '__main__':
main()
| bsd-3-clause |
PairMhai/Backend | membership/admin.py | 1 | 3760 | from django.contrib import admin
from .models import Customer, Class, User
from payment.models import CreditCard
from allauth.account.models import EmailAddress
# -------------------------------------
# custom auth user
# -------------------------------------
from django import forms
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
class UserCreationForm(forms.ModelForm):
"""A form for creating new users."""
password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
password2 = forms.CharField(
label='Password confirmation', widget=forms.PasswordInput)
class Meta:
model = User
fields = ('first_name', 'last_name', 'telephone',
'address', 'date_of_birth', 'gender')
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords don't match")
return password2
def save(self, commit=True):
# Save the provided password in hashed format
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class UserChangeForm(forms.ModelForm):
"""A form for updating users"""
password = ReadOnlyPasswordHashField()
class Meta:
model = User
fields = ('first_name', 'last_name', 'telephone', 'gender',
'address', 'password', 'date_of_birth', 'is_active', 'is_staff')
def clean_password(self):
return self.initial["password"]
class EmailInline(admin.TabularInline):
model = EmailAddress
extra = 1
class UserAdmin(BaseUserAdmin):
add_form = UserCreationForm
form = UserChangeForm
readonly_fields = ('id',)
list_display = ('username', 'is_staff')
list_filter = ('first_name', 'telephone', 'gender')
inlines = [EmailInline]
fieldsets = (
(None, {
'fields': ('id', 'username', 'password')}),
('Personal info', {
'fields': ('first_name', 'last_name')}),
        ('Additional personal info', {
'fields': ('date_of_birth', 'gender', 'telephone', 'address')}),
('Permissions', {'fields': (
'is_active', 'is_staff', 'is_superuser', 'groups', 'user_permissions')}),
('Important dates', {'fields': ('last_login', 'date_joined')}),
)
# add_fieldsets is not a standard ModelAdmin attribute. UserAdmin
# overrides get_fieldsets to use this attribute when creating a user.
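    # A rough sketch of how BaseUserAdmin.get_fieldsets is assumed to consult
    # add_fieldsets (simplified; see django.contrib.auth.admin for the actual code):
    #
    #     def get_fieldsets(self, request, obj=None):
    #         if not obj:
    #             return self.add_fieldsets
    #         return super(UserAdmin, self).get_fieldsets(request, obj)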
add_fieldsets = (
(None, {'fields': ('id', 'username', 'password1', 'password2')}),
('Personal info', {'fields': ('first_name', 'last_name')}),
        ('Additional personal info', {
'fields': ('date_of_birth', 'gender', 'telephone', 'address')})
)
search_fields = ('first_name', 'telephone', 'address')
ordering = ('first_name', )
admin.site.register(User, UserAdmin)
# -------------------------------------
# other
# -------------------------------------
class CreditCardInline(admin.TabularInline):
model = CreditCard
extra = 1
class CustomerAdmin(admin.ModelAdmin):
readonly_fields = ('id',)
inlines = [CreditCardInline]
list_filter = ('classes',)
class Meta:
model = Customer
class ClassAdmin(admin.ModelAdmin):
class Meta:
model = Class
admin.site.register(Customer, CustomerAdmin)
admin.site.register(Class, ClassAdmin)
| agpl-3.0 |
kevcooper/bitcoin | test/functional/bumpfee.py | 1 | 13631 | #!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the bumpfee RPC.
Verifies that the bumpfee RPC creates replacement transactions successfully when
its preconditions are met, and returns appropriate errors in other cases.
This module consists of around a dozen individual test cases implemented in the
top-level functions named as test_<test_case_description>. The test functions
can be disabled or reordered if needed for debugging. If new test cases are
added in the future, they should try to follow the same convention and not
make assumptions about execution order.
"""
from segwit import send_to_witness
from test_framework.test_framework import BitcoinTestFramework
from test_framework import blocktools
from test_framework.mininode import CTransaction
from test_framework.util import *
import io
# Sequence number that is BIP 125 opt-in and BIP 68-compliant
BIP125_SEQUENCE_NUMBER = 0xfffffffd
WALLET_PASSPHRASE = "test"
WALLET_PASSPHRASE_TIMEOUT = 3600
class BumpFeeTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = True
def setup_network(self, split=False):
extra_args = [["-prematurewitness", "-walletprematurewitness", "-walletrbf={}".format(i)]
for i in range(self.num_nodes)]
self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
# Encrypt wallet for test_locked_wallet_fails test
self.nodes[1].node_encrypt_wallet(WALLET_PASSPHRASE)
self.nodes[1] = self.start_node(1, self.options.tmpdir, extra_args[1])
self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
def run_test(self):
peer_node, rbf_node = self.nodes
rbf_node_address = rbf_node.getnewaddress()
# fund rbf node with 10 coins of 0.001 btc (100,000 satoshis)
self.log.info("Mining blocks...")
peer_node.generate(110)
self.sync_all()
for i in range(25):
peer_node.sendtoaddress(rbf_node_address, 0.001)
self.sync_all()
peer_node.generate(1)
self.sync_all()
assert_equal(rbf_node.getbalance(), Decimal("0.025"))
self.log.info("Running tests")
dest_address = peer_node.getnewaddress()
test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address)
test_segwit_bumpfee_succeeds(rbf_node, dest_address)
test_nonrbf_bumpfee_fails(peer_node, dest_address)
test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
test_small_output_fails(rbf_node, dest_address)
test_dust_to_fee(rbf_node, dest_address)
test_settxfee(rbf_node, dest_address)
test_rebumping(rbf_node, dest_address)
test_rebumping_not_replaceable(rbf_node, dest_address)
test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
test_bumpfee_metadata(rbf_node, dest_address)
test_locked_wallet_fails(rbf_node, dest_address)
self.log.info("Success")
def test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbftx = rbf_node.gettransaction(rbfid)
sync_mempools((rbf_node, peer_node))
assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert_equal(bumped_tx["errors"], [])
assert bumped_tx["fee"] - abs(rbftx["fee"]) > 0
# check that bumped_tx propagates, original tx was evicted and has a wallet conflict
sync_mempools((rbf_node, peer_node))
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert bumped_tx["txid"] in peer_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
assert rbfid not in peer_node.getrawmempool()
oldwtx = rbf_node.gettransaction(rbfid)
assert len(oldwtx["walletconflicts"]) > 0
# check wallet transaction replaces and replaced_by values
bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
assert_equal(bumpedwtx["replaces_txid"], rbfid)
def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
# Create a transaction with segwit output, then create an RBF transaction
# which spends it, and make sure bumpfee can be called on it.
segwit_in = next(u for u in rbf_node.listunspent() if u["amount"] == Decimal("0.001"))
segwit_out = rbf_node.validateaddress(rbf_node.getnewaddress())
rbf_node.addwitnessaddress(segwit_out["address"])
segwitid = send_to_witness(
use_p2wsh=False,
node=rbf_node,
utxo=segwit_in,
pubkey=segwit_out["pubkey"],
encode_p2sh=False,
amount=Decimal("0.0009"),
sign=True)
rbfraw = rbf_node.createrawtransaction([{
'txid': segwitid,
'vout': 0,
"sequence": BIP125_SEQUENCE_NUMBER
}], {dest_address: Decimal("0.0005"),
rbf_node.getrawchangeaddress(): Decimal("0.0003")})
rbfsigned = rbf_node.signrawtransaction(rbfraw)
rbfid = rbf_node.sendrawtransaction(rbfsigned["hex"])
assert rbfid in rbf_node.getrawmempool()
bumped_tx = rbf_node.bumpfee(rbfid)
assert bumped_tx["txid"] in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
def test_nonrbf_bumpfee_fails(peer_node, dest_address):
# cannot replace a non RBF transaction (from node which did not enable RBF)
not_rbfid = peer_node.sendtoaddress(dest_address, Decimal("0.00090000"))
assert_raises_jsonrpc(-4, "not BIP 125 replaceable", peer_node.bumpfee, not_rbfid)
def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
# cannot bump fee unless the tx has only inputs that we own.
# here, the rbftx has a peer_node coin and then adds a rbf_node input
# Note that this test depends upon the RPC code checking input ownership prior to change outputs
# (since it can't use fundrawtransaction, it lacks a proper change output)
utxos = [node.listunspent()[-1] for node in (rbf_node, peer_node)]
inputs = [{
"txid": utxo["txid"],
"vout": utxo["vout"],
"address": utxo["address"],
"sequence": BIP125_SEQUENCE_NUMBER
} for utxo in utxos]
output_val = sum(utxo["amount"] for utxo in utxos) - Decimal("0.001")
rawtx = rbf_node.createrawtransaction(inputs, {dest_address: output_val})
signedtx = rbf_node.signrawtransaction(rawtx)
signedtx = peer_node.signrawtransaction(signedtx["hex"])
rbfid = rbf_node.sendrawtransaction(signedtx["hex"])
assert_raises_jsonrpc(-4, "Transaction contains inputs that don't belong to this wallet",
rbf_node.bumpfee, rbfid)
def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address):
# cannot bump fee if the transaction has a descendant
# parent is send-to-self, so we don't have to check which output is change when creating the child tx
parent_id = spend_one_input(rbf_node, rbf_node_address)
tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}], {dest_address: 0.00020000})
tx = rbf_node.signrawtransaction(tx)
txid = rbf_node.sendrawtransaction(tx["hex"])
assert_raises_jsonrpc(-8, "Transaction has descendants in the wallet", rbf_node.bumpfee, parent_id)
def test_small_output_fails(rbf_node, dest_address):
# cannot bump fee with a too-small output
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.bumpfee(rbfid, {"totalFee": 50000})
rbfid = spend_one_input(rbf_node, dest_address)
assert_raises_jsonrpc(-4, "Change output is too small", rbf_node.bumpfee, rbfid, {"totalFee": 50001})
def test_dust_to_fee(rbf_node, dest_address):
# check that if output is reduced to dust, it will be converted to fee
# the bumped tx sets fee=49,900, but it converts to 50,000
rbfid = spend_one_input(rbf_node, dest_address)
fulltx = rbf_node.getrawtransaction(rbfid, 1)
bumped_tx = rbf_node.bumpfee(rbfid, {"totalFee": 49900})
full_bumped_tx = rbf_node.getrawtransaction(bumped_tx["txid"], 1)
assert_equal(bumped_tx["fee"], Decimal("0.00050000"))
assert_equal(len(fulltx["vout"]), 2)
assert_equal(len(full_bumped_tx["vout"]), 1) #change output is eliminated
def test_settxfee(rbf_node, dest_address):
# check that bumpfee reacts correctly to the use of settxfee (paytxfee)
rbfid = spend_one_input(rbf_node, dest_address)
requested_feerate = Decimal("0.00025000")
rbf_node.settxfee(requested_feerate)
bumped_tx = rbf_node.bumpfee(rbfid)
actual_feerate = bumped_tx["fee"] * 1000 / rbf_node.getrawtransaction(bumped_tx["txid"], True)["size"]
# Assert that the difference between the requested feerate and the actual
# feerate of the bumped transaction is small.
assert_greater_than(Decimal("0.00001000"), abs(requested_feerate - actual_feerate))
rbf_node.settxfee(Decimal("0.00000000")) # unset paytxfee
def test_rebumping(rbf_node, dest_address):
# check that re-bumping the original tx fails, but bumping the bumper succeeds
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 2000})
assert_raises_jsonrpc(-4, "already bumped", rbf_node.bumpfee, rbfid, {"totalFee": 3000})
rbf_node.bumpfee(bumped["txid"], {"totalFee": 3000})
def test_rebumping_not_replaceable(rbf_node, dest_address):
# check that re-bumping a non-replaceable bump tx fails
rbfid = spend_one_input(rbf_node, dest_address)
bumped = rbf_node.bumpfee(rbfid, {"totalFee": 10000, "replaceable": False})
assert_raises_jsonrpc(-4, "Transaction is not BIP 125 replaceable", rbf_node.bumpfee, bumped["txid"],
{"totalFee": 20000})
def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
# check that unconfirmed outputs from bumped transactions are not spendable
rbfid = spend_one_input(rbf_node, rbf_node_address)
rbftx = rbf_node.gettransaction(rbfid)["hex"]
assert rbfid in rbf_node.getrawmempool()
bumpid = rbf_node.bumpfee(rbfid)["txid"]
assert bumpid in rbf_node.getrawmempool()
assert rbfid not in rbf_node.getrawmempool()
# check that outputs from the bump transaction are not spendable
# due to the replaces_txid check in CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], [])
# submit a block with the rbf tx to clear the bump tx out of the mempool,
# then call abandon to make sure the wallet doesn't attempt to resubmit the
# bump tx, then invalidate the block so the rbf tx will be put back in the
# mempool. this makes it possible to check whether the rbf tx outputs are
# spendable before the rbf tx is confirmed.
block = submit_block_with_tx(rbf_node, rbftx)
rbf_node.abandontransaction(bumpid)
rbf_node.invalidateblock(block.hash)
assert bumpid not in rbf_node.getrawmempool()
assert rbfid in rbf_node.getrawmempool()
# check that outputs from the rbf tx are not spendable before the
# transaction is confirmed, due to the replaced_by_txid check in
# CWallet::AvailableCoins
assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], [])
# check that the main output from the rbf tx is spendable after confirmed
rbf_node.generate(1)
assert_equal(
sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
def test_bumpfee_metadata(rbf_node, dest_address):
rbfid = rbf_node.sendtoaddress(dest_address, Decimal("0.00100000"), "comment value", "to value")
bumped_tx = rbf_node.bumpfee(rbfid)
bumped_wtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(bumped_wtx["comment"], "comment value")
assert_equal(bumped_wtx["to"], "to value")
def test_locked_wallet_fails(rbf_node, dest_address):
rbfid = spend_one_input(rbf_node, dest_address)
rbf_node.walletlock()
assert_raises_jsonrpc(-13, "Please enter the wallet passphrase with walletpassphrase first.",
rbf_node.bumpfee, rbfid)
def spend_one_input(node, dest_address):
tx_input = dict(
sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in node.listunspent() if u["amount"] == Decimal("0.00100000")))
rawtx = node.createrawtransaction(
[tx_input], {dest_address: Decimal("0.00050000"),
node.getrawchangeaddress(): Decimal("0.00049000")})
signedtx = node.signrawtransaction(rawtx)
txid = node.sendrawtransaction(signedtx["hex"])
return txid
def submit_block_with_tx(node, tx):
ctx = CTransaction()
ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx)))
tip = node.getbestblockhash()
height = node.getblockcount() + 1
block_time = node.getblockheader(tip)["mediantime"] + 1
block = blocktools.create_block(int(tip, 16), blocktools.create_coinbase(height), block_time)
block.vtx.append(ctx)
block.rehash()
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
node.submitblock(bytes_to_hex_str(block.serialize(True)))
return block
if __name__ == "__main__":
BumpFeeTest().main()
| mit |
lthurlow/Boolean-Constrained-Routing | networkx-1.8.1/build/lib.linux-i686-2.7/networkx/classes/digraph.py | 23 | 39533 | """Base class for directed graphs."""
# Copyright (C) 2004-2011 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
from copy import deepcopy
import networkx as nx
from networkx.classes.graph import Graph
from networkx.exception import NetworkXError
import networkx.convert as convert
__author__ = """\n""".join(['Aric Hagberg ([email protected])',
'Pieter Swart ([email protected])',
'Dan Schult([email protected])'])
class DiGraph(Graph):
"""
Base class for directed graphs.
A DiGraph stores nodes and edges with optional data, or attributes.
DiGraphs hold directed edges. Self loops are allowed but multiple
(parallel) edges are not.
Nodes can be arbitrary (hashable) Python objects with optional
key/value attributes.
Edges are represented as links between nodes with optional
key/value attributes.
Parameters
----------
data : input graph
Data to initialize graph. If data=None (default) an empty
graph is created. The data can be an edge list, or any
NetworkX graph object. If the corresponding optional Python
packages are installed the data can also be a NumPy matrix
or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
See Also
--------
Graph
MultiGraph
MultiDiGraph
Examples
--------
Create an empty graph structure (a "null graph") with no nodes and
no edges.
>>> G = nx.DiGraph()
G can be grown in several ways.
**Nodes:**
Add one node at a time:
>>> G.add_node(1)
Add the nodes from any container (a list, dict, set or
even the lines from a file or the nodes from another graph).
>>> G.add_nodes_from([2,3])
>>> G.add_nodes_from(range(100,110))
>>> H=nx.Graph()
>>> H.add_path([0,1,2,3,4,5,6,7,8,9])
>>> G.add_nodes_from(H)
In addition to strings and integers any hashable Python object
(except None) can represent a node, e.g. a customized node object,
or even another Graph.
>>> G.add_node(H)
**Edges:**
G can also be grown by adding edges.
Add one edge,
>>> G.add_edge(1, 2)
a list of edges,
>>> G.add_edges_from([(1,2),(1,3)])
or a collection of edges,
>>> G.add_edges_from(H.edges())
If some edges connect nodes not yet in the graph, the nodes
are added automatically. There are no errors when adding
nodes or edges that already exist.
**Attributes:**
Each graph, node, and edge can hold key/value attribute pairs
in an associated attribute dictionary (the keys must be hashable).
By default these are empty, but can be added or changed using
add_edge, add_node or direct manipulation of the attribute
dictionaries named graph, node and edge respectively.
>>> G = nx.DiGraph(day="Friday")
>>> G.graph
{'day': 'Friday'}
Add node attributes using add_node(), add_nodes_from() or G.node
>>> G.add_node(1, time='5pm')
>>> G.add_nodes_from([3], time='2pm')
>>> G.node[1]
{'time': '5pm'}
>>> G.node[1]['room'] = 714
>>> del G.node[1]['room'] # remove attribute
>>> G.nodes(data=True)
[(1, {'time': '5pm'}), (3, {'time': '2pm'})]
Warning: adding a node to G.node does not add it to the graph.
Add edge attributes using add_edge(), add_edges_from(), subscript
notation, or G.edge.
>>> G.add_edge(1, 2, weight=4.7 )
>>> G.add_edges_from([(3,4),(4,5)], color='red')
>>> G.add_edges_from([(1,2,{'color':'blue'}), (2,3,{'weight':8})])
>>> G[1][2]['weight'] = 4.7
>>> G.edge[1][2]['weight'] = 4
**Shortcuts:**
Many common graph features allow python syntax to speed reporting.
>>> 1 in G # check if node in graph
True
>>> [n for n in G if n<3] # iterate through nodes
[1, 2]
>>> len(G) # number of nodes in graph
5
The fastest way to traverse all edges of a graph is via
adjacency_iter(), but the edges() method is often more convenient.
>>> for n,nbrsdict in G.adjacency_iter():
... for nbr,eattr in nbrsdict.items():
... if 'weight' in eattr:
... (n,nbr,eattr['weight'])
(1, 2, 4)
(2, 3, 8)
>>> [ (u,v,edata['weight']) for u,v,edata in G.edges(data=True) if 'weight' in edata ]
[(1, 2, 4), (2, 3, 8)]
**Reporting:**
Simple graph information is obtained using methods.
Iterator versions of many reporting methods exist for efficiency.
Methods exist for reporting nodes(), edges(), neighbors() and degree()
as well as the number of nodes and edges.
For details on these and other miscellaneous methods, see below.
"""
def __init__(self, data=None, **attr):
"""Initialize a graph with edges, name, graph attributes.
Parameters
----------
data : input graph
Data to initialize graph. If data=None (default) an empty
graph is created. The data can be an edge list, or any
NetworkX graph object. If the corresponding optional Python
packages are installed the data can also be a NumPy matrix
or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
name : string, optional (default='')
An optional name for the graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
See Also
--------
convert
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G = nx.Graph(name='my graph')
>>> e = [(1,2),(2,3),(3,4)] # list of edges
>>> G = nx.Graph(e)
Arbitrary graph attribute pairs (key=value) may be assigned
>>> G=nx.Graph(e, day="Friday")
>>> G.graph
{'day': 'Friday'}
"""
self.graph = {} # dictionary for graph attributes
self.node = {} # dictionary for node attributes
# We store two adjacency lists:
# the predecessors of node n are stored in the dict self.pred
# the successors of node n are stored in the dict self.succ=self.adj
self.adj = {} # empty adjacency dictionary
self.pred = {} # predecessor
self.succ = self.adj # successor
# attempt to load graph with data
if data is not None:
convert.to_networkx_graph(data,create_using=self)
# load graph attributes (must be after convert)
self.graph.update(attr)
self.edge=self.adj
def add_node(self, n, attr_dict=None, **attr):
"""Add a single node n and update node attributes.
Parameters
----------
n : node
A node can be any hashable Python object except None.
attr_dict : dictionary, optional (default= no attributes)
Dictionary of node attributes. Key/value pairs will
update existing data associated with the node.
attr : keyword arguments, optional
Set or change attributes using key=value.
See Also
--------
add_nodes_from
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_node(1)
>>> G.add_node('Hello')
>>> K3 = nx.Graph([(0,1),(1,2),(2,0)])
>>> G.add_node(K3)
>>> G.number_of_nodes()
3
Use keywords set/change node attributes:
>>> G.add_node(1,size=10)
>>> G.add_node(3,weight=0.4,UTM=('13S',382871,3972649))
Notes
-----
A hashable object is one that can be used as a key in a Python
dictionary. This includes strings, numbers, tuples of strings
and numbers, etc.
On many platforms hashable items also include mutables such as
NetworkX Graphs, though one should be careful that the hash
doesn't change on mutables.
"""
# set up attribute dict
if attr_dict is None:
attr_dict=attr
else:
try:
attr_dict.update(attr)
except AttributeError:
raise NetworkXError(\
"The attr_dict argument must be a dictionary.")
if n not in self.succ:
self.succ[n] = {}
self.pred[n] = {}
self.node[n] = attr_dict
else: # update attr even if node already exists
self.node[n].update(attr_dict)
def add_nodes_from(self, nodes, **attr):
"""Add multiple nodes.
Parameters
----------
nodes : iterable container
A container of nodes (list, dict, set, etc.).
OR
A container of (node, attribute dict) tuples.
Node attributes are updated using the attribute dict.
attr : keyword arguments, optional (default= no attributes)
Update attributes for all nodes in nodes.
Node attributes specified in nodes as a tuple
take precedence over attributes specified generally.
See Also
--------
add_node
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_nodes_from('Hello')
>>> K3 = nx.Graph([(0,1),(1,2),(2,0)])
>>> G.add_nodes_from(K3)
>>> sorted(G.nodes(),key=str)
[0, 1, 2, 'H', 'e', 'l', 'o']
Use keywords to update specific node attributes for every node.
>>> G.add_nodes_from([1,2], size=10)
>>> G.add_nodes_from([3,4], weight=0.4)
Use (node, attrdict) tuples to update attributes for specific
nodes.
>>> G.add_nodes_from([(1,dict(size=11)), (2,{'color':'blue'})])
>>> G.node[1]['size']
11
>>> H = nx.Graph()
>>> H.add_nodes_from(G.nodes(data=True))
>>> H.node[1]['size']
11
"""
for n in nodes:
try:
newnode=n not in self.succ
except TypeError:
nn,ndict = n
if nn not in self.succ:
self.succ[nn] = {}
self.pred[nn] = {}
newdict = attr.copy()
newdict.update(ndict)
self.node[nn] = newdict
else:
olddict = self.node[nn]
olddict.update(attr)
olddict.update(ndict)
continue
if newnode:
self.succ[n] = {}
self.pred[n] = {}
self.node[n] = attr.copy()
else:
self.node[n].update(attr)
def remove_node(self, n):
"""Remove node n.
Removes the node n and all adjacent edges.
Attempting to remove a non-existent node will raise an exception.
Parameters
----------
n : node
A node in the graph
Raises
-------
NetworkXError
If n is not in the graph.
See Also
--------
remove_nodes_from
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2])
>>> G.edges()
[(0, 1), (1, 2)]
>>> G.remove_node(1)
>>> G.edges()
[]
"""
try:
nbrs=self.succ[n]
del self.node[n]
except KeyError: # NetworkXError if n not in self
raise NetworkXError("The node %s is not in the digraph."%(n,))
for u in nbrs:
del self.pred[u][n] # remove all edges n-u in digraph
del self.succ[n] # remove node from succ
for u in self.pred[n]:
del self.succ[u][n] # remove all edges n-u in digraph
del self.pred[n] # remove node from pred
def remove_nodes_from(self, nbunch):
"""Remove multiple nodes.
Parameters
----------
nodes : iterable container
A container of nodes (list, dict, set, etc.). If a node
in the container is not in the graph it is silently
ignored.
See Also
--------
remove_node
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2])
>>> e = G.nodes()
>>> e
[0, 1, 2]
>>> G.remove_nodes_from(e)
>>> G.nodes()
[]
"""
for n in nbunch:
try:
succs=self.succ[n]
del self.node[n]
for u in succs:
del self.pred[u][n] # remove all edges n-u in digraph
del self.succ[n] # now remove node
for u in self.pred[n]:
del self.succ[u][n] # remove all edges n-u in digraph
del self.pred[n] # now remove node
except KeyError:
pass # silent failure on remove
def add_edge(self, u, v, attr_dict=None, **attr):
"""Add an edge between u and v.
The nodes u and v will be automatically added if they are
not already in the graph.
Edge attributes can be specified with keywords or by providing
a dictionary with key/value pairs. See examples below.
Parameters
----------
u,v : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
attr_dict : dictionary, optional (default= no attributes)
Dictionary of edge attributes. Key/value pairs will
update existing data associated with the edge.
attr : keyword arguments, optional
Edge data (or labels or objects) can be assigned using
keyword arguments.
See Also
--------
add_edges_from : add a collection of edges
Notes
-----
Adding an edge that already exists updates the edge data.
Many NetworkX algorithms designed for weighted graphs use as
the edge weight a numerical value assigned to a keyword
which by default is 'weight'.
Examples
--------
The following all add the edge e=(1,2) to graph G:
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> e = (1,2)
>>> G.add_edge(1, 2) # explicit two-node form
>>> G.add_edge(*e) # single edge as tuple of two nodes
>>> G.add_edges_from( [(1,2)] ) # add edges from iterable container
Associate data to edges using keywords:
>>> G.add_edge(1, 2, weight=3)
>>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7)
"""
# set up attribute dict
if attr_dict is None:
attr_dict=attr
else:
try:
attr_dict.update(attr)
except AttributeError:
raise NetworkXError(\
"The attr_dict argument must be a dictionary.")
# add nodes
if u not in self.succ:
self.succ[u]={}
self.pred[u]={}
self.node[u] = {}
if v not in self.succ:
self.succ[v]={}
self.pred[v]={}
self.node[v] = {}
# add the edge
datadict=self.adj[u].get(v,{})
datadict.update(attr_dict)
self.succ[u][v]=datadict
self.pred[v][u]=datadict
def add_edges_from(self, ebunch, attr_dict=None, **attr):
"""Add all the edges in ebunch.
Parameters
----------
ebunch : container of edges
Each edge given in the container will be added to the
graph. The edges must be given as as 2-tuples (u,v) or
3-tuples (u,v,d) where d is a dictionary containing edge
data.
attr_dict : dictionary, optional (default= no attributes)
Dictionary of edge attributes. Key/value pairs will
update existing data associated with each edge.
attr : keyword arguments, optional
Edge data (or labels or objects) can be assigned using
keyword arguments.
See Also
--------
add_edge : add a single edge
add_weighted_edges_from : convenient way to add weighted edges
Notes
-----
Adding the same edge twice has no effect but any edge data
will be updated when each duplicate edge is added.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_edges_from([(0,1),(1,2)]) # using a list of edge tuples
>>> e = zip(range(0,3),range(1,4))
>>> G.add_edges_from(e) # Add the path graph 0-1-2-3
Associate data to edges
>>> G.add_edges_from([(1,2),(2,3)], weight=3)
>>> G.add_edges_from([(3,4),(1,4)], label='WN2898')
"""
# set up attribute dict
if attr_dict is None:
attr_dict=attr
else:
try:
attr_dict.update(attr)
except AttributeError:
raise NetworkXError(\
"The attr_dict argument must be a dict.")
# process ebunch
for e in ebunch:
ne = len(e)
if ne==3:
u,v,dd = e
assert hasattr(dd,"update")
elif ne==2:
u,v = e
dd = {}
else:
raise NetworkXError(\
"Edge tuple %s must be a 2-tuple or 3-tuple."%(e,))
if u not in self.succ:
self.succ[u] = {}
self.pred[u] = {}
self.node[u] = {}
if v not in self.succ:
self.succ[v] = {}
self.pred[v] = {}
self.node[v] = {}
datadict=self.adj[u].get(v,{})
datadict.update(attr_dict)
datadict.update(dd)
self.succ[u][v] = datadict
self.pred[v][u] = datadict
def remove_edge(self, u, v):
"""Remove the edge between u and v.
Parameters
----------
u,v: nodes
Remove the edge between nodes u and v.
Raises
------
NetworkXError
If there is not an edge between u and v.
See Also
--------
remove_edges_from : remove a collection of edges
Examples
--------
>>> G = nx.Graph() # or DiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.remove_edge(0,1)
>>> e = (1,2)
>>> G.remove_edge(*e) # unpacks e from an edge tuple
>>> e = (2,3,{'weight':7}) # an edge with attribute data
>>> G.remove_edge(*e[:2]) # select first part of edge tuple
"""
try:
del self.succ[u][v]
del self.pred[v][u]
except KeyError:
raise NetworkXError("The edge %s-%s not in graph."%(u,v))
def remove_edges_from(self, ebunch):
"""Remove all edges specified in ebunch.
Parameters
----------
ebunch: list or container of edge tuples
Each edge given in the list or container will be removed
from the graph. The edges can be:
- 2-tuples (u,v) edge between u and v.
- 3-tuples (u,v,k) where k is ignored.
See Also
--------
remove_edge : remove a single edge
Notes
-----
Will fail silently if an edge in ebunch is not in the graph.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> ebunch=[(1,2),(2,3)]
>>> G.remove_edges_from(ebunch)
"""
for e in ebunch:
(u,v)=e[:2] # ignore edge data
if u in self.succ and v in self.succ[u]:
del self.succ[u][v]
del self.pred[v][u]
def has_successor(self, u, v):
"""Return True if node u has successor v.
This is true if graph has the edge u->v.
"""
return (u in self.succ and v in self.succ[u])
def has_predecessor(self, u, v):
"""Return True if node u has predecessor v.
This is true if graph has the edge u<-v.
"""
return (u in self.pred and v in self.pred[u])
def successors_iter(self,n):
"""Return an iterator over successor nodes of n.
neighbors_iter() and successors_iter() are the same.
"""
try:
return iter(self.succ[n])
except KeyError:
raise NetworkXError("The node %s is not in the digraph."%(n,))
def predecessors_iter(self,n):
"""Return an iterator over predecessor nodes of n."""
try:
return iter(self.pred[n])
except KeyError:
raise NetworkXError("The node %s is not in the digraph."%(n,))
def successors(self, n):
"""Return a list of successor nodes of n.
neighbors() and successors() are the same function.
"""
return list(self.successors_iter(n))
def predecessors(self, n):
"""Return a list of predecessor nodes of n."""
return list(self.predecessors_iter(n))
# digraph definitions
neighbors = successors
neighbors_iter = successors_iter
def edges_iter(self, nbunch=None, data=False):
"""Return an iterator over the edges.
Edges are returned as tuples with optional data
in the order (node, neighbor, data).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
data : bool, optional (default=False)
If True, return edge attribute dict in 3-tuple (u,v,data).
Returns
-------
edge_iter : iterator
An iterator of (u,v) or (u,v,d) tuples of edges.
See Also
--------
edges : return a list of edges
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the out-edges.
Examples
--------
>>> G = nx.DiGraph() # or MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> [e for e in G.edges_iter()]
[(0, 1), (1, 2), (2, 3)]
>>> list(G.edges_iter(data=True)) # default data is {} (empty dict)
[(0, 1, {}), (1, 2, {}), (2, 3, {})]
>>> list(G.edges_iter([0,2]))
[(0, 1), (2, 3)]
>>> list(G.edges_iter(0))
[(0, 1)]
"""
if nbunch is None:
nodes_nbrs=self.adj.items()
else:
nodes_nbrs=((n,self.adj[n]) for n in self.nbunch_iter(nbunch))
if data:
for n,nbrs in nodes_nbrs:
for nbr,data in nbrs.items():
yield (n,nbr,data)
else:
for n,nbrs in nodes_nbrs:
for nbr in nbrs:
yield (n,nbr)
# alias out_edges to edges
out_edges_iter=edges_iter
out_edges=Graph.edges
def in_edges_iter(self, nbunch=None, data=False):
"""Return an iterator over the incoming edges.
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
data : bool, optional (default=False)
If True, return edge attribute dict in 3-tuple (u,v,data).
Returns
-------
in_edge_iter : iterator
An iterator of (u,v) or (u,v,d) tuples of incoming edges.
See Also
--------
edges_iter : return an iterator of edges
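        Examples
        --------
        A small sketch of the incoming-edge iterator (node labels are arbitrary):
        >>> G = nx.DiGraph([(0,1),(1,2)])
        >>> list(G.in_edges_iter([2]))
        [(1, 2)]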
"""
if nbunch is None:
nodes_nbrs=self.pred.items()
else:
nodes_nbrs=((n,self.pred[n]) for n in self.nbunch_iter(nbunch))
if data:
for n,nbrs in nodes_nbrs:
for nbr,data in nbrs.items():
yield (nbr,n,data)
else:
for n,nbrs in nodes_nbrs:
for nbr in nbrs:
yield (nbr,n)
def in_edges(self, nbunch=None, data=False):
"""Return a list of the incoming edges.
See Also
--------
edges : return a list of edges
"""
return list(self.in_edges_iter(nbunch, data))
def degree_iter(self, nbunch=None, weight=None):
"""Return an iterator for (node, degree).
The node degree is the number of edges adjacent to the node.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
Returns
-------
nd_iter : an iterator
The iterator returns two-tuples of (node, degree).
See Also
--------
degree, in_degree, out_degree, in_degree_iter, out_degree_iter
Examples
--------
>>> G = nx.DiGraph() # or MultiDiGraph
>>> G.add_path([0,1,2,3])
>>> list(G.degree_iter(0)) # node 0 with degree 1
[(0, 1)]
>>> list(G.degree_iter([0,1]))
[(0, 1), (1, 2)]
"""
if nbunch is None:
nodes_nbrs=zip(iter(self.succ.items()),iter(self.pred.items()))
else:
nodes_nbrs=zip(
((n,self.succ[n]) for n in self.nbunch_iter(nbunch)),
((n,self.pred[n]) for n in self.nbunch_iter(nbunch)))
if weight is None:
for (n,succ),(n2,pred) in nodes_nbrs:
yield (n,len(succ)+len(pred))
else:
# edge weighted graph - degree is sum of edge weights
for (n,succ),(n2,pred) in nodes_nbrs:
yield (n,
sum((succ[nbr].get(weight,1) for nbr in succ))+
sum((pred[nbr].get(weight,1) for nbr in pred)))
def in_degree_iter(self, nbunch=None, weight=None):
"""Return an iterator for (node, in-degree).
The node in-degree is the number of edges pointing in to the node.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
Returns
-------
nd_iter : an iterator
The iterator returns two-tuples of (node, in-degree).
See Also
--------
degree, in_degree, out_degree, out_degree_iter
Examples
--------
>>> G = nx.DiGraph()
>>> G.add_path([0,1,2,3])
>>> list(G.in_degree_iter(0)) # node 0 with degree 0
[(0, 0)]
>>> list(G.in_degree_iter([0,1]))
[(0, 0), (1, 1)]
"""
if nbunch is None:
nodes_nbrs=self.pred.items()
else:
nodes_nbrs=((n,self.pred[n]) for n in self.nbunch_iter(nbunch))
if weight is None:
for n,nbrs in nodes_nbrs:
yield (n,len(nbrs))
else:
# edge weighted graph - degree is sum of edge weights
for n,nbrs in nodes_nbrs:
yield (n, sum(data.get(weight,1) for data in nbrs.values()))
def out_degree_iter(self, nbunch=None, weight=None):
"""Return an iterator for (node, out-degree).
The node out-degree is the number of edges pointing out of the node.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
Returns
-------
nd_iter : an iterator
The iterator returns two-tuples of (node, out-degree).
See Also
--------
degree, in_degree, out_degree, in_degree_iter
Examples
--------
>>> G = nx.DiGraph()
>>> G.add_path([0,1,2,3])
>>> list(G.out_degree_iter(0)) # node 0 with degree 1
[(0, 1)]
>>> list(G.out_degree_iter([0,1]))
[(0, 1), (1, 1)]
"""
if nbunch is None:
nodes_nbrs=self.succ.items()
else:
nodes_nbrs=((n,self.succ[n]) for n in self.nbunch_iter(nbunch))
if weight is None:
for n,nbrs in nodes_nbrs:
yield (n,len(nbrs))
else:
# edge weighted graph - degree is sum of edge weights
for n,nbrs in nodes_nbrs:
yield (n, sum(data.get(weight,1) for data in nbrs.values()))
def in_degree(self, nbunch=None, weight=None):
"""Return the in-degree of a node or nodes.
The node in-degree is the number of edges pointing in to the node.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
Returns
-------
nd : dictionary, or number
A dictionary with nodes as keys and in-degree as values or
a number if a single node is specified.
See Also
--------
degree, out_degree, in_degree_iter
Examples
--------
>>> G = nx.DiGraph() # or MultiDiGraph
>>> G.add_path([0,1,2,3])
>>> G.in_degree(0)
0
>>> G.in_degree([0,1])
{0: 0, 1: 1}
>>> list(G.in_degree([0,1]).values())
[0, 1]
"""
if nbunch in self: # return a single node
return next(self.in_degree_iter(nbunch,weight))[1]
else: # return a dict
return dict(self.in_degree_iter(nbunch,weight))
def out_degree(self, nbunch=None, weight=None):
"""Return the out-degree of a node or nodes.
The node out-degree is the number of edges pointing out of the node.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
Returns
-------
nd : dictionary, or number
A dictionary with nodes as keys and out-degree as values or
a number if a single node is specified.
Examples
--------
>>> G = nx.DiGraph() # or MultiDiGraph
>>> G.add_path([0,1,2,3])
>>> G.out_degree(0)
1
>>> G.out_degree([0,1])
{0: 1, 1: 1}
>>> list(G.out_degree([0,1]).values())
[1, 1]
"""
if nbunch in self: # return a single node
return next(self.out_degree_iter(nbunch,weight))[1]
else: # return a dict
return dict(self.out_degree_iter(nbunch,weight))
def clear(self):
"""Remove all nodes and edges from the graph.
This also removes the name, and all graph, node, and edge attributes.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.clear()
>>> G.nodes()
[]
>>> G.edges()
[]
"""
self.succ.clear()
self.pred.clear()
self.node.clear()
self.graph.clear()
def is_multigraph(self):
"""Return True if graph is a multigraph, False otherwise."""
return False
def is_directed(self):
"""Return True if graph is directed, False otherwise."""
return True
def to_directed(self):
"""Return a directed copy of the graph.
Returns
-------
G : DiGraph
A deepcopy of the graph.
Notes
-----
This returns a "deepcopy" of the edge, node, and
graph attributes which attempts to completely copy
all of the data and references.
This is in contrast to the similar D=DiGraph(G) which returns a
shallow copy of the data.
See the Python copy module for more information on shallow
and deep copies, http://docs.python.org/library/copy.html.
Examples
--------
>>> G = nx.Graph() # or MultiGraph, etc
>>> G.add_path([0,1])
>>> H = G.to_directed()
>>> H.edges()
[(0, 1), (1, 0)]
If already directed, return a (deep) copy
>>> G = nx.DiGraph() # or MultiDiGraph, etc
>>> G.add_path([0,1])
>>> H = G.to_directed()
>>> H.edges()
[(0, 1)]
"""
return deepcopy(self)
def to_undirected(self, reciprocal=False):
"""Return an undirected representation of the digraph.
Parameters
----------
reciprocal : bool (optional)
If True only keep edges that appear in both directions
in the original digraph.
Returns
-------
G : Graph
An undirected graph with the same name and nodes and
with edge (u,v,data) if either (u,v,data) or (v,u,data)
is in the digraph. If both edges exist in digraph and
their edge data is different, only one edge is created
with an arbitrary choice of which edge data to use.
You must check and correct for this manually if desired.
Notes
-----
If edges in both directions (u,v) and (v,u) exist in the
graph, attributes for the new undirected edge will be a combination of
the attributes of the directed edges. The edge data is updated
in the (arbitrary) order that the edges are encountered. For
more customized control of the edge attributes use add_edge().
This returns a "deepcopy" of the edge, node, and
graph attributes which attempts to completely copy
all of the data and references.
This is in contrast to the similar G=DiGraph(D) which returns a
shallow copy of the data.
See the Python copy module for more information on shallow
and deep copies, http://docs.python.org/library/copy.html.
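        Examples
        --------
        A small sketch of both modes (see the Notes above for how edge data
        from reciprocal edges is merged):
        >>> D = nx.DiGraph([(0,1),(1,0),(1,2)])
        >>> sorted(D.to_undirected().edges())
        [(0, 1), (1, 2)]
        >>> D.to_undirected(reciprocal=True).edges()
        [(0, 1)]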
"""
H=Graph()
H.name=self.name
H.add_nodes_from(self)
if reciprocal is True:
H.add_edges_from( (u,v,deepcopy(d))
for u,nbrs in self.adjacency_iter()
for v,d in nbrs.items()
if v in self.pred[u])
else:
H.add_edges_from( (u,v,deepcopy(d))
for u,nbrs in self.adjacency_iter()
for v,d in nbrs.items() )
H.graph=deepcopy(self.graph)
H.node=deepcopy(self.node)
return H
def reverse(self, copy=True):
"""Return the reverse of the graph.
The reverse is a graph with the same nodes and edges
but with the directions of the edges reversed.
Parameters
----------
copy : bool optional (default=True)
If True, return a new DiGraph holding the reversed edges.
            If False, the reverse graph is created in place by reusing
            the original graph (this changes the original graph).
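        Examples
        --------
        A brief sketch using the default ``copy=True``:
        >>> G = nx.DiGraph([(0,1),(1,2)])
        >>> H = G.reverse()
        >>> sorted(H.edges())
        [(1, 0), (2, 1)]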
"""
if copy:
H = self.__class__(name="Reverse of (%s)"%self.name)
H.add_nodes_from(self)
H.add_edges_from( (v,u,deepcopy(d)) for u,v,d
in self.edges(data=True) )
H.graph=deepcopy(self.graph)
H.node=deepcopy(self.node)
else:
self.pred,self.succ=self.succ,self.pred
self.adj=self.succ
H=self
return H
def subgraph(self, nbunch):
"""Return the subgraph induced on nodes in nbunch.
The induced subgraph of the graph contains the nodes in nbunch
and the edges between those nodes.
Parameters
----------
nbunch : list, iterable
A container of nodes which will be iterated through once.
Returns
-------
G : Graph
A subgraph of the graph with the same edge attributes.
Notes
-----
The graph, edge or node attributes just point to the original graph.
So changes to the node or edge structure will not be reflected in
the original graph while changes to the attributes will.
To create a subgraph with its own copy of the edge/node attributes use:
nx.Graph(G.subgraph(nbunch))
If edge attributes are containers, a deep copy can be obtained using:
G.subgraph(nbunch).copy()
For an inplace reduction of a graph to a subgraph you can remove nodes:
        G.remove_nodes_from([n for n in G if n not in set(nbunch)])
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> H = G.subgraph([0,1,2])
>>> H.edges()
[(0, 1), (1, 2)]
"""
bunch = self.nbunch_iter(nbunch)
# create new graph and copy subgraph into it
H = self.__class__()
# copy node and attribute dictionaries
for n in bunch:
H.node[n]=self.node[n]
# namespace shortcuts for speed
H_succ=H.succ
H_pred=H.pred
self_succ=self.succ
# add nodes
for n in H:
H_succ[n]={}
H_pred[n]={}
# add edges
for u in H_succ:
Hnbrs=H_succ[u]
for v,datadict in self_succ[u].items():
if v in H_succ:
# add both representations of edge: u-v and v-u
Hnbrs[v]=datadict
H_pred[v][u]=datadict
H.graph=self.graph
return H
| mit |
fedspendingtransparency/data-act-validator | tests/jobTests.py | 1 | 11842 | from __future__ import print_function
from dataactcore.models.jobModels import JobDependency
from dataactvalidator.models.validationModels import Rule
from baseTest import BaseTest
import unittest
class JobTests(BaseTest):
@classmethod
def setUpClass(cls):
"""Set up class-wide resources (test data)"""
super(JobTests, cls).setUpClass()
#TODO: refactor into a pytest fixture
# Flag for testing a million+ errors (can take ~30 min to run)
cls.includeLongTests = False
validationDb = cls.validationDb
jobTracker = cls.jobTracker
# Clear validation rules
for fileType in ["award", "award_financial",
"appropriations", "program_activity"]:
validationDb.removeRulesByFileType(fileType)
validationDb.removeColumnsByFileType(fileType)
# Create submissions and get IDs back
submissionIDs = {}
for i in range(1, 17):
submissionIDs[i] = cls.insertSubmission(
jobTracker, userId=cls.userId)
csvFiles = {
"valid": {"filename": "testValid.csv", "status": "ready", "type": "csv_record_validation", "submissionLocalId": 1, "fileType": 1},
"bad_upload": {"filename": "", "status": "ready", "type": "file_upload", "submissionLocalId": 2, "fileType": 1},
"bad_prereq": {"filename": "", "status": "ready", "type": "csv_record_validation", "submissionLocalId" :2, "fileType": 1},
"wrong_type": {"filename": "", "status": "ready", "type": "external_validation", "submissionLocalId": 4, "fileType": 1},
"not_ready": {"filename": "", "status": "finished", "type": "csv_record_validation", "submissionLocalId": 5, "fileType": 1},
"valid_upload": {"filename": "", "status": "finished", "type": "file_upload", "submissionLocalId": 6, "fileType": 1},
"valid_prereq": {"filename": "testPrereq.csv", "status": "ready", "type": "csv_record_validation", "submissionLocalId": 6, "fileType": 1},
"bad_values": {"filename": "testBadValues.csv", "status": "ready", "type": "csv_record_validation", "submissionLocalId": 8, "fileType": 1},
"mixed": {"filename": "testMixed.csv", "status": "ready", "type": "csv_record_validation", "submissionLocalId": 9, "fileType": 1},
"empty": {"filename": "testEmpty.csv", "status": "ready", "type": "csv_record_validation", "submissionLocalId": 10, "fileType": 1},
"missing_header": {"filename": "testMissingHeader.csv", "status": "ready", "type": "csv_record_validation", "submissionLocalId": 11, "fileType": 1},
"bad_header": {"filename": "testBadHeader.csv", "status": "ready", "type": "csv_record_validation", "submissionLocalId": 12, "fileType": 2},
"many": {"filename": "testMany.csv", "status": "ready", "type": "csv_record_validation", "submissionLocalId": 11, "fileType": 3},
"odd_characters": {"filename": "testOddCharacters.csv", "status": "ready", "type": "csv_record_validation", "submissionLocalId":14, "fileType": 2},
"many_bad": {"filename": "testManyBadValues.csv", "status": "ready", "type": "csv_record_validation", "submissionLocalId": 11, "fileType": 4},
"rules": {"filename": "testRules.csv", "status":"ready", "type": "csv_record_validation", "submissionLocalId": 16, "fileType": 3}
}
# Upload needed files to S3
for key in csvFiles.keys():
csvFiles[key]["s3Filename"] = cls.uploadFile(
csvFiles[key]["filename"], cls.userId)
jobIdDict = {}
for key in csvFiles.keys():
file = csvFiles[key]
job = cls.addJob(
str(jobTracker.getStatusId(file["status"])),
str(jobTracker.getTypeId(file["type"])),
str(submissionIDs[file["submissionLocalId"]]),
file["s3Filename"],
str(file["fileType"]),
jobTracker.session)
# TODO: fix statement below--does this error really happen?
            if job.job_id is None:
# Failed to commit job correctly
raise Exception(
"".join(["Job for ", str(key), " did not get an id back"]))
jobIdDict[key] = job.job_id
# Print submission IDs for error report checking
print("".join([str(key),": ",str(jobTracker.getSubmissionId(job.job_id)), ", "]), end = "")
# Create dependencies
dependencies = [
            JobDependency(
                job_id=str(jobIdDict["bad_prereq"]),
                prerequisite_id=str(jobIdDict["bad_upload"])),
            JobDependency(
                job_id=str(jobIdDict["valid_prereq"]),
                prerequisite_id=str(jobIdDict["valid_upload"]))
]
for dependency in dependencies:
jobTracker.session.add(dependency)
jobTracker.session.commit()
colIdDict = {}
for fileId in range(1, 5):
for columnId in range(1, 6):
                # TODO: get rid of hard-coded surrogate keys
if columnId < 3:
fieldType = 1
else:
fieldType = 4
columnName = "header_{}".format(columnId)
column = cls.addFileColumn(
fileId, fieldType, columnName, "",
(columnId != 3), validationDb.session)
colIdDict["header_{}_file_type_{}".format(
columnId, fileId)] = column.file_column_id
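        # The rule_type_id values used below appear to map to: 1 = type check,
        # 2 = equal, 3 = not equal, 4 = less than, 5 = greater than, judging by
        # the rule descriptions; rule_timing_id=2 marks a rule this validation
        # pass should never apply. (Inferred from the test data, not from the
        # validator itself.)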
        rules = [
            Rule(file_column_id=str(colIdDict["header_1_file_type_3"]), rule_type_id=5, rule_text_1=0, description='value 1 must be greater than zero', rule_timing_id=1),
            Rule(file_column_id=str(colIdDict["header_1_file_type_3"]), rule_type_id=3, rule_text_1=13, description='value 1 may not be 13', rule_timing_id=1),
            Rule(file_column_id=str(colIdDict["header_5_file_type_3"]), rule_type_id=1, rule_text_1="INT", description='value 5 must be an integer', rule_timing_id=1),
            Rule(file_column_id=str(colIdDict["header_3_file_type_3"]), rule_type_id=2, rule_text_1=42, description='value 3 must be equal to 42 if present', rule_timing_id=1),
            Rule(file_column_id=str(colIdDict["header_1_file_type_3"]), rule_type_id=4, rule_text_1=100, description='value 1 must be less than 100', rule_timing_id=1),
            # This rule should never be checked because it has rule_timing_id=2
            Rule(file_column_id=str(colIdDict["header_1_file_type_3"]), rule_type_id=2, rule_text_1=" ", description='None shall pass', rule_timing_id=2)
        ]
for rule in rules:
validationDb.session.add(rule)
validationDb.session.commit()
# If staging already has corresponding job tables, drop them
for k, v in jobIdDict.items():
try:
cls.stagingDb.dropTable("job{}".format(v))
            except Exception:
cls.stagingDb.session.close()
cls.stagingDb.session = cls.stagingDb.Session()
cls.jobIdDict = jobIdDict
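    # The positional arguments passed to run_test below appear to be: job id,
    # expected HTTP status, expected job status, expected file size, expected
    # number of rows loaded, expected error-report status, expected number of
    # errors, and a final flag (possibly whether row-level errors are expected).
    # This reading is inferred from the call sites, not from the base class.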
def test_valid_job(self):
"""Test valid job."""
jobId = self.jobIdDict["valid"]
response = self.run_test(
jobId, 200, "finished", 52, 1, "complete", 0, False)
def test_rules(self):
"""Test rules, should have one type failure and four value failures."""
jobId = self.jobIdDict["rules"]
response = self.run_test(
jobId, 200, "finished", 350, 1, "complete", 5, True)
def test_bad_values_job(self):
"""Test a job with bad values."""
jobId = self.jobIdDict["bad_values"]
response = self.run_test(
jobId, 200, "finished", 5474, 0, "complete", 90, True)
def test_many_bad_values_job(self):
        """Test job with many bad values."""
if self.includeLongTests:
jobId = self.jobIdDict["many_bad"]
response = self.run_test(
jobId, 200, "finished", 151665643, 0, "complete", 2302930, True)
else:
self.skipTest("includeLongTests flag is off")
def test_mixed_job(self):
"""Test mixed job."""
jobId = self.jobIdDict["mixed"]
response = self.run_test(
jobId, 200, "finished", 99, 3, "complete", 1, True)
def test_empty(self):
"""Test empty file."""
jobId = self.jobIdDict["empty"]
if self.useThreads:
status = 200
else:
status = 400
response = self.run_test(
jobId, status, "invalid", False, False, "single_row_error", 0, False)
if not self.useThreads:
self.assertEqual(
response.json["message"], "CSV file must have a header")
def test_missing_header(self):
"""Test missing header in first row."""
jobId = self.jobIdDict["missing_header"]
if self.useThreads:
status = 200
else:
status = 400
response = self.run_test(
jobId, status, "invalid", False, False, "header_error", 0, False)
if not self.useThreads:
self.assertEqual(
response.json["message"], "Errors in header row")
def test_bad_header(self):
""" Ignore bad header value in first row, then fail on a duplicate header """
jobId = self.jobIdDict["bad_header"]
if self.useThreads:
status = 200
else:
status = 400
response = self.run_test(
jobId, status, "invalid", False, False, "header_error", 0, False)
if not self.useThreads:
self.assertEqual(
response.json["message"], "Errors in header row")
def test_many_rows(self):
"""Test many rows."""
if self.includeLongTests:
jobId = self.jobIdDict["many"]
response = self.run_test(
jobId, 200, "finished", 52, 22380, "complete", 0, False)
else:
self.skipTest("includeLongTests flag is off")
def test_odd_characters(self):
"""Test potentially problematic characters."""
jobId = self.jobIdDict["odd_characters"]
response = self.run_test(
jobId, 200, "finished", 99, 6, "complete", 1, True)
def test_bad_id_job(self):
"""Test job ID not found in job status table."""
jobId = -1
response = self.run_test(
jobId, 400, False, False, False, False, 0, None)
def test_prereq_job(self):
"""Test job with prerequisites finished."""
jobId = self.jobIdDict["valid_prereq"]
response = self.run_test(
jobId, 200, "finished", 52, 4, "complete", 0, False)
def test_bad_prereq_job(self):
"""Test job with unfinished prerequisites."""
jobId = self.jobIdDict["bad_prereq"]
response = self.run_test(
jobId, 400, "ready", False, False, "job_error", 0, None)
def test_bad_type_job(self):
"""Test job with wrong type."""
jobId = self.jobIdDict["wrong_type"]
response = self.run_test(
jobId, 400, "ready", False, False, "job_error", 0, None)
# TODO uncomment this unit test once jobs are labeled as ready
# def test_finished_job(self):
# """ Test job that is already finished """
# jobId = self.jobIdDict["finished"]
# self.run_test(jobId,400,"finished",False,False,"job_error",0)
def tearDown(self):
super(JobTests, self).tearDown()
# TODO: drop tables, etc.
if __name__ == '__main__':
unittest.main()
| cc0-1.0 |
bjuvensjo/scripts | vang/azdo/get_project.py | 1 | 2787 | #!/usr/bin/env python3
import argparse
import logging
from json import loads
from os import environ
from os.path import basename
from pprint import pprint
from sys import argv
from typing import Dict
from requests import get
from vang.azdo.list_projects import list_projects
logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger(basename(__file__))
base_url = 'https://dev.azure.com'
def get_project_id(token: str, organization: str, project: str, verify_certificate: bool,
api_version: str = '6.1-preview.4') -> str:
for p in list_projects(token, organization, verify_certificate, api_version)['value']:
if p['name'] == project:
return p['id']
return 'project_not_found'
def get_project(token: str, organization: str, project: str, verify_certificate: bool = True,
api_version: str = '6.1-preview.4') -> Dict:
project_id = get_project_id(token, organization, project, verify_certificate, api_version)
url = f'{base_url}/{organization}/_apis/projects/{project_id}?api-version={api_version}'
params = {'url': url, 'auth': ('', token), 'verify': verify_certificate}
logger.info(f'params: {str(params).replace(token, "***")}')
response = get(**params)
logger.info(f'response.status_code: {response.status_code}')
logger.info(f'response.text: {response.text}')
response.raise_for_status()
return loads(response.text)
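# Example usage (illustrative sketch only; the organisation and project names
# below are placeholders, not real values):
#
#     token = environ['AZDO_TOKEN']
#     details = get_project(token, 'my-organisation', 'my-project')
#     print(details['id'])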
def parse_args(args): # pragma: no cover
parser = argparse.ArgumentParser(
description='Get project')
parser.add_argument(
'--token',
default=environ.get('AZDO_TOKEN', ''),
help='The Azure DevOps authorisation token')
parser.add_argument(
'--organisation',
default=environ.get('AZDO_ORGANISATION', ''),
help='The Azure DevOps organisation')
parser.add_argument(
'--project',
default=environ.get('AZDO_PROJECT', ''),
help='The Azure DevOps project')
parser.add_argument(
'-au',
'--azure_devops_url',
default='https://dev.azure.com',
help='The Azure DevOps REST API base url')
optional_group = parser.add_mutually_exclusive_group(required=False)
optional_group.add_argument(
'-i', '--project_id', action='store_true', help='Get only project id')
return parser.parse_args(args)
def main(token: str, organisation: str, project: str, azure_devops_url: str,
project_id: bool) -> None: # pragma: no cover
global base_url
base_url = azure_devops_url
project = get_project(token, organisation, project)
if not project_id:
pprint(project)
else:
print(project['id'])
if __name__ == '__main__': # pragma: no cover
main(**parse_args(argv[1:]).__dict__)
| apache-2.0 |
bayespy/bayespy | bayespy/demos/lssm.py | 5 | 10690 | ################################################################################
# Copyright (C) 2013-2014 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Demonstrate linear Gaussian state-space model.
Some of the functions in this module are re-usable:
* ``model`` can be used to construct the classical linear state-space model.
* ``infer`` can be used to apply linear state-space model to given data.
"""
import numpy as np
import scipy
import matplotlib.pyplot as plt
from bayespy.nodes import GaussianMarkovChain
from bayespy.nodes import Gaussian, GaussianARD
from bayespy.nodes import Gamma
from bayespy.nodes import SumMultiply
from bayespy.inference.vmp.nodes.gamma import diagonal
from bayespy.utils import random
from bayespy.inference.vmp.vmp import VB
from bayespy.inference.vmp import transformations
import bayespy.plot as bpplt
def model(M=10, N=100, D=3):
"""
Construct linear state-space model.
See, for instance, the following publication:
"Fast variational Bayesian linear state-space model"
Luttinen (ECML 2013)
"""
# Dynamics matrix with ARD
alpha = Gamma(1e-5,
1e-5,
plates=(D,),
name='alpha')
A = GaussianARD(0,
alpha,
shape=(D,),
plates=(D,),
plotter=bpplt.GaussianHintonPlotter(rows=0,
cols=1,
scale=0),
name='A')
A.initialize_from_value(np.identity(D))
# Latent states with dynamics
X = GaussianMarkovChain(np.zeros(D), # mean of x0
1e-3*np.identity(D), # prec of x0
A, # dynamics
np.ones(D), # innovation
n=N, # time instances
plotter=bpplt.GaussianMarkovChainPlotter(scale=2),
name='X')
X.initialize_from_value(np.random.randn(N,D))
# Mixing matrix from latent space to observation space using ARD
gamma = Gamma(1e-5,
1e-5,
plates=(D,),
name='gamma')
gamma.initialize_from_value(1e-2*np.ones(D))
C = GaussianARD(0,
gamma,
shape=(D,),
plates=(M,1),
plotter=bpplt.GaussianHintonPlotter(rows=0,
cols=2,
scale=0),
name='C')
C.initialize_from_value(np.random.randn(M,1,D))
# Observation noise
tau = Gamma(1e-5,
1e-5,
name='tau')
tau.initialize_from_value(1e2)
# Underlying noiseless function
F = SumMultiply('i,i',
C,
X,
name='F')
# Noisy observations
Y = GaussianARD(F,
tau,
name='Y')
Q = VB(Y, F, C, gamma, X, A, alpha, tau, C)
return Q
def infer(y, D,
mask=True,
maxiter=100,
rotate=True,
debug=False,
precompute=False,
update_hyper=0,
start_rotating=0,
plot_C=True,
monitor=True,
autosave=None):
"""
Apply linear state-space model for the given data.
"""
(M, N) = np.shape(y)
# Construct the model
Q = model(M, N, D)
if not plot_C:
Q['C'].set_plotter(None)
if autosave is not None:
Q.set_autosave(autosave, iterations=10)
# Observe data
Q['Y'].observe(y, mask=mask)
# Set up rotation speed-up
if rotate:
# Initial rotate the D-dimensional state space (X, A, C)
# Does not update hyperparameters
rotA_init = transformations.RotateGaussianARD(Q['A'],
axis=0,
precompute=precompute)
rotX_init = transformations.RotateGaussianMarkovChain(Q['X'],
rotA_init)
rotC_init = transformations.RotateGaussianARD(Q['C'],
axis=0,
precompute=precompute)
R_X_init = transformations.RotationOptimizer(rotX_init, rotC_init, D)
# Rotate the D-dimensional state space (X, A, C)
rotA = transformations.RotateGaussianARD(Q['A'],
Q['alpha'],
axis=0,
precompute=precompute)
rotX = transformations.RotateGaussianMarkovChain(Q['X'],
rotA)
rotC = transformations.RotateGaussianARD(Q['C'],
Q['gamma'],
axis=0,
precompute=precompute)
R_X = transformations.RotationOptimizer(rotX, rotC, D)
# Keyword arguments for the rotation
if debug:
rotate_kwargs = {'maxiter': 10,
'check_bound': True,
'check_gradient': True}
else:
rotate_kwargs = {'maxiter': 10}
# Plot initial distributions
if monitor:
Q.plot()
# Run inference using rotations
for ind in range(maxiter):
if ind < update_hyper:
# It might be a good idea to learn the lower level nodes a bit
# before starting to learn the upper level nodes.
Q.update('X', 'C', 'A', 'tau', plot=monitor)
if rotate and ind >= start_rotating:
# Use the rotation which does not update alpha nor beta
R_X_init.rotate(**rotate_kwargs)
else:
Q.update(plot=monitor)
if rotate and ind >= start_rotating:
# It might be a good idea to not rotate immediately because it
# might lead to pruning out components too efficiently before
# even estimating them roughly
R_X.rotate(**rotate_kwargs)
# Return the posterior approximation
return Q
def simulate_data(M, N):
"""
Generate a dataset using linear state-space model.
The process has two latent oscillation components and one random walk
component.
"""
# Simulate some data
D = 3
c = np.random.randn(M, D)
w = 0.3
a = np.array([[np.cos(w), -np.sin(w), 0],
[np.sin(w), np.cos(w), 0],
[0, 0, 1]])
x = np.empty((N,D))
f = np.empty((M,N))
y = np.empty((M,N))
x[0] = 10*np.random.randn(D)
f[:,0] = np.dot(c,x[0])
y[:,0] = f[:,0] + 3*np.random.randn(M)
for n in range(N-1):
x[n+1] = np.dot(a,x[n]) + np.random.randn(D)
f[:,n+1] = np.dot(c,x[n+1])
y[:,n+1] = f[:,n+1] + 3*np.random.randn(M)
return (y, f)
@bpplt.interactive
def demo(M=6, N=200, D=3, maxiter=100, debug=False, seed=42, rotate=True,
precompute=False, plot=True, monitor=True):
"""
Run the demo for linear state-space model.
"""
# Use deterministic random numbers
if seed is not None:
np.random.seed(seed)
# Get data
(y, f) = simulate_data(M, N)
# Add missing values randomly
mask = random.mask(M, N, p=0.3)
# Add missing values to a period of time
mask[:,30:80] = False
y[~mask] = np.nan # BayesPy doesn't require this. Just for plotting.
# Run inference
Q = infer(y, D,
mask=mask,
rotate=rotate,
debug=debug,
monitor=monitor,
maxiter=maxiter)
if plot:
# Show results
plt.figure()
bpplt.timeseries_normal(Q['F'], scale=2)
bpplt.timeseries(f, linestyle='-', color='b')
bpplt.timeseries(y, linestyle='None', color='r', marker='.')
if __name__ == '__main__':
import sys, getopt, os
try:
opts, args = getopt.getopt(sys.argv[1:],
"",
["m=",
"n=",
"d=",
"seed=",
"maxiter=",
"debug",
"precompute",
"no-plot",
"no-monitor",
"no-rotation"])
except getopt.GetoptError:
print('python lssm.py <options>')
print('--m=<INT> Dimensionality of data vectors')
print('--n=<INT> Number of data vectors')
print('--d=<INT> Dimensionality of the latent vectors in the model')
print('--no-rotation Do not apply speed-up rotations')
print('--maxiter=<INT> Maximum number of VB iterations')
print('--seed=<INT> Seed (integer) for the random number generator')
print('--debug Check that the rotations are implemented correctly')
print('--no-plot Do not plot the results')
print('--no-monitor Do not plot distributions during learning')
print('--precompute Precompute some moments when rotating. May '
'speed up or slow down.')
sys.exit(2)
kwargs = {}
for opt, arg in opts:
if opt == "--no-rotation":
kwargs["rotate"] = False
elif opt == "--maxiter":
kwargs["maxiter"] = int(arg)
elif opt == "--debug":
kwargs["debug"] = True
elif opt == "--precompute":
kwargs["precompute"] = True
elif opt == "--seed":
kwargs["seed"] = int(arg)
elif opt in ("--m",):
kwargs["M"] = int(arg)
elif opt in ("--n",):
kwargs["N"] = int(arg)
elif opt in ("--d",):
kwargs["D"] = int(arg)
elif opt in ("--no-plot"):
kwargs["plot"] = False
elif opt in ("--no-monitor"):
kwargs["monitor"] = False
else:
raise ValueError("Unhandled option given")
demo(**kwargs)
plt.show()
| mit |
yencarnacion/jaikuengine | .google_appengine/lib/django-1.4/django/core/management/validation.py | 79 | 19846 | import sys
from django.core.management.color import color_style
from django.utils.itercompat import is_iterable
class ModelErrorCollection:
def __init__(self, outfile=sys.stdout):
self.errors = []
self.outfile = outfile
self.style = color_style()
def add(self, context, error):
self.errors.append((context, error))
self.outfile.write(self.style.ERROR("%s: %s\n" % (context, error)))
def get_validation_errors(outfile, app=None):
"""
Validates all models that are part of the specified app. If no app name is provided,
validates all models of all installed apps. Writes errors, if any, to outfile.
Returns number of errors.
"""
from django.conf import settings
from django.db import models, connection
from django.db.models.loading import get_app_errors
from django.db.models.fields.related import RelatedObject
from django.db.models.deletion import SET_NULL, SET_DEFAULT
e = ModelErrorCollection(outfile)
for (app_name, error) in get_app_errors().items():
e.add(app_name, error)
for cls in models.get_models(app):
opts = cls._meta
# Do field-specific validation.
for f in opts.local_fields:
if f.name == 'id' and not f.primary_key and opts.pk.name == 'id':
e.add(opts, '"%s": You can\'t use "id" as a field name, because each model automatically gets an "id" field if none of the fields have primary_key=True. You need to either remove/rename your "id" field or add primary_key=True to a field.' % f.name)
if f.name.endswith('_'):
e.add(opts, '"%s": Field names cannot end with underscores, because this would lead to ambiguous queryset filters.' % f.name)
if (f.primary_key and f.null and
not connection.features.interprets_empty_strings_as_nulls):
# We cannot reliably check this for backends like Oracle which
# consider NULL and '' to be equal (and thus set up
# character-based fields a little differently).
e.add(opts, '"%s": Primary key fields cannot have null=True.' % f.name)
if isinstance(f, models.CharField):
try:
max_length = int(f.max_length)
if max_length <= 0:
e.add(opts, '"%s": CharFields require a "max_length" attribute that is a positive integer.' % f.name)
except (ValueError, TypeError):
e.add(opts, '"%s": CharFields require a "max_length" attribute that is a positive integer.' % f.name)
if isinstance(f, models.DecimalField):
decimalp_ok, mdigits_ok = False, False
                decimalp_msg = '"%s": DecimalFields require a "decimal_places" attribute that is a non-negative integer.'
try:
decimal_places = int(f.decimal_places)
if decimal_places < 0:
e.add(opts, decimalp_msg % f.name)
else:
decimalp_ok = True
except (ValueError, TypeError):
e.add(opts, decimalp_msg % f.name)
mdigits_msg = '"%s": DecimalFields require a "max_digits" attribute that is a positive integer.'
try:
max_digits = int(f.max_digits)
if max_digits <= 0:
e.add(opts, mdigits_msg % f.name)
else:
mdigits_ok = True
except (ValueError, TypeError):
e.add(opts, mdigits_msg % f.name)
invalid_values_msg = '"%s": DecimalFields require a "max_digits" attribute value that is greater than or equal to the value of the "decimal_places" attribute.'
if decimalp_ok and mdigits_ok:
if decimal_places > max_digits:
e.add(opts, invalid_values_msg % f.name)
if isinstance(f, models.FileField) and not f.upload_to:
e.add(opts, '"%s": FileFields require an "upload_to" attribute.' % f.name)
if isinstance(f, models.ImageField):
# Try to import PIL in either of the two ways it can end up installed.
try:
from PIL import Image
except ImportError:
try:
import Image
except ImportError:
e.add(opts, '"%s": To use ImageFields, you need to install the Python Imaging Library. Get it at http://www.pythonware.com/products/pil/ .' % f.name)
if isinstance(f, models.BooleanField) and getattr(f, 'null', False):
e.add(opts, '"%s": BooleanFields do not accept null values. Use a NullBooleanField instead.' % f.name)
if f.choices:
if isinstance(f.choices, basestring) or not is_iterable(f.choices):
e.add(opts, '"%s": "choices" should be iterable (e.g., a tuple or list).' % f.name)
else:
for c in f.choices:
if not isinstance(c, (list, tuple)) or len(c) != 2:
e.add(opts, '"%s": "choices" should be a sequence of two-tuples.' % f.name)
if f.db_index not in (None, True, False):
e.add(opts, '"%s": "db_index" should be either None, True or False.' % f.name)
# Perform any backend-specific field validation.
connection.validation.validate_field(e, opts, f)
# Check if the on_delete behavior is sane
if f.rel and hasattr(f.rel, 'on_delete'):
if f.rel.on_delete == SET_NULL and not f.null:
e.add(opts, "'%s' specifies on_delete=SET_NULL, but cannot be null." % f.name)
elif f.rel.on_delete == SET_DEFAULT and not f.has_default():
e.add(opts, "'%s' specifies on_delete=SET_DEFAULT, but has no default value." % f.name)
# Check to see if the related field will clash with any existing
# fields, m2m fields, m2m related objects or related objects
if f.rel:
if f.rel.to not in models.get_models():
e.add(opts, "'%s' has a relation with model %s, which has either not been installed or is abstract." % (f.name, f.rel.to))
# it is a string and we could not find the model it refers to
# so skip the next section
if isinstance(f.rel.to, (str, unicode)):
continue
# Make sure the related field specified by a ForeignKey is unique
if not f.rel.to._meta.get_field(f.rel.field_name).unique:
e.add(opts, "Field '%s' under model '%s' must have a unique=True constraint." % (f.rel.field_name, f.rel.to.__name__))
rel_opts = f.rel.to._meta
rel_name = RelatedObject(f.rel.to, cls, f).get_accessor_name()
rel_query_name = f.related_query_name()
if not f.rel.is_hidden():
for r in rel_opts.fields:
if r.name == rel_name:
e.add(opts, "Accessor for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.local_many_to_many:
if r.name == rel_name:
e.add(opts, "Accessor for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.get_all_related_many_to_many_objects():
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
for r in rel_opts.get_all_related_objects():
if r.field is not f:
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
seen_intermediary_signatures = []
for i, f in enumerate(opts.local_many_to_many):
# Check to see if the related m2m field will clash with any
# existing fields, m2m fields, m2m related objects or related
# objects
if f.rel.to not in models.get_models():
e.add(opts, "'%s' has an m2m relation with model %s, which has either not been installed or is abstract." % (f.name, f.rel.to))
# it is a string and we could not find the model it refers to
# so skip the next section
if isinstance(f.rel.to, (str, unicode)):
continue
# Check that the field is not set to unique. ManyToManyFields do not support unique.
if f.unique:
e.add(opts, "ManyToManyFields cannot be unique. Remove the unique argument on '%s'." % f.name)
if f.rel.through is not None and not isinstance(f.rel.through, basestring):
from_model, to_model = cls, f.rel.to
if from_model == to_model and f.rel.symmetrical and not f.rel.through._meta.auto_created:
e.add(opts, "Many-to-many fields with intermediate tables cannot be symmetrical.")
seen_from, seen_to, seen_self = False, False, 0
for inter_field in f.rel.through._meta.fields:
rel_to = getattr(inter_field.rel, 'to', None)
if from_model == to_model: # relation to self
if rel_to == from_model:
seen_self += 1
if seen_self > 2:
e.add(opts, "Intermediary model %s has more than "
"two foreign keys to %s, which is ambiguous "
"and is not permitted." % (
f.rel.through._meta.object_name,
from_model._meta.object_name
)
)
else:
if rel_to == from_model:
if seen_from:
e.add(opts, "Intermediary model %s has more "
"than one foreign key to %s, which is "
"ambiguous and is not permitted." % (
f.rel.through._meta.object_name,
from_model._meta.object_name
)
)
else:
seen_from = True
elif rel_to == to_model:
if seen_to:
e.add(opts, "Intermediary model %s has more "
"than one foreign key to %s, which is "
"ambiguous and is not permitted." % (
f.rel.through._meta.object_name,
rel_to._meta.object_name
)
)
else:
seen_to = True
if f.rel.through not in models.get_models(include_auto_created=True):
e.add(opts, "'%s' specifies an m2m relation through model "
"%s, which has not been installed." % (f.name, f.rel.through)
)
signature = (f.rel.to, cls, f.rel.through)
if signature in seen_intermediary_signatures:
e.add(opts, "The model %s has two manually-defined m2m "
"relations through the model %s, which is not "
"permitted. Please consider using an extra field on "
"your intermediary model instead." % (
cls._meta.object_name,
f.rel.through._meta.object_name
)
)
else:
seen_intermediary_signatures.append(signature)
if not f.rel.through._meta.auto_created:
seen_related_fk, seen_this_fk = False, False
for field in f.rel.through._meta.fields:
if field.rel:
if not seen_related_fk and field.rel.to == f.rel.to:
seen_related_fk = True
elif field.rel.to == cls:
seen_this_fk = True
if not seen_related_fk or not seen_this_fk:
e.add(opts, "'%s' is a manually-defined m2m relation "
"through model %s, which does not have foreign keys "
"to %s and %s" % (f.name, f.rel.through._meta.object_name,
f.rel.to._meta.object_name, cls._meta.object_name)
)
elif isinstance(f.rel.through, basestring):
e.add(opts, "'%s' specifies an m2m relation through model %s, "
"which has not been installed" % (f.name, f.rel.through)
)
rel_opts = f.rel.to._meta
rel_name = RelatedObject(f.rel.to, cls, f).get_accessor_name()
rel_query_name = f.related_query_name()
# If rel_name is none, there is no reverse accessor (this only
# occurs for symmetrical m2m relations to self). If this is the
# case, there are no clashes to check for this field, as there are
# no reverse descriptors for this field.
if rel_name is not None:
for r in rel_opts.fields:
if r.name == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.local_many_to_many:
if r.name == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.get_all_related_many_to_many_objects():
if r.field is not f:
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
for r in rel_opts.get_all_related_objects():
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
# Check ordering attribute.
if opts.ordering:
for field_name in opts.ordering:
if field_name == '?': continue
if field_name.startswith('-'):
field_name = field_name[1:]
if opts.order_with_respect_to and field_name == '_order':
continue
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
if '__' in field_name:
continue
# Skip ordering on pk. This is always a valid order_by field
# but is an alias and therefore won't be found by opts.get_field.
if field_name == 'pk':
continue
try:
opts.get_field(field_name, many_to_many=False)
except models.FieldDoesNotExist:
e.add(opts, '"ordering" refers to "%s", a field that doesn\'t exist.' % field_name)
# Check unique_together.
for ut in opts.unique_together:
for field_name in ut:
try:
f = opts.get_field(field_name, many_to_many=True)
except models.FieldDoesNotExist:
e.add(opts, '"unique_together" refers to %s, a field that doesn\'t exist. Check your syntax.' % field_name)
else:
if isinstance(f.rel, models.ManyToManyRel):
e.add(opts, '"unique_together" refers to %s. ManyToManyFields are not supported in unique_together.' % f.name)
if f not in opts.local_fields:
e.add(opts, '"unique_together" refers to %s. This is not in the same model as the unique_together statement.' % f.name)
return len(e.errors)
| apache-2.0 |
EPDCenter/android_kernel_archos_97_titan | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
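# Each non-empty, non-comment line is expected to be colon-separated:
#   <cmd>: <opcode>: <thread-id>: <data>
# For example (illustrative): "C: locknowait: 0: 0" issues a command, while
# "W: locked: 0: 0" waits until thread 0 holds the lock.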
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
AltSchool/django-allauth | allauth/socialaccount/providers/linkedin/views.py | 1 | 2354 | from xml.etree import ElementTree
from xml.parsers.expat import ExpatError
from allauth.compat import six
from allauth.socialaccount import providers
from allauth.socialaccount.providers.oauth.client import OAuth
from allauth.socialaccount.providers.oauth.views import (
OAuthAdapter,
OAuthCallbackView,
OAuthLoginView,
)
from .provider import LinkedInProvider
class LinkedInAPI(OAuth):
url = 'https://api.linkedin.com/v1/people/~'
def get_user_info(self):
fields = providers.registry \
.by_id(LinkedInProvider.id, self.request) \
.get_profile_fields()
url = self.url + ':(%s)' % ','.join(fields)
raw_xml = self.query(url)
if not six.PY3:
raw_xml = raw_xml.encode('utf8')
try:
return self.to_dict(ElementTree.fromstring(raw_xml))
except (ExpatError, KeyError, IndexError):
return None
def to_dict(self, xml):
"""
        Convert an XML structure to a dict recursively; entries for
        repeated keys are returned in list containers.
"""
children = list(xml)
if not children:
return xml.text
else:
out = {}
for node in list(xml):
if node.tag in out:
if not isinstance(out[node.tag], list):
out[node.tag] = [out[node.tag]]
out[node.tag].append(self.to_dict(node))
else:
out[node.tag] = self.to_dict(node)
return out
class LinkedInOAuthAdapter(OAuthAdapter):
provider_id = LinkedInProvider.id
request_token_url = 'https://api.linkedin.com/uas/oauth/requestToken'
access_token_url = 'https://api.linkedin.com/uas/oauth/accessToken'
authorize_url = 'https://www.linkedin.com/uas/oauth/authenticate'
def complete_login(self, request, app, token, response):
client = LinkedInAPI(request, app.client_id, app.secret,
self.request_token_url)
extra_data = client.get_user_info()
return self.get_provider().sociallogin_from_response(request,
extra_data)
oauth_login = OAuthLoginView.adapter_view(LinkedInOAuthAdapter)
oauth_callback = OAuthCallbackView.adapter_view(LinkedInOAuthAdapter)
| mit |
santoshbanda/mysql-5.6 | xtrabackup/test/python/testtools/tests/test_compat.py | 62 | 9528 | # Copyright (c) 2010 testtools developers. See LICENSE for details.
"""Tests for miscellaneous compatibility functions"""
import linecache
import os
import sys
import tempfile
import traceback
import testtools
from testtools.compat import (
_b,
_detect_encoding,
_get_source_encoding,
_u,
unicode_output_stream,
)
from testtools.matchers import (
MatchesException,
Not,
Raises,
)
class TestDetectEncoding(testtools.TestCase):
"""Test detection of Python source encodings"""
def _check_encoding(self, expected, lines, possibly_invalid=False):
"""Check lines are valid Python and encoding is as expected"""
if not possibly_invalid:
compile(_b("".join(lines)), "<str>", "exec")
encoding = _detect_encoding(lines)
self.assertEqual(expected, encoding,
"Encoding %r expected but got %r from lines %r" %
(expected, encoding, lines))
def test_examples_from_pep(self):
"""Check the examples given in PEP 263 all work as specified
See 'Examples' section of <http://www.python.org/dev/peps/pep-0263/>
"""
# With interpreter binary and using Emacs style file encoding comment:
self._check_encoding("latin-1", (
"#!/usr/bin/python\n",
"# -*- coding: latin-1 -*-\n",
"import os, sys\n"))
self._check_encoding("iso-8859-15", (
"#!/usr/bin/python\n",
"# -*- coding: iso-8859-15 -*-\n",
"import os, sys\n"))
self._check_encoding("ascii", (
"#!/usr/bin/python\n",
"# -*- coding: ascii -*-\n",
"import os, sys\n"))
# Without interpreter line, using plain text:
self._check_encoding("utf-8", (
"# This Python file uses the following encoding: utf-8\n",
"import os, sys\n"))
# Text editors might have different ways of defining the file's
# encoding, e.g.
self._check_encoding("latin-1", (
"#!/usr/local/bin/python\n",
"# coding: latin-1\n",
"import os, sys\n"))
# Without encoding comment, Python's parser will assume ASCII text:
self._check_encoding("ascii", (
"#!/usr/local/bin/python\n",
"import os, sys\n"))
# Encoding comments which don't work:
# Missing "coding:" prefix:
self._check_encoding("ascii", (
"#!/usr/local/bin/python\n",
"# latin-1\n",
"import os, sys\n"))
# Encoding comment not on line 1 or 2:
self._check_encoding("ascii", (
"#!/usr/local/bin/python\n",
"#\n",
"# -*- coding: latin-1 -*-\n",
"import os, sys\n"))
# Unsupported encoding:
self._check_encoding("ascii", (
"#!/usr/local/bin/python\n",
"# -*- coding: utf-42 -*-\n",
"import os, sys\n"),
possibly_invalid=True)
def test_bom(self):
"""Test the UTF-8 BOM counts as an encoding declaration"""
self._check_encoding("utf-8", (
"\xef\xbb\xbfimport sys\n",
))
self._check_encoding("utf-8", (
"\xef\xbb\xbf# File encoding: UTF-8\n",
))
self._check_encoding("utf-8", (
'\xef\xbb\xbf"""Module docstring\n',
'\xef\xbb\xbfThat should just be a ZWNB"""\n'))
self._check_encoding("latin-1", (
'"""Is this coding: latin-1 or coding: utf-8 instead?\n',
'\xef\xbb\xbfThose should be latin-1 bytes"""\n'))
self._check_encoding("utf-8", (
"\xef\xbb\xbf# Is the coding: utf-8 or coding: euc-jp instead?\n",
'"""Module docstring say \xe2\x98\x86"""\n'))
def test_multiple_coding_comments(self):
"""Test only the first of multiple coding declarations counts"""
self._check_encoding("iso-8859-1", (
"# Is the coding: iso-8859-1\n",
"# Or is it coding: iso-8859-2\n"),
possibly_invalid=True)
self._check_encoding("iso-8859-1", (
"#!/usr/bin/python\n",
"# Is the coding: iso-8859-1\n",
"# Or is it coding: iso-8859-2\n"))
self._check_encoding("iso-8859-1", (
"# Is the coding: iso-8859-1 or coding: iso-8859-2\n",
"# Or coding: iso-8859-3 or coding: iso-8859-4\n"),
possibly_invalid=True)
self._check_encoding("iso-8859-2", (
"# Is the coding iso-8859-1 or coding: iso-8859-2\n",
"# Spot the missing colon above\n"))
class TestGetSourceEncoding(testtools.TestCase):
"""Test reading and caching the encodings of source files"""
def setUp(self):
testtools.TestCase.setUp(self)
dir = tempfile.mkdtemp()
self.addCleanup(os.rmdir, dir)
self.filename = os.path.join(dir, self.id().rsplit(".", 1)[1] + ".py")
self._written = False
def put_source(self, text):
f = open(self.filename, "w")
try:
f.write(text)
finally:
f.close()
if not self._written:
self._written = True
self.addCleanup(os.remove, self.filename)
self.addCleanup(linecache.cache.pop, self.filename, None)
def test_nonexistant_file_as_ascii(self):
"""When file can't be found, the encoding should default to ascii"""
self.assertEquals("ascii", _get_source_encoding(self.filename))
def test_encoding_is_cached(self):
"""The encoding should stay the same if the cache isn't invalidated"""
self.put_source(
"# coding: iso-8859-13\n"
"import os\n")
self.assertEquals("iso-8859-13", _get_source_encoding(self.filename))
self.put_source(
"# coding: rot-13\n"
"vzcbeg bf\n")
self.assertEquals("iso-8859-13", _get_source_encoding(self.filename))
def test_traceback_rechecks_encoding(self):
"""A traceback function checks the cache and resets the encoding"""
self.put_source(
"# coding: iso-8859-8\n"
"import os\n")
self.assertEquals("iso-8859-8", _get_source_encoding(self.filename))
self.put_source(
"# coding: utf-8\n"
"import os\n")
try:
exec (compile("raise RuntimeError\n", self.filename, "exec"))
except RuntimeError:
traceback.extract_tb(sys.exc_info()[2])
else:
self.fail("RuntimeError not raised")
self.assertEquals("utf-8", _get_source_encoding(self.filename))
class _FakeOutputStream(object):
"""A simple file-like object for testing"""
def __init__(self):
self.writelog = []
def write(self, obj):
self.writelog.append(obj)
class TestUnicodeOutputStream(testtools.TestCase):
"""Test wrapping output streams so they work with arbitrary unicode"""
uni = _u("pa\u026a\u03b8\u0259n")
def setUp(self):
super(TestUnicodeOutputStream, self).setUp()
if sys.platform == "cli":
self.skip("IronPython shouldn't wrap streams to do encoding")
def test_no_encoding_becomes_ascii(self):
"""A stream with no encoding attribute gets ascii/replace strings"""
sout = _FakeOutputStream()
unicode_output_stream(sout).write(self.uni)
self.assertEqual([_b("pa???n")], sout.writelog)
def test_encoding_as_none_becomes_ascii(self):
"""A stream with encoding value of None gets ascii/replace strings"""
sout = _FakeOutputStream()
sout.encoding = None
unicode_output_stream(sout).write(self.uni)
self.assertEqual([_b("pa???n")], sout.writelog)
def test_bogus_encoding_becomes_ascii(self):
"""A stream with a bogus encoding gets ascii/replace strings"""
sout = _FakeOutputStream()
sout.encoding = "bogus"
unicode_output_stream(sout).write(self.uni)
self.assertEqual([_b("pa???n")], sout.writelog)
def test_partial_encoding_replace(self):
"""A string which can be partly encoded correctly should be"""
sout = _FakeOutputStream()
sout.encoding = "iso-8859-7"
unicode_output_stream(sout).write(self.uni)
self.assertEqual([_b("pa?\xe8?n")], sout.writelog)
def test_unicode_encodings_not_wrapped(self):
"""A unicode encoding is left unwrapped as needs no error handler"""
sout = _FakeOutputStream()
sout.encoding = "utf-8"
self.assertIs(unicode_output_stream(sout), sout)
sout = _FakeOutputStream()
sout.encoding = "utf-16-be"
self.assertIs(unicode_output_stream(sout), sout)
def test_stringio(self):
"""A StringIO object should maybe get an ascii native str type"""
try:
from cStringIO import StringIO
newio = False
except ImportError:
from io import StringIO
newio = True
sout = StringIO()
soutwrapper = unicode_output_stream(sout)
if newio:
self.expectFailure("Python 3 StringIO expects text not bytes",
self.assertThat, lambda: soutwrapper.write(self.uni),
Not(Raises(MatchesException(TypeError))))
soutwrapper.write(self.uni)
self.assertEqual("pa???n", sout.getvalue())
def test_suite():
from unittest import TestLoader
return TestLoader().loadTestsFromName(__name__)
| gpl-2.0 |
barseghyanartur/oauth2app | tests/testsite/apps/api/tests/granttype.py | 4 | 1894 | #-*- coding: utf-8 -*-
try: import simplejson as json
except ImportError: import json
from base64 import b64encode
from django.utils import unittest
from django.contrib.auth.models import User
from oauth2app.models import Client
from django.test.client import Client as DjangoTestClient
USER_USERNAME = "testuser"
USER_PASSWORD = "testpassword"
USER_EMAIL = "[email protected]"
USER_FIRSTNAME = "Foo"
USER_LASTNAME = "Bar"
CLIENT_USERNAME = "client"
CLIENT_EMAIL = "[email protected]"
REDIRECT_URI = "http://example.com/callback"
class GrantTypeTestCase(unittest.TestCase):
user = None
client_holder = None
client_application = None
def setUp(self):
self.user = User.objects.create_user(
USER_USERNAME,
USER_EMAIL,
USER_PASSWORD)
self.user.first_name = USER_FIRSTNAME
self.user.last_name = USER_LASTNAME
self.user.save()
self.client = User.objects.create_user(CLIENT_USERNAME, CLIENT_EMAIL)
self.client_application = Client.objects.create(
name="TestApplication",
user=self.client)
def tearDown(self):
self.user.delete()
self.client.delete()
self.client_application.delete()
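    # The test below exercises the client_credentials grant type: the client
    # authenticates against /oauth2/token with HTTP Basic auth (key:secret)
    # and receives a JSON token response.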
def test_00_grant_type_client_credentials(self):
user = DjangoTestClient()
user.login(username=USER_USERNAME, password=USER_PASSWORD)
client = DjangoTestClient()
parameters = {
"client_id": self.client_application.key,
"grant_type": "client_credentials",
"redirect_uri": REDIRECT_URI}
basic_auth = b64encode("%s:%s" % (self.client_application.key,
self.client_application.secret))
response = client.get(
"/oauth2/token",
parameters,
HTTP_AUTHORIZATION="Basic %s" % basic_auth)
token = json.loads(response.content)
| mit |
gnowledge/ISON | objectapp/settings.py | 3 | 6704 | # Copyright (c) 2011, 2012 Free Software Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This project incorporates work covered by the following copyright and permission notice:
# Copyright (c) 2009, Julien Fache
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the author nor the names of other
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
"""Settings of Objectapp"""
from django.conf import settings
PING_DIRECTORIES = getattr(settings, 'OBJECTAPP_PING_DIRECTORIES',
('http://django-blog-objectapp.com/xmlrpc/',))
SAVE_PING_DIRECTORIES = getattr(settings, 'OBJECTAPP_SAVE_PING_DIRECTORIES',
bool(PING_DIRECTORIES))
SAVE_PING_EXTERNAL_URLS = getattr(settings, 'OBJECTAPP_PING_EXTERNAL_URLS', True)
COPYRIGHT = getattr(settings, 'OBJECTAPP_COPYRIGHT', 'Objectapp')
PAGINATION = getattr(settings, 'OBJECTAPP_PAGINATION', 10)
ALLOW_EMPTY = getattr(settings, 'OBJECTAPP_ALLOW_EMPTY', True)
ALLOW_FUTURE = getattr(settings, 'OBJECTAPP_ALLOW_FUTURE', True)
GBOBJECT_TEMPLATES = getattr(settings, 'OBJECTAPP_GBOBJECT_TEMPLATES', [])
GBOBJECT_BASE_MODEL = getattr(settings, 'OBJECTAPP_GBOBJECT_BASE_MODEL', '')
MARKUP_LANGUAGE = getattr(settings, 'OBJECTAPP_MARKUP_LANGUAGE', 'html')
MARKDOWN_EXTENSIONS = getattr(settings, 'OBJECTAPP_MARKDOWN_EXTENSIONS', '')
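# The mapping below selects a default WYSIWYG widget per markup language:
# markitup for the text markups, and for raw HTML either TinyMCE (when the
# 'tinymce' app is installed) or WYMeditor otherwise.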
WYSIWYG_MARKUP_MAPPING = {
'textile': 'markitup',
'markdown': 'markitup',
'restructuredtext': 'markitup',
'html': 'tinymce' in settings.INSTALLED_APPS and 'tinymce' or 'wymeditor'}
WYSIWYG = getattr(settings, 'OBJECTAPP_WYSIWYG',
WYSIWYG_MARKUP_MAPPING.get(MARKUP_LANGUAGE))
AUTO_CLOSE_COMMENTS_AFTER = getattr(
settings, 'OBJECTAPP_AUTO_CLOSE_COMMENTS_AFTER', None)
AUTO_MODERATE_COMMENTS = getattr(settings, 'OBJECTAPP_AUTO_MODERATE_COMMENTS',
False)
MAIL_COMMENT_REPLY = getattr(settings, 'OBJECTAPP_MAIL_COMMENT_REPLY', False)
MAIL_COMMENT_AUTHORS = getattr(settings, 'OBJECTAPP_MAIL_COMMENT_AUTHORS', True)
MAIL_COMMENT_NOTIFICATION_RECIPIENTS = getattr(
settings, 'OBJECTAPP_MAIL_COMMENT_NOTIFICATION_RECIPIENTS',
[manager_tuple[1] for manager_tuple in settings.MANAGERS])
UPLOAD_TO = getattr(settings, 'OBJECTAPP_UPLOAD_TO', 'uploads')
PROTOCOL = getattr(settings, 'OBJECTAPP_PROTOCOL', 'http')
FEEDS_FORMAT = getattr(settings, 'OBJECTAPP_FEEDS_FORMAT', 'rss')
FEEDS_MAX_ITEMS = getattr(settings, 'OBJECTAPP_FEEDS_MAX_ITEMS', 15)
PINGBACK_CONTENT_LENGTH = getattr(settings,
'OBJECTAPP_PINGBACK_CONTENT_LENGTH', 300)
F_MIN = getattr(settings, 'OBJECTAPP_F_MIN', 0.1)
F_MAX = getattr(settings, 'OBJECTAPP_F_MAX', 1.0)
SPAM_CHECKER_BACKENDS = getattr(settings, 'OBJECTAPP_SPAM_CHECKER_BACKENDS',
())
URL_SHORTENER_BACKEND = getattr(settings, 'OBJECTAPP_URL_SHORTENER_BACKEND',
'objectapp.url_shortener.backends.default')
STOP_WORDS = getattr(settings, 'OBJECTAPP_STOP_WORDS',
('able', 'about', 'across', 'after', 'all', 'almost',
'also', 'among', 'and', 'any', 'are', 'because', 'been',
'but', 'can', 'cannot', 'could', 'dear', 'did', 'does',
'either', 'else', 'ever', 'every', 'for', 'from', 'get',
'got', 'had', 'has', 'have', 'her', 'hers', 'him', 'his',
'how', 'however', 'into', 'its', 'just', 'least', 'let',
'like', 'likely', 'may', 'might', 'most', 'must',
'neither', 'nor', 'not', 'off', 'often', 'only', 'other',
'our', 'own', 'rather', 'said', 'say', 'says', 'she',
'should', 'since', 'some', 'than', 'that', 'the',
'their', 'them', 'then', 'there', 'these', 'they',
'this', 'tis', 'too', 'twas', 'wants', 'was', 'were',
'what', 'when', 'where', 'which', 'while', 'who', 'whom',
'why', 'will', 'with', 'would', 'yet', 'you', 'your'))
TWITTER_CONSUMER_KEY = getattr(settings, 'TWITTER_CONSUMER_KEY', '')
TWITTER_CONSUMER_SECRET = getattr(settings, 'TWITTER_CONSUMER_SECRET', '')
TWITTER_ACCESS_KEY = getattr(settings, 'TWITTER_ACCESS_KEY', '')
TWITTER_ACCESS_SECRET = getattr(settings, 'TWITTER_ACCESS_SECRET', '')
USE_TWITTER = getattr(settings, 'OBJECTAPP_USE_TWITTER',
bool(TWITTER_ACCESS_KEY and TWITTER_ACCESS_SECRET and \
TWITTER_CONSUMER_KEY and TWITTER_CONSUMER_SECRET))
OBJECTAPP_VERSIONING = True
| agpl-3.0 |
bboalimoe/ndn-cache-policy | docs/sphinx-contrib/erlangdomain/test/conf.py | 3 | 7112 | # -*- coding: utf-8 -*-
#
# sphinxcontrib-rubydomain-acceptancetest documentation build configuration file, created by
# sphinx-quickstart on Sun Apr 25 13:27:18 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinxcontrib.erlangdomain']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sphinxcontrib-rubydomain-acceptancetest'
copyright = u'2010, SHIBUKAWA Yoshiki'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'sphinxcontrib-rubydomain-acceptancetestdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'sphinxcontrib-rubydomain-acceptancetest.tex', u'sphinxcontrib-rubydomain-acceptancetest Documentation',
u'SHIBUKAWA Yoshiki', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sphinxcontrib-rubydomain-acceptancetest', u'sphinxcontrib-rubydomain-acceptancetest Documentation',
[u'SHIBUKAWA Yoshiki'], 1)
]
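# Illustrative sketch, not part of the original conf.py: what enabling a few of
# the commented-out options above could look like. The values are assumptions
# chosen for demonstration, not project settings.
html_last_updated_fmt = '%b %d, %Y'    # per-page "Last updated on" footer
html_show_sourcelink = True            # link each page to its reST source
latex_paper_size = 'a4'                # LaTeX output on A4 rather than letter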
| gpl-3.0 |
Observer-Wu/phantomjs | src/qt/qtwebkit/Tools/QueueStatusServer/model/svnrevision.py | 143 | 1735 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.ext import db
class SVNRevision(db.Model):
number = db.IntegerProperty()
broken_bots = db.StringListProperty(default=[])
date = db.DateTimeProperty(auto_now_add=True)
| bsd-3-clause |
systers/mailman | src/mailman/rules/tests/test_moderation.py | 4 | 5825 | # Copyright (C) 2014-2015 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Test the `member-moderation` and `nonmember-moderation` rules."""
__all__ = [
'TestModeration',
]
import unittest
from mailman.app.lifecycle import create_list
from mailman.interfaces.action import Action
from mailman.interfaces.member import MemberRole
from mailman.interfaces.usermanager import IUserManager
from mailman.rules import moderation
from mailman.testing.helpers import specialized_message_from_string as mfs
from mailman.testing.layers import ConfigLayer
from zope.component import getUtility
class TestModeration(unittest.TestCase):
"""Test the approved handler."""
layer = ConfigLayer
def setUp(self):
self._mlist = create_list('[email protected]')
def test_member_and_nonmember(self):
user_manager = getUtility(IUserManager)
anne = user_manager.create_address('[email protected]')
user_manager.create_address('[email protected]')
self._mlist.subscribe(anne, MemberRole.member)
rule = moderation.NonmemberModeration()
msg = mfs("""\
From: [email protected]
Sender: [email protected]
To: [email protected]
Subject: A test message
Message-ID: <ant>
MIME-Version: 1.0
A message body.
""")
# Both Anne and Bill are in the message's senders list.
self.assertIn('[email protected]', msg.senders)
self.assertIn('[email protected]', msg.senders)
# The NonmemberModeration rule should *not* hit, because even though
# Bill is in the list of senders he is not a member of the mailing
# list. Anne is also in the list of senders and she *is* a member, so
# she takes precedence.
result = rule.check(self._mlist, msg, {})
self.assertFalse(result, 'NonmemberModeration rule should not hit')
# After the rule runs, Bill becomes a non-member.
bill_member = self._mlist.nonmembers.get_member('[email protected]')
self.assertIsNotNone(bill_member)
# Bill is not a member.
bill_member = self._mlist.members.get_member('[email protected]')
self.assertIsNone(bill_member)
def test_moderation_reason(self):
# When a message is moderated, a reason is added to the metadata.
user_manager = getUtility(IUserManager)
anne = user_manager.create_address('[email protected]')
msg = mfs("""\
From: [email protected]
To: [email protected]
Subject: A test message
Message-ID: <ant>
MIME-Version: 1.0
A message body.
""")
# Anne is in the message's senders list.
self.assertIn('[email protected]', msg.senders)
# Now run the rule.
rule = moderation.NonmemberModeration()
msgdata = {}
result = rule.check(self._mlist, msg, msgdata)
self.assertTrue(result, 'NonmemberModeration rule should hit')
# The reason for moderation should be in the msgdata.
reasons = msgdata['moderation_reasons']
self.assertEqual(reasons, ['The message is not from a list member'])
# Now make Anne a moderated member...
anne_member = self._mlist.subscribe(anne, MemberRole.member)
anne_member.moderation_action = Action.hold
# ...and run the rule again.
rule = moderation.MemberModeration()
msgdata = {}
result = rule.check(self._mlist, msg, msgdata)
self.assertTrue(result, 'MemberModeration rule should hit')
# The reason for moderation should be in the msgdata.
reasons = msgdata['moderation_reasons']
self.assertEqual(
reasons, ['The message comes from a moderated member'])
def test_these_nonmembers(self):
# Test the legacy *_these_nonmembers attributes.
user_manager = getUtility(IUserManager)
actions = {
'[email protected]': 'accept',
'[email protected]': 'hold',
'[email protected]': 'reject',
'[email protected]': 'discard',
'^anne-.*@example.com': 'accept',
'^bill-.*@example.com': 'hold',
'^chris-.*@example.com': 'reject',
'^dana-.*@example.com': 'discard',
}
rule = moderation.NonmemberModeration()
user_manager = getUtility(IUserManager)
for address, action_name in actions.items():
setattr(self._mlist,
'{}_these_nonmembers'.format(action_name),
[address])
if address.startswith('^'):
# It's a pattern, craft a proper address.
address = address[1:].replace('.*', 'something')
user_manager.create_address(address)
msg = mfs("""\
From: {}
To: [email protected]
Subject: A test message
Message-ID: <ant>
MIME-Version: 1.0
A message body.
""".format(address))
msgdata = {}
result = rule.check(self._mlist, msg, msgdata)
self.assertTrue(result, 'NonmemberModeration rule should hit')
self.assertIn('moderation_action', msgdata)
self.assertEqual(msgdata['moderation_action'], action_name,
'Wrong action for {}: {}'.format(address, action_name))
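# Illustrative sketch, not part of the test module: the pattern-to-address
# crafting used in the loop above, pulled out on its own. Entries starting with
# '^' are treated as regex patterns, and the test derives a concrete address
# from such an entry by dropping the caret and filling in the wildcard.
def _example_craft_address(entry):
    if entry.startswith('^'):
        return entry[1:].replace('.*', 'something')
    return entry
# For example, _example_craft_address('^anne-.*@example.com') returns
# '[email protected]'; a literal entry comes back unchanged.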
| gpl-3.0 |
hujiajie/chromium-crosswalk | tools/telemetry/telemetry/internal/platform/profiler/v8_profiler.py | 19 | 1768 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import re
import tempfile
from telemetry.internal.platform import profiler
class V8Profiler(profiler.Profiler):
_V8_ARG = '--js-flags=--logfile=%s --prof --log-timer-events'
@classmethod
def name(cls):
return 'v8'
@classmethod
def is_supported(cls, browser_type):
return not browser_type.startswith('cros')
@classmethod
def CustomizeBrowserOptions(cls, browser_type, options):
if browser_type.startswith('android'):
dump_file = '/data/local/tmp/v8-profile.log'
else:
dump_file = tempfile.mkstemp()[1]
options.AppendExtraBrowserArgs([cls._V8_ARG % dump_file, '--no-sandbox'])
def CollectProfile(self):
# Find output filename from browser argument.
for i in self._browser_backend.browser_options.extra_browser_args:
match = re.match(self._V8_ARG % r'(\S+)', i)
if match:
output_file = match.groups(0)[0]
assert output_file
# On Android pull the output file to the host.
if self._platform_backend.GetOSName() == 'android':
host_output_file = '%s.log' % self._output_path
try:
self._browser_backend.device.PullFile(
output_file, host_output_file)
except:
logging.exception('New exception caused by DeviceUtils conversion')
raise
# Clean the device
self._browser_backend.device.RunShellCommand('rm %s' % output_file)
output_file = host_output_file
print 'V8 profile saved as %s' % output_file
print 'To view, open in ' \
'http://v8.googlecode.com/svn/trunk/tools/tick-processor.html'
return [output_file]
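# Illustrative sketch, not part of the profiler module: the format-string /
# regex round trip that CustomizeBrowserOptions() and CollectProfile() rely on,
# shown in isolation. The log path in the comment below is a made-up value.
def _example_recover_dump_file(extra_browser_args):
  for arg in extra_browser_args:
    match = re.match(V8Profiler._V8_ARG % r'(\S+)', arg)
    if match:
      return match.group(1)  # the logfile path embedded in the --js-flags value
  return None
# _example_recover_dump_file([V8Profiler._V8_ARG % '/tmp/v8.log', '--no-sandbox'])
# returns '/tmp/v8.log'.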
| bsd-3-clause |
highweb-project/highweb-webcl-html5spec | tools/perf/profile_creators/profile_extender.py | 3 | 5213 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
from telemetry.core import platform
from telemetry.util import wpr_modes
from telemetry.internal.browser import browser_finder
from telemetry.internal.browser import browser_finder_exceptions
class ProfileExtender(object):
"""Abstract base class for an object that constructs a Chrome profile."""
def __init__(self, finder_options):
"""Initializer.
|finder_options| is an instance of BrowserFinderOptions. When subclass
implementations of this method inevitably attempt to find and launch a
browser, they should pass |finder_options| to the relevant methods.
Several properties of |finder_options| might require direct manipulation by
subclasses. These are:
|finder_options.output_profile_path|: The path at which the profile
should be created.
|finder_options.browser_options.profile_dir|: If this property is None,
then a new profile is created. Otherwise, the existing profile is
appended on to.
"""
self._finder_options = copy.deepcopy(finder_options)
    # Since profile extenders are not supported on remote platforms, the host
    # OS name also serves as the target platform's OS name.
self._os_name = platform.GetHostPlatform().GetOSName()
# A reference to the browser that will be performing all of the tab
# navigations.
# This member is initialized during SetUpBrowser().
self._browser = None
def Run(self):
"""Creates or extends the profile."""
raise NotImplementedError()
def WebPageReplayArchivePath(self):
"""Returns the path to the WPR archive.
Can be overridden by subclasses.
"""
return None
@property
def finder_options(self):
"""The options to use to find and run the browser."""
return self._finder_options
@property
def profile_path(self):
"""The path of the profile that the browser will use while it's running."""
return self.finder_options.output_profile_path
@property
def browser(self):
return self._browser
@property
def os_name(self):
"""Name of OS that extender is currently running on."""
return self._os_name
def EnabledOSList(self):
"""Returns a list of OSes that this extender can run on.
Can be overridden by subclasses.
Returns:
List of OS ('win', 'mac', or 'linux') that this extender can run on.
"""
return ['win', 'mac', 'linux']
def SetUpBrowser(self):
"""Finds and starts the browser.
Can be overridden by subclasses. The subclass implementation must call the
super class implementation.
Subclasses do not need to call this method. This method is only necessary
if the subclass needs to start a browser. If a subclass does call this
method, the subclass must also call TearDownBrowser().
"""
possible_browser = self._GetPossibleBrowser(self.finder_options)
enabled_os_list = self.EnabledOSList()
if self._os_name not in enabled_os_list:
raise NotImplementedError(
'This profile extender on %s is not yet supported'
% self._os_name)
if possible_browser.IsRemote():
raise NotImplementedError(
'Profile extenders are not yet supported on remote platforms.')
assert possible_browser.supports_tab_control
self._SetUpWebPageReplay(self.finder_options, possible_browser)
self._browser = possible_browser.Create(self.finder_options)
def TearDownBrowser(self):
"""Tears down the browser.
Can be overridden by subclasses. The subclass implementation must call the
super class implementation.
"""
if self._browser:
self._browser.platform.network_controller.Close()
self._browser.Close()
self._browser = None
def FetchWebPageReplayArchives(self):
"""Fetches the web page replay archives.
Can be overridden by subclasses.
"""
pass
def _SetUpWebPageReplay(self, finder_options, possible_browser):
"""Sets up Web Page Replay, if necessary."""
wpr_archive_path = self.WebPageReplayArchivePath()
if not wpr_archive_path:
return
self.FetchWebPageReplayArchives()
if finder_options.use_live_sites:
wpr_mode = wpr_modes.WPR_OFF
else:
wpr_mode = wpr_modes.WPR_REPLAY
network_controller = possible_browser.platform.network_controller
network_controller.Open(wpr_mode, finder_options.browser_options.netsim,
finder_options.browser_options.extra_wpr_args)
network_controller.StartReplay(
wpr_archive_path, make_javascript_deterministic=True)
def _GetPossibleBrowser(self, finder_options):
"""Return a possible_browser with the given options."""
possible_browser = browser_finder.FindBrowser(finder_options)
if not possible_browser:
raise browser_finder_exceptions.BrowserFinderException(
'No browser found.\n\nAvailable browsers:\n%s\n' %
'\n'.join(browser_finder.GetAllAvailableBrowserTypes(finder_options)))
finder_options.browser_options.browser_type = (
possible_browser.browser_type)
return possible_browser
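# Illustrative sketch, not part of the original module: a minimal concrete
# extender following the contract described in the docstrings above. Run()
# starts the browser with SetUpBrowser(), performs its navigations, and always
# calls TearDownBrowser(). The URL is a placeholder.
class _ExampleSingleNavigationExtender(ProfileExtender):
  def Run(self):
    self.SetUpBrowser()
    try:
      tab = self.browser.tabs[0]
      tab.Navigate('http://example.com')  # placeholder page to seed the profile
      tab.WaitForDocumentReadyStateToBeComplete()
    finally:
      self.TearDownBrowser()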
| bsd-3-clause |
da1z/intellij-community | python/helpers/pycharm/nose_helper/util.py | 85 | 6390 | """Utility functions and classes used by nose internally.
"""
import inspect
import os
import sys
import types
try:
    # Python 2 provides these class types in the types module.
    from types import ClassType, TypeType
    class_types = (ClassType, TypeType)
except:
    # Python 3: old-style classes are gone, so only ``type`` remains.
    class_types = (type, )
try:
    # for jython
    from compiler.consts import CO_GENERATOR
except:
    CO_GENERATOR = 0x20
PYTHON_VERSION_MAJOR = sys.version_info[0]
PYTHON_VERSION_MINOR = sys.version_info[1]
def cmp_lineno(a, b):
"""Compare functions by their line numbers.
"""
return cmp(func_lineno(a), func_lineno(b))
def func_lineno(func):
"""Get the line number of a function.
"""
try:
return func.compat_co_firstlineno
except AttributeError:
try:
if PYTHON_VERSION_MAJOR == 3:
return func.__code__.co_firstlineno
return func.func_code.co_firstlineno
except AttributeError:
return -1
def isclass(obj):
obj_type = type(obj)
return obj_type in class_types or issubclass(obj_type, type)
def isgenerator(func):
if PYTHON_VERSION_MAJOR == 3:
return inspect.isgeneratorfunction(func)
try:
return func.func_code.co_flags & CO_GENERATOR != 0
except AttributeError:
return False
def resolve_name(name, module=None):
"""Resolve a dotted name to a module and its parts.
"""
parts = name.split('.')
parts_copy = parts[:]
if module is None:
while parts_copy:
try:
module = __import__('.'.join(parts_copy))
break
except ImportError:
del parts_copy[-1]
if not parts_copy:
raise
parts = parts[1:]
obj = module
for part in parts:
obj = getattr(obj, part)
return obj
def try_run(obj, names):
"""Given a list of possible method names, try to run them with the
provided object.
"""
for name in names:
func = getattr(obj, name, None)
if func is not None:
if type(obj) == types.ModuleType:
try:
args, varargs, varkw, defaults = inspect.getargspec(func)
except TypeError:
if hasattr(func, '__call__'):
func = func.__call__
try:
args, varargs, varkw, defaults = \
inspect.getargspec(func)
args.pop(0)
except TypeError:
raise TypeError("Attribute %s of %r is not a python "
"function. Only functions or callables"
" may be used as fixtures." %
(name, obj))
if len(args):
return func(obj)
return func()
def src(filename):
"""Find the python source file for a .pyc, .pyo
or $py.class file on jython
"""
if filename is None:
return filename
if sys.platform.startswith('java') and filename.endswith('$py.class'):
return '.'.join((filename[:-9], 'py'))
base, ext = os.path.splitext(filename)
if ext in ('.pyc', '.pyo', '.py'):
return '.'.join((base, 'py'))
return filename
def transplant_class(cls, module):
"""
Make a class appear to reside in `module`, rather than the module in which
it is actually defined.
"""
class C(cls):
pass
C.__module__ = module
C.__name__ = cls.__name__
return C
def transplant_func(func, module = None):
"""
Make a function imported from module A appear as if it is located
in module B.
"""
def newfunc(*arg, **kw):
return func(*arg, **kw)
newfunc = make_decorator(func)(newfunc)
if module is None:
newfunc.__module__ = inspect.getmodule(func)
else:
newfunc.__module__ = module
return newfunc
def make_decorator(func):
"""
Wraps a test decorator so as to properly replicate metadata
of the decorated function.
"""
def decorate(newfunc):
if hasattr(func, 'compat_func_name'):
name = func.compat_func_name
else:
name = func.__name__
newfunc.__dict__ = func.__dict__
newfunc.__doc__ = func.__doc__
if not hasattr(newfunc, 'compat_co_firstlineno'):
if PYTHON_VERSION_MAJOR == 3:
newfunc.compat_co_firstlineno = func.__code__.co_firstlineno
else:
newfunc.compat_co_firstlineno = func.func_code.co_firstlineno
try:
newfunc.__name__ = name
except TypeError:
newfunc.compat_func_name = name
return newfunc
return decorate
# Trick for Python 3: the following emulates the behaviour we need from an
# 'unbound method', namely keeping a class associated with a function
# definition so that callers can still act on that class.
class UnboundMethod:
def __init__(self, cls, func):
self.func = func
self.__self__ = UnboundSelf(cls)
def address(self):
cls = self.__self__.cls
module = cls.__module__
m = sys.modules[module]
file = getattr(m, '__file__', None)
if file is not None:
file = os.path.abspath(file)
        # Use the module-local src() helper; ``nose`` itself is not importable here.
        return (src(file), module, "%s.%s" % (cls.__name__, self.func.__name__))
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
def __getattr__(self, attr):
return getattr(self.func, attr)
class UnboundSelf:
def __init__(self, cls):
self.cls = cls
# We have to do this hackery because Python won't let us override the
# __class__ attribute...
def __getattribute__(self, attr):
if attr == '__class__':
return self.cls
else:
return object.__getattribute__(self, attr)
def unbound_method(cls, func):
if inspect.ismethod(func):
return func
if not inspect.isfunction(func):
raise TypeError('%s is not a function' % (repr(func),))
return UnboundMethod(cls, func)
def ismethod(obj):
return inspect.ismethod(obj) or isinstance(obj, UnboundMethod)
def isunboundmethod(obj):
return (inspect.ismethod(obj) and obj.im_self is None) or isinstance(obj, UnboundMethod)
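# Illustrative sketch, not part of the original helper: typical use of the
# resolve_name() and make_decorator() helpers defined above. 'os.path.join' is
# just a convenient dotted name to resolve; the decorator half shows that the
# wrapped function keeps its original name once the metadata is copied over.
def _example_usage():
    join = resolve_name('os.path.join')   # resolve a dotted name to an object
    assert join is os.path.join
    def noop_decorator(func):
        def wrapper(*arg, **kw):
            return func(*arg, **kw)
        return make_decorator(func)(wrapper)
    @noop_decorator
    def sample_test():
        pass
    assert sample_test.__name__ == 'sample_test'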
| apache-2.0 |
4eek/edx-platform | common/djangoapps/student/migrations/0045_add_trk_partner_to_linkedin_config.py | 102 | 14314 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'LinkedInAddToProfileConfiguration.trk_partner_name'
db.add_column('student_linkedinaddtoprofileconfiguration', 'trk_partner_name',
self.gf('django.db.models.fields.CharField')(default='', max_length=10, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'LinkedInAddToProfileConfiguration.trk_partner_name'
db.delete_column('student_linkedinaddtoprofileconfiguration', 'trk_partner_name')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.anonymoususerid': {
'Meta': {'object_name': 'AnonymousUserId'},
'anonymous_user_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseaccessrole': {
'Meta': {'unique_together': "(('user', 'org', 'course_id', 'role'),)", 'object_name': 'CourseAccessRole'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'org': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollmentallowed': {
'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'},
'auto_enroll': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'student.dashboardconfiguration': {
'Meta': {'object_name': 'DashboardConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recent_enrollment_time_delta': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'student.linkedinaddtoprofileconfiguration': {
'Meta': {'object_name': 'LinkedInAddToProfileConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'company_identifier': ('django.db.models.fields.TextField', [], {}),
'dashboard_tracking_code': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'trk_partner_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10', 'blank': 'True'})
},
'student.loginfailures': {
'Meta': {'object_name': 'LoginFailures'},
'failure_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lockout_until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.passwordhistory': {
'Meta': {'object_name': 'PasswordHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'time_set': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'allow_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'city': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'student.usersignupsource': {
'Meta': {'object_name': 'UserSignupSource'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.userstanding': {
'Meta': {'object_name': 'UserStanding'},
'account_status': ('django.db.models.fields.CharField', [], {'max_length': '31', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'standing_last_changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'standing'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student']
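# Illustrative sketch, not part of the generated migration: the same column
# change expressed directly against South's db API, mirroring forwards() and
# backwards() above. It is only a reading aid; the frozen ORM dict above is
# what South actually consumes.
def _example_apply(forward=True):
    if forward:
        db.add_column('student_linkedinaddtoprofileconfiguration', 'trk_partner_name',
                      models.CharField(default='', max_length=10, blank=True),
                      keep_default=False)
    else:
        db.delete_column('student_linkedinaddtoprofileconfiguration', 'trk_partner_name')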
| agpl-3.0 |
Arcanemagus/plexpy | lib/tzlocal/windows_tz.py | 13 | 31191 | # This file is autogenerated by the update_windows_mapping.py script
# Do not edit.
win_tz = {'AUS Central Standard Time': 'Australia/Darwin',
'AUS Eastern Standard Time': 'Australia/Sydney',
'Afghanistan Standard Time': 'Asia/Kabul',
'Alaskan Standard Time': 'America/Anchorage',
'Aleutian Standard Time': 'America/Adak',
'Altai Standard Time': 'Asia/Barnaul',
'Arab Standard Time': 'Asia/Riyadh',
'Arabian Standard Time': 'Asia/Dubai',
'Arabic Standard Time': 'Asia/Baghdad',
'Argentina Standard Time': 'America/Buenos_Aires',
'Astrakhan Standard Time': 'Europe/Astrakhan',
'Atlantic Standard Time': 'America/Halifax',
'Aus Central W. Standard Time': 'Australia/Eucla',
'Azerbaijan Standard Time': 'Asia/Baku',
'Azores Standard Time': 'Atlantic/Azores',
'Bahia Standard Time': 'America/Bahia',
'Bangladesh Standard Time': 'Asia/Dhaka',
'Belarus Standard Time': 'Europe/Minsk',
'Bougainville Standard Time': 'Pacific/Bougainville',
'Canada Central Standard Time': 'America/Regina',
'Cape Verde Standard Time': 'Atlantic/Cape_Verde',
'Caucasus Standard Time': 'Asia/Yerevan',
'Cen. Australia Standard Time': 'Australia/Adelaide',
'Central America Standard Time': 'America/Guatemala',
'Central Asia Standard Time': 'Asia/Almaty',
'Central Brazilian Standard Time': 'America/Cuiaba',
'Central Europe Standard Time': 'Europe/Budapest',
'Central European Standard Time': 'Europe/Warsaw',
'Central Pacific Standard Time': 'Pacific/Guadalcanal',
'Central Standard Time': 'America/Chicago',
'Central Standard Time (Mexico)': 'America/Mexico_City',
'Chatham Islands Standard Time': 'Pacific/Chatham',
'China Standard Time': 'Asia/Shanghai',
'Cuba Standard Time': 'America/Havana',
'Dateline Standard Time': 'Etc/GMT+12',
'E. Africa Standard Time': 'Africa/Nairobi',
'E. Australia Standard Time': 'Australia/Brisbane',
'E. Europe Standard Time': 'Europe/Chisinau',
'E. South America Standard Time': 'America/Sao_Paulo',
'Easter Island Standard Time': 'Pacific/Easter',
'Eastern Standard Time': 'America/New_York',
'Eastern Standard Time (Mexico)': 'America/Cancun',
'Egypt Standard Time': 'Africa/Cairo',
'Ekaterinburg Standard Time': 'Asia/Yekaterinburg',
'FLE Standard Time': 'Europe/Kiev',
'Fiji Standard Time': 'Pacific/Fiji',
'GMT Standard Time': 'Europe/London',
'GTB Standard Time': 'Europe/Bucharest',
'Georgian Standard Time': 'Asia/Tbilisi',
'Greenland Standard Time': 'America/Godthab',
'Greenwich Standard Time': 'Atlantic/Reykjavik',
'Haiti Standard Time': 'America/Port-au-Prince',
'Hawaiian Standard Time': 'Pacific/Honolulu',
'India Standard Time': 'Asia/Calcutta',
'Iran Standard Time': 'Asia/Tehran',
'Israel Standard Time': 'Asia/Jerusalem',
'Jordan Standard Time': 'Asia/Amman',
'Kaliningrad Standard Time': 'Europe/Kaliningrad',
'Korea Standard Time': 'Asia/Seoul',
'Libya Standard Time': 'Africa/Tripoli',
'Line Islands Standard Time': 'Pacific/Kiritimati',
'Lord Howe Standard Time': 'Australia/Lord_Howe',
'Magadan Standard Time': 'Asia/Magadan',
'Magallanes Standard Time': 'America/Punta_Arenas',
'Marquesas Standard Time': 'Pacific/Marquesas',
'Mauritius Standard Time': 'Indian/Mauritius',
'Middle East Standard Time': 'Asia/Beirut',
'Montevideo Standard Time': 'America/Montevideo',
'Morocco Standard Time': 'Africa/Casablanca',
'Mountain Standard Time': 'America/Denver',
'Mountain Standard Time (Mexico)': 'America/Chihuahua',
'Myanmar Standard Time': 'Asia/Rangoon',
'N. Central Asia Standard Time': 'Asia/Novosibirsk',
'Namibia Standard Time': 'Africa/Windhoek',
'Nepal Standard Time': 'Asia/Katmandu',
'New Zealand Standard Time': 'Pacific/Auckland',
'Newfoundland Standard Time': 'America/St_Johns',
'Norfolk Standard Time': 'Pacific/Norfolk',
'North Asia East Standard Time': 'Asia/Irkutsk',
'North Asia Standard Time': 'Asia/Krasnoyarsk',
'North Korea Standard Time': 'Asia/Pyongyang',
'Omsk Standard Time': 'Asia/Omsk',
'Pacific SA Standard Time': 'America/Santiago',
'Pacific Standard Time': 'America/Los_Angeles',
'Pacific Standard Time (Mexico)': 'America/Tijuana',
'Pakistan Standard Time': 'Asia/Karachi',
'Paraguay Standard Time': 'America/Asuncion',
'Romance Standard Time': 'Europe/Paris',
'Russia Time Zone 10': 'Asia/Srednekolymsk',
'Russia Time Zone 11': 'Asia/Kamchatka',
'Russia Time Zone 3': 'Europe/Samara',
'Russian Standard Time': 'Europe/Moscow',
'SA Eastern Standard Time': 'America/Cayenne',
'SA Pacific Standard Time': 'America/Bogota',
'SA Western Standard Time': 'America/La_Paz',
'SE Asia Standard Time': 'Asia/Bangkok',
'Saint Pierre Standard Time': 'America/Miquelon',
'Sakhalin Standard Time': 'Asia/Sakhalin',
'Samoa Standard Time': 'Pacific/Apia',
'Saratov Standard Time': 'Europe/Saratov',
'Singapore Standard Time': 'Asia/Singapore',
'South Africa Standard Time': 'Africa/Johannesburg',
'Sri Lanka Standard Time': 'Asia/Colombo',
'Syria Standard Time': 'Asia/Damascus',
'Taipei Standard Time': 'Asia/Taipei',
'Tasmania Standard Time': 'Australia/Hobart',
'Tocantins Standard Time': 'America/Araguaina',
'Tokyo Standard Time': 'Asia/Tokyo',
'Tomsk Standard Time': 'Asia/Tomsk',
'Tonga Standard Time': 'Pacific/Tongatapu',
'Transbaikal Standard Time': 'Asia/Chita',
'Turkey Standard Time': 'Europe/Istanbul',
'Turks And Caicos Standard Time': 'America/Grand_Turk',
'US Eastern Standard Time': 'America/Indianapolis',
'US Mountain Standard Time': 'America/Phoenix',
'UTC': 'Etc/GMT',
'UTC+12': 'Etc/GMT-12',
'UTC+13': 'Etc/GMT-13',
'UTC-02': 'Etc/GMT+2',
'UTC-08': 'Etc/GMT+8',
'UTC-09': 'Etc/GMT+9',
'UTC-11': 'Etc/GMT+11',
'Ulaanbaatar Standard Time': 'Asia/Ulaanbaatar',
'Venezuela Standard Time': 'America/Caracas',
'Vladivostok Standard Time': 'Asia/Vladivostok',
'W. Australia Standard Time': 'Australia/Perth',
'W. Central Africa Standard Time': 'Africa/Lagos',
'W. Europe Standard Time': 'Europe/Berlin',
'W. Mongolia Standard Time': 'Asia/Hovd',
'West Asia Standard Time': 'Asia/Tashkent',
'West Bank Standard Time': 'Asia/Hebron',
'West Pacific Standard Time': 'Pacific/Port_Moresby',
'Yakutsk Standard Time': 'Asia/Yakutsk'}
# Old name for the win_tz variable:
tz_names = win_tz
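# Illustrative sketch, not part of the generated mapping: the two tables run in
# opposite directions. win_tz maps a Windows zone name to an Olson/IANA name,
# and tz_win (below) maps an Olson name back to a Windows zone name.
def _example_round_trip(windows_name='Pacific Standard Time'):
    olson = win_tz[windows_name]   # e.g. 'America/Los_Angeles'
    return tz_win[olson]           # back to 'Pacific Standard Time'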
tz_win = {'Africa/Abidjan': 'Greenwich Standard Time',
'Africa/Accra': 'Greenwich Standard Time',
'Africa/Addis_Ababa': 'E. Africa Standard Time',
'Africa/Algiers': 'W. Central Africa Standard Time',
'Africa/Asmera': 'E. Africa Standard Time',
'Africa/Bamako': 'Greenwich Standard Time',
'Africa/Bangui': 'W. Central Africa Standard Time',
'Africa/Banjul': 'Greenwich Standard Time',
'Africa/Bissau': 'Greenwich Standard Time',
'Africa/Blantyre': 'South Africa Standard Time',
'Africa/Brazzaville': 'W. Central Africa Standard Time',
'Africa/Bujumbura': 'South Africa Standard Time',
'Africa/Cairo': 'Egypt Standard Time',
'Africa/Casablanca': 'Morocco Standard Time',
'Africa/Ceuta': 'Romance Standard Time',
'Africa/Conakry': 'Greenwich Standard Time',
'Africa/Dakar': 'Greenwich Standard Time',
'Africa/Dar_es_Salaam': 'E. Africa Standard Time',
'Africa/Djibouti': 'E. Africa Standard Time',
'Africa/Douala': 'W. Central Africa Standard Time',
'Africa/El_Aaiun': 'Morocco Standard Time',
'Africa/Freetown': 'Greenwich Standard Time',
'Africa/Gaborone': 'South Africa Standard Time',
'Africa/Harare': 'South Africa Standard Time',
'Africa/Johannesburg': 'South Africa Standard Time',
'Africa/Juba': 'E. Africa Standard Time',
'Africa/Kampala': 'E. Africa Standard Time',
'Africa/Khartoum': 'E. Africa Standard Time',
'Africa/Kigali': 'South Africa Standard Time',
'Africa/Kinshasa': 'W. Central Africa Standard Time',
'Africa/Lagos': 'W. Central Africa Standard Time',
'Africa/Libreville': 'W. Central Africa Standard Time',
'Africa/Lome': 'Greenwich Standard Time',
'Africa/Luanda': 'W. Central Africa Standard Time',
'Africa/Lubumbashi': 'South Africa Standard Time',
'Africa/Lusaka': 'South Africa Standard Time',
'Africa/Malabo': 'W. Central Africa Standard Time',
'Africa/Maputo': 'South Africa Standard Time',
'Africa/Maseru': 'South Africa Standard Time',
'Africa/Mbabane': 'South Africa Standard Time',
'Africa/Mogadishu': 'E. Africa Standard Time',
'Africa/Monrovia': 'Greenwich Standard Time',
'Africa/Nairobi': 'E. Africa Standard Time',
'Africa/Ndjamena': 'W. Central Africa Standard Time',
'Africa/Niamey': 'W. Central Africa Standard Time',
'Africa/Nouakchott': 'Greenwich Standard Time',
'Africa/Ouagadougou': 'Greenwich Standard Time',
'Africa/Porto-Novo': 'W. Central Africa Standard Time',
'Africa/Sao_Tome': 'Greenwich Standard Time',
'Africa/Timbuktu': 'Greenwich Standard Time',
'Africa/Tripoli': 'Libya Standard Time',
'Africa/Tunis': 'W. Central Africa Standard Time',
'Africa/Windhoek': 'Namibia Standard Time',
'America/Adak': 'Aleutian Standard Time',
'America/Anchorage': 'Alaskan Standard Time',
'America/Anguilla': 'SA Western Standard Time',
'America/Antigua': 'SA Western Standard Time',
'America/Araguaina': 'Tocantins Standard Time',
'America/Argentina/La_Rioja': 'Argentina Standard Time',
'America/Argentina/Rio_Gallegos': 'Argentina Standard Time',
'America/Argentina/Salta': 'Argentina Standard Time',
'America/Argentina/San_Juan': 'Argentina Standard Time',
'America/Argentina/San_Luis': 'Argentina Standard Time',
'America/Argentina/Tucuman': 'Argentina Standard Time',
'America/Argentina/Ushuaia': 'Argentina Standard Time',
'America/Aruba': 'SA Western Standard Time',
'America/Asuncion': 'Paraguay Standard Time',
'America/Atka': 'Aleutian Standard Time',
'America/Bahia': 'Bahia Standard Time',
'America/Bahia_Banderas': 'Central Standard Time (Mexico)',
'America/Barbados': 'SA Western Standard Time',
'America/Belem': 'SA Eastern Standard Time',
'America/Belize': 'Central America Standard Time',
'America/Blanc-Sablon': 'SA Western Standard Time',
'America/Boa_Vista': 'SA Western Standard Time',
'America/Bogota': 'SA Pacific Standard Time',
'America/Boise': 'Mountain Standard Time',
'America/Buenos_Aires': 'Argentina Standard Time',
'America/Cambridge_Bay': 'Mountain Standard Time',
'America/Campo_Grande': 'Central Brazilian Standard Time',
'America/Cancun': 'Eastern Standard Time (Mexico)',
'America/Caracas': 'Venezuela Standard Time',
'America/Catamarca': 'Argentina Standard Time',
'America/Cayenne': 'SA Eastern Standard Time',
'America/Cayman': 'SA Pacific Standard Time',
'America/Chicago': 'Central Standard Time',
'America/Chihuahua': 'Mountain Standard Time (Mexico)',
'America/Coral_Harbour': 'SA Pacific Standard Time',
'America/Cordoba': 'Argentina Standard Time',
'America/Costa_Rica': 'Central America Standard Time',
'America/Creston': 'US Mountain Standard Time',
'America/Cuiaba': 'Central Brazilian Standard Time',
'America/Curacao': 'SA Western Standard Time',
'America/Danmarkshavn': 'UTC',
'America/Dawson': 'Pacific Standard Time',
'America/Dawson_Creek': 'US Mountain Standard Time',
'America/Denver': 'Mountain Standard Time',
'America/Detroit': 'Eastern Standard Time',
'America/Dominica': 'SA Western Standard Time',
'America/Edmonton': 'Mountain Standard Time',
'America/Eirunepe': 'SA Pacific Standard Time',
'America/El_Salvador': 'Central America Standard Time',
'America/Ensenada': 'Pacific Standard Time (Mexico)',
'America/Fort_Nelson': 'US Mountain Standard Time',
'America/Fortaleza': 'SA Eastern Standard Time',
'America/Glace_Bay': 'Atlantic Standard Time',
'America/Godthab': 'Greenland Standard Time',
'America/Goose_Bay': 'Atlantic Standard Time',
'America/Grand_Turk': 'Turks And Caicos Standard Time',
'America/Grenada': 'SA Western Standard Time',
'America/Guadeloupe': 'SA Western Standard Time',
'America/Guatemala': 'Central America Standard Time',
'America/Guayaquil': 'SA Pacific Standard Time',
'America/Guyana': 'SA Western Standard Time',
'America/Halifax': 'Atlantic Standard Time',
'America/Havana': 'Cuba Standard Time',
'America/Hermosillo': 'US Mountain Standard Time',
'America/Indiana/Knox': 'Central Standard Time',
'America/Indiana/Marengo': 'US Eastern Standard Time',
'America/Indiana/Petersburg': 'Eastern Standard Time',
'America/Indiana/Tell_City': 'Central Standard Time',
'America/Indiana/Vevay': 'US Eastern Standard Time',
'America/Indiana/Vincennes': 'Eastern Standard Time',
'America/Indiana/Winamac': 'Eastern Standard Time',
'America/Indianapolis': 'US Eastern Standard Time',
'America/Inuvik': 'Mountain Standard Time',
'America/Iqaluit': 'Eastern Standard Time',
'America/Jamaica': 'SA Pacific Standard Time',
'America/Jujuy': 'Argentina Standard Time',
'America/Juneau': 'Alaskan Standard Time',
'America/Kentucky/Monticello': 'Eastern Standard Time',
'America/Knox_IN': 'Central Standard Time',
'America/Kralendijk': 'SA Western Standard Time',
'America/La_Paz': 'SA Western Standard Time',
'America/Lima': 'SA Pacific Standard Time',
'America/Los_Angeles': 'Pacific Standard Time',
'America/Louisville': 'Eastern Standard Time',
'America/Lower_Princes': 'SA Western Standard Time',
'America/Maceio': 'SA Eastern Standard Time',
'America/Managua': 'Central America Standard Time',
'America/Manaus': 'SA Western Standard Time',
'America/Marigot': 'SA Western Standard Time',
'America/Martinique': 'SA Western Standard Time',
'America/Matamoros': 'Central Standard Time',
'America/Mazatlan': 'Mountain Standard Time (Mexico)',
'America/Mendoza': 'Argentina Standard Time',
'America/Menominee': 'Central Standard Time',
'America/Merida': 'Central Standard Time (Mexico)',
'America/Metlakatla': 'Alaskan Standard Time',
'America/Mexico_City': 'Central Standard Time (Mexico)',
'America/Miquelon': 'Saint Pierre Standard Time',
'America/Moncton': 'Atlantic Standard Time',
'America/Monterrey': 'Central Standard Time (Mexico)',
'America/Montevideo': 'Montevideo Standard Time',
'America/Montreal': 'Eastern Standard Time',
'America/Montserrat': 'SA Western Standard Time',
'America/Nassau': 'Eastern Standard Time',
'America/New_York': 'Eastern Standard Time',
'America/Nipigon': 'Eastern Standard Time',
'America/Nome': 'Alaskan Standard Time',
'America/Noronha': 'UTC-02',
'America/North_Dakota/Beulah': 'Central Standard Time',
'America/North_Dakota/Center': 'Central Standard Time',
'America/North_Dakota/New_Salem': 'Central Standard Time',
'America/Ojinaga': 'Mountain Standard Time',
'America/Panama': 'SA Pacific Standard Time',
'America/Pangnirtung': 'Eastern Standard Time',
'America/Paramaribo': 'SA Eastern Standard Time',
'America/Phoenix': 'US Mountain Standard Time',
'America/Port-au-Prince': 'Haiti Standard Time',
'America/Port_of_Spain': 'SA Western Standard Time',
'America/Porto_Acre': 'SA Pacific Standard Time',
'America/Porto_Velho': 'SA Western Standard Time',
'America/Puerto_Rico': 'SA Western Standard Time',
'America/Punta_Arenas': 'Magallanes Standard Time',
'America/Rainy_River': 'Central Standard Time',
'America/Rankin_Inlet': 'Central Standard Time',
'America/Recife': 'SA Eastern Standard Time',
'America/Regina': 'Canada Central Standard Time',
'America/Resolute': 'Central Standard Time',
'America/Rio_Branco': 'SA Pacific Standard Time',
'America/Santa_Isabel': 'Pacific Standard Time (Mexico)',
'America/Santarem': 'SA Eastern Standard Time',
'America/Santiago': 'Pacific SA Standard Time',
'America/Santo_Domingo': 'SA Western Standard Time',
'America/Sao_Paulo': 'E. South America Standard Time',
'America/Scoresbysund': 'Azores Standard Time',
'America/Shiprock': 'Mountain Standard Time',
'America/Sitka': 'Alaskan Standard Time',
'America/St_Barthelemy': 'SA Western Standard Time',
'America/St_Johns': 'Newfoundland Standard Time',
'America/St_Kitts': 'SA Western Standard Time',
'America/St_Lucia': 'SA Western Standard Time',
'America/St_Thomas': 'SA Western Standard Time',
'America/St_Vincent': 'SA Western Standard Time',
'America/Swift_Current': 'Canada Central Standard Time',
'America/Tegucigalpa': 'Central America Standard Time',
'America/Thule': 'Atlantic Standard Time',
'America/Thunder_Bay': 'Eastern Standard Time',
'America/Tijuana': 'Pacific Standard Time (Mexico)',
'America/Toronto': 'Eastern Standard Time',
'America/Tortola': 'SA Western Standard Time',
'America/Vancouver': 'Pacific Standard Time',
'America/Virgin': 'SA Western Standard Time',
'America/Whitehorse': 'Pacific Standard Time',
'America/Winnipeg': 'Central Standard Time',
'America/Yakutat': 'Alaskan Standard Time',
'America/Yellowknife': 'Mountain Standard Time',
'Antarctica/Casey': 'Central Pacific Standard Time',
'Antarctica/Davis': 'SE Asia Standard Time',
'Antarctica/DumontDUrville': 'West Pacific Standard Time',
'Antarctica/Macquarie': 'Central Pacific Standard Time',
'Antarctica/Mawson': 'West Asia Standard Time',
'Antarctica/McMurdo': 'New Zealand Standard Time',
'Antarctica/Palmer': 'Magallanes Standard Time',
'Antarctica/Rothera': 'SA Eastern Standard Time',
'Antarctica/South_Pole': 'New Zealand Standard Time',
'Antarctica/Syowa': 'E. Africa Standard Time',
'Antarctica/Vostok': 'Central Asia Standard Time',
'Arctic/Longyearbyen': 'W. Europe Standard Time',
'Asia/Aden': 'Arab Standard Time',
'Asia/Almaty': 'Central Asia Standard Time',
'Asia/Amman': 'Jordan Standard Time',
'Asia/Anadyr': 'Russia Time Zone 11',
'Asia/Aqtau': 'West Asia Standard Time',
'Asia/Aqtobe': 'West Asia Standard Time',
'Asia/Ashgabat': 'West Asia Standard Time',
'Asia/Ashkhabad': 'West Asia Standard Time',
'Asia/Atyrau': 'West Asia Standard Time',
'Asia/Baghdad': 'Arabic Standard Time',
'Asia/Bahrain': 'Arab Standard Time',
'Asia/Baku': 'Azerbaijan Standard Time',
'Asia/Bangkok': 'SE Asia Standard Time',
'Asia/Barnaul': 'Altai Standard Time',
'Asia/Beirut': 'Middle East Standard Time',
'Asia/Bishkek': 'Central Asia Standard Time',
'Asia/Brunei': 'Singapore Standard Time',
'Asia/Calcutta': 'India Standard Time',
'Asia/Chita': 'Transbaikal Standard Time',
'Asia/Choibalsan': 'Ulaanbaatar Standard Time',
'Asia/Chongqing': 'China Standard Time',
'Asia/Chungking': 'China Standard Time',
'Asia/Colombo': 'Sri Lanka Standard Time',
'Asia/Dacca': 'Bangladesh Standard Time',
'Asia/Damascus': 'Syria Standard Time',
'Asia/Dhaka': 'Bangladesh Standard Time',
'Asia/Dili': 'Tokyo Standard Time',
'Asia/Dubai': 'Arabian Standard Time',
'Asia/Dushanbe': 'West Asia Standard Time',
'Asia/Famagusta': 'Turkey Standard Time',
'Asia/Gaza': 'West Bank Standard Time',
'Asia/Harbin': 'China Standard Time',
'Asia/Hebron': 'West Bank Standard Time',
'Asia/Hong_Kong': 'China Standard Time',
'Asia/Hovd': 'W. Mongolia Standard Time',
'Asia/Irkutsk': 'North Asia East Standard Time',
'Asia/Jakarta': 'SE Asia Standard Time',
'Asia/Jayapura': 'Tokyo Standard Time',
'Asia/Jerusalem': 'Israel Standard Time',
'Asia/Kabul': 'Afghanistan Standard Time',
'Asia/Kamchatka': 'Russia Time Zone 11',
'Asia/Karachi': 'Pakistan Standard Time',
'Asia/Kashgar': 'Central Asia Standard Time',
'Asia/Katmandu': 'Nepal Standard Time',
'Asia/Khandyga': 'Yakutsk Standard Time',
'Asia/Krasnoyarsk': 'North Asia Standard Time',
'Asia/Kuala_Lumpur': 'Singapore Standard Time',
'Asia/Kuching': 'Singapore Standard Time',
'Asia/Kuwait': 'Arab Standard Time',
'Asia/Macao': 'China Standard Time',
'Asia/Macau': 'China Standard Time',
'Asia/Magadan': 'Magadan Standard Time',
'Asia/Makassar': 'Singapore Standard Time',
'Asia/Manila': 'Singapore Standard Time',
'Asia/Muscat': 'Arabian Standard Time',
'Asia/Nicosia': 'GTB Standard Time',
'Asia/Novokuznetsk': 'North Asia Standard Time',
'Asia/Novosibirsk': 'N. Central Asia Standard Time',
'Asia/Omsk': 'Omsk Standard Time',
'Asia/Oral': 'West Asia Standard Time',
'Asia/Phnom_Penh': 'SE Asia Standard Time',
'Asia/Pontianak': 'SE Asia Standard Time',
'Asia/Pyongyang': 'North Korea Standard Time',
'Asia/Qatar': 'Arab Standard Time',
'Asia/Qyzylorda': 'Central Asia Standard Time',
'Asia/Rangoon': 'Myanmar Standard Time',
'Asia/Riyadh': 'Arab Standard Time',
'Asia/Saigon': 'SE Asia Standard Time',
'Asia/Sakhalin': 'Sakhalin Standard Time',
'Asia/Samarkand': 'West Asia Standard Time',
'Asia/Seoul': 'Korea Standard Time',
'Asia/Shanghai': 'China Standard Time',
'Asia/Singapore': 'Singapore Standard Time',
'Asia/Srednekolymsk': 'Russia Time Zone 10',
'Asia/Taipei': 'Taipei Standard Time',
'Asia/Tashkent': 'West Asia Standard Time',
'Asia/Tbilisi': 'Georgian Standard Time',
'Asia/Tehran': 'Iran Standard Time',
'Asia/Tel_Aviv': 'Israel Standard Time',
'Asia/Thimbu': 'Bangladesh Standard Time',
'Asia/Thimphu': 'Bangladesh Standard Time',
'Asia/Tokyo': 'Tokyo Standard Time',
'Asia/Tomsk': 'Tomsk Standard Time',
'Asia/Ujung_Pandang': 'Singapore Standard Time',
'Asia/Ulaanbaatar': 'Ulaanbaatar Standard Time',
'Asia/Ulan_Bator': 'Ulaanbaatar Standard Time',
'Asia/Urumqi': 'Central Asia Standard Time',
'Asia/Ust-Nera': 'Vladivostok Standard Time',
'Asia/Vientiane': 'SE Asia Standard Time',
'Asia/Vladivostok': 'Vladivostok Standard Time',
'Asia/Yakutsk': 'Yakutsk Standard Time',
'Asia/Yekaterinburg': 'Ekaterinburg Standard Time',
'Asia/Yerevan': 'Caucasus Standard Time',
'Atlantic/Azores': 'Azores Standard Time',
'Atlantic/Bermuda': 'Atlantic Standard Time',
'Atlantic/Canary': 'GMT Standard Time',
'Atlantic/Cape_Verde': 'Cape Verde Standard Time',
'Atlantic/Faeroe': 'GMT Standard Time',
'Atlantic/Jan_Mayen': 'W. Europe Standard Time',
'Atlantic/Madeira': 'GMT Standard Time',
'Atlantic/Reykjavik': 'Greenwich Standard Time',
'Atlantic/South_Georgia': 'UTC-02',
'Atlantic/St_Helena': 'Greenwich Standard Time',
'Atlantic/Stanley': 'SA Eastern Standard Time',
'Australia/ACT': 'AUS Eastern Standard Time',
'Australia/Adelaide': 'Cen. Australia Standard Time',
'Australia/Brisbane': 'E. Australia Standard Time',
'Australia/Broken_Hill': 'Cen. Australia Standard Time',
'Australia/Canberra': 'AUS Eastern Standard Time',
'Australia/Currie': 'Tasmania Standard Time',
'Australia/Darwin': 'AUS Central Standard Time',
'Australia/Eucla': 'Aus Central W. Standard Time',
'Australia/Hobart': 'Tasmania Standard Time',
'Australia/LHI': 'Lord Howe Standard Time',
'Australia/Lindeman': 'E. Australia Standard Time',
'Australia/Lord_Howe': 'Lord Howe Standard Time',
'Australia/Melbourne': 'AUS Eastern Standard Time',
'Australia/NSW': 'AUS Eastern Standard Time',
'Australia/North': 'AUS Central Standard Time',
'Australia/Perth': 'W. Australia Standard Time',
'Australia/Queensland': 'E. Australia Standard Time',
'Australia/South': 'Cen. Australia Standard Time',
'Australia/Sydney': 'AUS Eastern Standard Time',
'Australia/Tasmania': 'Tasmania Standard Time',
'Australia/Victoria': 'AUS Eastern Standard Time',
'Australia/West': 'W. Australia Standard Time',
'Australia/Yancowinna': 'Cen. Australia Standard Time',
'Brazil/Acre': 'SA Pacific Standard Time',
'Brazil/DeNoronha': 'UTC-02',
'Brazil/East': 'E. South America Standard Time',
'Brazil/West': 'SA Western Standard Time',
'CST6CDT': 'Central Standard Time',
'Canada/Atlantic': 'Atlantic Standard Time',
'Canada/Central': 'Central Standard Time',
'Canada/Eastern': 'Eastern Standard Time',
'Canada/Mountain': 'Mountain Standard Time',
'Canada/Newfoundland': 'Newfoundland Standard Time',
'Canada/Pacific': 'Pacific Standard Time',
'Canada/Saskatchewan': 'Canada Central Standard Time',
'Canada/Yukon': 'Pacific Standard Time',
'Chile/Continental': 'Pacific SA Standard Time',
'Chile/EasterIsland': 'Easter Island Standard Time',
'Cuba': 'Cuba Standard Time',
'EST5EDT': 'Eastern Standard Time',
'Egypt': 'Egypt Standard Time',
'Eire': 'GMT Standard Time',
'Etc/GMT': 'UTC',
'Etc/GMT+1': 'Cape Verde Standard Time',
'Etc/GMT+10': 'Hawaiian Standard Time',
'Etc/GMT+11': 'UTC-11',
'Etc/GMT+12': 'Dateline Standard Time',
'Etc/GMT+2': 'UTC-02',
'Etc/GMT+3': 'SA Eastern Standard Time',
'Etc/GMT+4': 'SA Western Standard Time',
'Etc/GMT+5': 'SA Pacific Standard Time',
'Etc/GMT+6': 'Central America Standard Time',
'Etc/GMT+7': 'US Mountain Standard Time',
'Etc/GMT+8': 'UTC-08',
'Etc/GMT+9': 'UTC-09',
'Etc/GMT-1': 'W. Central Africa Standard Time',
'Etc/GMT-10': 'West Pacific Standard Time',
'Etc/GMT-11': 'Central Pacific Standard Time',
'Etc/GMT-12': 'UTC+12',
'Etc/GMT-13': 'UTC+13',
'Etc/GMT-14': 'Line Islands Standard Time',
'Etc/GMT-2': 'South Africa Standard Time',
'Etc/GMT-3': 'E. Africa Standard Time',
'Etc/GMT-4': 'Arabian Standard Time',
'Etc/GMT-5': 'West Asia Standard Time',
'Etc/GMT-6': 'Central Asia Standard Time',
'Etc/GMT-7': 'SE Asia Standard Time',
'Etc/GMT-8': 'Singapore Standard Time',
'Etc/GMT-9': 'Tokyo Standard Time',
'Etc/UTC': 'UTC',
'Europe/Amsterdam': 'W. Europe Standard Time',
'Europe/Andorra': 'W. Europe Standard Time',
'Europe/Astrakhan': 'Astrakhan Standard Time',
'Europe/Athens': 'GTB Standard Time',
'Europe/Belfast': 'GMT Standard Time',
'Europe/Belgrade': 'Central Europe Standard Time',
'Europe/Berlin': 'W. Europe Standard Time',
'Europe/Bratislava': 'Central Europe Standard Time',
'Europe/Brussels': 'Romance Standard Time',
'Europe/Bucharest': 'GTB Standard Time',
'Europe/Budapest': 'Central Europe Standard Time',
'Europe/Busingen': 'W. Europe Standard Time',
'Europe/Chisinau': 'E. Europe Standard Time',
'Europe/Copenhagen': 'Romance Standard Time',
'Europe/Dublin': 'GMT Standard Time',
'Europe/Gibraltar': 'W. Europe Standard Time',
'Europe/Guernsey': 'GMT Standard Time',
'Europe/Helsinki': 'FLE Standard Time',
'Europe/Isle_of_Man': 'GMT Standard Time',
'Europe/Istanbul': 'Turkey Standard Time',
'Europe/Jersey': 'GMT Standard Time',
'Europe/Kaliningrad': 'Kaliningrad Standard Time',
'Europe/Kiev': 'FLE Standard Time',
'Europe/Kirov': 'Russian Standard Time',
'Europe/Lisbon': 'GMT Standard Time',
'Europe/Ljubljana': 'Central Europe Standard Time',
'Europe/London': 'GMT Standard Time',
'Europe/Luxembourg': 'W. Europe Standard Time',
'Europe/Madrid': 'Romance Standard Time',
'Europe/Malta': 'W. Europe Standard Time',
'Europe/Mariehamn': 'FLE Standard Time',
'Europe/Minsk': 'Belarus Standard Time',
'Europe/Monaco': 'W. Europe Standard Time',
'Europe/Moscow': 'Russian Standard Time',
'Europe/Oslo': 'W. Europe Standard Time',
'Europe/Paris': 'Romance Standard Time',
'Europe/Podgorica': 'Central Europe Standard Time',
'Europe/Prague': 'Central Europe Standard Time',
'Europe/Riga': 'FLE Standard Time',
'Europe/Rome': 'W. Europe Standard Time',
'Europe/Samara': 'Russia Time Zone 3',
'Europe/San_Marino': 'W. Europe Standard Time',
'Europe/Sarajevo': 'Central European Standard Time',
'Europe/Saratov': 'Saratov Standard Time',
'Europe/Simferopol': 'Russian Standard Time',
'Europe/Skopje': 'Central European Standard Time',
'Europe/Sofia': 'FLE Standard Time',
'Europe/Stockholm': 'W. Europe Standard Time',
'Europe/Tallinn': 'FLE Standard Time',
'Europe/Tirane': 'Central Europe Standard Time',
'Europe/Tiraspol': 'E. Europe Standard Time',
'Europe/Ulyanovsk': 'Astrakhan Standard Time',
'Europe/Uzhgorod': 'FLE Standard Time',
'Europe/Vaduz': 'W. Europe Standard Time',
'Europe/Vatican': 'W. Europe Standard Time',
'Europe/Vienna': 'W. Europe Standard Time',
'Europe/Vilnius': 'FLE Standard Time',
'Europe/Volgograd': 'Russian Standard Time',
'Europe/Warsaw': 'Central European Standard Time',
'Europe/Zagreb': 'Central European Standard Time',
'Europe/Zaporozhye': 'FLE Standard Time',
'Europe/Zurich': 'W. Europe Standard Time',
'GB': 'GMT Standard Time',
'GB-Eire': 'GMT Standard Time',
'GMT+0': 'UTC',
'GMT-0': 'UTC',
'GMT0': 'UTC',
'Greenwich': 'UTC',
'Hongkong': 'China Standard Time',
'Iceland': 'Greenwich Standard Time',
'Indian/Antananarivo': 'E. Africa Standard Time',
'Indian/Chagos': 'Central Asia Standard Time',
'Indian/Christmas': 'SE Asia Standard Time',
'Indian/Cocos': 'Myanmar Standard Time',
'Indian/Comoro': 'E. Africa Standard Time',
'Indian/Kerguelen': 'West Asia Standard Time',
'Indian/Mahe': 'Mauritius Standard Time',
'Indian/Maldives': 'West Asia Standard Time',
'Indian/Mauritius': 'Mauritius Standard Time',
'Indian/Mayotte': 'E. Africa Standard Time',
'Indian/Reunion': 'Mauritius Standard Time',
'Iran': 'Iran Standard Time',
'Israel': 'Israel Standard Time',
'Jamaica': 'SA Pacific Standard Time',
'Japan': 'Tokyo Standard Time',
'Kwajalein': 'UTC+12',
'Libya': 'Libya Standard Time',
'MST7MDT': 'Mountain Standard Time',
'Mexico/BajaNorte': 'Pacific Standard Time (Mexico)',
'Mexico/BajaSur': 'Mountain Standard Time (Mexico)',
'Mexico/General': 'Central Standard Time (Mexico)',
'NZ': 'New Zealand Standard Time',
'NZ-CHAT': 'Chatham Islands Standard Time',
'Navajo': 'Mountain Standard Time',
'PRC': 'China Standard Time',
'PST8PDT': 'Pacific Standard Time',
'Pacific/Apia': 'Samoa Standard Time',
'Pacific/Auckland': 'New Zealand Standard Time',
'Pacific/Bougainville': 'Bougainville Standard Time',
'Pacific/Chatham': 'Chatham Islands Standard Time',
'Pacific/Easter': 'Easter Island Standard Time',
'Pacific/Efate': 'Central Pacific Standard Time',
'Pacific/Enderbury': 'UTC+13',
'Pacific/Fakaofo': 'UTC+13',
'Pacific/Fiji': 'Fiji Standard Time',
'Pacific/Funafuti': 'UTC+12',
'Pacific/Galapagos': 'Central America Standard Time',
'Pacific/Gambier': 'UTC-09',
'Pacific/Guadalcanal': 'Central Pacific Standard Time',
'Pacific/Guam': 'West Pacific Standard Time',
'Pacific/Honolulu': 'Hawaiian Standard Time',
'Pacific/Johnston': 'Hawaiian Standard Time',
'Pacific/Kiritimati': 'Line Islands Standard Time',
'Pacific/Kosrae': 'Central Pacific Standard Time',
'Pacific/Kwajalein': 'UTC+12',
'Pacific/Majuro': 'UTC+12',
'Pacific/Marquesas': 'Marquesas Standard Time',
'Pacific/Midway': 'UTC-11',
'Pacific/Nauru': 'UTC+12',
'Pacific/Niue': 'UTC-11',
'Pacific/Norfolk': 'Norfolk Standard Time',
'Pacific/Noumea': 'Central Pacific Standard Time',
'Pacific/Pago_Pago': 'UTC-11',
'Pacific/Palau': 'Tokyo Standard Time',
'Pacific/Pitcairn': 'UTC-08',
'Pacific/Ponape': 'Central Pacific Standard Time',
'Pacific/Port_Moresby': 'West Pacific Standard Time',
'Pacific/Rarotonga': 'Hawaiian Standard Time',
'Pacific/Saipan': 'West Pacific Standard Time',
'Pacific/Samoa': 'UTC-11',
'Pacific/Tahiti': 'Hawaiian Standard Time',
'Pacific/Tarawa': 'UTC+12',
'Pacific/Tongatapu': 'Tonga Standard Time',
'Pacific/Truk': 'West Pacific Standard Time',
'Pacific/Wake': 'UTC+12',
'Pacific/Wallis': 'UTC+12',
'Poland': 'Central European Standard Time',
'Portugal': 'GMT Standard Time',
'ROC': 'Taipei Standard Time',
'ROK': 'Korea Standard Time',
'Singapore': 'Singapore Standard Time',
'Turkey': 'Turkey Standard Time',
'US/Alaska': 'Alaskan Standard Time',
'US/Aleutian': 'Aleutian Standard Time',
'US/Arizona': 'US Mountain Standard Time',
'US/Central': 'Central Standard Time',
'US/Eastern': 'Eastern Standard Time',
'US/Hawaii': 'Hawaiian Standard Time',
'US/Indiana-Starke': 'Central Standard Time',
'US/Michigan': 'Eastern Standard Time',
'US/Mountain': 'Mountain Standard Time',
'US/Pacific': 'Pacific Standard Time',
'US/Samoa': 'UTC-11',
'UTC': 'UTC',
'Universal': 'UTC',
'W-SU': 'Russian Standard Time',
'Zulu': 'UTC'}
| gpl-3.0 |
markflyhigh/incubator-beam | sdks/python/apache_beam/transforms/cy_combiners.py | 2 | 10475 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: language_level=3
"""A library of basic cythonized CombineFn subclasses.
For internal use only; no backwards-compatibility guarantees.
"""
from __future__ import absolute_import
from __future__ import division
import operator
from builtins import object
from apache_beam.transforms import core
try:
from apache_beam.transforms.cy_dataflow_distribution_counter import DataflowDistributionCounter
except ImportError:
from apache_beam.transforms.py_dataflow_distribution_counter import DataflowDistributionCounter
class AccumulatorCombineFn(core.CombineFn):
# singleton?
def create_accumulator(self):
return self._accumulator_type()
@staticmethod
def add_input(accumulator, element):
accumulator.add_input(element)
return accumulator
def merge_accumulators(self, accumulators):
accumulator = self._accumulator_type()
accumulator.merge(accumulators)
return accumulator
@staticmethod
def extract_output(accumulator):
return accumulator.extract_output()
def __eq__(self, other):
return (isinstance(other, AccumulatorCombineFn)
and self._accumulator_type is other._accumulator_type)
def __ne__(self, other):
# TODO(BEAM-5949): Needed for Python 2 compatibility.
return not self == other
def __hash__(self):
return hash(self._accumulator_type)
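# --- Illustrative sketch (editor's addition; not part of the upstream module).
# A hypothetical, never-called helper showing how the accumulator protocol
# defined by AccumulatorCombineFn is exercised: one accumulator per bundle,
# elements fed into it, the per-bundle accumulators merged, and a single value
# extracted. CountCombineFn is defined further down in this module.
def _example_accumulator_combine_fn_usage(elements_by_bundle):
  fn = CountCombineFn()
  accumulators = []
  for bundle in elements_by_bundle:
    accumulator = fn.create_accumulator()
    for element in bundle:
      accumulator = fn.add_input(accumulator, element)
    accumulators.append(accumulator)
  merged = fn.merge_accumulators(accumulators)
  return fn.extract_output(merged)  # total number of elements across bundles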
_63 = 63 # Avoid large literals in C source code.
globals()['INT64_MAX'] = 2**_63 - 1
globals()['INT64_MIN'] = -2**_63
class CountAccumulator(object):
def __init__(self):
self.value = 0
def add_input(self, unused_element):
self.value += 1
def merge(self, accumulators):
for accumulator in accumulators:
self.value += accumulator.value
def extract_output(self):
return self.value
class SumInt64Accumulator(object):
def __init__(self):
self.value = 0
def add_input(self, element):
global INT64_MAX, INT64_MIN # pylint: disable=global-variable-not-assigned
element = int(element)
if not INT64_MIN <= element <= INT64_MAX:
raise OverflowError(element)
self.value += element
def merge(self, accumulators):
for accumulator in accumulators:
self.value += accumulator.value
  def extract_output(self):
    if not INT64_MIN <= self.value <= INT64_MAX:
      # Emulate C-style 64-bit overflow: reduce modulo 2**64, then fold values
      # at or above INT64_MAX back into the signed range (e.g. INT64_MAX + 1
      # wraps around to INT64_MIN).
      self.value %= 2**64
      if self.value >= INT64_MAX:
        self.value -= 2**64
    return self.value
class MinInt64Accumulator(object):
def __init__(self):
self.value = INT64_MAX
def add_input(self, element):
element = int(element)
if not INT64_MIN <= element <= INT64_MAX:
raise OverflowError(element)
if element < self.value:
self.value = element
def merge(self, accumulators):
for accumulator in accumulators:
if accumulator.value < self.value:
self.value = accumulator.value
def extract_output(self):
return self.value
class MaxInt64Accumulator(object):
def __init__(self):
self.value = INT64_MIN
def add_input(self, element):
element = int(element)
if not INT64_MIN <= element <= INT64_MAX:
raise OverflowError(element)
if element > self.value:
self.value = element
def merge(self, accumulators):
for accumulator in accumulators:
if accumulator.value > self.value:
self.value = accumulator.value
def extract_output(self):
return self.value
class MeanInt64Accumulator(object):
def __init__(self):
self.sum = 0
self.count = 0
def add_input(self, element):
element = int(element)
if not INT64_MIN <= element <= INT64_MAX:
raise OverflowError(element)
self.sum += element
self.count += 1
def merge(self, accumulators):
for accumulator in accumulators:
self.sum += accumulator.sum
self.count += accumulator.count
def extract_output(self):
if not INT64_MIN <= self.sum <= INT64_MAX:
self.sum %= 2**64
if self.sum >= INT64_MAX:
self.sum -= 2**64
return self.sum // self.count if self.count else _NAN
class DistributionInt64Accumulator(object):
def __init__(self):
self.sum = 0
self.count = 0
self.min = INT64_MAX
self.max = INT64_MIN
def add_input(self, element):
element = int(element)
if not INT64_MIN <= element <= INT64_MAX:
raise OverflowError(element)
self.sum += element
self.count += 1
self.min = min(self.min, element)
self.max = max(self.max, element)
def merge(self, accumulators):
for accumulator in accumulators:
self.sum += accumulator.sum
self.count += accumulator.count
self.min = min(self.min, accumulator.min)
self.max = max(self.max, accumulator.max)
def extract_output(self):
if not INT64_MIN <= self.sum <= INT64_MAX:
self.sum %= 2**64
if self.sum >= INT64_MAX:
self.sum -= 2**64
mean = self.sum // self.count if self.count else _NAN
return mean, self.sum, self.count, self.min, self.max
class CountCombineFn(AccumulatorCombineFn):
_accumulator_type = CountAccumulator
class SumInt64Fn(AccumulatorCombineFn):
_accumulator_type = SumInt64Accumulator
class MinInt64Fn(AccumulatorCombineFn):
_accumulator_type = MinInt64Accumulator
class MaxInt64Fn(AccumulatorCombineFn):
_accumulator_type = MaxInt64Accumulator
class MeanInt64Fn(AccumulatorCombineFn):
_accumulator_type = MeanInt64Accumulator
class DistributionInt64Fn(AccumulatorCombineFn):
_accumulator_type = DistributionInt64Accumulator
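# --- Illustrative sketch (editor's addition; not part of the upstream module).
# A hypothetical, never-called helper showing the shape of DistributionInt64Fn's
# output: a (mean, sum, count, min, max) tuple, with the mean computed by
# integer division.
def _example_distribution_int64_fn():
  fn = DistributionInt64Fn()
  accumulator = fn.create_accumulator()
  for element in (2, 4, 9):
    accumulator = fn.add_input(accumulator, element)
  return fn.extract_output(accumulator)  # -> (5, 15, 3, 2, 9)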
_POS_INF = float('inf')
_NEG_INF = float('-inf')
_NAN = float('nan')
class SumDoubleAccumulator(object):
def __init__(self):
self.value = 0
def add_input(self, element):
element = float(element)
self.value += element
def merge(self, accumulators):
for accumulator in accumulators:
self.value += accumulator.value
def extract_output(self):
return self.value
class MinDoubleAccumulator(object):
def __init__(self):
self.value = _POS_INF
def add_input(self, element):
element = float(element)
if element < self.value:
self.value = element
def merge(self, accumulators):
for accumulator in accumulators:
if accumulator.value < self.value:
self.value = accumulator.value
def extract_output(self):
return self.value
class MaxDoubleAccumulator(object):
def __init__(self):
self.value = _NEG_INF
def add_input(self, element):
element = float(element)
if element > self.value:
self.value = element
def merge(self, accumulators):
for accumulator in accumulators:
if accumulator.value > self.value:
self.value = accumulator.value
def extract_output(self):
return self.value
class MeanDoubleAccumulator(object):
def __init__(self):
self.sum = 0
self.count = 0
def add_input(self, element):
element = float(element)
self.sum += element
self.count += 1
def merge(self, accumulators):
for accumulator in accumulators:
self.sum += accumulator.sum
self.count += accumulator.count
def extract_output(self):
    return self.sum / self.count if self.count else _NAN
class SumFloatFn(AccumulatorCombineFn):
_accumulator_type = SumDoubleAccumulator
class MinFloatFn(AccumulatorCombineFn):
_accumulator_type = MinDoubleAccumulator
class MaxFloatFn(AccumulatorCombineFn):
_accumulator_type = MaxDoubleAccumulator
class MeanFloatFn(AccumulatorCombineFn):
_accumulator_type = MeanDoubleAccumulator
class AllAccumulator(object):
def __init__(self):
self.value = True
def add_input(self, element):
self.value &= not not element
def merge(self, accumulators):
for accumulator in accumulators:
self.value &= accumulator.value
def extract_output(self):
return self.value
class AnyAccumulator(object):
def __init__(self):
self.value = False
def add_input(self, element):
self.value |= not not element
def merge(self, accumulators):
for accumulator in accumulators:
self.value |= accumulator.value
def extract_output(self):
return self.value
class AnyCombineFn(AccumulatorCombineFn):
_accumulator_type = AnyAccumulator
class AllCombineFn(AccumulatorCombineFn):
_accumulator_type = AllAccumulator
class DataflowDistributionCounterFn(AccumulatorCombineFn):
"""A subclass of cy_combiners.AccumulatorCombineFn.
Make DataflowDistributionCounter able to report to Dataflow service via
CounterFactory.
When cythonized DataflowDistributinoCounter available, make
CounterFn combine with cythonized module, otherwise, combine with python
version.
"""
_accumulator_type = DataflowDistributionCounter
class ComparableValue(object):
"""A way to allow comparing elements in a rich fashion."""
__slots__ = (
'value', '_less_than_fn', '_comparable_value', 'requires_hydration')
def __init__(self, value, less_than_fn, key_fn, _requires_hydration=False):
self.value = value
self.hydrate(less_than_fn, key_fn)
self.requires_hydration = _requires_hydration
def hydrate(self, less_than_fn, key_fn):
self._less_than_fn = less_than_fn if less_than_fn else operator.lt
self._comparable_value = key_fn(self.value) if key_fn else self.value
self.requires_hydration = False
def __lt__(self, other):
assert not self.requires_hydration
assert self._less_than_fn is other._less_than_fn
return self._less_than_fn(self._comparable_value, other._comparable_value)
def __repr__(self):
return 'ComparableValue[%s]' % str(self.value)
def __reduce__(self):
# Since we can't pickle the Compare and Key Fn we pass None and we signify
# that this object _requires_hydration.
return ComparableValue, (self.value, None, None, True)
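# --- Illustrative sketch (editor's addition; not part of the upstream module).
# A hypothetical, never-called helper showing ComparableValue ordering records
# by a key function; the record fields are invented for illustration.
def _example_comparable_value_usage():
  records = [{'score': 3}, {'score': 1}, {'score': 2}]
  wrapped = [ComparableValue(record, None, lambda record: record['score'])
             for record in records]
  wrapped.sort()  # ComparableValue.__lt__ compares the extracted keys
  return [comparable.value for comparable in wrapped]  # records sorted by 'score'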
| apache-2.0 |
scrapinghub/exporters | exporters/transform/flatson_transform.py | 1 | 1030 | from exporters.transform.base_transform import BaseTransform
class FlatsonTransform(BaseTransform):
"""
    It flattens a JSON-like dataset into flat CSV-like tables using the
    Flatson library; please refer to the Flatson
    `official documentation
    <http://flatson.readthedocs.io/en/latest/readme.html>`_.
- flatson_schema (dict)
Valid Flatson schema
"""
# List of options to set up the transform module
supported_options = {
'flatson_schema': {'type': dict}
}
def __init__(self, *args, **kwargs):
from flatson import Flatson
super(FlatsonTransform, self).__init__(*args, **kwargs)
self.flatson_schema = self.read_option('flatson_schema')
self.flatson = Flatson(self.flatson_schema)
self.logger.info(
'FlatsonTransform has been initiated. Schema: {!r}'.format(
self.flatson_schema))
def transform_batch(self, batch):
for record in batch:
yield self.flatson.flatten_dict(record)
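# --- Illustrative sketch (editor's addition; not part of the upstream module).
# A hypothetical, never-called helper showing the kind of flatson_schema this
# transform expects and the flat dict Flatson produces; the field names and
# record are invented, and the dotted-key output shape is an assumption based
# on Flatson's documented behaviour.
def _example_flatson_flattening():
    from flatson import Flatson
    schema = {
        '$schema': 'http://json-schema.org/draft-04/schema',
        'type': 'object',
        'properties': {
            'name': {'type': 'string'},
            'address': {
                'type': 'object',
                'properties': {
                    'city': {'type': 'string'},
                    'zip': {'type': 'string'},
                },
            },
        },
    }
    flatson = Flatson(schema)
    record = {'name': 'Ada', 'address': {'city': 'London', 'zip': 'N1'}}
    # Expected shape: {'name': 'Ada', 'address.city': 'London', 'address.zip': 'N1'}
    return flatson.flatten_dict(record)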
| bsd-3-clause |
fenginx/django | django/core/management/commands/showmigrations.py | 41 | 5855 | import sys
from django.apps import apps
from django.core.management.base import BaseCommand
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.loader import MigrationLoader
class Command(BaseCommand):
help = "Shows all available migrations for the current project"
def add_arguments(self, parser):
parser.add_argument(
'app_label', nargs='*',
help='App labels of applications to limit the output to.',
)
parser.add_argument(
'--database', default=DEFAULT_DB_ALIAS,
help='Nominates a database to synchronize. Defaults to the "default" database.',
)
formats = parser.add_mutually_exclusive_group()
formats.add_argument(
'--list', '-l', action='store_const', dest='format', const='list',
help=(
'Shows a list of all migrations and which are applied. '
'With a verbosity level of 2 or above, the applied datetimes '
'will be included.'
),
)
formats.add_argument(
'--plan', '-p', action='store_const', dest='format', const='plan',
help=(
'Shows all migrations in the order they will be applied. '
'With a verbosity level of 2 or above all direct migration dependencies '
'and reverse dependencies (run_before) will be included.'
)
)
parser.set_defaults(format='list')
def handle(self, *args, **options):
self.verbosity = options['verbosity']
# Get the database we're operating from
db = options['database']
connection = connections[db]
if options['format'] == "plan":
return self.show_plan(connection, options['app_label'])
else:
return self.show_list(connection, options['app_label'])
def _validate_app_names(self, loader, app_names):
has_bad_names = False
for app_name in app_names:
try:
apps.get_app_config(app_name)
except LookupError as err:
self.stderr.write(str(err))
has_bad_names = True
if has_bad_names:
sys.exit(2)
def show_list(self, connection, app_names=None):
"""
Show a list of all migrations on the system, or only those of
some named apps.
"""
# Load migrations from disk/DB
loader = MigrationLoader(connection, ignore_no_migrations=True)
graph = loader.graph
# If we were passed a list of apps, validate it
if app_names:
self._validate_app_names(loader, app_names)
# Otherwise, show all apps in alphabetic order
else:
app_names = sorted(loader.migrated_apps)
# For each app, print its migrations in order from oldest (roots) to
# newest (leaves).
for app_name in app_names:
self.stdout.write(app_name, self.style.MIGRATE_LABEL)
shown = set()
for node in graph.leaf_nodes(app_name):
for plan_node in graph.forwards_plan(node):
if plan_node not in shown and plan_node[0] == app_name:
# Give it a nice title if it's a squashed one
title = plan_node[1]
if graph.nodes[plan_node].replaces:
title += " (%s squashed migrations)" % len(graph.nodes[plan_node].replaces)
applied_migration = loader.applied_migrations.get(plan_node)
# Mark it as applied/unapplied
if applied_migration:
output = ' [X] %s' % title
if self.verbosity >= 2:
output += ' (applied at %s)' % applied_migration.applied.strftime('%Y-%m-%d %H:%M:%S')
self.stdout.write(output)
else:
self.stdout.write(" [ ] %s" % title)
shown.add(plan_node)
# If we didn't print anything, then a small message
if not shown:
self.stdout.write(" (no migrations)", self.style.ERROR)
def show_plan(self, connection, app_names=None):
"""
Show all known migrations (or only those of the specified app_names)
in the order they will be applied.
"""
# Load migrations from disk/DB
loader = MigrationLoader(connection)
graph = loader.graph
if app_names:
self._validate_app_names(loader, app_names)
targets = [key for key in graph.leaf_nodes() if key[0] in app_names]
else:
targets = graph.leaf_nodes()
plan = []
seen = set()
# Generate the plan
for target in targets:
for migration in graph.forwards_plan(target):
if migration not in seen:
node = graph.node_map[migration]
plan.append(node)
seen.add(migration)
# Output
def print_deps(node):
out = []
for parent in sorted(node.parents):
out.append("%s.%s" % parent.key)
if out:
return " ... (%s)" % ", ".join(out)
return ""
for node in plan:
deps = ""
if self.verbosity >= 2:
deps = print_deps(node)
if node.key in loader.applied_migrations:
self.stdout.write("[X] %s.%s%s" % (node.key[0], node.key[1], deps))
else:
self.stdout.write("[ ] %s.%s%s" % (node.key[0], node.key[1], deps))
if not plan:
self.stdout.write('(no migrations)', self.style.ERROR)
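# --- Illustrative usage (editor's addition; not part of the upstream module).
# Typical invocations of this management command from a project checkout; the
# app label and database alias below are assumptions for illustration.
#
#   python manage.py showmigrations                 # per-app list, [X] = applied
#   python manage.py showmigrations --list -v 2     # include applied datetimes
#   python manage.py showmigrations --plan          # ordered plan; deps shown at -v 2
#   python manage.py showmigrations myapp --database replica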
| bsd-3-clause |