| code (stringlengths 2-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 2-1.05M) |
|---|---|---|---|---|---|
# pylint: disable=missing-docstring
from datetime import datetime, timedelta

import factory
import pytz
from factory.django import DjangoModelFactory
from factory.fuzzy import FuzzyText
from oauth2_provider.models import AccessToken, Application, RefreshToken

from openedx.core.djangoapps.oauth_dispatch.models import ApplicationAccess
from common.djangoapps.student.tests.factories import UserFactory


class ApplicationFactory(DjangoModelFactory):
    class Meta:
        model = Application

    user = factory.SubFactory(UserFactory)
    client_id = factory.Sequence('client_{}'.format)
    client_secret = 'some_secret'
    client_type = 'confidential'
    authorization_grant_type = Application.CLIENT_CONFIDENTIAL
    name = FuzzyText(prefix='name', length=8)


class ApplicationAccessFactory(DjangoModelFactory):
    class Meta:
        model = ApplicationAccess

    application = factory.SubFactory(ApplicationFactory)
    scopes = ['grades:read']


class AccessTokenFactory(DjangoModelFactory):
    class Meta:
        model = AccessToken
        django_get_or_create = ('user', 'application')

    token = FuzzyText(length=32)
    expires = datetime.now(pytz.UTC) + timedelta(days=1)


class RefreshTokenFactory(DjangoModelFactory):
    class Meta:
        model = RefreshToken
        django_get_or_create = ('user', 'application')

    token = FuzzyText(length=32)
| eduNEXT/edx-platform | openedx/core/djangoapps/oauth_dispatch/tests/factories.py | Python | agpl-3.0 | 1,383 |
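A minimal usage sketch for the factory classes in the row above, not part of the original file: it assumes a configured Django test environment, and the helper name is hypothetical.

```python
# Hedged sketch: chains the factories above inside a Django test setup.
from openedx.core.djangoapps.oauth_dispatch.tests import factories


def build_token_for_new_app():
    # Hypothetical helper: creates an Application, grants it a scope,
    # and issues an AccessToken for the application's owner.
    application = factories.ApplicationFactory()
    factories.ApplicationAccessFactory(application=application)
    return factories.AccessTokenFactory(
        user=application.user,
        application=application,
    )
```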
#import DBusInterface
| buguelos/odoo | yowsup/Interfaces/DBus/__init__.py | Python | agpl-3.0 | 22 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Sleef(CMakePackage):
    """SIMD Library for Evaluating Elementary Functions,
    vectorized libm and DFT."""

    homepage = "http://sleef.org"
    url = "https://github.com/shibatch/sleef/archive/3.2.tar.gz"

    version('3.2', '459215058f2c8d55cd2b644d56c8c4f0')
| krafczyk/spack | var/spack/repos/builtin/packages/sleef/package.py | Python | lgpl-2.1 | 1,534 |
# -*- coding: utf-8 -*-
import httplib as http
import mock
from nose.tools import * # noqa
from boto.exception import S3ResponseError
from framework.auth import Auth
from tests.base import get_default_metaschema
from tests.factories import ProjectFactory, AuthUserFactory
from website.addons.base import testing
from website.addons.s3.tests.utils import S3AddonTestCase
from website.addons.s3.utils import validate_bucket_name, validate_bucket_location
from website.util import api_url_for
class TestS3Views(S3AddonTestCase, testing.views.OAuthAddonConfigViewsTestCaseMixin):
def setUp(self):
self.mock_can_list = mock.patch('website.addons.s3.views.utils.can_list')
self.mock_can_list.return_value = True
self.mock_can_list.start()
self.mock_uid = mock.patch('website.addons.s3.views.utils.get_user_info')
self.mock_uid.return_value = {'id': '1234567890', 'display_name': 's3.user'}
self.mock_uid.start()
self.mock_exists = mock.patch('website.addons.s3.views.utils.bucket_exists')
self.mock_exists.return_value = True
self.mock_exists.start()
super(TestS3Views, self).setUp()
def tearDown(self):
self.mock_can_list.stop()
self.mock_uid.stop()
self.mock_exists.stop()
super(TestS3Views, self).tearDown()
def test_s3_settings_input_empty_keys(self):
url = self.project.api_url_for('s3_add_user_account')
rv = self.app.post_json(url,{
'access_key': '',
'secret_key': ''
}, auth=self.user.auth, expect_errors=True)
assert_equals(rv.status_int, http.BAD_REQUEST)
assert_in('All the fields above are required.', rv.body)
def test_s3_settings_input_empty_access_key(self):
url = self.project.api_url_for('s3_add_user_account')
rv = self.app.post_json(url,{
'access_key': '',
'secret_key': 'Non-empty-secret-key'
}, auth=self.user.auth, expect_errors=True)
assert_equals(rv.status_int, http.BAD_REQUEST)
assert_in('All the fields above are required.', rv.body)
def test_s3_settings_input_empty_secret_key(self):
url = self.project.api_url_for('s3_add_user_account')
rv = self.app.post_json(url,{
'access_key': 'Non-empty-access-key',
'secret_key': ''
}, auth=self.user.auth, expect_errors=True)
assert_equals(rv.status_int, http.BAD_REQUEST)
assert_in('All the fields above are required.', rv.body)
def test_s3_set_bucket_no_settings(self):
user = AuthUserFactory()
self.project.add_contributor(user, save=True)
url = self.project.api_url_for('s3_set_config')
res = self.app.put_json(
url, {'s3_bucket': 'hammertofall'}, auth=user.auth,
expect_errors=True
)
assert_equal(res.status_code, http.BAD_REQUEST)
def test_s3_set_bucket_no_auth(self):
user = AuthUserFactory()
user.add_addon('s3')
self.project.add_contributor(user, save=True)
url = self.project.api_url_for('s3_set_config')
res = self.app.put_json(
url, {'s3_bucket': 'hammertofall'}, auth=user.auth,
expect_errors=True
)
assert_equal(res.status_code, http.FORBIDDEN)
def test_s3_set_bucket_registered(self):
registration = self.project.register_node(
get_default_metaschema(), Auth(self.user), '', ''
)
url = registration.api_url_for('s3_set_config')
res = self.app.put_json(
url, {'s3_bucket': 'hammertofall'}, auth=self.user.auth,
expect_errors=True,
)
assert_equal(res.status_code, http.BAD_REQUEST)
@mock.patch('website.addons.s3.views.utils.can_list', return_value=False)
def test_user_settings_cant_list(self, mock_can_list):
url = api_url_for('s3_add_user_account')
rv = self.app.post_json(url, {
'access_key': 'aldkjf',
'secret_key': 'las'
}, auth=self.user.auth, expect_errors=True)
assert_equals(rv.status_int, http.BAD_REQUEST)
assert_in('Unable to list buckets.', rv.body)
def test_s3_remove_node_settings_owner(self):
url = self.node_settings.owner.api_url_for('s3_deauthorize_node')
ret = self.app.delete(url, auth=self.user.auth)
result = self.Serializer().serialize_settings(node_settings=self.node_settings, current_user=self.user)
assert_equal(result['nodeHasAuth'], False)
def test_s3_remove_node_settings_unauthorized(self):
url = self.node_settings.owner.api_url_for('s3_deauthorize_node')
ret = self.app.delete(url, auth=None, expect_errors=True)
assert_equal(ret.status_code, 401)
def test_s3_get_node_settings_owner(self):
self.node_settings.set_auth(self.external_account, self.user)
self.node_settings.folder_id = 'bucket'
self.node_settings.save()
url = self.node_settings.owner.api_url_for('s3_get_config')
res = self.app.get(url, auth=self.user.auth)
result = res.json['result']
assert_equal(result['nodeHasAuth'], True)
assert_equal(result['userIsOwner'], True)
assert_equal(result['folder']['path'], self.node_settings.folder_id)
def test_s3_get_node_settings_unauthorized(self):
url = self.node_settings.owner.api_url_for('s3_get_config')
unauthorized = AuthUserFactory()
ret = self.app.get(url, auth=unauthorized.auth, expect_errors=True)
assert_equal(ret.status_code, 403)
## Overrides ##
@mock.patch('website.addons.s3.model.get_bucket_names')
def test_folder_list(self, mock_names):
mock_names.return_value = ['bucket1', 'bucket2']
super(TestS3Views, self).test_folder_list()
@mock.patch('website.addons.s3.model.bucket_exists')
@mock.patch('website.addons.s3.model.get_bucket_location_or_error')
def test_set_config(self, mock_location, mock_exists):
mock_exists.return_value = True
mock_location.return_value = ''
self.node_settings.set_auth(self.external_account, self.user)
url = self.project.api_url_for('{0}_set_config'.format(self.ADDON_SHORT_NAME))
res = self.app.put_json(url, {
'selected': self.folder
}, auth=self.user.auth)
assert_equal(res.status_code, http.OK)
self.project.reload()
self.node_settings.reload()
assert_equal(
self.project.logs.latest().action,
'{0}_bucket_linked'.format(self.ADDON_SHORT_NAME)
)
assert_equal(res.json['result']['folder']['name'], self.node_settings.folder_name)
class TestCreateBucket(S3AddonTestCase):
def setUp(self):
super(TestCreateBucket, self).setUp()
self.user = AuthUserFactory()
self.consolidated_auth = Auth(user=self.user)
self.auth = self.user.auth
self.project = ProjectFactory(creator=self.user)
self.project.add_addon('s3', auth=self.consolidated_auth)
self.project.creator.add_addon('s3')
self.user_settings = self.user.get_addon('s3')
self.user_settings.access_key = 'We-Will-Rock-You'
self.user_settings.secret_key = 'Idontknowanyqueensongs'
self.user_settings.save()
self.node_settings = self.project.get_addon('s3')
self.node_settings.bucket = 'Sheer-Heart-Attack'
self.node_settings.user_settings = self.project.creator.get_addon('s3')
self.node_settings.save()
def test_bad_names(self):
assert_false(validate_bucket_name(''))
assert_false(validate_bucket_name('no'))
assert_false(validate_bucket_name('a' * 64))
assert_false(validate_bucket_name(' leadingspace'))
assert_false(validate_bucket_name('trailingspace '))
assert_false(validate_bucket_name('bogus naMe'))
assert_false(validate_bucket_name('.cantstartwithp'))
assert_false(validate_bucket_name('or.endwith.'))
assert_false(validate_bucket_name('..nodoubles'))
assert_false(validate_bucket_name('no_unders_in'))
assert_false(validate_bucket_name('-leadinghyphen'))
assert_false(validate_bucket_name('trailinghyphen-'))
assert_false(validate_bucket_name('Mixedcase'))
assert_false(validate_bucket_name('empty..label'))
assert_false(validate_bucket_name('label-.trailinghyphen'))
assert_false(validate_bucket_name('label.-leadinghyphen'))
assert_false(validate_bucket_name('8.8.8.8'))
assert_false(validate_bucket_name('600.9000.0.28'))
assert_false(validate_bucket_name('no_underscore'))
assert_false(validate_bucket_name('_nounderscoreinfront'))
assert_false(validate_bucket_name('no-underscore-in-back_'))
assert_false(validate_bucket_name('no-underscore-in_the_middle_either'))
def test_names(self):
assert_true(validate_bucket_name('imagoodname'))
assert_true(validate_bucket_name('still.passing'))
assert_true(validate_bucket_name('can-have-dashes'))
assert_true(validate_bucket_name('kinda.name.spaced'))
assert_true(validate_bucket_name('a-o.valid'))
assert_true(validate_bucket_name('11.12.m'))
assert_true(validate_bucket_name('a--------a'))
assert_true(validate_bucket_name('a' * 63))
def test_bad_locations(self):
assert_false(validate_bucket_location('Venus'))
assert_false(validate_bucket_location('AlphaCentari'))
assert_false(validate_bucket_location('CostaRica'))
def test_locations(self):
assert_true(validate_bucket_location(''))
assert_true(validate_bucket_location('us-east-2'))
assert_true(validate_bucket_location('eu-central-1'))
assert_true(validate_bucket_location('us-west-1'))
assert_true(validate_bucket_location('us-west-2'))
assert_true(validate_bucket_location('ap-northeast-1'))
assert_true(validate_bucket_location('ap-northeast-2'))
assert_true(validate_bucket_location('ap-southeast-1'))
assert_true(validate_bucket_location('ap-southeast-2'))
assert_true(validate_bucket_location('ap-south-1'))
assert_true(validate_bucket_location('sa-east-1'))
assert_true(validate_bucket_location('eu-west-1'))
@mock.patch('website.addons.s3.views.utils.create_bucket')
@mock.patch('website.addons.s3.views.utils.get_bucket_names')
def test_create_bucket_pass(self, mock_names, mock_make):
mock_make.return_value = True
mock_names.return_value = [
'butintheend',
'it',
'doesntevenmatter'
]
url = self.project.api_url_for('create_bucket')
ret = self.app.post_json(
url,
{
'bucket_name': 'doesntevenmatter',
'bucket_location': '',
},
auth=self.user.auth
)
assert_equal(ret.status_int, http.OK)
assert_equal(ret.json, {})
@mock.patch('website.addons.s3.views.utils.create_bucket')
def test_create_bucket_fail(self, mock_make):
error = S3ResponseError(418, 'because Im a test')
error.message = 'This should work'
mock_make.side_effect = error
url = "/api/v1/project/{0}/s3/newbucket/".format(self.project._id)
ret = self.app.post_json(url, {'bucket_name': 'doesntevenmatter'}, auth=self.user.auth, expect_errors=True)
assert_equals(ret.body, '{"message": "This should work", "title": "Problem connecting to S3"}')
@mock.patch('website.addons.s3.views.utils.create_bucket')
def test_bad_location_fails(self, mock_make):
url = "/api/v1/project/{0}/s3/newbucket/".format(self.project._id)
ret = self.app.post_json(
url,
{
'bucket_name': 'doesntevenmatter',
'bucket_location': 'not a real bucket location',
},
auth=self.user.auth,
expect_errors=True)
assert_equals(ret.body, '{"message": "That bucket location is not valid.", "title": "Invalid bucket location"}')
| monikagrabowska/osf.io | website/addons/s3/tests/test_view.py | Python | apache-2.0 | 12,215 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import unittest
from multiprocessing import Process
import signal

import numpy

import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid.layers.io import ListenAndServ
from paddle.fluid.layers.io import Recv
from paddle.fluid.layers.io import Send
import paddle.fluid.layers.ops as ops
from paddle.fluid.transpiler.details import program_to_code


class TestProgram2Code(unittest.TestCase):
    def test_print(self):
        place = fluid.CPUPlace()
        self.init_serv(place)
        self.init_client(place, 9123)

    def init_serv(self, place):
        main = fluid.Program()
        with fluid.program_guard(main):
            serv = ListenAndServ("127.0.0.1:0", ["X"], optimizer_mode=False)
            with serv.do():
                out_var = main.global_block().create_var(
                    name="scale_0.tmp_0",
                    persistable=True,
                    dtype="float32",
                    shape=[32, 32])
                x = layers.data(
                    shape=[32, 32],
                    dtype='float32',
                    name="X",
                    append_batch_size=False)
                fluid.initializer.Constant(value=1.0)(x, main.global_block())
                ops._scale(x=x, scale=10.0, out=out_var)

        program_to_code(main)

    def init_client(self, place, port):
        main = fluid.Program()
        with fluid.program_guard(main):
            x = layers.data(
                shape=[32, 32],
                dtype='float32',
                name='X',
                append_batch_size=False)
            fluid.initializer.Constant(value=2.3)(x, main.global_block())
            get_var = main.global_block().create_var(
                name="scale_0.tmp_0",  # server side var
                dtype="float32",
                persistable=False,
                shape=[32, 32])
            fluid.initializer.Constant(value=2.3)(get_var, main.global_block())
            Send("127.0.0.1:%d" % port, [x])
            o = Recv("127.0.0.1:%d" % port, [get_var])

        program_to_code(main)


if __name__ == "__main__":
    unittest.main()
| tensor-tang/Paddle | python/paddle/fluid/tests/unittests/test_program_code.py | Python | apache-2.0 | 2,769 |
import re

from django import template
from django import forms

register = template.Library()


def _process_field_attributes(field, attr, process):
    # split attribute name and value from 'attr:value' string
    params = attr.split(':', 1)
    attribute = params[0]
    value = params[1] if len(params) == 2 else ''

    # decorate field.as_widget method with updated attributes
    old_as_widget = field.as_widget

    def as_widget(self, widget=None, attrs=None, only_initial=False):
        attrs = attrs or {}
        process(widget or self.field.widget, attrs, attribute, value)
        return old_as_widget(widget, attrs, only_initial)

    bound_method = type(old_as_widget)
    try:
        field.as_widget = bound_method(as_widget, field, field.__class__)
    except TypeError:  # python 3
        field.as_widget = bound_method(as_widget, field)
    return field


@register.filter
def subtract(value, arg):
    return value - arg


@register.filter
def modulo(num, val):
    return num % val


@register.filter
def addcss(field, attr):
    def process(widget, attrs, attribute, value):
        if attrs.get(attribute):
            attrs[attribute] += ' ' + value
        elif widget.attrs.get(attribute):
            attrs[attribute] = widget.attrs[attribute] + ' ' + value
        else:
            attrs[attribute] = value
    return _process_field_attributes(field, attr, process)


@register.filter
def is_checkbox(field):
    return isinstance(field.field.widget, forms.CheckboxInput)


@register.filter
def is_multiple_checkbox(field):
    return isinstance(field.field.widget, forms.CheckboxSelectMultiple)


@register.filter
def is_radio(field):
    return isinstance(field.field.widget, forms.RadioSelect)


@register.filter
def is_file(field):
    return isinstance(field.field.widget, forms.FileInput) or \
        isinstance(field, forms.ClearableFileInput)


@register.filter
def sum_dict(d):
    total = 0
    for key, value in d.items():
        total += value
    return total


@register.filter
def nice_title(title):
    pat = re.compile(r'Finding [0-9][0-9][0-9]:*')
    s = pat.split(title, 2)
    try:
        ret = s[1]
        return ret
    except:
        return title


@register.filter
def pad_zeroes(num):
    return str(num).zfill(3)


@register.filter
def hash(h, key):
    return h[key]


@register.filter
def getZero(h, key):
    return h[key][0]


@register.filter
def getOne(h, key):
    return h[key][1]


@register.filter
def getTwo(h, key):
    return h[key][2]


@register.filter
def getThree(h, key):
    return h[key][3]


@register.filter
def getFour(h, key):
    return h[key][4]
| Prakhash/security-tools | external/django-DefectDojo-1.2.1/dojo/templatetags/event_tags.py | Python | apache-2.0 | 2,640 |
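A small sketch, not part of the original file, that exercises the plain helper filters above as ordinary Python functions; the import path `dojo.templatetags.event_tags` is inferred from the row's file path and is otherwise an assumption.

```python
# In templates these filters are used via {% load event_tags %}, e.g.
#   {{ form.title|addcss:"class:form-control" }}
# Here the simple value filters are called directly for illustration.
from dojo.templatetags.event_tags import subtract, modulo, pad_zeroes, nice_title

assert subtract(10, 3) == 7
assert modulo(10, 3) == 1
assert pad_zeroes(7) == '007'
# nice_title() drops a leading "Finding NNN:" prefix when present.
assert nice_title('Finding 001: SQL Injection').strip() == 'SQL Injection'
```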
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from designateclient import exceptions as designate_exceptions
from designateclient import v1 as designate_client
from heat.common import exception as heat_exception
from heat.engine.clients.os import designate as client
from heat.tests import common
class DesignateDomainConstraintTest(common.HeatTestCase):
def test_expected_exceptions(self):
self.assertEqual((heat_exception.EntityNotFound,),
client.DesignateDomainConstraint.expected_exceptions,
"DesignateDomainConstraint expected exceptions error")
def test_constrain(self):
constrain = client.DesignateDomainConstraint()
client_mock = mock.MagicMock()
client_plugin_mock = mock.MagicMock()
client_plugin_mock.get_domain_id.return_value = None
client_mock.client_plugin.return_value = client_plugin_mock
self.assertIsNone(constrain.validate_with_client(client_mock,
'domain_1'))
client_plugin_mock.get_domain_id.assert_called_once_with('domain_1')
class DesignateClientPluginTest(common.HeatTestCase):
@mock.patch.object(designate_client, 'Client')
@mock.patch.object(client.DesignateClientPlugin, '_get_client_args')
def test_client(self,
get_client_args,
client_designate):
args = dict(
auth_url='auth_url',
project_id='project_id',
token=lambda: '',
os_endpoint='os_endpoint',
cacert='cacert',
insecure='insecure'
)
get_client_args.return_value = args
client_plugin = client.DesignateClientPlugin(
context=mock.MagicMock()
)
client_plugin.client()
# Make sure the right args are created
get_client_args.assert_called_once_with(
service_name='designate',
service_type='dns'
)
# Make sure proper client is created with expected args
client_designate.assert_called_once_with(
auth_url='auth_url',
project_id='project_id',
token='',
endpoint='os_endpoint',
cacert='cacert',
insecure='insecure'
)
class DesignateClientPluginDomainTest(common.HeatTestCase):
sample_uuid = '477e8273-60a7-4c41-b683-fdb0bc7cd152'
sample_name = 'test-domain.com'
def _get_mock_domain(self):
domain = mock.MagicMock()
domain.id = self.sample_uuid
domain.name = self.sample_name
return domain
def setUp(self):
super(DesignateClientPluginDomainTest, self).setUp()
self._client = mock.MagicMock()
self.client_plugin = client.DesignateClientPlugin(
context=mock.MagicMock()
)
@mock.patch.object(client.DesignateClientPlugin, 'client')
def test_get_domain_id(self, client_designate):
self._client.domains.get.return_value = self._get_mock_domain()
client_designate.return_value = self._client
self.assertEqual(self.sample_uuid,
self.client_plugin.get_domain_id(self.sample_uuid))
self._client.domains.get.assert_called_once_with(
self.sample_uuid)
@mock.patch.object(client.DesignateClientPlugin, 'client')
def test_get_domain_id_not_found(self, client_designate):
self._client.domains.get.side_effect = (designate_exceptions
.NotFound)
client_designate.return_value = self._client
ex = self.assertRaises(heat_exception.EntityNotFound,
self.client_plugin.get_domain_id,
self.sample_uuid)
msg = ("The Designate Domain (%(name)s) could not be found." %
{'name': self.sample_uuid})
self.assertEqual(msg, six.text_type(ex))
self._client.domains.get.assert_called_once_with(
self.sample_uuid)
@mock.patch.object(client.DesignateClientPlugin, 'client')
def test_get_domain_id_by_name(self, client_designate):
self._client.domains.get.side_effect = (designate_exceptions
.NotFound)
self._client.domains.list.return_value = [self._get_mock_domain()]
client_designate.return_value = self._client
self.assertEqual(self.sample_uuid,
self.client_plugin.get_domain_id(self.sample_name))
self._client.domains.get.assert_called_once_with(
self.sample_name)
self._client.domains.list.assert_called_once_with()
@mock.patch.object(client.DesignateClientPlugin, 'client')
def test_get_domain_id_by_name_not_found(self, client_designate):
self._client.domains.get.side_effect = (designate_exceptions
.NotFound)
self._client.domains.list.return_value = []
client_designate.return_value = self._client
ex = self.assertRaises(heat_exception.EntityNotFound,
self.client_plugin.get_domain_id,
self.sample_name)
msg = ("The Designate Domain (%(name)s) could not be found." %
{'name': self.sample_name})
self.assertEqual(msg, six.text_type(ex))
self._client.domains.get.assert_called_once_with(
self.sample_name)
self._client.domains.list.assert_called_once_with()
@mock.patch.object(client.DesignateClientPlugin, 'client')
@mock.patch('designateclient.v1.domains.Domain')
def test_domain_create(self, mock_domain, client_designate):
self._client.domains.create.return_value = None
client_designate.return_value = self._client
domain = dict(
name='test-domain.com',
description='updated description',
ttl=4200,
email='[email protected]'
)
mock_sample_domain = mock.Mock()
mock_domain.return_value = mock_sample_domain
self.client_plugin.domain_create(**domain)
# Make sure domain entity is created with right arguments
mock_domain.assert_called_once_with(**domain)
self._client.domains.create.assert_called_once_with(
mock_sample_domain)
@mock.patch.object(client.DesignateClientPlugin, 'client')
def test_domain_update(self, client_designate):
self._client.domains.update.return_value = None
mock_domain = self._get_mock_domain()
self._client.domains.get.return_value = mock_domain
client_designate.return_value = self._client
domain = dict(
id='sample-id',
description='updated description',
ttl=4200,
email='[email protected]'
)
self.client_plugin.domain_update(**domain)
self._client.domains.get.assert_called_once_with(
mock_domain.id)
for key in domain.keys():
setattr(mock_domain, key, domain[key])
self._client.domains.update.assert_called_once_with(
mock_domain)
class DesignateClientPluginRecordTest(common.HeatTestCase):
sample_uuid = '477e8273-60a7-4c41-b683-fdb0bc7cd152'
sample_domain_id = '477e8273-60a7-4c41-b683-fdb0bc7cd153'
def _get_mock_record(self):
record = mock.MagicMock()
record.id = self.sample_uuid
record.domain_id = self.sample_domain_id
return record
def setUp(self):
super(DesignateClientPluginRecordTest, self).setUp()
self._client = mock.MagicMock()
self.client_plugin = client.DesignateClientPlugin(
context=mock.MagicMock()
)
self.client_plugin.get_domain_id = mock.Mock(
return_value=self.sample_domain_id)
@mock.patch.object(client.DesignateClientPlugin, 'client')
@mock.patch('designateclient.v1.records.Record')
def test_record_create(self, mock_record, client_designate):
self._client.records.create.return_value = None
client_designate.return_value = self._client
record = dict(
name='test-record.com',
description='updated description',
ttl=4200,
type='',
priority=1,
data='1.1.1.1',
domain=self.sample_domain_id
)
mock_sample_record = mock.Mock()
mock_record.return_value = mock_sample_record
self.client_plugin.record_create(**record)
# Make sure record entity is created with right arguments
domain_id = record.pop('domain')
mock_record.assert_called_once_with(**record)
self._client.records.create.assert_called_once_with(
domain_id,
mock_sample_record)
@mock.patch.object(client.DesignateClientPlugin, 'client')
@mock.patch('designateclient.v1.records.Record')
def test_record_update(self, mock_record, client_designate):
self._client.records.update.return_value = None
mock_record = self._get_mock_record()
self._client.records.get.return_value = mock_record
client_designate.return_value = self._client
record = dict(
id=self.sample_uuid,
name='test-record.com',
description='updated description',
ttl=4200,
type='',
priority=1,
data='1.1.1.1',
domain=self.sample_domain_id
)
self.client_plugin.record_update(**record)
self._client.records.get.assert_called_once_with(
self.sample_domain_id,
self.sample_uuid)
for key in record.keys():
setattr(mock_record, key, record[key])
self._client.records.update.assert_called_once_with(
self.sample_domain_id,
mock_record)
@mock.patch.object(client.DesignateClientPlugin, 'client')
@mock.patch('designateclient.v1.records.Record')
def test_record_delete(self, mock_record, client_designate):
self._client.records.delete.return_value = None
client_designate.return_value = self._client
record = dict(
id=self.sample_uuid,
domain=self.sample_domain_id
)
self.client_plugin.record_delete(**record)
self._client.records.delete.assert_called_once_with(
self.sample_domain_id,
self.sample_uuid)
@mock.patch.object(client.DesignateClientPlugin, 'client')
@mock.patch('designateclient.v1.records.Record')
def test_record_show(self, mock_record, client_designate):
self._client.records.get.return_value = None
client_designate.return_value = self._client
record = dict(
id=self.sample_uuid,
domain=self.sample_domain_id
)
self.client_plugin.record_show(**record)
self._client.records.get.assert_called_once_with(
self.sample_domain_id,
self.sample_uuid)
| gonzolino/heat | heat/tests/clients/test_designate_client.py | Python | apache-2.0 | 11,629 |
from __future__ import absolute_import, division, print_function

from lxml import etree

import os


def open_xml_file(filename, mode):
    """Opens an XML file for use.

    :param filename: XML file to create file from
    :param mode: file mode for open
    :return:
    """
    base = os.path.dirname(__file__) + '/xml_test_files/'
    return open(os.path.join(base, filename), mode)


def create_node_from_file(filename):
    """Creates an xml node from a given XML file.

    :param filename: XML file to create node from
    :return: node
    """
    node = etree.parse(open_xml_file(filename, 'r'))
    return node
| chelseawinfree/symantecssl | tests/unit/utils.py | Python | apache-2.0 | 623 |
class EmptyResult(object):
    '''
    Null Object pattern to prevent Null reference errors
    when there is no result
    '''
    def __init__(self):
        self.status = 0
        self.body = ''
        self.msg = ''
        self.reason = ''

    def __nonzero__(self):
        return False


class HapiError(ValueError):
    """Any problems get thrown as HapiError exceptions with the relevant info inside"""

    as_str_template = u'''
---- request ----
{method} {host}{url}, [timeout={timeout}]
---- body ----
{body}
---- headers ----
{headers}
---- result ----
{result_status}
---- body -----
{result_body}
---- headers -----
{result_headers}
---- reason ----
{result_reason}
---- trigger error ----
{error}
'''

    def __init__(self, result, request, err=None):
        super(HapiError, self).__init__(result and result.reason or "Unknown Reason")
        if result == None:
            self.result = EmptyResult()
        else:
            self.result = result
        if request == None:
            request = {}
        self.request = request
        self.err = err

    def __str__(self):
        return self.__unicode__().encode('ascii', 'replace')

    def __unicode__(self):
        params = {}
        request_keys = ('method', 'host', 'url', 'data', 'headers', 'timeout', 'body')
        result_attrs = ('status', 'reason', 'msg', 'body', 'headers')
        params['error'] = self.err
        for key in request_keys:
            params[key] = self.request.get(key)
        for attr in result_attrs:
            params['result_%s' % attr] = getattr(self.result, attr, '')
        params = self._dict_vals_to_unicode(params)
        return self.as_str_template.format(**params)

    def _dict_vals_to_unicode(self, data):
        unicode_data = {}
        for key, val in data.items():
            if not isinstance(val, basestring):
                unicode_data[key] = unicode(val)
            elif not isinstance(val, unicode):
                unicode_data[key] = unicode(val, 'utf8', 'ignore')
            else:
                unicode_data[key] = val
        return unicode_data


# Create more specific error cases, to make filtering errors easier
class HapiBadRequest(HapiError):
    '''Error wrapper for most 40X results and 501 results'''


class HapiNotFound(HapiError):
    '''Error wrapper for 404 and 410 results'''


class HapiTimeout(HapiError):
    '''Wrapper for socket timeouts, sslerror, and 504'''


class HapiUnauthorized(HapiError):
    '''Wrapper for 401 Unauthorized errors'''


class HapiServerError(HapiError):
    '''Wrapper for most 500 errors'''
| jonathan-s/happy | happy/error.py | Python | apache-2.0 | 2,606 |
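A brief sketch, not from the original file, of how the specific wrappers above might be selected from an HTTP result; the `check_response` helper and its status mapping are assumptions based on the docstrings, and the import path follows the row's file path.

```python
from happy.error import (HapiBadRequest, HapiNotFound, HapiServerError,
                         HapiTimeout, HapiUnauthorized)


def check_response(result, request):
    # Hypothetical dispatcher: picks the wrapper suggested by each class's
    # docstring. A missing result is treated like a timeout.
    if result is None:
        raise HapiTimeout(result, request)
    if result.status == 401:
        raise HapiUnauthorized(result, request)
    if result.status in (404, 410):
        raise HapiNotFound(result, request)
    if result.status >= 500:
        raise HapiServerError(result, request)
    if result.status >= 400:
        raise HapiBadRequest(result, request)
    return result
```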
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.shortcuts import render_to_response
from django.template.context import RequestContext

from help.models import ConditionsChapter, FAQ


def faqs(request):
    extra_context = {}
    extra_context['faqs'] = FAQ.objects.all()
    return render_to_response('help/faqs.html',
                              extra_context,
                              context_instance=RequestContext(request))


def terms(request):
    extra_context = {}
    extra_context['termsandconditions'] = ConditionsChapter.objects.all()
    return render_to_response('help/terms-and-conditions.html',
                              extra_context,
                              context_instance=RequestContext(request))


def about(request):
    extra_context = {}
    return render_to_response('help/about.html',
                              extra_context,
                              context_instance=RequestContext(request))
| GDGLima/contentbox | help/views.py | Python | apache-2.0 | 1,559 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
                        unicode_literals, with_statement)

import os

from pants.backend.jvm.tasks.jvm_tool_task_mixin import JvmToolTaskMixin
from pants.base.exceptions import TaskError
from pants.java import util
from pants.java.executor import SubprocessExecutor
from pants.java.jar.jar_dependency import JarDependency
from pants.java.nailgun_executor import NailgunExecutor, NailgunProcessGroup
from pants.pantsd.subsystem.subprocess import Subprocess
from pants.task.task import Task, TaskBase


class NailgunTaskBase(JvmToolTaskMixin, TaskBase):
  ID_PREFIX = 'ng'

  @classmethod
  def register_options(cls, register):
    super(NailgunTaskBase, cls).register_options(register)
    register('--use-nailgun', type=bool, default=True,
             help='Use nailgun to make repeated invocations of this task quicker.')
    register('--nailgun-timeout-seconds', advanced=True, default=10, type=float,
             help='Timeout (secs) for nailgun startup.')
    register('--nailgun-connect-attempts', advanced=True, default=5, type=int,
             help='Max attempts for nailgun connects.')
    cls.register_jvm_tool(register,
                          'nailgun-server',
                          classpath=[
                            JarDependency(org='com.martiansoftware',
                                          name='nailgun-server',
                                          rev='0.9.1'),
                          ])

  @classmethod
  def subsystem_dependencies(cls):
    return super(NailgunTaskBase, cls).subsystem_dependencies() + (Subprocess.Factory,)

  def __init__(self, *args, **kwargs):
    """
    :API: public
    """
    super(NailgunTaskBase, self).__init__(*args, **kwargs)

    id_tuple = (self.ID_PREFIX, self.__class__.__name__)
    self._identity = '_'.join(id_tuple)
    self._executor_workdir = os.path.join(self.context.options.for_global_scope().pants_workdir,
                                          *id_tuple)

  def create_java_executor(self):
    """Create java executor that uses this task's ng daemon, if allowed.

    Call only in execute() or later. TODO: Enforce this.
    """
    if self.get_options().use_nailgun:
      classpath = os.pathsep.join(self.tool_classpath('nailgun-server'))
      return NailgunExecutor(self._identity,
                             self._executor_workdir,
                             classpath,
                             self.dist,
                             connect_timeout=self.get_options().nailgun_timeout_seconds,
                             connect_attempts=self.get_options().nailgun_connect_attempts)
    else:
      return SubprocessExecutor(self.dist)

  def runjava(self, classpath, main, jvm_options=None, args=None, workunit_name=None,
              workunit_labels=None, workunit_log_config=None):
    """Runs the java main using the given classpath and args.

    If --no-use-nailgun is specified then the java main is run in a freshly spawned subprocess,
    otherwise a persistent nailgun server dedicated to this Task subclass is used to speed up
    amortized run times.

    :API: public
    """
    executor = self.create_java_executor()

    # Creating synthetic jar to work around system arg length limit is not necessary
    # when `NailgunExecutor` is used because args are passed through socket, therefore turning off
    # creating synthetic jar if nailgun is used.
    create_synthetic_jar = not self.get_options().use_nailgun

    try:
      return util.execute_java(classpath=classpath,
                               main=main,
                               jvm_options=jvm_options,
                               args=args,
                               executor=executor,
                               workunit_factory=self.context.new_workunit,
                               workunit_name=workunit_name,
                               workunit_labels=workunit_labels,
                               workunit_log_config=workunit_log_config,
                               create_synthetic_jar=create_synthetic_jar,
                               synthetic_jar_dir=self._executor_workdir)
    except executor.Error as e:
      raise TaskError(e)


# TODO(John Sirois): This just prevents ripple - maybe inline
class NailgunTask(NailgunTaskBase, Task):
  """
  :API: public
  """
  pass


class NailgunKillall(Task):
  """Kill running nailgun servers."""

  @classmethod
  def register_options(cls, register):
    super(NailgunKillall, cls).register_options(register)
    register('--everywhere', type=bool,
             help='Kill all nailguns servers launched by pants for all workspaces on the system.')

  def execute(self):
    NailgunProcessGroup().killall(everywhere=self.get_options().everywhere)
| landism/pants | src/python/pants/backend/jvm/tasks/nailgun_task.py | Python | apache-2.0 | 4,969 |
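A hypothetical subclass sketch, not part of the original file, showing the intended shape of a task built on `NailgunTask` above; the tool name, main class, and arguments are assumptions.

```python
from pants.backend.jvm.tasks.nailgun_task import NailgunTask


class ExampleToolTask(NailgunTask):
  """Runs a made-up JVM tool through the shared nailgun machinery."""

  @classmethod
  def register_options(cls, register):
    super(ExampleToolTask, cls).register_options(register)
    cls.register_jvm_tool(register, 'example-tool')

  def execute(self):
    # runjava() picks a NailgunExecutor or SubprocessExecutor based on
    # the --use-nailgun option registered by NailgunTaskBase.
    self.runjava(classpath=self.tool_classpath('example-tool'),
                 main='org.example.Main',
                 args=['--verbose'],
                 workunit_name='example-tool')
```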
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for supervisor.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import shutil
import time
import uuid
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.framework import meta_graph
def _summary_iterator(test_dir):
"""Reads events from test_dir/events.
Args:
test_dir: Name of the test directory.
Returns:
A summary_iterator
"""
event_paths = sorted(glob.glob(os.path.join(test_dir, "event*")))
return tf.train.summary_iterator(event_paths[-1])
def _test_dir(test_name):
test_dir = os.path.join(tf.test.get_temp_dir(), test_name)
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
return test_dir
class SupervisorTest(tf.test.TestCase):
def _wait_for_glob(self, pattern, timeout_secs, for_checkpoint=True):
"""Wait for a checkpoint file to appear.
Args:
pattern: A string.
timeout_secs: How long to wait for in seconds.
for_checkpoint: whether we're globbing for checkpoints.
"""
end_time = time.time() + timeout_secs
while time.time() < end_time:
if for_checkpoint:
if tf.train.checkpoint_exists(pattern):
return
else:
if len(tf.gfile.Glob(pattern)) >= 1:
return
time.sleep(0.05)
self.assertFalse(True, "Glob never matched any file: %s" % pattern)
# This test does not test much.
def testBasics(self):
logdir = _test_dir("basics")
with tf.Graph().as_default():
my_op = tf.constant(1.0)
sv = tf.train.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session("")
for _ in xrange(10):
sess.run(my_op)
sess.close()
sv.stop()
def testManagedSession(self):
logdir = _test_dir("managed_session")
with tf.Graph().as_default():
my_op = tf.constant(1.0)
sv = tf.train.Supervisor(logdir=logdir)
with sv.managed_session("") as sess:
for _ in xrange(10):
sess.run(my_op)
# Supervisor has been stopped.
self.assertTrue(sv.should_stop())
def testManagedSessionUserError(self):
logdir = _test_dir("managed_user_error")
with tf.Graph().as_default():
my_op = tf.constant(1.0)
sv = tf.train.Supervisor(logdir=logdir)
last_step = None
with self.assertRaisesRegexp(RuntimeError, "failing here"):
with sv.managed_session("") as sess:
for step in xrange(10):
last_step = step
if step == 1:
raise RuntimeError("failing here")
else:
sess.run(my_op)
# Supervisor has been stopped.
self.assertTrue(sv.should_stop())
self.assertEqual(1, last_step)
def testManagedSessionIgnoreOutOfRangeError(self):
logdir = _test_dir("managed_out_of_range")
with tf.Graph().as_default():
my_op = tf.constant(1.0)
sv = tf.train.Supervisor(logdir=logdir)
last_step = None
with sv.managed_session("") as sess:
for step in xrange(10):
last_step = step
if step == 3:
raise tf.errors.OutOfRangeError(my_op.op.node_def, my_op.op,
"all done")
else:
sess.run(my_op)
# Supervisor has been stopped. OutOfRangeError was not thrown.
self.assertTrue(sv.should_stop())
self.assertEqual(3, last_step)
def testManagedSessionDoNotKeepSummaryWriter(self):
logdir = _test_dir("managed_not_keep_summary_writer")
with tf.Graph().as_default():
tf.summary.scalar("c1", tf.constant(1))
tf.summary.scalar("c2", tf.constant(2))
tf.summary.scalar("c3", tf.constant(3))
summ = tf.summary.merge_all()
sv = tf.train.Supervisor(logdir=logdir, summary_op=None)
with sv.managed_session("", close_summary_writer=True,
start_standard_services=False) as sess:
sv.summary_computed(sess, sess.run(summ))
# Sleep 1.2s to make sure that the next event file has a different name
# than the current one.
time.sleep(1.2)
with sv.managed_session("", close_summary_writer=True,
start_standard_services=False) as sess:
sv.summary_computed(sess, sess.run(summ))
event_paths = sorted(glob.glob(os.path.join(logdir, "event*")))
self.assertEquals(2, len(event_paths))
# The two event files should have the same contents.
for path in event_paths:
# The summary iterator should report the summary once as we closed the
# summary writer across the 2 sessions.
rr = tf.train.summary_iterator(path)
# The first event should list the file_version.
ev = next(rr)
self.assertEquals("brain.Event:2", ev.file_version)
# The next one has the graph and metagraph.
ev = next(rr)
self.assertTrue(ev.graph_def)
ev = next(rr)
self.assertTrue(ev.meta_graph_def)
# The next one should have the values from the summary.
# But only once.
ev = next(rr)
self.assertProtoEquals("""
value { tag: 'c1' simple_value: 1.0 }
value { tag: 'c2' simple_value: 2.0 }
value { tag: 'c3' simple_value: 3.0 }
""", ev.summary)
# The next one should be a stop message if we closed cleanly.
ev = next(rr)
self.assertEquals(tf.SessionLog.STOP, ev.session_log.status)
# We should be done.
with self.assertRaises(StopIteration):
next(rr)
def testManagedSessionKeepSummaryWriter(self):
logdir = _test_dir("managed_keep_summary_writer")
with tf.Graph().as_default():
tf.summary.scalar("c1", tf.constant(1))
tf.summary.scalar("c2", tf.constant(2))
tf.summary.scalar("c3", tf.constant(3))
summ = tf.summary.merge_all()
sv = tf.train.Supervisor(logdir=logdir)
with sv.managed_session("", close_summary_writer=False,
start_standard_services=False) as sess:
sv.summary_computed(sess, sess.run(summ))
with sv.managed_session("", close_summary_writer=False,
start_standard_services=False) as sess:
sv.summary_computed(sess, sess.run(summ))
# Now close the summary writer to flush the events.
sv.summary_writer.close()
# The summary iterator should report the summary twice as we reused
# the same summary writer across the 2 sessions.
rr = _summary_iterator(logdir)
# The first event should list the file_version.
ev = next(rr)
self.assertEquals("brain.Event:2", ev.file_version)
# The next one has the graph.
ev = next(rr)
self.assertTrue(ev.graph_def)
ev = next(rr)
self.assertTrue(ev.meta_graph_def)
# The next one should have the values from the summary.
ev = next(rr)
self.assertProtoEquals("""
value { tag: 'c1' simple_value: 1.0 }
value { tag: 'c2' simple_value: 2.0 }
value { tag: 'c3' simple_value: 3.0 }
""", ev.summary)
# The next one should also have the values from the summary.
ev = next(rr)
self.assertProtoEquals("""
value { tag: 'c1' simple_value: 1.0 }
value { tag: 'c2' simple_value: 2.0 }
value { tag: 'c3' simple_value: 3.0 }
""", ev.summary)
# We should be done.
self.assertRaises(StopIteration, lambda: next(rr))
def _csv_data(self, logdir):
# Create a small data file with 3 CSV records.
data_path = os.path.join(logdir, "data.csv")
with open(data_path, "w") as f:
f.write("1,2,3\n")
f.write("4,5,6\n")
f.write("7,8,9\n")
return data_path
def testManagedEndOfInputOneQueue(self):
# Tests that the supervisor finishes without an error when using
# a fixed number of epochs, reading from a single queue.
logdir = _test_dir("managed_end_of_input_one_queue")
os.makedirs(logdir)
data_path = self._csv_data(logdir)
with tf.Graph().as_default():
# Create an input pipeline that reads the file 3 times.
filename_queue = tf.train.string_input_producer([data_path], num_epochs=3)
reader = tf.TextLineReader()
_, csv = reader.read(filename_queue)
rec = tf.decode_csv(csv, record_defaults=[[1], [1], [1]])
sv = tf.train.Supervisor(logdir=logdir)
with sv.managed_session("") as sess:
while not sv.should_stop():
sess.run(rec)
def testManagedEndOfInputTwoQueues(self):
# Tests that the supervisor finishes without an error when using
# a fixed number of epochs, reading from two queues, the second
# one producing a batch from the first one.
logdir = _test_dir("managed_end_of_input_two_queues")
os.makedirs(logdir)
data_path = self._csv_data(logdir)
with tf.Graph().as_default():
# Create an input pipeline that reads the file 3 times.
filename_queue = tf.train.string_input_producer([data_path], num_epochs=3)
reader = tf.TextLineReader()
_, csv = reader.read(filename_queue)
rec = tf.decode_csv(csv, record_defaults=[[1], [1], [1]])
shuff_rec = tf.train.shuffle_batch(rec, 1, 6, 4)
sv = tf.train.Supervisor(logdir=logdir)
with sv.managed_session("") as sess:
while not sv.should_stop():
sess.run(shuff_rec)
def testManagedMainErrorTwoQueues(self):
# Tests that the supervisor correctly raises a main loop
# error even when using multiple queues for input.
logdir = _test_dir("managed_main_error_two_queues")
os.makedirs(logdir)
data_path = self._csv_data(logdir)
with self.assertRaisesRegexp(RuntimeError, "fail at step 3"):
with tf.Graph().as_default():
# Create an input pipeline that reads the file 3 times.
filename_queue = tf.train.string_input_producer([data_path],
num_epochs=3)
reader = tf.TextLineReader()
_, csv = reader.read(filename_queue)
rec = tf.decode_csv(csv, record_defaults=[[1], [1], [1]])
shuff_rec = tf.train.shuffle_batch(rec, 1, 6, 4)
sv = tf.train.Supervisor(logdir=logdir)
with sv.managed_session("") as sess:
for step in range(9):
if sv.should_stop():
break
elif step == 3:
raise RuntimeError("fail at step 3")
else:
sess.run(shuff_rec)
def testSessionConfig(self):
logdir = _test_dir("session_config")
with tf.Graph().as_default():
with tf.device("/cpu:1"):
my_op = tf.constant([1.0])
sv = tf.train.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session(
"", config=tf.ConfigProto(device_count={"CPU": 2}))
for _ in xrange(10):
sess.run(my_op)
sess.close()
sv.stop()
def testChiefCanWriteEvents(self):
logdir = _test_dir("can_write")
with tf.Graph().as_default():
tf.summary.scalar("c1", tf.constant(1))
tf.summary.scalar("c2", tf.constant(2))
tf.summary.scalar("c3", tf.constant(3))
summ = tf.summary.merge_all()
sv = tf.train.Supervisor(is_chief=True, logdir=logdir, summary_op=None)
meta_graph_def = meta_graph.create_meta_graph_def()
sess = sv.prepare_or_wait_for_session("")
sv.summary_computed(sess, sess.run(summ))
sess.close()
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
rr = _summary_iterator(logdir)
# The first event should list the file_version.
ev = next(rr)
self.assertEquals("brain.Event:2", ev.file_version)
# The next one has the graph.
ev = next(rr)
ev_graph = tf.GraphDef()
ev_graph.ParseFromString(ev.graph_def)
self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
# Stored MetaGraphDef
ev = next(rr)
ev_meta_graph = meta_graph_pb2.MetaGraphDef()
ev_meta_graph.ParseFromString(ev.meta_graph_def)
self.assertProtoEquals(meta_graph_def, ev_meta_graph)
self.assertProtoEquals(
sess.graph.as_graph_def(add_shapes=True), ev_meta_graph.graph_def)
# The next one should have the values from the summary.
ev = next(rr)
self.assertProtoEquals("""
value { tag: 'c1' simple_value: 1.0 }
value { tag: 'c2' simple_value: 2.0 }
value { tag: 'c3' simple_value: 3.0 }
""", ev.summary)
# The next one should be a stop message if we closed cleanly.
ev = next(rr)
self.assertEquals(tf.SessionLog.STOP, ev.session_log.status)
# We should be done.
self.assertRaises(StopIteration, lambda: next(rr))
def testNonChiefCannotWriteEvents(self):
def _summary_computed():
with tf.Graph().as_default():
sv = tf.train.Supervisor(is_chief=False)
sess = sv.prepare_or_wait_for_session("")
tf.summary.scalar("c1", tf.constant(1))
tf.summary.scalar("c2", tf.constant(2))
summ = tf.summary.merge_all()
sv.summary_computed(sess, sess.run(summ))
def _start_standard_services():
with tf.Graph().as_default():
sv = tf.train.Supervisor(is_chief=False)
sess = sv.prepare_or_wait_for_session("")
sv.start_standard_services(sess)
self.assertRaises(RuntimeError, _summary_computed)
self.assertRaises(RuntimeError, _start_standard_services)
def testNoLogdirButWantSummary(self):
with tf.Graph().as_default():
tf.summary.scalar("c1", tf.constant(1))
tf.summary.scalar("c2", tf.constant(2))
tf.summary.scalar("c3", tf.constant(3))
summ = tf.summary.merge_all()
sv = tf.train.Supervisor(logdir="", summary_op=None)
sess = sv.prepare_or_wait_for_session("")
with self.assertRaisesRegexp(RuntimeError, "requires a summary writer"):
sv.summary_computed(sess, sess.run(summ))
def testLogdirButExplicitlyNoSummaryWriter(self):
logdir = _test_dir("explicit_no_summary_writer")
with tf.Graph().as_default():
tf.Variable([1.0], name="foo")
tf.summary.scalar("c1", tf.constant(1))
tf.summary.scalar("c2", tf.constant(2))
tf.summary.scalar("c3", tf.constant(3))
summ = tf.summary.merge_all()
sv = tf.train.Supervisor(logdir=logdir, summary_writer=None)
sess = sv.prepare_or_wait_for_session("")
# Check that a checkpoint is still be generated.
self._wait_for_glob(sv.save_path, 3.0)
# Check that we cannot write a summary
with self.assertRaisesRegexp(RuntimeError, "requires a summary writer"):
sv.summary_computed(sess, sess.run(summ))
def testNoLogdirButExplicitSummaryWriter(self):
logdir = _test_dir("explicit_summary_writer")
with tf.Graph().as_default():
tf.summary.scalar("c1", tf.constant(1))
tf.summary.scalar("c2", tf.constant(2))
tf.summary.scalar("c3", tf.constant(3))
summ = tf.summary.merge_all()
sw = tf.train.SummaryWriter(logdir)
sv = tf.train.Supervisor(logdir="", summary_op=None, summary_writer=sw)
meta_graph_def = meta_graph.create_meta_graph_def()
sess = sv.prepare_or_wait_for_session("")
sv.summary_computed(sess, sess.run(summ))
sess.close()
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
# Check the summary was written to 'logdir'
rr = _summary_iterator(logdir)
# The first event should list the file_version.
ev = next(rr)
self.assertEquals("brain.Event:2", ev.file_version)
# The next one has the graph.
ev = next(rr)
ev_graph = tf.GraphDef()
ev_graph.ParseFromString(ev.graph_def)
self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
# Stored MetaGraphDef
ev = next(rr)
ev_meta_graph = meta_graph_pb2.MetaGraphDef()
ev_meta_graph.ParseFromString(ev.meta_graph_def)
self.assertProtoEquals(meta_graph_def, ev_meta_graph)
self.assertProtoEquals(
sess.graph.as_graph_def(add_shapes=True), ev_meta_graph.graph_def)
# The next one should have the values from the summary.
ev = next(rr)
self.assertProtoEquals("""
value { tag: 'c1' simple_value: 1.0 }
value { tag: 'c2' simple_value: 2.0 }
value { tag: 'c3' simple_value: 3.0 }
""", ev.summary)
# The next one should be a stop message if we closed cleanly.
ev = next(rr)
self.assertEquals(tf.SessionLog.STOP, ev.session_log.status)
# We should be done.
self.assertRaises(StopIteration, lambda: next(rr))
def testNoLogdirSucceeds(self):
with tf.Graph().as_default():
tf.Variable([1.0, 2.0, 3.0])
sv = tf.train.Supervisor(logdir="", summary_op=None)
sess = sv.prepare_or_wait_for_session("")
sess.close()
sv.stop()
def testUseSessionManager(self):
with tf.Graph().as_default():
tf.Variable([1.0, 2.0, 3.0])
sm = tf.train.SessionManager()
# Pass in session_manager. The additional init_op is ignored.
sv = tf.train.Supervisor(logdir="", session_manager=sm)
sv.prepare_or_wait_for_session("")
def testInitOp(self):
logdir = _test_dir("default_init_op")
with tf.Graph().as_default():
v = tf.Variable([1.0, 2.0, 3.0])
sv = tf.train.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
sv.stop()
def testInitFn(self):
logdir = _test_dir("default_init_op")
with tf.Graph().as_default():
v = tf.Variable([1.0, 2.0, 3.0])
def _init_fn(sess):
sess.run(v.initializer)
sv = tf.train.Supervisor(logdir=logdir, init_op=None, init_fn=_init_fn)
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
sv.stop()
def testInitOpWithFeedDict(self):
logdir = _test_dir("feed_dict_init_op")
with tf.Graph().as_default():
p = tf.placeholder(tf.float32, shape=(3,))
v = tf.Variable(p, name="v")
sv = tf.train.Supervisor(logdir=logdir,
init_op=tf.initialize_all_variables(),
init_feed_dict={p: [1.0, 2.0, 3.0]})
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
sv.stop()
def testReadyForLocalInitOp(self):
server = tf.train.Server.create_local_server()
logdir = _test_dir("default_ready_for_local_init_op")
uid = uuid.uuid4().hex
def get_session(is_chief):
g = tf.Graph()
with g.as_default():
with tf.device("/job:local"):
v = tf.Variable(
1, name="default_ready_for_local_init_op_v_" + str(uid))
vadd = v.assign_add(1)
w = tf.Variable(
v,
trainable=False,
collections=[tf.GraphKeys.LOCAL_VARIABLES],
name="default_ready_for_local_init_op_w_" + str(uid))
ready_for_local_init_op = tf.report_uninitialized_variables(
tf.all_variables())
sv = tf.train.Supervisor(
logdir=logdir,
is_chief=is_chief,
graph=g,
recovery_wait_secs=1,
init_op=v.initializer,
ready_for_local_init_op=ready_for_local_init_op)
sess = sv.prepare_or_wait_for_session(server.target)
return sv, sess, v, vadd, w
sv0, sess0, v0, _, w0 = get_session(True)
sv1, sess1, _, vadd1, w1 = get_session(False)
self.assertEqual(1, sess0.run(w0))
self.assertEqual(2, sess1.run(vadd1))
self.assertEqual(1, sess1.run(w1))
self.assertEqual(2, sess0.run(v0))
sv0.stop()
sv1.stop()
def testReadyForLocalInitOpRestoreFromCheckpoint(self):
server = tf.train.Server.create_local_server()
logdir = _test_dir("ready_for_local_init_op_restore")
uid = uuid.uuid4().hex
# Create a checkpoint.
with tf.Graph().as_default():
v = tf.Variable(
10.0, name="ready_for_local_init_op_restore_v_" + str(uid))
tf.summary.scalar("ready_for_local_init_op_restore_v_" + str(uid), v)
sv = tf.train.Supervisor(logdir=logdir)
sv.prepare_or_wait_for_session(server.target)
save_path = sv.save_path
self._wait_for_glob(save_path, 3.0)
self._wait_for_glob(
os.path.join(logdir, "*events*"), 3.0, for_checkpoint=False)
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
def get_session(is_chief):
g = tf.Graph()
with g.as_default():
with tf.device("/job:local"):
v = tf.Variable(
1.0, name="ready_for_local_init_op_restore_v_" + str(uid))
vadd = v.assign_add(1)
w = tf.Variable(
v,
trainable=False,
collections=[tf.GraphKeys.LOCAL_VARIABLES],
name="ready_for_local_init_op_restore_w_" + str(uid))
ready_for_local_init_op = tf.report_uninitialized_variables(
tf.all_variables())
sv = tf.train.Supervisor(
logdir=logdir,
is_chief=is_chief,
graph=g,
recovery_wait_secs=1,
ready_for_local_init_op=ready_for_local_init_op)
sess = sv.prepare_or_wait_for_session(server.target)
return sv, sess, v, vadd, w
sv0, sess0, v0, _, w0 = get_session(True)
sv1, sess1, _, vadd1, w1 = get_session(False)
self.assertEqual(10, sess0.run(w0))
self.assertEqual(11, sess1.run(vadd1))
self.assertEqual(10, sess1.run(w1))
self.assertEqual(11, sess0.run(v0))
sv0.stop()
sv1.stop()
def testLocalInitOp(self):
logdir = _test_dir("default_local_init_op")
with tf.Graph().as_default():
# A local variable.
v = tf.Variable([1.0, 2.0, 3.0],
trainable=False,
collections=[tf.GraphKeys.LOCAL_VARIABLES])
# An entity which is initialized through a TABLE_INITIALIZER.
w = tf.Variable([4, 5, 6], trainable=False, collections=[])
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, w.initializer)
# This shouldn't add a variable to the VARIABLES collection responsible
# for variables that are saved/restored from checkpoints.
self.assertEquals(len(tf.all_variables()), 0)
# Suppress normal variable inits to make sure the local one is
# initialized via local_init_op.
sv = tf.train.Supervisor(logdir=logdir, init_op=None)
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
self.assertAllClose([4, 5, 6], sess.run(w))
sv.stop()
def testLocalInitOpForNonChief(self):
logdir = _test_dir("default_local_init_op_non_chief")
with tf.Graph().as_default():
with tf.device("/job:localhost"):
# A local variable.
v = tf.Variable([1.0, 2.0, 3.0],
trainable=False,
collections=[tf.GraphKeys.LOCAL_VARIABLES])
# This shouldn't add a variable to the VARIABLES collection responsible
# for variables that are saved/restored from checkpoints.
self.assertEquals(len(tf.all_variables()), 0)
# Suppress normal variable inits to make sure the local one is
# initialized via local_init_op.
sv = tf.train.Supervisor(logdir=logdir, init_op=None, is_chief=False)
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
sv.stop()
def testInitOpFails(self):
server = tf.train.Server.create_local_server()
logdir = _test_dir("default_init_op_fails")
with tf.Graph().as_default():
v = tf.Variable([1.0, 2.0, 3.0], name="v")
tf.Variable([4.0, 5.0, 6.0], name="w")
# w will not be initialized.
sv = tf.train.Supervisor(logdir=logdir, init_op=v.initializer)
with self.assertRaisesRegexp(RuntimeError,
"Variables not initialized: w"):
sv.prepare_or_wait_for_session(server.target)
def testInitOpFailsForTransientVariable(self):
server = tf.train.Server.create_local_server()
logdir = _test_dir("default_init_op_fails_for_local_variable")
with tf.Graph().as_default():
v = tf.Variable([1.0, 2.0, 3.0], name="v",
collections=[tf.GraphKeys.LOCAL_VARIABLES])
tf.Variable([1.0, 2.0, 3.0], name="w",
collections=[tf.GraphKeys.LOCAL_VARIABLES])
# w will not be initialized.
sv = tf.train.Supervisor(logdir=logdir, local_init_op=v.initializer)
with self.assertRaisesRegexp(
RuntimeError, "Variables not initialized: w"):
sv.prepare_or_wait_for_session(server.target)
def testSetupFail(self):
logdir = _test_dir("setup_fail")
with tf.Graph().as_default():
tf.Variable([1.0, 2.0, 3.0], name="v")
with self.assertRaisesRegexp(ValueError, "must have their device set"):
tf.train.Supervisor(logdir=logdir, is_chief=False)
with tf.Graph().as_default(), tf.device("/job:ps"):
tf.Variable([1.0, 2.0, 3.0], name="v")
tf.train.Supervisor(logdir=logdir, is_chief=False)
def testDefaultGlobalStep(self):
logdir = _test_dir("default_global_step")
with tf.Graph().as_default():
tf.Variable(287, name="global_step")
sv = tf.train.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session("")
self.assertEquals(287, sess.run(sv.global_step))
sv.stop()
def testRestoreFromMetaGraph(self):
logdir = _test_dir("restore_from_meta_graph")
with tf.Graph().as_default():
tf.Variable(1, name="v0")
sv = tf.train.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session("")
filename = sv.saver.save(sess, sv.save_path)
sv.stop()
# Create a new Graph and Supervisor and recover.
with tf.Graph().as_default():
new_saver = tf.train.import_meta_graph(".".join([filename, "meta"]))
self.assertIsNotNone(new_saver)
sv2 = tf.train.Supervisor(logdir=logdir, saver=new_saver)
sess = sv2.prepare_or_wait_for_session("")
self.assertEquals(1, sess.run("v0:0"))
sv2.saver.save(sess, sv2.save_path)
sv2.stop()
# This test is based on the fact that the standard services start
# right away and get to run once before sv.stop() returns.
# We still sleep a bit to make the test robust.
def testStandardServicesWithoutGlobalStep(self):
logdir = _test_dir("standard_services_without_global_step")
# Create a checkpoint.
with tf.Graph().as_default():
v = tf.Variable([1.0], name="foo")
tf.summary.scalar("v", v[0])
sv = tf.train.Supervisor(logdir=logdir)
meta_graph_def = meta_graph.create_meta_graph_def(
saver_def=sv.saver.saver_def)
sess = sv.prepare_or_wait_for_session("")
save_path = sv.save_path
self._wait_for_glob(save_path, 3.0)
self._wait_for_glob(
os.path.join(logdir, "*events*"), 3.0, for_checkpoint=False)
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
# There should be an event file with a version number.
rr = _summary_iterator(logdir)
ev = next(rr)
self.assertEquals("brain.Event:2", ev.file_version)
ev = next(rr)
ev_graph = tf.GraphDef()
ev_graph.ParseFromString(ev.graph_def)
self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
# Stored MetaGraphDef
ev = next(rr)
ev_meta_graph = meta_graph_pb2.MetaGraphDef()
ev_meta_graph.ParseFromString(ev.meta_graph_def)
self.assertProtoEquals(meta_graph_def, ev_meta_graph)
self.assertProtoEquals(
sess.graph.as_graph_def(add_shapes=True), ev_meta_graph.graph_def)
ev = next(rr)
self.assertProtoEquals("value { tag: 'v' simple_value: 1.0 }", ev.summary)
ev = next(rr)
self.assertEquals(tf.SessionLog.STOP, ev.session_log.status)
self.assertRaises(StopIteration, lambda: next(rr))
# There should be a checkpoint file with the variable "foo"
with tf.Graph().as_default(), self.test_session() as sess:
v = tf.Variable([10.10], name="foo")
sav = tf.train.Saver([v])
sav.restore(sess, save_path)
self.assertEqual(1.0, v.eval()[0])
  # Same as testStandardServicesWithoutGlobalStep but with a global step.
# We should get a summary about the step time.
def testStandardServicesWithGlobalStep(self):
logdir = _test_dir("standard_services_with_global_step")
# Create a checkpoint.
with tf.Graph().as_default():
v = tf.Variable([123], name="global_step")
sv = tf.train.Supervisor(logdir=logdir)
meta_graph_def = meta_graph.create_meta_graph_def(
saver_def=sv.saver.saver_def)
sess = sv.prepare_or_wait_for_session("")
# This is where the checkpoint will appear, with step number 123.
save_path = "%s-123" % sv.save_path
self._wait_for_glob(save_path, 3.0)
self._wait_for_glob(
os.path.join(logdir, "*events*"), 3.0, for_checkpoint=False)
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
# There should be an event file with a version number.
rr = _summary_iterator(logdir)
ev = next(rr)
self.assertEquals("brain.Event:2", ev.file_version)
ev = next(rr)
ev_graph = tf.GraphDef()
ev_graph.ParseFromString(ev.graph_def)
self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
ev = next(rr)
ev_meta_graph = meta_graph_pb2.MetaGraphDef()
ev_meta_graph.ParseFromString(ev.meta_graph_def)
self.assertProtoEquals(meta_graph_def, ev_meta_graph)
self.assertProtoEquals(
sess.graph.as_graph_def(add_shapes=True), ev_meta_graph.graph_def)
ev = next(rr)
    # It is actually nondeterministic whether SessionLog.START gets written
    # before the summary or the checkpoint, but this works when run 10000 times.
self.assertEquals(123, ev.step)
self.assertEquals(tf.SessionLog.START, ev.session_log.status)
first = next(rr)
second = next(rr)
    # It is nondeterministic whether the value gets written before the checkpoint
    # since they are on separate threads, so we check for both conditions.
if first.HasField("summary"):
self.assertProtoEquals("""value { tag: 'global_step/sec'
simple_value: 0.0 }""",
first.summary)
self.assertEquals(123, second.step)
self.assertEquals(tf.SessionLog.CHECKPOINT, second.session_log.status)
else:
self.assertEquals(123, first.step)
self.assertEquals(tf.SessionLog.CHECKPOINT, first.session_log.status)
self.assertProtoEquals("""value { tag: 'global_step/sec'
simple_value: 0.0 }""",
second.summary)
ev = next(rr)
self.assertEquals(tf.SessionLog.STOP, ev.session_log.status)
self.assertRaises(StopIteration, lambda: next(rr))
    # There should be a checkpoint file with the variable "global_step"
with tf.Graph().as_default(), self.test_session() as sess:
v = tf.Variable([-12], name="global_step")
sav = tf.train.Saver([v])
sav.restore(sess, save_path)
self.assertEqual(123, v.eval()[0])
def testNoQueueRunners(self):
with tf.Graph().as_default(), self.test_session() as sess:
sv = tf.train.Supervisor(logdir=_test_dir("no_queue_runners"))
self.assertEqual(0, len(sv.start_queue_runners(sess)))
sv.stop()
def testPrepareSessionAfterStopForChief(self):
logdir = _test_dir("prepare_after_stop_chief")
with tf.Graph().as_default():
sv = tf.train.Supervisor(logdir=logdir, is_chief=True)
# Create a first session and then stop.
sess = sv.prepare_or_wait_for_session("")
sv.stop()
sess.close()
self.assertTrue(sv.should_stop())
# Now create a second session and test that we don't stay stopped, until
# we ask to stop again.
sess2 = sv.prepare_or_wait_for_session("")
self.assertFalse(sv.should_stop())
sv.stop()
sess2.close()
self.assertTrue(sv.should_stop())
def testPrepareSessionAfterStopForNonChief(self):
logdir = _test_dir("prepare_after_stop_nonchief")
with tf.Graph().as_default():
sv = tf.train.Supervisor(logdir=logdir, is_chief=False)
# Create a first session and then stop.
sess = sv.prepare_or_wait_for_session("")
sv.stop()
sess.close()
self.assertTrue(sv.should_stop())
# Now create a second session and test that we don't stay stopped, until
# we ask to stop again.
sess2 = sv.prepare_or_wait_for_session("")
self.assertFalse(sv.should_stop())
sv.stop()
sess2.close()
self.assertTrue(sv.should_stop())
if __name__ == "__main__":
tf.test.main()
| kamcpp/tensorflow | tensorflow/python/training/supervisor_test.py | Python | apache-2.0 | 33,571 |
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import webob
from nova.api.openstack.compute.schemas.v3 import flavors_extraspecs
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import exception
from nova.i18n import _
from nova import objects
from nova import utils
ALIAS = 'os-flavor-extra-specs'
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
class FlavorExtraSpecsController(wsgi.Controller):
"""The flavor extra specs API controller for the OpenStack API."""
def __init__(self, *args, **kwargs):
super(FlavorExtraSpecsController, self).__init__(*args, **kwargs)
def _get_extra_specs(self, context, flavor_id):
flavor = objects.Flavor.get_by_flavor_id(context, flavor_id)
return dict(extra_specs=flavor.extra_specs)
    # NOTE(gmann): The maximum length of numeric values is checked
    # explicitly here because the JSON schema cannot enforce a max length
    # check on numeric values.
def _check_extra_specs_value(self, specs):
for key, value in specs.iteritems():
try:
if isinstance(value, (six.integer_types, float)):
value = six.text_type(value)
utils.check_string_length(value, 'extra_specs value',
max_length=255)
except exception.InvalidInput as error:
raise webob.exc.HTTPBadRequest(
explanation=error.format_message())
@extensions.expected_errors(())
def index(self, req, flavor_id):
"""Returns the list of extra specs for a given flavor."""
context = req.environ['nova.context']
authorize(context, action='index')
return self._get_extra_specs(context, flavor_id)
    # NOTE(gmann): This should return 201 instead of 200 with v2.1
    # +microversions, because the flavor extra specs have been fully
    # created by the time the response is returned.
@extensions.expected_errors((400, 404, 409))
@validation.schema(flavors_extraspecs.create)
def create(self, req, flavor_id, body):
context = req.environ['nova.context']
authorize(context, action='create')
specs = body['extra_specs']
self._check_extra_specs_value(specs)
try:
flavor = objects.Flavor.get_by_flavor_id(context, flavor_id)
flavor.extra_specs = dict(flavor.extra_specs, **specs)
flavor.save()
except exception.FlavorExtraSpecUpdateCreateFailed as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
except exception.FlavorNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return body
@extensions.expected_errors((400, 404, 409))
@validation.schema(flavors_extraspecs.update)
def update(self, req, flavor_id, id, body):
context = req.environ['nova.context']
authorize(context, action='update')
self._check_extra_specs_value(body)
if id not in body:
expl = _('Request body and URI mismatch')
raise webob.exc.HTTPBadRequest(explanation=expl)
try:
flavor = objects.Flavor.get_by_flavor_id(context, flavor_id)
flavor.extra_specs = dict(flavor.extra_specs, **body)
flavor.save()
except exception.FlavorExtraSpecUpdateCreateFailed as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
except exception.FlavorNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
return body
@extensions.expected_errors(404)
def show(self, req, flavor_id, id):
"""Return a single extra spec item."""
context = req.environ['nova.context']
authorize(context, action='show')
try:
flavor = objects.Flavor.get_by_flavor_id(context, flavor_id)
return {id: flavor.extra_specs[id]}
except exception.FlavorNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except KeyError:
msg = _("Flavor %(flavor_id)s has no extra specs with "
"key %(key)s.") % dict(flavor_id=flavor_id,
key=id)
raise webob.exc.HTTPNotFound(explanation=msg)
    # NOTE(gmann): This should return 204 (No Content) instead of 200 with
    # v2.1 +microversions, because the flavor extra specs have been fully
    # deleted by the time the response is returned.
@extensions.expected_errors(404)
def delete(self, req, flavor_id, id):
"""Deletes an existing extra spec."""
context = req.environ['nova.context']
authorize(context, action='delete')
try:
flavor = objects.Flavor.get_by_flavor_id(context, flavor_id)
del flavor.extra_specs[id]
flavor.save()
except (exception.FlavorExtraSpecsNotFound,
exception.FlavorNotFound) as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except KeyError:
msg = _("Flavor %(flavor_id)s has no extra specs with "
"key %(key)s.") % dict(flavor_id=flavor_id,
key=id)
raise webob.exc.HTTPNotFound(explanation=msg)
class FlavorsExtraSpecs(extensions.V3APIExtensionBase):
"""Flavors extra specs support."""
name = 'FlavorExtraSpecs'
alias = ALIAS
version = 1
def get_resources(self):
extra_specs = extensions.ResourceExtension(
'os-extra_specs',
FlavorExtraSpecsController(),
parent=dict(member_name='flavor', collection_name='flavors'))
return [extra_specs]
def get_controller_extensions(self):
return []
| cloudbase/nova-virtualbox | nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py | Python | apache-2.0 | 6,450 |
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bayesian NN using factorized VI (Bayes By Backprop. Blundell et al. 2014).
See https://arxiv.org/abs/1505.05424 for details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from absl import flags
from bandits.core.bayesian_nn import BayesianNN
FLAGS = flags.FLAGS
def log_gaussian(x, mu, sigma, reduce_sum=True):
"""Returns log Gaussian pdf."""
res = (-0.5 * np.log(2 * np.pi) - tf.log(sigma) - tf.square(x - mu) /
(2 * tf.square(sigma)))
if reduce_sum:
return tf.reduce_sum(res)
else:
return res
def analytic_kl(mu_1, sigma_1, mu_2, sigma_2):
"""KL for two Gaussian distributions with diagonal covariance matrix."""
sigma_1_sq = tf.square(sigma_1)
sigma_2_sq = tf.square(sigma_2)
t1 = tf.square(mu_1 - mu_2) / (2. * sigma_2_sq)
t2 = (sigma_1_sq/sigma_2_sq - 1. - tf.log(sigma_1_sq) + tf.log(sigma_2_sq))/2.
return tf.reduce_sum(t1 + t2)
class VariationalNeuralBanditModel(BayesianNN):
"""Implements an approximate Bayesian NN using Variational Inference."""
def __init__(self, hparams, name="BBBNN"):
self.name = name
self.hparams = hparams
self.n_in = self.hparams.context_dim
self.n_out = self.hparams.num_actions
self.layers = self.hparams.layer_sizes
self.init_scale = self.hparams.init_scale
self.f_num_points = None
if "f_num_points" in hparams:
self.f_num_points = self.hparams.f_num_points
self.cleared_times_trained = self.hparams.cleared_times_trained
self.initial_training_steps = self.hparams.initial_training_steps
self.training_schedule = np.linspace(self.initial_training_steps,
self.hparams.training_epochs,
self.cleared_times_trained)
self.verbose = getattr(self.hparams, "verbose", True)
self.weights_m = {}
self.weights_std = {}
self.biases_m = {}
self.biases_std = {}
self.times_trained = 0
if self.hparams.use_sigma_exp_transform:
self.sigma_transform = tf.exp
self.inverse_sigma_transform = np.log
else:
self.sigma_transform = tf.nn.softplus
self.inverse_sigma_transform = lambda y: y + np.log(1. - np.exp(-y))
# Whether to use the local reparameterization trick to compute the loss.
# See details in https://arxiv.org/abs/1506.02557
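    # (i.e. sample each layer's pre-activations rather than its weights, which
    # gives lower-variance gradient estimates for the same cost).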
self.use_local_reparameterization = True
self.build_graph()
def build_mu_variable(self, shape):
"""Returns a mean variable initialized as N(0, 0.05)."""
return tf.Variable(tf.random_normal(shape, 0.0, 0.05))
def build_sigma_variable(self, shape, init=-5.):
"""Returns a sigma variable initialized as N(init, 0.05)."""
# Initialize sigma to be very small initially to encourage MAP opt first
return tf.Variable(tf.random_normal(shape, init, 0.05))
def build_layer(self, input_x, input_x_local, shape,
layer_id, activation_fn=tf.nn.relu):
"""Builds a variational layer, and computes KL term.
Args:
input_x: Input to the variational layer.
input_x_local: Input when the local reparameterization trick was applied.
shape: [number_inputs, number_outputs] for the layer.
layer_id: Number of layer in the architecture.
activation_fn: Activation function to apply.
Returns:
output_h: Output of the variational layer.
output_h_local: Output when local reparameterization trick was applied.
neg_kl: Negative KL term for the layer.
"""
w_mu = self.build_mu_variable(shape)
w_sigma = self.sigma_transform(self.build_sigma_variable(shape))
w_noise = tf.random_normal(shape)
w = w_mu + w_sigma * w_noise
b_mu = self.build_mu_variable([1, shape[1]])
b_sigma = self.sigma_transform(self.build_sigma_variable([1, shape[1]]))
b = b_mu
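    # Note that only the bias mean is used; no noise is sampled for the biases.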
# Store means and stds
self.weights_m[layer_id] = w_mu
self.weights_std[layer_id] = w_sigma
self.biases_m[layer_id] = b_mu
self.biases_std[layer_id] = b_sigma
# Create outputs
output_h = activation_fn(tf.matmul(input_x, w) + b)
if self.use_local_reparameterization:
# Use analytic KL divergence wrt the prior
neg_kl = -analytic_kl(w_mu, w_sigma,
0., tf.to_float(np.sqrt(2./shape[0])))
else:
# Create empirical KL loss terms
log_p = log_gaussian(w, 0., tf.to_float(np.sqrt(2./shape[0])))
log_q = log_gaussian(w, tf.stop_gradient(w_mu), tf.stop_gradient(w_sigma))
neg_kl = log_p - log_q
# Apply local reparameterization trick: sample activations pre nonlinearity
m_h = tf.matmul(input_x_local, w_mu) + b
v_h = tf.matmul(tf.square(input_x_local), tf.square(w_sigma))
output_h_local = m_h + tf.sqrt(v_h + 1e-6) * tf.random_normal(tf.shape(v_h))
output_h_local = activation_fn(output_h_local)
return output_h, output_h_local, neg_kl
def build_action_noise(self):
"""Defines a model for additive noise per action, and its KL term."""
# Define mean and std variables (log-normal dist) for each action.
noise_sigma_mu = (self.build_mu_variable([1, self.n_out])
+ self.inverse_sigma_transform(self.hparams.noise_sigma))
noise_sigma_sigma = self.sigma_transform(
self.build_sigma_variable([1, self.n_out]))
pre_noise_sigma = (noise_sigma_mu
+ tf.random_normal([1, self.n_out]) * noise_sigma_sigma)
self.noise_sigma = self.sigma_transform(pre_noise_sigma)
# Compute KL for additive noise sigma terms.
if getattr(self.hparams, "infer_noise_sigma", False):
neg_kl_term = log_gaussian(
pre_noise_sigma,
self.inverse_sigma_transform(self.hparams.noise_sigma),
self.hparams.prior_sigma
)
neg_kl_term -= log_gaussian(pre_noise_sigma,
noise_sigma_mu,
noise_sigma_sigma)
else:
neg_kl_term = 0.
return neg_kl_term
def build_model(self, activation_fn=tf.nn.relu):
"""Defines the actual NN model with fully connected layers.
The loss is computed for partial feedback settings (bandits), so only
the observed outcome is backpropagated (see weighted loss).
    It also selects the optimizer and, finally, initializes the graph.
Args:
activation_fn: the activation function used in the nn layers.
"""
if self.verbose:
print("Initializing model {}.".format(self.name))
neg_kl_term, l_number = 0, 0
use_local_reparameterization = self.use_local_reparameterization
# Compute model additive noise for each action with log-normal distribution
neg_kl_term += self.build_action_noise()
# Build network.
input_x = self.x
input_local = self.x
n_in = self.n_in
for l_number, n_nodes in enumerate(self.layers):
if n_nodes > 0:
h, h_local, neg_kl = self.build_layer(input_x, input_local,
[n_in, n_nodes], l_number)
neg_kl_term += neg_kl
input_x, input_local = h, h_local
n_in = n_nodes
# Create last linear layer
h, h_local, neg_kl = self.build_layer(input_x, input_local,
[n_in, self.n_out],
l_number + 1,
activation_fn=lambda x: x)
neg_kl_term += neg_kl
self.y_pred = h
self.y_pred_local = h_local
# Compute log likelihood (with learned or fixed noise level)
if getattr(self.hparams, "infer_noise_sigma", False):
log_likelihood = log_gaussian(
self.y, self.y_pred_local, self.noise_sigma, reduce_sum=False)
else:
y_hat = self.y_pred_local if use_local_reparameterization else self.y_pred
log_likelihood = log_gaussian(
self.y, y_hat, self.hparams.noise_sigma, reduce_sum=False)
# Only take into account observed outcomes (bandits setting)
batch_size = tf.to_float(tf.shape(self.x)[0])
weighted_log_likelihood = tf.reduce_sum(
log_likelihood * self.weights) / batch_size
# The objective is 1/n * (\sum_i log_like_i - KL); neg_kl_term estimates -KL
elbo = weighted_log_likelihood + (neg_kl_term / self.n)
self.loss = -elbo
self.global_step = tf.train.get_or_create_global_step()
self.train_op = tf.train.AdamOptimizer(self.hparams.initial_lr).minimize(
self.loss, global_step=self.global_step)
# Create tensorboard metrics
self.create_summaries()
self.summary_writer = tf.summary.FileWriter(
"{}/graph_{}".format(FLAGS.logdir, self.name), self.sess.graph)
def build_graph(self):
"""Defines graph, session, placeholders, and model.
Placeholders are: n (size of the dataset), x and y (context and observed
reward for each action), and weights (one-hot encoding of selected action
    for each context, i.e., the only possibly non-zero element in each y).
"""
self.graph = tf.Graph()
with self.graph.as_default():
self.sess = tf.Session()
self.n = tf.placeholder(shape=[], dtype=tf.float32)
self.x = tf.placeholder(shape=[None, self.n_in], dtype=tf.float32)
self.y = tf.placeholder(shape=[None, self.n_out], dtype=tf.float32)
self.weights = tf.placeholder(shape=[None, self.n_out], dtype=tf.float32)
self.build_model()
self.sess.run(tf.global_variables_initializer())
def create_summaries(self):
"""Defines summaries including mean loss, and global step."""
with self.graph.as_default():
with tf.name_scope(self.name + "_summaries"):
tf.summary.scalar("loss", self.loss)
tf.summary.scalar("global_step", self.global_step)
self.summary_op = tf.summary.merge_all()
def assign_lr(self):
"""Resets the learning rate in dynamic schedules for subsequent trainings.
    In bandit settings, the dataset grows over time, so the network needs to be
    re-trained with the new data. Algorithms that do not keep the step size
    constant can reset it at the start of each *training* process.
"""
decay_steps = 1
if self.hparams.activate_decay:
current_gs = self.sess.run(self.global_step)
with self.graph.as_default():
self.lr = tf.train.inverse_time_decay(self.hparams.initial_lr,
self.global_step - current_gs,
decay_steps,
self.hparams.lr_decay_rate)
def train(self, data, num_steps):
"""Trains the BNN for num_steps, using the data in 'data'.
Args:
data: ContextualDataset object that provides the data.
num_steps: Number of minibatches to train the network for.
Returns:
losses: Loss history during training.
"""
if self.times_trained < self.cleared_times_trained:
num_steps = int(self.training_schedule[self.times_trained])
self.times_trained += 1
losses = []
with self.graph.as_default():
if self.verbose:
print("Training {} for {} steps...".format(self.name, num_steps))
for step in range(num_steps):
x, y, weights = data.get_batch_with_weights(self.hparams.batch_size)
_, summary, global_step, loss = self.sess.run(
[self.train_op, self.summary_op, self.global_step, self.loss],
feed_dict={
self.x: x,
self.y: y,
self.weights: weights,
self.n: data.num_points(self.f_num_points),
})
losses.append(loss)
if step % self.hparams.freq_summary == 0:
if self.hparams.show_training:
print("{} | step: {}, loss: {}".format(
self.name, global_step, loss))
self.summary_writer.add_summary(summary, global_step)
return losses
| tombstone/models | research/deep_contextual_bandits/bandits/algorithms/variational_neural_bandit_model.py | Python | apache-2.0 | 12,618 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Reverses xxd dump from to binary file
This script is used to convert models from C++ source file (dumped with xxd) to
the binary model weight file and analyze it with model visualizer like Netron
(https://github.com/lutzroeder/netron) or load the model in TensorFlow Python
API
to evaluate the results in Python.
The command to dump binary file to C++ source file looks like
xxd -i model_data.tflite > model_data.cc
Example usage:
python reverse_xxd_dump_from_cc.py \
--input_cc_file=model_data.cc \
--output_tflite_file=model_data.tflite
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.lite.tools import flatbuffer_utils
from tensorflow.python.platform import app
def main(_):
"""Application run loop."""
parser = argparse.ArgumentParser(
      description='Reverses an xxd dump back into a binary file')
parser.add_argument(
'--input_cc_file',
type=str,
required=True,
help='Full path name to the input cc file.')
parser.add_argument(
'--output_tflite_file',
type=str,
required=True,
help='Full path name to the stripped output tflite file.')
args = parser.parse_args()
# Read the model from xxd output C++ source file
model = flatbuffer_utils.xxd_output_to_object(args.input_cc_file)
# Write the model
flatbuffer_utils.write_model(model, args.output_tflite_file)
if __name__ == '__main__':
app.run(main=main, argv=sys.argv[:1])
| davidzchen/tensorflow | tensorflow/lite/tools/reverse_xxd_dump_from_cc.py | Python | apache-2.0 | 2,210 |
from tests import BaseTestCase
import mock
import time
from redash.models import User
from redash.authentication.account import invite_token
from tests.handlers import get_request, post_request
class TestInvite(BaseTestCase):
def test_expired_invite_token(self):
with mock.patch('time.time') as patched_time:
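            # Generate the token as if it had been created just over 7 days
            # ago, so it is already expired when the request is made below.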
patched_time.return_value = time.time() - (7 * 24 * 3600) - 10
token = invite_token(self.factory.user)
response = get_request('/invite/{}'.format(token), org=self.factory.org)
self.assertEqual(response.status_code, 400)
def test_invalid_invite_token(self):
response = get_request('/invite/badtoken', org=self.factory.org)
self.assertEqual(response.status_code, 400)
def test_valid_token(self):
token = invite_token(self.factory.user)
response = get_request('/invite/{}'.format(token), org=self.factory.org)
self.assertEqual(response.status_code, 200)
def test_already_active_user(self):
pass
class TestInvitePost(BaseTestCase):
def test_empty_password(self):
token = invite_token(self.factory.user)
response = post_request('/invite/{}'.format(token), data={'password': ''}, org=self.factory.org)
self.assertEqual(response.status_code, 400)
def test_invalid_password(self):
token = invite_token(self.factory.user)
response = post_request('/invite/{}'.format(token), data={'password': '1234'}, org=self.factory.org)
self.assertEqual(response.status_code, 400)
def test_bad_token(self):
response = post_request('/invite/{}'.format('jdsnfkjdsnfkj'), data={'password': '1234'}, org=self.factory.org)
self.assertEqual(response.status_code, 400)
def test_already_active_user(self):
pass
def test_valid_password(self):
token = invite_token(self.factory.user)
password = 'test1234'
response = post_request('/invite/{}'.format(token), data={'password': password}, org=self.factory.org)
self.assertEqual(response.status_code, 302)
self.factory.user = User.get_by_id(self.factory.user.id)
self.assertTrue(self.factory.user.verify_password(password))
| pubnative/redash | tests/handlers/test_authentication.py | Python | bsd-2-clause | 2,213 |
"""
==============
Blob Detection
==============
Blobs are bright on dark or dark on bright regions in an image. In
this example, blobs are detected using 3 algorithms. The image used
in this case is the Hubble eXtreme Deep Field. Each bright dot in the
image is a star or a galaxy.
Laplacian of Gaussian (LoG)
-----------------------------
This is the most accurate and slowest approach. It computes the Laplacian
of Gaussian images with successively increasing standard deviation and
stacks them up in a cube. Blobs are local maxima in this cube. Detecting
larger blobs is especially slow because of the larger kernel sizes used during
convolution. Only bright blobs on dark backgrounds are detected. See
:py:meth:`skimage.feature.blob_log` for usage.
Difference of Gaussian (DoG)
----------------------------
This is a faster approximation of the LoG approach. In this case the image is
blurred with increasing standard deviations and the differences between
successively blurred images are stacked up in a cube. This method
suffers from the same disadvantage as the LoG approach for detecting larger
blobs. Blobs are again assumed to be bright on dark. See
:py:meth:`skimage.feature.blob_dog` for usage.
Determinant of Hessian (DoH)
----------------------------
This is the fastest approach. It detects blobs by finding maxima in the
matrix of the Determinant of Hessian of the image. The detection speed is
independent of the size of blobs as internally the implementation uses
box filters instead of convolutions. Bright on dark as well as dark on
bright blobs are detected. The downside is that small blobs (<3px) are not
detected accurately. See :py:meth:`skimage.feature.blob_doh` for usage.
"""
from math import sqrt
from skimage import data
from skimage.feature import blob_dog, blob_log, blob_doh
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
image = data.hubble_deep_field()[0:500, 0:500]
image_gray = rgb2gray(image)
blobs_log = blob_log(image_gray, max_sigma=30, num_sigma=10, threshold=.1)
# Compute radii in the 3rd column.
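# For blob_log/blob_dog the 3rd column holds sigma of the detecting Gaussian;
# for a 2-D blob the radius is approximately sqrt(2) * sigma.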
blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2)
blobs_dog = blob_dog(image_gray, max_sigma=30, threshold=.1)
blobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2)
blobs_doh = blob_doh(image_gray, max_sigma=30, threshold=.01)
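# blob_doh's 3rd column (sigma) already approximates the blob radius, so no
# sqrt(2) conversion is applied here.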
blobs_list = [blobs_log, blobs_dog, blobs_doh]
colors = ['yellow', 'lime', 'red']
titles = ['Laplacian of Gaussian', 'Difference of Gaussian',
'Determinant of Hessian']
sequence = zip(blobs_list, colors, titles)
fig, axes = plt.subplots(1, 3, figsize=(9, 3), sharex=True, sharey=True,
subplot_kw={'adjustable': 'box-forced'})
ax = axes.ravel()
for idx, (blobs, color, title) in enumerate(sequence):
ax[idx].set_title(title)
ax[idx].imshow(image, interpolation='nearest')
for blob in blobs:
y, x, r = blob
c = plt.Circle((x, y), r, color=color, linewidth=2, fill=False)
ax[idx].add_patch(c)
ax[idx].set_axis_off()
plt.tight_layout()
plt.show()
| paalge/scikit-image | doc/examples/features_detection/plot_blob.py | Python | bsd-3-clause | 2,997 |
# Copyright (c) 2010 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# A tool for automating interactions with bugzilla: posting patches, committing patches, etc.
from optparse import make_option
from webkitpy.common.host import Host
from webkitpy.tool.multicommandtool import MultiCommandTool
from webkitpy.tool import commands
class WebKitPatch(MultiCommandTool, Host):
global_options = [
make_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="enable all logging"),
make_option("-d", "--directory", action="append", dest="patch_directories",
default=[], help="Directory to look at for changed files"),
]
def __init__(self, path):
MultiCommandTool.__init__(self)
Host.__init__(self)
self._path = path
def path(self):
return self._path
def should_show_in_main_help(self, command):
if not command.show_in_main_help:
return False
if command.requires_local_commits:
return self.scm().supports_local_commits()
return True
# FIXME: This may be unnecessary since we pass global options to all commands during execute() as well.
def handle_global_options(self, options):
self.initialize_scm(options.patch_directories)
def should_execute_command(self, command):
if command.requires_local_commits and not self.scm().supports_local_commits():
failure_reason = "%s requires local commits using %s in %s." % (
command.name, self.scm().display_name(), self.scm().checkout_root)
return (False, failure_reason)
return (True, None)
| axinging/chromium-crosswalk | third_party/WebKit/Tools/Scripts/webkitpy/tool/webkit_patch.py | Python | bsd-3-clause | 3,181 |
###
# Copyright (c) 2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Filters bad words on outgoing messages from the bot, so the bot can't be made
to say bad words.
"""
import supybot
import supybot.world as world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you're keeping the plugin in CVS or some similar system.
__version__ = ""
__author__ = supybot.authors.jemfinch
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}
from . import config
from . import plugin
from imp import reload
reload(plugin) # In case we're being reloaded.
# Add more reloads here if you add third-party modules and want them to be
# reloaded when this plugin is reloaded. Don't forget to import them as well!
if world.testing:
from . import test
Class = plugin.Class
configure = config.configure
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| Ban3/Limnoria | plugins/BadWords/__init__.py | Python | bsd-3-clause | 2,470 |
project_slug = '{{ cookiecutter.project_slug }}'
if hasattr(project_slug, 'isidentifier'):
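    # e.g. 'my_project' is a valid identifier, while 'my-project' or
    # '1project' would fail this check.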
assert project_slug.isidentifier(), 'Project slug should be valid Python identifier!'
| kappataumu/cookiecutter-django | hooks/pre_gen_project.py | Python | bsd-3-clause | 184 |
###
# Copyright (c) 2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
This plugin handles various plugin-related things, such as getting help for
a plugin, getting a list of the loaded plugins, and searching and downloading
plugins from supybot.com.
"""
import supybot
import supybot.world as world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you're keeping the plugin in CVS or some similar system.
__version__ = "%%VERSION%%"
# XXX Replace this with an appropriate author or supybot.Author instance.
__author__ = supybot.authors.jemfinch
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {
supybot.authors.skorobeus: ['contributors'],
}
from . import config
# This had to be renamed because of stupid case-insensitivity issues.
from . import plugin
from imp import reload
reload(plugin) # In case we're being reloaded.
# Add more reloads here if you add third-party modules and want them to be
# reloaded when this plugin is reloaded. Don't forget to import them as well!
if world.testing:
from . import test
Class = plugin.Class
configure = config.configure
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| ProgVal/Limnoria-test | plugins/Plugin/__init__.py | Python | bsd-3-clause | 2,763 |
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
_testing as tm,
)
def test_split(any_string_dtype):
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.split("_")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h"], dtype=any_string_dtype)
result = values.str.split("__")
tm.assert_series_equal(result, exp)
result = values.str.split("__", expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype)
result = values.str.split("[,_]")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
def test_split_object_mixed():
mixed = Series(["a_b_c", np.nan, "d_e_f", True, datetime.today(), None, 1, 2.0])
result = mixed.str.split("_")
exp = Series(
[
["a", "b", "c"],
np.nan,
["d", "e", "f"],
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
]
)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split("_", expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
@pytest.mark.parametrize("method", ["split", "rsplit"])
def test_split_n(any_string_dtype, method):
s = Series(["a b", pd.NA, "b c"], dtype=any_string_dtype)
expected = Series([["a", "b"], pd.NA, ["b", "c"]])
result = getattr(s.str, method)(" ", n=None)
tm.assert_series_equal(result, expected)
result = getattr(s.str, method)(" ", n=0)
tm.assert_series_equal(result, expected)
def test_rsplit(any_string_dtype):
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.rsplit("_")
exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h"], dtype=any_string_dtype)
result = values.str.rsplit("__")
tm.assert_series_equal(result, exp)
result = values.str.rsplit("__", expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype)
result = values.str.rsplit("[,_]")
exp = Series([["a,b_c"], ["c_d,e"], np.nan, ["f,g,h"]])
tm.assert_series_equal(result, exp)
# setting max number of splits, make sure it's from reverse
values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = values.str.rsplit("_", n=1)
exp = Series([["a_b", "c"], ["c_d", "e"], np.nan, ["f_g", "h"]])
tm.assert_series_equal(result, exp)
def test_rsplit_object_mixed():
# mixed
mixed = Series(["a_b_c", np.nan, "d_e_f", True, datetime.today(), None, 1, 2.0])
result = mixed.str.rsplit("_")
exp = Series(
[
["a", "b", "c"],
np.nan,
["d", "e", "f"],
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
]
)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit("_", expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
def test_split_blank_string(any_string_dtype):
# expand blank split GH 20067
values = Series([""], name="test", dtype=any_string_dtype)
result = values.str.split(expand=True)
exp = DataFrame([[]], dtype=any_string_dtype) # NOTE: this is NOT an empty df
tm.assert_frame_equal(result, exp)
values = Series(["a b c", "a b", "", " "], name="test", dtype=any_string_dtype)
result = values.str.split(expand=True)
exp = DataFrame(
[
["a", "b", "c"],
["a", "b", np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
def test_split_noargs(any_string_dtype):
# #1859
s = Series(["Wes McKinney", "Travis Oliphant"], dtype=any_string_dtype)
result = s.str.split()
expected = ["Travis", "Oliphant"]
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
@pytest.mark.parametrize(
"data, pat",
[
(["bd asdf jfg", "kjasdflqw asdfnfk"], None),
(["bd asdf jfg", "kjasdflqw asdfnfk"], "asdf"),
(["bd_asdf_jfg", "kjasdflqw_asdfnfk"], "_"),
],
)
def test_split_maxsplit(data, pat, any_string_dtype):
    # n=0 (re.split's "no limit") and n=-1 (str.split's "no limit") should
    # both behave like the default and split on all occurrences.
s = Series(data, dtype=any_string_dtype)
result = s.str.split(pat=pat, n=-1)
xp = s.str.split(pat=pat)
tm.assert_series_equal(result, xp)
result = s.str.split(pat=pat, n=0)
tm.assert_series_equal(result, xp)
@pytest.mark.parametrize(
"data, pat, expected",
[
(
["split once", "split once too!"],
None,
Series({0: ["split", "once"], 1: ["split", "once too!"]}),
),
(
["split_once", "split_once_too!"],
"_",
Series({0: ["split", "once"], 1: ["split", "once_too!"]}),
),
],
)
def test_split_no_pat_with_nonzero_n(data, pat, expected, any_string_dtype):
s = Series(data, dtype=any_string_dtype)
result = s.str.split(pat=pat, n=1)
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(any_string_dtype):
s = Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)
result = s.str.split("_", expand=True)
exp = DataFrame({0: Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)})
tm.assert_frame_equal(result, exp)
s = Series(["some_equal_splits", "with_no_nans"], dtype=any_string_dtype)
result = s.str.split("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
s = Series(
["some_unequal_splits", "one_of_these_things_is_not"], dtype=any_string_dtype
)
result = s.str.split("_", expand=True)
exp = DataFrame(
{
0: ["some", "one"],
1: ["unequal", "of"],
2: ["splits", "these"],
3: [np.nan, "things"],
4: [np.nan, "is"],
5: [np.nan, "not"],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
s = Series(
["some_splits", "with_index"], index=["preserve", "me"], dtype=any_string_dtype
)
result = s.str.split("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["splits", "index"]},
index=["preserve", "me"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
with pytest.raises(ValueError, match="expand must be"):
s.str.split("_", expand="not_a_boolean")
def test_split_to_multiindex_expand():
# https://github.com/pandas-dev/pandas/issues/23677
idx = Index(["nosplit", "alsonosplit", np.nan])
result = idx.str.split("_", expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(["some_equal_splits", "with_no_nans", np.nan, None])
result = idx.str.split("_", expand=True)
exp = MultiIndex.from_tuples(
[
("some", "equal", "splits"),
("with", "no", "nans"),
[np.nan, np.nan, np.nan],
[None, None, None],
]
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(["some_unequal_splits", "one_of_these_things_is_not", np.nan, None])
result = idx.str.split("_", expand=True)
exp = MultiIndex.from_tuples(
[
("some", "unequal", "splits", np.nan, np.nan, np.nan),
("one", "of", "these", "things", "is", "not"),
(np.nan, np.nan, np.nan, np.nan, np.nan, np.nan),
(None, None, None, None, None, None),
]
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with pytest.raises(ValueError, match="expand must be"):
idx.str.split("_", expand="not_a_boolean")
def test_rsplit_to_dataframe_expand(any_string_dtype):
s = Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)
result = s.str.rsplit("_", expand=True)
exp = DataFrame({0: Series(["nosplit", "alsonosplit"])}, dtype=any_string_dtype)
tm.assert_frame_equal(result, exp)
s = Series(["some_equal_splits", "with_no_nans"], dtype=any_string_dtype)
result = s.str.rsplit("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
result = s.str.rsplit("_", expand=True, n=2)
exp = DataFrame(
{0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
result = s.str.rsplit("_", expand=True, n=1)
exp = DataFrame(
{0: ["some_equal", "with_no"], 1: ["splits", "nans"]}, dtype=any_string_dtype
)
tm.assert_frame_equal(result, exp)
s = Series(
["some_splits", "with_index"], index=["preserve", "me"], dtype=any_string_dtype
)
result = s.str.rsplit("_", expand=True)
exp = DataFrame(
{0: ["some", "with"], 1: ["splits", "index"]},
index=["preserve", "me"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, exp)
def test_rsplit_to_multiindex_expand():
idx = Index(["nosplit", "alsonosplit"])
result = idx.str.rsplit("_", expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(["some_equal_splits", "with_no_nans"])
result = idx.str.rsplit("_", expand=True)
exp = MultiIndex.from_tuples([("some", "equal", "splits"), ("with", "no", "nans")])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(["some_equal_splits", "with_no_nans"])
result = idx.str.rsplit("_", expand=True, n=1)
exp = MultiIndex.from_tuples([("some_equal", "splits"), ("with_no", "nans")])
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
def test_split_nan_expand(any_string_dtype):
# gh-18450
s = Series(["foo,bar,baz", np.nan], dtype=any_string_dtype)
result = s.str.split(",", expand=True)
exp = DataFrame(
[["foo", "bar", "baz"], [np.nan, np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, exp)
# check that these are actually np.nan/pd.NA and not None
# TODO see GH 18463
# tm.assert_frame_equal does not differentiate
if any_string_dtype == "object":
assert all(np.isnan(x) for x in result.iloc[1])
else:
assert all(x is pd.NA for x in result.iloc[1])
def test_split_with_name(any_string_dtype):
# GH 12617
# should preserve name
s = Series(["a,b", "c,d"], name="xxx", dtype=any_string_dtype)
res = s.str.split(",")
exp = Series([["a", "b"], ["c", "d"]], name="xxx")
tm.assert_series_equal(res, exp)
res = s.str.split(",", expand=True)
exp = DataFrame([["a", "b"], ["c", "d"]], dtype=any_string_dtype)
tm.assert_frame_equal(res, exp)
idx = Index(["a,b", "c,d"], name="xxx")
res = idx.str.split(",")
exp = Index([["a", "b"], ["c", "d"]], name="xxx")
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
res = idx.str.split(",", expand=True)
exp = MultiIndex.from_tuples([("a", "b"), ("c", "d")])
assert res.nlevels == 2
tm.assert_index_equal(res, exp)
def test_partition_series(any_string_dtype):
# https://github.com/pandas-dev/pandas/issues/23558
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype)
result = s.str.partition("_", expand=False)
expected = Series(
[("a", "_", "b_c"), ("c", "_", "d_e"), np.nan, ("f", "_", "g_h"), None]
)
tm.assert_series_equal(result, expected)
result = s.str.rpartition("_", expand=False)
expected = Series(
[("a_b", "_", "c"), ("c_d", "_", "e"), np.nan, ("f_g", "_", "h"), None]
)
tm.assert_series_equal(result, expected)
# more than one char
s = Series(["a__b__c", "c__d__e", np.nan, "f__g__h", None])
result = s.str.partition("__", expand=False)
expected = Series(
[
("a", "__", "b__c"),
("c", "__", "d__e"),
np.nan,
("f", "__", "g__h"),
None,
],
)
tm.assert_series_equal(result, expected)
result = s.str.rpartition("__", expand=False)
expected = Series(
[
("a__b", "__", "c"),
("c__d", "__", "e"),
np.nan,
("f__g", "__", "h"),
None,
],
)
tm.assert_series_equal(result, expected)
# None
s = Series(["a b c", "c d e", np.nan, "f g h", None], dtype=any_string_dtype)
result = s.str.partition(expand=False)
expected = Series(
[("a", " ", "b c"), ("c", " ", "d e"), np.nan, ("f", " ", "g h"), None]
)
tm.assert_series_equal(result, expected)
result = s.str.rpartition(expand=False)
expected = Series(
[("a b", " ", "c"), ("c d", " ", "e"), np.nan, ("f g", " ", "h"), None]
)
tm.assert_series_equal(result, expected)
# Not split
s = Series(["abc", "cde", np.nan, "fgh", None], dtype=any_string_dtype)
result = s.str.partition("_", expand=False)
expected = Series([("abc", "", ""), ("cde", "", ""), np.nan, ("fgh", "", ""), None])
tm.assert_series_equal(result, expected)
result = s.str.rpartition("_", expand=False)
expected = Series([("", "", "abc"), ("", "", "cde"), np.nan, ("", "", "fgh"), None])
tm.assert_series_equal(result, expected)
# unicode
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = s.str.partition("_", expand=False)
expected = Series([("a", "_", "b_c"), ("c", "_", "d_e"), np.nan, ("f", "_", "g_h")])
tm.assert_series_equal(result, expected)
result = s.str.rpartition("_", expand=False)
expected = Series([("a_b", "_", "c"), ("c_d", "_", "e"), np.nan, ("f_g", "_", "h")])
tm.assert_series_equal(result, expected)
# compare to standard lib
s = Series(["A_B_C", "B_C_D", "E_F_G", "EFGHEF"], dtype=any_string_dtype)
result = s.str.partition("_", expand=False).tolist()
assert result == [v.partition("_") for v in s]
result = s.str.rpartition("_", expand=False).tolist()
assert result == [v.rpartition("_") for v in s]
def test_partition_index():
# https://github.com/pandas-dev/pandas/issues/23558
values = Index(["a_b_c", "c_d_e", "f_g_h", np.nan, None])
result = values.str.partition("_", expand=False)
exp = Index(
np.array(
[("a", "_", "b_c"), ("c", "_", "d_e"), ("f", "_", "g_h"), np.nan, None],
dtype=object,
)
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.rpartition("_", expand=False)
exp = Index(
np.array(
[("a_b", "_", "c"), ("c_d", "_", "e"), ("f_g", "_", "h"), np.nan, None],
dtype=object,
)
)
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.partition("_")
exp = Index(
[
("a", "_", "b_c"),
("c", "_", "d_e"),
("f", "_", "g_h"),
(np.nan, np.nan, np.nan),
(None, None, None),
]
)
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
result = values.str.rpartition("_")
exp = Index(
[
("a_b", "_", "c"),
("c_d", "_", "e"),
("f_g", "_", "h"),
(np.nan, np.nan, np.nan),
(None, None, None),
]
)
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
def test_partition_to_dataframe(any_string_dtype):
# https://github.com/pandas-dev/pandas/issues/23558
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype)
result = s.str.partition("_")
expected = DataFrame(
{
0: ["a", "c", np.nan, "f", None],
1: ["_", "_", np.nan, "_", None],
2: ["b_c", "d_e", np.nan, "g_h", None],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
result = s.str.rpartition("_")
expected = DataFrame(
{
0: ["a_b", "c_d", np.nan, "f_g", None],
1: ["_", "_", np.nan, "_", None],
2: ["c", "e", np.nan, "h", None],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype)
result = s.str.partition("_", expand=True)
expected = DataFrame(
{
0: ["a", "c", np.nan, "f", None],
1: ["_", "_", np.nan, "_", None],
2: ["b_c", "d_e", np.nan, "g_h", None],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
result = s.str.rpartition("_", expand=True)
expected = DataFrame(
{
0: ["a_b", "c_d", np.nan, "f_g", None],
1: ["_", "_", np.nan, "_", None],
2: ["c", "e", np.nan, "h", None],
},
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
def test_partition_with_name(any_string_dtype):
# GH 12617
s = Series(["a,b", "c,d"], name="xxx", dtype=any_string_dtype)
result = s.str.partition(",")
expected = DataFrame(
{0: ["a", "c"], 1: [",", ","], 2: ["b", "d"]}, dtype=any_string_dtype
)
tm.assert_frame_equal(result, expected)
# should preserve name
result = s.str.partition(",", expand=False)
expected = Series([("a", ",", "b"), ("c", ",", "d")], name="xxx")
tm.assert_series_equal(result, expected)
def test_partition_index_with_name():
idx = Index(["a,b", "c,d"], name="xxx")
result = idx.str.partition(",")
expected = MultiIndex.from_tuples([("a", ",", "b"), ("c", ",", "d")])
assert result.nlevels == 3
tm.assert_index_equal(result, expected)
# should preserve name
result = idx.str.partition(",", expand=False)
expected = Index(np.array([("a", ",", "b"), ("c", ",", "d")]), name="xxx")
assert result.nlevels == 1
tm.assert_index_equal(result, expected)
def test_partition_sep_kwarg(any_string_dtype):
# GH 22676; depr kwarg "pat" in favor of "sep"
s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
expected = s.str.partition(sep="_")
result = s.str.partition("_")
tm.assert_frame_equal(result, expected)
expected = s.str.rpartition(sep="_")
result = s.str.rpartition("_")
tm.assert_frame_equal(result, expected)
def test_get():
ser = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"])
result = ser.str.split("_").str.get(1)
expected = Series(["b", "d", np.nan, "g"])
tm.assert_series_equal(result, expected)
def test_get_mixed_object():
ser = Series(["a_b_c", np.nan, "c_d_e", True, datetime.today(), None, 1, 2.0])
result = ser.str.split("_").str.get(1)
expected = Series(["b", np.nan, "d", np.nan, np.nan, np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_get_bounds():
ser = Series(["1_2_3_4_5", "6_7_8_9_10", "11_12"])
# positive index
result = ser.str.split("_").str.get(2)
expected = Series(["3", "8", np.nan])
tm.assert_series_equal(result, expected)
# negative index
result = ser.str.split("_").str.get(-3)
expected = Series(["3", "8", np.nan])
tm.assert_series_equal(result, expected)
def test_get_complex():
# GH 20671, getting value not in dict raising `KeyError`
ser = Series([(1, 2, 3), [1, 2, 3], {1, 2, 3}, {1: "a", 2: "b", 3: "c"}])
result = ser.str.get(1)
expected = Series([2, 2, np.nan, "a"])
tm.assert_series_equal(result, expected)
result = ser.str.get(-1)
expected = Series([3, 3, np.nan, np.nan])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("to_type", [tuple, list, np.array])
def test_get_complex_nested(to_type):
ser = Series([to_type([to_type([1, 2])])])
result = ser.str.get(0)
expected = Series([to_type([1, 2])])
tm.assert_series_equal(result, expected)
result = ser.str.get(1)
expected = Series([np.nan])
tm.assert_series_equal(result, expected)
def test_get_strings(any_string_dtype):
ser = Series(["a", "ab", np.nan, "abc"], dtype=any_string_dtype)
result = ser.str.get(2)
expected = Series([np.nan, np.nan, np.nan, "c"], dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
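# For quick reference, a hedged summary of the partition behaviour exercised in
# the tests above (values illustrative, mirroring str.partition on plain strings):
#
#     Series(["a_b_c"]).str.partition("_")
#     #        0    1      2
#     #   0  "a"  "_"  "b_c"
#
#     Series(["a_b_c"]).str.partition("_", expand=False)
#     #   0    ("a", "_", "b_c")
#     #   dtype: object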
| dsm054/pandas | pandas/tests/strings/test_split_partition.py | Python | bsd-3-clause | 21,379 |
#
# ImageViewCairo.py -- classes for the display of Ginga canvases
# in Cairo surfaces
#
# Eric Jeschke ([email protected])
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
import sys
import cairo
import numpy
from io import BytesIO
from ginga import ImageView
from ginga.cairow.CanvasRenderCairo import CanvasRenderer
class ImageViewCairoError(ImageView.ImageViewError):
pass
class ImageViewCairo(ImageView.ImageViewBase):
def __init__(self, logger=None, rgbmap=None, settings=None):
ImageView.ImageViewBase.__init__(self, logger=logger,
rgbmap=rgbmap,
settings=settings)
self.surface = None
self.dst_surface = None
if sys.byteorder == 'little':
self._rgb_order = 'BGRA'
self._alpha_idx = 3
else:
self._rgb_order = 'ARGB'
self._alpha_idx = 0
self.renderer = CanvasRenderer(self)
self.cr = None
self.message = None
self.t_.setDefaults(show_pan_position=False,
onscreen_ff='Sans Serif')
def _render_offscreen(self, surface, data, dst_x, dst_y,
width, height):
# NOTE [A]
daht, dawd, depth = data.shape
self.logger.debug("data shape is %dx%dx%d" % (dawd, daht, depth))
cr = cairo.Context(surface)
self.cr = cr
# fill surface with background color
imgwin_wd, imgwin_ht = self.get_window_size()
cr.rectangle(0, 0, imgwin_wd, imgwin_ht)
r, g, b = self.get_bg()
cr.set_source_rgba(r, g, b)
#cr.set_operator(cairo.OPERATOR_OVER)
cr.fill()
## arr8 = data.astype(numpy.uint8).flatten()
arr8 = data
## stride = cairo.ImageSurface.format_stride_for_width(cairo.FORMAT_RGB24,
stride = cairo.ImageSurface.format_stride_for_width(cairo.FORMAT_ARGB32,
width)
img_surface = cairo.ImageSurface.create_for_data(arr8,
#cairo.FORMAT_RGB24,
cairo.FORMAT_ARGB32,
dawd, daht, stride)
cr.set_source_surface(img_surface, dst_x, dst_y)
cr.set_operator(cairo.OPERATOR_SOURCE)
cr.mask_surface(img_surface, dst_x, dst_y)
#cr.rectangle(dst_x, dst_y, dawd, daht)
cr.fill()
# Draw a cross in the center of the window in debug mode
if self.t_['show_pan_position']:
cr.set_source_rgb(1.0, 0.0, 0.0)
cr.set_line_width(1)
ctr_x, ctr_y = self.get_center()
cr.move_to(ctr_x - 10, ctr_y)
cr.line_to(ctr_x + 10, ctr_y)
cr.move_to(ctr_x, ctr_y - 10)
cr.line_to(ctr_x, ctr_y + 10)
cr.close_path()
cr.stroke_preserve()
# render self.message
if self.message:
self.draw_message(cr, imgwin_wd, imgwin_ht,
self.message)
def draw_message(self, cr, width, height, message):
r, g, b = self.img_fg
#cr.set_source_rgb(1.0, 1.0, 1.0)
cr.set_source_rgb(r, g, b)
cr.select_font_face(self.t_['onscreen_ff'])
cr.set_font_size(24.0)
        _x_bearing, _y_bearing, wd, ht, _x_advance, _y_advance = cr.text_extents(message)
y = ((height // 3) * 2) - (ht // 2)
x = (width // 2) - (wd // 2)
cr.move_to(x, y)
cr.show_text(message)
def get_offscreen_context(self):
if self.surface is None:
raise ImageViewCairoError("No offscreen surface defined")
cr = cairo.Context(self.surface)
return cr
def get_offscreen_surface(self):
return self.surface
def render_image(self, rgbobj, dst_x, dst_y):
"""Render the image represented by (rgbobj) at dst_x, dst_y
in the pixel space.
"""
self.logger.debug("redraw surface")
if self.surface is None:
return
# Prepare array for Cairo rendering
arr = rgbobj.get_array(self._rgb_order)
(height, width) = arr.shape[:2]
return self._render_offscreen(self.surface, arr, dst_x, dst_y,
width, height)
def configure_surface(self, width, height):
arr8 = numpy.zeros(height*width*4, dtype=numpy.uint8)
#stride = cairo.ImageSurface.format_stride_for_width(cairo.FORMAT_RGB24,
stride = cairo.ImageSurface.format_stride_for_width(cairo.FORMAT_ARGB32,
width)
surface = cairo.ImageSurface.create_for_data(arr8,
#cairo.FORMAT_RGB24,
cairo.FORMAT_ARGB32,
width, height, stride)
self.surface = surface
self.configure(width, height)
def save_image_as_surface(self, surface):
try:
self.dst_surface = surface
self.redraw()
finally:
self.dst_surface = None
def get_png_image_as_buffer(self, output=None):
ibuf = output
if ibuf is None:
ibuf = BytesIO()
        # write_to_png() returns None; the PNG bytes are written into ibuf
        self.surface.write_to_png(ibuf)
return ibuf
def update_image(self):
if not self.surface:
return
if not self.dst_surface:
#raise ImageViewCairoError("Please set up the output destination")
self.logger.error("Please set up the output destination")
return
cr = cairo.Context(self.dst_surface)
self.logger.debug("updating destination cairo surface")
# redraw the surface from backing surface
cr.set_source_surface(self.surface, 0, 0)
cr.set_operator(cairo.OPERATOR_SOURCE)
cr.paint()
return False
def set_cursor(self, cursor):
pass
def define_cursor(self, ctype, cursor):
pass
def get_cursor(self, ctype):
return self.cursor[ctype]
def switch_cursor(self, ctype):
self.set_cursor(self.cursor[ctype])
def get_rgb_order(self):
return self._rgb_order
def onscreen_message(self, text, delay=None):
pass
def show_pan_mark(self, tf):
self.t_.set(show_pan_position=tf)
self.redraw(whence=3)
def pix2canvas(self, x, y):
x, y = self.cr.device_to_user(x, y)
return (x, y)
def canvas2pix(self, x, y):
x, y = self.cr.user_to_device(x, y)
return (x, y)
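    # Hedged usage sketch (caller-side names such as `astro_image` are assumed,
    # not defined in this module): a typical offscreen rendering flow looks like
    #
    #     viewer = ImageViewCairo(logger=logger)
    #     viewer.configure_surface(512, 512)
    #     viewer.set_image(astro_image)
    #     png_bytes = viewer.get_png_image_as_buffer().getvalue()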
#END
| rupak0577/ginga | ginga/cairow/ImageViewCairo.py | Python | bsd-3-clause | 6,874 |
# -*- coding: utf-8 -*-
"""
logbook._fallback
~~~~~~~~~~~~~~~~~
Fallback implementations in case speedups is not around.
:copyright: (c) 2010 by Armin Ronacher, Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
from itertools import count
from logbook.helpers import get_iterator_next_method
from logbook.concurrency import (thread_get_ident, greenlet_get_ident,
thread_local, greenlet_local,
ThreadLock, GreenletRLock, is_gevent_enabled)
_missing = object()
_MAX_CONTEXT_OBJECT_CACHE = 256
def group_reflected_property(name, default, fallback=_missing):
"""Returns a property for a given name that falls back to the
value of the group if set. If there is no such group, the
provided default is used.
"""
def _get(self):
rv = getattr(self, '_' + name, _missing)
if rv is not _missing and rv != fallback:
return rv
if self.group is None:
return default
return getattr(self.group, name)
def _set(self, value):
setattr(self, '_' + name, value)
def _del(self):
delattr(self, '_' + name)
return property(_get, _set, _del)
class _StackBound(object):
def __init__(self, obj, push, pop):
self.__obj = obj
self.__push = push
self.__pop = pop
def __enter__(self):
self.__push()
return self.__obj
def __exit__(self, exc_type, exc_value, tb):
self.__pop()
class StackedObject(object):
"""Baseclass for all objects that provide stack manipulation
operations.
"""
def push_greenlet(self):
"""Pushes the stacked object to the greenlet stack."""
raise NotImplementedError()
def pop_greenlet(self):
"""Pops the stacked object from the greenlet stack."""
raise NotImplementedError()
def push_thread(self):
"""Pushes the stacked object to the thread stack."""
raise NotImplementedError()
def pop_thread(self):
"""Pops the stacked object from the thread stack."""
raise NotImplementedError()
def push_application(self):
"""Pushes the stacked object to the application stack."""
raise NotImplementedError()
def pop_application(self):
"""Pops the stacked object from the application stack."""
raise NotImplementedError()
def __enter__(self):
if is_gevent_enabled():
self.push_greenlet()
else:
self.push_thread()
return self
def __exit__(self, exc_type, exc_value, tb):
if is_gevent_enabled():
self.pop_greenlet()
else:
self.pop_thread()
def greenletbound(self, _cls=_StackBound):
"""Can be used in combination with the `with` statement to
execute code while the object is bound to the greenlet.
"""
return _cls(self, self.push_greenlet, self.pop_greenlet)
def threadbound(self, _cls=_StackBound):
"""Can be used in combination with the `with` statement to
execute code while the object is bound to the thread.
"""
return _cls(self, self.push_thread, self.pop_thread)
def applicationbound(self, _cls=_StackBound):
"""Can be used in combination with the `with` statement to
execute code while the object is bound to the application.
"""
return _cls(self, self.push_application, self.pop_application)
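# Hedged usage sketch (handler class name assumed): any StackedObject subclass,
# for example a logbook handler, is bound with the context managers above:
#
#     handler = StderrHandler()
#     with handler.threadbound():
#         ...   # records emitted in this thread pass through `handler`
#     with handler.applicationbound():
#         ...   # records emitted anywhere pass through `handler`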
class ContextStackManager(object):
"""Helper class for context objects that manages a stack of
objects.
"""
def __init__(self):
self._global = []
self._thread_context_lock = ThreadLock()
self._thread_context = thread_local()
self._greenlet_context_lock = GreenletRLock()
self._greenlet_context = greenlet_local()
self._cache = {}
self._stackop = get_iterator_next_method(count())
def iter_context_objects(self):
"""Returns an iterator over all objects for the combined
application and context cache.
"""
use_gevent = is_gevent_enabled()
tid = greenlet_get_ident() if use_gevent else thread_get_ident()
objects = self._cache.get(tid)
if objects is None:
if len(self._cache) > _MAX_CONTEXT_OBJECT_CACHE:
self._cache.clear()
objects = self._global[:]
objects.extend(getattr(self._thread_context, 'stack', ()))
if use_gevent:
objects.extend(getattr(self._greenlet_context, 'stack', ()))
objects.sort(reverse=True)
objects = [x[1] for x in objects]
self._cache[tid] = objects
return iter(objects)
def push_greenlet(self, obj):
self._greenlet_context_lock.acquire()
try:
            self._cache.pop(greenlet_get_ident(), None)  # greenlet ids have a remote chance of colliding with thread ids
item = (self._stackop(), obj)
stack = getattr(self._greenlet_context, 'stack', None)
if stack is None:
self._greenlet_context.stack = [item]
else:
stack.append(item)
finally:
self._greenlet_context_lock.release()
def pop_greenlet(self):
self._greenlet_context_lock.acquire()
try:
            self._cache.pop(greenlet_get_ident(), None)  # greenlet ids have a remote chance of colliding with thread ids
stack = getattr(self._greenlet_context, 'stack', None)
assert stack, 'no objects on stack'
return stack.pop()[1]
finally:
self._greenlet_context_lock.release()
def push_thread(self, obj):
self._thread_context_lock.acquire()
try:
self._cache.pop(thread_get_ident(), None)
item = (self._stackop(), obj)
stack = getattr(self._thread_context, 'stack', None)
if stack is None:
self._thread_context.stack = [item]
else:
stack.append(item)
finally:
self._thread_context_lock.release()
def pop_thread(self):
self._thread_context_lock.acquire()
try:
self._cache.pop(thread_get_ident(), None)
stack = getattr(self._thread_context, 'stack', None)
assert stack, 'no objects on stack'
return stack.pop()[1]
finally:
self._thread_context_lock.release()
def push_application(self, obj):
self._global.append((self._stackop(), obj))
self._cache.clear()
def pop_application(self):
assert self._global, 'no objects on application stack'
popped = self._global.pop()[1]
self._cache.clear()
return popped
| agustinhenze/logbook.debian | logbook/_fallback.py | Python | bsd-3-clause | 6,767 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.python import log
from twisted.internet import reactor, defer
from buildbot import util
if False: # for debugging
debuglog = log.msg
else:
debuglog = lambda m: None
class BaseLock:
"""
Class handling claiming and releasing of L{self}, and keeping track of
current and waiting owners.
@note: Ideally, we'd like to maintain FIFO order. The place to do that
would be the L{isAvailable()} function. However, this function is
called by builds/steps both for the first time, and after waking
them up by L{self} from the L{self.waiting} queue. There is
currently no way of distinguishing between them.
"""
description = "<BaseLock>"
def __init__(self, name, maxCount=1):
self.name = name # Name of the lock
self.waiting = [] # Current queue, tuples (LockAccess, deferred)
self.owners = [] # Current owners, tuples (owner, LockAccess)
self.maxCount = maxCount # maximal number of counting owners
def __repr__(self):
return self.description
def _getOwnersCount(self):
""" Return the number of current exclusive and counting owners.
@return: Tuple (number exclusive owners, number counting owners)
"""
num_excl, num_counting = 0, 0
for owner in self.owners:
if owner[1].mode == 'exclusive':
num_excl = num_excl + 1
else: # mode == 'counting'
num_counting = num_counting + 1
assert (num_excl == 1 and num_counting == 0) \
or (num_excl == 0 and num_counting <= self.maxCount)
return num_excl, num_counting
def isAvailable(self, access):
""" Return a boolean whether the lock is available for claiming """
debuglog("%s isAvailable(%s): self.owners=%r"
% (self, access, self.owners))
num_excl, num_counting = self._getOwnersCount()
if access.mode == 'counting':
# Wants counting access
return num_excl == 0 and num_counting < self.maxCount
else:
# Wants exclusive access
return num_excl == 0 and num_counting == 0
def claim(self, owner, access):
""" Claim the lock (lock must be available) """
debuglog("%s claim(%s, %s)" % (self, owner, access.mode))
assert owner is not None
assert self.isAvailable(access), "ask for isAvailable() first"
assert isinstance(access, LockAccess)
assert access.mode in ['counting', 'exclusive']
self.owners.append((owner, access))
debuglog(" %s is claimed '%s'" % (self, access.mode))
def release(self, owner, access):
""" Release the lock """
assert isinstance(access, LockAccess)
debuglog("%s release(%s, %s)" % (self, owner, access.mode))
entry = (owner, access)
assert entry in self.owners
self.owners.remove(entry)
# who can we wake up?
# After an exclusive access, we may need to wake up several waiting.
# Break out of the loop when the first waiting client should not be awakened.
num_excl, num_counting = self._getOwnersCount()
while len(self.waiting) > 0:
access, d = self.waiting[0]
if access.mode == 'counting':
if num_excl > 0 or num_counting == self.maxCount:
break
else:
num_counting = num_counting + 1
else:
# access.mode == 'exclusive'
if num_excl > 0 or num_counting > 0:
break
else:
num_excl = num_excl + 1
del self.waiting[0]
reactor.callLater(0, d.callback, self)
def waitUntilMaybeAvailable(self, owner, access):
"""Fire when the lock *might* be available. The caller will need to
check with isAvailable() when the deferred fires. This loose form is
used to avoid deadlocks. If we were interested in a stronger form,
this would be named 'waitUntilAvailable', and the deferred would fire
after the lock had been claimed.
"""
debuglog("%s waitUntilAvailable(%s)" % (self, owner))
assert isinstance(access, LockAccess)
if self.isAvailable(access):
return defer.succeed(self)
d = defer.Deferred()
self.waiting.append((access, d))
return d
def stopWaitingUntilAvailable(self, owner, access, d):
debuglog("%s stopWaitingUntilAvailable(%s)" % (self, owner))
assert isinstance(access, LockAccess)
assert (access, d) in self.waiting
self.waiting.remove( (access, d) )
def isOwner(self, owner, access):
return (owner, access) in self.owners
class RealMasterLock(BaseLock):
def __init__(self, lockid):
BaseLock.__init__(self, lockid.name, lockid.maxCount)
self.description = "<MasterLock(%s, %s)>" % (self.name, self.maxCount)
def getLock(self, slave):
return self
class RealSlaveLock:
def __init__(self, lockid):
self.name = lockid.name
self.maxCount = lockid.maxCount
self.maxCountForSlave = lockid.maxCountForSlave
self.description = "<SlaveLock(%s, %s, %s)>" % (self.name,
self.maxCount,
self.maxCountForSlave)
self.locks = {}
def __repr__(self):
return self.description
def getLock(self, slavebuilder):
slavename = slavebuilder.slave.slavename
        if slavename not in self.locks:
            maxCount = self.maxCountForSlave.get(slavename,
                                                 self.maxCount)
            lock = BaseLock(self.name, maxCount)
            lock.description = "<SlaveLock(%s, %s)[%s] %d>" % (self.name, maxCount,
                                                               slavename, id(lock))
            self.locks[slavename] = lock
        return self.locks[slavename]
class LockAccess(util.ComparableMixin):
""" I am an object representing a way to access a lock.
@param lockid: LockId instance that should be accessed.
@type lockid: A MasterLock or SlaveLock instance.
@param mode: Mode of accessing the lock.
@type mode: A string, either 'counting' or 'exclusive'.
"""
compare_attrs = ['lockid', 'mode']
def __init__(self, lockid, mode):
self.lockid = lockid
self.mode = mode
assert isinstance(lockid, (MasterLock, SlaveLock))
assert mode in ['counting', 'exclusive']
class BaseLockId(util.ComparableMixin):
""" Abstract base class for LockId classes.
Sets up the 'access()' function for the LockId's available to the user
(MasterLock and SlaveLock classes).
Derived classes should add
- Comparison with the L{util.ComparableMixin} via the L{compare_attrs}
class variable.
- Link to the actual lock class should be added with the L{lockClass}
class variable.
"""
def access(self, mode):
""" Express how the lock should be accessed """
assert mode in ['counting', 'exclusive']
return LockAccess(self, mode)
def defaultAccess(self):
""" For buildbot 0.7.7 compability: When user doesn't specify an access
mode, this one is chosen.
"""
return self.access('counting')
# master.cfg should only reference the following MasterLock and SlaveLock
# classes. They are identifiers that will be turned into real Locks later,
# via the BotMaster.getLockByID method.
class MasterLock(BaseLockId):
"""I am a semaphore that limits the number of simultaneous actions.
Builds and BuildSteps can declare that they wish to claim me as they run.
Only a limited number of such builds or steps will be able to run
simultaneously. By default this number is one, but my maxCount parameter
can be raised to allow two or three or more operations to happen at the
same time.
Use this to protect a resource that is shared among all builders and all
slaves, for example to limit the load on a common SVN repository.
"""
compare_attrs = ['name', 'maxCount']
lockClass = RealMasterLock
def __init__(self, name, maxCount=1):
self.name = name
self.maxCount = maxCount
class SlaveLock(BaseLockId):
"""I am a semaphore that limits simultaneous actions on each buildslave.
Builds and BuildSteps can declare that they wish to claim me as they run.
Only a limited number of such builds or steps will be able to run
simultaneously on any given buildslave. By default this number is one,
but my maxCount parameter can be raised to allow two or three or more
operations to happen on a single buildslave at the same time.
Use this to protect a resource that is shared among all the builds taking
place on each slave, for example to limit CPU or memory load on an
underpowered machine.
Each buildslave will get an independent copy of this semaphore. By
default each copy will use the same owner count (set with maxCount), but
you can provide maxCountForSlave with a dictionary that maps slavename to
owner count, to allow some slaves more parallelism than others.
"""
compare_attrs = ['name', 'maxCount', '_maxCountForSlaveList']
lockClass = RealSlaveLock
def __init__(self, name, maxCount=1, maxCountForSlave={}):
self.name = name
self.maxCount = maxCount
self.maxCountForSlave = maxCountForSlave
# for comparison purposes, turn this dictionary into a stably-sorted
# list of tuples
self._maxCountForSlaveList = self.maxCountForSlave.items()
self._maxCountForSlaveList.sort()
self._maxCountForSlaveList = tuple(self._maxCountForSlaveList)
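# Hedged master.cfg sketch (the builder factory `f` and the ShellCommand step
# are assumed, not defined in this module): lock identifiers are declared once
# and attached to builds or steps together with an access mode, e.g.
#
#     svn_lock = MasterLock('svn')
#     cpu_lock = SlaveLock('cpu', maxCount=2)
#     f.addStep(ShellCommand(command=['svn', 'up'],
#                            locks=[svn_lock.access('exclusive'),
#                                   cpu_lock.access('counting')]))
#
# BotMaster.getLockByID later turns these identifiers into the Real*Lock
# objects defined above.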
| eunchong/build | third_party/buildbot_8_4p1/buildbot/locks.py | Python | bsd-3-clause | 10,720 |
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Entry point for running stress tests."""
import argparse
import threading
from grpc.beta import implementations
from six.moves import queue
from src.proto.grpc.testing import metrics_pb2
from src.proto.grpc.testing import test_pb2
from tests.interop import methods
from tests.qps import histogram
from tests.stress import metrics_server
from tests.stress import test_runner
def _args():
parser = argparse.ArgumentParser(description='gRPC Python stress test client')
parser.add_argument(
'--server_addresses',
      help='comma separated list of hostname:port to run servers on',
default='localhost:8080', type=str)
parser.add_argument(
'--test_cases',
      help='comma separated list of testcase:weighting of tests to run',
default='large_unary:100',
type=str)
parser.add_argument(
'--test_duration_secs',
help='number of seconds to run the stress test',
default=-1, type=int)
parser.add_argument(
'--num_channels_per_server',
help='number of channels per server',
default=1, type=int)
parser.add_argument(
'--num_stubs_per_channel',
help='number of stubs to create per channel',
default=1, type=int)
parser.add_argument(
'--metrics_port',
help='the port to listen for metrics requests on',
default=8081, type=int)
return parser.parse_args()
def _test_case_from_arg(test_case_arg):
for test_case in methods.TestCase:
if test_case_arg == test_case.value:
return test_case
else:
raise ValueError('No test case {}!'.format(test_case_arg))
def _parse_weighted_test_cases(test_case_args):
weighted_test_cases = {}
for test_case_arg in test_case_args.split(','):
name, weight = test_case_arg.split(':', 1)
test_case = _test_case_from_arg(name)
weighted_test_cases[test_case] = int(weight)
return weighted_test_cases
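# Illustrative example (flag value assumed): passing
#   --test_cases=large_unary:70,empty_unary:30
# produces a dict mapping the matching methods.TestCase members to the integer
# weights 70 and 30, which each TestRunner uses to pick test cases proportionally.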
def run_test(args):
test_cases = _parse_weighted_test_cases(args.test_cases)
test_servers = args.server_addresses.split(',')
# Propagate any client exceptions with a queue
exception_queue = queue.Queue()
stop_event = threading.Event()
hist = histogram.Histogram(1, 1)
runners = []
server = metrics_pb2.beta_create_MetricsService_server(
metrics_server.MetricsServer(hist))
server.add_insecure_port('[::]:{}'.format(args.metrics_port))
server.start()
for test_server in test_servers:
host, port = test_server.split(':', 1)
for _ in xrange(args.num_channels_per_server):
channel = implementations.insecure_channel(host, int(port))
for _ in xrange(args.num_stubs_per_channel):
stub = test_pb2.beta_create_TestService_stub(channel)
runner = test_runner.TestRunner(stub, test_cases, hist,
exception_queue, stop_event)
runners.append(runner)
for runner in runners:
runner.start()
try:
timeout_secs = args.test_duration_secs
if timeout_secs < 0:
timeout_secs = None
raise exception_queue.get(block=True, timeout=timeout_secs)
except queue.Empty:
# No exceptions thrown, success
pass
finally:
stop_event.set()
for runner in runners:
runner.join()
runner = None
server.stop(0)
if __name__ == '__main__':
run_test(_args())
| tengyifei/grpc | src/python/grpcio_tests/tests/stress/client.py | Python | bsd-3-clause | 4,801 |
from __future__ import print_function
import unittest
import numpy as np
import pydrake
import os.path
class TestRBTCoM(unittest.TestCase):
def testCoM0(self):
r = pydrake.rbtree.RigidBodyTree(os.path.join(pydrake.getDrakePath(),
"examples/Pendulum/Pendulum.urdf"))
kinsol = r.doKinematics(np.zeros((7, 1)), np.zeros((7, 1)))
c = r.centerOfMass(kinsol)
self.assertTrue(np.allclose(c.flat, [0.0, 0.0, -0.2425], atol=1e-4))
def testCoMJacobian(self):
r = pydrake.rbtree.RigidBodyTree(os.path.join(pydrake.getDrakePath(),
"examples/Pendulum/Pendulum.urdf"))
q = r.getRandomConfiguration()
kinsol = r.doKinematics(q, np.zeros((7, 1)))
J = r.centerOfMassJacobian(kinsol)
self.assertTrue(np.shape(J) == (3, 7))
q = r.getZeroConfiguration()
kinsol = r.doKinematics(q, np.zeros((7, 1)))
J = r.centerOfMassJacobian(kinsol)
self.assertTrue(np.allclose(J.flat, [1., 0., 0., 0., -0.2425, 0., -0.25,
0., 1., 0., 0.2425, 0., 0., 0.,
0., 0., 1., 0., 0., 0., 0.], atol=1e-4))
if __name__ == '__main__':
unittest.main()
| billhoffman/drake | drake/bindings/python/pydrake/test/testRBTCoM.py | Python | bsd-3-clause | 1,228 |
import unittest
from mock import Mock
from biicode.common.model.content import Content
from biicode.common.model.content import ContentDeserializer
from biicode.common.model.content import content_diff
from biicode.common.exception import BiiSerializationException
from biicode.common.model.id import ID
class ContentTest(unittest.TestCase):
def test_deserialize_exception(self):
self.assertRaises(BiiSerializationException,
ContentDeserializer(ID((0, 0, 0))).deserialize,
"wrong object")
self.assertIsNone(ContentDeserializer(ID).deserialize(None))
def test_content_diff(self):
content_load1 = Mock()
content_load2 = Mock()
content_load1.is_binary = Mock(return_value=True)
self.assertEquals(content_diff(content_load1, content_load2),
"Unable to diff binary contents of base")
content_load1.is_binary = Mock(return_value=False)
content_load2.is_binary = Mock(return_value=True)
self.assertEquals(content_diff(content_load1, content_load2),
"Unable to diff binary contents of base")
def test_content_similarity(self):
content = Content(ID((0, 0, 0)), load=None)
self.assertEquals(content.similarity(content), 1)
| zhangf911/common | test/model/content_test.py | Python | mit | 1,322 |
from kolibri.auth.api import KolibriAuthPermissions, KolibriAuthPermissionsFilter
from kolibri.content.api import OptionalPageNumberPagination
from rest_framework import filters, viewsets
from .models import ContentRatingLog, ContentSessionLog, ContentSummaryLog, UserSessionLog
from .serializers import ContentRatingLogSerializer, ContentSessionLogSerializer, ContentSummaryLogSerializer, UserSessionLogSerializer
class ContentSessionLogFilter(filters.FilterSet):
class Meta:
model = ContentSessionLog
fields = ['user_id', 'content_id']
class ContentSessionLogViewSet(viewsets.ModelViewSet):
permission_classes = (KolibriAuthPermissions,)
filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)
queryset = ContentSessionLog.objects.all()
serializer_class = ContentSessionLogSerializer
pagination_class = OptionalPageNumberPagination
filter_class = ContentSessionLogFilter
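# Illustrative request (URL prefix assumed; the actual path comes from the API
# router configuration):
#     GET /api/contentsessionlog/?user_id=<user_id>&content_id=<content_id>
# is narrowed by ContentSessionLogFilter and, via KolibriAuthPermissionsFilter,
# restricted to log records the requesting user is allowed to read.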
class ContentSummaryFilter(filters.FilterSet):
class Meta:
model = ContentSummaryLog
fields = ['user_id', 'content_id']
class ContentSummaryLogViewSet(viewsets.ModelViewSet):
permission_classes = (KolibriAuthPermissions,)
filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)
queryset = ContentSummaryLog.objects.all()
serializer_class = ContentSummaryLogSerializer
pagination_class = OptionalPageNumberPagination
filter_class = ContentSummaryFilter
class ContentRatingLogViewSet(viewsets.ModelViewSet):
permission_classes = (KolibriAuthPermissions,)
filter_backends = (KolibriAuthPermissionsFilter,)
queryset = ContentRatingLog.objects.all()
serializer_class = ContentRatingLogSerializer
pagination_class = OptionalPageNumberPagination
class UserSessionLogViewSet(viewsets.ModelViewSet):
permission_classes = (KolibriAuthPermissions,)
filter_backends = (KolibriAuthPermissionsFilter,)
queryset = UserSessionLog.objects.all()
serializer_class = UserSessionLogSerializer
pagination_class = OptionalPageNumberPagination
| ralphiee22/kolibri | kolibri/logger/api.py | Python | mit | 2,079 |
# -*- coding: utf-8 -*-
# =================================================================
#
# Authors: Tom Kralidis <[email protected]>
#
# Copyright (c) 2015 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import logging
from pycsw.core import util
from pycsw.core.etree import etree
LOGGER = logging.getLogger(__name__)
class OAIPMH(object):
"""OAI-PMH wrapper class"""
def __init__(self, context, config):
LOGGER.debug('Initializing OAI-PMH constants')
self.oaipmh_version = '2.0'
self.namespaces = {
'oai': 'http://www.openarchives.org/OAI/2.0/',
'oai_dc': 'http://www.openarchives.org/OAI/2.0/oai_dc/',
'xsi': 'http://www.w3.org/2001/XMLSchema-instance'
}
self.request_model = {
'Identify': [],
'ListSets': ['resumptiontoken'],
'ListMetadataFormats': ['identifier'],
'GetRecord': ['identifier', 'metadataprefix'],
'ListRecords': ['from', 'until', 'set', 'resumptiontoken', 'metadataprefix'],
'ListIdentifiers': ['from', 'until', 'set', 'resumptiontoken', 'metadataprefix'],
}
self.metadata_formats = {
'iso19139': {
'namespace': 'http://www.isotc211.org/2005/gmd',
'schema': 'http://www.isotc211.org/2005/gmd/gmd.xsd',
'identifier': './/gmd:fileIdentifier/gco:CharacterString',
'dateStamp': './/gmd:dateStamp/gco:DateTime|.//gmd:dateStamp/gco:Date',
'setSpec': './/gmd:hierarchyLevel/gmd:MD_ScopeCode'
},
'csw-record': {
'namespace': 'http://www.opengis.net/cat/csw/2.0.2',
'schema': 'http://schemas.opengis.net/csw/2.0.2/record.xsd',
'identifier': './/dc:identifier',
'dateStamp': './/dct:modified',
'setSpec': './/dc:type'
},
'fgdc-std': {
'namespace': 'http://www.opengis.net/cat/csw/csdgm',
'schema': 'http://www.fgdc.gov/metadata/fgdc-std-001-1998.xsd',
'identifier': './/idinfo/datasetid',
'dateStamp': './/metainfo/metd',
'setSpec': './/dataset'
},
'oai_dc': {
'namespace': '%soai_dc/' % self.namespaces['oai'],
'schema': 'http://www.openarchives.org/OAI/2.0/oai_dc.xsd',
'identifier': './/dc:identifier',
'dateStamp': './/dct:modified',
'setSpec': './/dc:type'
},
'dif': {
'namespace': 'http://gcmd.gsfc.nasa.gov/Aboutus/xml/dif/',
'schema': 'http://gcmd.gsfc.nasa.gov/Aboutus/xml/dif/dif.xsd',
'identifier': './/dif:Entry_ID',
'dateStamp': './/dif:Last_DIF_Revision_Date',
'setSpec': '//dataset'
},
'gm03': {
'namespace': 'http://www.interlis.ch/INTERLIS2.3',
'schema': 'http://www.geocat.ch/internet/geocat/en/home/documentation/gm03.parsys.50316.downloadList.86742.DownloadFile.tmp/gm0321.zip',
'identifier': './/gm03:DATASECTION//gm03:fileIdentifer',
'dateStamp': './/gm03:DATASECTION//gm03:dateStamp',
'setSpec': './/dataset'
}
}
self.metadata_sets = {
'datasets': ('Datasets', 'dataset'),
'interactiveResources': ('Interactive Resources', 'service')
}
self.error_codes = {
'badArgument': 'InvalidParameterValue',
'badVerb': 'OperationNotSupported',
'idDoesNotExist': None,
'noRecordsMatch': None,
}
self.context = context
self.context.namespaces.update(self.namespaces)
self.context.namespaces.update({'gco': 'http://www.isotc211.org/2005/gco'})
self.config = config
def request(self, kvp):
"""process OAI-PMH request"""
kvpout = {'service': 'CSW', 'version': '2.0.2', 'mode': 'oaipmh'}
LOGGER.debug('Incoming kvp: %s', kvp)
if 'verb' in kvp:
if 'metadataprefix' in kvp:
self.metadata_prefix = kvp['metadataprefix']
try:
kvpout['outputschema'] = self._get_metadata_prefix(kvp['metadataprefix'])
except KeyError:
kvpout['outputschema'] = kvp['metadataprefix']
else:
self.metadata_prefix = 'csw-record'
LOGGER.debug('metadataPrefix: %s', self.metadata_prefix)
if kvp['verb'] in ['ListRecords', 'ListIdentifiers', 'GetRecord']:
kvpout['request'] = 'GetRecords'
kvpout['resulttype'] = 'results'
kvpout['typenames'] = 'csw:Record'
kvpout['elementsetname'] = 'full'
if kvp['verb'] in ['Identify', 'ListMetadataFormats', 'ListSets']:
kvpout['request'] = 'GetCapabilities'
elif kvp['verb'] == 'GetRecord':
kvpout['request'] = 'GetRecordById'
if 'identifier' in kvp:
kvpout['id'] = kvp['identifier']
if ('outputschema' in kvpout and
kvp['metadataprefix'] == 'oai_dc'): # just use default DC
del kvpout['outputschema']
elif kvp['verb'] in ['ListRecords', 'ListIdentifiers']:
if 'resumptiontoken' in kvp:
kvpout['startposition'] = kvp['resumptiontoken']
if ('outputschema' in kvpout and
kvp['verb'] == 'ListIdentifiers'): # simple output only
pass #del kvpout['outputschema']
if ('outputschema' in kvpout and
kvp['metadataprefix'] in ['dc', 'oai_dc']): # just use default DC
del kvpout['outputschema']
start = end = None
LOGGER.debug('Scanning temporal parameters')
if 'from' in kvp:
start = 'dc:date >= %s' % kvp['from']
if 'until' in kvp:
end = 'dc:date <= %s' % kvp['until']
if any([start is not None, end is not None]):
if all([start is not None, end is not None]):
time_query = '%s and %s' % (start, end)
elif end is None:
time_query = start
elif start is None:
time_query = end
kvpout['constraintlanguage'] = 'CQL_TEXT'
kvpout['constraint'] = time_query
LOGGER.debug('Resulting parameters: %s', kvpout)
return kvpout
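    # Illustrative mapping (values assumed): an incoming OAI-PMH request such as
    #     verb=ListRecords&metadataPrefix=oai_dc&from=2015-01-01
    # comes back from request() roughly as
    #     {'service': 'CSW', 'version': '2.0.2', 'mode': 'oaipmh',
    #      'request': 'GetRecords', 'resulttype': 'results',
    #      'typenames': 'csw:Record', 'elementsetname': 'full',
    #      'constraintlanguage': 'CQL_TEXT', 'constraint': 'dc:date >= 2015-01-01'}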
def response(self, response, kvp, repository, server_url):
"""process OAI-PMH request"""
mode = kvp.pop('mode', None)
if 'config' in kvp:
config_val = kvp.pop('config')
url = '%smode=oaipmh' % util.bind_url(server_url)
node = etree.Element(util.nspath_eval('oai:OAI-PMH', self.namespaces), nsmap=self.namespaces)
node.set(util.nspath_eval('xsi:schemaLocation', self.namespaces), '%s http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd' % self.namespaces['oai'])
LOGGER.debug(etree.tostring(node))
etree.SubElement(node, util.nspath_eval('oai:responseDate', self.namespaces)).text = util.get_today_and_now()
etree.SubElement(node, util.nspath_eval('oai:request', self.namespaces), attrib=kvp).text = url
if 'verb' not in kvp:
etree.SubElement(node, util.nspath_eval('oai:error', self.namespaces), code='badArgument').text = 'Missing \'verb\' parameter'
return node
if kvp['verb'] not in self.request_model.keys():
etree.SubElement(node, util.nspath_eval('oai:error', self.namespaces), code='badArgument').text = 'Unknown verb \'%s\'' % kvp['verb']
return node
if etree.QName(response).localname == 'ExceptionReport':
etree.SubElement(node, util.nspath_eval('oai:error', self.namespaces), code='badArgument').text = response.xpath('//ows:ExceptionText|//ows20:ExceptionText', namespaces=self.context.namespaces)[0].text
return node
verb = kvp.pop('verb')
if verb in ['GetRecord', 'ListIdentifiers', 'ListRecords']:
if 'metadataprefix' not in kvp:
etree.SubElement(node, util.nspath_eval('oai:error', self.namespaces), code='badArgument').text = 'Missing metadataPrefix parameter'
return node
elif kvp['metadataprefix'] not in self.metadata_formats.keys():
etree.SubElement(node, util.nspath_eval('oai:error', self.namespaces), code='badArgument').text = 'Invalid metadataPrefix parameter'
return node
for key, value in kvp.items():
if key != 'mode' and key not in self.request_model[verb]:
etree.SubElement(node, util.nspath_eval('oai:error', self.namespaces), code='badArgument').text = 'Illegal parameter \'%s\'' % key
return node
verbnode = etree.SubElement(node, util.nspath_eval('oai:%s' % verb, self.namespaces))
if verb == 'Identify':
etree.SubElement(verbnode, util.nspath_eval('oai:repositoryName', self.namespaces)).text = self.config.get('metadata:main', 'identification_title')
etree.SubElement(verbnode, util.nspath_eval('oai:baseURL', self.namespaces)).text = url
etree.SubElement(verbnode, util.nspath_eval('oai:protocolVersion', self.namespaces)).text = '2.0'
etree.SubElement(verbnode, util.nspath_eval('oai:adminEmail', self.namespaces)).text = self.config.get('metadata:main', 'contact_email')
etree.SubElement(verbnode, util.nspath_eval('oai:earliestDatestamp', self.namespaces)).text = repository.query_insert('min')
etree.SubElement(verbnode, util.nspath_eval('oai:deletedRecord', self.namespaces)).text = 'no'
etree.SubElement(verbnode, util.nspath_eval('oai:granularity', self.namespaces)).text = 'YYYY-MM-DDThh:mm:ssZ'
elif verb == 'ListSets':
for key, value in sorted(self.metadata_sets.items()):
setnode = etree.SubElement(verbnode, util.nspath_eval('oai:set', self.namespaces))
etree.SubElement(setnode, util.nspath_eval('oai:setSpec', self.namespaces)).text = key
etree.SubElement(setnode, util.nspath_eval('oai:setName', self.namespaces)).text = value[0]
elif verb == 'ListMetadataFormats':
for key, value in sorted(self.metadata_formats.items()):
mdfnode = etree.SubElement(verbnode, util.nspath_eval('oai:metadataFormat', self.namespaces))
etree.SubElement(mdfnode, util.nspath_eval('oai:metadataPrefix', self.namespaces)).text = key
etree.SubElement(mdfnode, util.nspath_eval('oai:schema', self.namespaces)).text = value['schema']
etree.SubElement(mdfnode, util.nspath_eval('oai:metadataNamespace', self.namespaces)).text = value['namespace']
elif verb in ['GetRecord', 'ListIdentifiers', 'ListRecords']:
if verb == 'GetRecord': # GetRecordById
records = response.getchildren()
else: # GetRecords
records = response.getchildren()[1].getchildren()
for child in records:
recnode = etree.SubElement(verbnode, util.nspath_eval('oai:record', self.namespaces))
header = etree.SubElement(recnode, util.nspath_eval('oai:header', self.namespaces))
self._transform_element(header, child, 'oai:identifier')
self._transform_element(header, child, 'oai:dateStamp')
self._transform_element(header, child, 'oai:setSpec')
if verb in ['GetRecord', 'ListRecords']:
metadata = etree.SubElement(recnode, util.nspath_eval('oai:metadata', self.namespaces))
if 'metadataprefix' in kvp and kvp['metadataprefix'] == 'oai_dc':
child.tag = util.nspath_eval('oai_dc:dc', self.namespaces)
metadata.append(child)
if verb != 'GetRecord':
complete_list_size = response.xpath('//@numberOfRecordsMatched')[0]
next_record = response.xpath('//@nextRecord')[0]
cursor = str(int(complete_list_size) - int(next_record) - 1)
                etree.SubElement(verbnode, util.nspath_eval('oai:resumptionToken', self.namespaces),
                                 completeListSize=complete_list_size, cursor=cursor).text = next_record
return node
def _get_metadata_prefix(self, prefix):
"""Convenience function to return metadataPrefix as CSW outputschema"""
try:
outputschema = self.metadata_formats[prefix]['namespace']
except KeyError:
outputschema = prefix
return outputschema
def _transform_element(self, parent, element, elname):
"""tests for existence of a given xpath, writes out text if exists"""
xpath = self.metadata_formats[self.metadata_prefix][elname.split(':')[1]]
if xpath.startswith(('.//', '//')):
value = element.xpath(xpath, namespaces=self.context.namespaces)
if value:
value = value[0].text
else: # bare string literal
value = xpath
el = etree.SubElement(parent, util.nspath_eval(elname, self.context.namespaces))
if value:
if elname == 'oai:setSpec':
value = None
for k, v in self.metadata_sets.items():
if v[1] == elname:
value = k
break
el.text = value
| ricardogsilva/pycsw | pycsw/oaipmh.py | Python | mit | 15,091 |
"""
Menu utilities.
"""
from fnmatch import fnmatch
from django.utils.importlib import import_module
from django.core.urlresolvers import reverse
from wpadmin.utils import (
get_wpadmin_settings, get_admin_site, get_admin_site_name)
def get_menu_cls(menu, admin_site_name='admin'):
"""
menu - menu name ('top' or 'left')
"""
return get_wpadmin_settings(admin_site_name).get('menu', {}).get(menu, None)
def get_menu(menu, admin_site_name='admin'):
"""
menu - menu name ('top' or 'left')
"""
menu_cls = get_menu_cls(menu, admin_site_name)
if menu_cls:
mod, inst = menu_cls.rsplit('.', 1)
mod = import_module(mod)
return getattr(mod, inst)()
return None
# I had to copy (and slightly modify) those utils from django-admin-tools
# to override get_admin_site
def get_avail_models(context):
""" Returns (model, perm,) for all models user can possibly see """
items = []
admin_site = get_admin_site(context)
for model, model_admin in list(admin_site._registry.items()):
perms = model_admin.get_model_perms(context.get('request'))
if True not in list(perms.values()):
continue
items.append((model, perms,))
return items
def filter_models(context, models, exclude):
"""
Returns (model, perm,) for all models that match models/exclude patterns
    and are visible to the current user.
"""
items = get_avail_models(context)
included = []
full_name = lambda m: '%s.%s' % (m.__module__, m.__name__)
    # I believe the implemented
    # O(len(patterns)*len(matched_patterns)*len(all_models))
    # algorithm is fine for model lists because they are small and admin
    # performance is not a bottleneck. If that is not the case then the code
    # should be optimized.
if len(models) == 0:
included = items
else:
for pattern in models:
for item in items:
model, perms = item
if fnmatch(full_name(model), pattern) and item not in included:
included.append(item)
result = included[:]
for pattern in exclude:
for item in included:
model, perms = item
if fnmatch(full_name(model), pattern):
try:
result.remove(item)
except ValueError: # if the item was already removed skip
pass
return result
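# Illustrative call (patterns assumed): filter_models(context,
# ['django.contrib.auth.*'], ['*.Group']) keeps every auth model the current
# user may see except Group.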
class UserTestElementMixin(object):
"""
Mixin which adds a method for checking if current user is allowed to see
something (menu, menu item, etc.).
"""
def is_user_allowed(self, user):
"""
This method can be overwritten to check if current user can see this
element.
"""
return True
class AppListElementMixin(object):
"""
Mixin class for AppList and ModelList MenuItem.
"""
def _visible_models(self, context):
included = self.models[:]
excluded = self.exclude[:]
if excluded and not included:
included = ["*"]
return filter_models(context, included, excluded)
def _get_admin_app_list_url(self, model, context):
"""
Returns the admin change url.
"""
app_label = model._meta.app_label
return reverse('%s:app_list' % get_admin_site_name(context),
args=(app_label,))
def _get_admin_change_url(self, model, context):
"""
Returns the admin change url.
"""
app_label = model._meta.app_label
return reverse('%s:%s_%s_changelist' % (get_admin_site_name(context),
app_label,
model.__name__.lower()))
def _get_admin_add_url(self, model, context):
"""
Returns the admin add url.
"""
app_label = model._meta.app_label
return reverse('%s:%s_%s_add' % (get_admin_site_name(context),
app_label,
model.__name__.lower()))
def is_empty(self):
return len(self.children) == 0
| nwaxiomatic/django-wpadmin | wpadmin/menu/utils.py | Python | mit | 4,176 |
# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Nexenta Systems Inc.
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import email.utils
import errno
import hashlib
import mimetypes
import os
import re
import base64
import binascii
import math
from hashlib import md5
import boto.utils
from boto.compat import BytesIO, six, urllib, encodebytes
from boto.exception import BotoClientError
from boto.exception import StorageDataError
from boto.exception import PleaseRetryException
from boto.provider import Provider
from boto.s3.keyfile import KeyFile
from boto.s3.user import User
from boto import UserAgent
from boto.utils import compute_md5, compute_hash
from boto.utils import find_matching_headers
from boto.utils import merge_headers_by_name
class Key(object):
"""
Represents a key (object) in an S3 bucket.
:ivar bucket: The parent :class:`boto.s3.bucket.Bucket`.
:ivar name: The name of this Key object.
:ivar metadata: A dictionary containing user metadata that you
wish to store with the object or that has been retrieved from
an existing object.
:ivar cache_control: The value of the `Cache-Control` HTTP header.
:ivar content_type: The value of the `Content-Type` HTTP header.
:ivar content_encoding: The value of the `Content-Encoding` HTTP header.
:ivar content_disposition: The value of the `Content-Disposition` HTTP
header.
:ivar content_language: The value of the `Content-Language` HTTP header.
:ivar etag: The `etag` associated with this object.
:ivar last_modified: The string timestamp representing the last
time this object was modified in S3.
:ivar owner: The ID of the owner of this object.
:ivar storage_class: The storage class of the object. Currently, one of:
STANDARD | REDUCED_REDUNDANCY | GLACIER
:ivar md5: The MD5 hash of the contents of the object.
:ivar size: The size, in bytes, of the object.
:ivar version_id: The version ID of this object, if it is a versioned
object.
:ivar encrypted: Whether the object is encrypted while at rest on
the server.
"""
DefaultContentType = 'application/octet-stream'
RestoreBody = """<?xml version="1.0" encoding="UTF-8"?>
<RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Days>%s</Days>
</RestoreRequest>"""
BufferSize = boto.config.getint('Boto', 'key_buffer_size', 8192)
# The object metadata fields a user can set, other than custom metadata
# fields (i.e., those beginning with a provider-specific prefix like
# x-amz-meta).
base_user_settable_fields = set(["cache-control", "content-disposition",
"content-encoding", "content-language",
"content-md5", "content-type",
"x-robots-tag", "expires"])
_underscore_base_user_settable_fields = set()
for f in base_user_settable_fields:
_underscore_base_user_settable_fields.add(f.replace('-', '_'))
# Metadata fields, whether user-settable or not, other than custom
# metadata fields (i.e., those beginning with a provider specific prefix
# like x-amz-meta).
base_fields = (base_user_settable_fields |
set(["last-modified", "content-length", "date", "etag"]))
def __init__(self, bucket=None, name=None):
self.bucket = bucket
self.name = name
self.metadata = {}
self.cache_control = None
self.content_type = self.DefaultContentType
self.content_encoding = None
self.content_disposition = None
self.content_language = None
self.filename = None
self.etag = None
self.is_latest = False
self.last_modified = None
self.owner = None
self._storage_class = None
self.path = None
self.resp = None
self.mode = None
self.size = None
self.version_id = None
self.source_version_id = None
self.delete_marker = False
self.encrypted = None
# If the object is being restored, this attribute will be set to True.
# If the object is restored, it will be set to False. Otherwise this
# value will be None. If the restore is completed (ongoing_restore =
# False), the expiry_date will be populated with the expiry date of the
# restored object.
self.ongoing_restore = None
self.expiry_date = None
self.local_hashes = {}
def __repr__(self):
if self.bucket:
name = u'<Key: %s,%s>' % (self.bucket.name, self.name)
else:
name = u'<Key: None,%s>' % self.name
# Encode to bytes for Python 2 to prevent display decoding issues
if not isinstance(name, str):
name = name.encode('utf-8')
return name
def __iter__(self):
return self
@property
def provider(self):
provider = None
if self.bucket and self.bucket.connection:
provider = self.bucket.connection.provider
return provider
def _get_key(self):
return self.name
def _set_key(self, value):
self.name = value
    key = property(_get_key, _set_key)
def _get_md5(self):
if 'md5' in self.local_hashes and self.local_hashes['md5']:
return binascii.b2a_hex(self.local_hashes['md5'])
def _set_md5(self, value):
if value:
self.local_hashes['md5'] = binascii.a2b_hex(value)
elif 'md5' in self.local_hashes:
self.local_hashes.pop('md5', None)
    md5 = property(_get_md5, _set_md5)
def _get_base64md5(self):
if 'md5' in self.local_hashes and self.local_hashes['md5']:
md5 = self.local_hashes['md5']
if not isinstance(md5, bytes):
md5 = md5.encode('utf-8')
return binascii.b2a_base64(md5).decode('utf-8').rstrip('\n')
def _set_base64md5(self, value):
if value:
if not isinstance(value, six.string_types):
value = value.decode('utf-8')
self.local_hashes['md5'] = binascii.a2b_base64(value)
elif 'md5' in self.local_hashes:
del self.local_hashes['md5']
    base64md5 = property(_get_base64md5, _set_base64md5)
def _get_storage_class(self):
if self._storage_class is None and self.bucket:
# Attempt to fetch storage class
list_items = list(self.bucket.list(self.name.encode('utf-8')))
if len(list_items) and getattr(list_items[0], '_storage_class',
None):
self._storage_class = list_items[0]._storage_class
else:
# Key is not yet saved? Just use default...
self._storage_class = 'STANDARD'
return self._storage_class
def _set_storage_class(self, value):
self._storage_class = value
storage_class = property(_get_storage_class, _set_storage_class)
def get_md5_from_hexdigest(self, md5_hexdigest):
"""
A utility function to create the 2-tuple (md5hexdigest, base64md5)
from just having a precalculated md5_hexdigest.
"""
digest = binascii.unhexlify(md5_hexdigest)
base64md5 = encodebytes(digest)
if base64md5[-1] == '\n':
base64md5 = base64md5[0:-1]
return (md5_hexdigest, base64md5)
def handle_encryption_headers(self, resp):
provider = self.bucket.connection.provider
if provider.server_side_encryption_header:
self.encrypted = resp.getheader(
provider.server_side_encryption_header, None)
else:
self.encrypted = None
def handle_storage_class_header(self, resp):
provider = self.bucket.connection.provider
if provider.storage_class_header:
self._storage_class = resp.getheader(
provider.storage_class_header, None)
if (self._storage_class is None and
provider.get_provider_name() == 'aws'):
# S3 docs for HEAD object requests say S3 will return this
# header for all objects except Standard storage class objects.
self._storage_class = 'STANDARD'
def handle_version_headers(self, resp, force=False):
provider = self.bucket.connection.provider
# If the Key object already has a version_id attribute value, it
# means that it represents an explicit version and the user is
# doing a get_contents_*(version_id=<foo>) to retrieve another
# version of the Key. In that case, we don't really want to
# overwrite the version_id in this Key object. Comprende?
if self.version_id is None or force:
self.version_id = resp.getheader(provider.version_id, None)
self.source_version_id = resp.getheader(provider.copy_source_version_id,
None)
if resp.getheader(provider.delete_marker, 'false') == 'true':
self.delete_marker = True
else:
self.delete_marker = False
def handle_restore_headers(self, response):
provider = self.bucket.connection.provider
header = response.getheader(provider.restore_header)
if header is None:
return
parts = header.split(',', 1)
for part in parts:
key, val = [i.strip() for i in part.split('=')]
val = val.replace('"', '')
if key == 'ongoing-request':
self.ongoing_restore = True if val.lower() == 'true' else False
elif key == 'expiry-date':
self.expiry_date = val
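    # For reference, the restore header parsed above looks roughly like
    # (value assumed):
    #     x-amz-restore: ongoing-request="false",
    #                    expiry-date="Fri, 23 Dec 2012 00:00:00 GMT"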
def handle_addl_headers(self, headers):
"""
Used by Key subclasses to do additional, provider-specific
processing of response headers. No-op for this base class.
"""
pass
def open_read(self, headers=None, query_args='',
override_num_retries=None, response_headers=None):
"""
Open this key for reading
:type headers: dict
:param headers: Headers to pass in the web request
:type query_args: string
:param query_args: Arguments to pass in the query string
(ie, 'torrent')
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
if self.resp is None:
self.mode = 'r'
provider = self.bucket.connection.provider
self.resp = self.bucket.connection.make_request(
'GET', self.bucket.name, self.name, headers,
query_args=query_args,
override_num_retries=override_num_retries)
if self.resp.status < 199 or self.resp.status > 299:
body = self.resp.read()
raise provider.storage_response_error(self.resp.status,
self.resp.reason, body)
response_headers = self.resp.msg
self.metadata = boto.utils.get_aws_metadata(response_headers,
provider)
for name, value in response_headers.items():
# To get correct size for Range GETs, use Content-Range
# header if one was returned. If not, use Content-Length
# header.
if (name.lower() == 'content-length' and
'Content-Range' not in response_headers):
self.size = int(value)
elif name.lower() == 'content-range':
end_range = re.sub('.*/(.*)', '\\1', value)
self.size = int(end_range)
elif name.lower() in Key.base_fields:
self.__dict__[name.lower().replace('-', '_')] = value
self.handle_version_headers(self.resp)
self.handle_encryption_headers(self.resp)
self.handle_restore_headers(self.resp)
self.handle_addl_headers(self.resp.getheaders())
def open_write(self, headers=None, override_num_retries=None):
"""
Open this key for writing.
Not yet implemented
:type headers: dict
:param headers: Headers to pass in the write request
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying PUT.
"""
raise BotoClientError('Not Implemented')
def open(self, mode='r', headers=None, query_args=None,
override_num_retries=None):
if mode == 'r':
self.mode = 'r'
self.open_read(headers=headers, query_args=query_args,
override_num_retries=override_num_retries)
elif mode == 'w':
self.mode = 'w'
self.open_write(headers=headers,
override_num_retries=override_num_retries)
else:
raise BotoClientError('Invalid mode: %s' % mode)
closed = False
def close(self, fast=False):
"""
Close this key.
:type fast: bool
:param fast: True if you want the connection to be closed without first
reading the content. This should only be used in cases where subsequent
calls don't need to return the content from the open HTTP connection.
Note: As explained at
http://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.getresponse,
callers must read the whole response before sending a new request to the
server. Calling Key.close(fast=True) and making a subsequent request to
the server will work because boto will get an httplib exception and
close/reopen the connection.
"""
if self.resp and not fast:
self.resp.read()
self.resp = None
self.mode = None
self.closed = True
def next(self):
"""
By providing a next method, the key object supports use as an iterator.
For example, you can now say:
for bytes in key:
write bytes to a file or whatever
All of the HTTP connection stuff is handled for you.
"""
self.open_read()
data = self.resp.read(self.BufferSize)
if not data:
self.close()
raise StopIteration
return data
# Python 3 iterator support
__next__ = next
def read(self, size=0):
self.open_read()
if size == 0:
data = self.resp.read()
else:
data = self.resp.read(size)
if not data:
self.close()
return data
def change_storage_class(self, new_storage_class, dst_bucket=None,
validate_dst_bucket=True):
"""
Change the storage class of an existing key.
Depending on whether a different destination bucket is supplied
or not, this will either move the item within the bucket, preserving
        all metadata and ACL info but changing the storage class, or it
will copy the item to the provided destination bucket, also
preserving metadata and ACL info.
:type new_storage_class: string
:param new_storage_class: The new storage class for the Key.
Possible values are:
* STANDARD
* REDUCED_REDUNDANCY
:type dst_bucket: string
:param dst_bucket: The name of a destination bucket. If not
provided the current bucket of the key will be used.
:type validate_dst_bucket: bool
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
"""
bucket_name = dst_bucket or self.bucket.name
if new_storage_class == 'STANDARD':
return self.copy(bucket_name, self.name,
reduced_redundancy=False, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
elif new_storage_class == 'REDUCED_REDUNDANCY':
return self.copy(bucket_name, self.name,
reduced_redundancy=True, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
else:
raise BotoClientError('Invalid storage class: %s' %
new_storage_class)
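    # Illustrative usage sketch (not part of the original boto source; bucket
    # and key names are hypothetical): switch an existing object to Reduced
    # Redundancy Storage in place.
    #
    #     key = bucket.get_key('logs/2014-01-01.gz')
    #     key.change_storage_class('REDUCED_REDUNDANCY')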
def copy(self, dst_bucket, dst_key, metadata=None,
reduced_redundancy=False, preserve_acl=False,
encrypt_key=False, validate_dst_bucket=True):
"""
Copy this Key to another bucket.
:type dst_bucket: string
:param dst_bucket: The name of the destination bucket
:type dst_key: string
:param dst_key: The name of the destination key
:type metadata: dict
:param metadata: Metadata to be associated with new key. If
metadata is supplied, it will replace the metadata of the
source key being copied. If no metadata is supplied, the
source key's metadata will be copied to the new key.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will force the
storage class of the new Key to be REDUCED_REDUNDANCY
regardless of the storage class of the key being copied.
The Reduced Redundancy Storage (RRS) feature of S3,
provides lower redundancy at lower storage cost.
:type preserve_acl: bool
:param preserve_acl: If True, the ACL from the source key will
be copied to the destination key. If False, the
destination key will have the default ACL. Note that
preserving the ACL in the new key object will require two
additional API calls to S3, one to retrieve the current
ACL and one to set that ACL on the new object. If you
don't care about the ACL, a value of False will be
significantly more efficient.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:type validate_dst_bucket: bool
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
:rtype: :class:`boto.s3.key.Key` or subclass
:returns: An instance of the newly created key object
"""
dst_bucket = self.bucket.connection.lookup(dst_bucket,
validate_dst_bucket)
if reduced_redundancy:
storage_class = 'REDUCED_REDUNDANCY'
else:
storage_class = self.storage_class
return dst_bucket.copy_key(dst_key, self.bucket.name,
self.name, metadata,
storage_class=storage_class,
preserve_acl=preserve_acl,
encrypt_key=encrypt_key,
src_version_id=self.version_id)
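    # Illustrative usage sketch (not part of the original boto source; bucket
    # and key names are hypothetical): copy runs server-side, so the object
    # data is never downloaded and re-uploaded by the client.
    #
    #     key = conn.get_bucket('src-bucket').get_key('data.csv')
    #     backup = key.copy('dest-bucket', 'backups/data.csv',
    #                       preserve_acl=True, encrypt_key=True)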
def startElement(self, name, attrs, connection):
if name == 'Owner':
self.owner = User(self)
return self.owner
else:
return None
def endElement(self, name, value, connection):
if name == 'Key':
self.name = value
elif name == 'ETag':
self.etag = value
elif name == 'IsLatest':
if value == 'true':
self.is_latest = True
else:
self.is_latest = False
elif name == 'LastModified':
self.last_modified = value
elif name == 'Size':
self.size = int(value)
elif name == 'StorageClass':
self.storage_class = value
elif name == 'Owner':
pass
elif name == 'VersionId':
self.version_id = value
else:
setattr(self, name, value)
def exists(self, headers=None):
"""
Returns True if the key exists
:rtype: bool
:return: Whether the key exists on S3
"""
return bool(self.bucket.lookup(self.name, headers=headers))
def delete(self, headers=None):
"""
Delete this key from S3
"""
return self.bucket.delete_key(self.name, version_id=self.version_id,
headers=headers)
def get_metadata(self, name):
return self.metadata.get(name)
def set_metadata(self, name, value):
# Ensure that metadata that is vital to signing is in the correct
# case. Applies to ``Content-Type`` & ``Content-MD5``.
if name.lower() == 'content-type':
self.metadata['Content-Type'] = value
elif name.lower() == 'content-md5':
self.metadata['Content-MD5'] = value
else:
self.metadata[name] = value
if name.lower() in Key.base_user_settable_fields:
self.__dict__[name.lower().replace('-', '_')] = value
def update_metadata(self, d):
self.metadata.update(d)
# convenience methods for setting/getting ACL
def set_acl(self, acl_str, headers=None):
if self.bucket is not None:
self.bucket.set_acl(acl_str, self.name, headers=headers)
def get_acl(self, headers=None):
if self.bucket is not None:
return self.bucket.get_acl(self.name, headers=headers)
def get_xml_acl(self, headers=None):
if self.bucket is not None:
return self.bucket.get_xml_acl(self.name, headers=headers)
def set_xml_acl(self, acl_str, headers=None):
if self.bucket is not None:
return self.bucket.set_xml_acl(acl_str, self.name, headers=headers)
def set_canned_acl(self, acl_str, headers=None):
return self.bucket.set_canned_acl(acl_str, self.name, headers)
def get_redirect(self):
"""Return the redirect location configured for this key.
If no redirect is configured (via set_redirect), then None
will be returned.
"""
response = self.bucket.connection.make_request(
'HEAD', self.bucket.name, self.name)
if response.status == 200:
return response.getheader('x-amz-website-redirect-location')
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def set_redirect(self, redirect_location, headers=None):
"""Configure this key to redirect to another location.
When the bucket associated with this key is accessed from the website
endpoint, a 301 redirect will be issued to the specified
`redirect_location`.
:type redirect_location: string
:param redirect_location: The location to redirect.
"""
if headers is None:
headers = {}
else:
headers = headers.copy()
headers['x-amz-website-redirect-location'] = redirect_location
response = self.bucket.connection.make_request('PUT', self.bucket.name,
self.name, headers)
if response.status == 200:
return True
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def make_public(self, headers=None):
return self.bucket.set_canned_acl('public-read', self.name, headers)
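    # Illustrative usage sketch (not part of the original boto source; bucket
    # and key names are hypothetical): publish a key on a website-enabled
    # bucket and point an old path at it via a redirect object.
    #
    #     page = bucket.get_key('docs/index.html')
    #     page.make_public()
    #     alias = bucket.new_key('docs/old-index.html')
    #     alias.set_redirect('/docs/index.html')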
def generate_url(self, expires_in, method='GET', headers=None,
query_auth=True, force_http=False, response_headers=None,
expires_in_absolute=False, version_id=None,
policy=None, reduced_redundancy=False, encrypt_key=False):
"""
Generate a URL to access this key.
:type expires_in: int
:param expires_in: How long the url is valid for, in seconds.
:type method: string
:param method: The method to use for retrieving the file
(default is GET).
:type headers: dict
:param headers: Any headers to pass along in the request.
:type query_auth: bool
:param query_auth: If True, signs the request in the URL.
:type force_http: bool
:param force_http: If True, http will be used instead of https.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type expires_in_absolute: bool
        :param expires_in_absolute: If True, the value passed as 'expires_in'
            is treated as an absolute expiration timestamp (seconds since the
            epoch) rather than a number of seconds from now.
:type version_id: string
:param version_id: The version_id of the object to GET. If specified
this overrides any value in the key.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:rtype: string
:return: The URL to access the key
"""
provider = self.bucket.connection.provider
version_id = version_id or self.version_id
if headers is None:
headers = {}
else:
headers = headers.copy()
# add headers accordingly (usually PUT case)
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
return self.bucket.connection.generate_url(expires_in, method,
self.bucket.name, self.name,
headers, query_auth,
force_http,
response_headers,
expires_in_absolute,
version_id)
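    # Illustrative usage sketch (not part of the original boto source; bucket
    # and key names are hypothetical): create a signed URL granting temporary
    # read access to a private object without sharing AWS credentials.
    #
    #     key = bucket.get_key('private/report.pdf')
    #     url = key.generate_url(expires_in=3600)  # valid for one hour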
def send_file(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None):
"""
Upload a file to a key into a bucket on S3.
:type fp: file
:param fp: The file pointer to upload. The file pointer must
point at the offset from which you wish to upload.
ie. if uploading the full file, it should point at the
start of the file. Normally when a file is opened for
reading, the fp will point at the first byte. See the
bytes parameter below for more info.
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file
transfer. Providing a negative integer will cause your
callback to be called with each buffer read.
:type query_args: string
:param query_args: (optional) Arguments to pass in the query string.
:type chunked_transfer: boolean
:param chunked_transfer: (optional) If true, we use chunked
Transfer-Encoding.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
"""
self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size)
def _send_file_internal(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None,
hash_algs=None):
provider = self.bucket.connection.provider
try:
spos = fp.tell()
except IOError:
spos = None
self.read_from_stream = False
# If hash_algs is unset and the MD5 hasn't already been computed,
# default to an MD5 hash_alg to hash the data on-the-fly.
if hash_algs is None and not self.md5:
hash_algs = {'md5': md5}
digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
def sender(http_conn, method, path, data, headers):
# This function is called repeatedly for temporary retries
# so we must be sure the file pointer is pointing at the
# start of the data.
if spos is not None and spos != fp.tell():
fp.seek(spos)
elif spos is None and self.read_from_stream:
# if seek is not supported, and we've read from this
# stream already, then we need to abort retries to
# avoid setting bad data.
raise provider.storage_data_error(
'Cannot retry failed request. fp does not support seeking.')
# If the caller explicitly specified host header, tell putrequest
# not to add a second host header. Similarly for accept-encoding.
skips = {}
if boto.utils.find_matching_headers('host', headers):
skips['skip_host'] = 1
if boto.utils.find_matching_headers('accept-encoding', headers):
skips['skip_accept_encoding'] = 1
http_conn.putrequest(method, path, **skips)
for key in headers:
http_conn.putheader(key, headers[key])
http_conn.endheaders()
save_debug = self.bucket.connection.debug
self.bucket.connection.debug = 0
# If the debuglevel < 4 we don't want to show connection
# payload, so turn off HTTP connection-level debug output (to
# be restored below).
# Use the getattr approach to allow this to work in AppEngine.
if getattr(http_conn, 'debuglevel', 0) < 4:
http_conn.set_debuglevel(0)
data_len = 0
if cb:
if size:
cb_size = size
elif self.size:
cb_size = self.size
else:
cb_size = 0
if chunked_transfer and cb_size == 0:
# For chunked Transfer, we call the cb for every 1MB
# of data transferred, except when we know size.
cb_count = (1024 * 1024) / self.BufferSize
elif num_cb > 1:
cb_count = int(
math.ceil(cb_size / self.BufferSize / (num_cb - 1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
bytes_togo = size
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
if not isinstance(chunk, bytes):
chunk = chunk.encode('utf-8')
if spos is None:
# read at least something from a non-seekable fp.
self.read_from_stream = True
while chunk:
chunk_len = len(chunk)
data_len += chunk_len
if chunked_transfer:
http_conn.send('%x;\r\n' % chunk_len)
http_conn.send(chunk)
http_conn.send('\r\n')
else:
http_conn.send(chunk)
for alg in digesters:
digesters[alg].update(chunk)
if bytes_togo:
bytes_togo -= chunk_len
if bytes_togo <= 0:
break
if cb:
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
if not isinstance(chunk, bytes):
chunk = chunk.encode('utf-8')
self.size = data_len
for alg in digesters:
self.local_hashes[alg] = digesters[alg].digest()
if chunked_transfer:
http_conn.send('0\r\n')
# http_conn.send("Content-MD5: %s\r\n" % self.base64md5)
http_conn.send('\r\n')
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
http_conn.set_debuglevel(save_debug)
self.bucket.connection.debug = save_debug
response = http_conn.getresponse()
body = response.read()
if not self.should_retry(response, chunked_transfer):
raise provider.storage_response_error(
response.status, response.reason, body)
return response
if not headers:
headers = {}
else:
headers = headers.copy()
# Overwrite user-supplied user-agent.
for header in find_matching_headers('User-Agent', headers):
del headers[header]
headers['User-Agent'] = UserAgent
# If storage_class is None, then a user has not explicitly requested
# a storage class, so we can assume STANDARD here
if self._storage_class not in [None, 'STANDARD']:
headers[provider.storage_class_header] = self.storage_class
if find_matching_headers('Content-Encoding', headers):
self.content_encoding = merge_headers_by_name(
'Content-Encoding', headers)
if find_matching_headers('Content-Language', headers):
self.content_language = merge_headers_by_name(
'Content-Language', headers)
content_type_headers = find_matching_headers('Content-Type', headers)
if content_type_headers:
# Some use cases need to suppress sending of the Content-Type
# header and depend on the receiving server to set the content
# type. This can be achieved by setting headers['Content-Type']
# to None when calling this method.
if (len(content_type_headers) == 1 and
headers[content_type_headers[0]] is None):
# Delete null Content-Type value to skip sending that header.
del headers[content_type_headers[0]]
else:
self.content_type = merge_headers_by_name(
'Content-Type', headers)
elif self.path:
self.content_type = mimetypes.guess_type(self.path)[0]
if self.content_type is None:
self.content_type = self.DefaultContentType
headers['Content-Type'] = self.content_type
else:
headers['Content-Type'] = self.content_type
if self.base64md5:
headers['Content-MD5'] = self.base64md5
if chunked_transfer:
headers['Transfer-Encoding'] = 'chunked'
#if not self.base64md5:
# headers['Trailer'] = "Content-MD5"
else:
headers['Content-Length'] = str(self.size)
# This is terrible. We need a SHA256 of the body for SigV4, but to do
# the chunked ``sender`` behavior above, the ``fp`` isn't available to
        # the auth mechanism (because closures). Detect if it's SigV4 & embellish
# while we can before the auth calculations occur.
if 'hmac-v4-s3' in self.bucket.connection._required_auth_capability():
kwargs = {'fp': fp, 'hash_algorithm': hashlib.sha256}
if size is not None:
kwargs['size'] = size
headers['_sha256'] = compute_hash(**kwargs)[0]
headers['Expect'] = '100-Continue'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
resp = self.bucket.connection.make_request(
'PUT',
self.bucket.name,
self.name,
headers,
sender=sender,
query_args=query_args
)
self.handle_version_headers(resp, force=True)
self.handle_addl_headers(resp.getheaders())
def should_retry(self, response, chunked_transfer=False):
provider = self.bucket.connection.provider
if not chunked_transfer:
if response.status in [500, 503]:
# 500 & 503 can be plain retries.
return True
if response.getheader('location'):
# If there's a redirect, plain retry.
return True
if 200 <= response.status <= 299:
self.etag = response.getheader('etag')
md5 = self.md5
if isinstance(md5, bytes):
md5 = md5.decode('utf-8')
# If you use customer-provided encryption keys, the ETag value that
# Amazon S3 returns in the response will not be the MD5 of the
# object.
server_side_encryption_customer_algorithm = response.getheader(
'x-amz-server-side-encryption-customer-algorithm', None)
if server_side_encryption_customer_algorithm is None:
if self.etag != '"%s"' % md5:
raise provider.storage_data_error(
'ETag from S3 did not match computed MD5. '
'%s vs. %s' % (self.etag, self.md5))
return True
if response.status == 400:
# The 400 must be trapped so the retry handler can check to
# see if it was a timeout.
# If ``RequestTimeout`` is present, we'll retry. Otherwise, bomb
# out.
body = response.read()
err = provider.storage_response_error(
response.status,
response.reason,
body
)
if err.error_code in ['RequestTimeout']:
raise PleaseRetryException(
"Saw %s, retrying" % err.error_code,
response=response
)
return False
def compute_md5(self, fp, size=None):
"""
:type fp: file
:param fp: File pointer to the file to MD5 hash. The file
pointer will be reset to the same position before the
method returns.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where the file is being split
            in place into different parts. Fewer bytes may be available.
"""
hex_digest, b64_digest, data_size = compute_md5(fp, size=size)
# Returned values are MD5 hash, base64 encoded MD5 hash, and data size.
# The internal implementation of compute_md5() needs to return the
# data size but we don't want to return that value to the external
# caller because it changes the class interface (i.e. it might
# break some code) so we consume the third tuple value here and
# return the remainder of the tuple to the caller, thereby preserving
# the existing interface.
self.size = data_size
return (hex_digest, b64_digest)
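    # Illustrative usage sketch (not part of the original boto source; the file
    # name is hypothetical): hash the file once and reuse the result so the
    # upload does not have to read the file a second time to compute the MD5.
    #
    #     with open('backup.tar.gz', 'rb') as fp:
    #         md5_tuple = key.compute_md5(fp)
    #         key.set_contents_from_file(fp, md5=md5_tuple, rewind=True)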
def set_contents_from_stream(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None,
reduced_redundancy=False, query_args=None,
size=None):
"""
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
        The stream object is not seekable and the total size is not known.
        This means we cannot specify the Content-Length and Content-MD5
        headers. For huge uploads this avoids the delay of calculating the
        MD5 up front, at the cost of being unable to verify the integrity
        of the uploaded data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the
PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
            been successfully transmitted to S3 and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter, this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
        :type policy: :class:`boto.s3.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the new key
            in S3.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading a
file in multiple parts where you are splitting the file up
into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
"""
provider = self.bucket.connection.provider
if not provider.supports_chunked_transfer():
raise BotoClientError('%s does not support chunked transfer'
% provider.get_provider_name())
# Name of the Object should be specified explicitly for Streams.
if not self.name or self.name == '':
raise BotoClientError('Cannot determine the destination '
'object name for the given stream')
if headers is None:
headers = {}
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if self.bucket is not None:
if not replace:
if self.bucket.lookup(self.name):
return
self.send_file(fp, headers, cb, num_cb, query_args,
chunked_transfer=True, size=size)
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False, query_args=None,
encrypt_key=False, size=None, rewind=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file pointed to by 'fp' as the
contents. The data is read from 'fp' from its current position until
'size' bytes have been read or EOF.
:type fp: file
:param fp: the file whose contents to upload
:type headers: dict
:param headers: Additional HTTP headers that will be sent with
the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will
first check to see if an object exists in the bucket with
the same key. If it does, it won't overwrite it. The
default value is True which will overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
:type rewind: bool
:param rewind: (optional) If True, the file pointer (fp) will
be rewound to the start before any bytes are read from
it. The default behaviour is False which reads from the
current position of the file pointer (fp).
:rtype: int
:return: The number of bytes written to the key.
"""
provider = self.bucket.connection.provider
headers = headers or {}
if policy:
headers[provider.acl_header] = policy
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
if rewind:
# caller requests reading from beginning of fp.
fp.seek(0, os.SEEK_SET)
else:
# The following seek/tell/seek logic is intended
# to detect applications using the older interface to
# set_contents_from_file(), which automatically rewound the
# file each time the Key was reused. This changed with commit
# 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
# split into multiple parts and uploaded in parallel, and at
# the time of that commit this check was added because otherwise
# older programs would get a success status and upload an empty
            # object. Unfortunately, it's very inefficient for fp's implemented
# by KeyFile (used, for example, by gsutil when copying between
# providers). So, we skip the check for the KeyFile case.
# TODO: At some point consider removing this seek/tell/seek
# logic, after enough time has passed that it's unlikely any
# programs remain that assume the older auto-rewind interface.
if not isinstance(fp, KeyFile):
spos = fp.tell()
fp.seek(0, os.SEEK_END)
if fp.tell() == spos:
fp.seek(0, os.SEEK_SET)
if fp.tell() != spos:
# Raise an exception as this is likely a programming
# error whereby there is data before the fp but nothing
# after it.
fp.seek(spos)
raise AttributeError('fp is at EOF. Use rewind option '
'or seek() to data start.')
# seek back to the correct position.
fp.seek(spos)
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
                # TODO - What if provider doesn't support reduced redundancy?
                # What if different providers provide different classes?
if hasattr(fp, 'name'):
self.path = fp.name
if self.bucket is not None:
if not md5 and provider.supports_chunked_transfer():
                # defer md5 calculation to be done on the fly during upload;
                # we don't know anything about the size yet.
chunked_transfer = True
self.size = None
else:
chunked_transfer = False
if isinstance(fp, KeyFile):
# Avoid EOF seek for KeyFile case as it's very inefficient.
key = fp.getkey()
size = key.size - fp.tell()
self.size = size
# At present both GCS and S3 use MD5 for the etag for
# non-multipart-uploaded objects. If the etag is 32 hex
# chars use it as an MD5, to avoid having to read the file
# twice while transferring.
if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
etag = key.etag.strip('"')
md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
if not md5:
                    # compute_md5() also sets self.size to the actual
                    # number of bytes read while computing the md5.
md5 = self.compute_md5(fp, size)
# adjust size if required
size = self.size
elif size:
self.size = size
else:
                    # If md5 is provided, we still need the size, so
                    # calculate it based on the bytes to the end of content.
spos = fp.tell()
fp.seek(0, os.SEEK_END)
self.size = fp.tell() - spos
fp.seek(spos)
size = self.size
self.md5 = md5[0]
self.base64md5 = md5[1]
if self.name is None:
self.name = self.md5
if not replace:
if self.bucket.lookup(self.name):
return
self.send_file(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size)
# return number of bytes written.
return self.size
def set_contents_from_filename(self, filename, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file named by 'filename'.
See set_contents_from_file method for details about the
parameters.
:type filename: string
:param filename: The name of the file that you want to put onto S3
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file
if it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
            redundancy at lower storage cost.
        :type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object
will be encrypted on the server-side by S3 and will be
stored in an encrypted form while at rest in S3.
:rtype: int
:return: The number of bytes written to the key.
"""
with open(filename, 'rb') as fp:
return self.set_contents_from_file(fp, headers, replace, cb,
num_cb, policy, md5,
reduced_redundancy,
encrypt_key=encrypt_key)
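    # Illustrative usage sketch (not part of the original boto source; bucket
    # and file names are hypothetical): upload a local file and request
    # server-side encryption for the stored copy.
    #
    #     key = bucket.new_key('uploads/photo.jpg')
    #     nbytes = key.set_contents_from_filename('/tmp/photo.jpg',
    #                                             encrypt_key=True)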
def set_contents_from_string(self, string_data, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the string 's' as the contents.
See set_contents_from_file method for details about the
parameters.
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file if
it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
            cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
"""
if not isinstance(string_data, bytes):
string_data = string_data.encode("utf-8")
fp = BytesIO(string_data)
r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, reduced_redundancy,
encrypt_key=encrypt_key)
fp.close()
return r
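    # Illustrative usage sketch (not part of the original boto source; bucket
    # and key names are hypothetical): store a small in-memory payload and
    # make it world-readable via a canned ACL.
    #
    #     key = bucket.new_key('notes/hello.txt')
    #     key.set_contents_from_string(u'hello world', policy='public-read')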
def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None):
"""
Retrieves a file from an S3 Key
:type fp: file
:param fp: File pointer to put the data into
        :type headers: dict
        :param headers: headers to send when retrieving the files
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: Flag for whether to get a torrent for the file
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
torrent=torrent, version_id=version_id,
override_num_retries=override_num_retries,
response_headers=response_headers,
hash_algs=None,
query_args=None)
def _get_file_internal(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None, hash_algs=None, query_args=None):
if headers is None:
headers = {}
save_debug = self.bucket.connection.debug
if self.bucket.connection.debug == 1:
self.bucket.connection.debug = 0
query_args = query_args or []
if torrent:
query_args.append('torrent')
if hash_algs is None and not torrent:
hash_algs = {'md5': md5}
digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
# If a version_id is passed in, use that. If not, check to see
# if the Key object has an explicit version_id and, if so, use that.
# Otherwise, don't pass a version_id query param.
if version_id is None:
version_id = self.version_id
if version_id:
query_args.append('versionId=%s' % version_id)
if response_headers:
for key in response_headers:
query_args.append('%s=%s' % (
key, urllib.parse.quote(response_headers[key])))
query_args = '&'.join(query_args)
self.open('r', headers, query_args=query_args,
override_num_retries=override_num_retries)
data_len = 0
if cb:
if self.size is None:
cb_size = 0
else:
cb_size = self.size
if self.size is None and num_cb != -1:
# If size is not available due to chunked transfer for example,
# we'll call the cb for every 1MB of data transferred.
cb_count = (1024 * 1024) / self.BufferSize
elif num_cb > 1:
cb_count = int(math.ceil(cb_size/self.BufferSize/(num_cb-1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
try:
for bytes in self:
fp.write(bytes)
data_len += len(bytes)
for alg in digesters:
digesters[alg].update(bytes)
if cb:
if cb_size > 0 and data_len >= cb_size:
break
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
except IOError as e:
if e.errno == errno.ENOSPC:
raise StorageDataError('Out of space for destination file '
'%s' % fp.name)
raise
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
for alg in digesters:
self.local_hashes[alg] = digesters[alg].digest()
if self.size is None and not torrent and "Range" not in headers:
self.size = data_len
self.close()
self.bucket.connection.debug = save_debug
def get_torrent_file(self, fp, headers=None, cb=None, num_cb=10):
"""
        Get a torrent file (see get_file)
:type fp: file
:param fp: The file pointer of where to put the torrent
:type headers: dict
:param headers: Headers to be passed
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
"""
return self.get_file(fp, headers, cb, num_cb, torrent=True)
def get_contents_to_file(self, fp, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Write the contents of the object to the file pointed
to by 'fp'.
:type fp: File -like object
:param fp:
:type headers: dict
:param headers: additional HTTP headers that will be sent with
the GET request.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent
file as a string.
        :type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
if self.bucket is not None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id)
else:
self.get_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
def get_contents_to_filename(self, filename, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Store contents of the object to a file named by 'filename'.
See get_contents_to_file method for details about the
parameters.
:type filename: string
:param filename: The filename of where to put the file contents
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
        :type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
try:
with open(filename, 'wb') as fp:
self.get_contents_to_file(fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id,
res_download_handler=res_download_handler,
response_headers=response_headers)
except Exception:
os.remove(filename)
raise
# if last_modified date was sent from s3, try to set file's timestamp
if self.last_modified is not None:
try:
modified_tuple = email.utils.parsedate_tz(self.last_modified)
modified_stamp = int(email.utils.mktime_tz(modified_tuple))
os.utime(fp.name, (modified_stamp, modified_stamp))
except Exception:
pass
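    # Illustrative usage sketch (not part of the original boto source; names
    # are hypothetical): download an object to disk; the method above removes
    # the partially written file if the transfer raises.
    #
    #     key = bucket.get_key('uploads/photo.jpg')
    #     key.get_contents_to_filename('/tmp/photo.jpg')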
def get_contents_as_string(self, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
response_headers=None, encoding=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Return the contents of the object as a string.
See get_contents_to_file method for details about the
parameters.
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
:type encoding: str
:param encoding: The text encoding to use, such as ``utf-8``
or ``iso-8859-1``. If set, then a string will be returned.
Defaults to ``None`` and returns bytes.
:rtype: bytes or str
:returns: The contents of the file as bytes or a string
"""
fp = BytesIO()
self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
value = fp.getvalue()
if encoding is not None:
value = value.decode(encoding)
return value
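    # Illustrative usage sketch (not part of the original boto source; names
    # are hypothetical): read a small text object straight into memory;
    # passing `encoding` returns str instead of bytes.
    #
    #     body = bucket.get_key('config.json').get_contents_as_string(
    #         encoding='utf-8')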
def add_email_grant(self, permission, email_address, headers=None):
"""
Convenience method that provides a quick way to add an email grant
to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL
and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type email_address: string
:param email_address: The email address associated with the AWS
            account you are granting the permission to.
:type recursive: boolean
:param recursive: A boolean value to controls whether the
command will apply the grant to all keys within the bucket
or not. The default value is False. By passing a True
value, the call will iterate through all keys in the
bucket and apply the same grant to each key. CAUTION: If
you have a lot of keys, this could take a long time!
"""
policy = self.get_acl(headers=headers)
policy.acl.add_email_grant(permission, email_address)
self.set_acl(policy, headers=headers)
def add_user_grant(self, permission, user_id, headers=None,
display_name=None):
"""
Convenience method that provides a quick way to add a canonical
user grant to a key. This method retrieves the current ACL,
creates a new grant based on the parameters passed in, adds that
grant to the ACL and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type user_id: string
:param user_id: The canonical user id associated with the AWS
            account you are granting the permission to.
:type display_name: string
        :param display_name: An optional string containing the user's
Display Name. Only required on Walrus.
"""
policy = self.get_acl(headers=headers)
policy.acl.add_user_grant(permission, user_id,
display_name=display_name)
self.set_acl(policy, headers=headers)
def _normalize_metadata(self, metadata):
if type(metadata) == set:
norm_metadata = set()
for k in metadata:
norm_metadata.add(k.lower())
else:
norm_metadata = {}
for k in metadata:
norm_metadata[k.lower()] = metadata[k]
return norm_metadata
def _get_remote_metadata(self, headers=None):
"""
Extracts metadata from existing URI into a dict, so we can
overwrite/delete from it to form the new set of metadata to apply to a
key.
"""
metadata = {}
for underscore_name in self._underscore_base_user_settable_fields:
if hasattr(self, underscore_name):
value = getattr(self, underscore_name)
if value:
# Generate HTTP field name corresponding to "_" named field.
field_name = underscore_name.replace('_', '-')
metadata[field_name.lower()] = value
# self.metadata contains custom metadata, which are all user-settable.
prefix = self.provider.metadata_prefix
for underscore_name in self.metadata:
field_name = underscore_name.replace('_', '-')
metadata['%s%s' % (prefix, field_name.lower())] = (
self.metadata[underscore_name])
return metadata
def set_remote_metadata(self, metadata_plus, metadata_minus, preserve_acl,
headers=None):
metadata_plus = self._normalize_metadata(metadata_plus)
metadata_minus = self._normalize_metadata(metadata_minus)
metadata = self._get_remote_metadata()
metadata.update(metadata_plus)
for h in metadata_minus:
if h in metadata:
del metadata[h]
src_bucket = self.bucket
# Boto prepends the meta prefix when adding headers, so strip prefix in
# metadata before sending back in to copy_key() call.
rewritten_metadata = {}
for h in metadata:
if (h.startswith('x-goog-meta-') or h.startswith('x-amz-meta-')):
rewritten_h = (h.replace('x-goog-meta-', '')
.replace('x-amz-meta-', ''))
else:
rewritten_h = h
rewritten_metadata[rewritten_h] = metadata[h]
metadata = rewritten_metadata
src_bucket.copy_key(self.name, self.bucket.name, self.name,
metadata=metadata, preserve_acl=preserve_acl,
headers=headers)
def restore(self, days, headers=None):
"""Restore an object from an archive.
:type days: int
:param days: The lifetime of the restored object (must
be at least 1 day). If the object is already restored
then this parameter can be used to readjust the lifetime
of the restored object. In this case, the days
param is with respect to the initial time of the request.
If the object has not been restored, this param is with
respect to the completion time of the request.
"""
response = self.bucket.connection.make_request(
'POST', self.bucket.name, self.name,
data=self.RestoreBody % days,
headers=headers, query_args='restore')
if response.status not in (200, 202):
provider = self.bucket.connection.provider
raise provider.storage_response_error(response.status,
response.reason,
response.read())
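    # Illustrative usage sketch (not part of the original boto source; names
    # are hypothetical): request a temporary restore of an object archived to
    # Glacier, keeping the restored copy available for 7 days.
    #
    #     key = bucket.get_key('archive/2013.tar')
    #     key.restore(days=7)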
| akashlevy/Lyff | lyff_lambda/boto/s3/key.py | Python | mit | 83,034 |
import os
import shutil
import biicode.common.test
from biicode.common.utils import file_utils as common_file_utils
def load(filepath):
"""Return binary load of given test resource."""
abspath = file_path(filepath)
with open(abspath, "rb") as f:
return f.read()
def read(filepath):
"""Return system text content of given test resource."""
abspath = file_path(filepath)
with open(abspath, "r") as f:
return f.read()
def write(file_, content):
try:
os.makedirs(os.path.split(file_)[0])
except:
pass
with open(file_, "wb") as f:
return f.write(content)
test_resources = os.path.join(os.path.dirname(biicode.common.test.__file__),
"resources/")
def append(content, dest):
with open(dest, "a") as f:
f.write(content)
def get_dir_files(path):
"""Returns a list of files within given test folder
Paths are relative to test/resources/path"""
abs_paths = common_file_utils.get_visible_files_recursive(file_path(path))
base_path = os.path.join(test_resources, path)
return [os.path.relpath(p, base_path) for p in abs_paths]
def file_path(name):
"""Return full path to given test resource. """
return os.path.join(test_resources, name)
def copyFiles(container, dest_folder, files=None):
    '''Copies files from container to dest_folder, filtering by files if provided
'''
new_files = []
if not os.path.exists(dest_folder):
os.makedirs(dest_folder)
if not files:
files = get_dir_files(container)
for f in files:
srcpath = file_path(os.path.join(container, f))
dest = os.path.join(dest_folder, f)
dst_subfolder = os.path.join(dest_folder, os.path.dirname(f))
if not os.path.isdir(dst_subfolder):
os.makedirs(dst_subfolder)
if os.path.isdir(srcpath):
shutil.copytree(srcpath, dest)
else:
shutil.copyfile(srcpath, dest)
new_files.append(dest)
return new_files
def copyFile(src, dst_folder, dst_name=None):
'''Copies src file from test/resources folder to dst_folder
renamed to dst_name if provided
'''
srcpath = file_path(src)
if not dst_name:
dst_name = os.path.split(src)[1]
if not os.path.exists(dst_folder):
os.makedirs(dst_folder)
dst = os.path.join(dst_folder, dst_name)
shutil.copyfile(srcpath, dst)
return dst
def createFile(name, dst_folder, content):
path = os.path.join(dst_folder, name)
if not os.path.exists(dst_folder):
os.makedirs(dst_folder)
with open(path, 'w+') as f:
f.write(content)
return path
def removeFolderContents(path):
'''Recursively deletes all content in given directory'''
for root, dirs, files in os.walk(path):
for f in files:
os.unlink(os.path.join(root, f))
for d in dirs:
shutil.rmtree(os.path.join(root, d))
def search_pattern_and_replace(path, pattern, replacement):
'''Performs inline search and replace in given file'''
import fileinput, re
for line in fileinput.FileInput(path, inplace=1):
line = re.sub(pattern, replacement, line)
print line, # DO NOT REMOVE THIS PRINT, it is necessary for replace to work
def copy_directory(origin, dest):
shutil.copytree(origin, dest)
return dest
import filecmp
import os.path
def are_dir_trees_equal(dir1, dir2):
"""
Compare two directories recursively. Files in each directory are
assumed to be equal if their names and contents are equal.
@param dir1: First directory path
@param dir2: Second directory path
@return: True if the directory trees are the same and
there were no errors while accessing the directories or files,
False otherwise.
"""
dirs_cmp = filecmp.dircmp(dir1, dir2)
if len(dirs_cmp.left_only) > 0 or len(dirs_cmp.right_only) > 0 or \
len(dirs_cmp.funny_files) > 0:
return False
(_, mismatch, errors) = filecmp.cmpfiles(
dir1, dir2, dirs_cmp.common_files, shallow=False)
if len(mismatch) > 0 or len(errors) > 0:
return False
for common_dir in dirs_cmp.common_dirs:
new_dir1 = os.path.join(dir1, common_dir)
new_dir2 = os.path.join(dir2, common_dir)
if not are_dir_trees_equal(new_dir1, new_dir2):
return False
return True
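# Illustrative usage sketch (not part of the original module; paths are
# hypothetical): compare a generated output tree against an expected fixture
# tree shipped under test/resources.
#
#     expected = file_path('expected_project')
#     assert are_dir_trees_equal(expected, '/tmp/generated_project')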
def replace_content(folder, file_name, tag, tag_content):
""" Replace content from folder/file_name of tag with tag content."""
file_path = os.path.join(folder, file_name)
content = read(file_path)
content = content.replace(tag, tag_content)
return write(file_path, content)
| zhangf911/common | test/testfileutils.py | Python | mit | 4,715 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core import AzCommandsLoader
import azure.cli.command_modules.sql._help # pylint: disable=unused-import
class SqlCommandsLoader(AzCommandsLoader):
def __init__(self, cli_ctx=None):
from azure.cli.core.commands import CliCommandType
from azure.cli.core.profiles import ResourceType
sql_custom = CliCommandType(operations_tmpl='azure.cli.command_modules.sql.custom#{}')
super(SqlCommandsLoader, self).__init__(cli_ctx=cli_ctx,
custom_command_type=sql_custom,
resource_type=ResourceType.MGMT_SQL)
def load_command_table(self, args):
from azure.cli.command_modules.sql.commands import load_command_table
load_command_table(self, args)
return self.command_table
def load_arguments(self, command):
from azure.cli.command_modules.sql._params import load_arguments
load_arguments(self, command)
COMMAND_LOADER_CLS = SqlCommandsLoader
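# Editorial note (sketch, not part of the Azure CLI source): the CLI core is
# expected to discover this module through the COMMAND_LOADER_CLS attribute and
# drive it roughly as follows; the variable names are illustrative only.
#
#   loader = COMMAND_LOADER_CLS(cli_ctx=some_cli_ctx)
#   loader.load_command_table(args)   # registers the 'az sql ...' commands
#   loader.load_arguments(command)    # registers per-command arguments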
| yugangw-msft/azure-cli | src/azure-cli/azure/cli/command_modules/sql/__init__.py | Python | mit | 1,376 |
import os.path
from pipeline.conf import settings
from pipeline.compilers import SubProcessCompiler
class LessCompiler(SubProcessCompiler):
output_extension = 'css'
def match_file(self, filename):
return filename.endswith('.less')
def compile_file(self, content, path):
command = '%s %s %s' % (
settings.PIPELINE_LESS_BINARY,
settings.PIPELINE_LESS_ARGUMENTS,
path
)
cwd = os.path.dirname(path)
content = self.execute_command(command, cwd=cwd)
return content
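# Editorial sketch (not part of django-pipeline): with assumed settings such as
#   PIPELINE_LESS_BINARY = 'lessc'
#   PIPELINE_LESS_ARGUMENTS = '--no-color'
# compile_file() would shell out to roughly:
#   lessc --no-color /path/to/styles.less
# executed from the directory containing the .less file.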
| pdr/django-pipeline | pipeline/compilers/less.py | Python | mit | 560 |
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
pipe2py.modules.pipeurlinput
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
http://pipes.yahoo.com/pipes/docs?doc=user_inputs#URL
"""
from pipe2py.lib import utils
def pipe_urlinput(context=None, _INPUT=None, conf=None, **kwargs):
"""An input that prompts the user for a url and yields it forever.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : unused
conf : {
'name': {'value': 'parameter name'},
'prompt': {'value': 'User prompt'},
'default': {'value': 'default value'},
'debug': {'value': 'debug value'}
}
Yields
------
_OUTPUT : url
"""
value = utils.get_input(context, conf)
value = utils.url_quote(value)
while True:
yield value
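# Editorial usage sketch (not part of pipe2py; the conf values are illustrative
# and follow the docstring above):
#
#   conf = {'name': {'value': 'feedurl'},
#           'prompt': {'value': 'Enter a URL'},
#           'default': {'value': 'http://example.com/rss'},
#           'debug': {'value': ''}}
#   urls = pipe_urlinput(context=context, conf=conf)
#   first = next(urls)  # the url-quoted value, yielded forever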
| klyap/pipe2py | pipe2py/modules/pipeurlinput.py | Python | gpl-2.0 | 822 |
#!/usr/bin/env python
"""buildpkg.py -- Build OS X packages for Apple's Installer.app.
This is an experimental command-line tool for building packages to be
installed with the Mac OS X Installer.app application.
It is much inspired by Apple's GUI tool called PackageMaker.app, that
seems to be part of the OS X developer tools installed in the folder
/Developer/Applications. But apparently there are other free tools to
do the same thing which are also named PackageMaker like Brian Hill's
one:
http://personalpages.tds.net/~brian_hill/packagemaker.html
Beware of the multi-package features of Installer.app (which are not
yet supported here) that can potentially screw up your installation
and are discussed in these articles on Stepwise:
http://www.stepwise.com/Articles/Technical/Packages/InstallerWoes.html
http://www.stepwise.com/Articles/Technical/Packages/InstallerOnX.html
Besides using the PackageMaker class directly, say by importing it
inside another module, there are additional ways of using this module:
the top-level buildPackage() function provides a shortcut to the same
feature and is also called when using this module from the command-
line.
****************************************************************
NOTE: For now you should be able to run this even on a non-OS X
system and get something similar to a package, but without
the real archive (needs pax) and bom files (needs mkbom)
inside! This is only for providing a chance for testing to
folks without OS X.
****************************************************************
TODO:
- test pre-process and post-process scripts (Python ones?)
- handle multi-volume packages (?)
- integrate into distutils (?)
Dinu C. Gherman,
[email protected]
November 2001
!! USE AT YOUR OWN RISK !!
"""
__version__ = 0.2
__license__ = "FreeBSD"
import os, sys, glob, fnmatch, shutil, string, copy, getopt
from os.path import basename, dirname, join, islink, isdir, isfile
Error = "buildpkg.Error"
PKG_INFO_FIELDS = """\
Title
Version
Description
DefaultLocation
DeleteWarning
NeedsAuthorization
DisableStop
UseUserMask
Application
Relocatable
Required
InstallOnly
RequiresReboot
RootVolumeOnly
LongFilenames
LibrarySubdirectory
AllowBackRev
OverwritePermissions
InstallFat\
"""
######################################################################
# Helpers
######################################################################
# Convenience class, as suggested by /F.
class GlobDirectoryWalker:
"A forward iterator that traverses files in a directory tree."
def __init__(self, directory, pattern="*"):
self.stack = [directory]
self.pattern = pattern
self.files = []
self.index = 0
def __getitem__(self, index):
while 1:
try:
file = self.files[self.index]
self.index = self.index + 1
except IndexError:
# pop next directory from stack
self.directory = self.stack.pop()
self.files = os.listdir(self.directory)
self.index = 0
else:
# got a filename
fullname = join(self.directory, file)
if isdir(fullname) and not islink(fullname):
self.stack.append(fullname)
if fnmatch.fnmatch(file, self.pattern):
return fullname
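# Editorial usage sketch (not part of the original script; the folder name is
# illustrative only):
#
#   for name in GlobDirectoryWalker("/tmp/pkgroot", "*.py"):
#       print name
#
# The __getitem__ protocol above lets a plain for-loop drive the traversal.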
######################################################################
# The real thing
######################################################################
class PackageMaker:
"""A class to generate packages for Mac OS X.
This is intended to create OS X packages (with extension .pkg)
containing archives of arbitrary files that the Installer.app
will be able to handle.
As of now, PackageMaker instances need to be created with the
title, version and description of the package to be built.
The package is built after calling the instance method
build(root, **options). It has the same name as the constructor's
title argument plus a '.pkg' extension and is located in the same
parent folder that contains the root folder.
E.g. this will create a package folder /my/space/distutils.pkg/:
pm = PackageMaker("distutils", "1.0.2", "Python distutils.")
pm.build("/my/space/distutils")
"""
packageInfoDefaults = {
'Title': None,
'Version': None,
'Description': '',
'DefaultLocation': '/',
'DeleteWarning': '',
'NeedsAuthorization': 'NO',
'DisableStop': 'NO',
'UseUserMask': 'YES',
'Application': 'NO',
'Relocatable': 'YES',
'Required': 'NO',
'InstallOnly': 'NO',
'RequiresReboot': 'NO',
'RootVolumeOnly' : 'NO',
'InstallFat': 'NO',
'LongFilenames': 'YES',
'LibrarySubdirectory': 'Standard',
'AllowBackRev': 'YES',
'OverwritePermissions': 'NO',
}
def __init__(self, title, version, desc):
"Init. with mandatory title/version/description arguments."
info = {"Title": title, "Version": version, "Description": desc}
self.packageInfo = copy.deepcopy(self.packageInfoDefaults)
self.packageInfo.update(info)
# variables set later
self.packageRootFolder = None
self.packageResourceFolder = None
self.sourceFolder = None
self.resourceFolder = None
def build(self, root, resources=None, **options):
"""Create a package for some given root folder.
With no 'resources' argument set it is assumed to be the same
as the root directory. Option items replace the default ones
in the package info.
"""
# set folder attributes
self.sourceFolder = root
if resources == None:
self.resourceFolder = root
else:
self.resourceFolder = resources
# replace default option settings with user ones if provided
        fields = self.packageInfoDefaults.keys()
for k, v in options.items():
if k in fields:
self.packageInfo[k] = v
elif not k in ["OutputDir"]:
raise Error, "Unknown package option: %s" % k
# Check where we should leave the output. Default is current directory
outputdir = options.get("OutputDir", os.getcwd())
packageName = self.packageInfo["Title"]
self.PackageRootFolder = os.path.join(outputdir, packageName + ".pkg")
# do what needs to be done
self._makeFolders()
self._addInfo()
self._addBom()
self._addArchive()
self._addResources()
self._addSizes()
self._addLoc()
def _makeFolders(self):
"Create package folder structure."
# Not sure if the package name should contain the version or not...
# packageName = "%s-%s" % (self.packageInfo["Title"],
# self.packageInfo["Version"]) # ??
contFolder = join(self.PackageRootFolder, "Contents")
self.packageResourceFolder = join(contFolder, "Resources")
os.mkdir(self.PackageRootFolder)
os.mkdir(contFolder)
os.mkdir(self.packageResourceFolder)
def _addInfo(self):
"Write .info file containing installing options."
# Not sure if options in PKG_INFO_FIELDS are complete...
info = ""
for f in string.split(PKG_INFO_FIELDS, "\n"):
if self.packageInfo.has_key(f):
info = info + "%s %%(%s)s\n" % (f, f)
info = info % self.packageInfo
base = self.packageInfo["Title"] + ".info"
path = join(self.packageResourceFolder, base)
f = open(path, "w")
f.write(info)
def _addBom(self):
"Write .bom file containing 'Bill of Materials'."
# Currently ignores if the 'mkbom' tool is not available.
try:
base = self.packageInfo["Title"] + ".bom"
bomPath = join(self.packageResourceFolder, base)
cmd = "mkbom %s %s" % (self.sourceFolder, bomPath)
res = os.system(cmd)
except:
pass
def _addArchive(self):
"Write .pax.gz file, a compressed archive using pax/gzip."
# Currently ignores if the 'pax' tool is not available.
cwd = os.getcwd()
# create archive
os.chdir(self.sourceFolder)
base = basename(self.packageInfo["Title"]) + ".pax"
self.archPath = join(self.packageResourceFolder, base)
cmd = "pax -w -f %s %s" % (self.archPath, ".")
res = os.system(cmd)
# compress archive
cmd = "gzip %s" % self.archPath
res = os.system(cmd)
os.chdir(cwd)
def _addResources(self):
"Add Welcome/ReadMe/License files, .lproj folders and scripts."
# Currently we just copy everything that matches the allowed
# filenames. So, it's left to Installer.app to deal with the
# same file available in multiple formats...
if not self.resourceFolder:
return
# find candidate resource files (txt html rtf rtfd/ or lproj/)
allFiles = []
for pat in string.split("*.txt *.html *.rtf *.rtfd *.lproj", " "):
pattern = join(self.resourceFolder, pat)
allFiles = allFiles + glob.glob(pattern)
# find pre-process and post-process scripts
# naming convention: packageName.{pre,post}_{upgrade,install}
# Alternatively the filenames can be {pre,post}_{upgrade,install}
# in which case we prepend the package name
packageName = self.packageInfo["Title"]
for pat in ("*upgrade", "*install", "*flight"):
pattern = join(self.resourceFolder, packageName + pat)
pattern2 = join(self.resourceFolder, pat)
allFiles = allFiles + glob.glob(pattern)
allFiles = allFiles + glob.glob(pattern2)
# check name patterns
files = []
for f in allFiles:
for s in ("Welcome", "License", "ReadMe"):
if string.find(basename(f), s) == 0:
files.append((f, f))
if f[-6:] == ".lproj":
files.append((f, f))
elif basename(f) in ["pre_upgrade", "pre_install", "post_upgrade", "post_install"]:
files.append((f, packageName+"."+basename(f)))
elif basename(f) in ["preflight", "postflight"]:
files.append((f, f))
elif f[-8:] == "_upgrade":
files.append((f,f))
elif f[-8:] == "_install":
files.append((f,f))
# copy files
for src, dst in files:
src = basename(src)
dst = basename(dst)
f = join(self.resourceFolder, src)
if isfile(f):
shutil.copy(f, os.path.join(self.packageResourceFolder, dst))
elif isdir(f):
# special case for .rtfd and .lproj folders...
d = join(self.packageResourceFolder, dst)
os.mkdir(d)
files = GlobDirectoryWalker(f)
for file in files:
shutil.copy(file, d)
def _addSizes(self):
"Write .sizes file with info about number and size of files."
# Not sure if this is correct, but 'installedSize' and
# 'zippedSize' are now in Bytes. Maybe blocks are needed?
# Well, Installer.app doesn't seem to care anyway, saying
# the installation needs 100+ MB...
numFiles = 0
installedSize = 0
zippedSize = 0
files = GlobDirectoryWalker(self.sourceFolder)
for f in files:
numFiles = numFiles + 1
installedSize = installedSize + os.lstat(f)[6]
try:
zippedSize = os.stat(self.archPath+ ".gz")[6]
except OSError: # ignore error
pass
base = self.packageInfo["Title"] + ".sizes"
f = open(join(self.packageResourceFolder, base), "w")
format = "NumFiles %d\nInstalledSize %d\nCompressedSize %d\n"
f.write(format % (numFiles, installedSize, zippedSize))
def _addLoc(self):
"Write .loc file."
base = self.packageInfo["Title"] + ".loc"
f = open(join(self.packageResourceFolder, base), "w")
f.write('/')
# Shortcut function interface
def buildPackage(*args, **options):
"A Shortcut function for building a package."
o = options
title, version, desc = o["Title"], o["Version"], o["Description"]
pm = PackageMaker(title, version, desc)
apply(pm.build, list(args), options)
######################################################################
# Tests
######################################################################
def test0():
"Vanilla test for the distutils distribution."
pm = PackageMaker("distutils2", "1.0.2", "Python distutils package.")
pm.build("/Users/dinu/Desktop/distutils2")
def test1():
"Test for the reportlab distribution with modified options."
pm = PackageMaker("reportlab", "1.10",
"ReportLab's Open Source PDF toolkit.")
pm.build(root="/Users/dinu/Desktop/reportlab",
DefaultLocation="/Applications/ReportLab",
Relocatable="YES")
def test2():
"Shortcut test for the reportlab distribution with modified options."
buildPackage(
"/Users/dinu/Desktop/reportlab",
Title="reportlab",
Version="1.10",
Description="ReportLab's Open Source PDF toolkit.",
DefaultLocation="/Applications/ReportLab",
Relocatable="YES")
######################################################################
# Command-line interface
######################################################################
def printUsage():
"Print usage message."
format = "Usage: %s <opts1> [<opts2>] <root> [<resources>]"
print format % basename(sys.argv[0])
print
print " with arguments:"
print " (mandatory) root: the package root folder"
print " (optional) resources: the package resources folder"
print
print " and options:"
print " (mandatory) opts1:"
mandatoryKeys = string.split("Title Version Description", " ")
for k in mandatoryKeys:
print " --%s" % k
print " (optional) opts2: (with default values)"
pmDefaults = PackageMaker.packageInfoDefaults
optionalKeys = pmDefaults.keys()
for k in mandatoryKeys:
optionalKeys.remove(k)
optionalKeys.sort()
maxKeyLen = max(map(len, optionalKeys))
for k in optionalKeys:
format = " --%%s:%s %%s"
format = format % (" " * (maxKeyLen-len(k)))
print format % (k, repr(pmDefaults[k]))
def main():
"Command-line interface."
shortOpts = ""
keys = PackageMaker.packageInfoDefaults.keys()
longOpts = map(lambda k: k+"=", keys)
try:
opts, args = getopt.getopt(sys.argv[1:], shortOpts, longOpts)
except getopt.GetoptError, details:
print details
printUsage()
return
optsDict = {}
for k, v in opts:
optsDict[k[2:]] = v
ok = optsDict.keys()
if not (1 <= len(args) <= 2):
print "No argument given!"
elif not ("Title" in ok and \
"Version" in ok and \
"Description" in ok):
print "Missing mandatory option!"
else:
apply(buildPackage, args, optsDict)
return
printUsage()
# sample use:
# buildpkg.py --Title=distutils \
# --Version=1.0.2 \
# --Description="Python distutils package." \
# /Users/dinu/Desktop/distutils
if __name__ == "__main__":
main()
| xbmc/atv2 | xbmc/lib/libPython/Python/Mac/scripts/buildpkg.py | Python | gpl-2.0 | 15,904 |
from rsf.proj import *
from math import *
import fdmod,pcsutil,wefd
def data(par):
# ------------------------------------------------------------
Fetch('vp_marmousi-ii.segy',"marm2")
Fetch('vs_marmousi-ii.segy',"marm2")
Fetch('density_marmousi-ii.segy',"marm2")
# ------------------------------------------------------------
for file in ('vp','vs','ro'):
if(file=='ro'):
ifile='density_marmousi-ii.segy'
else:
ifile=file+'_marmousi-ii.segy'
Flow(['z'+file,'t'+file,'./s'+file,'./b'+file],ifile,
'''
segyread tape=$SOURCE
tfile=${TARGETS[1]}
hfile=${TARGETS[2]}
bfile=${TARGETS[3]}
''',stdin=0)
Flow('_'+file,'z'+file,
'''
put
o1=0 d1=0.001249 label1=%(lz)s unit1=%(uz)s
o2=0 d2=0.001249 label2=%(lx)s unit2=%(ux)s |
window j1=2 j2=2
''' % par)
if(file=='ro'):
Flow(file+'raw','_'+file,'window n1=%(nz)d n2=%(nx)d min1=%(oz)g min2=%(ox)g | scale rscale=1000000' % par)
else:
Flow(file+'raw','_'+file,'window n1=%(nz)d n2=%(nx)d min1=%(oz)g min2=%(ox)g' % par)
# ------------------------------------------------------------
Flow( 'wmask','vpraw','mask max=1.5 | dd type=float')
# Result('wmask',fdmod.cgrey('allpos=y',par))
Flow('rx','vpraw','math output="1.0e6+1.5e6*(input-1.5)/3" ')
Flow('ro','roraw','math output=1')
Flow('vp','vpraw','smooth rect1=35 rect2=35 repeat=5')
Flow('vs','vp wmask','scale rscale=0.5 | math w=${SOURCES[1]} output="input*(1-w)"')
# velocity ratio at cig location x
Flow('vratio1_1','vp vp','add mode=d ${SOURCES[1]}');
Flow('vratio1_2','vp vs','add mode=d ${SOURCES[1]}');
Flow('vratio2_1','vs vp','add mode=d ${SOURCES[1]}');
Flow('vratio2_2','vs vs','add mode=d ${SOURCES[1]}');
Flow('vratio','vratio1_1 vratio1_2 vratio2_1 vratio2_2',
'''
cat axis=3 space=n ${SOURCES[0:4]}
''',stdin=0)
def mask(mask,xsou,tmin,tmax,par):
dipline1(mask+'ml',
0.15+tmin,par['xmin'],
0.15,xsou,
0,1,
par['nt'],par['ot'],par['dt'],
par['nx'],par['ox'],par['dx'])
dipline1(mask+'mr',
0.15,xsou,
0.15+tmax,par['xmax'],
0,1,
par['nt'],par['ot'],par['dt'],
par['nx'],par['ox'],par['dx'])
Flow(mask,[mask+'ml',mask+'mr'],
'''
spike nsp=1 mag=1.0
n1=%(nx)d o1=%(ox)g d1=%(dx)g k1=%(ltap)d l1=%(rtap)d
n2=%(nt)d o2=%(ot)g d2=%(dt)g |
smooth rect1=100 repeat=1 |
scale axis=123 |
transp |
add mode=p ${SOURCES[0]} |
add mode=p ${SOURCES[1]} |
transp |
smooth rect2=100 repeat=3 |
put label1=x label2=t unit1=km unit2=s |
spray axis=3 n=2 o=0 d=1 |
transp plane=23
''' % par)
Result(mask,
'window n2=1 | transp|' + fdmod.dgrey('',par))
def dip(dip,img,par):
Flow( dip,img,'dip rect1=40 rect2=40 order=3 liter=100 verb=y ')
Result(dip,fdmod.cgrey('color=j wantscalebar=n',par))
def psang(x,img,dip,vpvs,tag,par):
#dip angle at cig location x
Flow( dip+'-one',dip,'window n2=1 min2=%g'%x)
#vpvs ratio at cig location x
Flow('vratioPP',vpvs,'window n3=1 f3=0 n2=1 min2=%g'%x)
Flow('vratioPS',vpvs,'window n3=1 f3=1 n2=1 min2=%g'%x)
Flow('vratioSP',vpvs,'window n3=1 f3=2 n2=1 min2=%g'%x)
Flow('vratioSS',vpvs,'window n3=1 f3=3 n2=1 min2=%g'%x)
nhx=200
nhz=0
nht=0
wefd.elaps('S'+tag,
img+tag+'_ds',
img+tag+'_dr',
nhx,nhz,nht,
dip+'-one',x,par)
def dipline1(mod,s1,s2,e1,e2,vi,vt,n1,o1,d1,n2,o2,d2):
min1=o1
max1=o1+(n1-1)*d1
min2=o2
max2=o2+(n2-1)*d2
ra = (e1-s1)/(e2-s2)
vels = "%s,%s,%s" %(vi,vt,vt)
drvs = "%s,%s" %(tan(ra),tan(ra))
dim1 = 'd1=%g o1=%g n1=%d' % (d2,o2,n2)
dim2 = 'd2=%g o2=%g n2=%d' % (d1,o1,n1)
Flow(mod+'lay2',None,
'''
spike nsp=4 mag=%g,%g,%g,%g
n1=4 n2=1 k1=1,2,3,4 |
put n1=2 n2=2 |
spline %s fp=%s
'''%(min2,min1,max2,max1,dim1,drvs))
Flow(mod+'lay1',None,
'''
spike nsp=4 mag=%g,%g,%g,%g
n1=4 n2=1 k1=1,2,3,4 |
put n1=2 n2=2 |
spline %s fp=%s
'''%(s2,s1,e2,e1,dim1,drvs))
Flow( mod+'layers',[mod+'lay1',mod+'lay2'],'cat axis=2 ${SOURCES[1:2]}')
Flow(mod,mod+'layers',
'''
unif2 v00=%s n1=%d d1=%g o1=%g
''' % (vels,n1,d1,o1) )
| zxtstarry/src | book/cwp/geo2008IsotropicAngleDomainElasticRTM/marm2allA/marm2.py | Python | gpl-2.0 | 4,919 |
#!/usr/bin/env python
#
# Copyright (c) 2020 by VMware, Inc. ("VMware")
# Used Copyright (c) 2018 by Network Device Education Foundation,
# Inc. ("NetDEF") in this file.
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice appear
# in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
#
"""
Following tests are covered to test BGP Multi-VRF:
FUNC_1:
Within each VRF, each address must be unambiguous on DUT.
FUNC_2:
Different VRFs can have ambiguous/overlapping
addresses on DUT.
FUNC_3:
Create static routes(IPv4+IPv6) associated to specific VRFs
and verify on DUT that same prefixes are present in corresponding
routing table.
FUNC_4_&_5:
Each VRF should be mapped with a unique VLAN on DUT
for traffic segregation, when using a single physical interface.
FUNC_6:
Advertise same set of prefixes from different VRFs
and verify on remote router that these prefixes are not
leaking to each other
FUNC_7:
Redistribute Static routes and verify on remote routers
that routes are advertised within specific VRF instance, which
those static routes belong to.
FUNC_8:
Test end to end traffic isolation based on VRF tables.
FUNC_9:
Use static routes for inter-vrf communication
(route-leaking) on DUT.
FUNC_10:
Verify intra-vrf and inter-vrf communication between
iBGP peers.
FUNC_11:
Verify intra-vrf and inter-vrf communication
between eBGP peers.
FUNC_12_a:
Configure route-maps within a VRF, to alter BGP attributes.
Verify that route-map doesn't affect any other VRF instances'
routing on DUT.
FUNC_12_b:
Configure route-maps within a VRF, to alter BGP attributes.
Verify that route-map doesn't affect any other VRF instances'
routing on DUT.
FUNC_12_c:
Configure route-maps within a VRF, to alter BGP attributes.
Verify that route-map doesn't affect any other VRF instances'
routing on DUT.
FUNC_12_d:
Configure route-maps within a VRF, to alter BGP attributes.
Verify that route-map doesn't affect any other VRF instances'
routing on DUT.
FUNC_12_e:
Configure route-maps within a VRF, to alter BGP attributes.
Verify that route-map doesn't affect any other VRF instances'
routing on DUT.
FUNC_12_f:
Configure route-maps within a VRF, to alter BGP attributes.
Verify that route-map doesn't affect any other VRF instances'
routing on DUT.
FUNC_13:
Configure a route-map on DUT to match traffic based
on a VRF interfaces.
FUNC_14:
Test VRF-lite with Static+BGP originated routes.
FUNC_15:
Configure prefix-lists on DUT and apply to BGP peers to
permit/deny prefixes.
FUNC_16_1:
Configure a route-map on DUT to match traffic based various
match/set causes.
FUNC_16_2:
Configure a route-map on DUT to match traffic based various
match/set causes.
FUNC_16_3:
Configure a route-map on DUT to match traffic based various
match/set causes.
"""
import os
import sys
import time
import pytest
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
sys.path.append(os.path.join(CWD, "../lib/"))
# Required to instantiate the topology builder class.
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib.topogen import Topogen, get_topogen
from lib.topotest import iproute2_is_vrf_capable
from lib.common_config import (
step,
verify_rib,
start_topology,
write_test_header,
check_address_types,
write_test_footer,
reset_config_on_routers,
create_route_maps,
create_static_routes,
create_prefix_lists,
create_interface_in_kernel,
create_bgp_community_lists,
check_router_status,
apply_raw_config,
required_linux_kernel_version,
)
from lib.topolog import logger
from lib.bgp import (
verify_bgp_rib,
create_router_bgp,
verify_bgp_community,
verify_bgp_convergence,
verify_best_path_as_per_bgp_attribute,
)
from lib.topojson import build_config_from_json
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
# Global variables
NETWORK1_1 = {"ipv4": "1.1.1.1/32", "ipv6": "1::1/128"}
NETWORK1_2 = {"ipv4": "1.1.1.2/32", "ipv6": "1::2/128"}
NETWORK2_1 = {"ipv4": "2.1.1.1/32", "ipv6": "2::1/128"}
NETWORK2_2 = {"ipv4": "2.1.1.2/32", "ipv6": "2::2/128"}
NETWORK3_1 = {"ipv4": "3.1.1.1/32", "ipv6": "3::1/128"}
NETWORK3_2 = {"ipv4": "3.1.1.2/32", "ipv6": "3::2/128"}
NETWORK4_1 = {"ipv4": "4.1.1.1/32", "ipv6": "4::1/128"}
NETWORK4_2 = {"ipv4": "4.1.1.2/32", "ipv6": "4::2/128"}
NETWORK5_1 = {"ipv4": "5.1.1.1/32", "ipv6": "5::1/128"}
NETWORK5_2 = {"ipv4": "5.1.1.2/32", "ipv6": "5::2/128"}
NETWORK6_1 = {"ipv4": "6.1.1.1/32", "ipv6": "6::1/128"}
NETWORK6_2 = {"ipv4": "6.1.1.2/32", "ipv6": "6::2/128"}
NETWORK7_1 = {"ipv4": "7.1.1.1/32", "ipv6": "7::1/128"}
NETWORK7_2 = {"ipv4": "7.1.1.2/32", "ipv6": "7::2/128"}
NETWORK8_1 = {"ipv4": "8.1.1.1/32", "ipv6": "8::1/128"}
NETWORK8_2 = {"ipv4": "8.1.1.2/32", "ipv6": "8::2/128"}
NEXT_HOP_IP = {"ipv4": "Null0", "ipv6": "Null0"}
LOOPBACK_1 = {
"ipv4": "10.10.10.10/32",
"ipv6": "10::10:10/128",
}
LOOPBACK_2 = {
"ipv4": "20.20.20.20/32",
"ipv6": "20::20:20/128",
}
def setup_module(mod):
"""
Sets up the pytest environment
* `mod`: module name
"""
# Required linux kernel version for this suite to run.
result = required_linux_kernel_version("4.15")
if result is not True:
pytest.skip("Kernel requirements are not met")
# iproute2 needs to support VRFs for this suite to run.
if not iproute2_is_vrf_capable():
pytest.skip("Installed iproute2 version does not support VRFs")
testsuite_run_time = time.asctime(time.localtime(time.time()))
logger.info("Testsuite start time: {}".format(testsuite_run_time))
logger.info("=" * 40)
logger.info("Running setup_module to create topology")
# This function initiates the topology build with Topogen...
json_file = "{}/bgp_multi_vrf_topo1.json".format(CWD)
tgen = Topogen(json_file, mod.__name__)
global topo
topo = tgen.json_topo
# ... and here it calls Mininet initialization functions.
# Starting topology, create tmp files which are loaded to routers
    # to start daemons and then start routers
start_topology(tgen)
# Creating configuration from JSON
build_config_from_json(tgen, topo)
global BGP_CONVERGENCE
global ADDR_TYPES
ADDR_TYPES = check_address_types()
BGP_CONVERGENCE = verify_bgp_convergence(tgen, topo)
assert BGP_CONVERGENCE is True, "setup_module : Failed \n Error: {}".format(
BGP_CONVERGENCE
)
logger.info("Running setup_module() done")
def teardown_module():
"""Teardown the pytest environment"""
logger.info("Running teardown_module to delete topology")
tgen = get_topogen()
    # Stop topology and remove tmp files
tgen.stop_topology()
logger.info(
"Testsuite end time: {}".format(time.asctime(time.localtime(time.time())))
)
logger.info("=" * 40)
#####################################################
#
# Testcases
#
#####################################################
def test_address_unambiguous_within_each_vrf_p0(request):
"""
FUNC_1:
Within each VRF, each address must be unambiguous on DUT.
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
if tgen.routers_have_failure():
check_router_status(tgen)
step("Configure a set of static routes(IPv4+IPv6) in " "RED_A on router RED-1")
for addr_type in ADDR_TYPES:
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": NETWORK1_1[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
}
]
}
}
result = create_static_routes(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step(
"Configure the same static routes(IPv4+IPv6) with a TAG value"
"of 500 in RED_A on router RED-1"
)
for addr_type in ADDR_TYPES:
input_dict_2 = {
"red1": {
"static_routes": [
{
"network": NETWORK1_1[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
"tag": 500,
"vrf": "RED_A",
}
]
}
}
result = create_static_routes(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Redistribute static..")
input_dict_3 = {
"red1": {
"bgp": {
"local_as": "500",
"vrf": "RED_A",
"address_family": {
"ipv4": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
"ipv6": {"unicast": {"redistribute": [{"redist_type": "static"}]}},
},
}
}
}
result = create_router_bgp(tgen, topo, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that static routes(IPv4+IPv6) is overridden and doesn't"
" have duplicate entries within VRF RED_A on router RED-1"
)
for addr_type in ADDR_TYPES:
dut = "red1"
input_dict_2 = {
"red1": {
"static_routes": [
{
"network": NETWORK1_1[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
"tag": 500,
"vrf": "RED_A",
}
]
}
}
result = verify_rib(tgen, addr_type, dut, input_dict_2, tag=500)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
step("Make sure routes are not present in global routing table")
for addr_type in ADDR_TYPES:
dut = "red1"
input_dict_2 = {
"red1": {
"static_routes": [
{
"network": NETWORK1_1[addr_type],
"next_hop": NEXT_HOP_IP[addr_type],
}
]
}
}
result = verify_rib(tgen, addr_type, dut, input_dict_2, expected=False)
assert result is not True, (
"Testcase {} : Failed \n Expected Behaviour: Routes are not "
"present on Global Routing table \n Error {}".format(tc_name, result)
)
write_test_footer(tc_name)
def test_ambiguous_overlapping_addresses_in_different_vrfs_p0(request):
"""
FUNC_2:
Different VRFs can have ambiguous/overlapping
addresses on DUT.
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
reset_config_on_routers(tgen)
if tgen.routers_have_failure():
check_router_status(tgen)
step("Configure a set of static routes(IPv4+IPv6) in vrf RED_A" "on router RED-1")
for addr_type in ADDR_TYPES:
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
}
]
}
}
result = create_static_routes(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step(
"Configure the same static routes(IPv4+IPv6) with a"
" TAG value of 500 in vrf RED_B on router RED-1"
)
for addr_type in ADDR_TYPES:
input_dict_2 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"tag": 500,
"vrf": "RED_B",
}
]
}
}
result = create_static_routes(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Redistribute static..")
input_dict_3 = {}
for dut in ["red1"]:
temp = {dut: {"bgp": []}}
input_dict_3.update(temp)
if "red" in dut:
VRFS = ["RED_A", "RED_B"]
AS_NUM = [500, 500]
elif "blue" in dut:
VRFS = ["BLUE_A", "BLUE_B"]
AS_NUM = [800, 800]
for vrf, as_num in zip(VRFS, AS_NUM):
temp[dut]["bgp"].append(
{
"local_as": as_num,
"vrf": vrf,
"address_family": {
"ipv4": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
"ipv6": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
},
}
)
result = create_router_bgp(tgen, topo, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify that RED_A has the static routes without any" " TAG value")
for addr_type in ADDR_TYPES:
dut = "red1"
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
}
]
}
}
result = verify_rib(tgen, addr_type, dut, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
result = verify_rib(tgen, addr_type, dut, input_dict_1, tag=500, expected=False)
assert result is not True, (
"Testcase {} : Failed \n "
"Routes are present with tag value 500 \n Error: {}".format(tc_name, result)
)
logger.info("Expected Behavior: {}".format(result))
step(
"Verify that RED_B has the same routes with TAG value "
"500 on same device RED-1"
)
for addr_type in ADDR_TYPES:
dut = "red1"
input_dict_2 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"tag": 500,
"vrf": "RED_B",
}
]
}
}
result = verify_rib(tgen, addr_type, dut, input_dict_2, tag=500)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
step("Make sure routes are not present in global routing table")
for addr_type in ADDR_TYPES:
dut = "red1"
input_dict_2 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
}
]
}
}
result = verify_rib(tgen, addr_type, dut, input_dict_2, expected=False)
assert result is not True, (
"Testcase {} : Failed \n Expected Behaviour: Routes are not "
"present on Global Routing table \n Error {}".format(tc_name, result)
)
write_test_footer(tc_name)
def test_static_routes_associated_to_specific_vrfs_p0(request):
"""
FUNC_3:
Create static routes(IPv4+IPv6) associated to specific VRFs
and verify on DUT that same prefixes are present in corresponding
routing table.
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
reset_config_on_routers(tgen)
if tgen.routers_have_failure():
check_router_status(tgen)
step(
"Configure a set of unique static(IPv4+IPv6) routes in vrf"
" RED_A on router RED-1"
)
for addr_type in ADDR_TYPES:
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step(
"Configure set of unique static routes(IPv4+IPv6) in vrf "
"RED_B on router RED-1"
)
for addr_type in ADDR_TYPES:
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Redistribute static..")
input_dict_3 = {}
for dut in ["red1", "blue1"]:
temp = {dut: {"bgp": []}}
input_dict_3.update(temp)
if "red" in dut:
VRFS = ["RED_A", "RED_B"]
AS_NUM = [500, 500]
elif "blue" in dut:
VRFS = ["BLUE_A", "BLUE_B"]
AS_NUM = [800, 800]
for vrf, as_num in zip(VRFS, AS_NUM):
temp[dut]["bgp"].append(
{
"local_as": as_num,
"vrf": vrf,
"address_family": {
"ipv4": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
"ipv6": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
},
}
)
result = create_router_bgp(tgen, topo, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that static routes 1.x.x.x/32 and 1::x/128 appear " "in VRF RED_A table"
)
step(
"Verify that static routes 2.x.x.x/32 and 2::x/128 appear " "in VRF RED_B table"
)
for addr_type in ADDR_TYPES:
dut = "red1"
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
}
}
result = verify_rib(tgen, addr_type, dut, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
step(
"Verify that static routes 1.x.x.x/32 and 1::x/128 appear "
"in VRF BLUE_A table"
)
step(
"Verify that static routes 2.x.x.x/32 and 2::x/128 appear "
"in VRF BLUE_B table"
)
for addr_type in ADDR_TYPES:
dut = "blue1"
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
}
}
result = verify_rib(tgen, addr_type, dut, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
step("Make sure routes are not present in global routing table")
for addr_type in ADDR_TYPES:
dut = "blue1"
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
},
]
}
}
result = verify_rib(tgen, addr_type, dut, input_dict_2, expected=False)
assert result is not True, (
"Testcase {} : Failed \n Expected Behaviour: Routes are not "
"present on Global Routing table \n Error {}".format(tc_name, result)
)
write_test_footer(tc_name)
def test_vrf_with_unique_physical_interface_p0(request):
"""
FUNC_4_&_5:
Each VRF should be mapped with a unique VLAN on DUT
for traffic segregation, when using a single physical interface.
Each VRF should be mapped to a unique physical
interface(without VLAN tagging) on DUT for traffic segregation.
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
reset_config_on_routers(tgen)
if tgen.routers_have_failure():
check_router_status(tgen)
step(
"R1 is receiving routes in 4 VRFs instances "
"(RED_A, RED_B, BLUE_A, BLUE_B) from RED_1 and BLUE_1."
)
for addr_type in ADDR_TYPES:
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step(
"Advertise a set of unique BGP prefixes(IPv4+IPv6) from "
"routers RED_1 & BLUE_1 in each VRF using static redistribution"
)
for addr_type in ADDR_TYPES:
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK3_1[addr_type]] + [NETWORK3_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK4_1[addr_type]] + [NETWORK4_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Redistribute static..")
input_dict_3 = {}
for dut in ["red1", "blue1"]:
temp = {dut: {"bgp": []}}
input_dict_3.update(temp)
if "red" in dut:
VRFS = ["RED_A", "RED_B"]
AS_NUM = [500, 500]
elif "blue" in dut:
VRFS = ["BLUE_A", "BLUE_B"]
AS_NUM = [800, 800]
for vrf, as_num in zip(VRFS, AS_NUM):
temp[dut]["bgp"].append(
{
"local_as": as_num,
"vrf": vrf,
"address_family": {
"ipv4": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
"ipv6": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
},
}
)
result = create_router_bgp(tgen, topo, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Each VRF table on R2 should maintain it's associated "
"routes and and accordingly install in zebra"
)
for addr_type in ADDR_TYPES:
dut = "r2"
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
}
}
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK3_1[addr_type]] + [NETWORK3_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK4_1[addr_type]] + [NETWORK4_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
}
}
result = verify_rib(tgen, addr_type, dut, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
result = verify_rib(tgen, addr_type, dut, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
write_test_footer(tc_name)
def test_prefixes_leaking_p0(request):
"""
FUNC_6:
Advertise same set of prefixes from different VRFs
and verify on remote router that these prefixes are not
leaking to each other
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
reset_config_on_routers(tgen)
if tgen.routers_have_failure():
check_router_status(tgen)
step("Configure a set of static routes(IPv4+IPv6) in vrf " "RED_A on router RED-1")
for addr_type in ADDR_TYPES:
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
}
]
},
"blue1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
}
]
},
}
result = create_static_routes(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step(
"Configure a set of static routes(IPv4+IPv6) in vrf " "BLUE_A on router BLUE-1"
)
for addr_type in ADDR_TYPES:
input_dict_2 = {
"red1": {
"static_routes": [
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
}
]
},
"blue1": {
"static_routes": [
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
}
]
},
}
result = create_static_routes(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step(
"Configure the same set of static routes with a "
"metric value of 123 in vrf RED_B on router RED-1"
)
step(
"Configure the same set of static routes with a "
"metric value of 123 in vrf BLUE_B on router BLUE-1"
)
input_dict_3 = {
"red1": {
"bgp": [
{
"local_as": "500",
"vrf": "RED_A",
"address_family": {
"ipv4": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
"ipv6": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
},
},
{
"local_as": "500",
"vrf": "RED_B",
"address_family": {
"ipv4": {
"unicast": {
"redistribute": [
{
"redist_type": "static",
"attribute": {"metric": 123},
}
]
}
},
"ipv6": {
"unicast": {
"redistribute": [
{
"redist_type": "static",
"attribute": {"metric": 123},
}
]
}
},
},
},
]
},
"blue1": {
"bgp": [
{
"local_as": "800",
"vrf": "BLUE_A",
"address_family": {
"ipv4": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
"ipv6": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
},
},
{
"local_as": "800",
"vrf": "BLUE_B",
"address_family": {
"ipv4": {
"unicast": {
"redistribute": [
{
"redist_type": "static",
"attribute": {"metric": 123},
}
]
}
},
"ipv6": {
"unicast": {
"redistribute": [
{
"redist_type": "static",
"attribute": {"metric": 123},
}
]
}
},
},
},
]
},
}
result = create_router_bgp(tgen, topo, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify on R1 that RED_A doesn't receive any static "
"route with metric value 123"
)
for addr_type in ADDR_TYPES:
dut = "r1"
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
}
]
},
"blue1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
}
]
},
}
input_dict_2 = {
"red1": {
"static_routes": [
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
}
]
},
"blue1": {
"static_routes": [
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
}
]
},
}
result = verify_rib(tgen, addr_type, dut, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
result = verify_rib(
tgen, addr_type, dut, input_dict_1, metric=123, expected=False
)
assert result is not True, (
"Testcase {} : Failed \n "
"Routes are present with metric value 123 \n Error: {}".format(
tc_name, result
)
)
logger.info("Expected Behavior: {}".format(result))
result = verify_rib(tgen, addr_type, dut, input_dict_2, metric=123)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
result = verify_rib(
tgen, addr_type, dut, input_dict_2, metric=0, expected=False
)
assert result is not True, (
"Testcase {} : Failed \n "
"Routes are present with metric value 0 \n Error: {}".format(
tc_name, result
)
)
logger.info("Expected Behavior: {}".format(result))
write_test_footer(tc_name)
def test_static_routes_advertised_within_specific_vrf_p0(request):
"""
FUNC_7:
Redistribute Static routes and verify on remote routers
that routes are advertised within specific VRF instance, which
those static routes belong to.
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
reset_config_on_routers(tgen)
if tgen.routers_have_failure():
check_router_status(tgen)
step(
"Advertise a set of unique BGP prefixes(IPv4+IPv6) "
"through static redistribution into VRF RED_A and RED_B"
" from router RED-1."
)
for addr_type in ADDR_TYPES:
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step(
"Advertise same as above set of BGP prefixes(IPv4+IPv6) "
"through static redistribution into VRF BLUE_A and BLUE_B"
" from router BLUE-1."
)
for addr_type in ADDR_TYPES:
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Redistribute static..")
input_dict_3 = {}
for dut in ["red1", "blue1"]:
temp = {dut: {"bgp": []}}
input_dict_3.update(temp)
if "red" in dut:
VRFS = ["RED_A", "RED_B"]
AS_NUM = [500, 500]
elif "blue" in dut:
VRFS = ["BLUE_A", "BLUE_B"]
AS_NUM = [800, 800]
for vrf, as_num in zip(VRFS, AS_NUM):
temp[dut]["bgp"].append(
{
"local_as": as_num,
"vrf": vrf,
"address_family": {
"ipv4": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
"ipv6": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
},
}
)
result = create_router_bgp(tgen, topo, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that static routes are installed into vrfs RED_A"
"and RED_B tables only, not in global routing table of RED_1"
)
for addr_type in ADDR_TYPES:
dut = "red1"
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
}
}
result = verify_rib(tgen, addr_type, dut, input_dict_1, protocol="static")
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
step(
"Verify that static routes are installed into vrfs BLUE_A and"
"BLUE_B tables only, not in global routing table of BLUE_1."
)
for addr_type in ADDR_TYPES:
dut = "blue1"
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
}
}
result = verify_rib(tgen, addr_type, dut, input_dict_2, protocol="static")
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
step(
"Verify on router R1, that each set of prefixes is received"
" into associated vrf tables only."
)
result = verify_bgp_convergence(tgen, topo)
assert result is True, "Testcase {} : Failed \n Error {}".format(tc_name, result)
for addr_type in ADDR_TYPES:
dut = "r1"
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
}
}
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
}
}
result = verify_rib(tgen, addr_type, dut, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
result = verify_rib(tgen, addr_type, dut, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
write_test_footer(tc_name)
def test_end_to_end_traffic_isolation_p0(request):
"""
FUNC_8:
Test end to end traffic isolation based on VRF tables.
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
reset_config_on_routers(tgen)
if tgen.routers_have_failure():
check_router_status(tgen)
step(
"Advertise unique BGP prefixes(IPv4+IPv6) from RED_1 "
"in vrf instances(RED_A and RED_B)."
)
for addr_type in ADDR_TYPES:
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step(
"Advertise unique BGP prefixes(IPv4+IPv6) from from BLUE_1 in"
" vrf instances(BLUE_A and BLUE_B)."
)
for addr_type in ADDR_TYPES:
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK3_1[addr_type]] + [NETWORK3_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK4_1[addr_type]] + [NETWORK4_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Redistribute static..")
input_dict_3 = {}
for dut in ["red1", "blue1"]:
temp = {dut: {"bgp": []}}
input_dict_3.update(temp)
if "red" in dut:
VRFS = ["RED_A", "RED_B"]
AS_NUM = [500, 500]
elif "blue" in dut:
VRFS = ["BLUE_A", "BLUE_B"]
AS_NUM = [800, 800]
for vrf, as_num in zip(VRFS, AS_NUM):
temp[dut]["bgp"].append(
{
"local_as": as_num,
"vrf": vrf,
"address_family": {
"ipv4": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
"ipv6": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
},
}
)
result = create_router_bgp(tgen, topo, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Use below commands to send prefixes with as-path prepend"
"VRF BLUE_A and BLUE_B from router BLUE-1."
)
for addr_type in ADDR_TYPES:
input_dict_4 = {
"blue1": {
"route_maps": {
"ASP_{}".format(addr_type): [
{
"action": "permit",
"set": {"path": {"as_num": 123, "as_action": "prepend"}},
}
]
}
}
}
result = create_route_maps(tgen, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Apply route-map to neighbours")
input_dict_5 = {
"blue1": {
"bgp": [
{
"local_as": "800",
"vrf": "BLUE_A",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"blue1-link1": {
"route_maps": [
{
"name": "ASP_ipv4",
"direction": "out",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"blue1-link1": {
"route_maps": [
{
"name": "ASP_ipv6",
"direction": "out",
}
]
}
}
}
}
}
},
},
},
{
"local_as": "800",
"vrf": "BLUE_B",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"blue1-link2": {
"route_maps": [
{
"name": "ASP_ipv4",
"direction": "out",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"blue1-link2": {
"route_maps": [
{
"name": "ASP_ipv6",
"direction": "out",
}
]
}
}
}
}
}
},
},
},
]
}
}
result = create_router_bgp(tgen, topo, input_dict_5)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify on R1 that BLUE_A and BLUE_B VRFs are receiving the"
" prefixes with as-path 123 prepended."
)
for addr_type in ADDR_TYPES:
dut = "r1"
input_dict_6 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK3_1[addr_type]] + [NETWORK3_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
]
}
}
result = verify_bgp_rib(tgen, addr_type, dut, input_dict_6)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
result = verify_rib(tgen, addr_type, dut, input_dict_6)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
for addr_type in ADDR_TYPES:
dut = "r1"
input_dict_7 = {
"red1": {
"static_routes": [
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
{
"network": [NETWORK4_1[addr_type]] + [NETWORK4_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
}
}
result = verify_bgp_rib(tgen, addr_type, dut, input_dict_7)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
result = verify_rib(tgen, addr_type, dut, input_dict_7)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
step(
"Use below commands to send prefixes with as-path prepend VRF"
" BLUE_A and BLUE_B from router BLUE-1."
)
input_dict_6 = {
"red2": {
"bgp": [
{
"local_as": "500",
"vrf": "RED_A",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r3": {
"dest_link": {
"red2-link1": {
"allowas-in": {"number_occurences": 2}
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r3": {
"dest_link": {
"red2-link1": {
"allowas-in": {"number_occurences": 2}
}
}
}
}
}
},
},
},
{
"local_as": "500",
"vrf": "RED_B",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r3": {
"dest_link": {
"red2-link2": {
"allowas-in": {"number_occurences": 2}
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r3": {
"dest_link": {
"red2-link2": {
"allowas-in": {"number_occurences": 2}
}
}
}
}
}
},
},
},
]
},
"blue2": {
"bgp": [
{
"local_as": "800",
"vrf": "BLUE_A",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r3": {
"dest_link": {
"blue2-link1": {
"allowas-in": {"number_occurences": 2}
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r3": {
"dest_link": {
"blue2-link1": {
"allowas-in": {"number_occurences": 2}
}
}
}
}
}
},
},
},
{
"local_as": "800",
"vrf": "BLUE_B",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r3": {
"dest_link": {
"blue2-link2": {
"allowas-in": {"number_occurences": 2}
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r3": {
"dest_link": {
"blue2-link2": {
"allowas-in": {"number_occurences": 2}
}
}
}
}
}
},
},
},
]
},
}
result = create_router_bgp(tgen, topo, input_dict_6)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify that router RED-2 receives the prefixes in respective" " VRF tables.")
for addr_type in ADDR_TYPES:
dut = "red2"
input_dict_6 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
}
}
result = verify_bgp_rib(tgen, addr_type, dut, input_dict_6)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
result = verify_bgp_rib(tgen, addr_type, dut, input_dict_6)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
for addr_type in ADDR_TYPES:
dut = "blue2"
input_dict_7 = {
"red1": {
"static_routes": [
{
"network": [NETWORK3_1[addr_type]] + [NETWORK3_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK4_1[addr_type]] + [NETWORK4_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
}
}
result = verify_bgp_rib(tgen, addr_type, dut, input_dict_7)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
result = verify_bgp_rib(tgen, addr_type, dut, input_dict_7)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
write_test_footer(tc_name)
def test_static_routes_for_inter_vrf_route_leaking_p0(request):
"""
FUNC_9:
Use static routes for inter-vrf communication
(route-leaking) on DUT.
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
reset_config_on_routers(tgen)
if tgen.routers_have_failure():
check_router_status(tgen)
step(
"Configure unique loopback interfaces in VRFs RED_A "
"and RED_B on router RED_1."
)
for addr_type in ADDR_TYPES:
create_interface_in_kernel(
tgen,
"red1",
"loopback1",
LOOPBACK_1[addr_type],
"RED_A",
)
create_interface_in_kernel(
tgen,
"red1",
"loopback2",
LOOPBACK_2[addr_type],
"RED_B",
)
step(
"Create a static routes in vrf RED_B on router RED_1 pointing"
" next-hop as interface's IP in vrf RED_A"
)
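    # NOTE (sketch): the "nexthop_vrf" key below is assumed to produce FRR staticd
    # cross-VRF routes, roughly:
    #
    #   vrf RED_A
    #    ip route <LOOPBACK_1 prefix> <intf_red1_r10> nexthop-vrf RED_B
    #   vrf RED_B
    #    ip route <LOOPBACK_2 prefix> <intf_red1_r11> nexthop-vrf RED_A
    #
    # i.e. each VRF reaches the other VRF's loopback through a leaked next-hop.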
intf_red1_r11 = topo["routers"]["red1"]["links"]["r1-link1"]["interface"]
intf_red1_r10 = topo["routers"]["red1"]["links"]["r1-link2"]["interface"]
for addr_type in ADDR_TYPES:
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": LOOPBACK_1[addr_type],
"interface": intf_red1_r10,
"nexthop_vrf": "RED_B",
"vrf": "RED_A",
},
{
"network": LOOPBACK_2[addr_type],
"interface": intf_red1_r11,
"nexthop_vrf": "RED_A",
"vrf": "RED_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Redistribute static..")
input_dict_3 = {}
for dut in ["red1"]:
temp = {dut: {"bgp": []}}
input_dict_3.update(temp)
if "red" in dut:
VRFS = ["RED_A", "RED_B"]
AS_NUM = [500, 500]
elif "blue" in dut:
VRFS = ["BLUE_A", "BLUE_B"]
AS_NUM = [800, 800]
for vrf, as_num in zip(VRFS, AS_NUM):
temp[dut]["bgp"].append(
{
"local_as": as_num,
"vrf": vrf,
"address_family": {
"ipv4": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
"ipv6": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
},
}
)
result = create_router_bgp(tgen, topo, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that static routes are installed into vrfs RED_A"
"and RED_B tables only, not in global routing table of RED_1"
)
for addr_type in ADDR_TYPES:
dut = "red1"
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": LOOPBACK_1[addr_type],
"interface": intf_red1_r10,
"nexthop_vrf": "RED_B",
"vrf": "RED_A",
},
{
"network": LOOPBACK_2[addr_type],
"interface": intf_red1_r11,
"nexthop_vrf": "RED_A",
"vrf": "RED_B",
},
]
}
}
result = verify_rib(tgen, addr_type, dut, input_dict_1, protocol="static")
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
write_test_footer(tc_name)
def test_inter_vrf_and_intra_vrf_communication_iBGP_p0(request):
"""
FUNC_10:
Verify intra-vrf and inter-vrf communication between
iBGP peers.
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
reset_config_on_routers(tgen)
if tgen.routers_have_failure():
check_router_status(tgen)
step(
"Configure unique loopback IP(IPv4+IPv6) in vrf RED_A on router"
" R1 and advertise it in BGP process using redistribute "
"connected command."
)
for addr_type in ADDR_TYPES:
create_interface_in_kernel(
tgen,
"r1",
"loopback1",
LOOPBACK_1[addr_type],
"RED_A",
)
create_interface_in_kernel(
tgen,
"r1",
"loopback2",
LOOPBACK_2[addr_type],
"BLUE_A",
)
step(
"Create a static routes in vrf RED_B on router RED_1 pointing"
" next-hop as interface's IP in vrf RED_A"
)
intf_r2_r12 = topo["routers"]["r2"]["links"]["r1-link1"]["interface"]
intf_r2_r10 = topo["routers"]["r2"]["links"]["r1-link3"]["interface"]
for addr_type in ADDR_TYPES:
input_dict_1 = {
"r2": {
"static_routes": [
{
"network": LOOPBACK_2[addr_type],
"interface": intf_r2_r10,
"nexthop_vrf": "BLUE_A",
"vrf": "RED_A",
},
{
"network": LOOPBACK_1[addr_type],
"interface": intf_r2_r12,
"nexthop_vrf": "RED_A",
"vrf": "BLUE_A",
},
]
}
}
result = create_static_routes(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Redistribute connected..")
input_dict_3 = {}
for dut in ["r1"]:
temp = {dut: {"bgp": []}}
input_dict_3.update(temp)
VRFS = ["RED_A", "BLUE_A"]
AS_NUM = [100, 100]
for vrf, as_num in zip(VRFS, AS_NUM):
temp[dut]["bgp"].append(
{
"local_as": as_num,
"vrf": vrf,
"address_family": {
"ipv4": {
"unicast": {"redistribute": [{"redist_type": "connected"}]}
},
"ipv6": {
"unicast": {"redistribute": [{"redist_type": "connected"}]}
},
},
}
)
result = create_router_bgp(tgen, topo, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Redistribute static..")
input_dict_3 = {}
for dut in ["r2"]:
temp = {dut: {"bgp": []}}
input_dict_3.update(temp)
VRFS = ["RED_A", "BLUE_A"]
AS_NUM = [100, 100]
for vrf, as_num in zip(VRFS, AS_NUM):
temp[dut]["bgp"].append(
{
"local_as": as_num,
"vrf": vrf,
"address_family": {
"ipv4": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
"ipv6": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
},
}
)
result = create_router_bgp(tgen, topo, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that static routes are installed into vrfs RED_A"
"and RED_B tables only, not in global routing table of RED_1"
)
for addr_type in ADDR_TYPES:
dut = "r2"
input_dict = {
"r2": {
"static_routes": [
{
"network": LOOPBACK_2[addr_type],
"interface": intf_r2_r10,
"nexthop_vrf": "BLUE_A",
"vrf": "RED_A",
},
{
"network": LOOPBACK_1[addr_type],
"interface": intf_r2_r12,
"nexthop_vrf": "RED_A",
"vrf": "BLUE_A",
},
]
}
}
result = verify_rib(tgen, addr_type, dut, input_dict)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
write_test_footer(tc_name)
def test_inter_vrf_and_intra_vrf_communication_eBGP_p0(request):
"""
FUNC_11:
Verify intra-vrf and inter-vrf communication
between eBGP peers.
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
reset_config_on_routers(tgen)
if tgen.routers_have_failure():
check_router_status(tgen)
step(
"Configure unique loopback IP(IPv4+IPv6) in vrf RED_A on router"
" R2 and advertise it in BGP process using redistribute "
"connected command."
)
step(
"Configure unique loopback IP(IPv4+IPv6) in vrf BLUE_A on router"
" R2 and advertise it in BGP process using redistribute "
"connected command."
)
for addr_type in ADDR_TYPES:
create_interface_in_kernel(
tgen,
"r2",
"loopback1",
LOOPBACK_1[addr_type],
"RED_A",
)
create_interface_in_kernel(
tgen,
"r2",
"loopback2",
LOOPBACK_2[addr_type],
"BLUE_A",
)
step(
"Create a static routes in vrf RED_B on router RED_1 pointing"
" next-hop as interface's IP in vrf RED_A"
)
intf_r3_r21 = topo["routers"]["r3"]["links"]["r2-link1"]["interface"]
intf_r3_r23 = topo["routers"]["r3"]["links"]["r2-link3"]["interface"]
for addr_type in ADDR_TYPES:
input_dict_1 = {
"r3": {
"static_routes": [
{
"network": LOOPBACK_2[addr_type],
"interface": intf_r3_r23,
"nexthop_vrf": "BLUE_A",
"vrf": "RED_A",
},
{
"network": LOOPBACK_1[addr_type],
"interface": intf_r3_r21,
"nexthop_vrf": "RED_A",
"vrf": "BLUE_A",
},
]
}
}
result = create_static_routes(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Redistribute static..")
input_dict_3 = {}
for dut in ["r3"]:
temp = {dut: {"bgp": []}}
input_dict_3.update(temp)
VRFS = ["RED_A", "BLUE_A"]
AS_NUM = [200, 200]
for vrf, as_num in zip(VRFS, AS_NUM):
temp[dut]["bgp"].append(
{
"local_as": as_num,
"vrf": vrf,
"address_family": {
"ipv4": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
"ipv6": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
},
}
)
result = create_router_bgp(tgen, topo, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Redistribute connected..")
input_dict_3 = {}
for dut in ["r2"]:
temp = {dut: {"bgp": []}}
input_dict_3.update(temp)
VRFS = ["RED_A", "BLUE_A"]
AS_NUM = [100, 100]
for vrf, as_num in zip(VRFS, AS_NUM):
temp[dut]["bgp"].append(
{
"local_as": as_num,
"vrf": vrf,
"address_family": {
"ipv4": {
"unicast": {"redistribute": [{"redist_type": "connected"}]}
},
"ipv6": {
"unicast": {"redistribute": [{"redist_type": "connected"}]}
},
},
}
)
result = create_router_bgp(tgen, topo, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that static routes are installed into vrfs RED_A"
"and RED_B tables only, not in global routing table of RED_1"
)
for addr_type in ADDR_TYPES:
dut = "r3"
input_dict = {
"r3": {
"static_routes": [
{
"network": LOOPBACK_2[addr_type],
"interface": intf_r3_r23,
"nexthop_vrf": "BLUE_A",
"vrf": "RED_A",
},
{
"network": LOOPBACK_1[addr_type],
"interface": intf_r3_r21,
"nexthop_vrf": "RED_A",
"vrf": "BLUE_A",
},
]
}
}
result = verify_rib(tgen, addr_type, dut, input_dict)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
write_test_footer(tc_name)
def test_route_map_within_vrf_to_alter_bgp_attribute_nexthop_p0(request):
"""
FUNC_12_a:
Configure route-maps within a VRF, to alter BGP attributes.
Verify that route-map doesn't affect any other VRF instances'
routing on DUT.
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
reset_config_on_routers(tgen)
if tgen.routers_have_failure():
check_router_status(tgen)
step(
"Advertise a set of BGP prefixes(IPv4+IPv6) from RED_1 and"
" RED_2 in vrf instances(RED_A and RED_B)."
)
for addr_type in ADDR_TYPES:
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step(
"Advertise same set of BGP prefixes(IPv4+IPv6) from BLUE_1 and"
"BLUE_2 in vrf instances(BLUE_A and BLUE_B)"
)
for addr_type in ADDR_TYPES:
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Redistribute static..")
input_dict_3 = {}
for dut in ["red1", "blue1"]:
temp = {dut: {"bgp": []}}
input_dict_3.update(temp)
if "red" in dut:
VRFS = ["RED_A", "RED_B"]
AS_NUM = [500, 500]
elif "blue" in dut:
VRFS = ["BLUE_A", "BLUE_B"]
AS_NUM = [800, 800]
for vrf, as_num in zip(VRFS, AS_NUM):
temp[dut]["bgp"].append(
{
"local_as": as_num,
"vrf": vrf,
"address_family": {
"ipv4": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
"ipv6": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
},
}
)
result = create_router_bgp(tgen, topo, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that within vrf instances, BGP best path selection"
" algorithm remains intact and doesn't affect any other VRFs"
" routing decision."
)
for addr_type in ADDR_TYPES:
dut = "r2"
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
}
}
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
}
}
result = verify_rib(tgen, addr_type, dut, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
result = verify_rib(tgen, addr_type, dut, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
step("Delete nexthop-self configure from r1")
input_dict_4 = {
"r1": {
"bgp": [
{
"local_as": "100",
"vrf": "RED_A",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r2": {
"dest_link": {
"r1-link1": {"next_hop_self": False}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r2": {
"dest_link": {
"r1-link1": {"next_hop_self": False}
}
}
}
}
},
},
},
{
"local_as": "100",
"vrf": "RED_B",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r2": {
"dest_link": {
"r1-link2": {"next_hop_self": False}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r2": {
"dest_link": {
"r1-link2": {"next_hop_self": False}
}
}
}
}
},
},
},
{
"local_as": "100",
"vrf": "BLUE_A",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r2": {
"dest_link": {
"r1-link3": {"next_hop_self": False}
}
},
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r2": {
"dest_link": {
"r1-link3": {"next_hop_self": False}
}
}
}
}
},
},
},
{
"local_as": "100",
"vrf": "BLUE_B",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r2": {
"dest_link": {
"r1-link4": {"next_hop_self": False}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r2": {
"dest_link": {
"r1-link4": {"next_hop_self": False}
}
}
}
}
},
},
},
]
}
}
result = create_router_bgp(tgen, topo, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that within vrf instances, BGP best path selection"
" algorithm remains intact and doesn't affect any other VRFs"
" routing decision."
)
for addr_type in ADDR_TYPES:
dut = "r2"
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
}
}
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
}
}
result = verify_rib(tgen, addr_type, dut, input_dict_1, expected=False)
assert (
result is not True
), "Testcase {} : Failed \n Expected Behaviour: Routes are rejected because nexthop-self config is deleted \n Error {}".format(
tc_name, result
)
result = verify_rib(tgen, addr_type, dut, input_dict_2, expected=False)
assert (
result is not True
), "Testcase {} : Failed \n Expected Behaviour: Routes are rejected because nexthop-self config is deleted \n Error {}".format(
tc_name, result
)
write_test_footer(tc_name)
@pytest.mark.parametrize("attribute", ["locPrf", "weight", "metric"])
def test_route_map_within_vrf_to_alter_bgp_attribute_p0(request, attribute):
"""
FUNC_12_b/c/d:
Configure route-maps within a VRF, to alter BGP attributes.
Verify that route-map doesn't affect any other VRF instances'
routing on DUT.
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
reset_config_on_routers(tgen)
if tgen.routers_have_failure():
check_router_status(tgen)
step(
"Advertise a set of BGP prefixes(IPv4+IPv6) from RED_1 and"
" RED_2 in vrf instances(RED_A and RED_B)."
)
for addr_type in ADDR_TYPES:
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
},
"red2": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
},
}
result = create_static_routes(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step(
"Advertise same set of BGP prefixes(IPv4+IPv6) from BLUE_1 and"
"BLUE_2 in vrf instances(BLUE_A and BLUE_B)"
)
for addr_type in ADDR_TYPES:
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
},
"blue2": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
},
}
result = create_static_routes(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Redistribute static..")
input_dict_3 = {}
for dut in ["red1", "red2", "blue1", "blue2"]:
temp = {dut: {"bgp": []}}
input_dict_3.update(temp)
if "red" in dut:
VRFS = ["RED_A", "RED_B"]
AS_NUM = [500, 500]
elif "blue" in dut:
VRFS = ["BLUE_A", "BLUE_B"]
AS_NUM = [800, 800]
for vrf, as_num in zip(VRFS, AS_NUM):
temp[dut]["bgp"].append(
{
"local_as": as_num,
"vrf": vrf,
"address_family": {
"ipv4": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
"ipv6": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
},
}
)
result = create_router_bgp(tgen, topo, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure a route-maps to influence BGP parameters - " " Local Preference")
for addr_type in ADDR_TYPES:
input_dict_4 = {
"r2": {
"route_maps": {
"rmap_r1_{}".format(addr_type): [
{"action": "permit", "set": {attribute: 120}}
],
"rmap_r3_{}".format(addr_type): [
{"action": "permit", "set": {attribute: 150}}
],
}
}
}
result = create_route_maps(tgen, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Configure neighbor for route map")
input_dict_4 = {
"r2": {
"bgp": [
{
"local_as": "100",
"vrf": "RED_A",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link1": {
"route_maps": [
{
"name": "rmap_r1_ipv4",
"direction": "in",
}
]
}
}
},
"r3": {
"dest_link": {
"r2-link1": {
"route_maps": [
{
"name": "rmap_r3_ipv4",
"direction": "in",
}
]
}
}
},
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link1": {
"route_maps": [
{
"name": "rmap_r1_ipv6",
"direction": "in",
}
]
}
}
},
"r3": {
"dest_link": {
"r2-link1": {
"route_maps": [
{
"name": "rmap_r3_ipv6",
"direction": "in",
}
]
}
}
},
}
}
},
},
},
{
"local_as": "100",
"vrf": "RED_B",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link2": {
"route_maps": [
{
"name": "rmap_r1_ipv4",
"direction": "in",
}
]
}
}
},
"r3": {
"dest_link": {
"r2-link2": {
"route_maps": [
{
"name": "rmap_r3_ipv4",
"direction": "in",
}
]
}
}
},
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link2": {
"route_maps": [
{
"name": "rmap_r1_ipv6",
"direction": "in",
}
]
}
}
},
"r3": {
"dest_link": {
"r2-link2": {
"route_maps": [
{
"name": "rmap_r3_ipv6",
"direction": "in",
}
]
}
}
},
}
}
},
},
},
{
"local_as": "100",
"vrf": "BLUE_A",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link3": {
"route_maps": [
{
"name": "rmap_r1_ipv4",
"direction": "in",
}
]
}
}
},
"r3": {
"dest_link": {
"r2-link3": {
"route_maps": [
{
"name": "rmap_r3_ipv4",
"direction": "in",
}
]
}
}
},
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link3": {
"route_maps": [
{
"name": "rmap_r1_ipv6",
"direction": "in",
}
]
}
}
},
"r3": {
"dest_link": {
"r2-link3": {
"route_maps": [
{
"name": "rmap_r3_ipv6",
"direction": "in",
}
]
}
}
},
}
}
},
},
},
{
"local_as": "100",
"vrf": "BLUE_B",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link4": {
"route_maps": [
{
"name": "rmap_r1_ipv4",
"direction": "in",
}
]
}
}
},
"r3": {
"dest_link": {
"r2-link4": {
"route_maps": [
{
"name": "rmap_r3_ipv4",
"direction": "in",
}
]
}
}
},
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link4": {
"route_maps": [
{
"name": "rmap_r1_ipv6",
"direction": "in",
}
]
}
}
},
"r3": {
"dest_link": {
"r2-link4": {
"route_maps": [
{
"name": "rmap_r3_ipv6",
"direction": "in",
}
]
}
}
},
}
}
},
},
},
]
}
}
result = create_router_bgp(tgen, topo, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that within vrf instances, BGP best path selection"
" algorithm remains intact and doesn't affect any other VRFs"
" routing decision."
)
dut = "r2"
for addr_type in ADDR_TYPES:
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
}
}
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
}
}
result = verify_best_path_as_per_bgp_attribute(
tgen, addr_type, dut, input_dict_1, attribute
)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
result = verify_best_path_as_per_bgp_attribute(
tgen, addr_type, dut, input_dict_2, attribute
)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
write_test_footer(tc_name)
def test_route_map_within_vrf_to_alter_bgp_attribute_aspath_p0(request):
"""
FUNC_12_e:
Configure route-maps within a VRF, to alter BGP attributes.
Verify that route-map doesn't affect any other VRF instances'
routing on DUT.
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
reset_config_on_routers(tgen)
if tgen.routers_have_failure():
check_router_status(tgen)
step(
"Advertise a set of BGP prefixes(IPv4+IPv6) from RED_1 and"
" RED_2 in vrf instances(RED_A and RED_B)."
)
for addr_type in ADDR_TYPES:
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
},
"red2": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
},
}
result = create_static_routes(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step(
"Advertise same set of BGP prefixes(IPv4+IPv6) from BLUE_1 and"
"BLUE_2 in vrf instances(BLUE_A and BLUE_B)"
)
for addr_type in ADDR_TYPES:
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
},
"blue2": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
},
}
result = create_static_routes(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Redistribute static..")
input_dict_3 = {}
for dut in ["red1", "red2", "blue1", "blue2"]:
temp = {dut: {"bgp": []}}
input_dict_3.update(temp)
if "red" in dut:
VRFS = ["RED_A", "RED_B"]
AS_NUM = [500, 500]
elif "blue" in dut:
VRFS = ["BLUE_A", "BLUE_B"]
AS_NUM = [800, 800]
for vrf, as_num in zip(VRFS, AS_NUM):
temp[dut]["bgp"].append(
{
"local_as": as_num,
"vrf": vrf,
"address_family": {
"ipv4": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
"ipv6": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
},
}
)
result = create_router_bgp(tgen, topo, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure a route-maps to influence BGP parameters - " " Local Preference")
for addr_type in ADDR_TYPES:
input_dict_4 = {
"r2": {
"route_maps": {
"rmap_r1_{}".format(addr_type): [
{
"action": "permit",
"set": {
"path": {"as_num": "111 222", "as_action": "prepend"}
},
}
]
}
}
}
result = create_route_maps(tgen, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Configure neighbor for route map")
input_dict_4 = {
"r2": {
"bgp": [
{
"local_as": "100",
"vrf": "RED_A",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link1": {
"route_maps": [
{
"name": "rmap_r1_ipv4",
"direction": "in",
}
]
}
}
},
"r3": {"dest_link": {"r2-link1": {}}},
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link1": {
"route_maps": [
{
"name": "rmap_r1_ipv6",
"direction": "in",
}
]
}
}
},
"r3": {"dest_link": {"r2-link1": {}}},
}
}
},
},
},
{
"local_as": "100",
"vrf": "RED_B",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link2": {
"route_maps": [
{
"name": "rmap_r1_ipv4",
"direction": "in",
}
]
}
}
},
"r3": {"dest_link": {"r2-link2": {}}},
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link2": {
"route_maps": [
{
"name": "rmap_r1_ipv6",
"direction": "in",
}
]
}
}
},
"r3": {"dest_link": {"r2-link2": {}}},
}
}
},
},
},
{
"local_as": "100",
"vrf": "BLUE_A",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link3": {
"route_maps": [
{
"name": "rmap_r1_ipv4",
"direction": "in",
}
]
}
}
},
"r3": {"dest_link": {"r2-link3": {}}},
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link3": {
"route_maps": [
{
"name": "rmap_r1_ipv6",
"direction": "in",
}
]
}
}
},
"r3": {"dest_link": {"r2-link3": {}}},
}
}
},
},
},
{
"local_as": "100",
"vrf": "BLUE_B",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link4": {
"route_maps": [
{
"name": "rmap_r1_ipv4",
"direction": "in",
}
]
}
}
},
"r3": {"dest_link": {"r2-link4": {}}},
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link4": {
"route_maps": [
{
"name": "rmap_r1_ipv6",
"direction": "in",
}
]
}
}
},
"r3": {"dest_link": {"r2-link4": {}}},
}
}
},
},
},
]
}
}
result = create_router_bgp(tgen, topo, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that within vrf instances, BGP best path selection"
" algorithm remains intact and doesn't affect any other VRFs"
" routing decision."
)
dut = "r2"
attribute = "path"
for addr_type in ADDR_TYPES:
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
}
}
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
}
}
result = verify_best_path_as_per_bgp_attribute(
tgen, addr_type, dut, input_dict_1, attribute
)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
result = verify_best_path_as_per_bgp_attribute(
tgen, addr_type, dut, input_dict_2, attribute
)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
write_test_footer(tc_name)
def test_route_map_within_vrf_to_alter_bgp_attribute_lcomm_p0(request):
"""
FUNC_12_f:
Configure route-maps within a VRF, to alter BGP attributes.
Verify that route-map doesn't affect any other VRF instances'
routing on DUT.
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
reset_config_on_routers(tgen)
if tgen.routers_have_failure():
check_router_status(tgen)
step(
"Advertise a set of BGP prefixes(IPv4+IPv6) from RED_1 and"
" RED_2 in vrf instances(RED_A and RED_B)."
)
for addr_type in ADDR_TYPES:
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
},
"red2": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
},
}
result = create_static_routes(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step(
"Advertise same set of BGP prefixes(IPv4+IPv6) from BLUE_1 and"
"BLUE_2 in vrf instances(BLUE_A and BLUE_B)"
)
for addr_type in ADDR_TYPES:
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
},
"blue2": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
},
}
result = create_static_routes(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Redistribute static..")
input_dict_3 = {}
for dut in ["red1", "red2", "blue1", "blue2"]:
temp = {dut: {"bgp": []}}
input_dict_3.update(temp)
if "red" in dut:
VRFS = ["RED_A", "RED_B"]
AS_NUM = [500, 500]
elif "blue" in dut:
VRFS = ["BLUE_A", "BLUE_B"]
AS_NUM = [800, 800]
for vrf, as_num in zip(VRFS, AS_NUM):
temp[dut]["bgp"].append(
{
"local_as": as_num,
"vrf": vrf,
"address_family": {
"ipv4": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
"ipv6": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
},
}
)
result = create_router_bgp(tgen, topo, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure a route-maps to influence BGP parameters - " " Large-community")
step("Create standard large commumity-list in r2")
for addr_type in ADDR_TYPES:
input_dict_1 = {
"r2": {
"bgp_community_lists": [
{
"community_type": "standard",
"action": "permit",
"name": "rmap_lcomm_{}".format(addr_type),
"value": "1:1:1 1:2:3 2:1:1 2:2:2",
"large": True,
}
]
}
}
result = create_bgp_community_lists(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
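    # NOTE (sketch): the list created above is assumed to correspond to FRR's
    #   bgp large-community-list standard rmap_lcomm_ipv4 permit 1:1:1 1:2:3 2:1:1 2:2:2
    # (plus the ipv6-suffixed copy), later referenced by the "match" route-map on r2.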
step("Create route-maps in red1 and r1")
for addr_type in ADDR_TYPES:
input_dict_4 = {
"red1": {
"route_maps": {
"rmap_red1_{}".format(addr_type): [
{
"action": "permit",
"set": {
"large_community": {"num": "1:1:1 1:2:3 2:1:1 2:2:2"}
},
}
]
}
},
"r2": {
"route_maps": {
"rmap_r1_{}".format(addr_type): [
{
"action": "permit",
"match": {
"large_community_list": {
"id": "rmap_lcomm_" + addr_type
}
},
}
]
}
},
}
result = create_route_maps(tgen, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Configure neighbor for route map in red1")
input_dict_4 = {
"red1": {
"bgp": [
{
"local_as": "500",
"vrf": "RED_A",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"red1-link1": {
"route_maps": [
{
"name": "rmap_red1_ipv4",
"direction": "out",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"red1-link1": {
"route_maps": [
{
"name": "rmap_red1_ipv6",
"direction": "out",
}
]
}
}
}
}
}
},
},
},
{
"local_as": "500",
"vrf": "RED_B",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"red1-link2": {
"route_maps": [
{
"name": "rmap_red1_ipv4",
"direction": "out",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"red1-link2": {
"route_maps": [
{
"name": "rmap_red1_ipv6",
"direction": "out",
}
]
}
}
}
}
}
},
},
},
]
}
}
result = create_router_bgp(tgen, topo, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure neighbor for route map in r2")
input_dict_4 = {
"r2": {
"bgp": [
{
"local_as": "100",
"vrf": "RED_A",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link1": {
"route_maps": [
{
"name": "rmap_r1_ipv4",
"direction": "in",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link1": {
"route_maps": [
{
"name": "rmap_r1_ipv6",
"direction": "in",
}
]
}
}
}
}
}
},
},
},
{
"local_as": "100",
"vrf": "RED_B",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link2": {
"route_maps": [
{
"name": "rmap_r1_ipv4",
"direction": "in",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link2": {
"route_maps": [
{
"name": "rmap_r1_ipv6",
"direction": "in",
}
]
}
}
}
}
}
},
},
},
{
"local_as": "100",
"vrf": "BLUE_A",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link3": {
"route_maps": [
{
"name": "rmap_r1_ipv4",
"direction": "in",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link3": {
"route_maps": [
{
"name": "rmap_r1_ipv6",
"direction": "in",
}
]
}
}
}
}
}
},
},
},
{
"local_as": "100",
"vrf": "BLUE_B",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link4": {
"route_maps": [
{
"name": "rmap_r1_ipv4",
"direction": "in",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"r2-link4": {
"route_maps": [
{
"name": "rmap_r1_ipv6",
"direction": "in",
}
]
}
}
}
}
}
},
},
},
]
}
}
result = create_router_bgp(tgen, topo, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"All the prefixes advertised from RED_1 and BLUE_1 should carry"
" attributes set by outbound route-maps within specific vrfs. "
"Router R1 should be able to match and permit/deny those "
"prefixes based on received attributes. Please use below "
"commands to verify."
)
input_dict = {
"largeCommunity": "1:1:1 1:2:3 2:1:1 2:2:2",
}
for addr_type in ADDR_TYPES:
vrf = "RED_A"
routes = [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]]
result = verify_bgp_community(tgen, addr_type, "r2", routes, input_dict, vrf)
assert result is True, "Test case {} : Failed \n Error: {}".format(
tc_name, result
)
for addr_type in ADDR_TYPES:
vrf = "RED_B"
routes = [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]]
result = verify_bgp_community(tgen, addr_type, "r2", routes, input_dict, vrf)
assert result is True, "Test case {} : Failed \n Error: {}".format(
tc_name, result
)
write_test_footer(tc_name)
def test_route_map_match_traffic_based_on_vrf_p0(request):
"""
FUNC_13:
Configure a route-map on DUT to match traffic based
    on VRF interfaces.
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
reset_config_on_routers(tgen)
if tgen.routers_have_failure():
check_router_status(tgen)
step(
"Advertise unique BGP prefixes(IPv4+IPv6) from RED_1 "
"in vrf instances(RED_A and RED_B)."
)
for addr_type in ADDR_TYPES:
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step(
"Advertise unique BGP prefixes(IPv4+IPv6) from from BLUE_1 in"
" vrf instances(BLUE_A and BLUE_B)."
)
for addr_type in ADDR_TYPES:
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK3_1[addr_type]] + [NETWORK3_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK4_1[addr_type]] + [NETWORK4_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Redistribute static..")
input_dict_3 = {}
for dut in ["red1", "blue1"]:
temp = {dut: {"bgp": []}}
input_dict_3.update(temp)
if "red" in dut:
VRFS = ["RED_A", "RED_B"]
AS_NUM = [500, 500]
elif "blue" in dut:
VRFS = ["BLUE_A", "BLUE_B"]
AS_NUM = [800, 800]
for vrf, as_num in zip(VRFS, AS_NUM):
temp[dut]["bgp"].append(
{
"local_as": as_num,
"vrf": vrf,
"address_family": {
"ipv4": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
"ipv6": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
},
}
)
result = create_router_bgp(tgen, topo, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Configure a route-map on R1 to match the prefixes "
"coming from vrf RED_A and set as-prepend to these routes."
)
input_dict_4 = {
"r1": {
"route_maps": {
"ABC": [
{
"action": "permit",
"match": {"source-vrf": "RED_A"},
"set": {"path": {"as_num": 1, "as_action": "prepend"}},
}
]
}
}
}
result = create_route_maps(tgen, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
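    # NOTE (sketch): "match source-vrf" keys the route-map on the VRF a leaked
    # route was imported from; route-map ABC is assumed to render as:
    #
    #   route-map ABC permit 10
    #    match source-vrf RED_A
    #    set as-path prepend 1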
step(
"On R1, import the routes form vrf RED_A and RED_B to BLUE_A and"
" apply the route-map under vrf BLUE_A while importing"
)
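    # The raw vtysh block below turns on VRF-to-VRF import on r1: BLUE_A imports
    # the RED_A and RED_B tables, and "import vrf route-map ABC" runs the imported
    # routes through the route-map defined above.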
raw_config = {
"r1": {
"raw_config": [
"router bgp 100 vrf BLUE_A",
"address-family ipv4 unicast",
"import vrf RED_A",
"import vrf RED_B",
"import vrf route-map ABC",
"address-family ipv6 unicast",
"import vrf RED_A",
"import vrf RED_B",
"import vrf route-map ABC",
]
}
}
result = apply_raw_config(tgen, raw_config)
assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result)
step(
"All the prefixes advertised from RED_1 and BLUE_1 in vrfs "
"RED_B and BLUE_B must prepend the AS number in as-path on R2."
)
for addr_type in ADDR_TYPES:
input_dict_7 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK3_1[addr_type]] + [NETWORK3_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
]
}
}
result = verify_bgp_rib(tgen, addr_type, "r1", input_dict_7)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
write_test_footer(tc_name)
def test_vrf_lite_with_static_bgp_originated_routes_p0(request):
"""
FUNC_14:
Test VRF-lite with Static+BGP originated routes.
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
reset_config_on_routers(tgen)
if tgen.routers_have_failure():
check_router_status(tgen)
step(
"Advertise unique BGP prefixes(IPv4+IPv6) from from RED_1"
" in vrf instances(RED_A and RED_B)."
)
for addr_type in ADDR_TYPES:
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step(
"Advertise unique BGP prefixes(IPv4+IPv6) from from BLUE_1 in"
" vrf instances(BLUE_A and BLUE_B)."
)
for addr_type in ADDR_TYPES:
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
input_dict_3 = {
"red1": {
"bgp": [
{
"local_as": "500",
"vrf": "RED_A",
"address_family": {
"ipv4": {
"unicast": {
"advertise_networks": [
{
"network": [NETWORK5_1["ipv4"]]
+ [NETWORK5_2["ipv4"]]
}
],
"redistribute": [{"redist_type": "static"}],
}
},
"ipv6": {
"unicast": {
"advertise_networks": [
{
"network": [NETWORK5_1["ipv6"]]
+ [NETWORK5_2["ipv6"]]
}
],
"redistribute": [{"redist_type": "static"}],
}
},
},
},
{
"local_as": "500",
"vrf": "RED_B",
"address_family": {
"ipv4": {
"unicast": {
"advertise_networks": [
{
"network": [NETWORK6_1["ipv4"]]
+ [NETWORK6_2["ipv4"]]
}
],
"redistribute": [{"redist_type": "static"}],
}
},
"ipv6": {
"unicast": {
"advertise_networks": [
{
"network": [NETWORK6_1["ipv6"]]
+ [NETWORK6_2["ipv6"]]
}
],
"redistribute": [{"redist_type": "static"}],
}
},
},
},
]
},
"blue1": {
"bgp": [
{
"local_as": "800",
"vrf": "BLUE_A",
"address_family": {
"ipv4": {
"unicast": {
"advertise_networks": [
{
"network": [NETWORK7_1["ipv4"]]
+ [NETWORK7_2["ipv4"]]
}
],
"redistribute": [{"redist_type": "static"}],
}
},
"ipv6": {
"unicast": {
"advertise_networks": [
{
"network": [NETWORK7_1["ipv6"]]
+ [NETWORK7_2["ipv6"]]
}
],
"redistribute": [{"redist_type": "static"}],
}
},
},
},
{
"local_as": "800",
"vrf": "BLUE_B",
"address_family": {
"ipv4": {
"unicast": {
"advertise_networks": [
{
"network": [NETWORK8_1["ipv4"]]
+ [NETWORK8_2["ipv4"]]
}
],
"redistribute": [{"redist_type": "static"}],
}
},
"ipv6": {
"unicast": {
"advertise_networks": [
{
"network": [NETWORK8_1["ipv6"]]
+ [NETWORK8_2["ipv6"]]
}
],
"redistribute": [{"redist_type": "static"}],
}
},
},
},
]
},
}
result = create_router_bgp(tgen, topo, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Static routes must be installed in associated VRF" " table only.")
for addr_type in ADDR_TYPES:
dut = "r1"
result = verify_bgp_rib(tgen, addr_type, dut, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
step(
"All the routers must receive advertised as well as "
"redistributed(static) prefixes in associated VRF tables."
)
for addr_type in ADDR_TYPES:
dut = "r1"
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
}
}
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
}
}
result = verify_rib(tgen, addr_type, dut, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
result = verify_rib(tgen, addr_type, dut, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
write_test_footer(tc_name)
def test_prefix_list_to_permit_deny_prefixes_p0(request):
"""
FUNC_15:
Configure prefix-lists on DUT and apply to BGP peers to
permit/deny prefixes.
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
reset_config_on_routers(tgen)
if tgen.routers_have_failure():
check_router_status(tgen)
step(
"Advertise unique BGP prefixes(IPv4+IPv6) from from RED_1"
" in vrf instances(RED_A and RED_B)."
)
for addr_type in ADDR_TYPES:
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step(
"Advertise unique BGP prefixes(IPv4+IPv6) from from BLUE_1 in"
" vrf instances(BLUE_A and BLUE_B)."
)
for addr_type in ADDR_TYPES:
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Redistribute static..")
input_dict_3 = {}
for dut in ["red1", "blue1"]:
temp = {dut: {"bgp": []}}
input_dict_3.update(temp)
if "red" in dut:
VRFS = ["RED_A", "RED_B"]
AS_NUM = [500, 500]
elif "blue" in dut:
VRFS = ["BLUE_A", "BLUE_B"]
AS_NUM = [800, 800]
for vrf, as_num in zip(VRFS, AS_NUM):
temp[dut]["bgp"].append(
{
"local_as": as_num,
"vrf": vrf,
"address_family": {
"ipv4": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
"ipv6": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
},
}
)
result = create_router_bgp(tgen, topo, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Verify routes are present before applying prefix-list")
for addr_type in ADDR_TYPES:
dut = "r1"
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
}
}
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
}
}
result = verify_rib(tgen, addr_type, dut, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
result = verify_rib(tgen, addr_type, dut, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
step(
"On routers RED_1 and BLUE_1, configure prefix-lists to permit"
" 4 prefixes and deny 1 prefix x.x.x.5. Apply these in outbound"
"direction for each neighbour."
)
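    # NOTE (sketch): the prefix-list entries configured below are assumed to render
    # as plain FRR prefix-lists, e.g. on red1 for ipv4:
    #
    #   ip prefix-list pflist_red1_ipv4 seq 10 permit <NETWORK1_1>
    #   ip prefix-list pflist_red1_ipv4 seq 11 permit <NETWORK2_1>
    #   ip prefix-list pflist_red1_ipv4 seq 12 deny <NETWORK1_2>
    #   ip prefix-list pflist_red1_ipv4 seq 13 deny <NETWORK2_2>
    #
    # applied per neighbor with "neighbor <peer> prefix-list pflist_red1_ipv4 out".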
for addr_type in ADDR_TYPES:
input_dict_4 = {
"red1": {
"prefix_lists": {
addr_type: {
"pflist_red1_{}".format(addr_type): [
{
"seqid": 10,
"network": NETWORK1_1[addr_type],
"action": "permit",
},
{
"seqid": 11,
"network": NETWORK2_1[addr_type],
"action": "permit",
},
{
"seqid": 12,
"network": NETWORK1_2[addr_type],
"action": "deny",
},
{
"seqid": 13,
"network": NETWORK2_2[addr_type],
"action": "deny",
},
]
}
}
},
"blue1": {
"prefix_lists": {
addr_type: {
"pflist_blue1_{}".format(addr_type): [
{
"seqid": 10,
"network": NETWORK1_1[addr_type],
"action": "permit",
},
{
"seqid": 11,
"network": NETWORK2_1[addr_type],
"action": "permit",
},
{
"seqid": 12,
"network": NETWORK1_2[addr_type],
"action": "deny",
},
{
"seqid": 13,
"network": NETWORK2_2[addr_type],
"action": "deny",
},
]
}
}
},
"r1": {
"prefix_lists": {
addr_type: {
"pflist_r1_{}".format(addr_type): [
{
"seqid": 10,
"network": NETWORK1_1[addr_type],
"action": "permit",
},
{
"seqid": 11,
"network": NETWORK2_1[addr_type],
"action": "deny",
},
]
}
}
},
}
result = create_prefix_lists(tgen, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
input_dict_5 = {
"red1": {
"bgp": [
{
"local_as": "500",
"vrf": "RED_A",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"red1-link1": {
"prefix_lists": [
{
"name": "pflist_red1_ipv4",
"direction": "out",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"red1-link1": {
"prefix_lists": [
{
"name": "pflist_red1_ipv6",
"direction": "out",
}
]
}
}
}
}
}
},
},
},
{
"local_as": "500",
"vrf": "RED_B",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"red1-link2": {
"prefix_lists": [
{
"name": "pflist_red1_ipv4",
"direction": "out",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"red1-link2": {
"prefix_lists": [
{
"name": "pflist_red1_ipv6",
"direction": "out",
}
]
}
}
}
}
}
},
},
},
]
},
"blue1": {
"bgp": [
{
"local_as": "800",
"vrf": "BLUE_A",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"blue1-link1": {
"prefix_lists": [
{
"name": "pflist_blue1_ipv4",
"direction": "out",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"blue1-link1": {
"prefix_lists": [
{
"name": "pflist_blue1_ipv6",
"direction": "out",
}
]
}
}
}
}
}
},
},
},
{
"local_as": "800",
"vrf": "BLUE_B",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"blue1-link2": {
"prefix_lists": [
{
"name": "pflist_blue1_ipv4",
"direction": "out",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"blue1-link2": {
"prefix_lists": [
{
"name": "pflist_blue1_ipv6",
"direction": "out",
}
]
}
}
}
}
}
},
},
},
]
},
}
result = create_router_bgp(tgen, topo, input_dict_5)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that within vrf instances, each BGP neighbor receives 1"
" prefixes in routing table and drops (x.x.x.2)."
)
for addr_type in ADDR_TYPES:
dut = "r1"
permitted_routes = {
"red1": {
"static_routes": [
{"network": [NETWORK1_1[addr_type]], "vrf": "RED_A"},
{"network": [NETWORK2_1[addr_type]], "vrf": "RED_B"},
]
}
}
denied_routes = {
"red1": {
"static_routes": [
{"network": [NETWORK1_2[addr_type]], "vrf": "RED_A"},
{"network": [NETWORK2_2[addr_type]], "vrf": "RED_B"},
]
}
}
result = verify_rib(tgen, addr_type, dut, permitted_routes)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
result = verify_rib(tgen, addr_type, dut, denied_routes, expected=False)
        assert (
            result is not True
        ), "Testcase {} : Failed \nExpected behaviour: Routes are denied by prefix-list \nError {}".format(
            tc_name, result
        )
step(
"On router R1, configure prefix-lists to permit 2 "
"prefixes(x.x.x.1-2) and deny 2 prefix(x.x.x.3-4). Apply"
" these in inbound direction for each neighbour."
)
input_dict_6 = {
"r1": {
"bgp": [
{
"local_as": "100",
"vrf": "RED_A",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"red1": {
"dest_link": {
"r1-link1": {
"prefix_lists": [
{
"name": "pflist_r1_ipv4",
"direction": "in",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"red1": {
"dest_link": {
"r1-link1": {
"prefix_lists": [
{
"name": "pflist_r1_ipv6",
"direction": "in",
}
]
}
}
}
}
}
},
},
},
{
"local_as": "100",
"vrf": "RED_B",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"red1": {
"dest_link": {
"r1-link2": {
"prefix_lists": [
{
"name": "pflist_r1_ipv4",
"direction": "in",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"red1": {
"dest_link": {
"r1-link2": {
"prefix_lists": [
{
"name": "pflist_r1_ipv6",
"direction": "in",
}
]
}
}
}
}
}
},
},
},
{
"local_as": "100",
"vrf": "BLUE_A",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"blue1": {
"dest_link": {
"r1-link1": {
"prefix_lists": [
{
"name": "pflist_r1_ipv4",
"direction": "in",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"blue1": {
"dest_link": {
"r1-link1": {
"prefix_lists": [
{
"name": "pflist_r1_ipv6",
"direction": "in",
}
]
}
}
}
}
}
},
},
},
{
"local_as": "100",
"vrf": "BLUE_B",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"blue1": {
"dest_link": {
"r1-link2": {
"prefix_lists": [
{
"name": "pflist_r1_ipv4",
"direction": "in",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"blue1": {
"dest_link": {
"r1-link2": {
"prefix_lists": [
{
"name": "pflist_r1_ipv6",
"direction": "in",
}
]
}
}
}
}
}
},
},
},
]
}
}
result = create_router_bgp(tgen, topo, input_dict_6)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that within vrf instances, each BGP neighbor installs"
" only 1 prefix (x.x.x.1)."
)
for addr_type in ADDR_TYPES:
dut = "r2"
permitted_routes = {
"red1": {
"static_routes": [{"network": [NETWORK1_1[addr_type]], "vrf": "RED_A"}]
}
}
denied_routes = {
"red1": {
"static_routes": [{"network": [NETWORK2_1[addr_type]], "vrf": "RED_A"}]
}
}
result = verify_rib(tgen, addr_type, dut, permitted_routes)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
result = verify_rib(tgen, addr_type, dut, denied_routes, expected=False)
assert (
result is not True
), "Testcase {} : Failed \nExpected behaviour: Routes are denied by prefix-list \nError {}".format(
tc_name, result
)
write_test_footer(tc_name)
def test_route_map_set_and_match_tag_p0(request):
"""
FUNC_16_1:
    Configure a route-map on DUT to match traffic based on various
    match/set clauses.
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
reset_config_on_routers(tgen)
if tgen.routers_have_failure():
check_router_status(tgen)
step(
"Advertise unique BGP prefixes(IPv4+IPv6) from RED_1"
" in vrf instances(RED_A and RED_B)."
)
for addr_type in ADDR_TYPES:
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"tag": 4001,
"vrf": "RED_A",
},
{
"network": [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step(
"Advertise same set of BGP prefixes(IPv4+IPv6) from BLUE_1 and"
"BLUE_2 in vrf instances(BLUE_A and BLUE_B)"
)
for addr_type in ADDR_TYPES:
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK3_1[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"tag": 4001,
"vrf": "BLUE_A",
},
{
"network": [NETWORK3_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK4_1[addr_type]] + [NETWORK4_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Redistribute static..")
input_dict_3 = {}
for dut in ["red1", "blue1"]:
temp = {dut: {"bgp": []}}
input_dict_3.update(temp)
if "red" in dut:
VRFS = ["RED_A", "RED_B"]
AS_NUM = [500, 500]
elif "blue" in dut:
VRFS = ["BLUE_A", "BLUE_B"]
AS_NUM = [800, 800]
for vrf, as_num in zip(VRFS, AS_NUM):
temp[dut]["bgp"].append(
{
"local_as": as_num,
"vrf": vrf,
"address_family": {
"ipv4": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
"ipv6": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
},
}
)
result = create_router_bgp(tgen, topo, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure a route-maps to match tag")
for addr_type in ADDR_TYPES:
input_dict_4 = {
"red1": {
"route_maps": {
"rmap1_{}".format(addr_type): [
{"action": "permit", "match": {addr_type: {"tag": "4001"}}}
]
}
}
}
result = create_route_maps(tgen, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Configure neighbor for route map")
input_dict_4 = {
"red1": {
"bgp": [
{
"local_as": "500",
"vrf": "RED_A",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"red1-link1": {
"route_maps": [
{
"name": "rmap1_ipv4",
"direction": "out",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"red1-link1": {
"route_maps": [
{
"name": "rmap1_ipv6",
"direction": "out",
}
]
}
}
}
}
}
},
},
},
{
"local_as": "500",
"vrf": "RED_B",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"red1-link2": {
"route_maps": [
{
"name": "rmap1_ipv4",
"direction": "out",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"red1-link2": {
"route_maps": [
{
"name": "rmap1_ipv6",
"direction": "out",
}
]
}
}
}
}
}
},
},
},
]
}
}
result = create_router_bgp(tgen, topo, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that within vrf instances, BGP best path selection"
" algorithm remains intact and doesn't affect any other VRFs"
" routing decision."
)
dut = "r1"
for addr_type in ADDR_TYPES:
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"tag": 4001,
"vrf": "RED_A",
}
]
}
}
result = verify_rib(tgen, addr_type, dut, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
for addr_type in ADDR_TYPES:
input_dict_2 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
}
}
result = verify_rib(tgen, addr_type, dut, input_dict_2, expected=False)
assert (
result is not True
), "Testcase {} : Failed \n Expected Behavior: Routes are denied \nError {}".format(
tc_name, result
)
write_test_footer(tc_name)
def test_route_map_set_and_match_metric_p0(request):
"""
FUNC_16_2:
    Configure a route-map on DUT to match traffic based on various
    match/set clauses.
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
reset_config_on_routers(tgen)
if tgen.routers_have_failure():
check_router_status(tgen)
step(
"Advertise unique BGP prefixes(IPv4+IPv6) from RED_1"
" in vrf instances(RED_A and RED_B)."
)
for addr_type in ADDR_TYPES:
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step(
"Advertise same set of BGP prefixes(IPv4+IPv6) from BLUE_1 and"
"BLUE_2 in vrf instances(BLUE_A and BLUE_B)"
)
for addr_type in ADDR_TYPES:
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK3_1[addr_type]] + [NETWORK3_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK4_1[addr_type]] + [NETWORK4_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Redistribute static..")
input_dict_3 = {
"red1": {
"bgp": [
{
"local_as": "500",
"vrf": "RED_A",
"address_family": {
"ipv4": {
"unicast": {
"redistribute": [
{
"redist_type": "static",
"attribute": {"metric": 123},
}
]
}
},
"ipv6": {
"unicast": {
"redistribute": [
{
"redist_type": "static",
"attribute": {"metric": 123},
}
]
}
},
},
},
{
"local_as": "500",
"vrf": "RED_B",
"address_family": {
"ipv4": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
"ipv6": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
},
},
]
},
"blue1": {
"bgp": [
{
"local_as": "800",
"vrf": "BLUE_A",
"address_family": {
"ipv4": {
"unicast": {
"redistribute": [
{
"redist_type": "static",
"attribute": {"metric": 123},
}
]
}
},
"ipv6": {
"unicast": {
"redistribute": [
{
"redist_type": "static",
"attribute": {"metric": 123},
}
]
}
},
},
},
{
"local_as": "800",
"vrf": "BLUE_B",
"address_family": {
"ipv4": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
"ipv6": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
},
},
]
},
}
result = create_router_bgp(tgen, topo, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Configure a route-maps to match tag")
for addr_type in ADDR_TYPES:
input_dict_4 = {
"r1": {
"route_maps": {
"rmap1_{}".format(addr_type): [
{"action": "permit", "match": {"metric": 123}}
]
}
}
}
result = create_route_maps(tgen, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Configure neighbor for route map")
input_dict_4 = {
"r1": {
"bgp": [
{
"local_as": "100",
"vrf": "RED_A",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"red1": {
"dest_link": {
"r1-link1": {
"route_maps": [
{
"name": "rmap1_ipv4",
"direction": "in",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"red1": {
"dest_link": {
"r1-link1": {
"route_maps": [
{
"name": "rmap1_ipv6",
"direction": "in",
}
]
}
}
}
}
}
},
},
},
{
"local_as": "100",
"vrf": "RED_B",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"red1": {
"dest_link": {
"r1-link2": {
"route_maps": [
{
"name": "rmap1_ipv4",
"direction": "in",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"red1": {
"dest_link": {
"r1-link2": {
"route_maps": [
{
"name": "rmap1_ipv6",
"direction": "in",
}
]
}
}
}
}
}
},
},
},
{
"local_as": "100",
"vrf": "BLUE_A",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"blue1": {
"dest_link": {
"r1-link1": {
"route_maps": [
{
"name": "rmap1_ipv4",
"direction": "in",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"blue1": {
"dest_link": {
"r1-link1": {
"route_maps": [
{
"name": "rmap1_ipv6",
"direction": "in",
}
]
}
}
}
}
}
},
},
},
{
"local_as": "100",
"vrf": "BLUE_B",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"blue1": {
"dest_link": {
"r1-link2": {
"route_maps": [
{
"name": "rmap1_ipv4",
"direction": "in",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"blue1": {
"dest_link": {
"r1-link2": {
"route_maps": [
{
"name": "rmap1_ipv6",
"direction": "in",
}
]
}
}
}
}
}
},
},
},
]
}
}
result = create_router_bgp(tgen, topo, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that within vrf instances, BGP best path selection"
" algorithm remains intact and doesn't affect any other VRFs"
" routing decision."
)
dut = "r1"
for addr_type in ADDR_TYPES:
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
}
]
}
}
result = verify_rib(tgen, addr_type, dut, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
for addr_type in ADDR_TYPES:
input_dict_2 = {
"red1": {
"static_routes": [
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
}
]
}
}
result = verify_rib(tgen, addr_type, dut, input_dict_2, expected=False)
assert (
result is not True
), "Testcase {} : Failed \n Expected Behavior: Routes are denied \nError {}".format(
tc_name, result
)
write_test_footer(tc_name)
def test_route_map_set_and_match_community_p0(request):
"""
FUNC_16_3:
    Configure a route-map on DUT to match traffic based on various
    match/set clauses.
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
reset_config_on_routers(tgen)
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
step(
"Advertise unique BGP prefixes(IPv4+IPv6) from RED_1"
" in vrf instances(RED_A and RED_B)."
)
for addr_type in ADDR_TYPES:
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step(
"Advertise same set of BGP prefixes(IPv4+IPv6) from BLUE_1 and"
"BLUE_2 in vrf instances(BLUE_A and BLUE_B)"
)
for addr_type in ADDR_TYPES:
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK3_1[addr_type]] + [NETWORK3_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK4_1[addr_type]] + [NETWORK4_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Redistribute static..")
input_dict_3 = {}
for dut in ["red1", "blue1"]:
temp = {dut: {"bgp": []}}
input_dict_3.update(temp)
if "red" in dut:
VRFS = ["RED_A", "RED_B"]
AS_NUM = [500, 500]
elif "blue" in dut:
VRFS = ["BLUE_A", "BLUE_B"]
AS_NUM = [800, 800]
for vrf, as_num in zip(VRFS, AS_NUM):
temp[dut]["bgp"].append(
{
"local_as": as_num,
"vrf": vrf,
"address_family": {
"ipv4": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
"ipv6": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
},
}
)
result = create_router_bgp(tgen, topo, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Create community-list")
for addr_type in ADDR_TYPES:
input_dict_4 = {
"r1": {
"bgp_community_lists": [
{
"community_type": "standard",
"action": "permit",
"name": "rmap_lcomm_{}".format(addr_type),
"value": "1:1 1:2 1:3 1:4 1:5",
}
]
}
}
result = create_bgp_community_lists(tgen, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Configure a route-maps to match tag")
step("Create route-maps in red1 and r1")
for addr_type in ADDR_TYPES:
input_dict_4 = {
"red1": {
"route_maps": {
"rmap_red1_{}".format(addr_type): [
{
"action": "permit",
"set": {"community": {"num": "1:1 1:2 1:3 1:4 1:5"}},
}
]
}
},
"r1": {
"route_maps": {
"rmap1_{}".format(addr_type): [
{
"action": "permit",
"match": {
"community_list": {"id": "rmap_lcomm_" + addr_type}
},
}
]
}
},
}
result = create_route_maps(tgen, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Configure neighbor for route map")
input_dict_4 = {
"red1": {
"bgp": [
{
"local_as": "500",
"vrf": "RED_A",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"red1-link1": {
"route_maps": [
{
"name": "rmap_red1_ipv4",
"direction": "out",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"red1-link1": {
"route_maps": [
{
"name": "rmap_red1_ipv6",
"direction": "out",
}
]
}
}
}
}
}
},
},
},
{
"local_as": "500",
"vrf": "RED_B",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"red1-link2": {
"route_maps": [
{
"name": "rmap_red1_ipv4",
"direction": "out",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r1": {
"dest_link": {
"red1-link2": {
"route_maps": [
{
"name": "rmap_red1_ipv6",
"direction": "out",
}
]
}
}
}
}
}
},
},
},
]
},
"r1": {
"bgp": [
{
"local_as": "100",
"vrf": "RED_A",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"red1": {
"dest_link": {
"r1-link1": {
"route_maps": [
{
"name": "rmap1_ipv4",
"direction": "in",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"red1": {
"dest_link": {
"r1-link1": {
"route_maps": [
{
"name": "rmap1_ipv6",
"direction": "in",
}
]
}
}
}
}
}
},
},
},
{
"local_as": "100",
"vrf": "RED_B",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"red1": {
"dest_link": {
"r1-link2": {
"route_maps": [
{
"name": "rmap1_ipv4",
"direction": "in",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"red1": {
"dest_link": {
"r1-link2": {
"route_maps": [
{
"name": "rmap1_ipv6",
"direction": "in",
}
]
}
}
}
}
}
},
},
},
{
"local_as": "100",
"vrf": "BLUE_A",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"blue1": {
"dest_link": {
"r1-link1": {
"route_maps": [
{
"name": "rmap1_ipv4",
"direction": "in",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"blue1": {
"dest_link": {
"r1-link1": {
"route_maps": [
{
"name": "rmap1_ipv6",
"direction": "in",
}
]
}
}
}
}
}
},
},
},
{
"local_as": "100",
"vrf": "BLUE_B",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"blue1": {
"dest_link": {
"r1-link2": {
"route_maps": [
{
"name": "rmap1_ipv4",
"direction": "in",
}
]
}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"blue1": {
"dest_link": {
"r1-link2": {
"route_maps": [
{
"name": "rmap1_ipv6",
"direction": "in",
}
]
}
}
}
}
}
},
},
},
]
},
}
result = create_router_bgp(tgen, topo, input_dict_4)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"All the prefixes advertised from RED_1 and BLUE_1 should carry"
" attributes set by outbound route-maps within specific vrfs. "
"Router R1 should be able to match and permit/deny those "
"prefixes based on received attributes. Please use below "
"commands to verify."
)
input_dict = {
"community": "1:1 1:2 1:3 1:4 1:5",
}
for addr_type in ADDR_TYPES:
vrf = "RED_A"
routes = [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]]
result = verify_bgp_community(tgen, addr_type, "r1", routes, input_dict, vrf)
assert result is True, "Test case {} : Failed \n Error: {}".format(
tc_name, result
)
for addr_type in ADDR_TYPES:
vrf = "RED_B"
routes = [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]]
result = verify_bgp_community(tgen, addr_type, "r1", routes, input_dict, vrf)
assert result is True, "Test case {} : Failed \n Error: {}".format(
tc_name, result
)
write_test_footer(tc_name)
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
| freerangerouting/frr | tests/topotests/bgp_multi_vrf_topo1/test_bgp_multi_vrf_topo1.py | Python | gpl-2.0 | 229,521 |
# $HeadURL: $
'''
:mod: Utils
Module that collects utility functions.
'''
import fnmatch
from DIRAC import gConfig, S_OK
from DIRAC.Core.Utilities import List
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
__RCSID__ = '$Id: $'
def voimport( base_mod ):
'''
  Import the given module from one of the configured extensions; if no
  extension provides it, fall back to the DIRAC base module.
'''
# FIXME: A.T.: Use Core.Utilities.ObjectLoader
for ext in gConfig.getValue( 'DIRAC/Extensions', [] ):
try:
return __import__( ext + base_mod, globals(), locals(), ['*'] )
except ImportError:
continue
# If not found in extensions, import it in DIRAC base.
return __import__( base_mod, globals(), locals(), ['*'] )
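# Illustrative sketch (not part of the original module; extension and module
# names below are invented): with a hypothetical 'LHCb' entry under
# /DIRAC/Extensions, a call such as
#   voimport( 'DIRAC.ResourceStatusSystem.Utilities.SomeModule' )
# first tries to import 'LHCbDIRAC.ResourceStatusSystem.Utilities.SomeModule'
# and falls back to the plain DIRAC module if the extension does not provide it.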
def getCSTree( csPath = '' ):
'''
Gives the configuration rooted at path in a Python dict. The
result is a Python dictionary that reflects the structure of the
configuration file.
'''
opHelper = Operations()
def getCSTreeAsDict( treePath ):
'''
Function to recursively iterate over a CS tree
'''
csTreeDict = {}
opts = opHelper.getOptionsDict( treePath )
if opts[ 'OK' ]:
opts = opts[ 'Value' ]
for optKey, optValue in opts.items():
if optValue.find( ',' ) > -1:
optValue = List.fromChar( optValue )
else:
optValue = [ optValue ]
csTreeDict[ optKey ] = optValue
secs = opHelper.getSections( treePath )
if secs[ 'OK' ]:
secs = secs[ 'Value' ]
for sec in secs:
secTree = getCSTreeAsDict( '%s/%s' % ( treePath, sec ) )
if not secTree[ 'OK' ]:
return secTree
csTreeDict[ sec ] = secTree[ 'Value' ]
return S_OK( csTreeDict )
return getCSTreeAsDict( csPath )
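# Illustrative sketch (not part of the original module; section and option names
# are invented): sub-sections come back as nested dicts and every option value is
# a list, with comma-separated options split into their items, e.g.
#   getCSTree( 'Shifter' ) could return
#   { 'DataManager': { 'User': [ 'someuser' ], 'Group': [ 'diracAdmin', 'prodMan' ] } }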
def configMatch( candidateParams, configParams ):
'''
  Matches a dictionary of candidate parameters against the given config params:
  - a candidate param that is missing from the config params is ignored
  - a candidate param set to None is treated as a wildcard and always matches
  - any other candidate param must match at least one of the config values
    (UNIX-like wildcards are allowed on the CS side), otherwise the candidate
    is rejected
'''
for key in candidateParams:
if not key in configParams:
# The candidateParams is missing one of the parameters required
# return False
continue
if candidateParams[ key ] is None:
# None is assumed to be a wildcard (*)
continue
cParameter = candidateParams[ key ]
if not isinstance( cParameter, list ):
cParameter = [ cParameter ]
# We allow using UNIX-like regular expression ( wild-cards ) on the CS
_matches = False
for configItem in configParams[ key ]:
if fnmatch.filter( set( cParameter ), configItem ):
_matches = True
break
if not _matches:
return False
return True
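if __name__ == '__main__':
  # Minimal self-contained sketch (not part of the original module) illustrating
  # configMatch(); the parameter names and values below are invented.
  exampleConfig = { 'Site': [ 'LCG.CERN.ch', 'LCG.*.fr' ], 'Status': [ 'Active' ] }
  # a UNIX-like wildcard on the config side matches the candidate value
  assert configMatch( { 'Site': 'LCG.IN2P3.fr', 'Status': 'Active' }, exampleConfig )
  # None acts as a wildcard, while a value absent from the config params is rejected
  assert not configMatch( { 'Site': None, 'Status': 'Banned' }, exampleConfig )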
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| Sbalbp/DIRAC | ResourceStatusSystem/Utilities/Utils.py | Python | gpl-3.0 | 3,106 |
"""
URLResolver Addon for Kodi
Copyright (C) 2016 t0mm0, tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
from urlresolver.lib import kodi
from urlresolver.lib import log_utils
from urlresolver.lib import cache
from urlresolver.lib.url_dispatcher import URL_Dispatcher
url_dispatcher = URL_Dispatcher()
def __enum(**enums):
return type('Enum', (), enums)
MODES = __enum(AUTH_RD='auth_rd', RESET_RD='reset_rd', RESET_CACHE='reset_cache')
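# Illustrative note (not part of the original add-on): a request such as
#   plugin://script.module.urlresolver/?mode=auth_rd
# is parsed by main() below into {'mode': 'auth_rd'} and handed to
# url_dispatcher.dispatch(), which routes it to the function registered for that
# mode via @url_dispatcher.register(...).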
@url_dispatcher.register(MODES.AUTH_RD)
def auth_rd():
kodi.close_all()
kodi.sleep(500) # sleep or authorize won't work for some reason
from urlresolver.plugins import realdebrid
if realdebrid.RealDebridResolver().authorize_resolver():
kodi.notify(msg=kodi.i18n('rd_authorized'), duration=5000)
@url_dispatcher.register(MODES.RESET_RD)
def reset_rd():
kodi.close_all()
kodi.sleep(500) # sleep or reset won't work for some reason
from urlresolver.plugins import realdebrid
rd = realdebrid.RealDebridResolver()
rd.reset_authorization()
kodi.notify(msg=kodi.i18n('rd_auth_reset'), duration=5000)
@url_dispatcher.register(MODES.RESET_CACHE)
def reset_cache():
if cache.reset_cache():
kodi.notify(msg=kodi.i18n('cache_reset'))
else:
kodi.notify(msg=kodi.i18n('cache_reset_failed'))
def main(argv=None):
if sys.argv: argv = sys.argv
queries = kodi.parse_query(sys.argv[2])
log_utils.log('Version: |%s| Queries: |%s|' % (kodi.get_version(), queries))
log_utils.log('Args: |%s|' % (argv))
# don't process params that don't match our url exactly. (e.g. plugin://plugin.video.1channel/extrafanart)
plugin_url = 'plugin://%s/' % (kodi.get_id())
if argv[0] != plugin_url:
return
mode = queries.get('mode', None)
url_dispatcher.dispatch(mode, queries)
if __name__ == '__main__':
sys.exit(main())
| fapeci/Filma | script.module.urlresolver/lib/default.py | Python | gpl-3.0 | 2,499 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2009 Raymond Hettinger
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from UserDict import DictMixin  # DictMixin lives in the Python 2 stdlib only
class OrderedDict(dict, DictMixin):
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.__map = {} # key --> [key, prev, next]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError('dictionary is empty')
if last:
            key = next(reversed(self))
else:
            key = next(iter(self))
value = self.pop(key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
iterkeys = DictMixin.iterkeys
itervalues = DictMixin.itervalues
iteritems = DictMixin.iteritems
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
if len(self) != len(other):
return False
for p, q in zip(self.items(), other.items()):
if p != q:
return False
return True
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
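if __name__ == '__main__':
    # Minimal usage sketch (not part of the original backport): insertion order is
    # preserved and popitem() can pop from either end of the internal linked list.
    d = OrderedDict()
    d['one'] = 1
    d['two'] = 2
    d['three'] = 3
    assert d.keys() == ['one', 'two', 'three']
    assert d.popitem() == ('three', 3)          # last=True pops the newest entry
    assert d.popitem(last=False) == ('one', 1)  # last=False pops the oldest entry
    assert d == OrderedDict([('two', 2)])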
| andpp/cherrymusic | backport/collections/_backported.py | Python | gpl-3.0 | 4,139 |
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from .base import Capability, BaseObject, StringField, FloatField
__all__ = ['IpLocation', 'CapGeolocIp']
class IpLocation(BaseObject):
"""
Represents the location of an IP address.
"""
city = StringField('City')
region = StringField('Region')
zipcode = StringField('Zip code')
country = StringField('Country')
lt = FloatField('Latitude')
lg = FloatField('Longitude')
osmlink = StringField('Link to OpenStreetMap location page')
host = StringField('Hostname')
tld = StringField('Top Level Domain')
isp = StringField('Internet Service Provider')
def __init__(self, ipaddr):
BaseObject.__init__(self, ipaddr)
class CapGeolocIp(Capability):
"""
Access information about IP addresses database.
"""
def get_location(self, ipaddr):
"""
Get location of an IP address.
:param ipaddr: IP address
:type ipaddr: str
:rtype: :class:`IpLocation`
"""
raise NotImplementedError()
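# Illustrative sketch (not part of the original capability module; values are
# invented): a backend implementing CapGeolocIp is expected to return a filled
# IpLocation, roughly along these lines:
#
#     def get_location(self, ipaddr):
#         loc = IpLocation(ipaddr)
#         loc.city = u'Paris'
#         loc.country = u'France'
#         loc.lt, loc.lg = 48.8566, 2.3522
#         return loc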
| sputnick-dev/weboob | weboob/capabilities/geolocip.py | Python | agpl-3.0 | 1,791 |
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from copy import deepcopy
from django import forms
from django.conf import settings
from django.core.urlresolvers import reverse
from django.forms.models import modelform_factory
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from shoop.admin.base import MenuEntry
from shoop.admin.toolbar import Toolbar, URLActionButton, get_default_edit_toolbar
from shoop.admin.utils.views import CreateOrUpdateView
from shoop.core.models import PaymentMethod, ShippingMethod
from shoop.core.modules.interface import ModuleNotFound
from shoop.utils.multilanguage_model_form import MultiLanguageModelForm
class MethodEditToolbar(Toolbar):
def __init__(self, view_object):
super(Toolbar, self).__init__()
self.view_object = view_object
get_default_edit_toolbar(toolbar=self, view_object=view_object, save_form_id="method_form")
method = view_object.object
if method.pk:
self.build_detail_button(method)
def build_detail_button(self, method):
disable_reason = None
try:
if not method.module.admin_detail_view_class:
disable_reason = _("The selected module has no details to configure")
except ModuleNotFound:
disable_reason = _("The selected module is not currently available")
self.append(URLActionButton(
url=reverse(
"shoop_admin:%s.edit-detail" % self.view_object.action_url_name_prefix,
kwargs={"pk": method.pk}
),
text=_("Edit Details"),
icon="fa fa-pencil",
extra_css_class="btn-info",
disable_reason=disable_reason
))
class _BaseMethodEditView(CreateOrUpdateView):
model = None # Overridden below
action_url_name_prefix = None
template_name = "shoop/admin/methods/edit.jinja"
form_class = forms.Form
context_object_name = "method"
@property
def title(self):
return _(u"Edit %(model)s") % {"model": self.model._meta.verbose_name}
def get_breadcrumb_parents(self):
return [
MenuEntry(
text=force_text(self.model._meta.verbose_name_plural).title(),
url="shoop_admin:%s.list" % self.action_url_name_prefix
)
]
def get_form(self, form_class=None):
form_class = modelform_factory(
model=self.model,
form=MultiLanguageModelForm,
fields=("name", "status", "tax_class", "module_identifier"),
widgets={"module_identifier": forms.Select},
)
form = form_class(languages=settings.LANGUAGES, **self.get_form_kwargs())
form.fields["module_identifier"].widget.choices = self.model.get_module_choices(
empty_label=(_("Default %s module") % self.model._meta.verbose_name).title()
)
# Add fields from the module, if any...
form.module_option_field_names = []
for field_name, field in self.object.module.option_fields:
form.fields[field_name] = deepcopy(field)
form.module_option_field_names.append(field_name)
if self.object.module_data and field_name in self.object.module_data:
form.initial[field_name] = self.object.module_data[field_name]
return form
def get_success_url(self):
return reverse("shoop_admin:%s.edit" % self.action_url_name_prefix, kwargs={"pk": self.object.pk})
def get_toolbar(self):
return MethodEditToolbar(self)
def save_form(self, form):
self.object = form.save()
if not self.object.module_data:
self.object.module_data = {}
for field_name in form.module_option_field_names:
if field_name in form.cleaned_data:
self.object.module_data[field_name] = form.cleaned_data[field_name]
self.object.save()
class ShippingMethodEditView(_BaseMethodEditView):
model = ShippingMethod
action_url_name_prefix = "method.shipping"
class PaymentMethodEditView(_BaseMethodEditView):
model = PaymentMethod
action_url_name_prefix = "method.payment"
| lawzou/shoop | shoop/admin/modules/methods/views/edit.py | Python | agpl-3.0 | 4,422 |
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BlockStructureConfiguration',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('num_versions_to_keep', models.IntegerField(default=5, null=True, blank=True)),
('cache_timeout_in_seconds', models.IntegerField(default=86400, null=True, blank=True)),
('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
],
options={
'db_table': 'block_structure_config',
},
),
]
| edx/edx-platform | openedx/core/djangoapps/content/block_structure/migrations/0001_config.py | Python | agpl-3.0 | 1,174 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""fix description field in connection to be text
Revision ID: 64a7d6477aae
Revises: 61ec73d9401f
Create Date: 2020-11-25 08:56:11.866607
"""
import sqlalchemy as sa # noqa
from alembic import op # noqa
# revision identifiers, used by Alembic.
revision = '64a7d6477aae'
down_revision = '61ec73d9401f'
branch_labels = None
depends_on = None
def upgrade():
"""Apply fix description field in connection to be text"""
conn = op.get_bind() # pylint: disable=no-member
if conn.dialect.name == "sqlite":
# in sqlite TEXT and STRING column types are the same
return
if conn.dialect.name == "mysql":
op.alter_column(
'connection',
'description',
existing_type=sa.String(length=5000),
type_=sa.Text(length=5000),
existing_nullable=True,
)
else:
# postgres does not allow size modifier for text type
op.alter_column('connection', 'description', existing_type=sa.String(length=5000), type_=sa.Text())
def downgrade():
"""Unapply fix description field in connection to be text"""
conn = op.get_bind() # pylint: disable=no-member
if conn.dialect.name == "sqlite":
# in sqlite TEXT and STRING column types are the same
return
if conn.dialect.name == "mysql":
op.alter_column(
'connection',
'description',
existing_type=sa.Text(5000),
type_=sa.String(length=5000),
existing_nullable=True,
)
else:
# postgres does not allow size modifier for text type
op.alter_column(
'connection',
'description',
existing_type=sa.Text(),
type_=sa.String(length=5000),
existing_nullable=True,
)
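# Rough illustration (not part of the migration): the dialect branches above map
# approximately to
#   MySQL upgrade:      ALTER TABLE connection MODIFY description TEXT(5000);
#   PostgreSQL upgrade: ALTER TABLE connection ALTER COLUMN description TYPE TEXT;
# while SQLite is skipped because its TEXT and VARCHAR columns behave identically.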
| nathanielvarona/airflow | airflow/migrations/versions/64a7d6477aae_fix_description_field_in_connection_to_.py | Python | apache-2.0 | 2,586 |
# Copyright 2017 AT&T Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.identity.v3 import endpoint_groups_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestEndPointGroupsClient(base.BaseServiceTest):
FAKE_CREATE_ENDPOINT_GROUP = {
"endpoint_group": {
"id": 1,
"name": "FAKE_ENDPOINT_GROUP",
"description": "FAKE SERVICE ENDPOINT GROUP",
"filters": {
"service_id": 1
}
}
}
FAKE_ENDPOINT_GROUP_INFO = {
"endpoint_group": {
"id": 1,
"name": "FAKE_ENDPOINT_GROUP",
"description": "FAKE SERVICE ENDPOINT GROUP",
"links": {
"self": "http://example.com/identity/v3/OS-EP-FILTER/" +
"endpoint_groups/1"
},
"filters": {
"service_id": 1
}
}
}
FAKE_LIST_ENDPOINT_GROUPS = {
"endpoint_groups": [
{
"id": 1,
"name": "SERVICE_GROUP1",
"description": "FAKE SERVICE ENDPOINT GROUP",
"links": {
"self": "http://example.com/identity/v3/OS-EP-FILTER/" +
"endpoint_groups/1"
},
"filters": {
"service_id": 1
}
},
{
"id": 2,
"name": "SERVICE_GROUP2",
"description": "FAKE SERVICE ENDPOINT GROUP",
"links": {
"self": "http://example.com/identity/v3/OS-EP-FILTER/" +
"endpoint_groups/2"
},
"filters": {
"service_id": 2
}
}
]
}
def setUp(self):
super(TestEndPointGroupsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = endpoint_groups_client.EndPointGroupsClient(
fake_auth, 'identity', 'regionOne')
def _test_create_endpoint_group(self, bytes_body=False):
self.check_service_client_function(
self.client.create_endpoint_group,
'tempest.lib.common.rest_client.RestClient.post',
self.FAKE_CREATE_ENDPOINT_GROUP,
bytes_body,
status=201,
name="FAKE_ENDPOINT_GROUP",
filters={'service_id': "1"})
def _test_show_endpoint_group(self, bytes_body=False):
self.check_service_client_function(
self.client.show_endpoint_group,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_ENDPOINT_GROUP_INFO,
bytes_body,
endpoint_group_id="1")
def _test_check_endpoint_group(self, bytes_body=False):
self.check_service_client_function(
self.client.check_endpoint_group,
'tempest.lib.common.rest_client.RestClient.head',
{},
bytes_body,
status=200,
endpoint_group_id="1")
def _test_update_endpoint_group(self, bytes_body=False):
self.check_service_client_function(
self.client.update_endpoint_group,
'tempest.lib.common.rest_client.RestClient.patch',
self.FAKE_ENDPOINT_GROUP_INFO,
bytes_body,
endpoint_group_id="1",
name="NewName")
def _test_list_endpoint_groups(self, bytes_body=False):
self.check_service_client_function(
self.client.list_endpoint_groups,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_LIST_ENDPOINT_GROUPS,
bytes_body)
def test_create_endpoint_group_with_str_body(self):
self._test_create_endpoint_group()
def test_create_endpoint_group_with_bytes_body(self):
self._test_create_endpoint_group(bytes_body=True)
def test_show_endpoint_group_with_str_body(self):
self._test_show_endpoint_group()
def test_show_endpoint_group_with_bytes_body(self):
self._test_show_endpoint_group(bytes_body=True)
def test_check_endpoint_group_with_str_body(self):
self._test_check_endpoint_group()
def test_check_endpoint_group_with_bytes_body(self):
self._test_check_endpoint_group(bytes_body=True)
def test_list_endpoint_groups_with_str_body(self):
self._test_list_endpoint_groups()
def test_list_endpoint_groups_with_bytes_body(self):
self._test_list_endpoint_groups(bytes_body=True)
def test_update_endpoint_group_with_str_body(self):
self._test_update_endpoint_group()
def test_update_endpoint_group_with_bytes_body(self):
self._test_update_endpoint_group(bytes_body=True)
def test_delete_endpoint_group(self):
self.check_service_client_function(
self.client.delete_endpoint_group,
'tempest.lib.common.rest_client.RestClient.delete',
{},
endpoint_group_id="1",
status=204)
| vedujoshi/tempest | tempest/tests/lib/services/identity/v3/test_endpoint_groups_client.py | Python | apache-2.0 | 5,681 |
# Copyright 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from neutron._i18n import _
allowed_address_pair_opts = [
#TODO(limao): use quota framework when it support quota for attributes
cfg.IntOpt('max_allowed_address_pair', default=10,
help=_("Maximum number of allowed address pairs")),
]
def register_allowed_address_pair_opts(cfg=cfg.CONF):
cfg.register_opts(allowed_address_pair_opts)
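# Illustrative usage sketch (not part of the original module): once
# register_allowed_address_pair_opts(cfg.CONF) has been called, the option can be
# read as
#   cfg.CONF.max_allowed_address_pair   # 10 unless overridden in the config file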
| eayunstack/neutron | neutron/conf/extensions/allowedaddresspairs.py | Python | apache-2.0 | 1,020 |
## @package get_python_cmake_flags
# Module scripts.get_python_cmake_flags
##############################################################################
# Use this script to find your preferred python installation.
##############################################################################
#
# You can use the following to build with your preferred version of python
# if your installation is not being properly detected by CMake.
#
# mkdir -p build && cd build
# cmake $(python ../scripts/get_python_libs.py) ..
# make
#
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils import sysconfig
import os
import sys
import platform
# Flags to print to stdout
flags = ''
inc = sysconfig.get_python_inc()
lib = sysconfig.get_config_var("LIBDIR")
# macOS specific
if sys.platform == "darwin":
lib = os.path.dirname(lib) + '/Python'
if os.path.isfile(lib):
flags += '-DPYTHON_LIBRARY={lib} '.format(lib=lib)
if os.path.isfile(inc + '/Python.h'):
flags += '-DPYTHON_INCLUDE_DIR={inc} '.format(inc=inc)
print(flags, end='')
| Yangqing/caffe2 | scripts/get_python_cmake_flags.py | Python | apache-2.0 | 1,130 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that uses Google AutoML services.
"""
import os
from datetime import datetime
from airflow import models
from airflow.providers.google.cloud.hooks.automl import CloudAutoMLHook
from airflow.providers.google.cloud.operators.automl import (
AutoMLCreateDatasetOperator,
AutoMLDeleteDatasetOperator,
AutoMLDeleteModelOperator,
AutoMLImportDataOperator,
AutoMLTrainModelOperator,
)
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "your-project-id")
GCP_AUTOML_LOCATION = os.environ.get("GCP_AUTOML_LOCATION", "us-central1")
GCP_AUTOML_SENTIMENT_BUCKET = os.environ.get("GCP_AUTOML_SENTIMENT_BUCKET", "gs://INVALID BUCKET NAME")
# Example values
DATASET_ID = ""
# Example model
MODEL = {
"display_name": "auto_model_1",
"dataset_id": DATASET_ID,
"text_sentiment_model_metadata": {},
}
# Example dataset
DATASET = {
"display_name": "test_text_sentiment_dataset",
"text_sentiment_dataset_metadata": {"sentiment_max": 10},
}
IMPORT_INPUT_CONFIG = {"gcs_source": {"input_uris": [GCP_AUTOML_SENTIMENT_BUCKET]}}
extract_object_id = CloudAutoMLHook.extract_object_id
# Example DAG for AutoML Natural Language Text Sentiment
with models.DAG(
"example_automl_text_sentiment",
schedule_interval=None, # Override to match your needs
start_date=datetime(2021, 1, 1),
catchup=False,
user_defined_macros={"extract_object_id": extract_object_id},
tags=['example'],
) as example_dag:
create_dataset_task = AutoMLCreateDatasetOperator(
task_id="create_dataset_task", dataset=DATASET, location=GCP_AUTOML_LOCATION
)
dataset_id = create_dataset_task.output['dataset_id']
import_dataset_task = AutoMLImportDataOperator(
task_id="import_dataset_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
input_config=IMPORT_INPUT_CONFIG,
)
MODEL["dataset_id"] = dataset_id
create_model = AutoMLTrainModelOperator(task_id="create_model", model=MODEL, location=GCP_AUTOML_LOCATION)
model_id = create_model.output['model_id']
delete_model_task = AutoMLDeleteModelOperator(
task_id="delete_model_task",
model_id=model_id,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
delete_datasets_task = AutoMLDeleteDatasetOperator(
task_id="delete_datasets_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
import_dataset_task >> create_model
delete_model_task >> delete_datasets_task
# Task dependencies created via `XComArgs`:
# create_dataset_task >> import_dataset_task
# create_dataset_task >> create_model
# create_model >> delete_model_task
# create_dataset_task >> delete_datasets_task
| Acehaidrey/incubator-airflow | airflow/providers/google/cloud/example_dags/example_automl_nl_text_sentiment.py | Python | apache-2.0 | 3,589 |
# -*- coding: utf-8 -*-
###############################################################################
#
# InitializeOAuth
# Generates an authorization URL that an application can use to complete the first step in the OAuth process.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class InitializeOAuth(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the InitializeOAuth Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(InitializeOAuth, self).__init__(temboo_session, '/Library/Bitly/OAuth/InitializeOAuth')
def new_input_set(self):
return InitializeOAuthInputSet()
def _make_result_set(self, result, path):
return InitializeOAuthResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return InitializeOAuthChoreographyExecution(session, exec_id, path)
class InitializeOAuthInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the InitializeOAuth
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccountName(self, value):
"""
Set the value of the AccountName input for this Choreo. ((optional, string) Deprecated (retained for backward compatibility only).)
"""
super(InitializeOAuthInputSet, self)._set_input('AccountName', value)
def set_AppKeyName(self, value):
"""
Set the value of the AppKeyName input for this Choreo. ((optional, string) Deprecated (retained for backward compatibility only).)
"""
super(InitializeOAuthInputSet, self)._set_input('AppKeyName', value)
def set_AppKeyValue(self, value):
"""
Set the value of the AppKeyValue input for this Choreo. ((optional, string) Deprecated (retained for backward compatibility only).)
"""
super(InitializeOAuthInputSet, self)._set_input('AppKeyValue', value)
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((required, string) The Client ID provided by Bitly after registering your application.)
"""
super(InitializeOAuthInputSet, self)._set_input('ClientID', value)
def set_CustomCallbackID(self, value):
"""
Set the value of the CustomCallbackID input for this Choreo. ((optional, string) A unique identifier that you can pass to eliminate the need to wait for a Temboo generated CallbackID. Callback identifiers may only contain numbers, letters, periods, and hyphens.)
"""
super(InitializeOAuthInputSet, self)._set_input('CustomCallbackID', value)
def set_ForwardingURL(self, value):
"""
Set the value of the ForwardingURL input for this Choreo. ((optional, string) The URL that Temboo will redirect your users to after they grant access to your application. This should include the "https://" or "http://" prefix and be a fully qualified URL.)
"""
super(InitializeOAuthInputSet, self)._set_input('ForwardingURL', value)
class InitializeOAuthResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the InitializeOAuth Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_AuthorizationURL(self):
"""
Retrieve the value for the "AuthorizationURL" output from this Choreo execution. ((string) The authorization URL that the application's user needs to go to in order to grant access to your application.)
"""
return self._output.get('AuthorizationURL', None)
def get_CallbackID(self):
"""
Retrieve the value for the "CallbackID" output from this Choreo execution. ((string) An ID used to retrieve the callback data that Temboo stores once your application's user authorizes.)
"""
return self._output.get('CallbackID', None)
class InitializeOAuthChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return InitializeOAuthResultSet(response, path)
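# Illustrative usage sketch (not part of the generated bindings); the session
# object, credential values, and the execute_with_results() call follow the usual
# Temboo SDK pattern and are assumed here rather than defined in this file.
#
#     choreo = InitializeOAuth(session)
#     inputs = choreo.new_input_set()
#     inputs.set_ClientID('YOUR_BITLY_CLIENT_ID')
#     inputs.set_ForwardingURL('https://example.com/oauth/callback')
#     results = choreo.execute_with_results(inputs)
#     auth_url = results.get_AuthorizationURL()
#     callback_id = results.get_CallbackID()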
| jordanemedlock/psychtruths | temboo/core/Library/Bitly/OAuth/InitializeOAuth.py | Python | apache-2.0 | 5,111 |
"""Tests for tensorflow.ops.gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import tensorflow.python.platform
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import types
# pylint: disable=unused-import
from tensorflow.python.ops import array_grad
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import data_flow_grad
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_grad
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad
from tensorflow.python.ops import state_grad
# pylint: enable=unused-import
from tensorflow.python.ops.constant_op import constant
from tensorflow.python.ops.nn_ops import bias_add
from tensorflow.python.platform import googletest
def _OpsBetween(graph, to_ops, from_ops):
"""Build the list of operations between two lists of Operations.
Args:
graph: a Graph.
to_ops: list of Operations.
from_ops: list of Operations.
Returns:
The list of operations between "from_ops" and "to_ops", sorted by
decreasing operation id. This list contains all elements of to_ops.
TODO(touts): Think about returning an empty list if from_ops are not
reachable from to_ops. Presently it returns to_ops in that case.
"""
# List of booleans, indexed by operation id, indicating if
# an op is reached from the output of "input_ops".
reached_ops = [False] * (graph._last_id + 1)
# We only care to reach up to "output_ops" so we mark the
# output ops as reached to avoid recursing past them.
for op in to_ops:
reached_ops[op._id] = True
gradients._MarkReachedOps(from_ops, reached_ops)
between_ops = gradients._GatherInputs(to_ops, reached_ops)
between_ops.sort(lambda x, y: y._id - x._id)
return between_ops
class GradientsTest(test_util.TensorFlowTestCase):
def _OpNames(self, op_list):
return ["%s/%d" % (str(op.name), op._id) for op in op_list]
def _assertOpListEqual(self, ops1, ops2):
self.assertEquals(self._OpNames(ops1), self._OpNames(ops2))
def testOpsBetweenSimple(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.pack([t1, t2])
# Full graph
self._assertOpListEqual([t3.op, t2.op, t1.op],
_OpsBetween(g, [t3.op], [t1.op, t2.op]))
# Only t1, t3.
self._assertOpListEqual([t3.op, t1.op],
_OpsBetween(g, [t3.op], [t1.op]))
def testOpsBetweenUnreachable(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
_ = array_ops.pack([t1, t2])
t4 = constant(1.0)
t5 = constant(2.0)
t6 = array_ops.pack([t4, t5])
# Elements of to_ops are always listed.
self._assertOpListEqual([t6.op], _OpsBetween(g, [t6.op], [t1.op]))
def testOpsBetweenCut(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.pack([t1, t2])
t4 = constant([1.0])
t5 = array_ops.concat(0, [t4, t3])
t6 = constant([2.0])
t7 = array_ops.concat(0, [t5, t6])
self._assertOpListEqual([t7.op, t5.op, t4.op],
_OpsBetween(g, [t7.op], [t4.op]))
def testOpsBetweenCycle(self):
with ops.Graph().as_default() as g:
t1 = constant(1.0)
t2 = constant(2.0)
t3 = array_ops.pack([t1, t2])
t4 = array_ops.concat(0, [t3, t3, t3])
t5 = constant([1.0])
t6 = array_ops.concat(0, [t4, t5])
t7 = array_ops.concat(0, [t6, t3])
self._assertOpListEqual([t6.op, t4.op, t3.op],
_OpsBetween(g, [t6.op], [t3.op]))
self._assertOpListEqual([t7.op, t6.op, t5.op, t4.op, t3.op, t1.op],
_OpsBetween(g, [t7.op], [t1.op, t5.op]))
self._assertOpListEqual([t6.op, t5.op, t4.op, t3.op, t2.op],
_OpsBetween(g, [t6.op], [t2.op, t5.op]))
def testGradients(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[32, 100], name="in")
w = constant(1.0, shape=[100, 10], name="w")
b = constant(1.0, shape=[10], name="b")
xw = math_ops.matmul(inp, w, name="xw")
h = bias_add(xw, b, name="h")
w_grad = gradients.gradients(h, w)[0]
self.assertEquals("MatMul", w_grad.op.type)
self.assertEquals(w_grad.op._original_op, xw.op)
self.assertTrue(w_grad.op.get_attr("transpose_a"))
self.assertFalse(w_grad.op.get_attr("transpose_b"))
def testUnusedOutput(self):
with ops.Graph().as_default():
w = constant(1.0, shape=[2, 2])
x = constant(1.0, shape=[2, 2])
wx = math_ops.matmul(w, x)
split_wx = array_ops.split(0, 2, wx)
c = math_ops.reduce_sum(split_wx[1])
gw = gradients.gradients(c, [w])[0]
self.assertEquals("MatMul", gw.op.type)
def testColocateGradients(self):
with ops.Graph().as_default() as g:
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
with g.device("/gpu:0"):
wx = math_ops.matmul(w, x)
gw = gradients.gradients(wx, [w], colocate_gradients_with_ops=True)[0]
self.assertEquals("/gpu:0", gw.device)
def testColocateGradientsWithAggregation(self):
with ops.Graph().as_default() as g:
with g.device("/gpu:1"):
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
y = constant(1.0, shape=[1, 2])
wx = math_ops.matmul(w, x)
wy = math_ops.matmul(w, y)
with g.device("/gpu:0"):
z = wx + wy
gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0]
self.assertEquals("/gpu:1", gw1.device)
gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0]
self.assertEquals(None, gw2.device)
def testBoundaryStop(self):
# Test that we don't differentiate 'x'. The gradient function for 'x' is
# set explicitly to None so we will get an exception if the gradient code
# tries to differentiate 'x'.
with ops.Graph().as_default() as g:
c = constant(1.0)
x = array_ops.identity(c)
y = x + 1.0
z = y + 1
grads = gradients.gradients(z, [x])
self.assertTrue(all([x for x in grads]))
def testBoundaryContinue(self):
# Test that we differentiate both 'x' and 'y' correctly when x is a
# predecessor of y.
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y * 3.0
grads = gradients.gradients(z, [x, y])
self.assertTrue(all([x for x in grads]))
self.assertEqual(6.0, grads[0].eval())
def testAggregationMethodAccumulateN(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z,
[x, y],
aggregation_method=
gradients.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
self.assertTrue(all([x for x in grads]))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testAggregationMethodAddN(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z,
[x, y],
aggregation_method=gradients.AggregationMethod.ADD_N)
self.assertTrue(all([x for x in grads]))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testAggregationMethodTree(self):
with self.test_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z,
[x, y],
aggregation_method=gradients.AggregationMethod.EXPERIMENTAL_TREE)
self.assertTrue(all([x for x in grads]))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testNoGradientForStringOutputs(self):
with ops.Graph().as_default() as g:
@ops.RegisterGradient("TestOp")
def _TestOpGrad(op, float_grad, string_grad):
"""Gradient function for TestOp."""
self.assertEquals(float_grad.dtype, types.float32)
self.assertFalse(string_grad)
return float_grad
ops.RegisterShape("TestOp")(None)
c = constant(1.0)
x, y = g.create_op("TestOp", [c], [types.float32, types.string]).outputs
z = x * 2.0
w = z * 3.0
grads = gradients.gradients(z, [c])
self.assertTrue(isinstance(grads[0], ops.Tensor))
class StopGradientTest(test_util.TensorFlowTestCase):
def testStopGradient(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[100, 32], name="in")
out = array_ops.stop_gradient(inp)
igrad = gradients.gradients(out, inp)[0]
assert igrad is None
class HessianVectorProductTest(test_util.TensorFlowTestCase):
def testHessianVectorProduct(self):
# Manually compute the Hessian explicitly for a low-dimensional problem
# and check that HessianVectorProduct matches multiplication by the
# explicit Hessian.
# Specifically, the Hessian of f(x) = x^T A x is
# H = A + A^T.
# We expect HessianVectorProduct(f(x), x, v) to be H v.
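    # (Derivation sketch added for clarity: grad_x(x^T A x) = (A + A^T) x, so
    # differentiating once more w.r.t. x gives the constant Hessian H = A + A^T,
    # hence the expected product is simply (A + A^T) v.)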
m = 4
rng = np.random.RandomState([1, 2, 3])
mat_value = rng.randn(m, m).astype("float32")
v_value = rng.randn(m, 1).astype("float32")
x_value = rng.randn(m, 1).astype("float32")
hess_value = mat_value + mat_value.T
hess_v_value = np.dot(hess_value, v_value)
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
mat = constant_op.constant(mat_value)
v = constant_op.constant(v_value)
x = constant_op.constant(x_value)
mat_x = math_ops.matmul(mat, x, name="Ax")
x_mat_x = math_ops.matmul(array_ops.transpose(x), mat_x, name="xAx")
hess_v = gradients._hessian_vector_product(x_mat_x, [x], [v])[0]
hess_v_actual = hess_v.eval()
self.assertAllClose(hess_v_value, hess_v_actual)
class IndexedSlicesToTensorTest(test_util.TensorFlowTestCase):
def testIndexedSlicesToTensor(self):
with self.test_session():
np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
c = constant_op.constant(np_val)
c_sparse = math_ops._as_indexed_slices(c)
self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval())
c_dense = math_ops.mul(c_sparse, 1.0)
self.assertAllClose(np_val, c_dense.eval())
def testInt64Indices(self):
with self.test_session():
np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
c = constant_op.constant(np_val)
c_sparse = math_ops._as_indexed_slices(c)
c_sparse = ops.IndexedSlices(
c_sparse.values, math_ops.cast(c_sparse.indices, types.int64),
c_sparse.dense_shape)
self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval())
c_dense = math_ops.mul(c_sparse, 1.0)
self.assertAllClose(np_val, c_dense.eval())
def testWarnings(self):
# Smaller than the threshold: no warning.
c_sparse = ops.IndexedSlices(array_ops.placeholder(types.float32),
array_ops.placeholder(types.int32),
constant([4, 4, 4, 4]))
with warnings.catch_warnings(record=True) as w:
math_ops.mul(c_sparse, 1.0)
self.assertEqual(0, len(w))
# Greater than or equal to the threshold: warning.
c_sparse = ops.IndexedSlices(array_ops.placeholder(types.float32),
array_ops.placeholder(types.int32),
constant([100, 100, 100, 100]))
with warnings.catch_warnings(record=True) as w:
math_ops.mul(c_sparse, 1.0)
self.assertEqual(1, len(w))
self.assertTrue(
"with 100000000 elements. This may consume a large amount of memory."
in str(w[0].message))
# Unknown dense shape: warning.
c_sparse = ops.IndexedSlices(array_ops.placeholder(types.float32),
array_ops.placeholder(types.int32),
array_ops.placeholder(types.int32))
with warnings.catch_warnings(record=True) as w:
math_ops.mul(c_sparse, 1.0)
self.assertEqual(1, len(w))
self.assertTrue(
"of unknown shape. This may consume a large amount of memory."
in str(w[0].message))
if __name__ == "__main__":
googletest.main()
| arunhotra/tensorflow | tensorflow/python/ops/gradients_test.py | Python | apache-2.0 | 12,673 |
"""Test for the smhi weather entity."""
import asyncio
from datetime import datetime
import logging
from unittest.mock import AsyncMock, Mock, patch
from smhi.smhi_lib import APIURL_TEMPLATE, SmhiForecastException
from homeassistant.components.smhi import weather as weather_smhi
from homeassistant.components.smhi.const import ATTR_SMHI_CLOUDINESS
from homeassistant.components.weather import (
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_PRECIPITATION,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_FORECAST_TIME,
ATTR_WEATHER_ATTRIBUTION,
ATTR_WEATHER_HUMIDITY,
ATTR_WEATHER_PRESSURE,
ATTR_WEATHER_TEMPERATURE,
ATTR_WEATHER_VISIBILITY,
ATTR_WEATHER_WIND_BEARING,
ATTR_WEATHER_WIND_SPEED,
DOMAIN as WEATHER_DOMAIN,
)
from homeassistant.const import TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry, load_fixture
_LOGGER = logging.getLogger(__name__)
TEST_CONFIG = {"name": "test", "longitude": "17.84197", "latitude": "59.32624"}
async def test_setup_hass(hass: HomeAssistant, aioclient_mock) -> None:
"""Test for successfully setting up the smhi platform.
    This test is integrated more deeply with the core. Since only
    config_flow is used, the component is set up with
    "async_forward_entry_setup". The actual result is tested
    through the entity state rather than with "per function" unit tests
"""
uri = APIURL_TEMPLATE.format(TEST_CONFIG["longitude"], TEST_CONFIG["latitude"])
api_response = load_fixture("smhi.json")
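    # Serve the canned forecast fixture for the mocked SMHI API endpoint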
aioclient_mock.get(uri, text=api_response)
entry = MockConfigEntry(domain="smhi", data=TEST_CONFIG)
await hass.config_entries.async_forward_entry_setup(entry, WEATHER_DOMAIN)
await hass.async_block_till_done()
assert aioclient_mock.call_count == 1
    # Testing the actual entity state for
    # deeper testing than a normal unit test
state = hass.states.get("weather.smhi_test")
assert state.state == "sunny"
assert state.attributes[ATTR_SMHI_CLOUDINESS] == 50
assert state.attributes[ATTR_WEATHER_ATTRIBUTION].find("SMHI") >= 0
assert state.attributes[ATTR_WEATHER_HUMIDITY] == 55
assert state.attributes[ATTR_WEATHER_PRESSURE] == 1024
assert state.attributes[ATTR_WEATHER_TEMPERATURE] == 17
assert state.attributes[ATTR_WEATHER_VISIBILITY] == 50
assert state.attributes[ATTR_WEATHER_WIND_SPEED] == 7
assert state.attributes[ATTR_WEATHER_WIND_BEARING] == 134
_LOGGER.error(state.attributes)
assert len(state.attributes["forecast"]) == 4
forecast = state.attributes["forecast"][1]
assert forecast[ATTR_FORECAST_TIME] == "2018-09-02T12:00:00"
assert forecast[ATTR_FORECAST_TEMP] == 21
assert forecast[ATTR_FORECAST_TEMP_LOW] == 6
assert forecast[ATTR_FORECAST_PRECIPITATION] == 0
assert forecast[ATTR_FORECAST_CONDITION] == "partlycloudy"
def test_properties_no_data(hass: HomeAssistant) -> None:
"""Test properties when no API data available."""
weather = weather_smhi.SmhiWeather("name", "10", "10")
weather.hass = hass
assert weather.name == "name"
assert weather.should_poll is True
assert weather.temperature is None
assert weather.humidity is None
assert weather.wind_speed is None
assert weather.wind_bearing is None
assert weather.visibility is None
assert weather.pressure is None
assert weather.cloudiness is None
assert weather.condition is None
assert weather.forecast is None
assert weather.temperature_unit == TEMP_CELSIUS
# pylint: disable=protected-access
def test_properties_unknown_symbol() -> None:
"""Test behaviour when unknown symbol from API."""
hass = Mock()
data = Mock()
data.temperature = 5
data.mean_precipitation = 0.5
data.total_precipitation = 1
data.humidity = 5
data.wind_speed = 10
data.wind_direction = 180
data.horizontal_visibility = 6
data.pressure = 1008
data.cloudiness = 52
data.symbol = 100 # Faulty symbol
data.valid_time = datetime(2018, 1, 1, 0, 1, 2)
data2 = Mock()
data2.temperature = 5
data2.mean_precipitation = 0.5
data2.total_precipitation = 1
data2.humidity = 5
data2.wind_speed = 10
data2.wind_direction = 180
data2.horizontal_visibility = 6
data2.pressure = 1008
data2.cloudiness = 52
data2.symbol = 100 # Faulty symbol
data2.valid_time = datetime(2018, 1, 1, 12, 1, 2)
data3 = Mock()
data3.temperature = 5
data3.mean_precipitation = 0.5
data3.total_precipitation = 1
data3.humidity = 5
data3.wind_speed = 10
data3.wind_direction = 180
data3.horizontal_visibility = 6
data3.pressure = 1008
data3.cloudiness = 52
data3.symbol = 100 # Faulty symbol
data3.valid_time = datetime(2018, 1, 2, 12, 1, 2)
testdata = [data, data2, data3]
weather = weather_smhi.SmhiWeather("name", "10", "10")
weather.hass = hass
weather._forecasts = testdata
assert weather.condition is None
forecast = weather.forecast[0]
assert forecast[ATTR_FORECAST_CONDITION] is None
# pylint: disable=protected-access
async def test_refresh_weather_forecast_exceeds_retries(hass) -> None:
"""Test the refresh weather forecast function."""
with patch.object(
hass.helpers.event, "async_call_later"
) as call_later, patch.object(
weather_smhi.SmhiWeather,
"get_weather_forecast",
side_effect=SmhiForecastException(),
):
weather = weather_smhi.SmhiWeather("name", "17.0022", "62.0022")
weather.hass = hass
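        # Simulate that the retry limit has already been reached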
weather._fail_count = 2
await weather.async_update()
assert weather._forecasts is None
assert not call_later.mock_calls
async def test_refresh_weather_forecast_timeout(hass) -> None:
"""Test timeout exception."""
weather = weather_smhi.SmhiWeather("name", "17.0022", "62.0022")
weather.hass = hass
with patch.object(
hass.helpers.event, "async_call_later"
) as call_later, patch.object(
weather_smhi.SmhiWeather, "retry_update"
), patch.object(
weather_smhi.SmhiWeather,
"get_weather_forecast",
side_effect=asyncio.TimeoutError,
):
await weather.async_update()
assert len(call_later.mock_calls) == 1
# Assert we are going to wait RETRY_TIMEOUT seconds
assert call_later.mock_calls[0][1][0] == weather_smhi.RETRY_TIMEOUT
async def test_refresh_weather_forecast_exception() -> None:
"""Test any exception."""
hass = Mock()
weather = weather_smhi.SmhiWeather("name", "17.0022", "62.0022")
weather.hass = hass
with patch.object(
hass.helpers.event, "async_call_later"
) as call_later, patch.object(
weather,
"get_weather_forecast",
side_effect=SmhiForecastException(),
):
await weather.async_update()
assert len(call_later.mock_calls) == 1
# Assert we are going to wait RETRY_TIMEOUT seconds
assert call_later.mock_calls[0][1][0] == weather_smhi.RETRY_TIMEOUT
async def test_retry_update():
"""Test retry function of refresh forecast."""
hass = Mock()
weather = weather_smhi.SmhiWeather("name", "17.0022", "62.0022")
weather.hass = hass
with patch.object(weather, "async_update", AsyncMock()) as update:
await weather.retry_update(None)
assert len(update.mock_calls) == 1
def test_condition_class():
"""Test condition class."""
def get_condition(index: int) -> str:
"""Return condition given index."""
return [k for k, v in weather_smhi.CONDITION_CLASSES.items() if index in v][0]
# SMHI definitions as follows, see
# http://opendata.smhi.se/apidocs/metfcst/parameters.html
# 1. Clear sky
assert get_condition(1) == "sunny"
# 2. Nearly clear sky
assert get_condition(2) == "sunny"
# 3. Variable cloudiness
assert get_condition(3) == "partlycloudy"
# 4. Halfclear sky
assert get_condition(4) == "partlycloudy"
# 5. Cloudy sky
assert get_condition(5) == "cloudy"
# 6. Overcast
assert get_condition(6) == "cloudy"
# 7. Fog
assert get_condition(7) == "fog"
# 8. Light rain showers
assert get_condition(8) == "rainy"
# 9. Moderate rain showers
assert get_condition(9) == "rainy"
# 18. Light rain
assert get_condition(18) == "rainy"
# 19. Moderate rain
assert get_condition(19) == "rainy"
# 10. Heavy rain showers
assert get_condition(10) == "pouring"
# 20. Heavy rain
assert get_condition(20) == "pouring"
# 21. Thunder
assert get_condition(21) == "lightning"
# 11. Thunderstorm
assert get_condition(11) == "lightning-rainy"
# 15. Light snow showers
assert get_condition(15) == "snowy"
# 16. Moderate snow showers
assert get_condition(16) == "snowy"
# 17. Heavy snow showers
assert get_condition(17) == "snowy"
# 25. Light snowfall
assert get_condition(25) == "snowy"
# 26. Moderate snowfall
assert get_condition(26) == "snowy"
# 27. Heavy snowfall
assert get_condition(27) == "snowy"
# 12. Light sleet showers
assert get_condition(12) == "snowy-rainy"
# 13. Moderate sleet showers
assert get_condition(13) == "snowy-rainy"
# 14. Heavy sleet showers
assert get_condition(14) == "snowy-rainy"
# 22. Light sleet
assert get_condition(22) == "snowy-rainy"
# 23. Moderate sleet
assert get_condition(23) == "snowy-rainy"
# 24. Heavy sleet
assert get_condition(24) == "snowy-rainy"
| partofthething/home-assistant | tests/components/smhi/test_weather.py | Python | apache-2.0 | 9,580 |
# Copyright 2009-2015 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from requestbuilder import Arg
from euca2ools.commands.iam import IAMRequest, AS_ACCOUNT, arg_user
class CreateSigningCertificate(IAMRequest):
DESCRIPTION = '[Eucalyptus only] Create a new signing certificate'
ARGS = [arg_user(nargs='?', help='''user to create the signing
certificate for (default: current user)'''),
Arg('--out', metavar='FILE', route_to=None,
help='file to write the certificate to (default: stdout)'),
Arg('--keyout', metavar='FILE', route_to=None,
help='file to write the private key to (default: stdout)'),
AS_ACCOUNT]
def postprocess(self, result):
if self.args['out']:
with open(self.args['out'], 'w') as certfile:
certfile.write(result['Certificate']['CertificateBody'])
if self.args['keyout']:
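            # Tighten the umask so the private key file is created without group/other access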
old_umask = os.umask(0o077)
with open(self.args['keyout'], 'w') as keyfile:
keyfile.write(result['Certificate']['PrivateKey'])
os.umask(old_umask)
def print_result(self, result):
print result['Certificate']['CertificateId']
if not self.args['out']:
print result['Certificate']['CertificateBody']
if not self.args['keyout']:
print result['Certificate']['PrivateKey']
| jhajek/euca2ools | euca2ools/commands/iam/createsigningcertificate.py | Python | bsd-2-clause | 2,698 |
# Copyright 2013-2014 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from requestbuilder import Arg
from euca2ools.commands.ec2 import EC2Request
class DeleteNetworkAclEntry(EC2Request):
DESCRIPTION = 'Delete a network acl rule'
ARGS = [Arg('NetworkAclId', metavar='NACL', help='''ID of the
network ACL to delete an entry from (required)'''),
Arg('-n', '--rule-number', dest='RuleNumber', required=True,
type=int, help='number of the entry to delete (required)'),
Arg('--egress', dest='Egress', action='store_true', help='''delete
an egress entry (default: delete an ingress entry)''')]
| vasiliykochergin/euca2ools | euca2ools/commands/ec2/deletenetworkaclentry.py | Python | bsd-2-clause | 1,948 |
from __future__ import unicode_literals
import json
from django.test import TestCase, override_settings
from django.utils.http import urlquote
from django.core.urlresolvers import reverse
from django.contrib.auth.models import Permission
from django.core.files.uploadedfile import SimpleUploadedFile
from django.template.defaultfilters import filesizeformat
# Get the chars that Django considers safe to leave unescaped in a URL
# This list changed in Django 1.8: https://github.com/django/django/commit/e167e96cfea670422ca75d0b35fe7c4195f25b63
try:
from django.utils.http import RFC3986_SUBDELIMS
urlquote_safechars = RFC3986_SUBDELIMS + str('/~:@')
except ImportError:  # < Django 1.8
urlquote_safechars = '/'
from wagtail.tests.utils import WagtailTestUtils
from wagtail.wagtailimages.utils import generate_signature
from .utils import Image, get_test_image_file
class TestImageIndexView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailimages:index'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/index.html')
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
def test_ordering(self):
orderings = ['title', '-created_at']
for ordering in orderings:
response = self.get({'ordering': ordering})
self.assertEqual(response.status_code, 200)
class TestImageAddView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailimages:add'), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailimages:add'), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/add.html')
def test_add(self):
response = self.post({
'title': "Test image",
'file': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()),
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailimages:index'))
# Check that the image was created
images = Image.objects.filter(title="Test image")
self.assertEqual(images.count(), 1)
# Test that size was populated correctly
image = images.first()
self.assertEqual(image.width, 640)
self.assertEqual(image.height, 480)
# Test that the file_size field was set
self.assertTrue(image.file_size)
def test_add_no_file_selected(self):
response = self.post({
'title': "Test image",
})
# Shouldn't redirect anywhere
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/add.html')
# The form should have an error
self.assertFormError(response, 'form', 'file', "This field is required.")
@override_settings(WAGTAILIMAGES_MAX_UPLOAD_SIZE=1)
def test_add_too_large_file(self):
file_content = get_test_image_file().file.getvalue()
response = self.post({
'title': "Test image",
'file': SimpleUploadedFile('test.png', file_content),
})
# Shouldn't redirect anywhere
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/add.html')
# The form should have an error
self.assertFormError(response, 'form', 'file', "This file is too big ({file_size}). Maximum filesize {max_file_size}.".format(
file_size=filesizeformat(len(file_content)),
max_file_size=filesizeformat(1),
))
class TestImageEditView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
# Create an image to edit
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
def get(self, params={}):
return self.client.get(reverse('wagtailimages:edit', args=(self.image.id,)), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailimages:edit', args=(self.image.id,)), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/edit.html')
def test_edit(self):
response = self.post({
'title': "Edited",
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailimages:index'))
# Check that the image was edited
image = Image.objects.get(id=self.image.id)
self.assertEqual(image.title, "Edited")
def test_edit_with_new_image_file(self):
file_content = get_test_image_file().file.getvalue()
# Change the file size of the image
self.image.file_size = 100000
self.image.save()
response = self.post({
'title': "Edited",
'file': SimpleUploadedFile('new.png', file_content),
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailimages:index'))
# Check that the image file size changed (assume it changed to the correct value)
image = Image.objects.get(id=self.image.id)
self.assertNotEqual(image.file_size, 100000)
def test_with_missing_image_file(self):
self.image.file.delete(False)
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/edit.html')
class TestImageDeleteView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
# Create an image to edit
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
def get(self, params={}):
return self.client.get(reverse('wagtailimages:delete', args=(self.image.id,)), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailimages:delete', args=(self.image.id,)), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/confirm_delete.html')
def test_delete(self):
response = self.post({
'hello': 'world'
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailimages:index'))
# Check that the image was deleted
images = Image.objects.filter(title="Test image")
self.assertEqual(images.count(), 0)
class TestImageChooserView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailimages:chooser'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.html')
self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.js')
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
class TestImageChooserChosenView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
# Create an image to edit
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
def get(self, params={}):
return self.client.get(reverse('wagtailimages:image_chosen', args=(self.image.id,)), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/chooser/image_chosen.js')
# TODO: Test posting
class TestImageChooserUploadView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailimages:chooser_upload'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.html')
self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.js')
def test_upload(self):
response = self.client.post(reverse('wagtailimages:chooser_upload'), {
'title': "Test image",
'file': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()),
})
# Check response
self.assertEqual(response.status_code, 200)
# Check that the image was created
images = Image.objects.filter(title="Test image")
self.assertEqual(images.count(), 1)
# Test that size was populated correctly
image = images.first()
self.assertEqual(image.width, 640)
self.assertEqual(image.height, 480)
def test_upload_no_file_selected(self):
response = self.client.post(reverse('wagtailimages:chooser_upload'), {
'title': "Test image",
})
# Shouldn't redirect anywhere
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/chooser/chooser.html')
# The form should have an error
self.assertFormError(response, 'uploadform', 'file', "This field is required.")
class TestMultipleImageUploader(TestCase, WagtailTestUtils):
"""
This tests the multiple image upload views located in wagtailimages/views/multiple.py
"""
def setUp(self):
self.login()
# Create an image for running tests on
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
def test_add(self):
"""
This tests that the add view responds correctly on a GET request
"""
# Send request
response = self.client.get(reverse('wagtailimages:add_multiple'))
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/multiple/add.html')
@override_settings(WAGTAILIMAGES_MAX_UPLOAD_SIZE=1000)
def test_add_max_file_size_context_variables(self):
response = self.client.get(reverse('wagtailimages:add_multiple'))
self.assertEqual(response.context['max_filesize'], 1000)
self.assertEqual(response.context['error_max_file_size'], "This file is too big. Maximum filesize 1000\xa0bytes.")
def test_add_post(self):
"""
This tests that a POST request to the add view saves the image and returns an edit form
"""
response = self.client.post(reverse('wagtailimages:add_multiple'), {
'files[]': SimpleUploadedFile('test.png', get_test_image_file().file.getvalue()),
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertTemplateUsed(response, 'wagtailimages/multiple/edit_form.html')
# Check image
self.assertIn('image', response.context)
self.assertEqual(response.context['image'].title, 'test.png')
self.assertTrue(response.context['image'].file_size)
# Check form
self.assertIn('form', response.context)
self.assertEqual(response.context['form'].initial['title'], 'test.png')
# Check JSON
response_json = json.loads(response.content.decode())
self.assertIn('image_id', response_json)
self.assertIn('form', response_json)
self.assertIn('success', response_json)
self.assertEqual(response_json['image_id'], response.context['image'].id)
self.assertTrue(response_json['success'])
def test_add_post_noajax(self):
"""
This tests that only AJAX requests are allowed to POST to the add view
"""
response = self.client.post(reverse('wagtailimages:add_multiple'), {})
# Check response
self.assertEqual(response.status_code, 400)
def test_add_post_nofile(self):
"""
This tests that the add view checks for a file when a user POSTs to it
"""
response = self.client.post(reverse('wagtailimages:add_multiple'), {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 400)
def test_add_post_badfile(self):
"""
This tests that the add view checks for a file when a user POSTs to it
"""
response = self.client.post(reverse('wagtailimages:add_multiple'), {
'files[]': SimpleUploadedFile('test.png', b"This is not an image!"),
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
# Check JSON
response_json = json.loads(response.content.decode())
self.assertNotIn('image_id', response_json)
self.assertNotIn('form', response_json)
self.assertIn('success', response_json)
self.assertIn('error_message', response_json)
self.assertFalse(response_json['success'])
self.assertEqual(response_json['error_message'], "Not a supported image format. Supported formats: GIF, JPEG, PNG.")
def test_edit_get(self):
"""
This tests that a GET request to the edit view returns a 405 "METHOD NOT ALLOWED" response
"""
# Send request
response = self.client.get(reverse('wagtailimages:edit_multiple', args=(self.image.id, )))
# Check response
self.assertEqual(response.status_code, 405)
def test_edit_post(self):
"""
This tests that a POST request to the edit view edits the image
"""
# Send request
response = self.client.post(reverse('wagtailimages:edit_multiple', args=(self.image.id, )), {
('image-%d-title' % self.image.id): "New title!",
('image-%d-tags' % self.image.id): "",
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
# Check JSON
response_json = json.loads(response.content.decode())
self.assertIn('image_id', response_json)
self.assertNotIn('form', response_json)
self.assertIn('success', response_json)
self.assertEqual(response_json['image_id'], self.image.id)
self.assertTrue(response_json['success'])
def test_edit_post_noajax(self):
"""
This tests that a POST request to the edit view without AJAX returns a 400 response
"""
# Send request
response = self.client.post(reverse('wagtailimages:edit_multiple', args=(self.image.id, )), {
('image-%d-title' % self.image.id): "New title!",
('image-%d-tags' % self.image.id): "",
})
# Check response
self.assertEqual(response.status_code, 400)
def test_edit_post_validation_error(self):
"""
This tests that a POST request to the edit page returns a json document with "success=False"
and a form with the validation error indicated
"""
# Send request
response = self.client.post(reverse('wagtailimages:edit_multiple', args=(self.image.id, )), {
('image-%d-title' % self.image.id): "", # Required
('image-%d-tags' % self.image.id): "",
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertTemplateUsed(response, 'wagtailimages/multiple/edit_form.html')
# Check that a form error was raised
self.assertFormError(response, 'form', 'title', "This field is required.")
# Check JSON
response_json = json.loads(response.content.decode())
self.assertIn('image_id', response_json)
self.assertIn('form', response_json)
self.assertIn('success', response_json)
self.assertEqual(response_json['image_id'], self.image.id)
self.assertFalse(response_json['success'])
def test_delete_get(self):
"""
This tests that a GET request to the delete view returns a 405 "METHOD NOT ALLOWED" response
"""
# Send request
response = self.client.get(reverse('wagtailimages:delete_multiple', args=(self.image.id, )))
# Check response
self.assertEqual(response.status_code, 405)
def test_delete_post(self):
"""
This tests that a POST request to the delete view deletes the image
"""
# Send request
response = self.client.post(reverse('wagtailimages:delete_multiple', args=(self.image.id, )), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
# Make sure the image is deleted
self.assertFalse(Image.objects.filter(id=self.image.id).exists())
# Check JSON
response_json = json.loads(response.content.decode())
self.assertIn('image_id', response_json)
self.assertIn('success', response_json)
self.assertEqual(response_json['image_id'], self.image.id)
self.assertTrue(response_json['success'])
def test_delete_post_noajax(self):
"""
This tests that a POST request to the delete view without AJAX returns a 400 response
"""
# Send request
response = self.client.post(reverse('wagtailimages:delete_multiple', args=(self.image.id, )))
# Check response
self.assertEqual(response.status_code, 400)
class TestURLGeneratorView(TestCase, WagtailTestUtils):
def setUp(self):
# Create an image for running tests on
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
# Login
self.user = self.login()
def test_get(self):
"""
This tests that the view responds correctly for a user with edit permissions on this image
"""
# Get
response = self.client.get(reverse('wagtailimages:url_generator', args=(self.image.id, )))
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailimages/images/url_generator.html')
def test_get_bad_permissions(self):
"""
        This tests that the view gives a 403 if a user without correct permissions attempts to access it
"""
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Get
response = self.client.get(reverse('wagtailimages:url_generator', args=(self.image.id, )))
# Check response
self.assertEqual(response.status_code, 403)
class TestGenerateURLView(TestCase, WagtailTestUtils):
def setUp(self):
# Create an image for running tests on
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
# Login
self.user = self.login()
def test_get(self):
"""
This tests that the view responds correctly for a user with edit permissions on this image
"""
# Get
response = self.client.get(reverse('wagtailimages:generate_url', args=(self.image.id, 'fill-800x600')))
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
# Check JSON
content_json = json.loads(response.content.decode())
self.assertEqual(set(content_json.keys()), set(['url', 'preview_url']))
expected_url = 'http://localhost/images/%(signature)s/%(image_id)d/fill-800x600/' % {
'signature': urlquote(generate_signature(self.image.id, 'fill-800x600').decode(), safe=urlquote_safechars),
'image_id': self.image.id,
}
self.assertEqual(content_json['url'], expected_url)
expected_preview_url = reverse('wagtailimages:preview', args=(self.image.id, 'fill-800x600'))
self.assertEqual(content_json['preview_url'], expected_preview_url)
def test_get_bad_permissions(self):
"""
        This tests that the view gives a 403 if a user without correct permissions attempts to access it
"""
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Get
response = self.client.get(reverse('wagtailimages:generate_url', args=(self.image.id, 'fill-800x600')))
# Check response
self.assertEqual(response.status_code, 403)
self.assertEqual(response['Content-Type'], 'application/json')
# Check JSON
self.assertJSONEqual(response.content.decode(), json.dumps({
'error': 'You do not have permission to generate a URL for this image.',
}))
def test_get_bad_image(self):
"""
This tests that the view gives a 404 response if a user attempts to use it with an image which doesn't exist
"""
# Get
response = self.client.get(reverse('wagtailimages:generate_url', args=(self.image.id + 1, 'fill-800x600')))
# Check response
self.assertEqual(response.status_code, 404)
self.assertEqual(response['Content-Type'], 'application/json')
# Check JSON
self.assertJSONEqual(response.content.decode(), json.dumps({
'error': 'Cannot find image.',
}))
def test_get_bad_filter_spec(self):
"""
This tests that the view gives a 400 response if the user attempts to use it with an invalid filter spec
"""
# Get
response = self.client.get(reverse('wagtailimages:generate_url', args=(self.image.id, 'bad-filter-spec')))
# Check response
self.assertEqual(response.status_code, 400)
self.assertEqual(response['Content-Type'], 'application/json')
# Check JSON
self.assertJSONEqual(response.content.decode(), json.dumps({
'error': 'Invalid filter spec.',
}))
class TestPreviewView(TestCase, WagtailTestUtils):
def setUp(self):
# Create an image for running tests on
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
# Login
self.user = self.login()
def test_get(self):
"""
Test a valid GET request to the view
"""
# Get the image
response = self.client.get(reverse('wagtailimages:preview', args=(self.image.id, 'fill-800x600')))
# Check response
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'image/png')
def test_get_invalid_filter_spec(self):
"""
Test that an invalid filter spec returns a 400 response
This is very unlikely to happen in reality. A user would have
        to create a signature for the invalid filter spec, which can't be
        done with Wagtail's built-in URL generator. We should test it
anyway though.
"""
# Get the image
response = self.client.get(reverse('wagtailimages:preview', args=(self.image.id, 'bad-filter-spec')))
# Check response
self.assertEqual(response.status_code, 400)
| bjesus/wagtail | wagtail/wagtailimages/tests/test_admin_views.py | Python | bsd-3-clause | 25,034 |
from __future__ import absolute_import
input_name = '../examples/multi_physics/thermo_elasticity_ess.py'
output_name = 'test_thermo_elasticity_ess.vtk'
from tests_basic import TestInput
class Test(TestInput):
pass
| vlukes/sfepy | tests/test_input_thermo_elasticity_ess.py | Python | bsd-3-clause | 219 |
def get_attributes_display_map(variant, attributes):
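    """Return a dict mapping attribute primary keys to display values.
    For each attribute the value is the matching attribute value object when
    the stored value is a known choice, otherwise the raw stored value.
    """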
display = {}
for attribute in attributes:
value = variant.get_attribute(attribute.pk)
if value:
choices = {a.pk: a for a in attribute.values.all()}
attr = choices.get(value)
if attr:
display[attribute.pk] = attr
else:
display[attribute.pk] = value
return display
| laosunhust/saleor | saleor/product/utils.py | Python | bsd-3-clause | 424 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# https://github.com/swistakm/django-rest-framework/blob/master/rest_framework/runtests/runtests.py
import os
import sys
# fix sys path so we don't need to setup PYTHONPATH
sys.path.append(os.path.join(os.path.dirname(__file__), "../.."))
os.environ['DJANGO_SETTINGS_MODULE'] = 'oauth_provider.runtests.settings'
from django.conf import settings
from django.test.utils import get_runner
from south.management.commands import patch_for_test_db_setup
def usage():
return """
Usage: python runtests.py [UnitTestClass].[method]
You can pass the Class name of the `UnitTestClass` you want to test.
Append a method name if you only want to test a specific method of that class.
"""
def main():
TestRunner = get_runner(settings)
test_runner = TestRunner(verbosity=2)
if len(sys.argv) == 2:
test_case = '.' + sys.argv[1]
elif len(sys.argv) == 1:
test_case = ''
else:
print(usage())
sys.exit(1)
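    # Let South hook into test database creation before running the tests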
patch_for_test_db_setup()
failures = test_runner.run_tests(['tests' + test_case])
sys.exit(failures)
if __name__ == '__main__':
main()
| philipforget/django-oauth-plus | oauth_provider/runtests/runtests.py | Python | bsd-3-clause | 1,167 |
import taxcalc
| mcdeaton13/Tax-Calculator | conda.recipe/run_test.py | Python | mit | 15 |
import cherrypy
from cherrypy.test import helper
class ETagTest(helper.CPWebCase):
def setup_server():
class Root:
def resource(self):
return "Oh wah ta goo Siam."
resource.exposed = True
def fail(self, code):
code = int(code)
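                # 3xx codes are raised as redirects; anything else as an HTTP error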
if 300 <= code <= 399:
raise cherrypy.HTTPRedirect([], code)
else:
raise cherrypy.HTTPError(code)
fail.exposed = True
def unicoded(self):
return u'I am a \u1ee4nicode string.'
unicoded.exposed = True
unicoded._cp_config = {'tools.encode.on': True}
conf = {'/': {'tools.etags.on': True,
'tools.etags.autotags': True,
}}
cherrypy.tree.mount(Root(), config=conf)
setup_server = staticmethod(setup_server)
def test_etags(self):
self.getPage("/resource")
self.assertStatus('200 OK')
self.assertHeader('Content-Type', 'text/html;charset=utf-8')
self.assertBody('Oh wah ta goo Siam.')
etag = self.assertHeader('ETag')
# Test If-Match (both valid and invalid)
self.getPage("/resource", headers=[('If-Match', etag)])
self.assertStatus("200 OK")
self.getPage("/resource", headers=[('If-Match', "*")])
self.assertStatus("200 OK")
self.getPage("/resource", headers=[('If-Match', "*")], method="POST")
self.assertStatus("200 OK")
self.getPage("/resource", headers=[('If-Match', "a bogus tag")])
self.assertStatus("412 Precondition Failed")
# Test If-None-Match (both valid and invalid)
self.getPage("/resource", headers=[('If-None-Match', etag)])
self.assertStatus(304)
self.getPage("/resource", method='POST', headers=[('If-None-Match', etag)])
self.assertStatus("412 Precondition Failed")
self.getPage("/resource", headers=[('If-None-Match', "*")])
self.assertStatus(304)
self.getPage("/resource", headers=[('If-None-Match', "a bogus tag")])
self.assertStatus("200 OK")
def test_errors(self):
self.getPage("/resource")
self.assertStatus(200)
etag = self.assertHeader('ETag')
# Test raising errors in page handler
self.getPage("/fail/412", headers=[('If-Match', etag)])
self.assertStatus(412)
self.getPage("/fail/304", headers=[('If-Match', etag)])
self.assertStatus(304)
self.getPage("/fail/412", headers=[('If-None-Match', "*")])
self.assertStatus(412)
self.getPage("/fail/304", headers=[('If-None-Match', "*")])
self.assertStatus(304)
def test_unicode_body(self):
self.getPage("/unicoded")
self.assertStatus(200)
etag1 = self.assertHeader('ETag')
self.getPage("/unicoded", headers=[('If-Match', etag1)])
self.assertStatus(200)
self.assertHeader('ETag', etag1)
| evilhero/mylar | lib/cherrypy/test/test_etags.py | Python | gpl-3.0 | 3,071 |
# -*- coding: utf-8 -*-
#
# Test links:
# http://speedy.sh/ep2qY/Zapp-Brannigan.jpg
import re
import urlparse
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class SpeedyshareCom(SimpleHoster):
__name__ = "SpeedyshareCom"
__type__ = "hoster"
__version__ = "0.06"
__status__ = "testing"
__pattern__ = r'https?://(?:www\.)?(speedyshare\.com|speedy\.sh)/\w+'
__config__ = [("use_premium", "bool", "Use premium account if available", True)]
__description__ = """Speedyshare.com hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("zapp-brannigan", "[email protected]")]
NAME_PATTERN = r'class=downloadfilename>(?P<N>.*)</span></td>'
SIZE_PATTERN = r'class=sizetagtext>(?P<S>.*) (?P<U>[kKmM]?[iI]?[bB]?)</div>'
OFFLINE_PATTERN = r'class=downloadfilenamenotfound>.*</span>'
LINK_FREE_PATTERN = r'<a href=\'(.*)\'><img src=/gf/slowdownload\.png alt=\'Slow Download\' border=0'
def setup(self):
self.multiDL = False
self.chunk_limit = 1
def handle_free(self, pyfile):
m = re.search(self.LINK_FREE_PATTERN, self.html)
        if m is not None:
self.link = m.group(1)
getInfo = create_getInfo(SpeedyshareCom)
| fayf/pyload | module/plugins/hoster/SpeedyshareCom.py | Python | gpl-3.0 | 1,259 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Online Members Directory',
'category': 'Website',
'summary': 'Publish your members directory',
'version': '1.0',
'description': """
Publish your members/association directory publicly.
""",
'depends': ['website_partner', 'website_google_map', 'association', 'website_sale'],
'data': [
'data/membership_data.xml',
'views/website_membership_templates.xml',
'security/ir.model.access.csv',
'security/website_membership.xml',
],
'demo': ['data/membership_demo.xml'],
'qweb': ['static/src/xml/*.xml'],
'installable': True,
}
| t3dev/odoo | addons/website_membership/__manifest__.py | Python | gpl-3.0 | 711 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv, orm
from tools.translate import _
from datetime import datetime
from datetime import timedelta
from tools.safe_eval import safe_eval
from tools import ustr
import pooler
import re
import time
import tools
def get_datetime(date_field):
'''Return a datetime from a date string or a datetime string'''
#complete date time if date_field contains only a date
date_split = date_field.split(' ')
if len(date_split) == 1:
date_field = date_split[0] + " 00:00:00"
return datetime.strptime(date_field[:19], '%Y-%m-%d %H:%M:%S')
class base_action_rule(osv.osv):
""" Base Action Rules """
_name = 'base.action.rule'
_description = 'Action Rules'
def _state_get(self, cr, uid, context=None):
""" Get State
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param context: A standard dictionary for contextual values """
return self.state_get(cr, uid, context=context)
def state_get(self, cr, uid, context=None):
""" Get State
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param context: A standard dictionary for contextual values """
return [('', '')]
def priority_get(self, cr, uid, context=None):
""" Get Priority
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param context: A standard dictionary for contextual values """
return [('', '')]
_columns = {
'name': fields.char('Rule Name', size=64, required=True),
'model_id': fields.many2one('ir.model', 'Object', required=True),
'create_date': fields.datetime('Create Date', readonly=1),
'active': fields.boolean('Active', help="If the active field is set to False,\
it will allow you to hide the rule without removing it."),
'sequence': fields.integer('Sequence', help="Gives the sequence order \
when displaying a list of rules."),
'trg_date_type': fields.selection([
('none', 'None'),
('create', 'Creation Date'),
('action_last', 'Last Action Date'),
('date', 'Date'),
('deadline', 'Deadline'),
], 'Trigger Date', size=16),
'trg_date_range': fields.integer('Delay after trigger date', \
help="Delay After Trigger Date,\
specifies you can put a negative number. If you need a delay before the \
trigger date, like sending a reminder 15 minutes before a meeting."),
'trg_date_range_type': fields.selection([('minutes', 'Minutes'), ('hour', 'Hours'), \
('day', 'Days'), ('month', 'Months')], 'Delay type'),
'trg_user_id': fields.many2one('res.users', 'Responsible'),
'trg_partner_id': fields.many2one('res.partner', 'Partner'),
'trg_partner_categ_id': fields.many2one('res.partner.category', 'Partner Category'),
'trg_state_from': fields.selection(_state_get, 'State', size=16),
'trg_state_to': fields.selection(_state_get, 'Button Pressed', size=16),
'act_method': fields.char('Call Object Method', size=64),
'act_user_id': fields.many2one('res.users', 'Set Responsible to'),
'act_state': fields.selection(_state_get, 'Set State to', size=16),
'act_email_cc': fields.char('Add Watchers (Cc)', size=250, help="\
These people will receive a copy of the future communication between partner \
and users by email"),
'act_remind_partner': fields.boolean('Remind Partner', help="Check \
this if you want the rule to send a reminder by email to the partner."),
'act_remind_user': fields.boolean('Remind Responsible', help="Check \
this if you want the rule to send a reminder by email to the user."),
'act_reply_to': fields.char('Reply-To', size=64),
'act_remind_attach': fields.boolean('Remind with Attachment', help="Check this if you want that all documents attached to the object be attached to the reminder email sent."),
'act_mail_to_user': fields.boolean('Mail to Responsible', help="Check\
this if you want the rule to send an email to the responsible person."),
'act_mail_to_watchers': fields.boolean('Mail to Watchers (CC)',
help="Check this if you want \
the rule to mark CC(mail to any other person defined in actions)."),
'act_mail_to_email': fields.char('Mail to these Emails', size=128, \
help="Email-id of the persons whom mail is to be sent"),
'act_mail_body': fields.text('Mail body', help="Content of mail"),
'regex_name': fields.char('Regex on Resource Name', size=128, help="Regular expression for matching name of the resource\
\ne.g.: 'urgent.*' will search for records having name starting with the string 'urgent'\
\nNote: This is case sensitive search."),
'server_action_id': fields.many2one('ir.actions.server', 'Server Action', help="Describes the action name.\neg:on which object which action to be taken on basis of which condition"),
'filter_id':fields.many2one('ir.filters', 'Filter', required=False),
'act_email_from' : fields.char('Email From', size=64, required=False,
help="Use a python expression to specify the right field on which one than we will use for the 'From' field of the header"),
'act_email_to' : fields.char('Email To', size=64, required=False,
help="Use a python expression to specify the right field on which one than we will use for the 'To' field of the header"),
'last_run': fields.datetime('Last Run', readonly=1),
}
_defaults = {
'active': lambda *a: True,
'trg_date_type': lambda *a: 'none',
'trg_date_range_type': lambda *a: 'day',
'act_mail_to_user': lambda *a: 0,
'act_remind_partner': lambda *a: 0,
'act_remind_user': lambda *a: 0,
'act_mail_to_watchers': lambda *a: 0,
}
_order = 'sequence'
def onchange_model_id(self, cr, uid, ids, name):
#This is not a good solution as it will affect the domain only on onchange
res = {'domain':{'filter_id':[]}}
if name:
model_name = self.pool.get('ir.model').read(cr, uid, [name], ['model'])
if model_name:
mod_name = model_name[0]['model']
res['domain'] = {'filter_id': [('model_id','=',mod_name)]}
else:
res['value'] = {'filter_id':False}
return res
def post_action(self, cr, uid, ids, model, context=None):
# Searching for action rules
cr.execute("SELECT model.model, rule.id FROM base_action_rule rule \
LEFT JOIN ir_model model on (model.id = rule.model_id) \
WHERE active")
res = cr.fetchall()
# Check if any rule matching with current object
for obj_name, rule_id in res:
if not (model == obj_name):
continue # TODO add this condition in the WHERE clause above.
else:
obj = self.pool.get(obj_name)
# If the rule doesn't involve a time condition, run it immediately
# Otherwise we let the scheduler run the action
if self.browse(cr, uid, rule_id, context=context).trg_date_type == 'none':
self._action(cr, uid, [rule_id], obj.browse(cr, uid, ids, context=context), context=context)
return True
def _create(self, old_create, model, context=None):
"""
Return a wrapper around `old_create` calling both `old_create` and
`post_action`, in that order.
"""
def wrapper(cr, uid, vals, context=context):
if context is None:
context = {}
new_id = old_create(cr, uid, vals, context=context)
if not context.get('action'):
self.post_action(cr, uid, [new_id], model, context=context)
return new_id
return wrapper
def _write(self, old_write, model, context=None):
"""
Return a wrapper around `old_write` calling both `old_write` and
`post_action`, in that order.
"""
def wrapper(cr, uid, ids, vals, context=context):
if context is None:
context = {}
if isinstance(ids, (str, int, long)):
ids = [ids]
old_write(cr, uid, ids, vals, context=context)
if not context.get('action'):
self.post_action(cr, uid, ids, model, context=context)
return True
return wrapper
def _register_hook(self, cr, uid, ids, context=None):
"""
Wrap every `create` and `write` methods of the models specified by
the rules (given by `ids`).
"""
for action_rule in self.browse(cr, uid, ids, context=context):
model = action_rule.model_id.model
obj_pool = self.pool.get(model)
if not hasattr(obj_pool, 'base_action_ruled'):
obj_pool.create = self._create(obj_pool.create, model, context=context)
obj_pool.write = self._write(obj_pool.write, model, context=context)
obj_pool.base_action_ruled = True
return True
def create(self, cr, uid, vals, context=None):
res_id = super(base_action_rule, self).create(cr, uid, vals, context=context)
self._register_hook(cr, uid, [res_id], context=context)
return res_id
def write(self, cr, uid, ids, vals, context=None):
super(base_action_rule, self).write(cr, uid, ids, vals, context=context)
self._register_hook(cr, uid, ids, context=context)
return True
def _check(self, cr, uid, automatic=False, use_new_cursor=False, \
context=None):
"""
        This function is called by the scheduler.
"""
rule_pool = self.pool.get('base.action.rule')
rule_ids = rule_pool.search(cr, uid, [], context=context)
self._register_hook(cr, uid, rule_ids, context=context)
rules = self.browse(cr, uid, rule_ids, context=context)
for rule in rules:
model = rule.model_id.model
model_pool = self.pool.get(model)
last_run = False
if rule.last_run:
last_run = get_datetime(rule.last_run)
now = datetime.now()
for obj_id in model_pool.search(cr, uid, [], context=context):
obj = model_pool.browse(cr, uid, obj_id, context=context)
# Calculate when this action should next occur for this object
base = False
if rule.trg_date_type=='create' and hasattr(obj, 'create_date'):
base = obj.create_date
elif (rule.trg_date_type=='action_last'
and hasattr(obj, 'create_date')):
if hasattr(obj, 'date_action_last') and obj.date_action_last:
base = obj.date_action_last
else:
base = obj.create_date
elif (rule.trg_date_type=='deadline'
and hasattr(obj, 'date_deadline')
and obj.date_deadline):
base = obj.date_deadline
elif (rule.trg_date_type=='date'
and hasattr(obj, 'date')
and obj.date):
base = obj.date
if base:
fnct = {
'minutes': lambda interval: timedelta(minutes=interval),
'day': lambda interval: timedelta(days=interval),
'hour': lambda interval: timedelta(hours=interval),
                        # timedelta has no 'months' argument; approximate one month as 30 days
                        'month': lambda interval: timedelta(days=30 * interval),
}
base = get_datetime(base)
delay = fnct[rule.trg_date_range_type](rule.trg_date_range)
action_date = base + delay
if (not last_run or (last_run <= action_date < now)):
self._action(cr, uid, [rule.id], [obj], context=context)
rule_pool.write(cr, uid, [rule.id], {'last_run': now},
context=context)
def format_body(self, body):
""" Foramat Action rule's body
@param self: The object pointer """
return body and tools.ustr(body) or ''
def format_mail(self, obj, body):
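        # Collect record/partner placeholder values that can be interpolated into the mail body template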
data = {
'object_id': obj.id,
'object_subject': hasattr(obj, 'name') and obj.name or False,
'object_date': hasattr(obj, 'date') and obj.date or False,
'object_description': hasattr(obj, 'description') and obj.description or False,
'object_user': hasattr(obj, 'user_id') and (obj.user_id and obj.user_id.name) or '/',
'object_user_email': hasattr(obj, 'user_id') and (obj.user_id and \
obj.user_id.user_email) or '/',
'object_user_phone': hasattr(obj, 'partner_address_id') and (obj.partner_address_id and \
obj.partner_address_id.phone) or '/',
'partner': hasattr(obj, 'partner_id') and (obj.partner_id and obj.partner_id.name) or '/',
'partner_email': hasattr(obj, 'partner_address_id') and (obj.partner_address_id and\
obj.partner_address_id.email) or '/',
}
return self.format_body(body % data)
def email_send(self, cr, uid, obj, emails, body, emailfrom=None, context=None):
""" send email
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param email: pass the emails
@param emailfrom: Pass name the email From else False
@param context: A standard dictionary for contextual values """
if not emailfrom:
emailfrom = tools.config.get('email_from', False)
if context is None:
context = {}
mail_message = self.pool.get('mail.message')
body = self.format_mail(obj, body)
if not emailfrom:
if hasattr(obj, 'user_id') and obj.user_id and obj.user_id.user_email:
emailfrom = obj.user_id.user_email
name = '[%d] %s' % (obj.id, tools.ustr(obj.name))
emailfrom = tools.ustr(emailfrom)
reply_to = emailfrom
if not emailfrom:
raise osv.except_osv(_('Error!'),
_("No E-Mail ID Found for your Company address!"))
return mail_message.schedule_with_attach(cr, uid, emailfrom, emails, name, body, model='base.action.rule', reply_to=reply_to, res_id=obj.id)
def do_check(self, cr, uid, action, obj, context=None):
""" check Action
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param context: A standard dictionary for contextual values """
if context is None:
context = {}
ok = True
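        # Check the optional ir.filters domain first, then the user/partner/state/regex conditions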
if action.filter_id:
if action.model_id.model == action.filter_id.model_id:
context.update(eval(action.filter_id.context))
obj_ids = obj._table.search(cr, uid, eval(action.filter_id.domain), context=context)
if not obj.id in obj_ids:
ok = False
else:
ok = False
if getattr(obj, 'user_id', False):
ok = ok and (not action.trg_user_id.id or action.trg_user_id.id==obj.user_id.id)
if getattr(obj, 'partner_id', False):
ok = ok and (not action.trg_partner_id.id or action.trg_partner_id.id==obj.partner_id.id)
ok = ok and (
not action.trg_partner_categ_id.id or
(
obj.partner_id.id and
(action.trg_partner_categ_id.id in map(lambda x: x.id, obj.partner_id.category_id or []))
)
)
state_to = context.get('state_to', False)
state = getattr(obj, 'state', False)
if state:
ok = ok and (not action.trg_state_from or action.trg_state_from==state)
if state_to:
ok = ok and (not action.trg_state_to or action.trg_state_to==state_to)
elif action.trg_state_to:
ok = False
reg_name = action.regex_name
result_name = True
if reg_name:
ptrn = re.compile(ustr(reg_name))
_result = ptrn.search(ustr(obj.name))
if not _result:
result_name = False
regex_n = not reg_name or result_name
ok = ok and regex_n
return ok
def do_action(self, cr, uid, action, model_obj, obj, context=None):
""" Do Action
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param action: pass action
@param model_obj: pass Model object
@param context: A standard dictionary for contextual values """
if context is None:
context = {}
if action.server_action_id:
context.update({'active_id': obj.id, 'active_ids': [obj.id], 'active_model': obj._name})
self.pool.get('ir.actions.server').run(cr, uid, [action.server_action_id.id], context)
write = {}
if hasattr(obj, 'user_id') and action.act_user_id:
obj.user_id = action.act_user_id
write['user_id'] = action.act_user_id.id
if hasattr(obj, 'date_action_last'):
write['date_action_last'] = time.strftime('%Y-%m-%d %H:%M:%S')
if hasattr(obj, 'state') and action.act_state:
obj.state = action.act_state
write['state'] = action.act_state
if hasattr(obj, 'categ_id') and action.act_categ_id:
obj.categ_id = action.act_categ_id
write['categ_id'] = action.act_categ_id.id
model_obj.write(cr, uid, [obj.id], write, context)
if hasattr(model_obj, 'remind_user') and action.act_remind_user:
model_obj.remind_user(cr, uid, [obj.id], context, attach=action.act_remind_attach)
if hasattr(model_obj, 'remind_partner') and action.act_remind_partner:
model_obj.remind_partner(cr, uid, [obj.id], context, attach=action.act_remind_attach)
if action.act_method:
            getattr(model_obj, action.act_method)(cr, uid, [obj.id], action, context)
emails = []
if hasattr(obj, 'user_id') and action.act_mail_to_user:
if obj.user_id:
emails.append(obj.user_id.user_email)
if action.act_mail_to_watchers:
emails += (action.act_email_cc or '').split(',')
if action.act_mail_to_email:
emails += (action.act_mail_to_email or '').split(',')
locals_for_emails = {
'user' : self.pool.get('res.users').browse(cr, uid, uid, context=context),
'obj' : obj,
}
if action.act_email_to:
emails.append(safe_eval(action.act_email_to, {}, locals_for_emails))
emails = filter(None, emails)
if len(emails) and action.act_mail_body:
emails = list(set(emails))
email_from = safe_eval(action.act_email_from, {}, locals_for_emails)
def to_email(text):
return re.findall(r'([^ ,<@]+@[^> ,]+)', text or '')
emails = to_email(','.join(filter(None, emails)))
email_froms = to_email(email_from)
if email_froms:
self.email_send(cr, uid, obj, emails, action.act_mail_body, emailfrom=email_froms[0])
return True
def _action(self, cr, uid, ids, objects, scrit=None, context=None):
""" Do Action
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Basic Action Rule’s IDs,
@param objects: pass objects
@param context: A standard dictionary for contextual values """
if context is None:
context = {}
context.update({'action': True})
if not scrit:
scrit = []
for action in self.browse(cr, uid, ids, context=context):
for obj in objects:
if self.do_check(cr, uid, action, obj, context=context):
model_obj = self.pool.get(action.model_id.model)
self.do_action(cr, uid, action, model_obj, obj, context=context)
context.update({'action': False})
return True
def _check_mail(self, cr, uid, ids, context=None):
""" Check Mail
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Action Rule’s IDs
@param context: A standard dictionary for contextual values """
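        # Validate each rule's mail body by rendering it against a null
        # record; a rendering error (ValueError/KeyError/TypeError) means the
        # template references unknown fields or is otherwise malformed.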
empty = orm.browse_null()
rule_obj = self.pool.get('base.action.rule')
for rule in self.browse(cr, uid, ids, context=context):
if rule.act_mail_body:
try:
rule_obj.format_mail(empty, rule.act_mail_body)
except (ValueError, KeyError, TypeError):
return False
return True
_constraints = [
        (_check_mail, 'Error: The mail is not well formatted', ['act_mail_body']),
]
base_action_rule()
class ir_cron(osv.osv):
_inherit = 'ir.cron'
_init_done = False
def _poolJobs(self, db_name, check=False):
if not self._init_done:
self._init_done = True
try:
db = pooler.get_db(db_name)
except:
return False
cr = db.cursor()
try:
next = datetime.now().strftime('%Y-%m-%d %H:00:00')
            # Keep nextcall at or before the current time so the base.action.rule job is picked up on every scheduler run
cr.execute('UPDATE ir_cron set nextcall = \'%s\' where numbercall<>0 and active and model=\'base.action.rule\' ' % (next))
finally:
cr.commit()
cr.close()
super(ir_cron, self)._poolJobs(db_name, check=check)
ir_cron()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| ksrajkumar/openerp-6.1 | openerp/addons/base_action_rule/base_action_rule.py | Python | agpl-3.0 | 24,051 |
# Generated by Django 1.11.21 on 2019-07-01 12:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('instructor_task', '0002_gradereportsetting'),
]
operations = [
migrations.AlterField(
model_name='instructortask',
name='task_input',
field=models.TextField(),
),
]
| eduNEXT/edx-platform | lms/djangoapps/instructor_task/migrations/0003_alter_task_input_field.py | Python | agpl-3.0 | 396 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class DhpmmF(MakefilePackage):
"""DHPMM_P:High-precision Matrix Multiplication with Faithful Rounding"""
homepage = "http://www.math.twcu.ac.jp/ogita/post-k/"
url = "http://www.math.twcu.ac.jp/ogita/post-k/software/DHPMM_F/DHPMM_F_alpha.tar.gz"
version('alpha', sha256='35321ecbc749f2682775ffcd27833afc8c8eb4fa7753ce769727c9d1fe097848')
depends_on('blas', type='link')
depends_on('lapack', type='link')
def patch(self):
math_libs = self.spec['lapack'].libs + self.spec['blas'].libs
makefile = FileFilter('Makefile')
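        # The stock Makefile hard-codes compilers and '-llapack -lblas'; the
        # compiler-specific branches below swap in Spack's compiler wrappers
        # and, where needed, the link flags of the resolved BLAS/LAPACK
        # providers for this spec.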
if self.spec.satisfies('%gcc'):
makefile.filter(r'^MKL\s+=\s1', 'MKL=0')
makefile.filter(r'^CC\s+=\sgcc',
'CC={0}'.format(spack_cc))
makefile.filter(r'^CXX\s+=\sg\+\+',
'CXX={0}'.format(spack_cxx))
makefile.filter(r'^BLASLIBS\s+=\s-llapack\s-lblas',
'BLASLIBS={0}'.format(math_libs.ld_flags))
elif self.spec.satisfies('%fj'):
makefile.filter(r'^#ENV\s+=\sFX100', 'ENV=FX100')
makefile.filter(r'^ENV\s+=\sGCC', '#ENV=GCC')
makefile.filter(r'^MKL\s+=\s1', 'MKL=0')
makefile.filter(r'^CC\s+=\sfccpx',
'CC={0}'.format(spack_cc))
makefile.filter(r'^CXX\s+=\sFCCpx',
'CXX={0}'.format(spack_cxx))
makefile.filter(r'^BLASLIBS\s+=\s-llapack\s-lblas',
'BLASLIBS={0}'.format(math_libs.ld_flags))
elif self.spec.satisfies('%intel'):
makefile.filter(r'^ENV\s+=\sGCC', '#ENV=GCC')
makefile.filter(r'^ENV\s+=\sICC', 'ENV=ICC')
makefile.filter(r'^CC\s+=\sicc',
'CC={0}'.format(spack_cc))
makefile.filter(r'^CXX\s+=\sicc',
'CXX={0}'.format(spack_cxx))
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('test/source4_SpMV', prefix.bin)
| LLNL/spack | var/spack/repos/builtin/packages/dhpmm-f/package.py | Python | lgpl-2.1 | 2,254 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Minigan(Package):
"""miniGAN is a generative adversarial network code developed as part of the
Exascale Computing Project's (ECP) ExaLearn project at
Sandia National Laboratories."""
homepage = "https://github.com/SandiaMLMiniApps/miniGAN"
url = "https://github.com/SandiaMLMiniApps/miniGAN/archive/1.0.0.tar.gz"
version('1.0.0', sha256='ef6d5def9c7040af520acc64b7a8b6c8ec4b7901721b11b0cb25a583ea0c8ae3')
depends_on('python', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-torch', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-horovod@master', type=('build', 'run'))
depends_on('py-torchvision', type=('build', 'run'))
depends_on('[email protected]', type=('build', 'run'))
def install(self, spec, prefix):
install_tree('.', prefix)
| LLNL/spack | var/spack/repos/builtin/packages/minigan/package.py | Python | lgpl-2.1 | 1,105 |
import unittest
import sys
from PySide.QtCore import QObject, SIGNAL, QUrl
from PySide.QtWebKit import *
from PySide.QtNetwork import QNetworkRequest
from helper import adjust_filename, UsesQApplication
class TestWebFrame(UsesQApplication):
def load_finished(self, ok):
self.assert_(ok)
page = self.view.page()
self.assert_(page)
frame = page.mainFrame()
self.assert_(frame)
meta = frame.metaData()
self.assertEqual(meta['description'], ['PySide Test METADATA.'])
self.app.quit()
def testMetaData(self):
self.view = QWebView()
QObject.connect(self.view, SIGNAL('loadFinished(bool)'),
self.load_finished)
url = QUrl.fromLocalFile(adjust_filename('fox.html', __file__))
self.view.setUrl(url)
self.app.exec_()
if __name__ == '__main__':
unittest.main()
| enthought/pyside | tests/QtWebKit/webframe_test.py | Python | lgpl-2.1 | 899 |
# Copyright (c) 2008-2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""0.3.0 to 0.4.0
Revision ID: 0.4.0
Revises: 0.3.0
"""
# revision identifiers, used by Alembic.
revision = '0.4.0'
down_revision = '0.3.0'
branch_labels = None
depends_on = None
from alembic import op
from db_meta import *
from sqlalchemy.dialects import mysql
def upgrade():
"""
    update schema & data
"""
bind = op.get_bind()
#alter column user.username, alter column user.email, project.name and add column replication_policy.deleted
op.alter_column('user', 'username', type_=sa.String(32), existing_type=sa.String(15))
op.alter_column('user', 'email', type_=sa.String(255), existing_type=sa.String(128))
op.alter_column('project', 'name', type_=sa.String(41), existing_type=sa.String(30), nullable=False)
op.alter_column('replication_target', 'password', type_=sa.String(128), existing_type=sa.String(40))
op.add_column('replication_policy', sa.Column('deleted', mysql.TINYINT(1), nullable=False, server_default=sa.text("'0'")))
#create index pid_optime (project_id, op_time) on table access_log, poid_uptime (policy_id, update_time) on table replication_job
op.create_index('pid_optime', 'access_log', ['project_id', 'op_time'])
op.create_index('poid_uptime', 'replication_job', ['policy_id', 'update_time'])
#create tables: repository
Repository.__table__.create(bind)
def downgrade():
"""
Downgrade has been disabled.
"""
pass
| wknet123/harbor | tools/migration/migration_harbor/versions/0_4_0.py | Python | apache-2.0 | 2,016 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, division
import random
import string
import timeit
import os
import zipfile
import datrie
def words100k():
zip_name = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'words100k.txt.zip'
)
zf = zipfile.ZipFile(zip_name)
txt = zf.open(zf.namelist()[0]).read().decode('utf8')
return txt.splitlines()
def random_words(num):
russian = 'абвгдеёжзиклмнопрстуфхцчъыьэюя'
alphabet = russian + string.ascii_letters
return [
"".join([random.choice(alphabet) for x in range(random.randint(1,15))])
for y in range(num)
]
def truncated_words(words):
return [word[:3] for word in words]
def prefixes1k(words, prefix_len):
words = [w for w in words if len(w) >= prefix_len]
every_nth = int(len(words)/1000)
_words = [w[:prefix_len] for w in words[::every_nth]]
return _words[:1000]
WORDS100k = words100k()
MIXED_WORDS100k = truncated_words(WORDS100k)
NON_WORDS100k = random_words(100000)
PREFIXES_3_1k = prefixes1k(WORDS100k, 3)
PREFIXES_5_1k = prefixes1k(WORDS100k, 5)
PREFIXES_8_1k = prefixes1k(WORDS100k, 8)
PREFIXES_15_1k = prefixes1k(WORDS100k, 15)
def _alphabet(words):
chars = set()
for word in words:
for ch in word:
chars.add(ch)
return "".join(sorted(list(chars)))
ALPHABET = _alphabet(WORDS100k)
def bench(name, timer, descr='M ops/sec', op_count=0.1, repeats=3, runs=5):
times = []
for x in range(runs):
times.append(timer.timeit(repeats))
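    # op_count is the number of operations per run of the timed statement,
    # expressed in the unit implied by descr (e.g. 0.1 means 0.1M ops when
    # descr is 'M ops/sec'), so op_count * repeats / elapsed is the rate.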
def op_time(time):
return op_count*repeats / time
print("%55s: %0.3f%s" % (
name,
op_time(min(times)),
descr,
))
def create_trie():
words = words100k()
trie = datrie.Trie(ALPHABET)
for word in words:
trie[word] = 1
return trie
def benchmark():
print('\n====== Benchmarks (100k unique unicode words) =======\n')
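    # Each entry: (label, timed statement, unit label, op count per run, repeats).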
tests = [
('__getitem__ (hits)', "for word in words: data[word]", 'M ops/sec', 0.1, 3),
('__contains__ (hits)', "for word in words: word in data", 'M ops/sec', 0.1, 3),
('__contains__ (misses)', "for word in NON_WORDS100k: word in data", 'M ops/sec', 0.1, 3),
('__len__', 'len(data)', ' ops/sec', 1, 1),
('__setitem__ (updates)', 'for word in words: data[word]=1', 'M ops/sec', 0.1, 3),
('__setitem__ (inserts, random)', 'for word in NON_WORDS_10k: data[word]=1', 'M ops/sec',0.01, 3),
('__setitem__ (inserts, sorted)', 'for word in words: empty_data[word]=1', 'M ops/sec', 0.1, 3),
('setdefault (updates)', 'for word in words: data.setdefault(word, 1)', 'M ops/sec', 0.1, 3),
('setdefault (inserts)', 'for word in NON_WORDS_10k: data.setdefault(word, 1)', 'M ops/sec', 0.01, 3),
('values()', 'list(data.values())', ' ops/sec', 1, 1),
('keys()', 'list(data.keys())', ' ops/sec', 1, 1),
('items()', 'list(data.items())', ' ops/sec', 1, 1),
]
common_setup = """
from __main__ import create_trie, WORDS100k, NON_WORDS100k, MIXED_WORDS100k, datrie
from __main__ import PREFIXES_3_1k, PREFIXES_5_1k, PREFIXES_8_1k, PREFIXES_15_1k
from __main__ import ALPHABET
words = WORDS100k
NON_WORDS_10k = NON_WORDS100k[:10000]
NON_WORDS_1k = ['ыва', 'xyz', 'соы', 'Axx', 'avы']*200
"""
dict_setup = common_setup + 'data = dict((word, 1) for word in words); empty_data=dict()'
trie_setup = common_setup + 'data = create_trie(); empty_data = datrie.Trie(ALPHABET)'
for test_name, test, descr, op_count, repeats in tests:
t_dict = timeit.Timer(test, dict_setup)
t_trie = timeit.Timer(test, trie_setup)
bench('dict '+test_name, t_dict, descr, op_count, repeats)
bench('trie '+test_name, t_trie, descr, op_count, repeats)
# trie-specific benchmarks
bench(
'trie.iter_prefix_values (hits)',
timeit.Timer(
"for word in words:\n"
" for it in data.iter_prefix_values(word):\n"
" pass",
trie_setup
),
)
bench(
'trie.prefix_values (hits)',
timeit.Timer(
"for word in words: data.prefix_values(word)",
trie_setup
)
)
bench(
'trie.prefix_values loop (hits)',
timeit.Timer(
"for word in words:\n"
" for it in data.prefix_values(word):pass",
trie_setup
)
)
bench(
'trie.iter_prefix_items (hits)',
timeit.Timer(
"for word in words:\n"
" for it in data.iter_prefix_items(word):\n"
" pass",
trie_setup
),
)
bench(
'trie.prefix_items (hits)',
timeit.Timer(
"for word in words: data.prefix_items(word)",
trie_setup
)
)
bench(
'trie.prefix_items loop (hits)',
timeit.Timer(
"for word in words:\n"
" for it in data.prefix_items(word):pass",
trie_setup
)
)
bench(
'trie.iter_prefixes (hits)',
timeit.Timer(
"for word in words:\n"
" for it in data.iter_prefixes(word): pass",
trie_setup
)
)
bench(
'trie.iter_prefixes (misses)',
timeit.Timer(
"for word in NON_WORDS100k:\n"
" for it in data.iter_prefixes(word): pass",
trie_setup
)
)
bench(
'trie.iter_prefixes (mixed)',
timeit.Timer(
"for word in MIXED_WORDS100k:\n"
" for it in data.iter_prefixes(word): pass",
trie_setup
)
)
bench(
'trie.has_keys_with_prefix (hits)',
timeit.Timer(
"for word in words: data.has_keys_with_prefix(word)",
trie_setup
)
)
bench(
'trie.has_keys_with_prefix (misses)',
timeit.Timer(
"for word in NON_WORDS100k: data.has_keys_with_prefix(word)",
trie_setup
)
)
for meth in ('longest_prefix', 'longest_prefix_item', 'longest_prefix_value'):
bench(
'trie.%s (hits)' % meth,
timeit.Timer(
"for word in words: data.%s(word)" % meth,
trie_setup
)
)
bench(
'trie.%s (misses)' % meth,
timeit.Timer(
"for word in NON_WORDS100k: data.%s(word, default=None)" % meth,
trie_setup
)
)
bench(
'trie.%s (mixed)' % meth,
timeit.Timer(
"for word in MIXED_WORDS100k: data.%s(word, default=None)" % meth,
trie_setup
)
)
prefix_data = [
('xxx', 'avg_len(res)==415', 'PREFIXES_3_1k'),
('xxxxx', 'avg_len(res)==17', 'PREFIXES_5_1k'),
('xxxxxxxx', 'avg_len(res)==3', 'PREFIXES_8_1k'),
('xxxxx..xx', 'avg_len(res)==1.4', 'PREFIXES_15_1k'),
('xxx', 'NON_EXISTING', 'NON_WORDS_1k'),
]
for xxx, avg, data in prefix_data:
for meth in ('items', 'keys', 'values'):
bench(
'trie.%s(prefix="%s"), %s' % (meth, xxx, avg),
timeit.Timer(
"for word in %s: data.%s(word)" % (data, meth),
trie_setup
),
'K ops/sec',
op_count=1,
)
def profiling():
print('\n====== Profiling =======\n')
def profile_yep():
import yep
trie = create_trie()
#WORDS = words100k()
yep.start(b'output.prof')
for x in range(100):
trie.keys()
# for x in range(1000):
# for word in WORDS:
# trie[word]
yep.stop()
def profile_cprofile():
import pstats
import cProfile
trie = create_trie()
WORDS = words100k()
def check_trie(trie, words):
value = 0
for word in words:
value += trie[word]
if value != len(words):
raise Exception()
# def check_prefixes(trie, words):
# for word in words:
# trie.keys(word)
# cProfile.runctx("check_prefixes(trie, NON_WORDS_1k)", globals(), locals(), "Profile.prof")
cProfile.runctx("check_trie(trie, WORDS)", globals(), locals(), "Profile.prof")
s = pstats.Stats("Profile.prof")
s.strip_dirs().sort_stats("time").print_stats(20)
#profile_cprofile()
profile_yep()
#def memory():
# gc.collect()
# _memory = lambda: _get_memory(os.getpid())
# initial_memory = _memory()
# trie = create_trie()
# gc.collect()
# trie_memory = _memory()
#
# del trie
# gc.collect()
# alphabet, words = words100k()
# words_dict = dict((word, 1) for word in words)
# del alphabet
# del words
# gc.collect()
#
# dict_memory = _memory()
# print('initial: %s, trie: +%s, dict: +%s' % (
# initial_memory,
# trie_memory-initial_memory,
# dict_memory-initial_memory,
# ))
if __name__ == '__main__':
benchmark()
#profiling()
#memory()
print('\n~~~~~~~~~~~~~~\n') | hexforge/pulp_db | experiments/tries/comparison/datrie/bench/speed.py | Python | apache-2.0 | 9,330 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.from_sparse_tensor_slices()`."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class FromSparseTensorSlicesTest(test_base.DatasetTestBase,
parameterized.TestCase):
@combinations.generate(
combinations.times(
combinations.combine(tf_api_version=1, mode=["graph"]),
combinations.combine(slices=[[
[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []
], [[1., 2.], [], [1., 2.], [1.], [1., 2.], [], [1., 2.]]])))
def testFromSparseTensorSlices(self, slices):
"""Test a dataset based on slices of a `tf.sparse.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
get_next = sparse_tensor.SparseTensor(*iterator.get_next())
with self.cached_session() as sess:
# Test with sparse tensor in the appropriate order.
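      # from_sparse_tensor_slices splits along dimension 0, so element i of the
      # dataset should carry exactly the values and column indices of slices[i].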
# pylint: disable=g-complex-comprehension
indices = np.array(
[[i, j] for i in range(len(slices)) for j in range(len(slices[i]))])
values = np.array([val for s in slices for val in s])
# pylint: enable=g-complex-comprehension
dense_shape = np.array([len(slices), max(len(s) for s in slices) + 1])
sparse_feed = sparse_tensor.SparseTensorValue(indices, values,
dense_shape)
sess.run(init_op, feed_dict={st: sparse_feed})
for i, s in enumerate(slices):
results = sess.run(get_next)
self.assertAllEqual(s, results.values)
expected_indices = np.array(
[[j] for j in range(len(slices[i]))]).reshape([-1, 1])
self.assertAllEqual(expected_indices, results.indices)
self.assertAllEqual(dense_shape[1:], results.dense_shape)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@combinations.generate(
combinations.times(
combinations.combine(tf_api_version=1, mode=["graph"]),
combinations.combine(slices=[[
[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []
], [[1., 2.], [], [1., 2.], [1.], [1., 2.], [], [1., 2.]]])))
def testFromSparseTensorSlicesInReverse(self, slices):
"""Test a dataset based on slices of a `tf.sparse.SparseTensor` in reverse order."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
with self.cached_session() as sess:
# pylint: disable=g-complex-comprehension
indices = np.array(
[[i, j] for i in range(len(slices)) for j in range(len(slices[i]))])
values = np.array([val for s in slices for val in s])
# pylint: enable=g-complex-comprehension
dense_shape = np.array([len(slices), max(len(s) for s in slices) + 1])
# Test with sparse tensor in the reverse order, which is not
# currently supported.
reverse_order_indices = indices[::-1, :]
reverse_order_values = values[::-1]
sparse_feed = sparse_tensor.SparseTensorValue(
reverse_order_indices, reverse_order_values, dense_shape)
with self.assertRaises(errors.UnimplementedError):
sess.run(init_op, feed_dict={st: sparse_feed})
@combinations.generate(combinations.combine(tf_api_version=1, mode=["graph"]))
def testEmptySparseTensorSlices(self):
"""Test a dataset based on slices of an empty `tf.sparse.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
get_next = sparse_tensor.SparseTensor(*iterator.get_next())
with self.cached_session() as sess:
# Test with an empty sparse tensor.
empty_indices = np.empty((0, 4), dtype=np.int64)
empty_values = np.empty((0,), dtype=np.float64)
empty_dense_shape = [0, 4, 37, 9]
sparse_feed = sparse_tensor.SparseTensorValue(empty_indices, empty_values,
empty_dense_shape)
sess.run(init_op, feed_dict={st: sparse_feed})
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@combinations.generate(combinations.combine(tf_api_version=1, mode=["graph"]))
def testEmptySparseTensorSlicesInvalid(self):
"""Test a dataset based on invalid `tf.sparse.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
with self.cached_session() as sess:
# Test with an empty sparse tensor but with non empty values.
empty_indices = np.empty((0, 4), dtype=np.int64)
non_empty_values = [1, 2, 3, 4]
empty_dense_shape = [0, 4, 37, 9]
sparse_feed = sparse_tensor.SparseTensorValue(empty_indices,
non_empty_values,
empty_dense_shape)
# Here, we expect the test to fail when running the feed.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(init_op, feed_dict={st: sparse_feed})
@combinations.generate(combinations.combine(tf_api_version=1, mode=["graph"]))
def testEmptySparseTensorSlicesInvalid2(self):
"""Test a dataset based on invalid `tf.sparse.SparseTensor`."""
st = array_ops.sparse_placeholder(dtypes.float64)
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_sparse_tensor_slices(st))
init_op = iterator.initializer
with self.cached_session() as sess:
      # Test with a malformed sparse tensor: a single empty index entry with no values.
empty_indices = [[]]
empty_values = []
dense_shape = [1, 1]
sparse_feed = sparse_tensor.SparseTensorValue(empty_indices, empty_values,
dense_shape)
# Here, we expect the test to fail when running the feed.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(init_op, feed_dict={st: sparse_feed})
@combinations.generate(combinations.combine(tf_api_version=2, mode=["eager"]))
def testFromSparseTensorSlicesError(self):
with self.assertRaises(AttributeError):
dataset_ops.Dataset.from_sparse_tensor_slices(None)
class FromSparseTensorSlicesCheckpointTest(
checkpoint_test_base.CheckpointTestBase, parameterized.TestCase):
def _build_sparse_tensor_slice_dataset(self, slices):
# pylint: disable=g-complex-comprehension
indices = np.array(
[[i, j] for i in range(len(slices)) for j in range(len(slices[i]))],
dtype=np.int64)
values = np.array([val for s in slices for val in s], dtype=np.float64)
# pylint: enable=g-complex-comprehension
dense_shape = np.array(
[len(slices), max(len(s) for s in slices) + 1], dtype=np.int64)
sparse_components = sparse_tensor.SparseTensor(indices, values, dense_shape)
return dataset_ops.Dataset.from_sparse_tensor_slices(sparse_components)
@combinations.generate(
combinations.times(test_base.v1_only_combinations(),
checkpoint_test_base.default_test_combinations()))
def test(self, verify_fn):
slices = [[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []]
verify_fn(
self,
lambda: self._build_sparse_tensor_slice_dataset(slices),
num_outputs=9,
sparse_tensors=True)
if __name__ == "__main__":
test.main()
| tensorflow/tensorflow | tensorflow/python/data/kernel_tests/from_sparse_tensor_slices_test.py | Python | apache-2.0 | 8,944 |
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for workflow object exports."""
from os.path import abspath, dirname, join
from flask.json import dumps
from ggrc.app import app
from ggrc_workflows.models import Workflow
from integration.ggrc import TestCase
from integration.ggrc_workflows.generator import WorkflowsGenerator
THIS_ABS_PATH = abspath(dirname(__file__))
CSV_DIR = join(THIS_ABS_PATH, 'test_csvs/')
class TestExportEmptyTemplate(TestCase):
"""Test empty export for all workflow object types."""
def setUp(self):
self.client.get("/login")
self.headers = {
'Content-Type': 'application/json',
"X-Requested-By": "gGRC",
"X-export-view": "blocks",
}
def test_single_object_export(self):
"""Test empty exports for workflow only."""
data = [{"object_name": "Workflow", "fields": "all"}]
response = self.client.post("/_service/export_csv",
data=dumps(data), headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertIn("Title*", response.data)
def test_multiple_objects(self):
"""Test empty exports for all workflow object in one query."""
data = [
{"object_name": "Workflow", "fields": "all"},
{"object_name": "TaskGroup", "fields": "all"},
{"object_name": "TaskGroupTask", "fields": "all"},
{"object_name": "Cycle", "fields": "all"},
{"object_name": "CycleTaskGroup", "fields": "all"},
{"object_name": "CycleTaskGroupObjectTask", "fields": "all"},
]
response = self.client.post("/_service/export_csv",
data=dumps(data), headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertIn("Workflow,", response.data)
self.assertIn("Task Group,", response.data)
self.assertIn("Task,", response.data)
self.assertIn("Cycle,", response.data)
self.assertIn("Cycle Task Group,", response.data)
self.assertIn("Cycle Task Group Object Task,", response.data)
class TestExportMultipleObjects(TestCase):
""" Test data is found in the google sheet:
https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=2035742544
"""
@classmethod
def setUpClass(cls): # pylint: disable=C0103
TestCase.clear_data()
cls.tc = app.test_client()
cls.tc.get("/login")
cls.import_file("workflow_big_sheet.csv")
@classmethod
def import_file(cls, filename, dry_run=False):
data = {"file": (open(join(CSV_DIR, filename)), filename)}
headers = {
"X-test-only": "true" if dry_run else "false",
"X-requested-by": "gGRC",
}
cls.tc.post("/_service/import_csv",
data=data, headers=headers)
def activate(self):
""" activate workflows just once after the class has been initialized
This should be in setUpClass method, but we can't access the server
context from there."""
gen = WorkflowsGenerator()
# generate cycle for the only one time wf
wf1 = Workflow.query.filter_by(status="Draft", slug="wf-1").first()
if wf1:
gen.generate_cycle(wf1)
workflows = Workflow.query.filter_by(status="Draft").all()
for wf in workflows:
gen.activate_workflow(wf)
def setUp(self):
self.client.get("/login")
self.headers = {
'Content-Type': 'application/json',
"X-Requested-By": "gGRC",
"X-export-view": "blocks",
}
self.activate()
def export_csv(self, data):
response = self.client.post("/_service/export_csv", data=dumps(data),
headers=self.headers)
self.assert200(response)
return response
def test_workflow_task_group_mapping(self):
""" test workflow and task group mappings """
data = [
{
"object_name": "Workflow", # wf-1
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "TaskGroup",
"slugs": ["tg-1"],
},
},
"fields": "all",
}, {
"object_name": "TaskGroup", # tg-1, tg-2
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": "all",
},
]
response = self.export_csv(data).data
self.assertEqual(3, response.count("wf-1")) # 1 for wf and 1 on each tg
self.assertIn("tg-1", response)
self.assertIn("tg-6", response)
def test_tg_task(self):
""" test task group and task mappings """
data = [
{
"object_name": "TaskGroupTask", # task-1, task-7
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "TaskGroup",
"slugs": ["tg-1"],
},
},
"fields": "all",
}, {
"object_name": "TaskGroup", # tg-1, tg-2
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": "all",
},
]
response = self.export_csv(data).data
self.assertEqual(3, response.count("tg-1")) # 2 for tasks and 1 for tg
self.assertIn("task-1", response)
self.assertIn("task-7", response)
def test_workflow_cycle_mapping(self):
""" test workflow and cycle mappings """
data = [
{
"object_name": "Cycle", # cycle with title wf-1
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "Workflow",
"slugs": ["wf-1"],
},
},
"fields": "all",
}, {
"object_name": "Workflow", # wf-1
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": "all",
}, {
"object_name": "CycleTaskGroup", # two cycle groups
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": "all",
}, {
"object_name": "Cycle", # sholud be same cycle as in first block
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["2"],
},
},
"fields": "all",
}, {
# Task mapped to any of the two task groups, 3 tasks
"object_name": "CycleTaskGroupObjectTask",
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["2"],
},
},
"fields": "all",
}, {
"object_name": "CycleTaskGroup", # two cycle groups
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["4"],
},
},
"fields": "all",
},
]
response = self.export_csv(data).data
self.assertEqual(3, response.count("wf-1")) # 2 for cycles and 1 for wf
# 3rd block = 2, 5th block = 3, 6th block = 2.
self.assertEqual(7, response.count("CYCLEGROUP-"))
self.assertEqual(9, response.count("CYCLE-"))
self.assertEqual(3, response.count("CYCLETASK-"))
  def test_cycle_task_objects(self):
""" test cycle task and various objects """
data = [
{
"object_name": "CycleTaskGroupObjectTask", #
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "Policy",
"slugs": ["p1"],
},
},
"fields": "all",
}, {
"object_name": "Policy", #
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": ["slug", "title"],
},
]
response = self.export_csv(data).data
self.assertEqual(2, response.count("CYCLETASK-"))
self.assertEqual(3, response.count(",p1,"))
def test_wf_indirect_relevant_filters(self):
""" test related filter for indirect relationships on wf objects """
def block(obj):
return {
"object_name": obj,
"fields": ["slug"],
"filters": {
"expression": {
"object_name": "Policy",
"op": {"name": "relevant"},
"slugs": ["p1"],
},
},
}
data = [
block("Workflow"),
block("Cycle"),
block("CycleTaskGroup"),
block("CycleTaskGroupObjectTask"),
]
response = self.export_csv(data).data
wf = Workflow.query.filter_by(slug="wf-1").first()
cycle = wf.cycles[0]
cycle_tasks = []
for cycle_task in cycle.cycle_task_group_object_tasks:
is_related = False
for related_object in cycle_task.related_objects:
if related_object.slug == "p1":
is_related = True
if is_related:
cycle_tasks.append(cycle_task)
cycle_task_groups = list({cycle_task.cycle_task_group
for cycle_task in cycle_tasks})
self.assertEqual(1, response.count("wf-"))
self.assertRegexpMatches(response, ",{}[,\r\n]".format(wf.slug))
self.assertEqual(1, response.count("CYCLE-"))
self.assertRegexpMatches(response, ",{}[,\r\n]".format(cycle.slug))
self.assertEqual(1, response.count("CYCLEGROUP-"))
self.assertEqual(1, len(cycle_task_groups))
self.assertRegexpMatches(response, ",{}[,\r\n]".format(
cycle_task_groups[0].slug))
self.assertEqual(2, response.count("CYCLETASK-"))
self.assertEqual(2, len(cycle_tasks))
for cycle_task in cycle_tasks:
self.assertRegexpMatches(response, ",{}[,\r\n]".format(
cycle_task.slug))
destinations = [
("Workflow", wf.slug, 3),
("Cycle", cycle.slug, 3),
("CycleTaskGroupObjectTask", cycle_tasks[0].slug, 1),
("CycleTaskGroupObjectTask", cycle_tasks[1].slug, 1),
]
for object_name, slug, count in destinations:
data = [{
"object_name": "Policy",
"fields": ["slug"],
"filters": {
"expression": {
"object_name": object_name,
"op": {"name": "relevant"},
"slugs": [slug],
},
},
}]
response = self.export_csv(data).data
self.assertEqual(count, response.count(",p"), "Count for " + object_name)
self.assertIn(",p1", response)
| andrei-karalionak/ggrc-core | test/integration/ggrc_workflows/converters/test_workflow_export_csv.py | Python | apache-2.0 | 11,531 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import json
import os
import random
import shutil
import tempfile
import time
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import exporter as exporter_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import run_config as run_config_lib
from tensorflow.python.estimator import training
from tensorflow.python.estimator.canned import dnn
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export as export_lib
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
from tensorflow.python.util import compat
_DEFAULT_EVAL_STEPS = 100
_DEFAULT_EVAL_DELAY_SECS = 120
_DEFAULT_EVAL_THROTTLE_SECS = 600
_DELAY_SECS_PER_WORKER = 5
_GLOBAL_STEP_KEY = ops.GraphKeys.GLOBAL_STEP
_INVALID_INPUT_FN_MSG = '`input_fn` must be callable'
_INVALID_HOOK_MSG = 'All hooks must be `SessionRunHook` instances'
_INVALID_MAX_STEPS_MSG = 'Must specify max_steps > 0'
_INVALID_STEPS_MSG = 'Must specify steps > 0'
_INVALID_NAME_MSG = '`name` must be string'
_INVALID_EVAL_DELAY_SECS_MSG = 'Must specify start_delay_secs >= 0'
_INVALID_EVAL_THROTTLE_SECS_MSG = 'Must specify throttle_secs >= 0'
_INVALID_ESTIMATOR_MSG = '`estimator` must have type `tf.estimator.Estimator`'
_STALE_CHECKPOINT_MSG = 'There was no new checkpoint after the training.'
_INVALID_EXPORTER_MSG = '`exporters` must be an Exporter'
_INVALID_EXPORTER_NAME_TYPE_MSG = 'An Exporter must have a string name'
_DUPLICATE_EXPORTER_NAMES_MSG = '`exporters` must have unique names.'
_NONE_EXPORTER_NAME_MSG = (
'An Exporter cannot have a name that is `None` or empty.')
_INVALID_TRAIN_SPEC_MSG = '`train_spec` must have type `tf.estimator.TrainSpec`'
_INVALID_EVAL_SPEC_MSG = '`eval_spec` must have type `tf.estimator.EvalSpec`'
_EVAL_SPEC_OR_NONE_MSG = (
'`eval_spec` must be either `None` or have type `tf.estimator.EvalSpec`')
_INVALID_EVAL_LISTENER_MSG = 'must have type `_ContinuousEvalListener`'
_INVALID_CONFIG_FOR_STD_SERVER_MSG = 'Could not start server; .*TF_CONFIG'
_INVALID_LOCAL_TASK_WITH_CLUSTER = '`task.type` in TF_CONFIG cannot be `local`'
_INVALID_TASK_TYPE = '`estimator.config` must have task_type set.'
_INPROPER_THROTTL_SECS = (
'EvalSpec.throttle_secs is set as 0.*Please consider to increase')
# The message should NOT contain the word 'local'. Because (?!word) is a
# lookahead, the $ (end-of-string) anchor is required; otherwise the pattern
# would match only a prefix and still succeed.
_INVALID_TASK_TO_RUN = (
'Task type .* is not supported. Supported task types are ((?!local).)*$')
_INVALID_EMPTY_EVAL_RESULT_ERR = (
'Internal error: `Estimator.evaluate` should never return empty metrics')
_INVALID_EVAL_RESULT_TYPE_ERR = '`Estimator.evaluate` should return dict.'
_MISSING_GLOBAL_STEP_IN_EVAL_RESULT_ERR = (
'Internal error: `Estimator.evaluate` result should have `global_step`')
_INVALID_EVAL_TASK_ID_ERR = (
'there can only be one `evaluator` task .*with task id 0')
_TF_CONFIG_FOR_CHIEF = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4']
},
'task': {
'type': run_config_lib.TaskType.CHIEF,
'index': 0
}
}
_TF_CONFIG_FOR_MASTER = {
'cluster': {
run_config_lib.TaskType.MASTER: ['host0:0'],
run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4']
},
'task': {
'type': run_config_lib.TaskType.MASTER,
'index': 0
}
}
_TF_CONFIG_FOR_WORKER = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4']
},
'task': {
'type': run_config_lib.TaskType.WORKER,
'index': 1
}
}
_TF_CONFIG_FOR_PS = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4']
},
'task': {
'type': run_config_lib.TaskType.PS,
'index': 1
}
}
_TF_CONFIG_FOR_EVALUATOR = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
run_config_lib.TaskType.PS: ['host1:1', 'host2:2'],
run_config_lib.TaskType.WORKER: ['host3:3', 'host4:4']
},
'task': {
'type': run_config_lib.TaskType.EVALUATOR,
'index': 0
}
}
_TF_CONFIG_FOR_GOOGLE = {'environment': 'google'}
class _FakeHook(session_run_hook.SessionRunHook):
"""Fake implementation of `SessionRunHook`."""
class _InvalidHook(object):
"""Invalid hook (not a subclass of `SessionRunHook`)."""
def _create_exporter(name):
class FakeExporter(exporter_lib.Exporter):
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
def export(self, *args, **kwargs):
del args, kwargs
return FakeExporter(name=name)
def _create_run_config_with_cluster_spec(tf_config):
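  # Builds a RunConfig whose cluster spec comes from a temporarily patched
  # TF_CONFIG environment variable, mirroring how distributed jobs are set up.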
with test.mock.patch.dict('os.environ', {'TF_CONFIG': json.dumps(tf_config)}):
return run_config_lib.RunConfig()
class TrainSpecTest(test.TestCase):
"""Tests TrainSpec."""
def testRequiredArgumentsSet(self):
"""Tests that no errors are raised when all required arguments are set."""
spec = training.TrainSpec(input_fn=lambda: 1)
self.assertEqual(1, spec.input_fn())
self.assertIsNone(spec.max_steps)
self.assertEqual(0, len(spec.hooks))
def testAllArgumentsSet(self):
"""Tests that no errors are raised when all arguments are set."""
hooks = [_FakeHook()]
spec = training.TrainSpec(input_fn=lambda: 1, max_steps=2, hooks=hooks)
self.assertEqual(1, spec.input_fn())
self.assertEqual(2, spec.max_steps)
self.assertEqual(tuple(hooks), spec.hooks)
def testInvalidInputFn(self):
with self.assertRaisesRegexp(TypeError, _INVALID_INPUT_FN_MSG):
training.TrainSpec(input_fn='invalid')
def testInvalidMaxStep(self):
with self.assertRaisesRegexp(ValueError, _INVALID_MAX_STEPS_MSG):
training.TrainSpec(input_fn=lambda: 1, max_steps=0)
def testInvalidHook(self):
with self.assertRaisesRegexp(TypeError, _INVALID_HOOK_MSG):
training.TrainSpec(input_fn=lambda: 1, hooks=[_InvalidHook()])
class EvalSpecTest(test.TestCase):
"""Tests EvalSpec."""
def testRequiredArgumentsSet(self):
"""Tests that no errors are raised when all required arguments are set."""
spec = training.EvalSpec(input_fn=lambda: 1)
self.assertEqual(1, spec.input_fn())
self.assertEqual(_DEFAULT_EVAL_STEPS, spec.steps)
self.assertIsNone(spec.name)
self.assertEqual(0, len(spec.hooks))
self.assertEqual(0, len(spec.exporters))
self.assertEqual(_DEFAULT_EVAL_DELAY_SECS, spec.start_delay_secs)
self.assertEqual(_DEFAULT_EVAL_THROTTLE_SECS, spec.throttle_secs)
def testAllArgumentsSet(self):
"""Tests that no errors are raised when all arguments are set."""
hooks = [_FakeHook()]
exporter = _create_exporter('a')
spec = training.EvalSpec(
input_fn=lambda: 1,
steps=2,
name='name',
hooks=hooks,
exporters=exporter,
start_delay_secs=3,
throttle_secs=4)
self.assertEqual(1, spec.input_fn())
self.assertEqual(2, spec.steps)
self.assertEqual('name', spec.name)
self.assertEqual(tuple(hooks), spec.hooks)
self.assertEqual((exporter,), spec.exporters)
self.assertEqual(3, spec.start_delay_secs)
self.assertEqual(4, spec.throttle_secs)
def testListOfExporters(self):
"""Tests that no errors are raised with multiple exporters."""
exporters = [_create_exporter('a'), _create_exporter('b')]
spec = training.EvalSpec(input_fn=lambda: 1, exporters=exporters)
self.assertEqual(1, spec.input_fn())
self.assertEqual(tuple(exporters), spec.exporters)
def testInvalidInputFn(self):
with self.assertRaisesRegexp(TypeError, _INVALID_INPUT_FN_MSG):
training.EvalSpec(input_fn='invalid')
def testInvalidMaxStep(self):
with self.assertRaisesRegexp(ValueError, _INVALID_STEPS_MSG):
training.EvalSpec(input_fn=lambda: 1, steps=0)
def testInvalidName(self):
with self.assertRaisesRegexp(TypeError, _INVALID_NAME_MSG):
training.EvalSpec(input_fn=lambda: 1, name=123)
def testInvalidHook(self):
with self.assertRaisesRegexp(TypeError, _INVALID_HOOK_MSG):
training.EvalSpec(input_fn=lambda: 1, hooks=[_InvalidHook()])
def testInvalidDelaySecs(self):
with self.assertRaisesRegexp(ValueError, _INVALID_EVAL_DELAY_SECS_MSG):
training.EvalSpec(input_fn=lambda: 1, start_delay_secs=-1)
def testInvalidThrottleSecs(self):
with self.assertRaisesRegexp(ValueError, _INVALID_EVAL_THROTTLE_SECS_MSG):
training.EvalSpec(input_fn=lambda: 1, throttle_secs=-1)
def testInvalidTypeOfListOfExporters(self):
with self.assertRaisesRegexp(TypeError, _INVALID_EXPORTER_MSG):
training.EvalSpec(
input_fn=lambda: 1, exporters=[_create_exporter('a'),
_FakeHook()])
def testInvalidTypeOfIndividualExporter(self):
with self.assertRaisesRegexp(TypeError, _INVALID_EXPORTER_MSG):
training.EvalSpec(input_fn=lambda: 1, exporters=_FakeHook())
def testInvalidTypeOfExporterName(self):
with self.assertRaisesRegexp(ValueError, _INVALID_EXPORTER_NAME_TYPE_MSG):
training.EvalSpec(input_fn=lambda: 1,
exporters=_create_exporter(name=123))
def testMultipleExportersWithTheSameName(self):
with self.assertRaisesRegexp(ValueError, _DUPLICATE_EXPORTER_NAMES_MSG):
training.EvalSpec(
input_fn=lambda: 1,
exporters=[_create_exporter('a'), _create_exporter('a')])
def testMultipleExportersAndOneWithoutAName(self):
with self.assertRaisesRegexp(ValueError, _NONE_EXPORTER_NAME_MSG):
training.EvalSpec(
input_fn=lambda: 1,
exporters=[_create_exporter('a'),
_create_exporter(None)])
def testSingleExporterWithoutAName(self):
with self.assertRaisesRegexp(ValueError, _NONE_EXPORTER_NAME_MSG):
training.EvalSpec(input_fn=lambda: 1, exporters=_create_exporter(None))
class TrainAndEvaluateTest(test.TestCase):
def test_run_task(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
with test.mock.patch.object(training, '_TrainingExecutor') as mock_executor:
mock_executor_instance = test.mock.Mock()
mock_executor.return_value = mock_executor_instance
training.train_and_evaluate(mock_est, mock_train_spec, mock_eval_spec)
mock_executor.assert_called_with(estimator=mock_est,
train_spec=mock_train_spec,
eval_spec=mock_eval_spec)
self.assertTrue(mock_executor_instance.run.called)
def test_error_out_if_evaluator_task_id_is_non_zero(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
},
'task': {
'type': run_config_lib.TaskType.EVALUATOR,
'index': 1
}
}
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.config = _create_run_config_with_cluster_spec(tf_config)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
with self.assertRaisesRegexp(ValueError, _INVALID_EVAL_TASK_ID_ERR):
training.train_and_evaluate(mock_est, mock_train_spec, mock_eval_spec)
def test_invalid_estimator(self):
invalid_estimator = object()
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
with self.assertRaisesRegexp(TypeError, _INVALID_ESTIMATOR_MSG):
training.train_and_evaluate(invalid_estimator, mock_train_spec,
mock_eval_spec)
def test_fail_fast_if_invalid_eval_spec(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
invalid_eval_spec = object()
with test.mock.patch.object(training, '_TrainingExecutor') as mock_executor:
with self.assertRaisesRegexp(TypeError, _INVALID_EVAL_SPEC_MSG):
training.train_and_evaluate(mock_est, mock_train_spec,
invalid_eval_spec)
mock_executor.assert_not_called()
class TrainingExecutorConstructorTest(test.TestCase):
"""Tests constructor of _TrainingExecutor."""
def test_required_arguments_set(self):
estimator = estimator_lib.Estimator(model_fn=lambda features: features)
train_spec = training.TrainSpec(input_fn=lambda: 1)
eval_spec = training.EvalSpec(input_fn=lambda: 1)
executor = training._TrainingExecutor(estimator, train_spec, eval_spec)
self.assertEqual(estimator, executor.estimator)
def test_invalid_estimator(self):
invalid_estimator = object()
train_spec = training.TrainSpec(input_fn=lambda: 1)
eval_spec = training.EvalSpec(input_fn=lambda: 1)
with self.assertRaisesRegexp(TypeError, _INVALID_ESTIMATOR_MSG):
training._TrainingExecutor(invalid_estimator, train_spec, eval_spec)
def test_invalid_train_spec(self):
estimator = estimator_lib.Estimator(model_fn=lambda features: features)
invalid_train_spec = object()
eval_spec = training.EvalSpec(input_fn=lambda: 1)
with self.assertRaisesRegexp(TypeError, _INVALID_TRAIN_SPEC_MSG):
training._TrainingExecutor(estimator, invalid_train_spec, eval_spec)
def test_invalid_eval_spec(self):
estimator = estimator_lib.Estimator(model_fn=lambda features: features)
train_spec = training.TrainSpec(input_fn=lambda: 1)
invalid_eval_spec = object()
with self.assertRaisesRegexp(TypeError, _EVAL_SPEC_OR_NONE_MSG):
training._TrainingExecutor(estimator, train_spec, invalid_eval_spec)
def test_eval_spec_none(self):
estimator = estimator_lib.Estimator(model_fn=lambda features: features)
train_spec = training.TrainSpec(input_fn=lambda: 1)
eval_spec = None
# Tests that no error is raised.
training._TrainingExecutor(estimator, train_spec, eval_spec)
def test_invalid_train_hooks(self):
estimator = estimator_lib.Estimator(model_fn=lambda features: features)
train_spec = training.TrainSpec(input_fn=lambda: 1)
eval_spec = training.EvalSpec(input_fn=lambda: 1)
invalid_train_hooks = [object()]
with self.assertRaisesRegexp(TypeError, _INVALID_HOOK_MSG):
training._TrainingExecutor(
estimator, train_spec, eval_spec, train_hooks=invalid_train_hooks)
def test_invalid_continuous_eval_listener(self):
estimator = estimator_lib.Estimator(model_fn=lambda features: features)
train_spec = training.TrainSpec(input_fn=lambda: 1)
eval_spec = training.EvalSpec(input_fn=lambda: 1)
invalid_continuous_eval_listener = object()
with self.assertRaisesRegexp(TypeError, _INVALID_EVAL_LISTENER_MSG):
training._TrainingExecutor(
estimator,
train_spec,
eval_spec,
continuous_eval_listener=invalid_continuous_eval_listener)
class _TrainingExecutorTrainingTest(object):
"""Tests training of _TrainingExecutor."""
def __init__(self, run_config):
self._run_config = run_config
def _run_task(self, executor):
# We should not call executor.run as the test here is intended to test
# run_foo explicitly (foo is the task type).
return getattr(executor, 'run_' + self._run_config.task_type)()
@test.mock.patch.object(time, 'sleep')
@test.mock.patch.object(server_lib, 'Server')
def test_train_with_train_spec(self, mock_server, unused_mock_sleep):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.config = self._run_config
train_spec = training.TrainSpec(
input_fn=lambda: 1, max_steps=2, hooks=[_FakeHook()])
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_server_instance = mock_server.return_value
executor = training._TrainingExecutor(mock_est, train_spec, mock_eval_spec)
self._run_task(executor)
mock_server.assert_called_with(
mock_est.config.cluster_spec,
job_name=mock_est.config.task_type,
task_index=mock_est.config.task_id,
config=test.mock.ANY,
protocol=None,
start=False)
self.assertTrue(mock_server_instance.start.called)
mock_est.train.assert_called_with(
input_fn=train_spec.input_fn,
max_steps=train_spec.max_steps,
hooks=list(train_spec.hooks),
saving_listeners=test.mock.ANY)
mock_est.evaluate.assert_not_called()
mock_est.export_savedmodel.assert_not_called()
@test.mock.patch.object(time, 'sleep')
@test.mock.patch.object(server_lib, 'Server')
def test_train_with_no_eval_spec(self, mock_server, unused_mock_sleep):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.config = self._run_config
train_spec = training.TrainSpec(
input_fn=lambda: 1, max_steps=2, hooks=[_FakeHook()])
eval_spec = None
mock_server_instance = mock_server.return_value
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
self._run_task(executor)
mock_server.assert_called_with(
mock_est.config.cluster_spec,
job_name=mock_est.config.task_type,
task_index=mock_est.config.task_id,
config=test.mock.ANY,
protocol=None,
start=False)
self.assertTrue(mock_server_instance.start.called)
mock_est.train.assert_called_with(
input_fn=train_spec.input_fn,
max_steps=train_spec.max_steps,
hooks=list(train_spec.hooks),
saving_listeners=test.mock.ANY)
mock_est.evaluate.assert_not_called()
mock_est.export_savedmodel.assert_not_called()
@test.mock.patch.object(time, 'sleep')
@test.mock.patch.object(server_lib, 'Server')
def test_train_with_train_hooks(self, unused_mock_server, unused_mock_sleep):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.config = self._run_config
train_spec = training.TrainSpec(
input_fn=lambda: 1, max_steps=2, hooks=[_FakeHook()])
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
extra_hooks = [_FakeHook()]
executor = training._TrainingExecutor(
mock_est, train_spec, mock_eval_spec, train_hooks=extra_hooks)
self._run_task(executor)
mock_est.train.assert_called_with(
input_fn=train_spec.input_fn,
max_steps=train_spec.max_steps,
hooks=list(train_spec.hooks) + extra_hooks,
saving_listeners=test.mock.ANY)
@test.mock.patch.object(time, 'sleep')
@test.mock.patch.object(server_lib, 'Server')
def test_no_server_startup_in_google(self, mock_server, unused_mock_sleep):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.config = self._run_config
mock_train_spec = test.mock.Mock(spec=training.TrainSpec, hooks=[])
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
executor = training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec)
tf_config = {'TF_CONFIG': json.dumps(_TF_CONFIG_FOR_GOOGLE)}
with test.mock.patch.dict('os.environ', tf_config):
self._run_task(executor)
mock_server.assert_not_called()
def test_fail_with_empty_cluster_spec(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
mock_est.config.cluster_spec = None
mock_est.config.master = 'grpc://...'
mock_est.config.task_type = 'worker'
mock_est.config.task_id = 2
with self.assertRaisesRegexp(RuntimeError,
_INVALID_CONFIG_FOR_STD_SERVER_MSG):
self._run_task(training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec))
def test_fail_with_empty_master(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
mock_est.config.cluster_spec = server_lib.ClusterSpec(
{'worker': ['dummy', 'dummy1']})
mock_est.config.master = ''
mock_est.config.task_type = 'worker'
mock_est.config.task_id = 2
with self.assertRaisesRegexp(RuntimeError,
_INVALID_CONFIG_FOR_STD_SERVER_MSG):
self._run_task(training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec))
@test.mock.patch.object(time, 'sleep')
@test.mock.patch.object(server_lib, 'Server')
def test_single_worker_node_with_empty_tf_master(
self, mock_server, unused_mock_sleep):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec, hooks=[])
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
# Single node cluster.
mock_est.config.cluster_spec = server_lib.ClusterSpec({'worker': ['dummy']})
mock_est.config.master = ''
mock_est.config.task_type = 'worker'
mock_est.config.task_id = 2
self._run_task(training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec))
self.assertTrue(mock_est.train.called)
mock_server.assert_not_called()
def test_fail_with_empty_task_type(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
mock_est.config.cluster_spec = server_lib.ClusterSpec({'worker': ['dummy']})
mock_est.config.master = 'grpc://...'
mock_est.config.task_type = ''
mock_est.config.task_id = 2
with self.assertRaisesRegexp(RuntimeError,
_INVALID_CONFIG_FOR_STD_SERVER_MSG):
self._run_task(training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec))
def test_fail_with_none_task_id(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
mock_est.config.cluster_spec = server_lib.ClusterSpec({'worker': ['dummy']})
mock_est.config.master = 'grpc://...'
mock_est.config.task_type = 'worker'
mock_est.config.task_id = None
with self.assertRaisesRegexp(RuntimeError,
_INVALID_CONFIG_FOR_STD_SERVER_MSG):
self._run_task(training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec))
class TrainingExecutorRunWorkerTest(_TrainingExecutorTrainingTest,
test.TestCase):
"""Tests run_worker of _TrainingExecutor."""
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
_TrainingExecutorTrainingTest.__init__(
self,
run_config=_create_run_config_with_cluster_spec(_TF_CONFIG_FOR_WORKER))
@test.mock.patch.object(server_lib, 'Server')
def test_delay_for_worker(self, _):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.config = self._run_config
mock_train_spec = test.mock.Mock(spec=training.TrainSpec, hooks=[])
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
executor = training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec)
expected_secs = (self._run_config.task_id + 1) * _DELAY_SECS_PER_WORKER
with test.mock.patch.object(time, 'sleep') as mock_sleep:
mock_sleep.side_effect = lambda s: self.assertEqual(expected_secs, s)
self._run_task(executor)
self.assertTrue(mock_sleep.called)
class TrainingExecutorRunChiefTest(_TrainingExecutorTrainingTest,
test.TestCase):
"""Tests run_chief of _TrainingExecutor."""
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
_TrainingExecutorTrainingTest.__init__(
self,
run_config=_create_run_config_with_cluster_spec(_TF_CONFIG_FOR_CHIEF))
@test.mock.patch.object(server_lib, 'Server')
def test_no_delay_for_chief(self, _):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.config = self._run_config
mock_train_spec = test.mock.Mock(spec=training.TrainSpec, hooks=[])
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
executor = training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec)
with test.mock.patch.object(time, 'sleep') as mock_sleep:
self._run_task(executor)
mock_sleep.assert_not_called()
class TrainingExecutorRunMasterTest(test.TestCase):
"""Tests run_chief of _TrainingExecutor."""
def setUp(self):
self._run_config = _create_run_config_with_cluster_spec(
_TF_CONFIG_FOR_MASTER)
@test.mock.patch.object(server_lib, 'Server')
def test_no_delay_for_master(self, _):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.evaluate = lambda *args, **kw: {ops.GraphKeys.GLOBAL_STEP: 123}
mock_est.config = self._run_config
mock_train_spec = test.mock.Mock(
spec=training.TrainSpec, max_steps=123, hooks=[])
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec, exporters=[])
executor = training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec)
with test.mock.patch.object(time, 'sleep') as mock_sleep:
executor.run_master()
mock_sleep.assert_not_called()
@test.mock.patch.object(time, 'sleep')
@test.mock.patch.object(server_lib, 'Server')
def test_train_with_train_spec(self, mock_server, unused_mock_sleep):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.evaluate = lambda *args, **kw: {ops.GraphKeys.GLOBAL_STEP: 123}
mock_est.config = self._run_config
train_spec = training.TrainSpec(
input_fn=lambda: 1, max_steps=2, hooks=[_FakeHook()])
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec, exporters=[])
mock_server_instance = mock_server.return_value
executor = training._TrainingExecutor(mock_est, train_spec, mock_eval_spec)
executor.run_master()
mock_server.assert_called_with(
mock_est.config.cluster_spec,
job_name=mock_est.config.task_type,
task_index=mock_est.config.task_id,
config=test.mock.ANY,
protocol=None,
start=False)
self.assertTrue(mock_server_instance.start.called)
mock_est.train.assert_called_with(
input_fn=train_spec.input_fn,
max_steps=train_spec.max_steps,
hooks=list(train_spec.hooks),
saving_listeners=test.mock.ANY)
mock_est.export_savedmodel.assert_not_called()
@test.mock.patch.object(time, 'sleep')
@test.mock.patch.object(server_lib, 'Server')
def test_train_with_no_eval_spec_fails(self, mock_server, unused_mock_sleep):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.evaluate = lambda *args, **kw: {ops.GraphKeys.GLOBAL_STEP: 123}
mock_est.config = self._run_config
train_spec = training.TrainSpec(
input_fn=lambda: 1, max_steps=2, hooks=[_FakeHook()])
eval_spec = None
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
with self.assertRaisesRegexp(TypeError, _INVALID_EVAL_SPEC_MSG):
executor.run_master()
@test.mock.patch.object(time, 'sleep')
@test.mock.patch.object(server_lib, 'Server')
def test_train_with_train_hooks(self, mock_server, unused_mock_sleep):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.evaluate = lambda *args, **kw: {ops.GraphKeys.GLOBAL_STEP: 123}
mock_est.config = self._run_config
train_spec = training.TrainSpec(
input_fn=lambda: 1, max_steps=2, hooks=[_FakeHook()])
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec, exporters=[])
extra_hooks = [_FakeHook()]
executor = training._TrainingExecutor(
mock_est, train_spec, mock_eval_spec, train_hooks=extra_hooks)
executor.run_master()
mock_est.train.assert_called_with(
input_fn=train_spec.input_fn,
max_steps=train_spec.max_steps,
hooks=list(train_spec.hooks) + extra_hooks,
saving_listeners=test.mock.ANY)
@test.mock.patch.object(time, 'sleep')
@test.mock.patch.object(server_lib, 'Server')
def test_no_server_startup_in_google(self, mock_server, unused_mock_sleep):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.evaluate = lambda *args, **kw: {ops.GraphKeys.GLOBAL_STEP: 123}
mock_est.config = self._run_config
mock_train_spec = test.mock.Mock(
spec=training.TrainSpec, max_steps=123, hooks=[])
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec, exporters=[])
executor = training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec)
tf_config = {'TF_CONFIG': json.dumps(_TF_CONFIG_FOR_GOOGLE)}
with test.mock.patch.dict('os.environ', tf_config):
executor.run_master()
mock_server.assert_not_called()
def test_fail_with_empty_cluster_spec(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
mock_est.config.cluster_spec = None
mock_est.config.master = 'grpc://...'
mock_est.config.task_type = 'master'
mock_est.config.task_id = 2
with self.assertRaisesRegexp(RuntimeError,
_INVALID_CONFIG_FOR_STD_SERVER_MSG):
training._TrainingExecutor(
mock_est, mock_train_spec, mock_eval_spec).run_master()
def test_fail_with_empty_master(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
mock_est.config.cluster_spec = server_lib.ClusterSpec(
{'master': ['dummy'], 'worker': ['dummy1']})
mock_est.config.master = ''
mock_est.config.task_type = 'master'
mock_est.config.task_id = 0
with self.assertRaisesRegexp(RuntimeError,
_INVALID_CONFIG_FOR_STD_SERVER_MSG):
training._TrainingExecutor(
mock_est, mock_train_spec, mock_eval_spec).run_master()
@test.mock.patch.object(time, 'sleep')
@test.mock.patch.object(server_lib, 'Server')
def test_single_master_node_with_empty_tf_master(
self, mock_server, unused_mock_sleep):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.evaluate = lambda *args, **kw: {ops.GraphKeys.GLOBAL_STEP: 123}
mock_train_spec = test.mock.Mock(
spec=training.TrainSpec, max_steps=123, hooks=[])
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec, exporters=[])
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
mock_est.config.cluster_spec = server_lib.ClusterSpec(
{'master': ['dummy']})
mock_est.config.master = ''
mock_est.config.task_type = 'master'
mock_est.config.task_id = 0
executor = training._TrainingExecutor(
mock_est, mock_train_spec, mock_eval_spec)
executor.run_master()
mock_server.assert_not_called()
self.assertTrue(mock_est.train.called)
def test_fail_with_empty_task_type(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
mock_est.config.cluster_spec = server_lib.ClusterSpec({'master': ['dummy']})
mock_est.config.master = 'grpc://...'
mock_est.config.task_type = ''
mock_est.config.task_id = 2
with self.assertRaisesRegexp(RuntimeError,
_INVALID_CONFIG_FOR_STD_SERVER_MSG):
training._TrainingExecutor(
mock_est, mock_train_spec, mock_eval_spec).run_master()
def test_fail_with_none_task_id(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
mock_est.config.cluster_spec = server_lib.ClusterSpec({'master': ['dummy']})
mock_est.config.master = 'grpc://...'
mock_est.config.task_type = 'master'
mock_est.config.task_id = None
with self.assertRaisesRegexp(RuntimeError,
_INVALID_CONFIG_FOR_STD_SERVER_MSG):
training._TrainingExecutor(
mock_est, mock_train_spec, mock_eval_spec).run_master()
@test.mock.patch.object(server_lib, 'Server')
def test_run_master_triggers_evaluate_and_export(self, _):
def estimator_train(saving_listeners, *args, **kwargs):
# There shalt be a saving_listener. Estimator is going to call
# `after_save`.
del args, kwargs
saving_listeners[0].begin()
saving_listeners[0].after_save(session=None, global_step_value=0)
saving_listeners[0].after_save(session=None, global_step_value=10)
mock_est = test.mock.Mock(
spec=estimator_lib.Estimator, model_dir='path/', train=estimator_train)
mock_est.latest_checkpoint.return_value = 'checkpoint_path/'
mock_est.config = self._run_config
exporter = test.mock.PropertyMock(spec=exporter_lib.Exporter)
exporter.name = 'see_whether_export_is_called'
train_spec = training.TrainSpec(input_fn=lambda: 1, max_steps=300)
eval_spec = training.EvalSpec(
input_fn=lambda: 1, steps=2, exporters=exporter)
eval_result = {_GLOBAL_STEP_KEY: train_spec.max_steps}
mock_est.evaluate.return_value = eval_result
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
executor.run_master()
mock_est.evaluate.assert_called_with(
name=eval_spec.name,
input_fn=eval_spec.input_fn,
steps=eval_spec.steps,
checkpoint_path='checkpoint_path/',
hooks=eval_spec.hooks)
self.assertEqual(1, exporter.export.call_count)
exporter.export.assert_called_with(
estimator=mock_est,
export_path=os.path.join('path/', 'export', exporter.name),
checkpoint_path='checkpoint_path/',
eval_result=eval_result,
is_the_final_export=True)
@test.mock.patch.object(basic_session_run_hooks, 'SecondOrStepTimer')
@test.mock.patch.object(server_lib, 'Server')
def test_run_master_throttle_eval(self, _, mock_timer_class):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator, model_dir='path/')
mock_timer = test.mock.Mock()
mock_timer_class.return_value = mock_timer
def estimator_train(saving_listeners, *args, **kwargs):
del args, kwargs
saving_listeners[0].begin()
# Call four times.
mock_timer.should_trigger_for_step.return_value = True
saving_listeners[0].after_save(session=None, global_step_value=None)
mock_timer.should_trigger_for_step.return_value = True
saving_listeners[0].after_save(session=None, global_step_value=None)
mock_timer.should_trigger_for_step.return_value = False
saving_listeners[0].after_save(session=None, global_step_value=None)
mock_timer.should_trigger_for_step.return_value = True
saving_listeners[0].after_save(session=None, global_step_value=None)
mock_est.train = estimator_train
mock_est.latest_checkpoint.side_effect = ['ckpt1', 'ckpt2']
mock_est.config = self._run_config
exporter = test.mock.PropertyMock(spec=exporter_lib.Exporter)
exporter.name = 'see_whether_export_is_called'
train_spec = training.TrainSpec(input_fn=lambda: 1, max_steps=300)
eval_spec = training.EvalSpec(
input_fn=lambda: 1, steps=2, exporters=exporter, throttle_secs=10)
mock_est.evaluate.side_effect = [
        {_GLOBAL_STEP_KEY: train_spec.max_steps // 2},
{_GLOBAL_STEP_KEY: train_spec.max_steps}
]
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
executor.run_master()
self.assertEqual(2, mock_est.evaluate.call_count)
self.assertEqual(2, exporter.export.call_count)
is_final_export_list = [call[1]['is_the_final_export']
for call in exporter.export.call_args_list]
self.assertEqual([False, True], is_final_export_list)
@test.mock.patch.object(basic_session_run_hooks, 'SecondOrStepTimer')
@test.mock.patch.object(server_lib, 'Server')
def test_run_master_throttle_eval_which_skips_final_ckpt(
self, _, mock_timer_class):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator, model_dir='path/')
mock_timer = test.mock.Mock()
mock_timer_class.return_value = mock_timer
def estimator_train(saving_listeners, *args, **kwargs):
del args, kwargs
saving_listeners[0].begin()
      # Call three times (one for the first saving).
mock_timer.should_trigger_for_step.return_value = True
saving_listeners[0].after_save(session=None, global_step_value=0)
mock_timer.should_trigger_for_step.return_value = True
saving_listeners[0].after_save(session=None, global_step_value=125)
mock_timer.should_trigger_for_step.return_value = False
saving_listeners[0].after_save(session=None, global_step_value=250)
# At the end evaluate should be called even if throttle secs prevents it.
mock_timer.should_trigger_for_step.return_value = False
saving_listeners[0].end(session=None, global_step_value=300)
mock_est.train = estimator_train
mock_est.latest_checkpoint.side_effect = ['ckpt1', 'ckpt2']
mock_est.config = self._run_config
exporter = test.mock.PropertyMock(spec=exporter_lib.Exporter)
exporter.name = 'see_whether_export_is_called'
train_spec = training.TrainSpec(input_fn=lambda: 1, max_steps=300)
eval_spec = training.EvalSpec(
input_fn=lambda: 1, steps=2, exporters=exporter, throttle_secs=10)
mock_est.evaluate.side_effect = [
        {_GLOBAL_STEP_KEY: train_spec.max_steps // 2},
{_GLOBAL_STEP_KEY: train_spec.max_steps}
]
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
executor.run_master()
self.assertEqual(2, mock_est.evaluate.call_count)
self.assertEqual(2, exporter.export.call_count)
is_final_export_list = [call[1]['is_the_final_export']
for call in exporter.export.call_args_list]
self.assertEqual([False, True], is_final_export_list)
class TrainingExecutorRunEvaluatorTest(test.TestCase):
"""Tests run_evaluator of _TrainingExecutor."""
def _set_up_mock_est_to_train_and_evaluate_once(self, mock_est,
mock_train_spec):
"""Sets global step in eval result to end the while True eval loop."""
training_max_step = 200
mock_est.evaluate.return_value = {_GLOBAL_STEP_KEY: training_max_step}
mock_train_spec.max_steps = training_max_step
def test_evaluate_with_evaluate_spec(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.latest_checkpoint.return_value = 'latest_it_is'
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
self._set_up_mock_est_to_train_and_evaluate_once(mock_est, mock_train_spec)
eval_spec = training.EvalSpec(
input_fn=lambda: 1, steps=2, hooks=[_FakeHook()], name='cont_eval',
start_delay_secs=0, throttle_secs=0)
executor = training._TrainingExecutor(mock_est, mock_train_spec, eval_spec)
executor.run_evaluator()
mock_est.evaluate.assert_called_with(
name='cont_eval',
input_fn=eval_spec.input_fn,
steps=eval_spec.steps,
checkpoint_path='latest_it_is',
hooks=eval_spec.hooks)
self.assertFalse(mock_est.train.called)
def test_evaluate_with_no_eval_spec_fails(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.latest_checkpoint.return_value = 'latest_it_is'
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
self._set_up_mock_est_to_train_and_evaluate_once(mock_est, mock_train_spec)
eval_spec = None
executor = training._TrainingExecutor(mock_est, mock_train_spec, eval_spec)
with self.assertRaisesRegexp(TypeError, _INVALID_EVAL_SPEC_MSG):
executor.run_evaluator()
def test_evaluate_with_train_hooks(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.latest_checkpoint.return_value = 'latest_it_is'
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
self._set_up_mock_est_to_train_and_evaluate_once(mock_est, mock_train_spec)
eval_spec = training.EvalSpec(
input_fn=lambda: 1,
steps=2,
hooks=[_FakeHook()],
name='cont_eval',
start_delay_secs=0,
throttle_secs=0)
# The train_hooks will not be called during eval.
mock_hook = test.mock.Mock(spec=session_run_hook.SessionRunHook)
executor = training._TrainingExecutor(
mock_est, mock_train_spec, eval_spec, train_hooks=[mock_hook])
executor.run_evaluator()
mock_hook.begin.assert_not_called()
def test_evaluate_multiple_times(self):
training_max_step = 200
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.model_dir = compat.as_bytes(test.get_temp_dir())
mock_est.evaluate.side_effect = [
{_GLOBAL_STEP_KEY: training_max_step // 2},
{_GLOBAL_STEP_KEY: training_max_step}
]
mock_est.latest_checkpoint.side_effect = ['path_1', 'path_2']
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_train_spec.max_steps = training_max_step
exporter = test.mock.PropertyMock(spec=exporter_lib.Exporter)
exporter.name = 'see_how_many_times_export_is_called'
mock_est.times_export_was_called = 0
mock_est.times_final_export_was_called = 0
def export(estimator, export_path, checkpoint_path, eval_result,
is_the_final_export):
del export_path, checkpoint_path, eval_result
estimator.times_export_was_called += 1
      # The final export happens at the end.
self.assertEqual(0, estimator.times_final_export_was_called)
if is_the_final_export:
estimator.times_final_export_was_called += 1
exporter.export = export
eval_spec = training.EvalSpec(
input_fn=lambda: 1,
start_delay_secs=0,
throttle_secs=0,
exporters=exporter)
executor = training._TrainingExecutor(mock_est, mock_train_spec, eval_spec)
executor.run_evaluator()
self.assertEqual(2, mock_est.evaluate.call_count)
self.assertEqual(2, mock_est.times_export_was_called)
self.assertEqual(1, mock_est.times_final_export_was_called)
def test_evaluate_listener_before_eval(self):
training_max_step = 200
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.model_dir = compat.as_bytes(test.get_temp_dir())
# Without early stopping, this eval will be run twice.
mock_est.evaluate.side_effect = [{
_GLOBAL_STEP_KEY: training_max_step // 2
}, {
_GLOBAL_STEP_KEY: training_max_step
}]
mock_est.latest_checkpoint.side_effect = ['path_1', 'path_2']
mock_train_spec = test.mock.Mock(spec=training.TrainSpec, hooks=[])
mock_train_spec.max_steps = training_max_step
class _Listener(training._ContinuousEvalListener):
def __init__(self):
self.call_count = 0
def before_eval(self):
self.call_count += 1
return self.call_count == 1
listener = _Listener()
eval_spec = training.EvalSpec(
input_fn=lambda: 1, start_delay_secs=0, throttle_secs=0)
training._TrainingExecutor(
mock_est, mock_train_spec, eval_spec,
continuous_eval_listener=listener).run_evaluator()
    # before_eval returns False the second time, so evaluate will be
    # called only once.
self.assertEqual(1, mock_est.evaluate.call_count)
self.assertEqual(2, listener.call_count)
def test_evaluate_listener_after_eval(self):
training_max_step = 200
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.model_dir = compat.as_bytes(test.get_temp_dir())
# Without early stopping, this eval will be run twice.
expected_eval_metrics = [{
_GLOBAL_STEP_KEY: training_max_step // 2
}, {
_GLOBAL_STEP_KEY: training_max_step
}]
mock_est.evaluate.side_effect = expected_eval_metrics
mock_est.latest_checkpoint.side_effect = ['path_1', 'path_2']
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_train_spec.max_steps = training_max_step
class _Listener(training._ContinuousEvalListener):
def __init__(self):
self.call_count = 0
def after_eval(self, eval_result):
self.call_count += 1
self.eval_result = eval_result
return False
listener = _Listener()
eval_spec = training.EvalSpec(
input_fn=lambda: 1, start_delay_secs=0, throttle_secs=0)
training._TrainingExecutor(
mock_est, mock_train_spec, eval_spec,
continuous_eval_listener=listener).run_evaluator()
    # after_eval returns False the first time, so evaluate will be
    # called only once.
self.assertEqual(1, mock_est.evaluate.call_count)
self.assertEqual(1, listener.call_count)
self.assertAllEqual(expected_eval_metrics[0], listener.eval_result.metrics)
self.assertEqual('path_1', listener.eval_result.checkpoint_path)
def test_final_export_is_true_in_the_end(self):
training_max_step = 200
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.model_dir = compat.as_bytes(test.get_temp_dir())
mock_est.evaluate.side_effect = [
{_GLOBAL_STEP_KEY: training_max_step // 2},
{_GLOBAL_STEP_KEY: training_max_step}
]
mock_est.latest_checkpoint.side_effect = ['path_1', 'path_2']
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_train_spec.max_steps = training_max_step
mock_est.times_export_fn_was_called = 0
mock_est.times_the_final_export_was_true = 0
def export(estimator, export_path, checkpoint_path, eval_result,
is_the_final_export):
del export_path, checkpoint_path, eval_result
estimator.times_export_fn_was_called += 1
if is_the_final_export:
estimator.times_the_final_export_was_true += 1
exporter = test.mock.PropertyMock(spec=exporter_lib.Exporter)
exporter.name = 'see_how_many_times_export_is_called'
exporter.export = export
eval_spec = training.EvalSpec(
input_fn=lambda: 1,
start_delay_secs=0,
throttle_secs=0,
exporters=exporter)
executor = training._TrainingExecutor(mock_est, mock_train_spec, eval_spec)
executor.run_evaluator()
self.assertEqual(2, mock_est.evaluate.call_count)
self.assertEqual(2, mock_est.times_export_fn_was_called)
self.assertEqual(1, mock_est.times_the_final_export_was_true)
def test_skip_evaluation_due_to_ckpt(self):
training_max_step = 200
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.evaluate.side_effect = [
{_GLOBAL_STEP_KEY: training_max_step // 2},
{_GLOBAL_STEP_KEY: training_max_step}
]
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_train_spec.max_steps = training_max_step
self._set_up_mock_est_to_train_and_evaluate_once(mock_est, mock_train_spec)
    # The first two items are invalid; the next two are the same checkpoint path.
mock_est.latest_checkpoint.side_effect = [
None, '', 'same', 'same', 'path_2'
]
eval_spec = training.EvalSpec(
input_fn=lambda: 1, start_delay_secs=0, throttle_secs=2)
executor = training._TrainingExecutor(mock_est, mock_train_spec, eval_spec)
with test.mock.patch.object(logging, 'warning') as mock_log:
executor.run_evaluator()
# Three checkpoint paths are invalid.
self.assertEqual(5, mock_est.latest_checkpoint.call_count)
self.assertEqual(2, mock_est.evaluate.call_count)
    # Two warning logs are expected (the last warning time is reset after a
    # successful evaluation).
self.assertEqual(2, mock_log.call_count)
def test_warning_if_throttle_secs_is_zero(self):
training_max_step = 200
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.evaluate.side_effect = [
{_GLOBAL_STEP_KEY: training_max_step}
]
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_train_spec.max_steps = training_max_step
self._set_up_mock_est_to_train_and_evaluate_once(mock_est, mock_train_spec)
    # Make the first checkpoint invalid so that the throttle_secs=0 warning
    # path is exercised.
mock_est.latest_checkpoint.side_effect = [None, 'path']
eval_spec = training.EvalSpec(
input_fn=lambda: 1, start_delay_secs=0, throttle_secs=0)
executor = training._TrainingExecutor(mock_est, mock_train_spec, eval_spec)
with test.mock.patch.object(logging, 'warning') as mock_log:
executor.run_evaluator()
# First ckpt is invalid.
self.assertEqual(2, mock_est.latest_checkpoint.call_count)
self.assertEqual(1, mock_est.evaluate.call_count)
self.assertRegexpMatches(str(mock_log.call_args), _INPROPER_THROTTL_SECS)
def test_continuous_eval_listener_eval_result(self):
training_max_step = 200
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
expected_eval_metrics = [{
_GLOBAL_STEP_KEY: training_max_step // 2
}, {
_GLOBAL_STEP_KEY: training_max_step
}]
mock_est.evaluate.side_effect = expected_eval_metrics
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_train_spec.max_steps = training_max_step
class _Listener(training._ContinuousEvalListener):
def __init__(self):
self.eval_results = []
def after_eval(self, eval_result):
self.eval_results.append(eval_result)
return True
continuous_eval_listener = _Listener()
self._set_up_mock_est_to_train_and_evaluate_once(mock_est, mock_train_spec)
    # The first two items are invalid; the next two are the same checkpoint path.
mock_est.latest_checkpoint.side_effect = [
None, '', 'same', 'same', 'path_2'
]
expected_eval_results = [
training._EvalResult(training._EvalStatus.MISSING_CHECKPOINT),
training._EvalResult(training._EvalStatus.MISSING_CHECKPOINT),
training._EvalResult(
training._EvalStatus.EVALUATED,
metrics=expected_eval_metrics[0],
checkpoint_path='same'),
training._EvalResult(training._EvalStatus.NO_NEW_CHECKPOINT),
training._EvalResult(
training._EvalStatus.EVALUATED,
metrics=expected_eval_metrics[1],
checkpoint_path='path_2'),
]
eval_spec = training.EvalSpec(
input_fn=lambda: 1, start_delay_secs=0, throttle_secs=0)
executor = training._TrainingExecutor(
mock_est,
mock_train_spec,
eval_spec,
continuous_eval_listener=continuous_eval_listener)
executor.run_evaluator()
# Three checkpoint paths are invalid.
self.assertEqual(5, mock_est.latest_checkpoint.call_count)
self.assertEqual(2, mock_est.evaluate.call_count)
self.assertEqual(5, len(continuous_eval_listener.eval_results))
for i, result in enumerate(continuous_eval_listener.eval_results):
self.assertEqual(expected_eval_results[i].status, result.status)
self.assertAllEqual(expected_eval_results[i].metrics, result.metrics)
self.assertEqual(expected_eval_results[i].checkpoint_path,
result.checkpoint_path)
def test_sleep_start_delay_secs(self):
training_max_step = 200
start_delay_secs = 123
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.evaluate.return_value = {_GLOBAL_STEP_KEY: training_max_step}
mock_est.model_dir = compat.as_bytes(test.get_temp_dir())
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_train_spec.max_steps = training_max_step
eval_spec = training.EvalSpec(
input_fn=lambda: 1, steps=2, hooks=[_FakeHook()], name='cont_eval',
start_delay_secs=start_delay_secs, throttle_secs=0)
executor = training._TrainingExecutor(mock_est, mock_train_spec, eval_spec)
with test.mock.patch.object(time, 'sleep') as mock_sleep:
executor.run_evaluator()
mock_sleep.assert_called_with(start_delay_secs)
self.assertTrue(mock_est.evaluate.called)
@test.mock.patch.object(time, 'time')
@test.mock.patch.object(time, 'sleep')
def test_throttle_secs(self, mock_sleep, mock_time):
throttle_secs = 123
operation_secs = 12
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
self._set_up_mock_est_to_train_and_evaluate_once(mock_est, mock_train_spec)
eval_spec = training.EvalSpec(
input_fn=lambda: 1, start_delay_secs=0, throttle_secs=throttle_secs)
mock_time.side_effect = [921, 921 + operation_secs]
executor = training._TrainingExecutor(mock_est, mock_train_spec, eval_spec)
# Disable logging as it calls time.time also.
with test.mock.patch.object(logging, 'info'):
executor.run_evaluator()
mock_sleep.assert_called_with(throttle_secs - operation_secs)
self.assertTrue(mock_est.evaluate.called)
def test_that_export_is_called(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
self._set_up_mock_est_to_train_and_evaluate_once(mock_est, mock_train_spec)
def export(estimator, *args, **kwargs):
del args, kwargs
estimator.export_was_called = True
exporter = test.mock.PropertyMock(spec=exporter_lib.Exporter)
exporter.name = 'see_whether_export_is_called'
exporter.export = export
eval_spec = training.EvalSpec(
input_fn=lambda: 1,
steps=2,
start_delay_secs=0,
throttle_secs=0,
exporters=exporter)
executor = training._TrainingExecutor(mock_est, mock_train_spec, eval_spec)
executor.run_evaluator()
# Verify that export was called on the right estimator.
self.assertTrue(mock_est.export_was_called)
def test_errors_out_if_evaluate_returns_empty_dict(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
train_spec = training.TrainSpec(input_fn=lambda: 1)
eval_spec = training.EvalSpec(input_fn=(lambda: 1),
start_delay_secs=0, throttle_secs=0)
mock_est.evaluate.return_value = {}
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
with self.assertRaisesRegexp(ValueError, _INVALID_EMPTY_EVAL_RESULT_ERR):
executor.run_evaluator()
def test_errors_out_if_evaluate_returns_non_dict(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
train_spec = training.TrainSpec(input_fn=lambda: 1)
eval_spec = training.EvalSpec(input_fn=(lambda: 1),
start_delay_secs=0, throttle_secs=0)
mock_est.evaluate.return_value = 123
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
with self.assertRaisesRegexp(TypeError, _INVALID_EVAL_RESULT_TYPE_ERR):
executor.run_evaluator()
def test_errors_out_if_evaluate_returns_dict_without_global_step(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
train_spec = training.TrainSpec(input_fn=lambda: 1)
eval_spec = training.EvalSpec(input_fn=(lambda: 1),
start_delay_secs=0, throttle_secs=0)
mock_est.evaluate.return_value = {'loss': 123}
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
with self.assertRaisesRegexp(ValueError,
_MISSING_GLOBAL_STEP_IN_EVAL_RESULT_ERR):
executor.run_evaluator()
class TrainingExecutorRunPsTest(test.TestCase):
"""Tests run_ps of _TrainingExecutor."""
@test.mock.patch.object(server_lib, 'Server')
def test_std_server(self, mock_server):
mock_server_instance = test.mock.Mock()
mock_server.return_value = mock_server_instance
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.config = _create_run_config_with_cluster_spec(_TF_CONFIG_FOR_PS)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
executor = training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec)
executor.run_ps()
mock_server.assert_called_with(
mock_est.config.cluster_spec,
job_name=mock_est.config.task_type,
task_index=mock_est.config.task_id,
config=test.mock.ANY,
protocol=None,
start=False)
self.assertTrue(mock_server_instance.start.called)
self.assertTrue(mock_server_instance.join.called)
def test_fail_with_empty_cluster_spec(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
mock_est.config.cluster_spec = None
mock_est.config.master = 'grpc://...'
mock_est.config.task_type = 'ps'
mock_est.config.task_id = 2
with self.assertRaisesRegexp(RuntimeError,
_INVALID_CONFIG_FOR_STD_SERVER_MSG):
training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec).run_ps()
def test_fail_with_empty_master(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
mock_est.config.cluster_spec = server_lib.ClusterSpec({'ps': ['dummy']})
mock_est.config.master = ''
mock_est.config.task_type = 'ps'
mock_est.config.task_id = 2
with self.assertRaisesRegexp(RuntimeError,
_INVALID_CONFIG_FOR_STD_SERVER_MSG):
training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec).run_ps()
def test_fail_with_empty_task_type(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
mock_est.config.cluster_spec = server_lib.ClusterSpec({'ps': ['dummy']})
mock_est.config.master = 'grpc://...'
mock_est.config.task_type = ''
mock_est.config.task_id = 2
with self.assertRaisesRegexp(RuntimeError,
_INVALID_CONFIG_FOR_STD_SERVER_MSG):
training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec).run_ps()
def test_fail_with_none_task_id(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.PropertyMock(spec=run_config_lib.RunConfig)
mock_est.config.cluster_spec = server_lib.ClusterSpec({'ps': ['dummy']})
mock_est.config.master = 'grpc://...'
mock_est.config.task_type = 'ps'
mock_est.config.task_id = None
with self.assertRaisesRegexp(RuntimeError,
_INVALID_CONFIG_FOR_STD_SERVER_MSG):
training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec).run_ps()
class StopAtSecsHookTest(test.TestCase):
"""Tests StopAtSecsHook."""
@test.mock.patch.object(time, 'time')
def test_stops_after_time(self, mock_time):
mock_time.return_value = 1484695987.209386
hook = training._StopAtSecsHook(1000)
with ops.Graph().as_default():
no_op = control_flow_ops.no_op()
# some time passed before training starts
mock_time.return_value += 250
with monitored_session.MonitoredSession(hooks=[hook]) as sess:
self.assertFalse(sess.should_stop())
sess.run(no_op)
self.assertFalse(sess.should_stop())
mock_time.return_value += 500
sess.run(no_op)
self.assertFalse(sess.should_stop())
mock_time.return_value += 400
sess.run(no_op)
self.assertFalse(sess.should_stop())
mock_time.return_value += 200
sess.run(no_op)
self.assertTrue(sess.should_stop())
class TrainingExecutorRunLocalTest(test.TestCase):
"""Tests run_local of _TrainingExecutor."""
def _model_fn(self, features, labels, mode):
del labels
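    # Minimal model_fn for these tests: the train op simply increments the
    # global step, loss and predictions are constants, and the eval metric is
    # the mean of the input features.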
with ops.control_dependencies([features]):
train_op = state_ops.assign_add(training_util.get_global_step(), 1)
return model_fn_lib.EstimatorSpec(
mode,
loss=constant_op.constant(0.),
train_op=train_op,
predictions=constant_op.constant([[10.]]),
eval_metric_ops={'mean_of_features': metrics_lib.mean(features)})
def _input_fn(self, repeat=True):
ds = dataset_ops.Dataset.from_tensors([1])
if repeat:
return ds.repeat()
return ds
def unique_checkpoint_every_time_fn(self):
return 'checkpoint_path_%s/' % random.random()
def test_runs_evaluate_with_every_new_checkpoint(self):
est = estimator_lib.Estimator(
model_fn=self._model_fn,
config=run_config_lib.RunConfig(save_checkpoints_steps=10))
mock_est = test.mock.Mock(spec=estimator_lib.Estimator, wraps=est)
mock_est.times_export_was_called = 0
mock_est.times_final_export_was_called = 0
def export(estimator, export_path, checkpoint_path, eval_result,
is_the_final_export):
del export_path, checkpoint_path, eval_result
estimator.times_export_was_called += 1
      # The final export happens at the end.
self.assertEqual(0, estimator.times_final_export_was_called)
if is_the_final_export:
estimator.times_final_export_was_called += 1
exporter = test.mock.PropertyMock(spec=exporter_lib.Exporter)
exporter.name = 'see_how_many_times_export_is_called'
exporter.export = export
train_spec = training.TrainSpec(input_fn=self._input_fn, max_steps=22)
eval_spec = training.EvalSpec(
input_fn=lambda: self._input_fn(repeat=False),
throttle_secs=0,
exporters=exporter)
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
executor.run_local()
self.assertEqual(1, mock_est.train.call_count)
self.assertEqual(3, mock_est.evaluate.call_count)
self.assertEqual(3, mock_est.times_export_was_called)
self.assertEqual(1, mock_est.times_final_export_was_called)
def test_runs_with_eval_listener_before_eval(self):
est = estimator_lib.Estimator(
model_fn=self._model_fn,
config=run_config_lib.RunConfig(save_checkpoints_steps=10))
mock_est = test.mock.Mock(spec=estimator_lib.Estimator, wraps=est)
mock_est.latest_checkpoint = self.unique_checkpoint_every_time_fn
train_spec = training.TrainSpec(input_fn=self._input_fn, max_steps=12)
eval_spec = training.EvalSpec(input_fn=lambda: self._input_fn(repeat=False))
mock_est.evaluate.side_effect = [{_GLOBAL_STEP_KEY: train_spec.max_steps}]
class _Listener(training._ContinuousEvalListener):
def __init__(self):
self.call_count = 0
def before_eval(self):
self.call_count += 1
return False # Will stop the run_local before first eval.
listener = _Listener()
executor = training._TrainingExecutor(
mock_est, train_spec, eval_spec, continuous_eval_listener=listener)
executor.run_local()
self.assertEqual(1, mock_est.train.call_count)
self.assertEqual(0, mock_est.evaluate.call_count)
def test_runs_with_eval_listener_after_eval(self):
est = estimator_lib.Estimator(
model_fn=self._model_fn,
config=run_config_lib.RunConfig(save_checkpoints_steps=10))
mock_est = test.mock.Mock(spec=estimator_lib.Estimator, wraps=est)
train_spec = training.TrainSpec(input_fn=self._input_fn, max_steps=3000)
eval_spec = training.EvalSpec(
input_fn=lambda: self._input_fn(repeat=False), throttle_secs=0)
class _Listener(training._ContinuousEvalListener):
def __init__(self):
self.call_count = 0
def after_eval(self, eval_result):
self.call_count += 1
return False # Will stop the run_local after first eval.
listener = _Listener()
executor = training._TrainingExecutor(
mock_est, train_spec, eval_spec, continuous_eval_listener=listener)
metrics, _ = executor.run_local() # pylint: disable=assignment-from-no-return
self.assertEqual(1, mock_est.train.call_count)
self.assertEqual(1, mock_est.evaluate.call_count)
self.assertEqual(1, listener.call_count)
# Should be less than max_steps since listener did early stopping.
self.assertLess(metrics[_GLOBAL_STEP_KEY], train_spec.max_steps)
def test_handles_no_new_checkpoint_found(self):
est = estimator_lib.Estimator(
model_fn=self._model_fn,
# disable saving checkpoint
config=run_config_lib.RunConfig(
save_checkpoints_steps=None, save_checkpoints_secs=None))
train_spec = training.TrainSpec(
input_fn=self._input_fn, max_steps=300, hooks=[_FakeHook()])
eval_spec = training.EvalSpec(
input_fn=lambda: self._input_fn(repeat=False),
hooks=[_FakeHook()],
throttle_secs=100)
executor = training._TrainingExecutor(est, train_spec, eval_spec)
with self.assertRaisesRegexp(ValueError,
'There should be a CheckpointSaverHook'):
executor.run_local()
def test_final_export_is_true_in_the_end(self):
est = estimator_lib.Estimator(
model_fn=self._model_fn,
config=run_config_lib.RunConfig(save_checkpoints_steps=10))
mock_est = test.mock.Mock(spec=estimator_lib.Estimator, wraps=est)
mock_est.times_export_fn_was_called = 0
mock_est.times_the_final_export_was_true = 0
def export(estimator, export_path, checkpoint_path, eval_result,
is_the_final_export):
del export_path, checkpoint_path, eval_result
estimator.times_export_fn_was_called += 1
if is_the_final_export:
estimator.times_the_final_export_was_true += 1
exporter = test.mock.PropertyMock(spec=exporter_lib.Exporter)
exporter.name = 'see_how_many_times_export_is_called'
exporter.export = export
train_spec = training.TrainSpec(
input_fn=self._input_fn, max_steps=12, hooks=[_FakeHook()])
eval_spec = training.EvalSpec(
input_fn=lambda: self._input_fn(repeat=False),
throttle_secs=0,
exporters=exporter)
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
executor.run_local()
self.assertEqual(1, mock_est.train.call_count)
self.assertEqual(2, mock_est.evaluate.call_count)
self.assertEqual(2, mock_est.times_export_fn_was_called)
self.assertEqual(1, mock_est.times_the_final_export_was_true)
def test_train_and_evaluate_args(self):
est = estimator_lib.Estimator(model_fn=self._model_fn)
mock_est = test.mock.Mock(spec=estimator_lib.Estimator, wraps=est)
train_spec = training.TrainSpec(
input_fn=self._input_fn, max_steps=300, hooks=[_FakeHook()])
eval_spec = training.EvalSpec(
input_fn=lambda: self._input_fn(repeat=False),
steps=2,
hooks=[_FakeHook()],
name='local_eval')
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
executor.run_local()
mock_est.evaluate.assert_called_with(
name=eval_spec.name,
input_fn=eval_spec.input_fn,
steps=eval_spec.steps,
checkpoint_path=est.latest_checkpoint(),
hooks=eval_spec.hooks)
train_args = mock_est.train.call_args[1]
self.assertEqual(list(train_spec.hooks), list(train_args['hooks']))
self.assertEqual(train_spec.input_fn, train_args['input_fn'])
self.assertEqual(train_spec.max_steps, train_args['max_steps'])
def test_train_with_no_eval_spec_fails(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
train_spec = training.TrainSpec(
input_fn=lambda: 1, max_steps=300, hooks=[_FakeHook()])
eval_spec = None
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
with self.assertRaisesRegexp(TypeError, _INVALID_EVAL_SPEC_MSG):
executor.run_local()
def test_train_hooks(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator, model_dir='path/')
mock_est.latest_checkpoint.return_value = 'checkpoint_path/'
train_spec = training.TrainSpec(
input_fn=lambda: 1, max_steps=300, hooks=[_FakeHook()])
eval_spec = training.EvalSpec(input_fn=lambda: 1, steps=2)
mock_est.evaluate.return_value = {_GLOBAL_STEP_KEY: train_spec.max_steps}
extra_hooks = [_FakeHook()]
executor = training._TrainingExecutor(
mock_est, train_spec, eval_spec, train_hooks=extra_hooks)
executor.run_local()
train_args = mock_est.train.call_args[1]
self.assertEqual(
list(train_spec.hooks) + extra_hooks, [
h for h in train_args['hooks']
if not isinstance(h, training._StopAtSecsHook)
])
def test_that_export_is_called_with_run_local(self):
est = estimator_lib.Estimator(model_fn=self._model_fn)
mock_est = test.mock.Mock(spec=estimator_lib.Estimator, wraps=est)
train_spec = training.TrainSpec(input_fn=self._input_fn, max_steps=12)
mock_est.evaluate.return_value = {_GLOBAL_STEP_KEY: train_spec.max_steps}
def export(estimator, *args, **kwargs):
del args, kwargs
estimator.export_was_called = True
return 'path_to_export'
exporter = test.mock.PropertyMock(spec=exporter_lib.Exporter)
exporter.name = 'see_whether_export_is_called'
exporter.export = export
eval_spec = training.EvalSpec(
input_fn=lambda: self._input_fn(repeat=False),
steps=2,
start_delay_secs=0,
throttle_secs=213,
exporters=exporter)
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
# pylint: disable=assignment-from-no-return
_, export_results = executor.run_local()
# pylint: enable=assignment-from-no-return
self.assertTrue(mock_est.export_was_called)
self.assertEqual(export_results, ['path_to_export'])
def test_errors_out_if_evaluate_returns_empty_dict(self):
est = estimator_lib.Estimator(
model_fn=self._model_fn,
config=run_config_lib.RunConfig(save_checkpoints_steps=2))
mock_est = test.mock.Mock(spec=estimator_lib.Estimator, wraps=est)
train_spec = training.TrainSpec(input_fn=self._input_fn)
eval_spec = training.EvalSpec(
input_fn=lambda: self._input_fn(repeat=False), throttle_secs=0)
mock_est.evaluate.return_value = {}
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
with self.assertRaisesRegexp(ValueError, _INVALID_EMPTY_EVAL_RESULT_ERR):
executor.run_local()
def test_errors_out_if_evaluate_returns_non_dict(self):
est = estimator_lib.Estimator(
model_fn=self._model_fn,
config=run_config_lib.RunConfig(save_checkpoints_steps=2))
mock_est = test.mock.Mock(spec=estimator_lib.Estimator, wraps=est)
train_spec = training.TrainSpec(input_fn=self._input_fn)
eval_spec = training.EvalSpec(
input_fn=lambda: self._input_fn(repeat=False), throttle_secs=0)
mock_est.evaluate.return_value = 123
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
with self.assertRaisesRegexp(TypeError, _INVALID_EVAL_RESULT_TYPE_ERR):
executor.run_local()
def test_errors_out_if_evaluate_returns_dict_without_global_step(self):
est = estimator_lib.Estimator(
model_fn=self._model_fn,
config=run_config_lib.RunConfig(save_checkpoints_steps=2))
mock_est = test.mock.Mock(spec=estimator_lib.Estimator, wraps=est)
train_spec = training.TrainSpec(input_fn=self._input_fn)
eval_spec = training.EvalSpec(
input_fn=lambda: self._input_fn(repeat=False), throttle_secs=0)
mock_est.evaluate.return_value = {'loss': 123}
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
with self.assertRaisesRegexp(ValueError,
_MISSING_GLOBAL_STEP_IN_EVAL_RESULT_ERR):
executor.run_local()
def test_train_and_evaluate_return_metrics(self):
est = estimator_lib.Estimator(model_fn=self._model_fn)
mock_est = test.mock.Mock(spec=estimator_lib.Estimator, wraps=est)
train_spec = training.TrainSpec(
input_fn=self._input_fn, max_steps=12, hooks=[_FakeHook()])
eval_spec = training.EvalSpec(
input_fn=lambda: self._input_fn(repeat=False),
steps=2,
hooks=[_FakeHook()],
name='local_eval')
executor = training._TrainingExecutor(mock_est, train_spec, eval_spec)
# pylint: disable=assignment-from-no-return
metrics, _ = executor.run_local()
# pylint: enable=assignment-from-no-return
self.assertEqual(metrics['global_step'], 12)
class TrainAndEvaluateRunTest(test.TestCase):
def _test_run_task_and_executor(self, run_config):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.config = run_config
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
executor = training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec)
executor.call_task = {}
def task_fn(name):
def _fn():
executor.call_task[name] = 1
return _fn
executor.run_chief = task_fn('chief')
executor.run_master = task_fn('master')
executor.run_ps = task_fn('ps')
executor.run_evaluator = task_fn('evaluator')
executor.run_worker = task_fn('worker')
executor.run_local = task_fn('local')
return executor
def test_run_chief(self):
executor = self._test_run_task_and_executor(
run_config=_create_run_config_with_cluster_spec(_TF_CONFIG_FOR_CHIEF))
executor.run()
self.assertEqual(1, executor.call_task['chief'])
def test_run_worker(self):
executor = self._test_run_task_and_executor(
run_config=_create_run_config_with_cluster_spec(_TF_CONFIG_FOR_WORKER))
executor.run()
self.assertEqual(1, executor.call_task['worker'])
def test_run_ps(self):
executor = self._test_run_task_and_executor(
run_config=_create_run_config_with_cluster_spec(_TF_CONFIG_FOR_PS))
executor.run()
self.assertEqual(1, executor.call_task['ps'])
def test_run_evaluator(self):
executor = self._test_run_task_and_executor(
run_config=_create_run_config_with_cluster_spec(
_TF_CONFIG_FOR_EVALUATOR))
executor.run()
self.assertEqual(1, executor.call_task['evaluator'])
def test_run_local(self):
executor = self._test_run_task_and_executor(
run_config=run_config_lib.RunConfig())
executor.run()
self.assertEqual(1, executor.call_task['local'])
def test_invalid_local_task(self):
tf_config = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
'local': ['hos1:1'],
},
'task': {
'type': 'local', # invalid task type.
'index': 0
}
}
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.config = _create_run_config_with_cluster_spec(tf_config)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
executor = training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec)
with self.assertRaisesRegexp(ValueError, _INVALID_LOCAL_TASK_WITH_CLUSTER):
executor.run()
def test_unsupported_task_due_to_missing_run_task(self):
unsupported_task = 'alloc'
tf_config = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
unsupported_task: ['hos1:1'],
},
'task': {
'type': unsupported_task,
'index': 0
}
}
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.config = _create_run_config_with_cluster_spec(tf_config)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
executor = training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec)
with self.assertRaisesRegexp(ValueError, _INVALID_TASK_TO_RUN):
executor.run()
def test_unsupported_task_due_to_not_callable(self):
unsupported_task = 'alloc'
tf_config = {
'cluster': {
run_config_lib.TaskType.CHIEF: ['host0:0'],
unsupported_task: ['hos1:1'],
},
'task': {
'type': unsupported_task,
'index': 0
}
}
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.config = _create_run_config_with_cluster_spec(tf_config)
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
executor = training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec)
executor.run_alloc = 123 # not callable
with self.assertRaisesRegexp(ValueError, _INVALID_TASK_TO_RUN):
executor.run()
def test_invalid_task_type(self):
mock_est = test.mock.Mock(spec=estimator_lib.Estimator)
mock_est.config = test.mock.Mock()
mock_train_spec = test.mock.Mock(spec=training.TrainSpec)
mock_eval_spec = test.mock.Mock(spec=training.EvalSpec)
mock_est.config = test.mock.Mock()
mock_est.config.cluster_spec = server_lib.ClusterSpec({'1': ['dummy']})
mock_est.config.task_type = ''
executor = training._TrainingExecutor(mock_est, mock_train_spec,
mock_eval_spec)
with self.assertRaisesRegexp(ValueError, _INVALID_TASK_TYPE):
executor.run()
class TrainAndEvaluateIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _as_label(self, data_in_float):
return np.rint(data_in_float).astype(np.int64)
def _get_exporter(self, name, fc):
feature_spec = feature_column.make_parse_example_spec(fc)
serving_input_receiver_fn = (
export_lib.build_parsing_serving_input_receiver_fn(feature_spec))
return exporter_lib.LatestExporter(
name, serving_input_receiver_fn=serving_input_receiver_fn)
def _extract_loss_and_global_step(self, event_folder):
"""Returns the loss and global step in last event."""
event_paths = glob.glob(os.path.join(event_folder, 'events*'))
loss = None
global_step_count = None
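    # Walk the last events file and keep the loss recorded at the largest
    # global step seen.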
for e in summary_iterator.summary_iterator(event_paths[-1]):
current_loss = None
for v in e.summary.value:
if v.tag == 'loss':
current_loss = v.simple_value
# If loss is not found, global step is meaningless.
if current_loss is None:
continue
current_global_step = e.step
if global_step_count is None or current_global_step > global_step_count:
global_step_count = current_global_step
loss = current_loss
return (loss, global_step_count)
def test_complete_flow_with_non_distributed_configuration(self):
n_classes = 3
input_dimension = 2
batch_size = 10
eval_name = 'foo'
exporter_name = 'saved_model_exporter'
# max_steps should be larger than save_summary_steps
max_steps = 10
save_summary_steps = 9
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
x_data = data.reshape(batch_size, input_dimension)
y_data = np.reshape(self._as_label(data[:batch_size]), (batch_size, 1))
# learn y = x
def train_input_fn():
return dataset_ops.Dataset.from_tensor_slices(({
'x': x_data
}, y_data)).batch(batch_size).repeat().shuffle(1000)
def eval_input_fn():
return dataset_ops.Dataset.from_tensor_slices(({
'x': x_data
}, y_data)).batch(batch_size)
def predict_input_fn():
return dataset_ops.Dataset.from_tensor_slices({
'x': x_data
}).batch(batch_size)
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
est = dnn.DNNClassifier(
hidden_units=(2, 2),
feature_columns=feature_columns,
n_classes=n_classes,
config=run_config_lib.RunConfig(save_summary_steps=save_summary_steps),
model_dir=self._model_dir)
train_spec = training.TrainSpec(input_fn=train_input_fn,
max_steps=max_steps)
eval_spec = training.EvalSpec(
name=eval_name,
input_fn=eval_input_fn,
steps=None,
exporters=self._get_exporter(exporter_name, feature_columns),
throttle_secs=0)
training.train_and_evaluate(est, train_spec, eval_spec)
# Make sure nothing is stuck in limbo.
writer_cache.FileWriterCache.clear()
# Examine the training events. Use a range to check global step to avoid
    # flakiness due to a global step race condition.
training_loss, _ = self._extract_loss_and_global_step(est.model_dir)
self.assertIsNotNone(training_loss)
# Examine the eval events. The global step should be accurate.
eval_loss, eval_global_step = self._extract_loss_and_global_step(
event_folder=est.eval_dir(eval_name))
self.assertIsNotNone(eval_loss)
self.assertEqual(max_steps, eval_global_step)
# Examine the export folder.
export_dir = os.path.join(os.path.join(est.model_dir, 'export'),
exporter_name)
self.assertTrue(gfile.Exists(export_dir))
# Examine the ckpt for predict.
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PROBABILITIES]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
if __name__ == '__main__':
test.main()
| xodus7/tensorflow | tensorflow/python/estimator/training_test.py | Python | apache-2.0 | 85,718 |
"""Support for ISY994 lights."""
from typing import Callable, Dict
from pyisy.constants import ISY_VALUE_UNKNOWN
from homeassistant.components.light import (
DOMAIN as LIGHT,
SUPPORT_BRIGHTNESS,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.typing import HomeAssistantType
from .const import (
_LOGGER,
CONF_RESTORE_LIGHT_STATE,
DOMAIN as ISY994_DOMAIN,
ISY994_NODES,
)
from .entity import ISYNodeEntity
from .helpers import migrate_old_unique_ids
from .services import async_setup_device_services, async_setup_light_services
ATTR_LAST_BRIGHTNESS = "last_brightness"
async def async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities: Callable[[list], None],
) -> bool:
"""Set up the ISY994 light platform."""
hass_isy_data = hass.data[ISY994_DOMAIN][entry.entry_id]
isy_options = entry.options
restore_light_state = isy_options.get(CONF_RESTORE_LIGHT_STATE, False)
devices = []
for node in hass_isy_data[ISY994_NODES][LIGHT]:
devices.append(ISYLightEntity(node, restore_light_state))
await migrate_old_unique_ids(hass, LIGHT, devices)
async_add_entities(devices)
async_setup_device_services(hass)
async_setup_light_services(hass)
class ISYLightEntity(ISYNodeEntity, LightEntity, RestoreEntity):
"""Representation of an ISY994 light device."""
def __init__(self, node, restore_light_state) -> None:
"""Initialize the ISY994 light device."""
super().__init__(node)
self._last_brightness = None
self._restore_light_state = restore_light_state
@property
def is_on(self) -> bool:
"""Get whether the ISY994 light is on."""
if self._node.status == ISY_VALUE_UNKNOWN:
return False
return int(self._node.status) != 0
@property
def brightness(self) -> float:
"""Get the brightness of the ISY994 light."""
if self._node.status == ISY_VALUE_UNKNOWN:
return None
return int(self._node.status)
def turn_off(self, **kwargs) -> None:
"""Send the turn off command to the ISY994 light device."""
self._last_brightness = self.brightness
if not self._node.turn_off():
_LOGGER.debug("Unable to turn off light")
def on_update(self, event: object) -> None:
"""Save brightness in the update event from the ISY994 Node."""
if self._node.status not in (0, ISY_VALUE_UNKNOWN):
self._last_brightness = self._node.status
super().on_update(event)
# pylint: disable=arguments-differ
def turn_on(self, brightness=None, **kwargs) -> None:
"""Send the turn on command to the ISY994 light device."""
if self._restore_light_state and brightness is None and self._last_brightness:
brightness = self._last_brightness
if not self._node.turn_on(val=brightness):
_LOGGER.debug("Unable to turn on light")
@property
def device_state_attributes(self) -> Dict:
"""Return the light attributes."""
attribs = super().device_state_attributes
attribs[ATTR_LAST_BRIGHTNESS] = self._last_brightness
return attribs
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
async def async_added_to_hass(self) -> None:
"""Restore last_brightness on restart."""
await super().async_added_to_hass()
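        # Default to the current brightness, or full brightness (255) if it is
        # unknown; a previously saved state below may override this value.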
self._last_brightness = self.brightness or 255
last_state = await self.async_get_last_state()
if not last_state:
return
if (
ATTR_LAST_BRIGHTNESS in last_state.attributes
and last_state.attributes[ATTR_LAST_BRIGHTNESS]
):
self._last_brightness = last_state.attributes[ATTR_LAST_BRIGHTNESS]
def set_on_level(self, value):
"""Set the ON Level for a device."""
self._node.set_on_level(value)
def set_ramp_rate(self, value):
"""Set the Ramp Rate for a device."""
self._node.set_ramp_rate(value)
| robbiet480/home-assistant | homeassistant/components/isy994/light.py | Python | apache-2.0 | 4,198 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from eventlet import event
from eventlet import greenthread
from keystone.openstack.common.gettextutils import _ # noqa
from keystone.openstack.common import log as logging
from keystone.openstack.common import timeutils
LOG = logging.getLogger(__name__)
class LoopingCallDone(Exception):
"""Exception to break out and stop a LoopingCall.
The poll-function passed to LoopingCall can raise this exception to
break out of the loop normally. This is somewhat analogous to
StopIteration.
An optional return-value can be included as the argument to the exception;
this return-value will be returned by LoopingCall.wait()
"""
def __init__(self, retvalue=True):
""":param retvalue: Value that LoopingCall.wait() should return."""
self.retvalue = retvalue
class LoopingCallBase(object):
def __init__(self, f=None, *args, **kw):
self.args = args
self.kw = kw
self.f = f
self._running = False
self.done = None
def stop(self):
self._running = False
def wait(self):
return self.done.wait()
class FixedIntervalLoopingCall(LoopingCallBase):
"""A fixed interval looping call."""
def start(self, interval, initial_delay=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
start = timeutils.utcnow()
self.f(*self.args, **self.kw)
end = timeutils.utcnow()
if not self._running:
break
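                    # Subtract the time the callback took so the wall-clock
                    # interval between runs stays fixed.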
delay = interval - timeutils.delta_seconds(start, end)
if delay <= 0:
LOG.warn(_('task run outlasted interval by %s sec') %
-delay)
greenthread.sleep(delay if delay > 0 else 0)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in fixed duration looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn_n(_inner)
return self.done
# TODO(mikal): this class name is deprecated in Havana and should be removed
# in the I release
LoopingCall = FixedIntervalLoopingCall
class DynamicLoopingCall(LoopingCallBase):
"""A looping call which sleeps until the next known event.
The function called should return how long to sleep for before being
called again.
"""
def start(self, initial_delay=None, periodic_interval_max=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
idle = self.f(*self.args, **self.kw)
if not self._running:
break
if periodic_interval_max is not None:
idle = min(idle, periodic_interval_max)
LOG.debug(_('Dynamic looping call sleeping for %.02f '
'seconds'), idle)
greenthread.sleep(idle)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in dynamic looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn(_inner)
return self.done
| citrix-openstack-build/keystone | keystone/openstack/common/loopingcall.py | Python | apache-2.0 | 4,679 |
import mock
from nose.tools import eq_, ok_, assert_raises
from funfactory.urlresolvers import reverse
from .base import ManageTestCase
class TestErrorTrigger(ManageTestCase):
def test_trigger_error(self):
url = reverse('manage:error_trigger')
response = self.client.get(url)
assert self.user.is_superuser
eq_(response.status_code, 200)
# sans a message
response = self.client.post(url, {'message': ''})
eq_(response.status_code, 200)
ok_('This field is required' in response.content)
assert_raises(
NameError,
self.client.post,
url,
{'message': 'Some Message'}
)
@mock.patch('airmozilla.manage.views.errors.Client')
def test_trigger_error_with_raven(self, mocked_client):
url = reverse('manage:error_trigger')
assert self.user.is_superuser
raven_config = {
'dsn': 'fake123'
}
with self.settings(RAVEN_CONFIG=raven_config):
response = self.client.post(url, {
'message': 'Some Message',
'capture_with_raven': True
})
eq_(response.status_code, 302)
mocked_client().captureException.assert_called_with()
| zofuthan/airmozilla | airmozilla/manage/tests/views/test_errors.py | Python | bsd-3-clause | 1,278 |
from django.http import HttpResponseNotAllowed, HttpResponseServerError
from django.utils import simplejson as json
from util import to_json_response
from util import to_dojo_data
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.3, 2.4 fallback.
def expect_post_request(func):
"""Allow only POST requests to come in, throw an exception otherwise.
This relieves from checking every time that the request is
really a POST request, which it should be when using this
decorator.
"""
def _ret(*args, **kwargs):
ret = func(*args, **kwargs)
request = args[0]
if not request.method=='POST':
return HttpResponseNotAllowed(['POST'])
return ret
return _ret
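# Illustrative usage of the decorator above (view and field names are
# assumptions, not part of this module):
#
#     @expect_post_request
#     @json_response
#     def save_item(request):
#         return {'saved': True}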
def add_request_getdict(func):
"""Add the method getdict() to the request object.
This works just like getlist() only that it decodes any nested
JSON encoded object structure.
Since sending deep nested structures is not possible via
GET/POST by default, this enables it. Of course you need to
make sure that on the JavaScript side you are also sending
the data properly, which dojango.send() automatically does.
Example:
this is being sent:
one:1
two:{"three":3, "four":4}
using
request.POST.getdict('two')
returns a dict containing the values sent by the JavaScript.
"""
def _ret(*args, **kwargs):
args[0].POST.__class__.getdict = __getdict
ret = func(*args, **kwargs)
return ret
return _ret
def __getdict(self, key):
ret = self.get(key)
try:
ret = json.loads(ret)
except ValueError: # The value was not JSON encoded :-)
raise Exception('"%s" was not JSON encoded as expected (%s).' % (key, str(ret)))
return ret
def json_response(func):
"""
A simple json response decorator. Use it on views, where a python data object should be converted
to a json response:
@json_response
def my_view(request):
my_data = {'foo': 'bar'}
return my_data
"""
def inner(request, *args, **kwargs):
ret = func(request, *args, **kwargs)
return __prepare_json_ret(request, ret)
return wraps(func)(inner)
def jsonp_response_custom(callback_param_name):
"""
A jsonp (JSON with Padding) response decorator, where you can define your own callbackParamName.
It acts like the json_response decorator but with the difference, that it
wraps the returned json string into a client-specified function name (that is the Padding).
You can add this decorator to a function like that:
@jsonp_response_custom("my_callback_param")
def my_view(request):
my_data = {'foo': 'bar'}
return my_data
Your now can access this view from a foreign URL using JSONP.
An example with Dojo looks like that:
dojo.io.script.get({ url:"http://example.com/my_url/",
callbackParamName:"my_callback_param",
load: function(response){
console.log(response);
}
});
Note: the callback_param_name in the decorator and in your JavaScript JSONP call must be the same.
"""
def decorator(func):
def inner(request, *args, **kwargs):
ret = func(request, *args, **kwargs)
return __prepare_json_ret(request, ret, callback_param_name=callback_param_name)
return wraps(func)(inner)
return decorator
jsonp_response = jsonp_response_custom("jsonp_callback")
jsonp_response.__doc__ = "A predefined jsonp response decorator using 'jsonp_callback' as a fixed callback_param_name."
def json_iframe_response(func):
"""
    A simple json response decorator that wraps the json response into an HTML page.
    It helps when doing a json request using an iframe (e.g. file up-/download):
    @json_iframe_response
def my_view(request):
my_data = {'foo': 'bar'}
return my_data
"""
def inner(request, *args, **kwargs):
ret = func(request, *args, **kwargs)
return __prepare_json_ret(request, ret, use_iframe=True)
return wraps(func)(inner)
def __prepare_json_ret(request, ret, callback_param_name=None, use_iframe=False):
if ret==False:
ret = {'success':False}
elif ret==None: # Sometimes there is no return.
ret = {}
    # Add 'success': True, since it was obviously not set yet and we got valid data (no exception).
func_name = None
if callback_param_name:
func_name = request.GET.get(callback_param_name, "callbackParamName")
try:
if not ret.has_key('success'):
ret['success'] = True
except AttributeError, e:
raise Exception("The returned data of your function must be a dictionary!")
json_ret = ""
try:
        # Sometimes the serialization fails, e.g. when there are too deeply nested objects or even classes inside
json_ret = to_json_response(ret, func_name, use_iframe)
except Exception, e:
print '\n\n===============Exception=============\n\n'+str(e)+'\n\n'
print ret
print '\n\n'
return HttpResponseServerError(content=str(e))
return json_ret
| google-code-export/dojango | dojango/decorators.py | Python | bsd-3-clause | 5,423 |
from __future__ import print_function
import inspect
import numpy as np
import theano
from ..layers.advanced_activations import LeakyReLU, PReLU
from ..layers.core import Dense, Merge, Dropout, Activation, Reshape, Flatten, RepeatVector, Layer
from ..layers.core import ActivityRegularization, TimeDistributedDense, AutoEncoder, MaxoutDense
from ..layers.embeddings import Embedding, WordContextProduct
from ..layers.noise import GaussianNoise, GaussianDropout
from ..layers.normalization import BatchNormalization
from ..layers.recurrent import SimpleRNN, SimpleDeepRNN, GRU, LSTM, JZS1, JZS2, JZS3
from ..layers import containers
from .. import regularizers
from .. import constraints
def container_from_config(layer_dict):
    """Recursively rebuild a Merge, Sequential, Graph container or an atomic
    layer (restoring saved weights when present) from its configuration dict."""
    name = layer_dict.get('name')
hasParams = False
if name == 'Merge':
mode = layer_dict.get('mode')
layers = layer_dict.get('layers')
layer_list = []
for layer in layers:
init_layer = container_from_config(layer)
layer_list.append(init_layer)
merge_layer = Merge(layer_list, mode)
return merge_layer
elif name == 'Sequential':
layers = layer_dict.get('layers')
layer_list = []
for layer in layers:
init_layer = container_from_config(layer)
layer_list.append(init_layer)
seq_layer = containers.Sequential(layer_list)
return seq_layer
elif name == 'Graph':
graph_layer = containers.Graph()
inputs = layer_dict.get('input_config')
for input in inputs:
graph_layer.add_input(**input)
nodes = layer_dict.get('node_config')
for node in nodes:
layer = container_from_config(layer_dict['nodes'].get(node['name']))
node['layer'] = layer
graph_layer.add_node(**node)
outputs = layer_dict.get('output_config')
for output in outputs:
graph_layer.add_output(**output)
return graph_layer
else:
# The case in which layer_dict represents an "atomic" layer
layer_dict.pop('name')
if 'parameters' in layer_dict:
params = layer_dict.get('parameters')
layer_dict.pop('parameters')
hasParams = True
for k, v in layer_dict.items():
# For now, this can only happen for regularizers and constraints
if isinstance(v, dict):
vname = v.get('name')
v.pop('name')
if vname in [x for x, y in inspect.getmembers(constraints, predicate=inspect.isclass)]:
layer_dict[k] = constraints.get(vname, v)
if vname in [x for x, y in inspect.getmembers(regularizers, predicate=inspect.isclass)]:
layer_dict[k] = regularizers.get(vname, v)
base_layer = get_layer(name, layer_dict)
if hasParams:
shaped_params = []
for param in params:
data = np.asarray(param.get('data'))
shape = tuple(param.get('shape'))
shaped_params.append(data.reshape(shape))
base_layer.set_weights(shaped_params)
return base_layer
def print_layer_shapes(model, input_shapes):
"""
Utility function to print the shape of the output at each layer of a Model
Arguments:
model: instance of Model / Merge
input_shapes: dict (Graph), list of tuples (Merge) or tuple (Sequential)
"""
if model.__class__.__name__ in ['Sequential', 'Merge']:
# in this case input_shapes is a tuple, or a list [shape1, shape2]
if not isinstance(input_shapes[0], tuple):
input_shapes = [input_shapes]
inputs = model.get_input(train=False)
if not isinstance(inputs, list):
inputs = [inputs]
input_dummy = [np.zeros(shape, dtype=np.float32)
for shape in input_shapes]
layers = model.layers
elif model.__class__.__name__ == 'Graph':
# in this case input_shapes is a dictionary
inputs = [model.inputs[name].input
for name in model.input_order]
input_dummy = [np.zeros(input_shapes[name], dtype=np.float32)
for name in model.input_order]
layers = [model.nodes[c['name']] for c in model.node_config]
print("input shapes : ", input_shapes)
for l in layers:
shape_f = theano.function(inputs, l.get_output(train=False).shape,
on_unused_input='ignore')
out_shape = tuple(shape_f(*input_dummy))
config = l.get_config()
print('shape after %s: %s' % (config['name'], out_shape))
from .generic_utils import get_from_module
def get_layer(identifier, kwargs=None):
return get_from_module(identifier, globals(), 'layer', instantiate=True, kwargs=kwargs)
| Cadene/keras | keras/utils/layer_utils.py | Python | mit | 4,856 |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilentE3600A import *
from .. import scpi
class agilentE3634A(agilentE3600A, scpi.dcpwr.OCP):
"Agilent E3634A IVI DC power supply driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'E3634A')
super(agilentE3634A, self).__init__(*args, **kwargs)
self._output_count = 1
self._output_spec = [
{
'range': {
'P25V': (25.75, 7.21),
'P50V': (51.5, 4.12)
},
'ovp_max': 55.0,
'ocp_max': 7.5,
'voltage_max': 25.75,
'current_max': 7.21
}
]
self._memory_size = 3
self._init_outputs()
| sephalon/python-ivi | ivi/agilent/agilentE3634A.py | Python | mit | 1,924 |
#!/usr/bin/python
#
# Copyright 2011-2013 Software freedom conservancy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
from selenium.webdriver.remote.command import Command
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from selenium.common.exceptions import WebDriverException
from .service import Service
from .options import Options
class WebDriver(RemoteWebDriver):
"""
Controls the ChromeDriver and allows you to drive the browser.
You will need to download the ChromeDriver executable from
http://chromedriver.storage.googleapis.com/index.html
"""
def __init__(self, executable_path="chromedriver", port=0,
chrome_options=None, service_args=None,
desired_capabilities=None, service_log_path=None):
"""
Creates a new instance of the chrome driver.
Starts the service and then creates new instance of chrome driver.
:Args:
- executable_path - path to the executable. If the default is used it assumes the executable is in the $PATH
- port - port you would like the service to run, if left as 0, a free port will be found.
- desired_capabilities: Dictionary object with non-browser specific
capabilities only, such as "proxy" or "loggingPref".
- chrome_options: this takes an instance of ChromeOptions
"""
if chrome_options is None:
# desired_capabilities stays as passed in
if desired_capabilities is None:
desired_capabilities = Options().to_capabilities()
else:
if desired_capabilities is None:
desired_capabilities = chrome_options.to_capabilities()
else:
desired_capabilities.update(chrome_options.to_capabilities())
self.service = Service(executable_path, port=port,
service_args=service_args, log_path=service_log_path)
self.service.start()
try:
RemoteWebDriver.__init__(self,
command_executor=self.service.service_url,
desired_capabilities=desired_capabilities,
keep_alive=True)
except:
self.quit()
raise
self._is_remote = False
def quit(self):
"""
Closes the browser and shuts down the ChromeDriver executable
that is started when starting the ChromeDriver
"""
try:
RemoteWebDriver.quit(self)
except:
# We don't care about the message because something probably has gone wrong
pass
finally:
self.service.stop()
| smallswan267/octoplus | selenium/webdriver/chrome/webdriver.py | Python | mit | 3,175 |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from past.builtins import basestring
import logging
from flexget import plugin
from flexget.event import event
from flexget.utils.log import log_once
try:
from flexget.plugins.internal.api_rottentomatoes import lookup_movie, API_KEY
except ImportError:
raise plugin.DependencyError(issued_by='rottentomatoes_lookup', missing='api_rottentomatoes',
message='rottentomatoes_lookup requires the `api_rottentomatoes` plugin')
log = logging.getLogger('rottentomatoes_lookup')
def get_rt_url(movie):
for link in movie.links:
if link.name == 'alternate':
return link.url
class PluginRottenTomatoesLookup(object):
"""
Retrieves Rotten Tomatoes information for entries.
Example::
rottentomatoes_lookup: yes
"""
field_map = {
'rt_name': 'title',
'rt_id': 'id',
'rt_year': 'year',
'rt_genres': lambda movie: [genre.name for genre in movie.genres],
'rt_mpaa_rating': 'mpaa_rating',
'rt_runtime': 'runtime',
'rt_critics_consensus': 'critics_consensus',
'rt_releases': lambda movie: dict((release.name, release.date) for
release in movie.release_dates),
'rt_critics_rating': 'critics_rating',
'rt_critics_score': 'critics_score',
'rt_audience_rating': 'audience_rating',
'rt_audience_score': 'audience_score',
'rt_average_score': lambda movie: (movie.critics_score + movie.audience_score) / 2,
'rt_synopsis': 'synopsis',
'rt_posters': lambda movie: dict((poster.name, poster.url) for poster in movie.posters),
'rt_actors': lambda movie: [actor.name for actor in movie.cast],
'rt_directors': lambda movie: [director.name for director in movie.directors],
'rt_studio': 'studio',
'rt_alternate_ids': lambda movie: dict((alt_id.name, alt_id.id)
for alt_id in movie.alternate_ids),
'rt_url': get_rt_url,
# Generic fields filled by all movie lookup plugins:
'movie_name': 'title',
'movie_year': 'year'}
schema = {'oneOf': [
{'type': 'boolean'},
{'type': 'string', 'description': 'provide a custom api key'}
]}
def __init__(self):
self.key = None
def lazy_loader(self, entry):
"""Does the lookup for this entry and populates the entry fields.
:param entry: entry to perform lookup on
:param field: the field to be populated (others may be populated as well)
:returns: the field value
"""
try:
self.lookup(entry, key=self.key)
except plugin.PluginError as e:
log_once(e.value.capitalize(), logger=log)
def lookup(self, entry, search_allowed=True, key=None):
"""
Perform Rotten Tomatoes lookup for entry.
:param entry: Entry instance
:param search_allowed: Allow fallback to search
:param key: optionally specify an API key to use
:raises PluginError: Failure reason
"""
if not key:
key = self.key or API_KEY
movie = lookup_movie(smart_match=entry['title'],
rottentomatoes_id=entry.get('rt_id', eval_lazy=False),
only_cached=(not search_allowed),
api_key=key
)
log.debug(u'Got movie: %s' % movie)
entry.update_using_map(self.field_map, movie)
if not entry.get('imdb_id', eval_lazy=False):
for alt_id in movie.alternate_ids:
if alt_id.name == 'imdb':
entry['imdb_id'] = 'tt' + alt_id.id
break
def on_task_metainfo(self, task, config):
if not config:
return
if isinstance(config, basestring):
self.key = config.lower()
else:
self.key = None
for entry in task.entries:
entry.register_lazy_func(self.lazy_loader, self.field_map)
@property
def movie_identifier(self):
"""Returns the plugin main identifier type"""
return 'rt_id'
@event('plugin.register')
def register_plugin():
plugin.register(PluginRottenTomatoesLookup, 'rottentomatoes_lookup', api_ver=2, interfaces=['task', 'movie_metainfo'])
| sean797/Flexget | flexget/plugins/metainfo/rottentomatoes_lookup.py | Python | mit | 4,533 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test of the omero import control.
Copyright 2009 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import pytest
from path import path
import omero.clients
import uuid
from omero.cli import CLI, NonZeroReturnCode
# Workaround for a poorly named module
plugin = __import__('omero.plugins.import', globals(), locals(),
['ImportControl'], -1)
ImportControl = plugin.ImportControl
help_arguments = ("-h", "--javahelp", "--java-help", "--advanced-help")
class MockClient(omero.clients.BaseClient):
def setSessionId(self, uuid):
self._uuid = uuid
def getSessionId(self):
return self._uuid
class TestImport(object):
def setup_method(self, method):
self.cli = CLI()
self.cli.register("import", ImportControl, "TEST")
self.args = ["import"]
def add_client_dir(self):
dist_dir = path(__file__) / ".." / ".." / ".." / ".." / ".." / ".." /\
".." / "dist" # FIXME: should not be hard-coded
dist_dir = dist_dir.abspath()
client_dir = dist_dir / "lib" / "client"
self.args += ["--clientdir", client_dir]
def mkdir(self, parent, name, with_ds_store=False):
child = parent / name
child.mkdir()
if with_ds_store:
ds_store = child / ".DS_STORE"
ds_store.write("")
return child
def mkfakescreen(self, screen_dir, nplates=2, nruns=2, nwells=2,
nfields=4, with_ds_store=False):
fieldfiles = []
for iplate in range(nplates):
plate_dir = self.mkdir(
screen_dir, "Plate00%s" % str(iplate),
with_ds_store=with_ds_store)
for irun in range(nruns):
run_dir = self.mkdir(
plate_dir, "Run00%s" % str(irun),
with_ds_store=with_ds_store)
for iwell in range(nwells):
well_dir = self.mkdir(
run_dir, "WellA00%s" % str(iwell),
with_ds_store=with_ds_store)
for ifield in range(nfields):
fieldfile = (well_dir / ("Field00%s.fake" %
str(ifield)))
fieldfile.write('')
fieldfiles.append(fieldfile)
return fieldfiles
def mkfakepattern(self, tmpdir, nangles=7, ntimepoints=10):
spim_dir = tmpdir.join("SPIM")
spim_dir.mkdir()
tiffiles = []
for angle in range(1, nangles + 1):
for timepoint in range(1, ntimepoints + 1):
tiffile = (spim_dir / ("spim_TL%s_Angle%s.fake" %
(str(timepoint), str(angle))))
tiffile.write('')
print str(tiffile)
tiffiles.append(tiffile)
patternfile = spim_dir / "spim.pattern"
patternfile.write("spim_TL<1-%s>_Angle<1-%s>.fake"
% (str(ntimepoints), str(nangles)))
assert len(tiffiles) == nangles * ntimepoints
return patternfile, tiffiles
def testDropBoxArgs(self):
class MockImportControl(ImportControl):
def importer(this, args):
assert args.server == "localhost"
assert args.port == "4064"
assert args.key == "b0742975-03a1-4f6d-b0ac-639943f1a147"
assert args.errs == "/tmp/dropbox.err"
assert args.file == "/tmp/dropbox.out"
self.cli.register("mock-import", MockImportControl, "HELP")
self.args = ['-s', 'localhost', '-p', '4064', '-k',
'b0742975-03a1-4f6d-b0ac-639943f1a147']
self.args += ['mock-import', '---errs=/tmp/dropbox.err']
self.args += ['---file=/tmp/dropbox.out']
self.args += ['--', '/OMERO/DropBox/root/tinyTest.d3d.dv']
self.cli.invoke(self.args)
@pytest.mark.parametrize('help_argument', help_arguments)
def testHelp(self, help_argument):
"""Test help arguments"""
self.args += [help_argument]
self.cli.invoke(self.args)
@pytest.mark.parametrize('clientdir_exists', [True, False])
def testImportNoClientDirFails(self, tmpdir, clientdir_exists):
"""Test fake screen import"""
fakefile = tmpdir.join("test.fake")
fakefile.write('')
if clientdir_exists:
self.args += ["--clientdir", str(tmpdir)]
self.args += [str(fakefile)]
with pytest.raises(NonZeroReturnCode):
self.cli.invoke(self.args, strict=True)
@pytest.mark.parametrize("data", (("1", False), ("3", True)))
def testImportDepth(self, tmpdir, capfd, data):
"""Test import using depth argument"""
dir1 = tmpdir.join("a")
dir1.mkdir()
dir2 = dir1 / "b"
dir2.mkdir()
fakefile = dir2 / "test.fake"
fakefile.write('')
self.add_client_dir()
self.args += ["-f", "--debug=ERROR"]
self.args += [str(dir1)]
depth, result = data
self.cli.invoke(self.args + ["--depth=%s" % depth], strict=True)
o, e = capfd.readouterr()
if result:
assert str(fakefile) in str(o)
else:
assert str(fakefile) not in str(o)
def testImportFakeImage(self, tmpdir, capfd):
"""Test fake image import"""
fakefile = tmpdir.join("test.fake")
fakefile.write('')
self.add_client_dir()
self.args += ["-f", "--debug=ERROR"]
self.args += [str(fakefile)]
self.cli.invoke(self.args, strict=True)
o, e = capfd.readouterr()
outputlines = str(o).split('\n')
reader = 'loci.formats.in.FakeReader'
assert outputlines[-2] == str(fakefile)
assert outputlines[-3] == \
"# Group: %s SPW: false Reader: %s" % (str(fakefile), reader)
@pytest.mark.parametrize('with_ds_store', (True, False))
def testImportFakeScreen(self, tmpdir, capfd, with_ds_store):
"""Test fake screen import"""
screen_dir = tmpdir.join("screen.fake")
screen_dir.mkdir()
fieldfiles = self.mkfakescreen(
screen_dir, with_ds_store=with_ds_store)
self.add_client_dir()
self.args += ["-f", "--debug=ERROR"]
self.args += [str(fieldfiles[0])]
self.cli.invoke(self.args, strict=True)
o, e = capfd.readouterr()
outputlines = str(o).split('\n')
reader = 'loci.formats.in.FakeReader'
assert outputlines[-len(fieldfiles)-2] == \
"# Group: %s SPW: true Reader: %s" % (str(fieldfiles[0]), reader)
for i in range(len(fieldfiles)):
assert outputlines[-1-len(fieldfiles)+i] == str(fieldfiles[i])
def testImportPattern(self, tmpdir, capfd):
"""Test pattern import"""
patternfile, tiffiles = self.mkfakepattern(tmpdir)
self.add_client_dir()
self.args += ["-f", "--debug=ERROR"]
self.args += [str(patternfile)]
self.cli.invoke(self.args, strict=True)
o, e = capfd.readouterr()
outputlines = str(o).split('\n')
reader = 'loci.formats.in.FilePatternReader'
print o
assert outputlines[-len(tiffiles)-3] == \
"# Group: %s SPW: false Reader: %s" % (str(patternfile), reader)
assert outputlines[-len(tiffiles)-2] == str(patternfile)
for i in range(len(tiffiles)):
assert outputlines[-1-len(tiffiles)+i] == str(tiffiles[i])
@pytest.mark.parametrize('hostname', ['localhost', 'servername'])
@pytest.mark.parametrize('port', [None, 4064, 14064])
def testLoginArguments(self, monkeypatch, hostname, port, tmpdir):
self.args += ['test.fake']
control = self.cli.controls['import']
control.command_args = []
sessionid = str(uuid.uuid4())
def new_client(x):
if port:
c = MockClient(hostname, port)
else:
c = MockClient(hostname)
c.setSessionId(sessionid)
return c
monkeypatch.setattr(self.cli, 'conn', new_client)
ice_config = tmpdir / 'ice.config'
ice_config.write('omero.host=%s\nomero.port=%g' % (
hostname, (port or 4064)))
monkeypatch.setenv("ICE_CONFIG", ice_config)
control.set_login_arguments(self.cli.parser.parse_args(self.args))
expected_args = ['-s', '%s' % hostname]
expected_args += ['-p', '%s' % (port or 4064)]
expected_args += ['-k', '%s' % sessionid]
assert control.command_args == expected_args
| dominikl/openmicroscopy | components/tools/OmeroPy/test/unit/clitest/test_import.py | Python | gpl-2.0 | 8,731 |
#!/usr/bin/python
#
# Copyright (C) 2012 Oracle Corporation
#
# This file is part of VirtualBox Open Source Edition (OSE), as
# available from http://www.virtualbox.org. This file is free software;
# you can redistribute it and/or modify it under the terms of the GNU
# General Public License (GPL) as published by the Free Software
# Foundation, in version 2 as it comes in the "COPYING" file of the
# VirtualBox OSE distribution. VirtualBox OSE is distributed in the
# hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
#
# Things needed to be set up before running this sample:
# - Install Python and verify it works (2.7.2 will do, 3.x is untested yet)
# - On Windows: Install the PyWin32 extensions for your Python version
# (see http://sourceforge.net/projects/pywin32/)
# - If not already done, set the environment variable "VBOX_INSTALL_PATH"
# to point to your VirtualBox installation directory (which in turn must have
# the "sdk" subfolder")
# - Install the VirtualBox Python bindings by doing a
# "[python] vboxapisetup.py install"
# - Run this sample with "[python] clienttest.py"
import os,sys
import traceback
#
# Converts an enumeration to a printable string.
#
def enumToString(constants, enum, elem):
all = constants.all_values(enum)
for e in all.keys():
if str(elem) == str(all[e]):
return e
return "<unknown>"
def main(argv):
from vboxapi import VirtualBoxManager
# This is a VirtualBox COM/XPCOM API client, no data needed.
wrapper = VirtualBoxManager(None, None)
# Get the VirtualBox manager
mgr = wrapper.mgr
# Get the global VirtualBox object
vbox = wrapper.vbox
print "Running VirtualBox version %s" %(vbox.version)
# Get all constants through the Python wrapper code
vboxConstants = wrapper.constants
# Enumerate all defined machines
for mach in wrapper.getArray(vbox, 'machines'):
try:
# Be prepared for failures - the VM can be inaccessible
vmname = '<inaccessible>'
try:
vmname = mach.name
except Exception, e:
None
vmid = '';
try:
vmid = mach.id
except Exception, e:
None
# Print some basic VM information even if there were errors
print "Machine name: %s [%s]" %(vmname,vmid)
if vmname == '<inaccessible>' or vmid == '':
continue
# Print some basic VM information
print " State: %s" %(enumToString(vboxConstants, "MachineState", mach.state))
print " Session state: %s" %(enumToString(vboxConstants, "SessionState", mach.sessionState))
# Do some stuff which requires a running VM
if mach.state == vboxConstants.MachineState_Running:
# Get the session object
session = mgr.getSessionObject(vbox)
# Lock the current machine (shared mode, since we won't modify the machine)
mach.lockMachine(session, vboxConstants.LockType_Shared)
# Acquire the VM's console and guest object
console = session.console
guest = console.guest
# Retrieve the current Guest Additions runlevel and print
# the installed Guest Additions version
addRunLevel = guest.additionsRunLevel
print " Additions State: %s" %(enumToString(vboxConstants, "AdditionsRunLevelType", addRunLevel))
if addRunLevel != vboxConstants.AdditionsRunLevelType_None:
print " Additions Ver: %s" %(guest.additionsVersion)
# Get the VM's display object
display = console.display
# Get the VM's current display resolution + bit depth + position
screenNum = 0 # From first screen
(screenW, screenH, screenBPP, screenX, screenY, _) = display.getScreenResolution(screenNum)
print " Display (%d): %dx%d, %d BPP at %d,%d" %(screenNum, screenW, screenH, screenBPP, screenX, screenY)
# We're done -- don't forget to unlock the machine!
session.unlockMachine()
except Exception, e:
print "Errror [%s]: %s" %(mach.name, str(e))
traceback.print_exc()
# Call destructor and delete wrapper
del wrapper
if __name__ == '__main__':
main(sys.argv)
| miguelinux/vbox | src/VBox/Main/webservice/samples/python/clienttest.py | Python | gpl-2.0 | 4,520 |
#-*- coding:utf-8 -*-
'''
Created on 18/2/2015
@author: PC06
'''
from flaskext.mysql import MySQL
from flask import Flask
class DBcon():
'''
classdocs
'''
def __init__(self):
'''
Constructor
'''
pass
def conexion(self):
mysql = MySQL()
app = Flask(__name__)
app.config['MYSQL_DATABASE_USER'] = 'python'
app.config['MYSQL_DATABASE_PASSWORD'] = '123456'
app.config['MYSQL_DATABASE_DB'] = 'ventas'
app.config['MYSQL_DATABASE_HOST'] = 'localhost'
mysql.init_app(app)
return mysql
| Elvirita/reposelvira | elviraae/ec/edu/itsae/conn/DBcon.py | Python | gpl-2.0 | 604 |
# -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import pwd
import random
import re
import string
import sys
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_bytes
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.plugins import get_plugin_class
from ansible.utils.ssh_functions import check_for_controlpersist
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['PlayContext']
# the magic variable mapping dictionary below is used to translate
# host/inventory variables to fields in the PlayContext
# object. The dictionary values are tuples, to account for aliases
# in variable names.
MAGIC_VARIABLE_MAPPING = dict(
# base
connection=('ansible_connection', ),
module_compression=('ansible_module_compression', ),
shell=('ansible_shell_type', ),
executable=('ansible_shell_executable', ),
remote_tmp_dir=('ansible_remote_tmp', ),
# connection common
remote_addr=('ansible_ssh_host', 'ansible_host'),
remote_user=('ansible_ssh_user', 'ansible_user'),
password=('ansible_ssh_pass', 'ansible_password'),
port=('ansible_ssh_port', 'ansible_port'),
pipelining=('ansible_ssh_pipelining', 'ansible_pipelining'),
timeout=('ansible_ssh_timeout', 'ansible_timeout'),
private_key_file=('ansible_ssh_private_key_file', 'ansible_private_key_file'),
# networking modules
network_os=('ansible_network_os', ),
connection_user=('ansible_connection_user',),
# ssh TODO: remove
ssh_executable=('ansible_ssh_executable', ),
ssh_common_args=('ansible_ssh_common_args', ),
sftp_extra_args=('ansible_sftp_extra_args', ),
scp_extra_args=('ansible_scp_extra_args', ),
ssh_extra_args=('ansible_ssh_extra_args', ),
ssh_transfer_method=('ansible_ssh_transfer_method', ),
# docker TODO: remove
docker_extra_args=('ansible_docker_extra_args', ),
# become
become=('ansible_become', ),
become_method=('ansible_become_method', ),
become_user=('ansible_become_user', ),
become_pass=('ansible_become_password', 'ansible_become_pass'),
become_exe=('ansible_become_exe', ),
become_flags=('ansible_become_flags', ),
# deprecated
sudo=('ansible_sudo', ),
sudo_user=('ansible_sudo_user', ),
sudo_pass=('ansible_sudo_password', 'ansible_sudo_pass'),
sudo_exe=('ansible_sudo_exe', ),
sudo_flags=('ansible_sudo_flags', ),
su=('ansible_su', ),
su_user=('ansible_su_user', ),
su_pass=('ansible_su_password', 'ansible_su_pass'),
su_exe=('ansible_su_exe', ),
su_flags=('ansible_su_flags', ),
)
# TODO: needs to be configurable
b_SU_PROMPT_LOCALIZATIONS = [
to_bytes('Password'),
to_bytes('암호'),
to_bytes('パスワード'),
to_bytes('Adgangskode'),
to_bytes('Contraseña'),
to_bytes('Contrasenya'),
to_bytes('Hasło'),
to_bytes('Heslo'),
to_bytes('Jelszó'),
to_bytes('Lösenord'),
to_bytes('Mật khẩu'),
to_bytes('Mot de passe'),
to_bytes('Parola'),
to_bytes('Parool'),
to_bytes('Pasahitza'),
to_bytes('Passord'),
to_bytes('Passwort'),
to_bytes('Salasana'),
to_bytes('Sandi'),
to_bytes('Senha'),
to_bytes('Wachtwoord'),
to_bytes('ססמה'),
to_bytes('Лозинка'),
to_bytes('Парола'),
to_bytes('Пароль'),
to_bytes('गुप्तशब्द'),
to_bytes('शब्दकूट'),
to_bytes('సంకేతపదము'),
to_bytes('හස්පදය'),
to_bytes('密码'),
to_bytes('密碼'),
to_bytes('口令'),
]
TASK_ATTRIBUTE_OVERRIDES = (
'become',
'become_user',
'become_pass',
'become_method',
'become_flags',
'connection',
'docker_extra_args', # TODO: remove
'delegate_to',
'no_log',
'remote_user',
)
RESET_VARS = (
'ansible_connection',
'ansible_user',
'ansible_host',
'ansible_port',
# TODO: ???
'ansible_docker_extra_args',
'ansible_ssh_host',
'ansible_ssh_pass',
'ansible_ssh_port',
'ansible_ssh_user',
'ansible_ssh_private_key_file',
'ansible_ssh_pipelining',
'ansible_ssh_executable',
)
OPTION_FLAGS = ('connection', 'remote_user', 'private_key_file', 'verbosity', 'force_handlers', 'step', 'start_at_task', 'diff',
'ssh_common_args', 'docker_extra_args', 'sftp_extra_args', 'scp_extra_args', 'ssh_extra_args')
class PlayContext(Base):
'''
This class is used to consolidate the connection information for
hosts in a play and child tasks, where the task may override some
connection/authentication information.
'''
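    # Illustrative construction (variable names are assumptions): a play
    # context is typically built from a play plus CLI options, then narrowed
    # per task/host.
    #
    #     play_context = PlayContext(play=play, options=options,
    #                                passwords={'conn_pass': pw, 'become_pass': become_pw})
    #     task_context = play_context.set_task_and_variable_override(task, host_vars, templar)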
# base
_module_compression = FieldAttribute(isa='string', default=C.DEFAULT_MODULE_COMPRESSION)
_shell = FieldAttribute(isa='string')
_executable = FieldAttribute(isa='string', default=C.DEFAULT_EXECUTABLE)
# connection fields, some are inherited from Base:
# (connection, port, remote_user, environment, no_log)
_remote_addr = FieldAttribute(isa='string')
_remote_tmp_dir = FieldAttribute(isa='string', default=C.DEFAULT_REMOTE_TMP)
_password = FieldAttribute(isa='string')
_timeout = FieldAttribute(isa='int', default=C.DEFAULT_TIMEOUT)
_connection_user = FieldAttribute(isa='string')
_private_key_file = FieldAttribute(isa='string', default=C.DEFAULT_PRIVATE_KEY_FILE)
_pipelining = FieldAttribute(isa='bool', default=C.ANSIBLE_PIPELINING)
# networking modules
_network_os = FieldAttribute(isa='string')
# docker FIXME: remove these
_docker_extra_args = FieldAttribute(isa='string')
# ssh # FIXME: remove these
_ssh_executable = FieldAttribute(isa='string', default=C.ANSIBLE_SSH_EXECUTABLE)
_ssh_args = FieldAttribute(isa='string', default=C.ANSIBLE_SSH_ARGS)
_ssh_common_args = FieldAttribute(isa='string')
_sftp_extra_args = FieldAttribute(isa='string')
_scp_extra_args = FieldAttribute(isa='string')
_ssh_extra_args = FieldAttribute(isa='string')
_ssh_transfer_method = FieldAttribute(isa='string', default=C.DEFAULT_SSH_TRANSFER_METHOD)
# ???
_connection_lockfd = FieldAttribute(isa='int')
# privilege escalation fields
_become = FieldAttribute(isa='bool')
_become_method = FieldAttribute(isa='string')
_become_user = FieldAttribute(isa='string')
_become_pass = FieldAttribute(isa='string')
_become_exe = FieldAttribute(isa='string', default=C.DEFAULT_BECOME_EXE)
_become_flags = FieldAttribute(isa='string', default=C.DEFAULT_BECOME_FLAGS)
_prompt = FieldAttribute(isa='string')
# DEPRECATED: backwards compatibility fields for sudo/su
_sudo_exe = FieldAttribute(isa='string', default=C.DEFAULT_SUDO_EXE)
_sudo_flags = FieldAttribute(isa='string', default=C.DEFAULT_SUDO_FLAGS)
_sudo_pass = FieldAttribute(isa='string')
_su_exe = FieldAttribute(isa='string', default=C.DEFAULT_SU_EXE)
_su_flags = FieldAttribute(isa='string', default=C.DEFAULT_SU_FLAGS)
_su_pass = FieldAttribute(isa='string')
# general flags
_verbosity = FieldAttribute(isa='int', default=0)
_only_tags = FieldAttribute(isa='set', default=set())
_skip_tags = FieldAttribute(isa='set', default=set())
_force_handlers = FieldAttribute(isa='bool', default=False)
_start_at_task = FieldAttribute(isa='string')
_step = FieldAttribute(isa='bool', default=False)
# Fact gathering settings
_gather_subset = FieldAttribute(isa='string', default=C.DEFAULT_GATHER_SUBSET)
_gather_timeout = FieldAttribute(isa='string', default=C.DEFAULT_GATHER_TIMEOUT)
_fact_path = FieldAttribute(isa='string', default=C.DEFAULT_FACT_PATH)
def __init__(self, play=None, options=None, passwords=None, connection_lockfd=None):
super(PlayContext, self).__init__()
if passwords is None:
passwords = {}
self.password = passwords.get('conn_pass', '')
self.become_pass = passwords.get('become_pass', '')
self.prompt = ''
self.success_key = ''
# a file descriptor to be used during locking operations
self.connection_lockfd = connection_lockfd
# set options before play to allow play to override them
if options:
self.set_options(options)
if play:
self.set_play(play)
def set_play(self, play):
'''
Configures this connection information instance with data from
the play class.
'''
if play.connection:
self.connection = play.connection
if play.remote_user:
self.remote_user = play.remote_user
if play.port:
self.port = int(play.port)
if play.become is not None:
self.become = play.become
if play.become_method:
self.become_method = play.become_method
if play.become_user:
self.become_user = play.become_user
if play.force_handlers is not None:
self.force_handlers = play.force_handlers
def set_options_from_plugin(self, plugin):
# generic derived from connection plugin, temporary for backwards compat, in the end we should not set play_context properties
# get options for plugins
options = C.config.get_configuration_definitions(get_plugin_class(plugin), plugin._load_name)
for option in options:
if option:
flag = options[option].get('name')
if flag:
setattr(self, flag, self.connection.get_option(flag))
        # TODO: made irrelevant by the above
# get ssh options
# for flag in ('ssh_common_args', 'docker_extra_args', 'sftp_extra_args', 'scp_extra_args', 'ssh_extra_args'):
# setattr(self, flag, getattr(options, flag, ''))
def set_options(self, options):
'''
Configures this connection information instance with data from
options specified by the user on the command line. These have a
lower precedence than those set on the play or host.
'''
# privilege escalation
self.become = options.become
self.become_method = options.become_method
self.become_user = options.become_user
self.check_mode = boolean(options.check, strict=False)
self.diff = boolean(options.diff, strict=False)
# general flags (should we move out?)
# should only be 'non plugin' flags
for flag in OPTION_FLAGS:
attribute = getattr(options, flag, False)
if attribute:
setattr(self, flag, attribute)
if hasattr(options, 'timeout') and options.timeout:
self.timeout = int(options.timeout)
# get the tag info from options. We check to see if the options have
# the attribute, as it is not always added via the CLI
if hasattr(options, 'tags'):
self.only_tags.update(options.tags)
if len(self.only_tags) == 0:
self.only_tags = set(['all'])
if hasattr(options, 'skip_tags'):
self.skip_tags.update(options.skip_tags)
def set_task_and_variable_override(self, task, variables, templar):
'''
Sets attributes from the task if they are set, which will override
those from the play.
:arg task: the task object with the parameters that were set on it
:arg variables: variables from inventory
:arg templar: templar instance if templating variables is needed
'''
new_info = self.copy()
# loop through a subset of attributes on the task object and set
# connection fields based on their values
for attr in TASK_ATTRIBUTE_OVERRIDES:
if hasattr(task, attr):
attr_val = getattr(task, attr)
if attr_val is not None:
setattr(new_info, attr, attr_val)
# next, use the MAGIC_VARIABLE_MAPPING dictionary to update this
# connection info object with 'magic' variables from the variable list.
# If the value 'ansible_delegated_vars' is in the variables, it means
# we have a delegated-to host, so we check there first before looking
# at the variables in general
if task.delegate_to is not None:
# In the case of a loop, the delegated_to host may have been
# templated based on the loop variable, so we try and locate
# the host name in the delegated variable dictionary here
delegated_host_name = templar.template(task.delegate_to)
delegated_vars = variables.get('ansible_delegated_vars', dict()).get(delegated_host_name, dict())
delegated_transport = C.DEFAULT_TRANSPORT
for transport_var in MAGIC_VARIABLE_MAPPING.get('connection'):
if transport_var in delegated_vars:
delegated_transport = delegated_vars[transport_var]
break
# make sure this delegated_to host has something set for its remote
# address, otherwise we default to connecting to it by name. This
# may happen when users put an IP entry into their inventory, or if
# they rely on DNS for a non-inventory hostname
for address_var in ('ansible_%s_host' % transport_var,) + MAGIC_VARIABLE_MAPPING.get('remote_addr'):
if address_var in delegated_vars:
break
else:
display.debug("no remote address found for delegated host %s\nusing its name, so success depends on DNS resolution" % delegated_host_name)
delegated_vars['ansible_host'] = delegated_host_name
# reset the port back to the default if none was specified, to prevent
# the delegated host from inheriting the original host's setting
for port_var in ('ansible_%s_port' % transport_var,) + MAGIC_VARIABLE_MAPPING.get('port'):
if port_var in delegated_vars:
break
else:
if delegated_transport == 'winrm':
delegated_vars['ansible_port'] = 5986
else:
delegated_vars['ansible_port'] = C.DEFAULT_REMOTE_PORT
# and likewise for the remote user
for user_var in ('ansible_%s_user' % transport_var,) + MAGIC_VARIABLE_MAPPING.get('remote_user'):
if user_var in delegated_vars and delegated_vars[user_var]:
break
else:
delegated_vars['ansible_user'] = task.remote_user or self.remote_user
else:
delegated_vars = dict()
# setup shell
for exe_var in MAGIC_VARIABLE_MAPPING.get('executable'):
if exe_var in variables:
setattr(new_info, 'executable', variables.get(exe_var))
attrs_considered = []
for (attr, variable_names) in iteritems(MAGIC_VARIABLE_MAPPING):
for variable_name in variable_names:
if attr in attrs_considered:
continue
                # for a delegation task, ONLY use the delegated host's vars; avoid the vars of the host it is delegated for
if task.delegate_to is not None:
if isinstance(delegated_vars, dict) and variable_name in delegated_vars:
setattr(new_info, attr, delegated_vars[variable_name])
attrs_considered.append(attr)
elif variable_name in variables:
setattr(new_info, attr, variables[variable_name])
attrs_considered.append(attr)
# no else, as no other vars should be considered
# become legacy updates -- from commandline
if not new_info.become_pass:
if new_info.become_method == 'sudo' and new_info.sudo_pass:
new_info.become_pass = new_info.sudo_pass
elif new_info.become_method == 'su' and new_info.su_pass:
new_info.become_pass = new_info.su_pass
# become legacy updates -- from inventory file (inventory overrides
# commandline)
for become_pass_name in MAGIC_VARIABLE_MAPPING.get('become_pass'):
if become_pass_name in variables:
break
else: # This is a for-else
if new_info.become_method == 'sudo':
for sudo_pass_name in MAGIC_VARIABLE_MAPPING.get('sudo_pass'):
if sudo_pass_name in variables:
setattr(new_info, 'become_pass', variables[sudo_pass_name])
break
elif new_info.become_method == 'su':
for su_pass_name in MAGIC_VARIABLE_MAPPING.get('su_pass'):
if su_pass_name in variables:
setattr(new_info, 'become_pass', variables[su_pass_name])
break
# make sure we get port defaults if needed
if new_info.port is None and C.DEFAULT_REMOTE_PORT is not None:
new_info.port = int(C.DEFAULT_REMOTE_PORT)
# special overrides for the connection setting
if len(delegated_vars) > 0:
# in the event that we were using local before make sure to reset the
# connection type to the default transport for the delegated-to host,
# if not otherwise specified
for connection_type in MAGIC_VARIABLE_MAPPING.get('connection'):
if connection_type in delegated_vars:
break
else:
remote_addr_local = new_info.remote_addr in C.LOCALHOST
inv_hostname_local = delegated_vars.get('inventory_hostname') in C.LOCALHOST
if remote_addr_local and inv_hostname_local:
setattr(new_info, 'connection', 'local')
elif getattr(new_info, 'connection', None) == 'local' and (not remote_addr_local or not inv_hostname_local):
setattr(new_info, 'connection', C.DEFAULT_TRANSPORT)
# if the final connection type is local, reset the remote_user value to that of the currently logged in user
# this ensures any become settings are obeyed correctly
# we store original in 'connection_user' for use of network/other modules that fallback to it as login user
if new_info.connection == 'local':
if not new_info.connection_user:
new_info.connection_user = new_info.remote_user
new_info.remote_user = pwd.getpwuid(os.getuid()).pw_name
        # set no_log to default if it was not previously set
if new_info.no_log is None:
new_info.no_log = C.DEFAULT_NO_LOG
if task.always_run:
display.deprecated("always_run is deprecated. Use check_mode = no instead.", version="2.4", removed=False)
new_info.check_mode = False
# check_mode replaces always_run, overwrite always_run if both are given
if task.check_mode is not None:
new_info.check_mode = task.check_mode
if task.diff is not None:
new_info.diff = task.diff
return new_info
def make_become_cmd(self, cmd, executable=None):
""" helper function to create privilege escalation commands """
prompt = None
success_key = None
self.prompt = None
if self.become:
if not executable:
executable = self.executable
becomecmd = None
randbits = ''.join(random.choice(string.ascii_lowercase) for x in range(32))
success_key = 'BECOME-SUCCESS-%s' % randbits
success_cmd = shlex_quote('echo %s; %s' % (success_key, cmd))
if executable:
command = '%s -c %s' % (executable, success_cmd)
else:
command = success_cmd
# set executable to use for the privilege escalation method, with various overrides
exe = self.become_exe or getattr(self, '%s_exe' % self.become_method, self.become_method)
# set flags to use for the privilege escalation method, with various overrides
flags = self.become_flags or getattr(self, '%s_flags' % self.become_method, '')
if self.become_method == 'sudo':
# If we have a password, we run sudo with a randomly-generated
# prompt set using -p. Otherwise we run it with default -n, which makes
# it fail if it would have prompted for a password.
# Cannot rely on -n as it can be removed from defaults, which should be
# done for older versions of sudo that do not support the option.
#
# Passing a quoted compound command to sudo (or sudo -s)
# directly doesn't work, so we shellquote it with shlex_quote()
# and pass the quoted string to the user's shell.
# force quick error if password is required but not supplied, should prevent sudo hangs.
if self.become_pass:
prompt = '[sudo via ansible, key=%s] password: ' % randbits
becomecmd = '%s %s -p "%s" -u %s %s' % (exe, flags.replace('-n', ''), prompt, self.become_user, command)
else:
becomecmd = '%s %s -u %s %s' % (exe, flags, self.become_user, command)
elif self.become_method == 'su':
                # passing a code ref to examine the prompt, as a simple string comparison isn't good enough with su
def detect_su_prompt(b_data):
b_password_string = b"|".join([b'(\w+\'s )?' + x for x in b_SU_PROMPT_LOCALIZATIONS])
# Colon or unicode fullwidth colon
b_password_string = b_password_string + to_bytes(u' ?(:|:) ?')
b_SU_PROMPT_LOCALIZATIONS_RE = re.compile(b_password_string, flags=re.IGNORECASE)
return bool(b_SU_PROMPT_LOCALIZATIONS_RE.match(b_data))
prompt = detect_su_prompt
becomecmd = '%s %s %s -c %s' % (exe, flags, self.become_user, shlex_quote(command))
elif self.become_method == 'pbrun':
prompt = 'Password:'
becomecmd = '%s %s -u %s %s' % (exe, flags, self.become_user, success_cmd)
elif self.become_method == 'ksu':
def detect_ksu_prompt(b_data):
return re.match(b"Kerberos password for .*@.*:", b_data)
prompt = detect_ksu_prompt
becomecmd = '%s %s %s -e %s' % (exe, self.become_user, flags, command)
elif self.become_method == 'pfexec':
                # No user as it uses its own exec_attr to figure it out
becomecmd = '%s %s "%s"' % (exe, flags, success_cmd)
elif self.become_method == 'runas':
# become is handled inside the WinRM connection plugin
display.warning("The Windows 'runas' become method is experimental, and may change significantly in future Ansible releases.")
if not self.become_user:
raise AnsibleError(("The 'runas' become method requires a username "
"(specify with the '--become-user' CLI arg, the 'become_user' keyword, or the 'ansible_become_user' variable)"))
becomecmd = cmd
elif self.become_method == 'doas':
prompt = 'doas (%s@' % self.remote_user
exe = self.become_exe or 'doas'
if not self.become_pass:
flags += ' -n '
if self.become_user:
flags += ' -u %s ' % self.become_user
# FIXME: make shell independent
becomecmd = '%s %s echo %s && %s %s env ANSIBLE=true %s' % (exe, flags, success_key, exe, flags, cmd)
elif self.become_method == 'dzdo':
exe = self.become_exe or 'dzdo'
if self.become_pass:
prompt = '[dzdo via ansible, key=%s] password: ' % randbits
becomecmd = '%s -p %s -u %s %s' % (exe, shlex_quote(prompt), self.become_user, command)
else:
becomecmd = '%s -u %s %s' % (exe, self.become_user, command)
elif self.become_method == 'pmrun':
exe = self.become_exe or 'pmrun'
prompt = 'Enter UPM user password:'
becomecmd = '%s %s %s' % (exe, flags, shlex_quote(command))
else:
raise AnsibleError("Privilege escalation method not found: %s" % self.become_method)
if self.become_pass:
self.prompt = prompt
self.success_key = success_key
return becomecmd
return cmd
def update_vars(self, variables):
'''
Adds 'magic' variables relating to connections to the variable dictionary provided.
In case users need to access from the play, this is a legacy from runner.
'''
for prop, var_list in MAGIC_VARIABLE_MAPPING.items():
try:
if 'become' in prop:
continue
var_val = getattr(self, prop)
for var_opt in var_list:
if var_opt not in variables and var_val is not None:
variables[var_opt] = var_val
except AttributeError:
continue
def _get_attr_connection(self):
''' connections are special, this takes care of responding correctly '''
conn_type = None
if self._attributes['connection'] == 'smart':
conn_type = 'ssh'
if sys.platform.startswith('darwin') and self.password:
# due to a current bug in sshpass on OSX, which can trigger
# a kernel panic even for non-privileged users, we revert to
# paramiko on that OS when a SSH password is specified
conn_type = "paramiko"
else:
# see if SSH can support ControlPersist if not use paramiko
if not check_for_controlpersist(self.ssh_executable):
conn_type = "paramiko"
# if someone did `connection: persistent`, default it to using a persistent paramiko connection to avoid problems
elif self._attributes['connection'] == 'persistent':
conn_type = 'paramiko'
if conn_type:
self.connection = conn_type
return self._attributes['connection']
| erjohnso/ansible | lib/ansible/playbook/play_context.py | Python | gpl-3.0 | 27,712 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
class ProcurementOrder(models.Model):
_inherit = 'procurement.order'
task_id = fields.Many2one('project.task', 'Task', copy=False)
def _is_procurement_task(self):
return self.product_id.type == 'service' and self.product_id.track_service == 'task'
@api.multi
def _assign(self):
self.ensure_one()
res = super(ProcurementOrder, self)._assign()
if not res:
# if there isn't any specific procurement.rule defined for the product, we may want to create a task
return self._is_procurement_task()
return res
@api.multi
def _run(self):
self.ensure_one()
if self._is_procurement_task() and not self.task_id:
# If the SO was confirmed, cancelled, set to draft then confirmed, avoid creating a new
# task.
if self.sale_line_id:
existing_task = self.env['project.task'].search(
[('sale_line_id', '=', self.sale_line_id.id)]
)
if existing_task:
return existing_task
# create a task for the procurement
return self._create_service_task()
return super(ProcurementOrder, self)._run()
def _convert_qty_company_hours(self):
company_time_uom_id = self.env.user.company_id.project_time_mode_id
if self.product_uom.id != company_time_uom_id.id and self.product_uom.category_id.id == company_time_uom_id.category_id.id:
planned_hours = self.product_uom._compute_quantity(self.product_qty, company_time_uom_id)
else:
planned_hours = self.product_qty
return planned_hours
def _get_project(self):
Project = self.env['project.project']
project = self.product_id.with_context(force_company=self.company_id.id).project_id
if not project and self.sale_line_id:
# find the project corresponding to the analytic account of the sales order
account = self.sale_line_id.order_id.project_id
if not account:
self.sale_line_id.order_id._create_analytic_account()
account = self.sale_line_id.order_id.project_id
project = Project.search([('analytic_account_id', '=', account.id)], limit=1)
if not project:
project_id = account.project_create({'name': account.name, 'use_tasks': True})
project = Project.browse(project_id)
return project
def _create_service_task(self):
project = self._get_project()
planned_hours = self._convert_qty_company_hours()
task = self.env['project.task'].create({
'name': '%s:%s' % (self.origin or '', self.product_id.name),
'date_deadline': self.date_planned,
'planned_hours': planned_hours,
'remaining_hours': planned_hours,
'partner_id': self.sale_line_id.order_id.partner_id.id or self.partner_dest_id.id,
'user_id': self.env.uid,
'procurement_id': self.id,
'description': self.name + '\n',
'project_id': project.id,
'company_id': self.company_id.id,
})
self.write({'task_id': task.id})
msg_body = _("Task Created (%s): <a href=# data-oe-model=project.task data-oe-id=%d>%s</a>") % (self.product_id.name, task.id, task.name)
self.message_post(body=msg_body)
if self.sale_line_id.order_id:
self.sale_line_id.order_id.message_post(body=msg_body)
task_msg = _("This task has been created from: <a href=# data-oe-model=sale.order data-oe-id=%d>%s</a> (%s)") % (self.sale_line_id.order_id.id, self.sale_line_id.order_id.name, self.product_id.name)
task.message_post(body=task_msg)
return task
| chienlieu2017/it_management | odoo/addons/sale_timesheet/models/procurement.py | Python | gpl-3.0 | 3,947 |
# Generated from TacticNotations.g by ANTLR 4.7.2
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\f")
buf.write("f\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\3\2\3\2\3\2\3\3\3\3")
buf.write("\3\3\3\3\3\3\3\3\5\3!\n\3\3\4\3\4\3\5\3\5\3\6\3\6\3\6")
buf.write("\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3")
buf.write("\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6")
buf.write("\3\6\5\6F\n\6\3\7\3\7\3\b\3\b\6\bL\n\b\r\b\16\bM\5\bP")
buf.write("\n\b\3\t\3\t\5\tT\n\t\3\t\6\tW\n\t\r\t\16\tX\3\n\3\n\3")
buf.write("\n\6\n^\n\n\r\n\16\n_\3\13\6\13c\n\13\r\13\16\13d\2\2")
buf.write("\f\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25\f\3\2\5")
buf.write("\4\2BBaa\6\2\"\"BBaa}\177\5\2\62;C\\c|\2v\2\3\3\2\2\2")
buf.write("\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r")
buf.write("\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25\3")
buf.write("\2\2\2\3\27\3\2\2\2\5 \3\2\2\2\7\"\3\2\2\2\t$\3\2\2\2")
buf.write("\13E\3\2\2\2\rG\3\2\2\2\17O\3\2\2\2\21Q\3\2\2\2\23Z\3")
buf.write("\2\2\2\25b\3\2\2\2\27\30\7}\2\2\30\31\7~\2\2\31\4\3\2")
buf.write("\2\2\32\33\7}\2\2\33!\7-\2\2\34\35\7}\2\2\35!\7,\2\2\36")
buf.write("\37\7}\2\2\37!\7A\2\2 \32\3\2\2\2 \34\3\2\2\2 \36\3\2")
buf.write("\2\2!\6\3\2\2\2\"#\7}\2\2#\b\3\2\2\2$%\7\177\2\2%\n\3")
buf.write("\2\2\2&\'\7\'\2\2\'F\7}\2\2()\7\'\2\2)F\7\177\2\2*+\7")
buf.write("\'\2\2+F\7~\2\2,-\7b\2\2-.\7\'\2\2.F\7}\2\2/\60\7B\2\2")
buf.write("\60\61\7\'\2\2\61F\7}\2\2\62\63\7\'\2\2\63\64\7~\2\2\64")
buf.write("F\7/\2\2\65\66\7\'\2\2\66\67\7~\2\2\678\7/\2\28F\7@\2")
buf.write("\29:\7\'\2\2:;\7~\2\2;F\7~\2\2<=\7\'\2\2=>\7~\2\2>?\7")
buf.write("~\2\2?F\7~\2\2@A\7\'\2\2AB\7~\2\2BC\7~\2\2CD\7~\2\2DF")
buf.write("\7~\2\2E&\3\2\2\2E(\3\2\2\2E*\3\2\2\2E,\3\2\2\2E/\3\2")
buf.write("\2\2E\62\3\2\2\2E\65\3\2\2\2E9\3\2\2\2E<\3\2\2\2E@\3\2")
buf.write("\2\2F\f\3\2\2\2GH\7~\2\2H\16\3\2\2\2IP\t\2\2\2JL\n\3\2")
buf.write("\2KJ\3\2\2\2LM\3\2\2\2MK\3\2\2\2MN\3\2\2\2NP\3\2\2\2O")
buf.write("I\3\2\2\2OK\3\2\2\2P\20\3\2\2\2QV\7B\2\2RT\7a\2\2SR\3")
buf.write("\2\2\2ST\3\2\2\2TU\3\2\2\2UW\t\4\2\2VS\3\2\2\2WX\3\2\2")
buf.write("\2XV\3\2\2\2XY\3\2\2\2Y\22\3\2\2\2Z[\7a\2\2[]\7a\2\2\\")
buf.write("^\t\4\2\2]\\\3\2\2\2^_\3\2\2\2_]\3\2\2\2_`\3\2\2\2`\24")
buf.write("\3\2\2\2ac\7\"\2\2ba\3\2\2\2cd\3\2\2\2db\3\2\2\2de\3\2")
buf.write("\2\2e\26\3\2\2\2\13\2 EMOSX_d\2")
return buf.getvalue()
class TacticNotationsLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
LALT = 1
LGROUP = 2
LBRACE = 3
RBRACE = 4
ESCAPED = 5
PIPE = 6
ATOM = 7
ID = 8
SUB = 9
WHITESPACE = 10
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"'{|'", "'{'", "'}'", "'|'" ]
symbolicNames = [ "<INVALID>",
"LALT", "LGROUP", "LBRACE", "RBRACE", "ESCAPED", "PIPE", "ATOM",
"ID", "SUB", "WHITESPACE" ]
ruleNames = [ "LALT", "LGROUP", "LBRACE", "RBRACE", "ESCAPED", "PIPE",
"ATOM", "ID", "SUB", "WHITESPACE" ]
grammarFileName = "TacticNotations.g"
def __init__(self, input=None, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.7.2")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
| Zimmi48/coq | doc/tools/coqrst/notations/TacticNotationsLexer.py | Python | lgpl-2.1 | 3,961 |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def load_test_data(load_onto=None):
from openstack_dashboard.test.test_data import ceilometer_data
from openstack_dashboard.test.test_data import cinder_data
from openstack_dashboard.test.test_data import exceptions
from openstack_dashboard.test.test_data import glance_data
from openstack_dashboard.test.test_data import heat_data
from openstack_dashboard.test.test_data import keystone_data
from openstack_dashboard.test.test_data import neutron_data
from openstack_dashboard.test.test_data import nova_data
from openstack_dashboard.test.test_data import swift_data
from openstack_dashboard.test.test_data import trove_data
    # The order of these loaders matters; some depend on others.
loaders = (
exceptions.data,
keystone_data.data,
glance_data.data,
nova_data.data,
cinder_data.data,
neutron_data.data,
swift_data.data,
heat_data.data,
ceilometer_data.data,
trove_data.data,
)
if load_onto:
for data_func in loaders:
data_func(load_onto)
return load_onto
else:
return TestData(*loaders)
class TestData(object):
"""Holder object for test data. Any functions passed to the init method
will be called with the ``TestData`` object as their only argument. They
can then load data onto the object as desired.
The idea is to use the instantiated object like this::
>>> import glance_data
>>> TEST = TestData(glance_data.data)
>>> TEST.images.list()
[<Image: visible_image>, <Image: invisible_image>]
>>> TEST.images.first()
<Image: visible_image>
You can load as little or as much data as you like as long as the loaders
don't conflict with each other.
See the
:class:`~openstack_dashboard.test.test_data.utils.TestDataContainer`
class for a list of available methods.
"""
def __init__(self, *args):
for data_func in args:
data_func(self)
class TestDataContainer(object):
"""A container for test data objects.
The behavior of this class is meant to mimic a "manager" class, which
has convenient shortcuts for common actions like "list", "filter", "get",
and "add".
"""
def __init__(self):
self._objects = []
def add(self, *args):
"""Add a new object to this container.
Generally this method should only be used during data loading, since
adding data during a test can affect the results of other tests.
"""
for obj in args:
if obj not in self._objects:
self._objects.append(obj)
def list(self):
"""Returns a list of all objects in this container."""
return self._objects
def filter(self, filtered=None, **kwargs):
"""Returns objects in this container whose attributes match the given
keyword arguments.
"""
if filtered is None:
filtered = self._objects
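        # Filters are applied one keyword at a time: pop a (key, value) pair,
        # narrow the candidate list, then recurse until no kwargs remain.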
try:
key, value = kwargs.popitem()
except KeyError:
# We're out of filters, return
return filtered
def get_match(obj):
return hasattr(obj, key) and getattr(obj, key) == value
return self.filter(filtered=filter(get_match, filtered), **kwargs)
def get(self, **kwargs):
"""Returns the single object in this container whose attributes match
the given keyword arguments. An error will be raised if the arguments
provided don't return exactly one match.
"""
matches = self.filter(**kwargs)
if not matches:
raise Exception("No matches found.")
elif len(matches) > 1:
raise Exception("Multiple matches found.")
else:
return matches.pop()
def first(self):
"""Returns the first object from this container."""
return self._objects[0]
def count(self):
return len(self._objects)
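# Illustrative usage sketch (not part of the original module). It shows how a
# TestDataContainer behaves once a loader has populated it; ``image_a`` and
# ``visible_image`` are hypothetical names echoing the docstrings above.
#
#     container = TestDataContainer()
#     container.add(image_a, image_b)       # duplicates are silently skipped
#     container.list()                      # -> [image_a, image_b]
#     container.filter(status='active')     # keyword filtering, one key at a time
#     container.get(name='visible_image')   # exactly one match or an exception
#     container.first(), container.count()  # convenience accessors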
| spring-week-topos/horizon-week | openstack_dashboard/test/test_data/utils.py | Python | apache-2.0 | 4,617 |
from nose.tools import * # noqa: F403
from tests.base import AdminTestCase
from osf_tests.factories import NodeFactory, UserFactory
from osf.utils.permissions import ADMIN
from admin.nodes.serializers import serialize_simple_user_and_node_permissions, serialize_node
class TestNodeSerializers(AdminTestCase):
def test_serialize_node(self):
node = NodeFactory()
info = serialize_node(node)
assert_is_instance(info, dict)
assert_equal(info['parent'], node.parent_id)
assert_equal(info['title'], node.title)
assert_equal(info['children'], [])
assert_equal(info['id'], node._id)
assert_equal(info['public'], node.is_public)
assert_equal(len(info['contributors']), 1)
assert_false(info['deleted'])
def test_serialize_deleted(self):
node = NodeFactory()
info = serialize_node(node)
assert_false(info['deleted'])
node.is_deleted = True
info = serialize_node(node)
assert_true(info['deleted'])
node.is_deleted = False
info = serialize_node(node)
assert_false(info['deleted'])
def test_serialize_simple_user(self):
user = UserFactory()
node = NodeFactory(creator=user)
info = serialize_simple_user_and_node_permissions(node, user)
assert_is_instance(info, dict)
assert_equal(info['id'], user._id)
assert_equal(info['name'], user.fullname)
assert_equal(info['permission'], ADMIN)
| mfraezz/osf.io | admin_tests/nodes/test_serializers.py | Python | apache-2.0 | 1,496 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("zerver", "0301_fix_unread_messages_in_deactivated_streams"),
]
operations = [
# We do Stream lookups case-insensitively with respect to the name, but we were missing
# the appropriate (realm_id, upper(name::text)) unique index to enforce uniqueness
        # at the database level.
migrations.RunSQL(
"""
CREATE UNIQUE INDEX zerver_stream_realm_id_name_uniq ON zerver_stream (realm_id, upper(name::text));
"""
),
migrations.AlterUniqueTogether(
name="stream",
unique_together=set(),
),
]
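# Illustrative note (not part of the original migration): the expression index
# created above backs case-insensitive lookups such as
#
#     Stream.objects.filter(realm_id=realm.id, name__iexact=name)
#
# and lets PostgreSQL reject two streams in the same realm whose names differ
# only in case.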
| andersk/zulip | zerver/migrations/0302_case_insensitive_stream_name_index.py | Python | apache-2.0 | 706 |
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 Cloudscaling Group, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models.
"""
import six
from sqlalchemy import Column, Integer
from sqlalchemy import DateTime
from sqlalchemy.orm import object_mapper
from solum.openstack.common.db.sqlalchemy import session as sa
from solum.openstack.common import timeutils
class ModelBase(object):
"""Base class for models."""
__table_initialized__ = False
def save(self, session=None):
"""Save this object."""
if not session:
session = sa.get_session()
        # NOTE(boris-42): This part of the code should look like:
        #                       session.add(self)
        #                       session.flush()
        #                       But there is a bug in sqlalchemy and eventlet that
        #                       raises a NoneType exception if there is no running
        #                       transaction and rollback is called. As long as
        #                       sqlalchemy has this bug we have to create the
        #                       transaction explicitly.
with session.begin(subtransactions=True):
session.add(self)
session.flush()
def __setitem__(self, key, value):
setattr(self, key, value)
def __getitem__(self, key):
return getattr(self, key)
def get(self, key, default=None):
return getattr(self, key, default)
@property
def _extra_keys(self):
"""Specifies custom fields
Subclasses can override this property to return a list
of custom fields that should be included in their dict
representation.
        For reference, check tests/db/sqlalchemy/test_models.py.
"""
return []
def __iter__(self):
columns = dict(object_mapper(self).columns).keys()
# NOTE(russellb): Allow models to specify other keys that can be looked
# up, beyond the actual db columns. An example would be the 'name'
# property for an Instance.
columns.extend(self._extra_keys)
self._i = iter(columns)
return self
def next(self):
n = six.advance_iterator(self._i)
return n, getattr(self, n)
def update(self, values):
"""Make the model object behave like a dict."""
for k, v in six.iteritems(values):
setattr(self, k, v)
def iteritems(self):
"""Make the model object behave like a dict.
Includes attributes from joins.
"""
local = dict(self)
joined = dict([(k, v) for k, v in six.iteritems(self.__dict__)
if not k[0] == '_'])
local.update(joined)
return six.iteritems(local)
class TimestampMixin(object):
created_at = Column(DateTime, default=lambda: timeutils.utcnow())
updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow())
class SoftDeleteMixin(object):
deleted_at = Column(DateTime)
deleted = Column(Integer, default=0)
def soft_delete(self, session=None):
"""Mark this object as deleted."""
self.deleted = self.id
self.deleted_at = timeutils.utcnow()
self.save(session=session)
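# Illustrative sketch (not part of the original module): how these mixins are
# typically combined with a declarative base. ``Base`` and ``Widget`` are
# hypothetical names used only for this example.
#
#     from sqlalchemy import Column, Integer, String
#     from sqlalchemy.ext.declarative import declarative_base
#
#     Base = declarative_base()
#
#     class Widget(Base, ModelBase, TimestampMixin, SoftDeleteMixin):
#         __tablename__ = 'widget'
#         id = Column(Integer, primary_key=True)
#         name = Column(String(255))
#
# Widget rows then get created_at/updated_at automatically, can be soft-deleted
# via widget.soft_delete(), and expose the dict-like helpers from ModelBase.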
| alex/solum | solum/openstack/common/db/sqlalchemy/models.py | Python | apache-2.0 | 3,969 |
""" msgfmt tool """
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/msgfmt.py 2014/03/02 14:18:15 garyo"
from SCons.Builder import BuilderBase
#############################################################################
class _MOFileBuilder(BuilderBase):
""" The builder class for `MO` files.
  The reason for this builder to exist and its purpose are quite similar
  to those of `_POFileBuilder`. This time we extend the list of sources, not
  targets, and call `BuilderBase._execute()` only once (as we assume a
  single target here).
"""
def _execute(self, env, target, source, *args, **kw):
    # Here we add support for the 'LINGUAS_FILE' keyword. An emitter is not
    # suitable in this case, as it is called too late (after multiple
    # sources are handled by the single_source builder).
import SCons.Util
from SCons.Tool.GettextCommon import _read_linguas_from_files
linguas_files = None
if env.has_key('LINGUAS_FILE') and env['LINGUAS_FILE'] is not None:
linguas_files = env['LINGUAS_FILE']
# This should prevent from endless recursion.
env['LINGUAS_FILE'] = None
# We read only languages. Suffixes shall be added automatically.
linguas = _read_linguas_from_files(env, linguas_files)
if SCons.Util.is_List(source):
source.extend(linguas)
elif source is not None:
source = [source] + linguas
else:
source = linguas
result = BuilderBase._execute(self,env,target,source,*args, **kw)
if linguas_files is not None:
env['LINGUAS_FILE'] = linguas_files
return result
#############################################################################
#############################################################################
def _create_mo_file_builder(env, **kw):
""" Create builder object for `MOFiles` builder """
import SCons.Action
  # FIXME: Which factory should be used for source? Ours or theirs?
kw['action'] = SCons.Action.Action('$MSGFMTCOM','$MSGFMTCOMSTR')
kw['suffix'] = '$MOSUFFIX'
kw['src_suffix'] = '$POSUFFIX'
kw['src_builder'] = '_POUpdateBuilder'
kw['single_source'] = True
return _MOFileBuilder(**kw)
#############################################################################
#############################################################################
def generate(env,**kw):
""" Generate `msgfmt` tool """
import SCons.Util
from SCons.Tool.GettextCommon import _detect_msgfmt
try:
env['MSGFMT'] = _detect_msgfmt(env)
except:
env['MSGFMT'] = 'msgfmt'
env.SetDefault(
MSGFMTFLAGS = [ SCons.Util.CLVar('-c') ],
MSGFMTCOM = '$MSGFMT $MSGFMTFLAGS -o $TARGET $SOURCE',
MSGFMTCOMSTR = '',
MOSUFFIX = ['.mo'],
POSUFFIX = ['.po']
)
env.Append( BUILDERS = { 'MOFiles' : _create_mo_file_builder(env) } )
#############################################################################
#############################################################################
def exists(env):
""" Check if the tool exists """
from SCons.Tool.GettextCommon import _msgfmt_exists
try:
return _msgfmt_exists(env)
except:
return False
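#############################################################################
# Illustrative usage sketch (not part of the original tool): compiling message
# catalogues from an SConstruct once this tool has been loaded. The language
# names below are hypothetical.
#
#     env = Environment(tools=['default', 'msgfmt'])
#     env.MOFiles(['pl', 'de'])        # builds pl.mo/de.mo from pl.po/de.po
#     env.MOFiles(LINGUAS_FILE=1)      # also read languages from a LINGUAS file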
#############################################################################
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| h4ck3rm1k3/OpenWrt-Firefly-SDK | staging_dir/host/lib/scons-2.3.1/SCons/Tool/msgfmt.py | Python | gpl-2.0 | 4,460 |
#####
import sys
import inspect
from pylons import config
import logging
import zkpylons.lib.helpers as h
from pylons import request, response, session, tmpl_context as c
from zkpylons.lib.helpers import redirect_to
from pylons.util import class_name_from_module_name
from zkpylons.model import meta
from pylons.controllers.util import abort
from zkpylons.lib.base import BaseController, render
from zkpylons.model import URLHash
log = logging.getLogger(__name__)
class SecretHashController(BaseController):
def lookup(self, hash):
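        # Resolve the stored hash back to its original URL and hand the request
        # over to the controller/action that the URL routes to.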
c.hash = URLHash.find_by_hash(hash)
if c.hash is None:
abort(404, "Sorry, Invalid Hash.")
return self.transfer(url=c.hash.url)
# as per http://www.mail-archive.com/[email protected]/msg06643.html
def transfer(controller = None, action = None, url = None, **kwargs):
"""usage:
1. result = transfer(url = "/someurl/someaction")
2. result = transfer(controller = "/controller1/sub_controller2",
    action = "test")  # extra kwargs will be passed to the action.
"""
if (url != None):
route_map = config['routes.map']
match_route= route_map.match(url)
if (match_route == None):
raise(Exception("no route matched url '%s'" % url))
# if
controller = match_route["controller"].replace("/", ".")
action = match_route["action"]
del(match_route["controller"])
del(match_route["action"])
kwargs.update(match_route)
else:
if (controller == None):
route_map = config['routes.map']
match_route = route_map.match("/")
if (match_route == None):
raise(Exception("no route matched url '%s'" % url))
# if
controller = match_route["controller"].replace("/", ".")
if (action == None):
action = match_route["action"]
# if
del(match_route["controller"])
del(match_route["action"])
kwargs.update(match_route)
else:
controller = controller.replace("/", ".")
if (action == None):
action = "index"
# if
# if
# if
full_module_name = config['pylons.package'] + '.controllers.' + controller
__traceback_hide__ = 'before_and_this'
try:
__import__(full_module_name)
except ImportError, e:
raise(NotImplementedError("'%s' not found: %s" % (controller, e)))
# try
module_name = controller.split('.')[-1]
class_name = class_name_from_module_name(module_name) + 'Controller'
controller_class = getattr(sys.modules[full_module_name], class_name)
controller_inst = controller_class()
if (hasattr(controller_inst, action)):
action_method = getattr(controller_inst, action, None)
#if (not isinstance(action_method, types.MethodType)):
# raise(NotImplementedError("action '%s' not found in '%s'" % (action, controller)))
# if
if (hasattr(controller_inst, "__before__")):
before_method = getattr(controller_inst, "__before__", None)
#if (isinstance(before_method, types.MethodType)):
# before_method(action)
# if
# if
action_args_name, action_args, action_kargs, action_defaults = inspect.getargspec(action_method)
del(action_args_name[0])
call_kargs = {}
for k, v in kwargs.iteritems():
if (k in action_args_name):
call_kargs[k] = v
# if
# for
result = action_method(**call_kargs)
if (hasattr(controller_inst, "__after__")):
after_method = getattr(controller_inst, "__after__", None)
#if (isinstance(after_method, types.MethodType)):
# after_method(action)
# if
# if
return(result)
else:
raise(NotImplementedError("action '%s' not found in '%s'" % (action, controller)))
# if
# def
| neillc/zookeepr | zkpylons/controllers/secret_hash.py | Python | gpl-2.0 | 4,319 |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from units.mock.loader import DictDataLoader
from mock import MagicMock
from ansible.template import Templar
from ansible import errors
from ansible.playbook import conditional
class TestConditional(unittest.TestCase):
def setUp(self):
self.loader = DictDataLoader({})
self.cond = conditional.Conditional(loader=self.loader)
self.templar = Templar(loader=self.loader, variables={})
def _eval_con(self, when=None, variables=None):
when = when or []
variables = variables or {}
self.cond.when = when
ret = self.cond.evaluate_conditional(self.templar, variables)
return ret
def test_false(self):
when = [u"False"]
ret = self._eval_con(when, {})
self.assertFalse(ret)
def test_true(self):
when = [u"True"]
ret = self._eval_con(when, {})
self.assertTrue(ret)
def test_true_boolean(self):
self.cond.when = [True]
m = MagicMock()
ret = self.cond.evaluate_conditional(m, {})
self.assertTrue(ret)
self.assertFalse(m.is_template.called)
def test_false_boolean(self):
self.cond.when = [False]
m = MagicMock()
ret = self.cond.evaluate_conditional(m, {})
self.assertFalse(ret)
self.assertFalse(m.is_template.called)
def test_undefined(self):
when = [u"{{ some_undefined_thing }}"]
self.assertRaisesRegex(errors.AnsibleError, "The conditional check '{{ some_undefined_thing }}' failed",
self._eval_con, when, {})
def test_defined(self):
variables = {'some_defined_thing': True}
when = [u"{{ some_defined_thing }}"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_dict_defined_values(self):
variables = {'dict_value': 1,
'some_defined_dict': {'key1': 'value1',
'key2': '{{ dict_value }}'}}
when = [u"some_defined_dict"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_dict_defined_values_is_defined(self):
variables = {'dict_value': 1,
'some_defined_dict': {'key1': 'value1',
'key2': '{{ dict_value }}'}}
when = [u"some_defined_dict.key1 is defined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_dict_defined_multiple_values_is_defined(self):
variables = {'dict_value': 1,
'some_defined_dict': {'key1': 'value1',
'key2': '{{ dict_value }}'}}
when = [u"some_defined_dict.key1 is defined",
u"some_defined_dict.key2 is not undefined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_nested_hostvars_undefined_values(self):
variables = {'dict_value': 1,
'hostvars': {'host1': {'key1': 'value1',
'key2': '{{ dict_value }}'},
'host2': '{{ dict_value }}',
'host3': '{{ undefined_dict_value }}',
# no host4
},
'some_dict': {'some_dict_key1': '{{ hostvars["host3"] }}'}
}
when = [u"some_dict.some_dict_key1 == hostvars['host3']"]
# self._eval_con(when, variables)
self.assertRaisesRegex(errors.AnsibleError,
r"The conditional check 'some_dict.some_dict_key1 == hostvars\['host3'\]' failed",
# "The conditional check 'some_dict.some_dict_key1 == hostvars['host3']' failed",
# "The conditional check 'some_dict.some_dict_key1 == hostvars['host3']' failed.",
self._eval_con,
when, variables)
def test_dict_undefined_values_bare(self):
variables = {'dict_value': 1,
'some_defined_dict_with_undefined_values': {'key1': 'value1',
'key2': '{{ dict_value }}',
'key3': '{{ undefined_dict_value }}'
}}
# raises an exception when a non-string conditional is passed to extract_defined_undefined()
when = [u"some_defined_dict_with_undefined_values"]
self.assertRaisesRegex(errors.AnsibleError,
"The conditional check 'some_defined_dict_with_undefined_values' failed.",
self._eval_con,
when, variables)
def test_dict_undefined_values_is_defined(self):
variables = {'dict_value': 1,
'some_defined_dict_with_undefined_values': {'key1': 'value1',
'key2': '{{ dict_value }}',
'key3': '{{ undefined_dict_value }}'
}}
when = [u"some_defined_dict_with_undefined_values is defined"]
self.assertRaisesRegex(errors.AnsibleError,
"The conditional check 'some_defined_dict_with_undefined_values is defined' failed.",
self._eval_con,
when, variables)
def test_is_defined(self):
variables = {'some_defined_thing': True}
when = [u"some_defined_thing is defined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_is_undefined(self):
variables = {'some_defined_thing': True}
when = [u"some_defined_thing is undefined"]
ret = self._eval_con(when, variables)
self.assertFalse(ret)
def test_is_undefined_and_defined(self):
variables = {'some_defined_thing': True}
when = [u"some_defined_thing is undefined", u"some_defined_thing is defined"]
ret = self._eval_con(when, variables)
self.assertFalse(ret)
def test_is_undefined_and_defined_reversed(self):
variables = {'some_defined_thing': True}
when = [u"some_defined_thing is defined", u"some_defined_thing is undefined"]
ret = self._eval_con(when, variables)
self.assertFalse(ret)
def test_is_not_undefined(self):
variables = {'some_defined_thing': True}
when = [u"some_defined_thing is not undefined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_is_not_defined(self):
variables = {'some_defined_thing': True}
when = [u"some_undefined_thing is not defined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_is_hostvars_quotes_is_defined(self):
variables = {'hostvars': {'some_host': {}},
'compare_targets_single': "hostvars['some_host']",
'compare_targets_double': 'hostvars["some_host"]',
'compare_targets': {'double': '{{ compare_targets_double }}',
'single': "{{ compare_targets_single }}"},
}
when = [u"hostvars['some_host'] is defined",
u'hostvars["some_host"] is defined',
u"{{ compare_targets.double }} is defined",
u"{{ compare_targets.single }} is defined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_is_hostvars_quotes_is_defined_but_is_not_defined(self):
variables = {'hostvars': {'some_host': {}},
'compare_targets_single': "hostvars['some_host']",
'compare_targets_double': 'hostvars["some_host"]',
'compare_targets': {'double': '{{ compare_targets_double }}',
'single': "{{ compare_targets_single }}"},
}
when = [u"hostvars['some_host'] is defined",
u'hostvars["some_host"] is defined',
u"{{ compare_targets.triple }} is defined",
u"{{ compare_targets.quadruple }} is defined"]
self.assertRaisesRegex(errors.AnsibleError,
"The conditional check '{{ compare_targets.triple }} is defined' failed",
self._eval_con,
when, variables)
def test_is_hostvars_host_is_defined(self):
variables = {'hostvars': {'some_host': {}, }}
when = [u"hostvars['some_host'] is defined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_is_hostvars_host_undefined_is_defined(self):
variables = {'hostvars': {'some_host': {}, }}
when = [u"hostvars['some_undefined_host'] is defined"]
ret = self._eval_con(when, variables)
self.assertFalse(ret)
def test_is_hostvars_host_undefined_is_undefined(self):
variables = {'hostvars': {'some_host': {}, }}
when = [u"hostvars['some_undefined_host'] is undefined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
def test_is_hostvars_host_undefined_is_not_defined(self):
variables = {'hostvars': {'some_host': {}, }}
when = [u"hostvars['some_undefined_host'] is not defined"]
ret = self._eval_con(when, variables)
self.assertTrue(ret)
| mattclay/ansible | test/units/playbook/test_conditional.py | Python | gpl-3.0 | 9,810 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# This is a virtual module that is entirely implemented as an action plugin and runs on the controller
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: template
version_added: historical
options:
follow:
description:
- Determine whether symbolic links should be followed.
- When set to C(yes) symbolic links will be followed, if they exist.
- When set to C(no) symbolic links will not be followed.
- Previous to Ansible 2.4, this was hardcoded as C(yes).
type: bool
default: no
version_added: '2.4'
notes:
- You can use the M(copy) module with the C(content:) option if you prefer the template inline,
as part of the playbook.
- For Windows you can use M(win_template) which uses '\\r\\n' as C(newline_sequence) by default.
seealso:
- module: copy
- module: win_copy
- module: win_template
author:
- Ansible Core Team
- Michael DeHaan
extends_documentation_fragment:
- backup
- files
- template_common
- validate
'''
EXAMPLES = r'''
- name: Template a file to /etc/files.conf
template:
src: /mytemplates/foo.j2
dest: /etc/file.conf
owner: bin
group: wheel
mode: '0644'
- name: Template a file, using symbolic modes (equivalent to 0644)
template:
src: /mytemplates/foo.j2
dest: /etc/file.conf
owner: bin
group: wheel
mode: u=rw,g=r,o=r
- name: Copy a version of named.conf that is dependent on the OS. setype obtained by doing ls -Z /etc/named.conf on original file
template:
src: named.conf_{{ ansible_os_family}}.j2
dest: /etc/named.conf
group: named
setype: named_conf_t
mode: 0640
- name: Create a DOS-style text file from a template
template:
src: config.ini.j2
dest: /share/windows/config.ini
newline_sequence: '\r\n'
- name: Copy a new sudoers file into place, after passing validation with visudo
template:
src: /mine/sudoers
dest: /etc/sudoers
validate: /usr/sbin/visudo -cf %s
- name: Update sshd configuration safely, avoid locking yourself out
template:
src: etc/ssh/sshd_config.j2
dest: /etc/ssh/sshd_config
owner: root
group: root
mode: '0600'
validate: /usr/sbin/sshd -t -f %s
backup: yes
'''
| Dhivyap/ansible | lib/ansible/modules/files/template.py | Python | gpl-3.0 | 2,564 |
from __future__ import unicode_literals
from django import template
from temba.channels.views import get_channel_icon
register = template.Library()
@register.filter
def channel_icon(channel):
return get_channel_icon(channel.channel_type)
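# Illustrative usage sketch (not part of the original file), assuming this
# template library is loaded under its module name "channels":
#
#     {% load channels %}
#     <i class="{{ channel|channel_icon }}"></i>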
| praekelt/rapidpro | temba/channels/templatetags/channels.py | Python | agpl-3.0 | 246 |
#!/usr/bin/env python
from Acspy.Nc.CommonNC import CommonNC
from Acspy.Nc.Supplier import Supplier
import datacapEx
from datacapEx import ExecBlockProcessedEvent, DataCapturerId, ExecBlockStartedEvent, ScanStartedEvent
import asdmEX
s = Supplier('pyTest-NC')
name = 'DATACAP1'
s.publishEvent(name)
sessionId = asdmEX.IDLEntityRef('SessionId','X1','SID','1.0')
sb = asdmEX.IDLEntityRef('SB1','X1','SB1','1.0')
dcId = DataCapturerId(name, 'arrayId', sessionId, sb)
execBlockId = asdmEX.IDLEntityRef('ExecBlockId','X1','SB1','1.0')
d = ExecBlockProcessedEvent( dcId, 'statu', execBlockId, 0)
s.publishEvent(d)
execId = asdmEX.IDLEntityRef('4','3','2', '1')
execBlockId = asdmEX.IDLEntityRef('1','2','3','4')
sse = ScanStartedEvent(execId, "something", 4, [datacapEx.LAST, datacapEx.LAST], 0)
s.publishEvent(sse)
execId = "23"
execBlockEntityRef = asdmEX.IDLEntityRef(execId,"X00000000","0","0")
sbId = asdmEX.IDLEntityRef(execId,"X00000000","0","0")
arrayId = "1"
time = 100
startExecBlock = datacapEx.ExecBlockStartedEvent(execBlockEntityRef,sbId,sessionId,arrayId,time)
s.publishEvent(startExecBlock)
endExecBlock = datacapEx.ExecBlockEndedEvent(execBlockEntityRef,sbId,sessionId,arrayId,datacapEx.SUCCESS,time+10)
s.publishEvent(endExecBlock)
print "All structures successfully sent!!"
s.destroyNotificationChannel()
| ACS-Community/ACS | LGPL/CommonSoftware/nctest/ws/test/pyStructureEventTest.py | Python | lgpl-2.1 | 1,355 |
from mock import *
from .gp_unittest import *
from gppylib.programs.gppkg import GpPkgProgram
import sys
class GpPkgProgramTestCase(GpTestCase):
def setUp(self):
self.mock_cmd = Mock()
self.mock_gppkg = Mock()
self.mock_uninstall_package = Mock()
self.apply_patches([
patch('gppylib.operations.package.logger', return_value=Mock(spec=['log', 'info', 'debug', 'error'])),
patch('gppylib.programs.gppkg.Command', return_value=self.mock_cmd),
patch('gppylib.programs.gppkg.Gppkg', return_value=self.mock_gppkg),
patch('gppylib.programs.gppkg.UninstallPackage', return_value=self.mock_uninstall_package),
patch('os.listdir')
])
self.mock_logger = self.get_mock_from_apply_patch('logger')
self.mock_listdir = self.get_mock_from_apply_patch('listdir')
def test__remove_raises_when_gppkg_was_not_installed(self):
sys.argv = ["gppkg", "--remove", "sample"]
get_result_mock = Mock()
get_result_mock.stdout.strip.return_value = "RPM version 4.8.0"
self.mock_listdir.return_value = ['another.gppkg']
self.mock_cmd.get_results.return_value = get_result_mock
parser = GpPkgProgram.create_parser()
options, args = parser.parse_args()
with self.assertRaisesRegex(Exception, "Package sample has not been installed"):
self.subject = GpPkgProgram(options, args)
self.subject.run()
def test__remove_succeeds_when_gppkg_had_been_installed(self):
sys.argv = ["gppkg", "--remove", "sample"]
get_result_mock = Mock()
get_result_mock.stdout.strip.return_value = "RPM version 4.8.0"
self.mock_cmd.get_results.return_value = get_result_mock
self.mock_listdir.return_value = ['sample.gppkg', 'another.gppkg', 'sample2.gppkg']
self.mock_gppkg.from_package_path.return_value = []
self.mock_uninstall_package.run.return_value = None
parser = GpPkgProgram.create_parser()
options, args = parser.parse_args()
self.subject = GpPkgProgram(options, args)
self.subject.run()
self.mock_listdir.assert_called()
self.mock_uninstall_package.run.assert_called_once()
def test__input_matches_multiple_packages(self):
sys.argv = ["gppkg", "--remove", "sampl"]
get_result_mock = Mock()
get_result_mock.stdout.strip.return_value = "RPM version 4.8.0"
self.mock_cmd.get_results.return_value = get_result_mock
self.mock_listdir.return_value = ['sample.gppkg', 'sample2.gppkg', 'another.gppkg']
self.mock_gppkg.from_package_path.return_value = []
self.mock_uninstall_package.run.return_value = None
parser = GpPkgProgram.create_parser()
options, args = parser.parse_args()
self.subject = GpPkgProgram(options, args)
with self.assertRaisesRegex(Exception, "Remove request 'sampl' too broad. "
"Multiple packages match remove request: \( sample.gppkg, sample2.gppkg \)."):
self.subject.run()
self.assertFalse(self.mock_uninstall_package.run.called)
def test__input_exact_match_when_wildcard_would_have_more(self):
sys.argv = ["gppkg", "--remove", "sample"]
get_result_mock = Mock()
get_result_mock.stdout.strip.return_value = "RPM version 4.8.0"
self.mock_cmd.get_results.return_value = get_result_mock
self.mock_listdir.return_value = ['sample.gppkg', 'sample2.gppkg', 'another.gppkg']
self.mock_gppkg.from_package_path.return_value = []
self.mock_uninstall_package.run.return_value = None
parser = GpPkgProgram.create_parser()
options, args = parser.parse_args()
self.subject = GpPkgProgram(options, args)
self.subject.run()
self.mock_listdir.assert_called()
self.mock_uninstall_package.run.assert_called_once()
| 50wu/gpdb | gpMgmt/bin/gppylib/test/unit/test_unit_gppkg.py | Python | apache-2.0 | 3,975 |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from datetime import date
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_date04.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {'xl/charts/chart1.xml': ['<c:formatCode']}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'line'})
date_format = workbook.add_format({'num_format': 14})
chart.axis_ids = [51761152, 51762688]
worksheet.set_column('A:A', 12)
dates = [date(2013, 1, 1),
date(2013, 1, 2),
date(2013, 1, 3),
date(2013, 1, 4),
date(2013, 1, 5),
date(2013, 1, 6),
date(2013, 1, 7),
date(2013, 1, 8),
date(2013, 1, 9),
date(2013, 1, 10)]
values = [10, 30, 20, 40, 20, 60, 50, 40, 30, 30]
worksheet.write_column('A1', dates, date_format)
worksheet.write_column('B1', values)
chart.add_series({
'categories': '=Sheet1!$A$1:$A$10',
'values': '=Sheet1!$B$1:$B$10',
})
chart.set_x_axis({
'date_axis': True,
'minor_unit': 1,
'major_unit': 1,
'minor_unit_type': 'months',
'major_unit_type': 'years',
'num_format': 'dd/mm/yyyy',
'num_format_linked': True,
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| liukaijv/XlsxWriter | xlsxwriter/test/comparison/test_chart_date04.py | Python | bsd-2-clause | 2,223 |
""" DIRECT Nine DoF Manipulation Panel """
from direct.showbase.DirectObject import DirectObject
from direct.directtools.DirectGlobals import *
from direct.tkwidgets.AppShell import AppShell
from direct.tkwidgets.Dial import AngleDial
from direct.tkwidgets.Floater import Floater
from Tkinter import Button, Menubutton, Menu, StringVar
from pandac.PandaModules import *
import Tkinter, Pmw
"""
TODO:
Task to monitor pose
"""
class Placer(AppShell):
# Override class variables here
appname = 'Placer Panel'
frameWidth = 625
frameHeight = 215
usecommandarea = 0
usestatusarea = 0
def __init__(self, parent = None, **kw):
INITOPT = Pmw.INITOPT
optiondefs = (
('title', self.appname, None),
('nodePath', SEditor.camera, None),
)
self.defineoptions(kw, optiondefs)
# Call superclass initialization function
AppShell.__init__(self)
self.initialiseoptions(Placer)
# Accept the message from sceneEditor to update the information about the target nodePath
self.accept('placerUpdate', self.updatePlacer)
def appInit(self):
# Initialize state
self.tempCS = SEditor.group.attachNewNode('placerTempCS')
self.orbitFromCS = SEditor.group.attachNewNode(
'placerOrbitFromCS')
self.orbitToCS = SEditor.group.attachNewNode('placerOrbitToCS')
self.refCS = self.tempCS
# Dictionary keeping track of all node paths manipulated so far
self.nodePathDict = {}
self.nodePathDict['camera'] = SEditor.camera
self.nodePathDict['widget'] = SEditor.widget
self.nodePathNames = ['camera', 'widget', 'selected']
self.refNodePathDict = {}
self.refNodePathDict['parent'] = self['nodePath'].getParent()
self.refNodePathDict['render'] = render
self.refNodePathDict['camera'] = SEditor.camera
self.refNodePathDict['widget'] = SEditor.widget
self.refNodePathNames = ['parent', 'self', 'render',
'camera', 'widget', 'selected']
# Initial state
self.initPos = Vec3(0)
self.initHpr = Vec3(0)
self.initScale = Vec3(1)
self.deltaHpr = Vec3(0)
# Offset for orbital mode
self.posOffset = Vec3(0)
# Set up event hooks
self.undoEvents = [('DIRECT_undo', self.undoHook),
('DIRECT_pushUndo', self.pushUndoHook),
('DIRECT_undoListEmpty', self.undoListEmptyHook),
('DIRECT_redo', self.redoHook),
('DIRECT_pushRedo', self.pushRedoHook),
('DIRECT_redoListEmpty', self.redoListEmptyHook)]
for event, method in self.undoEvents:
self.accept(event, method)
# Init movement mode
self.movementMode = 'Relative To:'
def createInterface(self):
# The interior of the toplevel panel
interior = self.interior()
interior['relief'] = Tkinter.FLAT
# Add placer commands to menubar
self.menuBar.addmenu('Placer', 'Placer Panel Operations')
self.menuBar.addmenuitem('Placer', 'command',
'Zero Node Path',
label = 'Zero All',
command = self.zeroAll)
self.menuBar.addmenuitem('Placer', 'command',
'Reset Node Path',
label = 'Reset All',
command = self.resetAll)
self.menuBar.addmenuitem('Placer', 'command',
'Print Node Path Info',
label = 'Print Info',
command = self.printNodePathInfo)
self.menuBar.addmenuitem(
'Placer', 'command',
'Toggle widget visability',
label = 'Toggle Widget Vis',
command = SEditor.toggleWidgetVis)
self.menuBar.addmenuitem(
'Placer', 'command',
'Toggle widget manipulation mode',
label = 'Toggle Widget Mode',
command = SEditor.manipulationControl.toggleObjectHandlesMode)
# Get a handle to the menu frame
menuFrame = self.menuFrame
self.nodePathMenu = Pmw.ComboBox(
menuFrame, labelpos = Tkinter.W, label_text = 'Node Path:',
entry_width = 20,
selectioncommand = self.selectNodePathNamed,
scrolledlist_items = self.nodePathNames)
self.nodePathMenu.selectitem('selected')
self.nodePathMenuEntry = (
self.nodePathMenu.component('entryfield_entry'))
self.nodePathMenuBG = (
self.nodePathMenuEntry.configure('background')[3])
self.nodePathMenu.pack(side = 'left', fill = 'x', expand = 1)
self.bind(self.nodePathMenu, 'Select node path to manipulate')
modeMenu = Pmw.OptionMenu(menuFrame,
items = ('Relative To:',
'Orbit:'),
initialitem = 'Relative To:',
command = self.setMovementMode,
menubutton_width = 8)
modeMenu.pack(side = 'left', expand = 0)
self.bind(modeMenu, 'Select manipulation mode')
self.refNodePathMenu = Pmw.ComboBox(
menuFrame, entry_width = 16,
selectioncommand = self.selectRefNodePathNamed,
scrolledlist_items = self.refNodePathNames)
self.refNodePathMenu.selectitem('parent')
self.refNodePathMenuEntry = (
self.refNodePathMenu.component('entryfield_entry'))
self.refNodePathMenu.pack(side = 'left', fill = 'x', expand = 1)
self.bind(self.refNodePathMenu, 'Select relative node path')
self.undoButton = Button(menuFrame, text = 'Undo',
command = SEditor.undo)
if SEditor.undoList:
self.undoButton['state'] = 'normal'
else:
self.undoButton['state'] = 'disabled'
self.undoButton.pack(side = 'left', expand = 0)
self.bind(self.undoButton, 'Undo last operation')
self.redoButton = Button(menuFrame, text = 'Redo',
command = SEditor.redo)
if SEditor.redoList:
self.redoButton['state'] = 'normal'
else:
self.redoButton['state'] = 'disabled'
self.redoButton.pack(side = 'left', expand = 0)
self.bind(self.redoButton, 'Redo last operation')
# Create and pack the Pos Controls
posGroup = Pmw.Group(interior,
tag_pyclass = Menubutton,
tag_text = 'Position',
tag_font=('MSSansSerif', 14),
tag_activebackground = '#909090',
ring_relief = Tkinter.RIDGE)
posMenubutton = posGroup.component('tag')
self.bind(posMenubutton, 'Position menu operations')
posMenu = Menu(posMenubutton, tearoff = 0)
posMenu.add_command(label = 'Set to zero', command = self.zeroPos)
posMenu.add_command(label = 'Reset initial',
command = self.resetPos)
posMenubutton['menu'] = posMenu
posGroup.pack(side='left', fill = 'both', expand = 1)
posInterior = posGroup.interior()
# Create the dials
self.posX = self.createcomponent('posX', (), None,
Floater, (posInterior,),
text = 'X', relief = Tkinter.FLAT,
value = 0.0,
label_foreground = 'Red')
self.posX['commandData'] = ['x']
self.posX['preCallback'] = self.xformStart
self.posX['postCallback'] = self.xformStop
self.posX['callbackData'] = ['x']
self.posX.pack(expand=1,fill='both')
self.posY = self.createcomponent('posY', (), None,
Floater, (posInterior,),
text = 'Y', relief = Tkinter.FLAT,
value = 0.0,
label_foreground = '#00A000')
self.posY['commandData'] = ['y']
self.posY['preCallback'] = self.xformStart
self.posY['postCallback'] = self.xformStop
self.posY['callbackData'] = ['y']
self.posY.pack(expand=1,fill='both')
self.posZ = self.createcomponent('posZ', (), None,
Floater, (posInterior,),
text = 'Z', relief = Tkinter.FLAT,
value = 0.0,
label_foreground = 'Blue')
self.posZ['commandData'] = ['z']
self.posZ['preCallback'] = self.xformStart
self.posZ['postCallback'] = self.xformStop
self.posZ['callbackData'] = ['z']
self.posZ.pack(expand=1,fill='both')
# Create and pack the Hpr Controls
hprGroup = Pmw.Group(interior,
tag_pyclass = Menubutton,
tag_text = 'Orientation',
tag_font=('MSSansSerif', 14),
tag_activebackground = '#909090',
ring_relief = Tkinter.RIDGE)
hprMenubutton = hprGroup.component('tag')
self.bind(hprMenubutton, 'Orientation menu operations')
hprMenu = Menu(hprMenubutton, tearoff = 0)
hprMenu.add_command(label = 'Set to zero', command = self.zeroHpr)
hprMenu.add_command(label = 'Reset initial', command = self.resetHpr)
hprMenubutton['menu'] = hprMenu
hprGroup.pack(side='left',fill = 'both', expand = 1)
hprInterior = hprGroup.interior()
# Create the dials
self.hprH = self.createcomponent('hprH', (), None,
AngleDial, (hprInterior,),
style = 'mini',
text = 'H', value = 0.0,
relief = Tkinter.FLAT,
label_foreground = 'blue')
self.hprH['commandData'] = ['h']
self.hprH['preCallback'] = self.xformStart
self.hprH['postCallback'] = self.xformStop
self.hprH['callbackData'] = ['h']
self.hprH.pack(expand=1,fill='both')
self.hprP = self.createcomponent('hprP', (), None,
AngleDial, (hprInterior,),
style = 'mini',
text = 'P', value = 0.0,
relief = Tkinter.FLAT,
label_foreground = 'red')
self.hprP['commandData'] = ['p']
self.hprP['preCallback'] = self.xformStart
self.hprP['postCallback'] = self.xformStop
self.hprP['callbackData'] = ['p']
self.hprP.pack(expand=1,fill='both')
self.hprR = self.createcomponent('hprR', (), None,
AngleDial, (hprInterior,),
style = 'mini',
text = 'R', value = 0.0,
relief = Tkinter.FLAT,
label_foreground = '#00A000')
self.hprR['commandData'] = ['r']
self.hprR['preCallback'] = self.xformStart
self.hprR['postCallback'] = self.xformStop
self.hprR['callbackData'] = ['r']
self.hprR.pack(expand=1,fill='both')
# Create and pack the Scale Controls
# The available scaling modes
self.scalingMode = StringVar()
self.scalingMode.set('Scale Uniform')
# The scaling widgets
scaleGroup = Pmw.Group(interior,
tag_text = 'Scale Uniform',
tag_pyclass = Menubutton,
tag_font=('MSSansSerif', 14),
tag_activebackground = '#909090',
ring_relief = Tkinter.RIDGE)
self.scaleMenubutton = scaleGroup.component('tag')
self.bind(self.scaleMenubutton, 'Scale menu operations')
self.scaleMenubutton['textvariable'] = self.scalingMode
# Scaling menu
scaleMenu = Menu(self.scaleMenubutton, tearoff = 0)
scaleMenu.add_command(label = 'Set to unity',
command = self.unitScale)
scaleMenu.add_command(label = 'Reset initial',
command = self.resetScale)
scaleMenu.add_radiobutton(label = 'Scale Free',
variable = self.scalingMode)
scaleMenu.add_radiobutton(label = 'Scale Uniform',
variable = self.scalingMode)
scaleMenu.add_radiobutton(label = 'Scale Proportional',
variable = self.scalingMode)
self.scaleMenubutton['menu'] = scaleMenu
# Pack group widgets
scaleGroup.pack(side='left',fill = 'both', expand = 1)
scaleInterior = scaleGroup.interior()
# Create the dials
self.scaleX = self.createcomponent('scaleX', (), None,
Floater, (scaleInterior,),
text = 'X Scale',
relief = Tkinter.FLAT,
min = 0.0001, value = 1.0,
resetValue = 1.0,
label_foreground = 'Red')
self.scaleX['commandData'] = ['sx']
self.scaleX['callbackData'] = ['sx']
self.scaleX['preCallback'] = self.xformStart
self.scaleX['postCallback'] = self.xformStop
self.scaleX.pack(expand=1,fill='both')
self.scaleY = self.createcomponent('scaleY', (), None,
Floater, (scaleInterior,),
text = 'Y Scale',
relief = Tkinter.FLAT,
min = 0.0001, value = 1.0,
resetValue = 1.0,
label_foreground = '#00A000')
self.scaleY['commandData'] = ['sy']
self.scaleY['callbackData'] = ['sy']
self.scaleY['preCallback'] = self.xformStart
self.scaleY['postCallback'] = self.xformStop
self.scaleY.pack(expand=1,fill='both')
self.scaleZ = self.createcomponent('scaleZ', (), None,
Floater, (scaleInterior,),
text = 'Z Scale',
relief = Tkinter.FLAT,
min = 0.0001, value = 1.0,
resetValue = 1.0,
label_foreground = 'Blue')
self.scaleZ['commandData'] = ['sz']
self.scaleZ['callbackData'] = ['sz']
self.scaleZ['preCallback'] = self.xformStart
self.scaleZ['postCallback'] = self.xformStop
self.scaleZ.pack(expand=1,fill='both')
# Make sure appropriate labels are showing
self.setMovementMode('Relative To:')
# Set up placer for inital node path
self.selectNodePathNamed('init')
self.selectRefNodePathNamed('parent')
# Update place to reflect initial state
self.updatePlacer()
# Now that you're done setting up, attach commands
self.posX['command'] = self.xform
self.posY['command'] = self.xform
self.posZ['command'] = self.xform
self.hprH['command'] = self.xform
self.hprP['command'] = self.xform
self.hprR['command'] = self.xform
self.scaleX['command'] = self.xform
self.scaleY['command'] = self.xform
self.scaleZ['command'] = self.xform
### WIDGET OPERATIONS ###
def setMovementMode(self, movementMode):
# Set prefix
namePrefix = ''
self.movementMode = movementMode
if (movementMode == 'Relative To:'):
namePrefix = 'Relative '
elif (movementMode == 'Orbit:'):
namePrefix = 'Orbit '
# Update pos widgets
self.posX['text'] = namePrefix + 'X'
self.posY['text'] = namePrefix + 'Y'
self.posZ['text'] = namePrefix + 'Z'
# Update hpr widgets
if (movementMode == 'Orbit:'):
namePrefix = 'Orbit delta '
self.hprH['text'] = namePrefix + 'H'
self.hprP['text'] = namePrefix + 'P'
self.hprR['text'] = namePrefix + 'R'
# Update temp cs and initialize widgets
self.updatePlacer()
def setScalingMode(self):
if self['nodePath']:
scale = self['nodePath'].getScale()
if ((scale[0] != scale[1]) or
(scale[0] != scale[2]) or
(scale[1] != scale[2])):
self.scalingMode.set('Scale Free')
def selectNodePathNamed(self, name):
nodePath = None
if name == 'init':
nodePath = self['nodePath']
# Add Combo box entry for the initial node path
self.addNodePath(nodePath)
elif name == 'selected':
nodePath = SEditor.selected.last
# Add Combo box entry for this selected object
self.addNodePath(nodePath)
else:
nodePath = self.nodePathDict.get(name, None)
if (nodePath == None):
# See if this evaluates into a node path
try:
nodePath = eval(name)
if isinstance(nodePath, NodePath):
self.addNodePath(nodePath)
else:
# Good eval but not a node path, give up
nodePath = None
except:
# Bogus eval
nodePath = None
# Clear bogus entry from listbox
listbox = self.nodePathMenu.component('scrolledlist')
listbox.setlist(self.nodePathNames)
else:
if name == 'widget':
# Record relationship between selected nodes and widget
SEditor.selected.getWrtAll()
# Update active node path
self.setActiveNodePath(nodePath)
def setActiveNodePath(self, nodePath):
self['nodePath'] = nodePath
if self['nodePath']:
self.nodePathMenuEntry.configure(
background = self.nodePathMenuBG)
# Check to see if node path and ref node path are the same
if ((self.refCS != None) and
(self.refCS.id() == self['nodePath'].id())):
# Yes they are, use temp CS as ref
# This calls updatePlacer
self.setReferenceNodePath(self.tempCS)
# update listbox accordingly
self.refNodePathMenu.selectitem('parent')
else:
# Record initial value and initialize the widgets
self.updatePlacer()
# Record initial position
self.updateResetValues(self['nodePath'])
# Set scaling mode based on node path's current scale
self.setScalingMode()
else:
# Flash entry
self.nodePathMenuEntry.configure(background = 'Pink')
def selectRefNodePathNamed(self, name):
nodePath = None
if name == 'self':
nodePath = self.tempCS
elif name == 'selected':
nodePath = SEditor.selected.last
# Add Combo box entry for this selected object
self.addRefNodePath(nodePath)
elif name == 'parent':
nodePath = self['nodePath'].getParent()
else:
nodePath = self.refNodePathDict.get(name, None)
if (nodePath == None):
# See if this evaluates into a node path
try:
nodePath = eval(name)
if isinstance(nodePath, NodePath):
self.addRefNodePath(nodePath)
else:
# Good eval but not a node path, give up
nodePath = None
except:
# Bogus eval
nodePath = None
# Clear bogus entry from listbox
listbox = self.refNodePathMenu.component('scrolledlist')
listbox.setlist(self.refNodePathNames)
# Check to see if node path and ref node path are the same
if (nodePath != None) and (nodePath.id() == self['nodePath'].id()):
# Yes they are, use temp CS and update listbox accordingly
nodePath = self.tempCS
self.refNodePathMenu.selectitem('parent')
# Update ref node path
self.setReferenceNodePath(nodePath)
def setReferenceNodePath(self, nodePath):
self.refCS = nodePath
if self.refCS:
self.refNodePathMenuEntry.configure(
background = self.nodePathMenuBG)
# Update placer to reflect new state
self.updatePlacer()
else:
# Flash entry
self.refNodePathMenuEntry.configure(background = 'Pink')
def addNodePath(self, nodePath):
self.addNodePathToDict(nodePath, self.nodePathNames,
self.nodePathMenu, self.nodePathDict)
def addRefNodePath(self, nodePath):
self.addNodePathToDict(nodePath, self.refNodePathNames,
self.refNodePathMenu, self.refNodePathDict)
def addNodePathToDict(self, nodePath, names, menu, dict):
if not nodePath:
return
# Get node path's name
name = nodePath.getName()
if name in ['parent', 'render', 'camera']:
dictName = name
else:
# Generate a unique name for the dict
dictName = name + '-' + `nodePath.id()`
if not dict.has_key(dictName):
# Update combo box to include new item
names.append(dictName)
listbox = menu.component('scrolledlist')
listbox.setlist(names)
# Add new item to dictionary
dict[dictName] = nodePath
menu.selectitem(dictName)
def updatePlacer(self):
pos = Vec3(0)
hpr = Vec3(0)
scale = Vec3(1)
np = self['nodePath']
if (np != None) and isinstance(np, NodePath):
# Update temp CS
self.updateAuxiliaryCoordinateSystems()
# Update widgets
if self.movementMode == 'Orbit:':
pos.assign(self.posOffset)
hpr.assign(ZERO_VEC)
scale.assign(np.getScale())
elif self.refCS:
pos.assign(np.getPos(self.refCS))
hpr.assign(np.getHpr(self.refCS))
scale.assign(np.getScale())
self.updatePosWidgets(pos)
self.updateHprWidgets(hpr)
self.updateScaleWidgets(scale)
def updateAuxiliaryCoordinateSystems(self):
# Temp CS
self.tempCS.setPosHpr(self['nodePath'], 0,0,0,0,0,0)
# Orbit CS
# At reference
self.orbitFromCS.setPos(self.refCS, 0,0,0)
# But aligned with target
self.orbitFromCS.setHpr(self['nodePath'], 0,0,0)
# Also update to CS
self.orbitToCS.setPosHpr(self.orbitFromCS, 0,0,0,0,0,0)
# Get offset from origin
self.posOffset.assign(self['nodePath'].getPos(self.orbitFromCS))
### NODE PATH TRANSFORMATION OPERATIONS ###
def xform(self, value, axis):
if axis in ['sx', 'sy', 'sz']:
self.xformScale(value,axis)
elif self.movementMode == 'Relative To:':
self.xformRelative(value, axis)
elif self.movementMode == 'Orbit:':
self.xformOrbit(value, axis)
if self.nodePathMenu.get() == 'widget':
if SEditor.manipulationControl.fSetCoa:
# Update coa based on current widget position
SEditor.selected.last.mCoa2Dnp.assign(
SEditor.widget.getMat(SEditor.selected.last))
else:
# Move the objects with the widget
SEditor.selected.moveWrtWidgetAll()
def xformStart(self, data):
# Record undo point
self.pushUndo()
# If moving widget kill follow task and update wrts
if self.nodePathMenu.get() == 'widget':
taskMgr.remove('followSelectedNodePath')
# Record relationship between selected nodes and widget
SEditor.selected.getWrtAll()
# Record initial state
self.deltaHpr = self['nodePath'].getHpr(self.refCS)
# Update placer to reflect new state
self.updatePlacer()
def xformStop(self, data):
# Throw event to signal manipulation done
# Send nodepath as a list
messenger.send('DIRECT_manipulateObjectCleanup', [[self['nodePath']]])
# Update placer to reflect new state
self.updatePlacer()
# If moving widget restart follow task
if self.nodePathMenu.get() == 'widget':
# Restart followSelectedNodePath task
SEditor.manipulationControl.spawnFollowSelectedNodePathTask()
def xformRelative(self, value, axis):
nodePath = self['nodePath']
if (nodePath != None) and (self.refCS != None):
if axis == 'x':
nodePath.setX(self.refCS, value)
elif axis == 'y':
nodePath.setY(self.refCS, value)
elif axis == 'z':
nodePath.setZ(self.refCS, value)
else:
if axis == 'h':
self.deltaHpr.setX(value)
elif axis == 'p':
self.deltaHpr.setY(value)
elif axis == 'r':
self.deltaHpr.setZ(value)
# Put node path at new hpr
nodePath.setHpr(self.refCS, self.deltaHpr)
def xformOrbit(self, value, axis):
nodePath = self['nodePath']
if ((nodePath != None) and (self.refCS != None) and
(self.orbitFromCS != None) and (self.orbitToCS != None)):
if axis == 'x':
self.posOffset.setX(value)
elif axis == 'y':
self.posOffset.setY(value)
elif axis == 'z':
self.posOffset.setZ(value)
elif axis == 'h':
self.orbitToCS.setH(self.orbitFromCS, value)
elif axis == 'p':
self.orbitToCS.setP(self.orbitFromCS, value)
elif axis == 'r':
self.orbitToCS.setR(self.orbitFromCS, value)
nodePath.setPosHpr(self.orbitToCS, self.posOffset, ZERO_VEC)
def xformScale(self, value, axis):
if self['nodePath']:
mode = self.scalingMode.get()
scale = self['nodePath'].getScale()
if mode == 'Scale Free':
if axis == 'sx':
scale.setX(value)
elif axis == 'sy':
scale.setY(value)
elif axis == 'sz':
scale.setZ(value)
elif mode == 'Scale Uniform':
scale.set(value,value,value)
elif mode == 'Scale Proportional':
if axis == 'sx':
sf = value/scale[0]
elif axis == 'sy':
sf = value/scale[1]
elif axis == 'sz':
sf = value/scale[2]
scale = scale * sf
self['nodePath'].setScale(scale)
def updatePosWidgets(self, pos):
self.posX.set(pos[0])
self.posY.set(pos[1])
self.posZ.set(pos[2])
def updateHprWidgets(self, hpr):
self.hprH.set(hpr[0])
self.hprP.set(hpr[1])
self.hprR.set(hpr[2])
def updateScaleWidgets(self, scale):
self.scaleX.set(scale[0])
self.scaleY.set(scale[1])
self.scaleZ.set(scale[2])
def zeroAll(self):
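        # Reset the pos/hpr widgets to zero and the scale widgets to one,
        # recorded as a single undoable operation.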
self.xformStart(None)
self.updatePosWidgets(ZERO_VEC)
self.updateHprWidgets(ZERO_VEC)
self.updateScaleWidgets(UNIT_VEC)
self.xformStop(None)
def zeroPos(self):
self.xformStart(None)
self.updatePosWidgets(ZERO_VEC)
self.xformStop(None)
def zeroHpr(self):
self.xformStart(None)
self.updateHprWidgets(ZERO_VEC)
self.xformStop(None)
def unitScale(self):
self.xformStart(None)
self.updateScaleWidgets(UNIT_VEC)
self.xformStop(None)
def updateResetValues(self, nodePath):
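        # Snapshot the node path's current transform; the reset buttons
        # restore these values.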
self.initPos.assign(nodePath.getPos())
self.posX['resetValue'] = self.initPos[0]
self.posY['resetValue'] = self.initPos[1]
self.posZ['resetValue'] = self.initPos[2]
self.initHpr.assign(nodePath.getHpr())
self.hprH['resetValue'] = self.initHpr[0]
self.hprP['resetValue'] = self.initHpr[1]
self.hprR['resetValue'] = self.initHpr[2]
self.initScale.assign(nodePath.getScale())
self.scaleX['resetValue'] = self.initScale[0]
self.scaleY['resetValue'] = self.initScale[1]
self.scaleZ['resetValue'] = self.initScale[2]
def resetAll(self):
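        # Restore the transform recorded by updateResetValues.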
if self['nodePath']:
self.xformStart(None)
self['nodePath'].setPosHprScale(
self.initPos, self.initHpr, self.initScale)
self.xformStop(None)
def resetPos(self):
if self['nodePath']:
self.xformStart(None)
self['nodePath'].setPos(self.initPos)
self.xformStop(None)
def resetHpr(self):
if self['nodePath']:
self.xformStart(None)
self['nodePath'].setHpr(self.initHpr)
self.xformStop(None)
def resetScale(self):
if self['nodePath']:
self.xformStart(None)
self['nodePath'].setScale(self.initScale)
self.xformStop(None)
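    ### UNDO/REDO AND UTILITY OPERATIONS ###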
def pushUndo(self, fResetRedo = 1):
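        # Record the node path's current state on the editor-wide undo stack.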
SEditor.pushUndo([self['nodePath']])
def undoHook(self, nodePathList = []):
# Reflect new changes
self.updatePlacer()
def pushUndoHook(self):
# Make sure button is reactivated
self.undoButton.configure(state = 'normal')
def undoListEmptyHook(self):
# Make sure button is deactivated
self.undoButton.configure(state = 'disabled')
def pushRedo(self):
SEditor.pushRedo([self['nodePath']])
def redoHook(self, nodePathList = []):
# Reflect new changes
self.updatePlacer()
def pushRedoHook(self):
# Make sure button is reactivated
self.redoButton.configure(state = 'normal')
def redoListEmptyHook(self):
# Make sure button is deactivated
self.redoButton.configure(state = 'disabled')
def printNodePathInfo(self):
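        # Dump the node path's name and current transform to the console in a
        # form that can be pasted back into code.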
np = self['nodePath']
if np:
name = np.getName()
pos = np.getPos()
hpr = np.getHpr()
scale = np.getScale()
posString = '%.2f, %.2f, %.2f' % (pos[0], pos[1], pos[2])
hprString = '%.2f, %.2f, %.2f' % (hpr[0], hpr[1], hpr[2])
scaleString = '%.2f, %.2f, %.2f' % (scale[0], scale[1], scale[2])
print 'NodePath: %s' % name
print 'Pos: %s' % posString
print 'Hpr: %s' % hprString
print 'Scale: %s' % scaleString
print ('%s.setPosHprScale(%s, %s, %s)' %
(name, posString, hprString, scaleString))
def onDestroy(self, event):
# Remove hooks
        for eventName, method in self.undoEvents:
            self.ignore(eventName)
self.tempCS.removeNode()
self.orbitFromCS.removeNode()
self.orbitToCS.removeNode()
# send out a message to let sceneEditor know that placer panel has been closed.
        # Also, stop accepting the update message from sceneEditor
messenger.send('Placer_close')
self.ignore('placerUpdate')
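# Convenience helper: pop up a Placer panel for a given node path.
# Example usage (a sketch, assuming a running scene-editor session with a
# node named 'myModel' parented somewhere under render):
#   placerPanel = place(render.find('**/myModel'))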
def place(nodePath):
return Placer(nodePath = nodePath)
######################################################################
# Create demo in root window for testing.
if __name__ == '__main__':
root = Pmw.initialise()
widget = Placer()
| hj3938/panda3d | contrib/src/sceneeditor/sePlacer.py | Python | bsd-3-clause | 33,063 |