repo_name | path | copies | size | content | license
---|---|---|---|---|---
jhawkesworth/ansible | lib/ansible/modules/remote_management/oneview/oneview_logical_interconnect_group_facts.py | 115 | 3101 | #!/usr/bin/python
# Copyright: (c) 2016-2017, Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_logical_interconnect_group_facts
short_description: Retrieve facts about one or more of the OneView Logical Interconnect Groups
description:
- Retrieve facts about one or more of the Logical Interconnect Groups from OneView
version_added: "2.5"
requirements:
- hpOneView >= 2.0.1
author:
- Felipe Bulsoni (@fgbulsoni)
- Thiago Miotto (@tmiotto)
- Adriane Cardozo (@adriane-cardozo)
options:
name:
description:
- Logical Interconnect Group name.
extends_documentation_fragment:
- oneview
- oneview.factsparams
'''
EXAMPLES = '''
- name: Gather facts about all Logical Interconnect Groups
oneview_logical_interconnect_group_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 500
no_log: true
delegate_to: localhost
- debug: var=logical_interconnect_groups
- name: Gather paginated, filtered and sorted facts about Logical Interconnect Groups
oneview_logical_interconnect_group_facts:
params:
start: 0
count: 3
sort: name:descending
filter: name=LIGName
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 500
no_log: true
delegate_to: localhost
- debug: var=logical_interconnect_groups
- name: Gather facts about a Logical Interconnect Group by name
oneview_logical_interconnect_group_facts:
name: logical interconnect group name
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 500
no_log: true
delegate_to: localhost
- debug: var=logical_interconnect_groups
'''
RETURN = '''
logical_interconnect_groups:
description: Has all the OneView facts about the Logical Interconnect Groups.
returned: Always, but can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModuleBase
class LogicalInterconnectGroupFactsModule(OneViewModuleBase):
def __init__(self):
argument_spec = dict(
name=dict(type='str'),
params=dict(type='dict'),
)
super(LogicalInterconnectGroupFactsModule, self).__init__(additional_arg_spec=argument_spec)
def execute_module(self):
if self.module.params.get('name'):
ligs = self.oneview_client.logical_interconnect_groups.get_by('name', self.module.params['name'])
else:
ligs = self.oneview_client.logical_interconnect_groups.get_all(**self.facts_params)
return dict(changed=False, ansible_facts=dict(logical_interconnect_groups=ligs))
def main():
LogicalInterconnectGroupFactsModule().run()
if __name__ == '__main__':
main()
| gpl-3.0 |
ganeti-github-testing/ganeti-test-1 | test/py/ganeti.luxi_unittest.py | 9 | 1635 | #!/usr/bin/python
#
# Copyright (C) 2010, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for unittesting the luxi module.
Currently empty (after all the tests moved to ganeti.rpc.client_unittest.py)."""
import unittest
from ganeti import constants
from ganeti import errors
from ganeti import luxi
from ganeti import serializer
import testutils
| bsd-2-clause |
weihautin/anki | aqt/forms/preview.py | 1 | 1921 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'designer/preview.ui'
#
# Created: Fri Aug 22 00:57:31 2014
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(335, 282)
self.verticalLayout_3 = QtGui.QVBoxLayout(Form)
self.verticalLayout_3.setMargin(0)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.groupBox = QtGui.QGroupBox(Form)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.frontPrevBox = QtGui.QVBoxLayout(self.groupBox)
self.frontPrevBox.setMargin(0)
self.frontPrevBox.setObjectName(_fromUtf8("frontPrevBox"))
self.verticalLayout_3.addWidget(self.groupBox)
self.groupBox_2 = QtGui.QGroupBox(Form)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.backPrevBox = QtGui.QVBoxLayout(self.groupBox_2)
self.backPrevBox.setMargin(0)
self.backPrevBox.setObjectName(_fromUtf8("backPrevBox"))
self.verticalLayout_3.addWidget(self.groupBox_2)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_("Form"))
self.groupBox.setTitle(_("Front Preview"))
self.groupBox_2.setTitle(_("Back Preview"))
| agpl-3.0 |
carljm/django | tests/proxy_models/tests.py | 20 | 15823 | from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.auth.models import User as AuthUser
from django.contrib.contenttypes.models import ContentType
from django.core import checks, management
from django.db import DEFAULT_DB_ALIAS, models
from django.db.models import signals
from django.test import TestCase, override_settings
from django.test.utils import isolate_apps
from django.urls import reverse
from .admin import admin as force_admin_model_registration # NOQA
from .models import (
Abstract, BaseUser, Bug, Country, Improvement, Issue, LowerStatusPerson,
MultiUserProxy, MyPerson, MyPersonProxy, OtherPerson, Person, ProxyBug,
ProxyImprovement, ProxyProxyBug, ProxyTrackerUser, State, StateProxy,
StatusPerson, TrackerUser, User, UserProxy, UserProxyProxy,
)
class ProxyModelTests(TestCase):
def test_same_manager_queries(self):
"""
The MyPerson model should be generating the same database queries as
the Person model (when the same manager is used in each case).
"""
my_person_sql = MyPerson.other.all().query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
person_sql = Person.objects.order_by("name").query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
self.assertEqual(my_person_sql, person_sql)
def test_inheritance_new_table(self):
"""
The StatusPerson model should have its own table (it's using ORM-level
inheritance).
"""
sp_sql = StatusPerson.objects.all().query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
p_sql = Person.objects.all().query.get_compiler(
DEFAULT_DB_ALIAS).as_sql()
self.assertNotEqual(sp_sql, p_sql)
def test_basic_proxy(self):
"""
Creating a Person makes them accessible through the MyPerson proxy.
"""
person = Person.objects.create(name="Foo McBar")
self.assertEqual(len(Person.objects.all()), 1)
self.assertEqual(len(MyPerson.objects.all()), 1)
self.assertEqual(MyPerson.objects.get(name="Foo McBar").id, person.id)
self.assertFalse(MyPerson.objects.get(id=person.id).has_special_name())
def test_no_proxy(self):
"""
Person is not proxied by StatusPerson subclass.
"""
Person.objects.create(name="Foo McBar")
self.assertEqual(list(StatusPerson.objects.all()), [])
def test_basic_proxy_reverse(self):
"""
A new MyPerson also shows up as a standard Person.
"""
MyPerson.objects.create(name="Bazza del Frob")
self.assertEqual(len(MyPerson.objects.all()), 1)
self.assertEqual(len(Person.objects.all()), 1)
LowerStatusPerson.objects.create(status="low", name="homer")
lsps = [lsp.name for lsp in LowerStatusPerson.objects.all()]
self.assertEqual(lsps, ["homer"])
def test_correct_type_proxy_of_proxy(self):
"""
Correct type when querying a proxy of proxy
"""
Person.objects.create(name="Foo McBar")
MyPerson.objects.create(name="Bazza del Frob")
LowerStatusPerson.objects.create(status="low", name="homer")
pp = sorted(mpp.name for mpp in MyPersonProxy.objects.all())
self.assertEqual(pp, ['Bazza del Frob', 'Foo McBar', 'homer'])
def test_proxy_included_in_ancestors(self):
"""
Proxy models are included in the ancestors for a model's DoesNotExist
and MultipleObjectsReturned
"""
Person.objects.create(name="Foo McBar")
MyPerson.objects.create(name="Bazza del Frob")
LowerStatusPerson.objects.create(status="low", name="homer")
max_id = Person.objects.aggregate(max_id=models.Max('id'))['max_id']
with self.assertRaises(Person.DoesNotExist):
MyPersonProxy.objects.get(name='Zathras')
with self.assertRaises(Person.MultipleObjectsReturned):
MyPersonProxy.objects.get(id__lt=max_id + 1)
with self.assertRaises(Person.DoesNotExist):
StatusPerson.objects.get(name='Zathras')
StatusPerson.objects.create(name='Bazza Jr.')
StatusPerson.objects.create(name='Foo Jr.')
max_id = Person.objects.aggregate(max_id=models.Max('id'))['max_id']
with self.assertRaises(Person.MultipleObjectsReturned):
StatusPerson.objects.get(id__lt=max_id + 1)
def test_abstract_base_with_model_fields(self):
msg = "Abstract base class containing model fields not permitted for proxy model 'NoAbstract'."
with self.assertRaisesMessage(TypeError, msg):
class NoAbstract(Abstract):
class Meta:
proxy = True
def test_too_many_concrete_classes(self):
msg = "Proxy model 'TooManyBases' has more than one non-abstract model base class."
with self.assertRaisesMessage(TypeError, msg):
class TooManyBases(User, Person):
class Meta:
proxy = True
def test_no_base_classes(self):
msg = "Proxy model 'NoBaseClasses' has no non-abstract model base class."
with self.assertRaisesMessage(TypeError, msg):
class NoBaseClasses(models.Model):
class Meta:
proxy = True
@isolate_apps('proxy_models')
def test_new_fields(self):
class NoNewFields(Person):
newfield = models.BooleanField()
class Meta:
proxy = True
errors = NoNewFields.check()
expected = [
checks.Error(
"Proxy model 'NoNewFields' contains model fields.",
id='models.E017',
)
]
self.assertEqual(errors, expected)
@override_settings(TEST_SWAPPABLE_MODEL='proxy_models.AlternateModel')
@isolate_apps('proxy_models')
def test_swappable(self):
class SwappableModel(models.Model):
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
class AlternateModel(models.Model):
pass
# You can't proxy a swapped model
with self.assertRaises(TypeError):
class ProxyModel(SwappableModel):
class Meta:
proxy = True
def test_myperson_manager(self):
Person.objects.create(name="fred")
Person.objects.create(name="wilma")
Person.objects.create(name="barney")
resp = [p.name for p in MyPerson.objects.all()]
self.assertEqual(resp, ['barney', 'fred'])
resp = [p.name for p in MyPerson._default_manager.all()]
self.assertEqual(resp, ['barney', 'fred'])
def test_otherperson_manager(self):
Person.objects.create(name="fred")
Person.objects.create(name="wilma")
Person.objects.create(name="barney")
resp = [p.name for p in OtherPerson.objects.all()]
self.assertEqual(resp, ['barney', 'wilma'])
resp = [p.name for p in OtherPerson.excluder.all()]
self.assertEqual(resp, ['barney', 'fred'])
resp = [p.name for p in OtherPerson._default_manager.all()]
self.assertEqual(resp, ['barney', 'wilma'])
def test_permissions_created(self):
from django.contrib.auth.models import Permission
Permission.objects.get(name="May display users information")
def test_proxy_model_signals(self):
"""
Test save signals for proxy models
"""
output = []
def make_handler(model, event):
def _handler(*args, **kwargs):
output.append('%s %s save' % (model, event))
return _handler
h1 = make_handler('MyPerson', 'pre')
h2 = make_handler('MyPerson', 'post')
h3 = make_handler('Person', 'pre')
h4 = make_handler('Person', 'post')
signals.pre_save.connect(h1, sender=MyPerson)
signals.post_save.connect(h2, sender=MyPerson)
signals.pre_save.connect(h3, sender=Person)
signals.post_save.connect(h4, sender=Person)
MyPerson.objects.create(name="dino")
self.assertEqual(output, [
'MyPerson pre save',
'MyPerson post save'
])
output = []
h5 = make_handler('MyPersonProxy', 'pre')
h6 = make_handler('MyPersonProxy', 'post')
signals.pre_save.connect(h5, sender=MyPersonProxy)
signals.post_save.connect(h6, sender=MyPersonProxy)
MyPersonProxy.objects.create(name="pebbles")
self.assertEqual(output, [
'MyPersonProxy pre save',
'MyPersonProxy post save'
])
signals.pre_save.disconnect(h1, sender=MyPerson)
signals.post_save.disconnect(h2, sender=MyPerson)
signals.pre_save.disconnect(h3, sender=Person)
signals.post_save.disconnect(h4, sender=Person)
signals.pre_save.disconnect(h5, sender=MyPersonProxy)
signals.post_save.disconnect(h6, sender=MyPersonProxy)
def test_content_type(self):
ctype = ContentType.objects.get_for_model
self.assertIs(ctype(Person), ctype(OtherPerson))
def test_user_proxy_models(self):
User.objects.create(name='Bruce')
resp = [u.name for u in User.objects.all()]
self.assertEqual(resp, ['Bruce'])
resp = [u.name for u in UserProxy.objects.all()]
self.assertEqual(resp, ['Bruce'])
resp = [u.name for u in UserProxyProxy.objects.all()]
self.assertEqual(resp, ['Bruce'])
self.assertEqual([u.name for u in MultiUserProxy.objects.all()], ['Bruce'])
def test_proxy_for_model(self):
self.assertEqual(UserProxy, UserProxyProxy._meta.proxy_for_model)
def test_concrete_model(self):
self.assertEqual(User, UserProxyProxy._meta.concrete_model)
def test_proxy_delete(self):
"""
Proxy objects can be deleted
"""
User.objects.create(name='Bruce')
u2 = UserProxy.objects.create(name='George')
resp = [u.name for u in UserProxy.objects.all()]
self.assertEqual(resp, ['Bruce', 'George'])
u2.delete()
resp = [u.name for u in UserProxy.objects.all()]
self.assertEqual(resp, ['Bruce'])
def test_select_related(self):
"""
We can still use `select_related()` to include related models in our
querysets.
"""
country = Country.objects.create(name='Australia')
State.objects.create(name='New South Wales', country=country)
resp = [s.name for s in State.objects.select_related()]
self.assertEqual(resp, ['New South Wales'])
resp = [s.name for s in StateProxy.objects.select_related()]
self.assertEqual(resp, ['New South Wales'])
self.assertEqual(StateProxy.objects.get(name='New South Wales').name, 'New South Wales')
resp = StateProxy.objects.select_related().get(name='New South Wales')
self.assertEqual(resp.name, 'New South Wales')
def test_filter_proxy_relation_reverse(self):
tu = TrackerUser.objects.create(name='Contributor', status='contrib')
ptu = ProxyTrackerUser.objects.get()
issue = Issue.objects.create(assignee=tu)
self.assertEqual(tu.issues.get(), issue)
self.assertEqual(ptu.issues.get(), issue)
self.assertSequenceEqual(TrackerUser.objects.filter(issues=issue), [tu])
self.assertSequenceEqual(ProxyTrackerUser.objects.filter(issues=issue), [ptu])
def test_proxy_bug(self):
contributor = ProxyTrackerUser.objects.create(name='Contributor', status='contrib')
someone = BaseUser.objects.create(name='Someone')
Bug.objects.create(summary='fix this', version='1.1beta', assignee=contributor, reporter=someone)
pcontributor = ProxyTrackerUser.objects.create(name='OtherContributor', status='proxy')
Improvement.objects.create(
summary='improve that', version='1.1beta',
assignee=contributor, reporter=pcontributor,
associated_bug=ProxyProxyBug.objects.all()[0],
)
# Related field filter on proxy
resp = ProxyBug.objects.get(version__icontains='beta')
self.assertEqual(repr(resp), '<ProxyBug: ProxyBug:fix this>')
# Select related + filter on proxy
resp = ProxyBug.objects.select_related().get(version__icontains='beta')
self.assertEqual(repr(resp), '<ProxyBug: ProxyBug:fix this>')
# Proxy of proxy, select_related + filter
resp = ProxyProxyBug.objects.select_related().get(
version__icontains='beta'
)
self.assertEqual(repr(resp), '<ProxyProxyBug: ProxyProxyBug:fix this>')
# Select related + filter on a related proxy field
resp = ProxyImprovement.objects.select_related().get(
reporter__name__icontains='butor'
)
self.assertEqual(
repr(resp),
'<ProxyImprovement: ProxyImprovement:improve that>'
)
# Select related + filter on a related proxy of proxy field
resp = ProxyImprovement.objects.select_related().get(
associated_bug__summary__icontains='fix'
)
self.assertEqual(
repr(resp),
'<ProxyImprovement: ProxyImprovement:improve that>'
)
def test_proxy_load_from_fixture(self):
management.call_command('loaddata', 'mypeople.json', verbosity=0)
p = MyPerson.objects.get(pk=100)
self.assertEqual(p.name, 'Elvis Presley')
def test_eq(self):
self.assertEqual(MyPerson(id=100), Person(id=100))
@override_settings(ROOT_URLCONF='proxy_models.urls')
class ProxyModelAdminTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = AuthUser.objects.create(is_superuser=True, is_staff=True)
cls.tu1 = ProxyTrackerUser.objects.create(name='Django Pony', status='emperor')
cls.i1 = Issue.objects.create(summary="Pony's Issue", assignee=cls.tu1)
def test_cascade_delete_proxy_model_admin_warning(self):
"""
Test that the admin warns about cascade-deleting models that reference
the concrete model when a proxy object is deleted.
"""
tracker_user = TrackerUser.objects.all()[0]
base_user = BaseUser.objects.all()[0]
issue = Issue.objects.all()[0]
with self.assertNumQueries(6):
collector = admin.utils.NestedObjects('default')
collector.collect(ProxyTrackerUser.objects.all())
self.assertIn(tracker_user, collector.edges.get(None, ()))
self.assertIn(base_user, collector.edges.get(None, ()))
self.assertIn(issue, collector.edges.get(tracker_user, ()))
def test_delete_str_in_model_admin(self):
"""
Test if the admin delete page shows the correct string representation
for a proxy model.
"""
user = TrackerUser.objects.get(name='Django Pony')
proxy = ProxyTrackerUser.objects.get(name='Django Pony')
user_str = 'Tracker user: <a href="%s">%s</a>' % (
reverse('admin_proxy:proxy_models_trackeruser_change', args=(user.pk,)), user
)
proxy_str = 'Proxy tracker user: <a href="%s">%s</a>' % (
reverse('admin_proxy:proxy_models_proxytrackeruser_change', args=(proxy.pk,)), proxy
)
self.client.force_login(self.superuser)
response = self.client.get(reverse('admin_proxy:proxy_models_trackeruser_delete', args=(user.pk,)))
delete_str = response.context['deleted_objects'][0]
self.assertEqual(delete_str, user_str)
response = self.client.get(reverse('admin_proxy:proxy_models_proxytrackeruser_delete', args=(proxy.pk,)))
delete_str = response.context['deleted_objects'][0]
self.assertEqual(delete_str, proxy_str)
| bsd-3-clause |
catapult-project/catapult | third_party/google-endpoints/endpoints/protojson.py | 7 | 3743 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Endpoints-specific implementation of ProtoRPC's ProtoJson class."""
import base64
from protorpc import messages
from protorpc import protojson
# pylint: disable=g-bad-name
__all__ = ['EndpointsProtoJson']
class EndpointsProtoJson(protojson.ProtoJson):
"""Endpoints-specific implementation of ProtoRPC's ProtoJson class.
We need to adjust the way some types of data are encoded to ensure they're
consistent with the existing API pipeline. This class adjusts the JSON
encoding as needed.
This may be used in a multithreaded environment, so take care to ensure
that this class (and its parent, protojson.ProtoJson) remain thread-safe.
"""
def encode_field(self, field, value):
"""Encode a python field value to a JSON value.
Args:
field: A ProtoRPC field instance.
value: A python value supported by field.
Returns:
A JSON serializable value appropriate for field.
"""
# Override the handling of 64-bit integers, so they're always encoded
# as strings.
if (isinstance(field, messages.IntegerField) and
field.variant in (messages.Variant.INT64,
messages.Variant.UINT64,
messages.Variant.SINT64)):
if value not in (None, [], ()):
# Convert and replace the value.
if isinstance(value, list):
value = [str(subvalue) for subvalue in value]
else:
value = str(value)
return value
return super(EndpointsProtoJson, self).encode_field(field, value)
@staticmethod
def __pad_value(value, pad_len_multiple, pad_char):
"""Add padding characters to the value if needed.
Args:
value: The string value to be padded.
pad_len_multiple: Pad the result so its length is a multiple
of pad_len_multiple.
pad_char: The character to use for padding.
Returns:
The string value with padding characters added.
"""
assert pad_len_multiple > 0
assert len(pad_char) == 1
padding_length = (pad_len_multiple -
(len(value) % pad_len_multiple)) % pad_len_multiple
return value + pad_char * padding_length
def decode_field(self, field, value):
"""Decode a JSON value to a python value.
Args:
field: A ProtoRPC field instance.
value: A serialized JSON value.
Returns:
A Python value compatible with field.
"""
# Override BytesField handling. Client libraries typically use a url-safe
# encoding. b64decode doesn't handle these gracefully. urlsafe_b64decode
# handles both cases safely. Also add padding if the padding is incorrect.
if isinstance(field, messages.BytesField):
try:
# Need to call str(value) because ProtoRPC likes to pass values
# as unicode, and urlsafe_b64decode can only handle bytes.
padded_value = self.__pad_value(str(value), 4, '=')
return base64.urlsafe_b64decode(padded_value)
except (TypeError, UnicodeEncodeError), err:
raise messages.DecodeError('Base64 decoding error: %s' % err)
return super(EndpointsProtoJson, self).decode_field(field, value)
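# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical example of the int64-to-string behaviour implemented
# in encode_field above. The message class and value are invented purely for
# demonstration; only standard protorpc calls are used.
if __name__ == '__main__':
    class _Sample(messages.Message):
        big = messages.IntegerField(1, variant=messages.Variant.INT64)
    # 64-bit integers are rendered as JSON strings, e.g. {"big": "1099511627776"}.
    print(EndpointsProtoJson().encode_message(_Sample(big=2 ** 40)))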
| bsd-3-clause |
inspirehep/beard-server | tests/test_celery.py | 1 | 8690 | # -*- coding: utf-8 -*-
#
# This file is part of Inspire.
# Copyright (C) 2016 CERN.
#
# Inspire is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Inspire is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Inspire; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Test Celery tasks."""
from __future__ import absolute_import, division, print_function, \
unicode_literals
import pytest
@pytest.mark.xfail(reason="Need to rebuild linkage.dat with scikit-learn-18.x")
def test_make_clusters_new_signatures():
"""Test if signatures will be allocated in bucket for new signatures.
This test checks if new signatures will be clustered together,
and then put in the second bucket, which contains new signatures.
"""
from beard_server.tasks import make_clusters
records = [
{
"title": "Towards graphene-based detectors for" +
" dark matter directional detection",
"year": 2015,
"publication_id": 1395222,
"authors": [
"Wang, Shang-Yung"
]
}, {
"title": "Induced Einstein-Kalb-Ramond theory and" +
" the black hole",
"year": 1996,
"publication_id": 428605,
"authors": [
"Kao, W.F.",
"Chyi, Tzuu-Kang",
"Dai, W.B.",
"Wang, Shang-Yung",
"Lin, Shih-Yuun"
]
}
]
signatures = [
{
"author_affiliation": "Taiwan, Natl. Chiao Tung U.",
"author_name": "Wang, Shang-Yung",
"publication_id": 1395222,
"signature_id": "Wang_1395222",
},
{
"author_affiliation": "Taiwan, Natl. Chiao Tung U.",
"author_name": "Wang, Shang-Yung",
"publication_id": 428605,
"signature_id": "Wang_428605",
},
{
"author_affiliation": "Taiwan, Natl. Chiao Tung U.",
"author_name": "Lin, Shih-Yuin",
"publication_id": 428605,
"signature_id": "Lin_428605",
}
]
# Bucket with new clusters only.
result = ({}, {'0': ['Lin_428605'],
'1': ['Wang_1395222', 'Wang_428605']})
assert make_clusters(records, signatures) == result
@pytest.mark.xfail(reason="Need to rebuild linkage.dat with scikit-learn-18.x")
def test_make_clusters_profile_exists():
"""Test if signatures will be allocated in bucket for matched
signatures.
This test checks if signatures, which already have profiles
will be clustered together, and then put in the first bucket,
which contains signatures matched by recid and Beard together.
"""
from beard_server.tasks import make_clusters
records = [
{
"title": "Towards graphene-based detectors for" +
" dark matter directional detection",
"year": 2015,
"publication_id": 1395222,
"authors": [
"Wang, Shang-Yung"
]
}, {
"title": "Induced Einstein-Kalb-Ramond theory and" +
" the black hole",
"year": 1996,
"publication_id": 428605,
"authors": [
"Kao, W.F.",
"Chyi, Tzuu-Kang",
"Dai, W.B.",
"Wang, Shang-Yung",
"Lin, Shih-Yuun"
]
}
]
signatures = [
{
"author_affiliation": "Taiwan, Natl. Chiao Tung U.",
"author_name": "Wang, Shang-Yung",
"publication_id": 1395222,
"signature_id": "Wang_1395222",
"author_recid": "A",
},
{
"author_affiliation": "Taiwan, Natl. Chiao Tung U.",
"author_name": "Wang, Shang-Yung",
"publication_id": 428605,
"signature_id": "Wang_428605",
"author_recid": "A",
},
{
"author_affiliation": "Taiwan, Natl. Chiao Tung U.",
"author_name": "Lin, Shih-Yuin",
"publication_id": 428605,
"signature_id": "Lin_428605",
"author_recid": "B",
}
]
# Bucket with old clusters only.
result = ({'A': ['Wang_1395222', 'Wang_428605'], 'B': ['Lin_428605']},
{})
assert make_clusters(records, signatures) == result
@pytest.mark.xfail(reason="Need to rebuild linkage.dat with scikit-learn-18.x")
def test_make_clusters_profile_exists_new_arrives():
"""Test if signatures will be allocated in buckets for matched
and new signatures.
This test checks what will happen if a system already has some
signatures with profiles assigned to them and a new signature
is arriving. The output should show two signatures clustered together
in "old" bucket and a new signature in the bucket for new data.
"""
from beard_server.tasks import make_clusters
records = [
{
"title": "Towards graphene-based detectors for" +
" dark matter directional detection",
"year": 2015,
"publication_id": 1395222,
"authors": [
"Wang, Shang-Yung"
]
}, {
"title": "Induced Einstein-Kalb-Ramond theory and" +
" the black hole",
"year": 1996,
"publication_id": 428605,
"authors": [
"Kao, W.F.",
"Chyi, Tzuu-Kang",
"Dai, W.B.",
"Wang, Shang-Yung",
"Lin, Shih-Yuun"
]
}
]
signatures = [
{
"author_affiliation": "Taiwan, Natl. Chiao Tung U.",
"author_name": "Wang, Shang-Yung",
"publication_id": 1395222,
"signature_id": "Wang_1395222",
"author_recid": "A",
},
{
"author_affiliation": "Taiwan, Natl. Chiao Tung U.",
"author_name": "Wang, Shang-Yung",
"publication_id": 428605,
"signature_id": "Wang_428605",
"author_recid": "A",
},
{
"author_affiliation": "Taiwan, Natl. Chiao Tung U.",
"author_name": "Lin, Shih-Yuin",
"publication_id": 428605,
"signature_id": "Lin_428605",
}
]
# Bucket with old clusters only.
result = ({'A': ['Wang_1395222', 'Wang_428605']},
{'0': ['Lin_428605']})
assert make_clusters(records, signatures) == result
@pytest.mark.xfail(reason="Need to rebuild linkage.dat with scikit-learn-18.x")
def test_conflict_resolver():
"""This methods checks conflict resolver."""
from beard_server.tasks import solve_conflicts
claimed_signatures = [{
'author_affiliation': 'Peking U.',
'publication_id': '13c3cca8-b0bf-42f5-90d4-e3dfcced0511',
'author_name': 'Wang, Yi-Nan',
'signature_id': 'd63537a8-1df4-4436-b5ed-224da5b5028c',
'publication': {
'collaboration': False,
'authors': ['Hohm, Olaf', 'Wang, Yi-Nan'],
'publication_id': '13c3cca8-b0bf-42f5-90d4-e3dfcced0511',
'year': '2015'
}}
]
not_claimed_signatures = [{
'author_affiliation': 'Peking U.',
'publication_id': '1a16f6ba-7428-479c-8ff6-274d699e3f7b',
'author_name': 'Wang, Yi-Nan',
'signature_id': 'fcf53cb9-2d19-433b-b735-f6c1de9a6d57',
'publication': {
'collaboration': False,
'authors': ['Washington Taylor', 'Yi-Nan Wang'],
'publication_id': '1a16f6ba-7428-479c-8ff6-274d699e3f7b',
'year': '2015'
}}
]
result = {
'd63537a8-1df4-4436-b5ed-224da5b5028c': [
'fcf53cb9-2d19-433b-b735-f6c1de9a6d57'
]
}
assert solve_conflicts(
claimed_signatures, not_claimed_signatures) == result
| gpl-2.0 |
sameerparekh/pants | tests/python/pants_test/android/tasks/test_dx_compile_integration.py | 15 | 1523 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import unittest
from pants_test.android.android_integration_test import AndroidIntegrationTest
class DxCompileIntegrationTest(AndroidIntegrationTest):
"""Integration test for DxCompile
The Android SDK is modular, finding an SDK on the PATH is no guarantee that there is
a dx.jar anywhere on disk. The TOOLS are the ones required by the target in 'test_dx_compile'
method. If you add a target, you may need to expand the TOOLS list and perhaps define new
BUILD_TOOLS or TARGET_SDK class variables.
"""
TOOLS = [
os.path.join('build-tools', AndroidIntegrationTest.BUILD_TOOLS, 'lib', 'dx.jar'),
os.path.join('platforms', 'android-' + AndroidIntegrationTest.TARGET_SDK, 'android.jar')
]
tools = AndroidIntegrationTest.requirements(TOOLS)
@unittest.skipUnless(tools, reason='Android integration test requires tools {0!r} '
'and ANDROID_HOME set in path.'.format(TOOLS))
def test_dx_compile(self):
self.dx_test(AndroidIntegrationTest.TEST_TARGET)
def dx_test(self, target):
pants_run = self.run_pants(['dex', target])
self.assert_success(pants_run)
# TODO(mateor) decompile with smali and verify contents of created dex file.
| apache-2.0 |
tonyseek/sqlalchemy-utils | sqlalchemy_utils/functions/render.py | 2 | 1977 | import inspect
import six
import sqlalchemy as sa
from .mock import create_mock_engine
def render_expression(expression, bind, stream=None):
"""Generate a SQL expression from the passed python expression.
Only the global variable, `engine`, is available for use in the
expression. Additional local variables may be passed in the context
parameter.
Note this function is meant for convenience and protected usage. Do NOT
blindly pass user input to this function as it uses exec.
:param bind: A SQLAlchemy engine or bind URL.
:param stream: Render all DDL operations to the stream.
"""
# Create a stream if not present.
if stream is None:
stream = six.moves.cStringIO()
engine = create_mock_engine(bind, stream)
# Navigate the stack and find the calling frame that allows the
# expression to execuate.
for frame in inspect.stack()[1:]:
try:
frame = frame[0]
local = dict(frame.f_locals)
local['engine'] = engine
six.exec_(expression, frame.f_globals, local)
break
except:
pass
else:
raise ValueError('Not a valid python expression', engine)
return stream
def render_statement(statement, bind=None):
"""
Generate an SQL expression string with bound parameters rendered inline
for the given SQLAlchemy statement.
:param statement: SQLAlchemy Query object.
:param bind:
Optional SQLAlchemy bind, if None uses the bind of the given query
object.
"""
if isinstance(statement, sa.orm.query.Query):
if bind is None:
bind = statement.session.get_bind(statement._mapper_zero_or_none())
statement = statement.statement
elif bind is None:
bind = statement.bind
stream = six.moves.cStringIO()
engine = create_mock_engine(bind.engine, stream=stream)
engine.execute(statement)
return stream.getvalue()
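# --- Illustrative usage sketch (not part of sqlalchemy_utils itself) ---
# A minimal, hypothetical example of render_statement(); the in-memory SQLite
# engine and the `users` table below are invented purely for demonstration.
if __name__ == '__main__':
    engine = sa.create_engine('sqlite://')
    metadata = sa.MetaData()
    users = sa.Table(
        'users', metadata,
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('name', sa.String),
    )
    statement = users.select().where(users.c.name == 'alice')
    # Prints the SELECT with its bound parameter rendered inline.
    print(render_statement(statement, bind=engine))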
| bsd-3-clause |
zlorb/mitmproxy | mitmproxy/net/server_spec.py | 7 | 2088 | """
Parse scheme, host and port from a string.
"""
import collections
import re
from typing import Tuple
from mitmproxy.net import check
ServerSpec = collections.namedtuple("ServerSpec", ["scheme", "address"])
server_spec_re = re.compile(
r"""
^
(?:(?P<scheme>\w+)://)? # scheme is optional
(?P<host>[^:/]+|\[.+\]) # hostname can be DNS name, IPv4, or IPv6 address.
(?::(?P<port>\d+))? # port is optional
/? # we allow a trailing slash, but no path
$
""",
re.VERBOSE
)
def parse(server_spec: str) -> ServerSpec:
"""
Parses a server mode specification, e.g.:
- http://example.com/
- example.org
- example.com:443
Raises:
ValueError, if the server specification is invalid.
"""
m = server_spec_re.match(server_spec)
if not m:
raise ValueError("Invalid server specification: {}".format(server_spec))
# defaulting to https/port 443 may annoy some folks, but it's secure-by-default.
scheme = m.group("scheme") or "https"
if scheme not in ("http", "https"):
raise ValueError("Invalid server scheme: {}".format(scheme))
host = m.group("host")
# IPv6 brackets
if host.startswith("[") and host.endswith("]"):
host = host[1:-1]
if not check.is_valid_host(host.encode("idna")):
raise ValueError("Invalid hostname: {}".format(host))
if m.group("port"):
port = int(m.group("port"))
else:
port = {
"http": 80,
"https": 443
}[scheme]
if not check.is_valid_port(port):
raise ValueError("Invalid port: {}".format(port))
return ServerSpec(scheme, (host, port))
def parse_with_mode(mode: str) -> Tuple[str, ServerSpec]:
"""
Parse a proxy mode specification, which is usually just (reverse|upstream):server-spec
Returns:
A (mode, server_spec) tuple.
Raises:
ValueError, if the specification is invalid.
"""
mode, server_spec = mode.split(":", maxsplit=1)
return mode, parse(server_spec)
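# --- Illustrative usage sketch (not part of mitmproxy itself) ---
# A few hypothetical inputs showing how parse() and parse_with_mode() behave
# according to the docstrings above; the hosts and ports are made up.
if __name__ == "__main__":
    print(parse("example.com"))            # defaults to scheme https, port 443
    print(parse("http://127.0.0.1:8080"))  # explicit scheme and port
    print(parse_with_mode("reverse:https://example.com"))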
| mit |
leekchan/djangae | djangae/db/backends/appengine/dnf.py | 6 | 9258 | import copy
from itertools import product
from django.db.models.sql.datastructures import EmptyResultSet
from djangae.db.backends.appengine.query import WhereNode
from django.db import NotSupportedError
def preprocess_node(node, negated):
to_remove = []
# Go through the children of this node and if any of the
# child nodes are leaf nodes, then explode them if necessary
for child in node.children:
if child.is_leaf:
if child.operator == "ISNULL":
value = not child.value if node.negated else child.value
if value:
child.operator = "="
child.value = None
else:
child.operator = ">"
child.value = None
elif node.negated and child.operator == "=":
# Excluded equalities become inequalities
lhs, rhs = WhereNode(), WhereNode()
lhs.column = rhs.column = child.column
lhs.value = rhs.value = child.value
lhs.operator = "<"
rhs.operator = ">"
child.operator = child.value = child.column = None
child.connector = "OR"
child.children = [lhs, rhs]
assert not child.is_leaf
elif child.operator == "IN":
# Explode IN filters into a series of OR statements to make life
# easier later
new_children = []
for value in child.value:
if node.negated:
lhs, rhs = WhereNode(), WhereNode()
lhs.column = rhs.column = child.column
lhs.value = rhs.value = value
lhs.operator = "<"
rhs.operator = ">"
bridge = WhereNode()
bridge.connector = "OR"
bridge.children = [ lhs, rhs ]
new_children.append(bridge)
else:
new_node = WhereNode()
new_node.operator = "="
new_node.value = value
new_node.column = child.column
new_children.append(new_node)
child.column = None
child.operator = None
child.connector = "AND" if negated else "OR"
child.value = None
child.children = new_children
assert not child.is_leaf
elif child.operator == "RANGE":
lhs, rhs = WhereNode(), WhereNode()
lhs.column = rhs.column = child.column
if node.negated:
lhs.operator = "<"
rhs.operator = ">"
child.connector = "OR"
else:
lhs.operator = ">="
rhs.operator = "<="
child.connector = "AND"
lhs.value = child.value[0]
rhs.value = child.value[1]
child.column = child.operator = child.value = None
child.children = [ lhs, rhs ]
assert not child.is_leaf
elif node.negated:
# Move the negation down the tree
child.negated = not child.negated
# If this node was negated, we flip everything
if node.negated:
node.negated = False
node.connector = "AND" if node.connector == "OR" else "OR"
for child in to_remove:
node.children.remove(child)
return node
def normalize_query(query):
where = query.where
# If there are no filters then this is already normalized
if where is None:
return query
def walk_tree(where, original_negated=False):
negated = original_negated
if where.negated:
negated = not negated
preprocess_node(where, negated)
rewalk = False
for child in where.children:
if where.connector == "AND" and child.children and child.connector == 'AND' and not child.negated:
where.children.remove(child)
where.children.extend(child.children)
rewalk = True
elif child.connector == "AND" and len(child.children) == 1 and not child.negated:
# Promote leaf nodes if they are the only child under an AND. Just for consistency
where.children.remove(child)
where.children.extend(child.children)
rewalk = True
elif len(child.children) > 1 and child.connector == 'AND' and child.negated:
new_grandchildren = []
for grandchild in child.children:
new_node = WhereNode()
new_node.negated = True
new_node.children = [ grandchild ]
new_grandchildren.append(new_node)
child.children = new_grandchildren
child.connector = 'OR'
rewalk = True
else:
walk_tree(child, negated)
if rewalk:
walk_tree(where, original_negated)
if where.connector == 'AND' and any([x.connector == 'OR' for x in where.children]):
# ANDs should have been taken care of!
assert not any([x.connector == 'AND' and not x.is_leaf for x in where.children ])
product_list = []
for child in where.children:
if child.connector == 'OR':
product_list.append(child.children)
else:
product_list.append([child])
producted = product(*product_list)
new_children = []
for branch in producted:
new_and = WhereNode()
new_and.connector = 'AND'
new_and.children = list(copy.deepcopy(branch))
new_children.append(new_and)
where.connector = 'OR'
where.children = list(set(new_children))
walk_tree(where, original_negated)
elif where.connector == 'OR':
new_children = []
for child in where.children:
if child.connector == 'OR':
new_children.extend(child.children)
else:
new_children.append(child)
where.children = list(set(new_children))
walk_tree(where)
if where.connector != 'OR':
new_node = WhereNode()
new_node.connector = 'OR'
new_node.children = [ where ]
query._where = new_node
all_pks = True
for and_branch in query.where.children:
if and_branch.is_leaf:
children = [ and_branch ]
else:
children = and_branch.children
for node in children:
if node.column == "__key__" and node.operator in ("=", "IN"):
break
else:
all_pks = False
break
if (not all_pks) and len(query.where.children) > 30:
raise NotSupportedError("Unable to run query as it required more than 30 subqueries")
def remove_empty_in(node):
"""
Once we are normalized, if any of the branches filters
on an empty list, we can remove that entire branch from the
query. If this leaves no branches, then the result set is empty
"""
# This is a bit ugly, but you try and do it more succinctly :)
# We have the following possible situations for IN queries with an empty
# value:
# - Negated: One of the nodes in the and branch will always be true and is therefore
# unnecessary, we leave it alone though
# - Not negated: The entire AND branch will always be false, so that branch can be removed
# if that was the last branch, then the queryset will be empty
# Everything got wiped out!
if node.connector == 'OR' and len(node.children) == 0:
raise EmptyResultSet()
for and_branch in node.children[:]:
if and_branch.is_leaf and and_branch.operator == "IN" and not len(and_branch.value):
node.children.remove(and_branch)
if not node.children:
raise EmptyResultSet()
remove_empty_in(where)
def detect_conflicting_key_filter(node):
assert node.connector == "OR"
for and_branch in node.children[:]:
# If we have a Root OR with leaf elements, we don't need to worry
if and_branch.is_leaf:
break
pk_equality_found = None
for child in and_branch.children:
if child.column == "__key__" and child.operator == "=":
if pk_equality_found and pk_equality_found != child.value:
# Remove this AND branch as it's impossible to return anything
node.children.remove(and_branch)
else:
pk_equality_found = child.value
if not node.children:
raise EmptyResultSet()
detect_conflicting_key_filter(query.where)
return query
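# --- Illustrative sketch (not used by the backend above) ---
# The OR-distribution step in normalize_query() relies on itertools.product to
# turn an AND node containing OR branches into a pure OR-of-ANDs (disjunctive
# normal form). The same trick is shown here on plain lists with invented
# placeholder filters, purely for demonstration.
if __name__ == "__main__":
    # (A AND (B OR C))  ->  (A AND B) OR (A AND C)
    and_children = [["A"], ["B", "C"]]  # a leaf and an OR branch
    dnf_branches = [list(branch) for branch in product(*and_children)]
    print(dnf_branches)  # [['A', 'B'], ['A', 'C']]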
| bsd-3-clause |
Red--Code/mt6589_kernel_3.4.67 | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
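# Illustrative example (not part of the original script): starting from
# RunqueueSnapshot(tasks=(0, 1234)), a sched_switch() where pid 1234 is
# switched out in a non-runnable state and the idle task (pid 0) is switched
# in yields a snapshot whose tasks are (0,) and whose event is
# RunqueueEventSleep(1234) -- assuming taskState() reports the previous state
# as something other than "R".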
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
| gpl-2.0 |
lukebarnard1/bokeh | bokeh/models/widgets/inputs.py | 24 | 4851 | """ Various kinds of input widgets and form controls.
"""
from __future__ import absolute_import
import six
from ...properties import Bool, Int, Float, String, Date, RelativeDelta, Enum, List, Dict, Tuple, Either, Instance
from ..actions import Callback
from ..widget import Widget
class InputWidget(Widget):
""" Abstract base class for input widgets. `InputWidget`` is not
generally useful to instantiate on its own.
"""
title = String(help="""
Widget's label.
""")
name = String(help="""
Widget's name.
""")
callback = Instance(Callback, help="""
A callback to run in the browser whenever the input's value changes.
""")
@classmethod
def coerce_value(cls, val):
prop_obj = cls.lookup('value')
if isinstance(prop_obj, Float):
return float(val)
elif isinstance(prop_obj, Int):
return int(val)
elif isinstance(prop_obj, String):
return str(val)
else:
return val
@classmethod
def create(cls, *args, **kwargs):
""" Only called the first time we make an object,
whereas __init__ is called every time it's loaded
"""
if kwargs.get('title') is None:
kwargs['title'] = kwargs['name']
if kwargs.get('value') is not None:
kwargs['value'] = cls.coerce_value(kwargs.get('value'))
return cls(**kwargs)
class TextInput(InputWidget):
""" Single-line input widget. """
value = String(help="""
Initial or entered text value.
""")
class AutocompleteInput(TextInput):
""" Single-line input widget with auto-completion. """
completions = List(String, help="""
A list of completion strings. This will be used to guide the
user when he types-in a value.
""")
class Select(InputWidget):
""" Single-select widget.
"""
options = List(Either(String, Dict(String, String)), help="""
Available selection options.
""")
value = String(help="""
Initial or selected value.
""")
@classmethod
def create(self, *args, **kwargs):
options = kwargs.pop('options', [])
new_options = []
for opt in options:
if isinstance(opt, six.string_types):
opt = {'name' : opt, 'value' : opt}
new_options.append(opt)
kwargs['options'] = new_options
return super(Select, self).create(*args, **kwargs)
class MultiSelect(Select):
""" Multi-select widget.
"""
value = List(String, help="""
Initial or selected values.
""")
@classmethod
def create(self, *args, **kwargs):
options = kwargs.pop('options', [])
new_options = []
for opt in options:
if isinstance(opt, six.string_types):
opt = {'name' : opt, 'value' : opt}
new_options.append(opt)
kwargs['options'] = new_options
return super(Select, self).create(*args, **kwargs)
class Slider(InputWidget):
""" Slider-based number selection widget.
"""
value = Float(help="""
Initial or selected value.
""")
start = Float(help="""
The minimum allowable value.
""")
end = Float(help="""
The maximum allowable value.
""")
step = Float(help="""
The step between consecutive values.
""")
orientation = Enum("horizontal", "vertical", help="""
Orient the slider either horizontally (default) or vertically.
""")
class DateRangeSlider(InputWidget):
""" Slider-based date range selection widget.
"""
value = Tuple(Date, Date, help="""
The initial or selected date range.
""")
bounds = Tuple(Date, Date, help="""
The earliest and latest allowable dates.
""")
range = Tuple(RelativeDelta, RelativeDelta, help="""
[TDB]
""")
step = RelativeDelta(help="""
The step between consecutive dates.
""")
# formatter = Either(String, Function(Date))
# scales = DateRangeSliderScales ... # first, next, stop, label, format
enabled = Bool(True, help="""
Enable or disable this widget.
""")
arrows = Bool(True, help="""
Whether to show clickable arrows on both ends of the slider.
""")
value_labels = Enum("show", "hide", "change", help="""
Show or hide value labels on both sides of the slider.
""")
wheel_mode = Enum("scroll", "zoom", default=None, help="""
Whether mouse zoom should scroll or zoom selected range (or
do nothing).
""")
class DatePicker(InputWidget):
""" Calendar-based date picker widget.
"""
value = Date(help="""
The initial or picked date.
""")
min_date = Date(default=None, help="""
Optional earliest allowable date.
""")
max_date = Date(default=None, help="""
Optional latest allowable date.
""")
| bsd-3-clause |
aospx-kitkat/platform_external_chromium_org | tools/site_compare/drivers/win32/mouse.py | 189 | 6462 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""SiteCompare module for simulating mouse input.
This module contains functions that can be used to simulate a user
navigating using a pointing device. This includes mouse movement,
clicking with any button, and dragging.
"""
import sys # for sys.exit in main()
import time # for sleep
import win32api # for mouse_event
import win32con # Windows constants
import win32gui # for window functions
def ScreenToMouse(pt):
"""Convert a value in screen coordinates to mouse coordinates.
Mouse coordinates are specified as a percentage of screen dimensions,
normalized to 16 bits. 0 represents the far left/top of the screen,
65535 represents the far right/bottom. This function assumes that
the size of the screen is fixed at module load time and does not change
Args:
pt: the point of the coords to convert
Returns:
the converted point
"""
# Initialize the screen dimensions on first execution. Note that this
# function assumes that the screen dimensions do not change during run.
if not ScreenToMouse._SCREEN_DIMENSIONS:
desktop = win32gui.GetClientRect(win32gui.GetDesktopWindow())
ScreenToMouse._SCREEN_DIMENSIONS = (desktop[2], desktop[3])
return ((65535 * pt[0]) / ScreenToMouse._SCREEN_DIMENSIONS[0],
(65535 * pt[1]) / ScreenToMouse._SCREEN_DIMENSIONS[1])
ScreenToMouse._SCREEN_DIMENSIONS = None
def PressButton(down, button='left'):
"""Simulate a mouse button press or release at the current mouse location.
Args:
down: whether the button is pressed or released
button: which button is pressed
Returns:
None
"""
# Put the mouse_event flags in a convenient dictionary by button
flags = {
'left': (win32con.MOUSEEVENTF_LEFTUP, win32con.MOUSEEVENTF_LEFTDOWN),
'middle': (win32con.MOUSEEVENTF_MIDDLEUP, win32con.MOUSEEVENTF_MIDDLEDOWN),
'right': (win32con.MOUSEEVENTF_RIGHTUP, win32con.MOUSEEVENTF_RIGHTDOWN)
}
# hit the button
win32api.mouse_event(flags[button][down], 0, 0)
def ClickButton(button='left', click_time=0):
"""Press and release a mouse button at the current mouse location.
Args:
button: which button to click
click_time: duration between press and release
Returns:
None
"""
PressButton(True, button)
time.sleep(click_time)
PressButton(False, button)
def DoubleClickButton(button='left', click_time=0, time_between_clicks=0):
"""Double-click a mouse button at the current mouse location.
Args:
button: which button to click
click_time: duration between press and release
time_between_clicks: time to pause between clicks
Returns:
None
"""
ClickButton(button, click_time)
time.sleep(time_between_clicks)
ClickButton(button, click_time)
def MoveToLocation(pos, duration=0, tick=0.01):
"""Move the mouse cursor to a specified location, taking the specified time.
Args:
pos: position (in screen coordinates) to move to
duration: amount of time the move should take
tick: amount of time between successive moves of the mouse
Returns:
None
"""
# calculate the number of moves to reach the destination
num_steps = (duration/tick)+1
# get the current and final mouse position in mouse coords
current_location = ScreenToMouse(win32gui.GetCursorPos())
end_location = ScreenToMouse(pos)
# Calculate the step size
step_size = ((end_location[0]-current_location[0])/num_steps,
(end_location[1]-current_location[1])/num_steps)
step = 0
while step < num_steps:
# Move the mouse one step
current_location = (current_location[0]+step_size[0],
current_location[1]+step_size[1])
# Coerce the coords to int to avoid a warning from pywin32
win32api.mouse_event(
win32con.MOUSEEVENTF_MOVE|win32con.MOUSEEVENTF_ABSOLUTE,
int(current_location[0]), int(current_location[1]))
step += 1
time.sleep(tick)
def ClickAtLocation(pos, button='left', click_time=0):
"""Simulate a mouse click in a particular location, in screen coordinates.
Args:
pos: position in screen coordinates (x,y)
button: which button to click
click_time: duration of the click
Returns:
None
"""
MoveToLocation(pos)
ClickButton(button, click_time)
def ClickInWindow(hwnd, offset=None, button='left', click_time=0):
"""Simulate a user mouse click in the center of a window.
Args:
hwnd: handle of the window to click in
offset: where to click, defaults to dead center
button: which button to click
click_time: duration of the click
Returns:
Nothing
"""
rect = win32gui.GetClientRect(hwnd)
if offset is None: offset = (rect[2]/2, rect[3]/2)
# get the screen coordinates of the window's center
pos = win32gui.ClientToScreen(hwnd, offset)
ClickAtLocation(pos, button, click_time)
def DoubleClickInWindow(
hwnd, offset=None, button='left', click_time=0, time_between_clicks=0.1):
"""Simulate a user mouse double click in the center of a window.
Args:
hwnd: handle of the window to click in
offset: where to click, defaults to dead center
button: which button to click
click_time: duration of the clicks
time_between_clicks: length of time to pause between clicks
Returns:
Nothing
"""
ClickInWindow(hwnd, offset, button, click_time)
time.sleep(time_between_clicks)
ClickInWindow(hwnd, offset, button, click_time)
def main():
# We're being invoked rather than imported. Let's do some tests
screen_size = win32gui.GetClientRect(win32gui.GetDesktopWindow())
screen_size = (screen_size[2], screen_size[3])
# move the mouse (instantly) to the upper right corner
MoveToLocation((screen_size[0], 0))
# move the mouse (over five seconds) to the lower left corner
MoveToLocation((0, screen_size[1]), 5)
# click the left mouse button. This will open up the Start menu
# if the taskbar is at the bottom
ClickButton()
# wait a bit, then click the right button to open the context menu
time.sleep(3)
ClickButton('right')
# move the mouse away and then click the left button to dismiss the
# context menu
MoveToLocation((screen_size[0]/2, screen_size[1]/2), 3)
MoveToLocation((0, 0), 3)
ClickButton()
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause |
aaron-fz/neutron_full_sync | neutron/plugins/vmware/dbexts/nsxrouter.py | 16 | 2668 | # Copyright 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron.db import db_base_plugin_v2
from neutron.extensions import l3
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.dbexts import models
LOG = logging.getLogger(__name__)
class NsxRouterMixin(object):
"""Mixin class to enable nsx router support."""
nsx_attributes = []
def _extend_nsx_router_dict(self, router_res, router_db):
nsx_attrs = router_db['nsx_attributes']
        # Fall back to the default if nsx attributes are not defined for this
        # neutron router
for attr in self.nsx_attributes:
name = attr['name']
default = attr['default']
router_res[name] = (
nsx_attrs and nsx_attrs[name] or default)
def _process_nsx_router_create(
self, context, router_db, router_req):
if not router_db['nsx_attributes']:
kwargs = {}
for attr in self.nsx_attributes:
name = attr['name']
default = attr['default']
kwargs[name] = router_req.get(name, default)
nsx_attributes = models.NSXRouterExtAttributes(
router_id=router_db['id'], **kwargs)
context.session.add(nsx_attributes)
router_db['nsx_attributes'] = nsx_attributes
else:
# The situation where the record already exists will
# be likely once the NSXRouterExtAttributes model
# will allow for defining several attributes pertaining
# to different extensions
for attr in self.nsx_attributes:
name = attr['name']
default = attr['default']
router_db['nsx_attributes'][name] = router_req.get(
name, default)
LOG.debug(_("Nsx router extension successfully processed "
"for router:%s"), router_db['id'])
# Register dict extend functions for ports
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
l3.ROUTERS, ['_extend_nsx_router_dict'])
| apache-2.0 |
jonasfoe/COPASI | stochastic-testsuite/compare_sd.py | 2 | 1614 | #!/usr/bin/python
import sys
import math
import string
SD_FILE=sys.argv[1]
REFERENCE_SD_FILE=sys.argv[2]
REPEATS=float(sys.argv[3])
OUTFILE=sys.argv[4]
SD=file(SD_FILE,"r").readlines()
REFERENCE_SD=file(REFERENCE_SD_FILE,"r").readlines()
EXIT_STATUS=0
if(len(SD) != len(REFERENCE_SD)):
print "ERROR: The input files don't have the same number of lines."
sys.exit(1)
NUMCOLUMNS=len(string.split(SD[1],","))
OUT=file(OUTFILE,"w")
OUT.write(SD[0])
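# For each data point, compare the variance against the reference variance; a
# normalized deviation |v| >= 5 is reported as an error.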
for X in range(2,len(SD)):
REF_SD_COLS=string.split(REFERENCE_SD[X],",")[1:]
SD_COLS=string.split(SD[X],",")[1:]
if(len(SD_COLS) != len(REF_SD_COLS)):
print "ERROR: Number of columns differs between files at line %d"%(X)
OUT.close()
sys.exit(1)
RESULT=str(X-1)
for Y in range(0,NUMCOLUMNS-1):
v=0.0
REF_SD_VALUE=float(REF_SD_COLS[Y])
SD_VALUE=float(SD_COLS[Y])
if(REF_SD_VALUE!=0.0):
v=(math.pow(SD_VALUE,2)/math.pow(REF_SD_VALUE,2)-1.0)*math.sqrt(REPEATS/2.0)
else:
if(SD_VALUE!=0.0):
print "ERROR at %s (%d, %d): Var: %f, RefVar: %f, Tol: %f."%(SD_FILE,X,Y+1,math.pow(SD_VALUE,2), math.pow(REF_SD_VALUE,2), 5.0*math.pow(REF_SD_VALUE,2)*math.sqrt(2.0/REPEATS))
if(math.fabs(v)>=5.0):
print "ERROR at %s (%d, %d): Var: %f, RefVar: %f, Tol: %f."%(SD_FILE,X,Y+1,math.pow(SD_VALUE,2), math.pow(REF_SD_VALUE,2), 5.0*math.pow(REF_SD_VALUE,2)*math.sqrt(2.0/REPEATS))
EXIT_STATUS=1
RESULT=string.join([RESULT,str(v)],",")
RESULT=RESULT+"\n"
OUT.write(RESULT)
OUT.close()
sys.exit(EXIT_STATUS)
| artistic-2.0 |
msmania/grpc | tools/distrib/python/grpcio_tools/protoc_lib_deps.py | 4 | 9508 |
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# AUTO-GENERATED BY make_grpcio_tools.py!
CC_FILES=['google/protobuf/compiler/zip_writer.cc', 'google/protobuf/compiler/subprocess.cc', 'google/protobuf/compiler/ruby/ruby_generator.cc', 'google/protobuf/compiler/python/python_generator.cc', 'google/protobuf/compiler/profile.pb.cc', 'google/protobuf/compiler/plugin.pb.cc', 'google/protobuf/compiler/plugin.cc', 'google/protobuf/compiler/php/php_generator.cc', 'google/protobuf/compiler/objectivec/objectivec_primitive_field.cc', 'google/protobuf/compiler/objectivec/objectivec_oneof.cc', 'google/protobuf/compiler/objectivec/objectivec_message_field.cc', 'google/protobuf/compiler/objectivec/objectivec_message.cc', 'google/protobuf/compiler/objectivec/objectivec_map_field.cc', 'google/protobuf/compiler/objectivec/objectivec_helpers.cc', 'google/protobuf/compiler/objectivec/objectivec_generator.cc', 'google/protobuf/compiler/objectivec/objectivec_file.cc', 'google/protobuf/compiler/objectivec/objectivec_field.cc', 'google/protobuf/compiler/objectivec/objectivec_extension.cc', 'google/protobuf/compiler/objectivec/objectivec_enum_field.cc', 'google/protobuf/compiler/objectivec/objectivec_enum.cc', 'google/protobuf/compiler/js/well_known_types_embed.cc', 'google/protobuf/compiler/js/js_generator.cc', 'google/protobuf/compiler/javanano/javanano_primitive_field.cc', 'google/protobuf/compiler/javanano/javanano_message_field.cc', 'google/protobuf/compiler/javanano/javanano_message.cc', 'google/protobuf/compiler/javanano/javanano_map_field.cc', 'google/protobuf/compiler/javanano/javanano_helpers.cc', 'google/protobuf/compiler/javanano/javanano_generator.cc', 'google/protobuf/compiler/javanano/javanano_file.cc', 'google/protobuf/compiler/javanano/javanano_field.cc', 'google/protobuf/compiler/javanano/javanano_extension.cc', 'google/protobuf/compiler/javanano/javanano_enum_field.cc', 'google/protobuf/compiler/javanano/javanano_enum.cc', 'google/protobuf/compiler/java/java_string_field_lite.cc', 'google/protobuf/compiler/java/java_string_field.cc', 'google/protobuf/compiler/java/java_shared_code_generator.cc', 'google/protobuf/compiler/java/java_service.cc', 'google/protobuf/compiler/java/java_primitive_field_lite.cc', 'google/protobuf/compiler/java/java_primitive_field.cc', 'google/protobuf/compiler/java/java_name_resolver.cc', 'google/protobuf/compiler/java/java_message_lite.cc', 'google/protobuf/compiler/java/java_message_field_lite.cc', 'google/protobuf/compiler/java/java_message_field.cc', 'google/protobuf/compiler/java/java_message_builder_lite.cc', 'google/protobuf/compiler/java/java_message_builder.cc', 'google/protobuf/compiler/java/java_message.cc', 'google/protobuf/compiler/java/java_map_field_lite.cc', 'google/protobuf/compiler/java/java_map_field.cc', 'google/protobuf/compiler/java/java_lazy_message_field_lite.cc', 'google/protobuf/compiler/java/java_lazy_message_field.cc', 'google/protobuf/compiler/java/java_helpers.cc', 'google/protobuf/compiler/java/java_generator_factory.cc', 'google/protobuf/compiler/java/java_generator.cc', 'google/protobuf/compiler/java/java_file.cc', 'google/protobuf/compiler/java/java_field.cc', 'google/protobuf/compiler/java/java_extension_lite.cc', 'google/protobuf/compiler/java/java_extension.cc', 'google/protobuf/compiler/java/java_enum_lite.cc', 'google/protobuf/compiler/java/java_enum_field_lite.cc', 'google/protobuf/compiler/java/java_enum_field.cc', 'google/protobuf/compiler/java/java_enum.cc', 'google/protobuf/compiler/java/java_doc_comment.cc', 'google/protobuf/compiler/java/java_context.cc', 'google/protobuf/compiler/csharp/csharp_wrapper_field.cc', 
'google/protobuf/compiler/csharp/csharp_source_generator_base.cc', 'google/protobuf/compiler/csharp/csharp_repeated_primitive_field.cc', 'google/protobuf/compiler/csharp/csharp_repeated_message_field.cc', 'google/protobuf/compiler/csharp/csharp_repeated_enum_field.cc', 'google/protobuf/compiler/csharp/csharp_reflection_class.cc', 'google/protobuf/compiler/csharp/csharp_primitive_field.cc', 'google/protobuf/compiler/csharp/csharp_message_field.cc', 'google/protobuf/compiler/csharp/csharp_message.cc', 'google/protobuf/compiler/csharp/csharp_map_field.cc', 'google/protobuf/compiler/csharp/csharp_helpers.cc', 'google/protobuf/compiler/csharp/csharp_generator.cc', 'google/protobuf/compiler/csharp/csharp_field_base.cc', 'google/protobuf/compiler/csharp/csharp_enum_field.cc', 'google/protobuf/compiler/csharp/csharp_enum.cc', 'google/protobuf/compiler/csharp/csharp_doc_comment.cc', 'google/protobuf/compiler/cpp/cpp_string_field.cc', 'google/protobuf/compiler/cpp/cpp_service.cc', 'google/protobuf/compiler/cpp/cpp_primitive_field.cc', 'google/protobuf/compiler/cpp/cpp_message_field.cc', 'google/protobuf/compiler/cpp/cpp_message.cc', 'google/protobuf/compiler/cpp/cpp_map_field.cc', 'google/protobuf/compiler/cpp/cpp_helpers.cc', 'google/protobuf/compiler/cpp/cpp_generator.cc', 'google/protobuf/compiler/cpp/cpp_file.cc', 'google/protobuf/compiler/cpp/cpp_field.cc', 'google/protobuf/compiler/cpp/cpp_extension.cc', 'google/protobuf/compiler/cpp/cpp_enum_field.cc', 'google/protobuf/compiler/cpp/cpp_enum.cc', 'google/protobuf/compiler/command_line_interface.cc', 'google/protobuf/compiler/code_generator.cc', 'google/protobuf/wrappers.pb.cc', 'google/protobuf/wire_format.cc', 'google/protobuf/util/type_resolver_util.cc', 'google/protobuf/util/time_util.cc', 'google/protobuf/util/message_differencer.cc', 'google/protobuf/util/json_util.cc', 'google/protobuf/util/internal/utility.cc', 'google/protobuf/util/internal/type_info_test_helper.cc', 'google/protobuf/util/internal/type_info.cc', 'google/protobuf/util/internal/protostream_objectwriter.cc', 'google/protobuf/util/internal/protostream_objectsource.cc', 'google/protobuf/util/internal/proto_writer.cc', 'google/protobuf/util/internal/object_writer.cc', 'google/protobuf/util/internal/json_stream_parser.cc', 'google/protobuf/util/internal/json_objectwriter.cc', 'google/protobuf/util/internal/json_escaping.cc', 'google/protobuf/util/internal/field_mask_utility.cc', 'google/protobuf/util/internal/error_listener.cc', 'google/protobuf/util/internal/default_value_objectwriter.cc', 'google/protobuf/util/internal/datapiece.cc', 'google/protobuf/util/field_mask_util.cc', 'google/protobuf/util/field_comparator.cc', 'google/protobuf/util/delimited_message_util.cc', 'google/protobuf/unknown_field_set.cc', 'google/protobuf/type.pb.cc', 'google/protobuf/timestamp.pb.cc', 'google/protobuf/text_format.cc', 'google/protobuf/stubs/substitute.cc', 'google/protobuf/stubs/mathlimits.cc', 'google/protobuf/struct.pb.cc', 'google/protobuf/source_context.pb.cc', 'google/protobuf/service.cc', 'google/protobuf/reflection_ops.cc', 'google/protobuf/message.cc', 'google/protobuf/map_field.cc', 'google/protobuf/io/zero_copy_stream_impl.cc', 'google/protobuf/io/tokenizer.cc', 'google/protobuf/io/strtod.cc', 'google/protobuf/io/printer.cc', 'google/protobuf/io/gzip_stream.cc', 'google/protobuf/generated_message_reflection.cc', 'google/protobuf/field_mask.pb.cc', 'google/protobuf/extension_set_heavy.cc', 'google/protobuf/empty.pb.cc', 'google/protobuf/dynamic_message.cc', 
'google/protobuf/duration.pb.cc', 'google/protobuf/descriptor_database.cc', 'google/protobuf/descriptor.pb.cc', 'google/protobuf/descriptor.cc', 'google/protobuf/compiler/parser.cc', 'google/protobuf/compiler/importer.cc', 'google/protobuf/api.pb.cc', 'google/protobuf/any.pb.cc', 'google/protobuf/any.cc', 'google/protobuf/wire_format_lite.cc', 'google/protobuf/stubs/time.cc', 'google/protobuf/stubs/strutil.cc', 'google/protobuf/stubs/structurally_valid.cc', 'google/protobuf/stubs/stringprintf.cc', 'google/protobuf/stubs/stringpiece.cc', 'google/protobuf/stubs/statusor.cc', 'google/protobuf/stubs/status.cc', 'google/protobuf/stubs/once.cc', 'google/protobuf/stubs/int128.cc', 'google/protobuf/stubs/common.cc', 'google/protobuf/stubs/bytestream.cc', 'google/protobuf/stubs/atomicops_internals_x86_msvc.cc', 'google/protobuf/stubs/atomicops_internals_x86_gcc.cc', 'google/protobuf/repeated_field.cc', 'google/protobuf/message_lite.cc', 'google/protobuf/io/zero_copy_stream_impl_lite.cc', 'google/protobuf/io/zero_copy_stream.cc', 'google/protobuf/io/coded_stream.cc', 'google/protobuf/generated_message_util.cc', 'google/protobuf/extension_set.cc', 'google/protobuf/arenastring.cc', 'google/protobuf/arena.cc', 'google/protobuf/compiler/js/embed.cc']
PROTO_FILES=['google/protobuf/wrappers.proto', 'google/protobuf/type.proto', 'google/protobuf/timestamp.proto', 'google/protobuf/struct.proto', 'google/protobuf/source_context.proto', 'google/protobuf/field_mask.proto', 'google/protobuf/empty.proto', 'google/protobuf/duration.proto', 'google/protobuf/descriptor.proto', 'google/protobuf/compiler/profile.proto', 'google/protobuf/compiler/plugin.proto', 'google/protobuf/api.proto', 'google/protobuf/any.proto']
CC_INCLUDE='third_party/protobuf/src'
PROTO_INCLUDE='third_party/protobuf/src'
| apache-2.0 |
utecuy/edx-platform | lms/envs/static.py | 100 | 2249 | """
This config file runs the simplest dev environment using sqlite, and db-based
sessions. Assumes structure:
/envroot/
/db # This is where it'll write the database file
/edx-platform # The location of this repo
/log # Where we're going to write log files
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
from .common import *
from openedx.core.lib.logsettings import get_logger_config
STATIC_GRAB = True
LOGGING = get_logger_config(ENV_ROOT / "log",
logging_env="dev",
tracking_filename="tracking.log",
debug=False)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ENV_ROOT / "db" / "edx.db",
}
}
CACHES = {
# This is the cache used for most things.
# In staging/prod envs, the sessions also live here.
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_loc_mem_cache',
'KEY_FUNCTION': 'util.memcache.safe_key',
},
# The general cache is what you get if you use our util.cache. It's used for
# things like caching the course.xml file for different A/B test groups.
# We set it to be a DummyCache to force reloading of course.xml in dev.
# In staging environments, we would grab VERSION from data uploaded by the
# push process.
'general': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
'KEY_PREFIX': 'general',
'VERSION': 4,
'KEY_FUNCTION': 'util.memcache.safe_key',
}
}
# Dummy secret key for dev
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
############################ FILE UPLOADS (for discussion forums) #############################
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
MEDIA_ROOT = ENV_ROOT / "uploads"
MEDIA_URL = "/discussion/upfiles/"
FILE_UPLOAD_TEMP_DIR = ENV_ROOT / "uploads"
FILE_UPLOAD_HANDLERS = (
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
| agpl-3.0 |
hofschroeer/gnuradio | gr-fec/python/fec/LDPC/Generate_LDPC_matrix.py | 7 | 3273 | #!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from .Generate_LDPC_matrix_functions import *
# This is an example of how to generate a parity check matrix for
# use with the LDPC Richardson Urbanke encoder. A significant amount
# of matrix manipulation is required, so this process should be done
# before using the encoder at run-time. This process can take quite
# a while, with more time required for larger matrices.
# Not all attempts to create a parity check matrix will be
# successful. The script will terminate and output error messages
# when the process fails. To increase verbosity, edit the verbose
# variable at the top of Generate_LDPC_matrix_functions.py.
# Because random number generation and
# shuffling methods are used, it is not possible to predict what
# starting conditions will result in success. It requires a bit of
# trial and error.
# ----------------------------------------------------------------- #
# First, generate a regular LDPC parity check matrix. Specify
# the properties desired. For example:
n = 200 # number of columns, corresponds to codeword length
p = 3 # column weight
q = 5 # row weight
parity_check_matrix = LDPC_matrix(n_p_q = [n,p,q])
# Richardson and Urbanke's preprocessing method requires a full rank
# matrix to start. The matrices generated by the
# regular_LDPC_code_contructor function will never be full rank. So,
# use the get_full_rank_H_matrix function.
newH = get_full_rank_H_matrix(parity_check_matrix.H)
# At this point, the matrix is no longer regular. (The row/column
# weights are not the same for all rows/columns.)
# Next, some preprocessing steps need to be performed as described
# Richardson and Urbanke in Modern Coding Theory, Appendix A. This
# can take a while...
[bestH,g] = get_best_matrix(newH,100)
# Print out some of the resulting properties.
n = bestH.shape[1]
k = n - bestH.shape[0]
print("Parity check matrix properties:")
print("\tSize :", bestH.shape)
print("\tRank :", linalg.matrix_rank(bestH))
print("\tRate : %.3f" % ((k*1.0) / n))
print("\tn :", n, " (codeword length)")
print("\tk :", k, " (info word length)")
print("\tgap : %i" % g)
# Save the matrix to an alist file for future use:
alist_filename = "n_%04i_k_%04i_gap_%02i.alist" % (n,k,g)
write_alist_file(alist_filename,bestH)
print('\nMatrix saved to alist file:', alist_filename, "\n")
| gpl-3.0 |
Arkahnn/VanillaRNN | VanillaRNN.py | 1 | 13887 | import random
import numpy as np
import numpy.matlib as mat
import tools
class RNN:
def __init__(self, dictionary, train, val, test, H, eta, alpha, t_prev):
self.dictionary, self.train, self.val, self.test = dictionary, train, val, test
self.D = len(dictionary)
#self.DU = self.D + 1
self.T = max(len(s) for s in (train + val + test))
self.H, self.eta, self.alpha = H, eta, alpha
self.n_train = len(train)
self.t_prev = t_prev
# self.bias = 0.33
# print('Total number of phrases: ', self.N)
# Weight assignment with Glorot uniform
wgtD = self.D ** (-0.5)
#wgtDU = self.DU ** (-0.5) # for the bias in X and U
wgtH = self.H ** (-0.5)
wgtBias = random.uniform(-wgtH, wgtH)
self.U = np.random.uniform(-wgtD, wgtD, (self.H, self.D)) # Hx(D+1) matrix
self.W = np.random.uniform(-wgtH, wgtH, (self.H, self.H)) # HxH matrix
self.V = np.random.uniform(-wgtH, wgtH, (self.D, self.H)) # DxH matrix
# # weight assignment with simple weights
# self.U = np.random.randn(self.H, self.D) * 0.01
# self.W = np.random.randn(self.H, self.H) * 0.01
# self.V = np.random.randn(self.D, self.H) * 0.01
# self.U[-1,:] = wgtBias
# self.V[:, -1] = wgtBias
# self.W[:, -1] = wgtBias
def init_main_params(self, data):
# Set X (input)
self.N = len(data)
self.X = np.zeros((self.N, self.T, self.D))
#self.X[:,:,-1] = 1 # bias
for n, sent in enumerate(data):
self.X[n, range(len(sent)), [self.dictionary.index(x) for x in sent]] = 1.0
# Set Y (labels)
self.Y = np.zeros((self.N, self.T, self.D))
self.Y[:, :-1, :] = self.X[:, 1:, :] # X[:, 1:, :-1] for the bias
# self.Y[:, -1:, 2] = 1
# Set S and O (hidden output and output)
self.S = np.zeros((self.N, self.T, self.H))
#
self.O = np.zeros((self.N, self.T, self.D))
#self.S[:, :, -1] = 1.0
# for c in data:
# self.S[:, len(c):,-1] = 0.0
# forward step of the RNN
def forward(self, X, U, S, O):
# 1. s = tanh(Ux + Ws_prev)
# 2. o = sigma(Vs)
# 3. U,W,V = L(y,o)
reLU = False
for t in range(self.T):
if reLU:
S[:, t, :] = (self.U.dot(X[:, t, :].T) + self.W.dot(S[:, t - 1, :].T)).T
S[:, t, :] = S[:, t, :] * (S[:, t, :] > 0)
else:
S[:, t, :] = self.out_HL(X[:, t, :].T, U, S[:, t - 1, :].T).T
# O[:, t, :] = self.softmax(self.V.dot(S[:, t, :].T)).T
O[:, t, :] = self.softmax(np.dot(self.V, S[:, t, :].T)).T
return S, O
# New version
def dLdO(self, Y, O):
#return (-self.Y)/self.O # returns a NxTxD matrix
return -Y / O # returns a Dx1 vector
def dOdV(self, dO_dVS, S):
#return np.einsum('ntd,nth->dh',dO_dVS,self.S) # returns a DxH matrix
#return np.einsum('ik,il->kl', dO_dVS, S) # returns a DxH matrix
return dO_dVS[:, None] * S[None, :] # Return a DxH matrix
def dOdS(self, dO_dVS):
#return np.einsum('ntd,dh->dh', dO_dVS, self.V) # returns a DxH matrix
#return np.einsum('ik,kl->kl', dO_dVS, self.V) # returns a DxH matrix
return dO_dVS[:, None] * self.V # Returns a DxH matrix - Da verificare!!!
def dSdU(self, S, X):
S = (1 - S ** 2)
#return np.einsum('nth,ntd->hd', S, self.X) # returns a HxD matrix
#return np.einsum('ik,il->kl', S, X) # returns a Hx(D+1) matrix
return S[:, None] * X[None, :] # Returns an HxD matrix
def dSdW(self, S, S0):
#S = (1 - S ** 2)
#return np.einsum('nth,nth->nth', S, S0) # returns an NxTxH matrix
#return np.einsum('ik,ik->ik', S, S0) # returns an NxH matrix
return S[:, None] * S0[None, :] # Returns a HxH vector
def dLdV(self, dL_dO, dO_dV):
#return np.einsum('ntd,dh->dh', dL_dO, dO_dV) # returns a DxH matrix
#return np.einsum('ik,kl->kl', dL_dO, dO_dV) # returns a DxH matrix
return dL_dO[:, None] * dO_dV # Returns a DxH matrix
def dLdS(self, dL_dO, dO_dS):
#return np.einsum('ntd,dh->dh', dL_dO, dO_dS) #returns a DxH matrix
#return np.einsum('ik,kl->kl', dL_dO, dO_dS) # returns a DxH matrix
return dL_dO.dot(dO_dS) # Returns an Hx1 matrix
def dLdU(self, dL_dS, dS_dU):
#return np.einsum('dh,hd->hd', dL_dS, dS_dU) # returns an HxD matrix
#return np.einsum('ij,jk->jk', dL_dS, dS_dU) # returns an Hx(D+1) matrix
return dL_dS[:, None] * dS_dU # Return HxD matrix
def dLdW(self, dL_dS, dS_dW):
#return np.einsum('dm,nth->mh', dL_dS, dS_dW) # returns an HxH matrix
# dL_dW = np.zeros((self.H, self.H))
# # for i in range(self.N):
# # for j in range(self.T):
# # for k in range(self.D):
# # dL_dW += dS_dW[i, j, :] * dL_dS[k, :]
# dS_dW1 = dS_dW.repeat(self.H).reshape(self.N, self.H, self.H)
# dS_dW1 = dS_dW1.sum(axis=0)
# for i in range(self.D):
# dL_dW += dS_dW1 * dL_dS[i,:]
# return dL_dW
#return np.einsum('dm,nth->mh', dL_dS, dS_dW) # returns an HxH matrix
return dL_dS[:, None] * dS_dW # Returns an HxH matrix
# backward pass of the RNN
def backprop(self):
reLU = False
delta_V, delta_U, delta_W = np.zeros(self.V.shape), np.zeros(self.U.shape), np.zeros(self.W.shape)
dL_dV, dL_dU, dL_dW = np.zeros(self.V.shape), np.zeros(self.U.shape), np.zeros(self.W.shape)
S0 = np.zeros(self.S.shape) # S(t-1)
S0[:, 1:, :] = self.S[:, :-1, :]
S2 = 1 - self.S**2
c = self.eta
#l = [len(a) for a in self.train]
#c = self.eta/(self.N * sum(l))
# Y1 = self.Y.nonzero() # elements of Y different from zero
# dL_dVS = self.Y
# dL_dVS[Y1[0], Y1[1], Y1[2]] = self.O[Y1[0], Y1[1], Y1[2]] - 1
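        # Gradient of the softmax/cross-entropy loss w.r.t. V.dot(S): (O - 1) at the target word, 0 elsewhere.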
dL_dVS = (self.O * self.Y) - self.Y
for n in range(self.N):
for t in range(self.T):
                # Version from the original code
dL_dV += np.outer(dL_dVS[n, t, :], self.S[n, t, :].T)
dL_dS = self.V.T.dot(dL_dVS[n, t, :])
if reLU:
dL_dargTanh = (dL_dS > 0).astype(int)
else:
dL_dargTanh = dL_dS * S2[n, t, :]
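                # Truncated BPTT: propagate the gradient back through at most t_prev time steps.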
for t_i in range(t, t - self.t_prev, -1):
dL_dU += np.outer(dL_dargTanh, self.X[n, t_i, :].T)
if t_i == 0:
h_prev = np.zeros((self.H))
dL_dW += np.outer(dL_dargTanh, h_prev.T)
break
else:
dL_dW += np.outer(dL_dargTanh, S0[n, t_i - 1, :].T)
dL_dargTanh = self.W.T.dot(dL_dargTanh) * (1 - S0[n, t_i - 1, :] ** 2)
# Evaluation of dL/dV
# print('Evaluation of dL/dV')
# dL_dO = self.dLdO(self.Y[n, t, :], self.O[n, t, :]) # returns a NxD matrix
# dO_dVS = self.O[n, t, :] * (1.0 - self.O[n, t, :]) # returns a NxD matrix
# dO_dV = self.dOdV(dO_dVS, self.S[n, t, :]) # returns a DxH matrix
# dL_dV += self.dLdV(dL_dO, dO_dV) # returns the final DxH matrix
# dL_dV += self.dLdV(dL_dVS, dO_dV) # returns the final DxH matrix
#c = self.eta / (self.N * self.T) # Constant value including eta and 1/n
# print('V equality: ',np.array_equal(self.V, Vnew))
            # Evaluation of dL/dU
# print('Evaluation of dL/dU')
# dO_dS = self.dOdS(dO_dVS) # returns a DxH matrix
# # dL_dS = dL_dO.dot(dO_dS) # returns a DxH matrix
# dS_dU = self.dSdU(S2[n, t, :], self.X[n, t, :]) # returns a HxD matrix
# dL_dS = self.dLdS(dL_dO, dO_dS)
# dL_dU += self.dLdU(dL_dS,dS_dU)
# dL_dU = dL_dS.T * dS_dU # returns the final HxD matrix
# print('U equality: ', np.array_equal(self.U, Unew))
# Evaluation of dL/dW
# print('Evaluation of dL/dW')
# dS_dW = self.dSdW(S2[n, t, :], S0[n, t, :]) # returns a HxD matrix
# dL_dW += self.dLdW(dL_dS, dS_dW)
#print('dL/dW dimensions: ', dL_dW.shape)
# print('W equality: ', np.array_equal(self.W, Wnew))
Vnew = self.V + (self.alpha * delta_V - c * dL_dV) # - c * dL_dV
delta_V = self.alpha * delta_V - c * dL_dV
Unew = self.U + (self.alpha * delta_U - c * dL_dU) # - c * dL_dU
delta_U = self.alpha * delta_U - c * dL_dU
Wnew = self.W + (self.alpha * delta_W - c * dL_dW) # - c * dL_dW
delta_W = self.alpha * delta_W - c * dL_dW
Vnew = np.clip(Vnew, -5, 5)
Unew = np.clip(Unew, -5, 5)
Wnew = np.clip(Wnew, -5, 5)
return (Vnew, Unew, Wnew)
def training(self, K, mini_batch_size):
loss_train, loss_val = [], []
acc_train, acc_val = [], []
idx_train = list(range(self.n_train))
self.N = mini_batch_size
n_mini_batches = self.n_train // self.N
print('Training set size: ', self.n_train)
print('Mini-batch size: ', self.N)
print('Number of mini-batches: ', n_mini_batches)
# Bias introduction on X and U
for i in range(K):
print('Epoch ', i, '/', K, ':')
random.shuffle(idx_train)
loss_t, loss_v = 0.0, 0.0
acc_t, acc_v = 0.0, 0.0
# forward and backprop steps
for j in range(n_mini_batches):
print(' Batch ', j + 1, '/', n_mini_batches)
self.init_main_params([self.train[i] for i in idx_train[(j * self.N):((j + 1) * self.N)]])
self.S, self.O = self.forward(self.X, self.U, self.S, self.O)
self.V, self.U, self.W = self.backprop()
loss_t += self.loss(self.n_train)
acc_t += self.accuracy()
# print('Loss: ', self.loss())
N = np.sum((len(y_i) for y_i in self.train))
# print('Number of elements in the training set: ', N)
loss = loss_t/ N # len(self.train)
acc = acc_t/ N # n_mini_batches
#print('Mean loss: ', loss)
loss_train.append(loss)
acc_train.append(acc)
print(' Loss: ', loss)
# validation step
print('Validation: ')
for j in range(len(self.val) // self.N):
print(' Batch ', j + 1, '/', len(self.val) // self.N)
self.init_main_params(self.val[(j * self.N):((j + 1) * self.N)])
self.forward(self.X, self.U, self.S, self.O)
l = [len(a) for a in self.val]
#loss_v += self.loss()/(self.N * sum(l))
loss_v += self.loss(len(self.val))
acc_v += self.accuracy()
# print('Validation loss: ', self.loss(len(self.val)))
print('Validation accuracy: ', self.accuracy())
l = []
# loss_val.append(loss_v/(len(self.val) // self.N))
N = np.sum((len(y_i) for y_i in self.val))
loss = loss_v/ N # len(self.val) # (len(self.val) // self.N)
acc = acc_v/ N # len(self.val) # (len(self.val) // self.N)
print('Loss val: ', loss)
loss_val.append(loss)
acc_val.append(acc)
return loss_train, loss_val, acc_train, acc_val
def testing(self):
self.init_main_params(self.test)
self.forward(self.X, self.U, self.S, self.O)
print('N = ', self.N)
loss_test = self.loss(len(self.test))
acc_test = self.accuracy()
return loss_test, acc_test
# Function that implements the softmax computation
def softmax(self, s):
# Softmax over 2D-matrix if D dimension is on axis = 0
#s -= np.amax(s, axis=0)
s = np.exp(s)
return s / np.sum(s, axis=0)
# Function that implements the activation of the hidden layer
def out_HL(self, x, U, s_prev):
# print('X shape: ', self.X.shape)
# print('U shape: ', self.U.shape)
return np.tanh(np.dot(U, x) + np.dot(self.W, s_prev)) # which verse of W am I using? which weights will be upgraded?
# Function that implements the loss function computation
def loss(self, n_phrases):
'''
o = o.transpose()
a = -y*np.log(o)
return a.sum()
'''
O_ = np.log(self.O)
O_[ ~np.isfinite(O_)] = 0.0
#c = -1 /(n_phrases * 10)
#c = -1
#return c * np.tensordot(self.Y, O_, axes=((0, 1, 2), (0, 1, 2)))
# We only care about our prediction of the "correct" words
correct_word_predictions = self.Y * O_
# Add to the loss based on how off we were
L = -1.0 * np.sum(correct_word_predictions)
# res = -np.tensordot(self.Y, O_, axes=((0, 1, 2), (0, 1, 2)))
#print('Equality of loss computations: ', L == res)
return L #/(n_phrases * 10)
def accuracy(self):
O_ = self.O.argmax(axis=2)
Y_ = self.Y.argmax(axis=2)
acc = 0.0
acc_tot = 0.0
# print('O_ size: ', O_.size())
N = np.shape(O_)[0]
for i in range(N):
for j in range(self.T):
if Y_[i,j] == 0:
acc_tot += acc
acc = 0.0
break
else:
acc += (Y_[i,j] == O_[i,j])
#comp_res = Y_ == O_
#return comp_res.sum() / (self.N * self.T)
return acc_tot
| apache-2.0 |
meabsence/python-for-android | python3-alpha/python3-src/Lib/test/make_ssl_certs.py | 89 | 1886 | """Make the custom certificate and private key files used by test_ssl
and friends."""
import os
import sys
import tempfile
from subprocess import *
req_template = """
[req]
distinguished_name = req_distinguished_name
x509_extensions = req_x509_extensions
prompt = no
[req_distinguished_name]
C = XY
L = Castle Anthrax
O = Python Software Foundation
CN = {hostname}
[req_x509_extensions]
subjectAltName = DNS:{hostname}
"""
here = os.path.abspath(os.path.dirname(__file__))
def make_cert_key(hostname):
tempnames = []
for i in range(3):
with tempfile.NamedTemporaryFile(delete=False) as f:
tempnames.append(f.name)
req_file, cert_file, key_file = tempnames
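    # The three temp files hold the OpenSSL request config, the certificate, and the private key.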
try:
with open(req_file, 'w') as f:
f.write(req_template.format(hostname=hostname))
args = ['req', '-new', '-days', '3650', '-nodes', '-x509',
'-newkey', 'rsa:1024', '-keyout', key_file,
'-out', cert_file, '-config', req_file]
check_call(['openssl'] + args)
with open(cert_file, 'r') as f:
cert = f.read()
with open(key_file, 'r') as f:
key = f.read()
return cert, key
finally:
for name in tempnames:
os.remove(name)
if __name__ == '__main__':
os.chdir(here)
cert, key = make_cert_key('localhost')
with open('ssl_cert.pem', 'w') as f:
f.write(cert)
with open('ssl_key.pem', 'w') as f:
f.write(key)
with open('keycert.pem', 'w') as f:
f.write(key)
f.write(cert)
# For certificate matching tests
cert, key = make_cert_key('fakehostname')
with open('keycert2.pem', 'w') as f:
f.write(key)
f.write(cert)
| apache-2.0 |
amlyj/pythonStudy | 2.7/contributed_modules/requests/thread_downloader.py | 1 | 5761 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 18-4-23 3:24 PM
# @Author : Tom.Lee
# @CopyRight : 2016-2017 OpenBridge by yihecloud
# @File : thread_downloader.py
# @Product : PyCharm
# @Docs :
# @Source :
import math
import os
import sys
import threading
import time
import requests
import urllib3
urllib3.disable_warnings()
# from requests.packages.urllib3.exceptions import InsecureRequestWarning
#
# requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
default_headers = {
'Cookie': '',
'accept-language:': 'zh-CN,zh;q=0.9',
'accept-encoding': 'gzip, deflate, br',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'cache-control': 'max-age=0',
'Connection': 'keep-alive',
'upgrade-insecure-requests': '1',
'User-Agent': 'Mozilla/5.0 (Linux; Android 5.0; '
'SM-G900P Build/LRX21T) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 '
'Mobile Safari/537.36',
}
def current_time(formatter="%Y-%m-%d %H:%M:%S"):
return time.strftime(formatter, time.localtime(time.time()))
def print_log(s):
print '\033[1;34;m%s ==> %s\033[0m' % (current_time(), s)
def progressbar(current_size, total_size):
"""
进度条
[%-50s] 表示步长50,
current_size * step_size 表示进度长度
:param current_size: 当前下载大小
:param total_size: 文件总大小
:return:
"""
progress_size = 50
current_size = current_size if current_size <= total_size else total_size
    download_percent = 1.0 * current_size / total_size  # download ratio: <= 1
percent = '{:.2%}'.format(download_percent if download_percent <= 1 else 1)
sys.stdout.write('\r')
progress_bar = '=' * int(math.floor(current_size * (progress_size - 1) / total_size)) + '>'
current_size_format = "{0:,}".format(current_size)
sys.stdout.write('[%-50s] %s/(byte) %s' % (progress_bar, current_size_format, percent))
sys.stdout.flush()
if current_size == total_size:
sys.stdout.write(' download ok !\n')
class Downloader(threading.Thread):
"""
下载器
"""
def __init__(self, download_url, lock, chunk_size=512, save_folder=None,
headers=None, callback_func=None, verify=False, *args, **kwargs):
"""
:param download_url: file url
        :param chunk_size: number of bytes to read per chunk
        :param save_folder: folder to save the downloaded file into
        :param headers: request headers
        :param callback_func: response hook callback
        :param verify: TLS certificate verification (False disables it)
:param kwargs:
:return:
"""
super(Downloader, self).__init__(*args, **kwargs)
        self.__flag = threading.Event()  # event used to pause the thread
        self.__flag.set()  # set to True
        self.__running = threading.Event()  # event used to stop the thread
        self.__running.set()  # set running to True
self.download_url = download_url
self.lock = lock
self.chunk_size = chunk_size
self.save_folder = save_folder
self.headers = headers
self.callback_func = callback_func
self.verify = verify
self.args = args
self.kwargs = kwargs
@property
def is_running(self):
"""
获取运行标志
:return: True/False
"""
return self.__running.isSet() and self.__flag.wait()
def run(self):
"""
        Use a while loop with self.is_running to read the running flag.
"""
print_log(" Add task : %s" % self.download_url)
self._execute()
def stop(self):
"""
        Set the flag to False to block the thread.
"""
self.__flag.clear()
def resume(self):
"""
        Set the flag to True so the thread stops blocking.
"""
self.__flag.set()
def exit(self):
"""
        Set the pause flag to True.
        Set the running flag to False.
"""
self.__flag.set()
self.__running.clear()
def _execute(self):
file_name = self.download_url.split('/')[-1:][0].split('?')[0]
if self.save_folder:
try:
assert os.path.exists(self.save_folder) and os.path.isdir(self.save_folder)
except AssertionError:
raise OSError("The specified folder does not exist. %s " % self.save_folder)
file_name = '%s/%s' % (self.save_folder.rstrip('/'), file_name)
response = requests.get(
url=self.download_url,
headers=self.headers or default_headers,
verify=self.verify,
stream=True,
hooks=dict(response=self.callback_func),
**self.kwargs)
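        # Serialize file writes and progress-bar output so concurrent downloads do not interleave.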
lock.acquire()
print_log('Saving file to: "%s"' % file_name)
with open(file_name, "wb") as f:
total_size = float(response.headers.get('Content-Length', default=0))
download_size = 0
for chunk in response.iter_content(chunk_size=self.chunk_size):
if chunk:
f.write(chunk)
download_size += self.chunk_size
progressbar(download_size, total_size)
lock.release()
if __name__ == '__main__':
lock = threading.Lock()
dl1 = Downloader(
download_url='https://github.com/prometheus/node_exporter/archive/remove-gmond.zip',
lock=lock,
save_folder='/tmp')
dl2 = Downloader(
download_url='https://github.com/prometheus/node_exporter/archive/fish-bump-go-1.10.1.zip',
lock=lock,
save_folder='/tmp')
dl1.start()
dl2.start()
| mit |
dhruvagarwal/django | django/contrib/admin/tests.py | 301 | 6166 | import os
from unittest import SkipTest
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.utils.module_loading import import_string
from django.utils.translation import ugettext as _
class AdminSeleniumWebDriverTestCase(StaticLiveServerTestCase):
available_apps = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
]
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
@classmethod
def setUpClass(cls):
if not os.environ.get('DJANGO_SELENIUM_TESTS', False):
raise SkipTest('Selenium tests not requested')
try:
cls.selenium = import_string(cls.webdriver_class)()
except Exception as e:
raise SkipTest('Selenium webdriver "%s" not installed or not '
'operational: %s' % (cls.webdriver_class, str(e)))
# This has to be last to ensure that resources are cleaned up properly!
super(AdminSeleniumWebDriverTestCase, cls).setUpClass()
@classmethod
def _tearDownClassInternal(cls):
if hasattr(cls, 'selenium'):
cls.selenium.quit()
super(AdminSeleniumWebDriverTestCase, cls)._tearDownClassInternal()
def wait_until(self, callback, timeout=10):
"""
Helper function that blocks the execution of the tests until the
specified callback returns a value that is not falsy. This function can
be called, for example, after clicking a link or submitting a form.
See the other public methods that call this function for more details.
"""
from selenium.webdriver.support.wait import WebDriverWait
WebDriverWait(self.selenium, timeout).until(callback)
def wait_loaded_tag(self, tag_name, timeout=10):
"""
Helper function that blocks until the element with the given tag name
is found on the page.
"""
self.wait_for(tag_name, timeout)
def wait_for(self, css_selector, timeout=10):
"""
Helper function that blocks until a CSS selector is found on the page.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
self.wait_until(
ec.presence_of_element_located((By.CSS_SELECTOR, css_selector)),
timeout
)
def wait_for_text(self, css_selector, text, timeout=10):
"""
Helper function that blocks until the text is found in the CSS selector.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
self.wait_until(
ec.text_to_be_present_in_element(
(By.CSS_SELECTOR, css_selector), text),
timeout
)
def wait_for_value(self, css_selector, text, timeout=10):
"""
Helper function that blocks until the value is found in the CSS selector.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
self.wait_until(
ec.text_to_be_present_in_element_value(
(By.CSS_SELECTOR, css_selector), text),
timeout
)
def wait_page_loaded(self):
"""
Block until page has started to load.
"""
from selenium.common.exceptions import TimeoutException
try:
# Wait for the next page to be loaded
self.wait_loaded_tag('body')
except TimeoutException:
# IE7 occasionally returns an error "Internet Explorer cannot
# display the webpage" and doesn't load the next page. We just
# ignore it.
pass
def admin_login(self, username, password, login_url='/admin/'):
"""
Helper function to log into the admin.
"""
self.selenium.get('%s%s' % (self.live_server_url, login_url))
username_input = self.selenium.find_element_by_name('username')
username_input.send_keys(username)
password_input = self.selenium.find_element_by_name('password')
password_input.send_keys(password)
login_text = _('Log in')
self.selenium.find_element_by_xpath(
'//input[@value="%s"]' % login_text).click()
self.wait_page_loaded()
def get_css_value(self, selector, attribute):
"""
Helper function that returns the value for the CSS attribute of an
DOM element specified by the given selector. Uses the jQuery that ships
with Django.
"""
return self.selenium.execute_script(
'return django.jQuery("%s").css("%s")' % (selector, attribute))
def get_select_option(self, selector, value):
"""
Returns the <OPTION> with the value `value` inside the <SELECT> widget
identified by the CSS selector `selector`.
"""
from selenium.common.exceptions import NoSuchElementException
options = self.selenium.find_elements_by_css_selector('%s > option' % selector)
for option in options:
if option.get_attribute('value') == value:
return option
raise NoSuchElementException('Option "%s" not found in "%s"' % (value, selector))
def assertSelectOptions(self, selector, values):
"""
Asserts that the <SELECT> widget identified by `selector` has the
options with the given `values`.
"""
options = self.selenium.find_elements_by_css_selector('%s > option' % selector)
actual_values = []
for option in options:
actual_values.append(option.get_attribute('value'))
self.assertEqual(values, actual_values)
def has_css_class(self, selector, klass):
"""
Returns True if the element identified by `selector` has the CSS class
`klass`.
"""
return (self.selenium.find_element_by_css_selector(selector)
.get_attribute('class').find(klass) != -1)
| bsd-3-clause |
bitmovin/bitcodin-python | examples/create_job_rotation.py | 1 | 1687 | #!/usr/bin/env python
from time import sleep
import bitcodin
bitcodin.api_key = 'YOUR API KEY'
input_obj = bitcodin.Input(url='http://bitbucketireland.s3.amazonaws.com/Sintel-original-short.mkv')
input_result = bitcodin.create_input(input_obj)
video_configs = list()
video_configs.append(bitcodin.VideoStreamConfig(
default_stream_id=0,
bitrate=4800000,
profile='Main',
preset='premium',
height=1080,
width=1920
))
video_configs.append(bitcodin.VideoStreamConfig(
default_stream_id=0,
bitrate=2400000,
profile='Main',
preset='premium',
height=768,
width=1024
))
video_configs.append(bitcodin.VideoStreamConfig(
default_stream_id=0,
bitrate=1200000,
profile='Main',
preset='premium',
height=480,
width=854
))
audio_configs = [bitcodin.AudioStreamConfig(default_stream_id=0, bitrate=192000)]
encoding_profile_obj = bitcodin.EncodingProfile(
name='API Test Profile',
video_stream_configs=video_configs,
audio_stream_configs=audio_configs,
rotation=45
)
encoding_profile_result = bitcodin.create_encoding_profile(encoding_profile_obj)
manifests = ['mpd', 'm3u8']
# Simply give a output_id to the job configuration to which the results should be copied when the job is finished.
job = bitcodin.Job(
input_id=input_result.input_id,
encoding_profile_id=encoding_profile_result.encoding_profile_id,
manifest_types=manifests
)
job_result = bitcodin.create_job(job)
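# Poll the job status every five seconds until it finishes or fails.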
while job_result.status != 'Finished' and job_result.status != 'Error':
job_result = bitcodin.get_job(job_result.job_id)
print(job_result.to_json())
sleep(5)
print(job_result.to_json())
print "Job Finished!" | unlicense |
scizzorz/rain | rain/lexer.py | 1 | 3763 | from . import error as Q
from . import module as M
from .token import bool_token
from .token import coord
from .token import dedent_token
from .token import end_token
from .token import float_token
from .token import indent_token
from .token import int_token
from .token import keyword_token
from .token import name_token
from .token import newline_token
from .token import null_token
from .token import operator_token
from .token import string_token
from .token import symbol_token
from .token import table_token
from collections import OrderedDict
import re
OPERATORS = (
'->', '::',
'<=', '>=', '>', '<', '==', '!=',
'*', '/', '+', '-',
'&', '|', '!', '$',
)
KW_OPERATORS = (
)
KEYWORDS = (
'as', 'bind', 'break', 'catch', 'continue', 'else', 'for', 'foreign', 'func',
'if', 'import', 'in', 'var', 'library', 'link', 'loop', 'macro', 'pass',
'return', 'save', 'until', 'while', 'with',
)
def factory(data, *, pos=coord()):
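  # Map an identifier to a keyword/operator token, or to a name token with a normalized name.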
if data.lower() in KEYWORDS:
return keyword_token(data.lower(), pos=pos)
elif data.lower() in KW_OPERATORS:
return operator_token(data.lower(), pos=pos)
else:
return name_token(M.normalize_name(data), pos=pos)
raw = OrderedDict()
raw[r'#.*'] = None
raw[r'""|"(.*?[^\\])"'] = string_token
raw[r'(?:0|-?[1-9][0-9]*)\.[0-9]+'] = float_token
raw[r'0|-?[1-9][0-9]*'] = int_token
raw[r'true|false'] = bool_token
raw[r'null'] = null_token
raw[r'table'] = table_token
raw[r'[a-zA-Z_][a-zA-Z0-9_]*'] = factory
raw['|'.join(re.escape(x) for x in OPERATORS)] = operator_token
raw[r'.'] = symbol_token
rules = OrderedDict()
for k, v in raw.items():
rules[re.compile(k)] = v
indent = re.compile('^[ ]*')
ignore_whitespace = []
def stream(source):
indents = [0]
line = 1
col = 1
def skip(amt):
nonlocal source, col
source = source[amt:]
col += amt
last = None
while source:
if source[0] == '\n':
# skip repeated newlines
while source and source[0] == '\n':
skip(1)
col = 1
line += 1
# get this line's indentation
depth = indent.match(source)
depth_amt = len(depth.group(0))
# skip this line if it was just an indentation
if source and source[depth_amt] == '\n':
skip(1)
col = 1
line += 1
continue
# handle indents
if not ignore_whitespace:
if depth_amt > indents[-1]:
last = indent_token(pos=coord(line, col, len=depth_amt))
yield last
indents.append(depth_amt)
# handle newlines at the same indentation
else:
if not isinstance(last, (type(None), indent_token, newline_token)):
last = newline_token(pos=coord(line, col))
yield last
# handle dedents
while depth_amt < indents[-1]:
last = newline_token(pos=coord(line, col))
yield dedent_token(pos=coord(line, col))
yield last
del indents[-1]
skip(depth_amt)
if not source:
break
# skip internal whitespace
if source[0].isspace():
skip(1)
continue
# tokenize
for rule, kind in rules.items():
match = rule.match(source)
if match:
value = match.group(0)
if kind:
last = kind(value, pos=coord(line, col, len=len(value)))
if last in (symbol_token('['), symbol_token('{'), symbol_token('(')):
ignore_whitespace.append(True)
elif last in (symbol_token(']'), symbol_token('}'), symbol_token(')')):
if ignore_whitespace:
ignore_whitespace.pop()
else:
Q.abort('unmatched brace', pos=coord(line, col))
yield last
skip(len(value))
break
yield end_token(pos=coord(line, col))
| mit |
robbiet480/home-assistant | homeassistant/components/zwave/light.py | 2 | 13266 | """Support for Z-Wave lights."""
import logging
from threading import Timer
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_TRANSITION,
ATTR_WHITE_VALUE,
DOMAIN,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_TRANSITION,
SUPPORT_WHITE_VALUE,
LightEntity,
)
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
import homeassistant.util.color as color_util
from . import CONF_REFRESH_DELAY, CONF_REFRESH_VALUE, ZWaveDeviceEntity, const
_LOGGER = logging.getLogger(__name__)
COLOR_CHANNEL_WARM_WHITE = 0x01
COLOR_CHANNEL_COLD_WHITE = 0x02
COLOR_CHANNEL_RED = 0x04
COLOR_CHANNEL_GREEN = 0x08
COLOR_CHANNEL_BLUE = 0x10
# Some bulbs have an independent warm and cool white light LEDs. These need
# to be treated differently, aka the zw098 workaround. Ensure these are added
# to DEVICE_MAPPINGS below.
# (Manufacturer ID, Product ID) from
# https://github.com/OpenZWave/open-zwave/blob/master/config/manufacturer_specific.xml
AEOTEC_ZW098_LED_BULB_LIGHT = (0x86, 0x62)
AEOTEC_ZWA001_LED_BULB_LIGHT = (0x371, 0x1)
AEOTEC_ZWA002_LED_BULB_LIGHT = (0x371, 0x2)
HANK_HKZW_RGB01_LED_BULB_LIGHT = (0x208, 0x4)
ZIPATO_RGB_BULB_2_LED_BULB_LIGHT = (0x131, 0x3)
WORKAROUND_ZW098 = "zw098"
DEVICE_MAPPINGS = {
AEOTEC_ZW098_LED_BULB_LIGHT: WORKAROUND_ZW098,
AEOTEC_ZWA001_LED_BULB_LIGHT: WORKAROUND_ZW098,
AEOTEC_ZWA002_LED_BULB_LIGHT: WORKAROUND_ZW098,
HANK_HKZW_RGB01_LED_BULB_LIGHT: WORKAROUND_ZW098,
ZIPATO_RGB_BULB_2_LED_BULB_LIGHT: WORKAROUND_ZW098,
}
# Generate midpoint color temperatures for bulbs that have limited
# support for white light colors
TEMP_COLOR_MAX = 500 # mireds (inverted)
TEMP_COLOR_MIN = 154
TEMP_MID_HASS = (TEMP_COLOR_MAX - TEMP_COLOR_MIN) / 2 + TEMP_COLOR_MIN
TEMP_WARM_HASS = (TEMP_COLOR_MAX - TEMP_COLOR_MIN) / 3 * 2 + TEMP_COLOR_MIN
TEMP_COLD_HASS = (TEMP_COLOR_MAX - TEMP_COLOR_MIN) / 3 + TEMP_COLOR_MIN
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Z-Wave Light from Config Entry."""
@callback
def async_add_light(light):
"""Add Z-Wave Light."""
async_add_entities([light])
async_dispatcher_connect(hass, "zwave_new_light", async_add_light)
def get_device(node, values, node_config, **kwargs):
"""Create Z-Wave entity device."""
refresh = node_config.get(CONF_REFRESH_VALUE)
delay = node_config.get(CONF_REFRESH_DELAY)
_LOGGER.debug(
"node=%d value=%d node_config=%s CONF_REFRESH_VALUE=%s"
" CONF_REFRESH_DELAY=%s",
node.node_id,
values.primary.value_id,
node_config,
refresh,
delay,
)
if node.has_command_class(const.COMMAND_CLASS_SWITCH_COLOR):
return ZwaveColorLight(values, refresh, delay)
return ZwaveDimmer(values, refresh, delay)
def brightness_state(value):
"""Return the brightness and state."""
if value.data > 0:
return round((value.data / 99) * 255), STATE_ON
return 0, STATE_OFF
def byte_to_zwave_brightness(value):
"""Convert brightness in 0-255 scale to 0-99 scale.
`value` -- (int) Brightness byte value from 0-255.
"""
if value > 0:
return max(1, round((value / 255) * 99))
return 0
def ct_to_hs(temp):
"""Convert color temperature (mireds) to hs."""
colorlist = list(
color_util.color_temperature_to_hs(
color_util.color_temperature_mired_to_kelvin(temp)
)
)
return [int(val) for val in colorlist]
class ZwaveDimmer(ZWaveDeviceEntity, LightEntity):
"""Representation of a Z-Wave dimmer."""
def __init__(self, values, refresh, delay):
"""Initialize the light."""
ZWaveDeviceEntity.__init__(self, values, DOMAIN)
self._brightness = None
self._state = None
self._supported_features = None
self._delay = delay
self._refresh_value = refresh
self._zw098 = None
# Enable appropriate workaround flags for our device
# Make sure that we have values for the key before converting to int
if self.node.manufacturer_id.strip() and self.node.product_id.strip():
specific_sensor_key = (
int(self.node.manufacturer_id, 16),
int(self.node.product_id, 16),
)
if specific_sensor_key in DEVICE_MAPPINGS:
if DEVICE_MAPPINGS[specific_sensor_key] == WORKAROUND_ZW098:
_LOGGER.debug("AEOTEC ZW098 workaround enabled")
self._zw098 = 1
# Used for value change event handling
self._refreshing = False
self._timer = None
_LOGGER.debug(
"self._refreshing=%s self.delay=%s", self._refresh_value, self._delay
)
self.value_added()
self.update_properties()
def update_properties(self):
"""Update internal properties based on zwave values."""
# Brightness
self._brightness, self._state = brightness_state(self.values.primary)
def value_added(self):
"""Call when a new value is added to this entity."""
self._supported_features = SUPPORT_BRIGHTNESS
if self.values.dimming_duration is not None:
self._supported_features |= SUPPORT_TRANSITION
def value_changed(self):
"""Call when a value for this entity's node has changed."""
if self._refresh_value:
if self._refreshing:
self._refreshing = False
else:
def _refresh_value():
"""Use timer callback for delayed value refresh."""
self._refreshing = True
self.values.primary.refresh()
if self._timer is not None and self._timer.isAlive():
self._timer.cancel()
self._timer = Timer(self._delay, _refresh_value)
self._timer.start()
return
super().value_changed()
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def is_on(self):
"""Return true if device is on."""
return self._state == STATE_ON
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_features
def _set_duration(self, **kwargs):
"""Set the transition time for the brightness value.
Zwave Dimming Duration values:
0x00 = instant
0x01-0x7F = 1 second to 127 seconds
0x80-0xFE = 1 minute to 127 minutes
0xFF = factory default
"""
if self.values.dimming_duration is None:
if ATTR_TRANSITION in kwargs:
_LOGGER.debug("Dimming not supported by %s.", self.entity_id)
return
if ATTR_TRANSITION not in kwargs:
self.values.dimming_duration.data = 0xFF
return
transition = kwargs[ATTR_TRANSITION]
if transition <= 127:
self.values.dimming_duration.data = int(transition)
elif transition > 7620:
self.values.dimming_duration.data = 0xFE
_LOGGER.warning("Transition clipped to 127 minutes for %s.", self.entity_id)
else:
minutes = int(transition / 60)
_LOGGER.debug(
"Transition rounded to %d minutes for %s.", minutes, self.entity_id
)
self.values.dimming_duration.data = minutes + 0x7F
def turn_on(self, **kwargs):
"""Turn the device on."""
self._set_duration(**kwargs)
# Zwave multilevel switches use a range of [0, 99] to control
# brightness. Level 255 means to set it to previous value.
if ATTR_BRIGHTNESS in kwargs:
self._brightness = kwargs[ATTR_BRIGHTNESS]
brightness = byte_to_zwave_brightness(self._brightness)
else:
brightness = 255
if self.node.set_dimmer(self.values.primary.value_id, brightness):
self._state = STATE_ON
def turn_off(self, **kwargs):
"""Turn the device off."""
self._set_duration(**kwargs)
if self.node.set_dimmer(self.values.primary.value_id, 0):
self._state = STATE_OFF
class ZwaveColorLight(ZwaveDimmer):
"""Representation of a Z-Wave color changing light."""
def __init__(self, values, refresh, delay):
"""Initialize the light."""
self._color_channels = None
self._hs = None
self._ct = None
self._white = None
super().__init__(values, refresh, delay)
def value_added(self):
"""Call when a new value is added to this entity."""
super().value_added()
self._supported_features |= SUPPORT_COLOR
if self._zw098:
self._supported_features |= SUPPORT_COLOR_TEMP
elif self._color_channels is not None and self._color_channels & (
COLOR_CHANNEL_WARM_WHITE | COLOR_CHANNEL_COLD_WHITE
):
self._supported_features |= SUPPORT_WHITE_VALUE
def update_properties(self):
"""Update internal properties based on zwave values."""
super().update_properties()
if self.values.color is None:
return
if self.values.color_channels is None:
return
# Color Channels
self._color_channels = self.values.color_channels.data
# Color Data String
data = self.values.color.data
# RGB is always present in the openzwave color data string.
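        # For example (illustrative), a data value of "#ff8000ff00" parses to
        # red=0xff, green=0x80, blue=0x00, with any white-channel bytes
        # handled just below.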
rgb = [int(data[1:3], 16), int(data[3:5], 16), int(data[5:7], 16)]
self._hs = color_util.color_RGB_to_hs(*rgb)
# Parse remaining color channels. Openzwave appends white channels
# that are present.
index = 7
# Warm white
if self._color_channels & COLOR_CHANNEL_WARM_WHITE:
warm_white = int(data[index : index + 2], 16)
index += 2
else:
warm_white = 0
# Cold white
if self._color_channels & COLOR_CHANNEL_COLD_WHITE:
cold_white = int(data[index : index + 2], 16)
index += 2
else:
cold_white = 0
# Color temperature. With the AEOTEC ZW098 bulb, only two color
# temperatures are supported. The warm and cold channel values
# indicate brightness for warm/cold color temperature.
if self._zw098:
if warm_white > 0:
self._ct = TEMP_WARM_HASS
self._hs = ct_to_hs(self._ct)
elif cold_white > 0:
self._ct = TEMP_COLD_HASS
self._hs = ct_to_hs(self._ct)
else:
# RGB color is being used. Just report midpoint.
self._ct = TEMP_MID_HASS
elif self._color_channels & COLOR_CHANNEL_WARM_WHITE:
self._white = warm_white
elif self._color_channels & COLOR_CHANNEL_COLD_WHITE:
self._white = cold_white
# If no rgb channels supported, report None.
if not (
self._color_channels & COLOR_CHANNEL_RED
or self._color_channels & COLOR_CHANNEL_GREEN
or self._color_channels & COLOR_CHANNEL_BLUE
):
self._hs = None
@property
def hs_color(self):
"""Return the hs color."""
return self._hs
@property
def white_value(self):
"""Return the white value of this light between 0..255."""
return self._white
@property
def color_temp(self):
"""Return the color temperature."""
return self._ct
def turn_on(self, **kwargs):
"""Turn the device on."""
rgbw = None
if ATTR_WHITE_VALUE in kwargs:
self._white = kwargs[ATTR_WHITE_VALUE]
if ATTR_COLOR_TEMP in kwargs:
# Color temperature. With the AEOTEC ZW098 bulb, only two color
# temperatures are supported. The warm and cold channel values
# indicate brightness for warm/cold color temperature.
if self._zw098:
if kwargs[ATTR_COLOR_TEMP] > TEMP_MID_HASS:
self._ct = TEMP_WARM_HASS
rgbw = "#000000ff00"
else:
self._ct = TEMP_COLD_HASS
rgbw = "#00000000ff"
elif ATTR_HS_COLOR in kwargs:
self._hs = kwargs[ATTR_HS_COLOR]
if ATTR_WHITE_VALUE not in kwargs:
# white LED must be off in order for color to work
self._white = 0
if (
ATTR_WHITE_VALUE in kwargs or ATTR_HS_COLOR in kwargs
) and self._hs is not None:
rgbw = "#"
for colorval in color_util.color_hs_to_RGB(*self._hs):
rgbw += format(colorval, "02x")
if self._white is not None:
rgbw += format(self._white, "02x") + "00"
else:
rgbw += "0000"
if rgbw and self.values.color:
self.values.color.data = rgbw
super().turn_on(**kwargs)
| apache-2.0 |
napkindrawing/ansible | lib/ansible/modules/database/postgresql/postgresql_db.py | 54 | 10855 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: postgresql_db
short_description: Add or remove PostgreSQL databases from a remote host.
description:
- Add or remove PostgreSQL databases from a remote host.
version_added: "0.6"
options:
name:
description:
- name of the database to add or remove
required: true
default: null
owner:
description:
- Name of the role to set as owner of the database
required: false
default: null
template:
description:
- Template used to create the database
required: false
default: null
encoding:
description:
- Encoding of the database
required: false
default: null
lc_collate:
description:
- Collation order (LC_COLLATE) to use in the database. Must match collation order of template database unless C(template0) is used as template.
required: false
default: null
lc_ctype:
description:
- Character classification (LC_CTYPE) to use in the database (e.g. lower, upper, ...) Must match LC_CTYPE of template database unless C(template0)
is used as template.
required: false
default: null
state:
description:
- The database state
required: false
default: present
choices: [ "present", "absent" ]
author: "Ansible Core Team"
extends_documentation_fragment:
- postgres
'''
EXAMPLES = '''
# Create a new database with name "acme"
- postgresql_db:
name: acme
# Create a new database with name "acme" and specific encoding and locale
# settings. If a template different from "template0" is specified, encoding
# and locale settings must match those of the template.
- postgresql_db:
name: acme
encoding: UTF-8
lc_collate: de_DE.UTF-8
lc_ctype: de_DE.UTF-8
template: template0
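# Illustrative extra example (not from the original module docs): remove
# the "acme" database using the documented state option.
- postgresql_db:
    name: acme
    state: absent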
'''
HAS_PSYCOPG2 = False
try:
import psycopg2
import psycopg2.extras
except ImportError:
pass
else:
HAS_PSYCOPG2 = True
from ansible.module_utils.six import iteritems
import traceback
import ansible.module_utils.postgres as pgutils
from ansible.module_utils.database import SQLParseError, pg_quote_identifier
from ansible.module_utils.basic import get_exception, AnsibleModule
class NotSupportedError(Exception):
pass
# ===========================================
# PostgreSQL module specific support methods.
#
def set_owner(cursor, db, owner):
query = "ALTER DATABASE %s OWNER TO %s" % (
pg_quote_identifier(db, 'database'),
pg_quote_identifier(owner, 'role'))
cursor.execute(query)
return True
def get_encoding_id(cursor, encoding):
query = "SELECT pg_char_to_encoding(%(encoding)s) AS encoding_id;"
cursor.execute(query, {'encoding': encoding})
return cursor.fetchone()['encoding_id']
def get_db_info(cursor, db):
query = """
SELECT rolname AS owner,
pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id,
datcollate AS lc_collate, datctype AS lc_ctype
FROM pg_database JOIN pg_roles ON pg_roles.oid = pg_database.datdba
WHERE datname = %(db)s
"""
cursor.execute(query, {'db': db})
return cursor.fetchone()
def db_exists(cursor, db):
query = "SELECT * FROM pg_database WHERE datname=%(db)s"
cursor.execute(query, {'db': db})
return cursor.rowcount == 1
def db_delete(cursor, db):
if db_exists(cursor, db):
query = "DROP DATABASE %s" % pg_quote_identifier(db, 'database')
cursor.execute(query)
return True
else:
return False
def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype):
params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype)
if not db_exists(cursor, db):
query_fragments = ['CREATE DATABASE %s' % pg_quote_identifier(db, 'database')]
if owner:
query_fragments.append('OWNER %s' % pg_quote_identifier(owner, 'role'))
if template:
query_fragments.append('TEMPLATE %s' % pg_quote_identifier(template, 'database'))
if encoding:
query_fragments.append('ENCODING %(enc)s')
if lc_collate:
query_fragments.append('LC_COLLATE %(collate)s')
if lc_ctype:
query_fragments.append('LC_CTYPE %(ctype)s')
query = ' '.join(query_fragments)
cursor.execute(query, params)
return True
else:
db_info = get_db_info(cursor, db)
if (encoding and
get_encoding_id(cursor, encoding) != db_info['encoding_id']):
raise NotSupportedError(
'Changing database encoding is not supported. '
'Current encoding: %s' % db_info['encoding']
)
elif lc_collate and lc_collate != db_info['lc_collate']:
raise NotSupportedError(
'Changing LC_COLLATE is not supported. '
'Current LC_COLLATE: %s' % db_info['lc_collate']
)
elif lc_ctype and lc_ctype != db_info['lc_ctype']:
raise NotSupportedError(
                'Changing LC_CTYPE is not supported. '
'Current LC_CTYPE: %s' % db_info['lc_ctype']
)
elif owner and owner != db_info['owner']:
return set_owner(cursor, db, owner)
else:
return False
def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype):
if not db_exists(cursor, db):
return False
else:
db_info = get_db_info(cursor, db)
if (encoding and
get_encoding_id(cursor, encoding) != db_info['encoding_id']):
return False
elif lc_collate and lc_collate != db_info['lc_collate']:
return False
elif lc_ctype and lc_ctype != db_info['lc_ctype']:
return False
elif owner and owner != db_info['owner']:
return False
else:
return True
# ===========================================
# Module execution.
#
def main():
argument_spec = pgutils.postgres_common_argument_spec()
argument_spec.update(dict(
db=dict(required=True, aliases=['name']),
owner=dict(default=""),
template=dict(default=""),
encoding=dict(default=""),
lc_collate=dict(default=""),
lc_ctype=dict(default=""),
state=dict(default="present", choices=["absent", "present"]),
))
module = AnsibleModule(
argument_spec=argument_spec,
        supports_check_mode=True
)
if not HAS_PSYCOPG2:
module.fail_json(msg="the python psycopg2 module is required")
db = module.params["db"]
port = module.params["port"]
owner = module.params["owner"]
template = module.params["template"]
encoding = module.params["encoding"]
lc_collate = module.params["lc_collate"]
lc_ctype = module.params["lc_ctype"]
state = module.params["state"]
sslrootcert = module.params["ssl_rootcert"]
changed = False
# To use defaults values, keyword arguments must be absent, so
# check which values are empty and don't include in the **kw
# dictionary
params_map = {
"login_host":"host",
"login_user":"user",
"login_password":"password",
"port":"port",
"ssl_mode":"sslmode",
"ssl_rootcert":"sslrootcert"
}
kw = dict( (params_map[k], v) for (k, v) in iteritems(module.params)
if k in params_map and v != '' and v is not None)
# If a login_unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
if is_localhost and module.params["login_unix_socket"] != "":
kw["host"] = module.params["login_unix_socket"]
try:
pgutils.ensure_libs(sslrootcert=module.params.get('ssl_rootcert'))
db_connection = psycopg2.connect(database="postgres", **kw)
# Enable autocommit so we can create databases
if psycopg2.__version__ >= '2.4.2':
db_connection.autocommit = True
else:
db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
except pgutils.LibraryError:
e = get_exception()
module.fail_json(msg="unable to connect to database: {0}".format(str(e)), exception=traceback.format_exc())
except TypeError:
e = get_exception()
if 'sslrootcert' in e.args[0]:
module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert. Exception: {0}'.format(e),
exception=traceback.format_exc())
module.fail_json(msg="unable to connect to database: %s" % e, exception=traceback.format_exc())
except Exception:
e = get_exception()
module.fail_json(msg="unable to connect to database: %s" % e, exception=traceback.format_exc())
try:
if module.check_mode:
if state == "absent":
changed = db_exists(cursor, db)
elif state == "present":
changed = not db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype)
module.exit_json(changed=changed, db=db)
if state == "absent":
try:
changed = db_delete(cursor, db)
except SQLParseError:
e = get_exception()
module.fail_json(msg=str(e))
elif state == "present":
try:
changed = db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype)
except SQLParseError:
e = get_exception()
module.fail_json(msg=str(e))
except NotSupportedError:
e = get_exception()
module.fail_json(msg=str(e))
except SystemExit:
# Avoid catching this on Python 2.4
raise
except Exception:
e = get_exception()
module.fail_json(msg="Database query failed: %s" % e)
module.exit_json(changed=changed, db=db)
if __name__ == '__main__':
main()
| gpl-3.0 |
nju520/django | django/db/backends/oracle/introspection.py | 517 | 11463 | import cx_Oracle
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.utils.encoding import force_text
class DatabaseIntrospection(BaseDatabaseIntrospection):
# Maps type objects to Django Field types.
data_types_reverse = {
cx_Oracle.BLOB: 'BinaryField',
cx_Oracle.CLOB: 'TextField',
cx_Oracle.DATETIME: 'DateField',
cx_Oracle.FIXED_CHAR: 'CharField',
cx_Oracle.NCLOB: 'TextField',
cx_Oracle.NUMBER: 'DecimalField',
cx_Oracle.STRING: 'CharField',
cx_Oracle.TIMESTAMP: 'DateTimeField',
}
try:
data_types_reverse[cx_Oracle.NATIVE_FLOAT] = 'FloatField'
except AttributeError:
pass
try:
data_types_reverse[cx_Oracle.UNICODE] = 'CharField'
except AttributeError:
pass
cache_bust_counter = 1
def get_field_type(self, data_type, description):
# If it's a NUMBER with scale == 0, consider it an IntegerField
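        # Illustrative mappings given the branches below: NUMBER(1,0) becomes
        # BooleanField, NUMBER(10,0) IntegerField, NUMBER(19,0) BigIntegerField,
        # and a scale of -127 maps to FloatField.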
if data_type == cx_Oracle.NUMBER:
precision, scale = description[4:6]
if scale == 0:
if precision > 11:
return 'BigIntegerField'
elif precision == 1:
return 'BooleanField'
else:
return 'IntegerField'
elif scale == -127:
return 'FloatField'
return super(DatabaseIntrospection, self).get_field_type(data_type, description)
def get_table_list(self, cursor):
"""
Returns a list of table and view names in the current database.
"""
cursor.execute("SELECT TABLE_NAME, 't' FROM USER_TABLES UNION ALL "
"SELECT VIEW_NAME, 'v' FROM USER_VIEWS")
return [TableInfo(row[0].lower(), row[1]) for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
self.cache_bust_counter += 1
cursor.execute("SELECT * FROM {} WHERE ROWNUM < 2 AND {} > 0".format(
self.connection.ops.quote_name(table_name),
self.cache_bust_counter))
description = []
for desc in cursor.description:
name = force_text(desc[0]) # cx_Oracle always returns a 'str' on both Python 2 and 3
name = name % {} # cx_Oracle, for some reason, doubles percent signs.
description.append(FieldInfo(*(name.lower(),) + desc[1:]))
return description
def table_name_converter(self, name):
"Table name comparison is case insensitive under Oracle"
return name.lower()
def _name_to_index(self, cursor, table_name):
"""
Returns a dictionary of {field_name: field_index} for the given table.
Indexes are 0-based.
"""
return {d[0]: i for i, d in enumerate(self.get_table_description(cursor, table_name))}
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
table_name = table_name.upper()
cursor.execute("""
SELECT ta.column_name, tb.table_name, tb.column_name
FROM user_constraints, USER_CONS_COLUMNS ca, USER_CONS_COLUMNS cb,
user_tab_cols ta, user_tab_cols tb
WHERE user_constraints.table_name = %s AND
ta.table_name = user_constraints.table_name AND
ta.column_name = ca.column_name AND
ca.table_name = ta.table_name AND
user_constraints.constraint_name = ca.constraint_name AND
user_constraints.r_constraint_name = cb.constraint_name AND
cb.table_name = tb.table_name AND
cb.column_name = tb.column_name AND
ca.position = cb.position""", [table_name])
relations = {}
for row in cursor.fetchall():
relations[row[0].lower()] = (row[2].lower(), row[1].lower())
return relations
def get_key_columns(self, cursor, table_name):
cursor.execute("""
SELECT ccol.column_name, rcol.table_name AS referenced_table, rcol.column_name AS referenced_column
FROM user_constraints c
JOIN user_cons_columns ccol
ON ccol.constraint_name = c.constraint_name
JOIN user_cons_columns rcol
ON rcol.constraint_name = c.r_constraint_name
WHERE c.table_name = %s AND c.constraint_type = 'R'""", [table_name.upper()])
return [tuple(cell.lower() for cell in row)
for row in cursor.fetchall()]
def get_indexes(self, cursor, table_name):
sql = """
SELECT LOWER(uic1.column_name) AS column_name,
CASE user_constraints.constraint_type
WHEN 'P' THEN 1 ELSE 0
END AS is_primary_key,
CASE user_indexes.uniqueness
WHEN 'UNIQUE' THEN 1 ELSE 0
END AS is_unique
FROM user_constraints, user_indexes, user_ind_columns uic1
WHERE user_constraints.constraint_type (+) = 'P'
AND user_constraints.index_name (+) = uic1.index_name
AND user_indexes.uniqueness (+) = 'UNIQUE'
AND user_indexes.index_name (+) = uic1.index_name
AND uic1.table_name = UPPER(%s)
AND uic1.column_position = 1
AND NOT EXISTS (
SELECT 1
FROM user_ind_columns uic2
WHERE uic2.index_name = uic1.index_name
AND uic2.column_position = 2
)
"""
cursor.execute(sql, [table_name])
indexes = {}
for row in cursor.fetchall():
indexes[row[0]] = {'primary_key': bool(row[1]),
'unique': bool(row[2])}
return indexes
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
"""
constraints = {}
# Loop over the constraints, getting PKs and uniques
cursor.execute("""
SELECT
user_constraints.constraint_name,
LOWER(cols.column_name) AS column_name,
CASE user_constraints.constraint_type
WHEN 'P' THEN 1
ELSE 0
END AS is_primary_key,
CASE user_indexes.uniqueness
WHEN 'UNIQUE' THEN 1
ELSE 0
END AS is_unique,
CASE user_constraints.constraint_type
WHEN 'C' THEN 1
ELSE 0
END AS is_check_constraint
FROM
user_constraints
INNER JOIN
user_indexes ON user_indexes.index_name = user_constraints.index_name
LEFT OUTER JOIN
user_cons_columns cols ON user_constraints.constraint_name = cols.constraint_name
WHERE
(
user_constraints.constraint_type = 'P' OR
user_constraints.constraint_type = 'U'
)
AND user_constraints.table_name = UPPER(%s)
ORDER BY cols.position
""", [table_name])
for constraint, column, pk, unique, check in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": pk,
"unique": unique,
"foreign_key": None,
"check": check,
"index": True, # All P and U come with index, see inner join above
}
# Record the details
constraints[constraint]['columns'].append(column)
# Check constraints
cursor.execute("""
SELECT
cons.constraint_name,
LOWER(cols.column_name) AS column_name
FROM
user_constraints cons
LEFT OUTER JOIN
user_cons_columns cols ON cons.constraint_name = cols.constraint_name
WHERE
cons.constraint_type = 'C' AND
cons.table_name = UPPER(%s)
ORDER BY cols.position
""", [table_name])
for constraint, column in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": False,
"unique": False,
"foreign_key": None,
"check": True,
"index": False,
}
# Record the details
constraints[constraint]['columns'].append(column)
# Foreign key constraints
cursor.execute("""
SELECT
cons.constraint_name,
LOWER(cols.column_name) AS column_name,
LOWER(rcons.table_name),
LOWER(rcols.column_name)
FROM
user_constraints cons
INNER JOIN
user_constraints rcons ON cons.r_constraint_name = rcons.constraint_name
INNER JOIN
user_cons_columns rcols ON rcols.constraint_name = rcons.constraint_name
LEFT OUTER JOIN
user_cons_columns cols ON cons.constraint_name = cols.constraint_name
WHERE
cons.constraint_type = 'R' AND
cons.table_name = UPPER(%s)
ORDER BY cols.position
""", [table_name])
for constraint, column, other_table, other_column in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": False,
"unique": False,
"foreign_key": (other_table, other_column),
"check": False,
"index": False,
}
# Record the details
constraints[constraint]['columns'].append(column)
# Now get indexes
cursor.execute("""
SELECT
index_name,
LOWER(column_name)
FROM
user_ind_columns cols
WHERE
table_name = UPPER(%s) AND
NOT EXISTS (
SELECT 1
FROM user_constraints cons
WHERE cols.index_name = cons.index_name
)
ORDER BY cols.column_position
""", [table_name])
for constraint, column in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": False,
"unique": False,
"foreign_key": None,
"check": False,
"index": True,
}
# Record the details
constraints[constraint]['columns'].append(column)
return constraints
| bsd-3-clause |
mixman/djangodev | tests/regressiontests/null_fk_ordering/models.py | 34 | 1358 | """
Regression tests for proper working of ForeignKey(null=True). Tests these bugs:
* #7512: including a nullable foreign key reference in Meta ordering has unexpected results
"""
from django.db import models
# The first two models represent a very simple null FK ordering case.
class Author(models.Model):
name = models.CharField(max_length=150)
class Article(models.Model):
title = models.CharField(max_length=150)
author = models.ForeignKey(Author, null=True)
def __unicode__(self):
return u'Article titled: %s' % (self.title, )
class Meta:
ordering = ['author__name', ]
# These following 4 models represent a far more complex ordering case.
class SystemInfo(models.Model):
system_name = models.CharField(max_length=32)
class Forum(models.Model):
system_info = models.ForeignKey(SystemInfo)
forum_name = models.CharField(max_length=32)
class Post(models.Model):
forum = models.ForeignKey(Forum, null=True)
title = models.CharField(max_length=32)
def __unicode__(self):
return self.title
class Comment(models.Model):
post = models.ForeignKey(Post, null=True)
comment_text = models.CharField(max_length=250)
class Meta:
ordering = ['post__forum__system_info__system_name', 'comment_text']
def __unicode__(self):
return self.comment_text
| bsd-3-clause |
Thraxis/pymedusa | lib/adba/aniDBlink.py | 69 | 7333 | #!/usr/bin/env python
#
# This file is part of aDBa.
#
# aDBa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# aDBa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with aDBa. If not, see <http://www.gnu.org/licenses/>.
import socket, sys, zlib
from time import time, sleep
import threading
from aniDBresponses import ResponseResolver
from aniDBerrors import *
class AniDBLink(threading.Thread):
def __init__(self, server, port, myport, logFunction, delay=2, timeout=20, logPrivate=False):
super(AniDBLink, self).__init__()
self.server = server
self.port = port
self.target = (server, port)
self.timeout = timeout
self.myport = 0
self.bound = self.connectSocket(myport, self.timeout)
self.cmd_queue = {None:None}
self.resp_tagged_queue = {}
self.resp_untagged_queue = []
self.tags = []
self.lastpacket = time()
self.delay = delay
self.session = None
self.banned = False
self.crypt = None
self.log = logFunction
self.logPrivate = logPrivate
self._stop = threading.Event()
self._quiting = False
self.setDaemon(True)
self.start()
def connectSocket(self, myport, timeout):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.settimeout(timeout)
portlist = [myport] + [7654]
for port in portlist:
try:
self.sock.bind(('', port))
except:
continue
else:
self.myport = port
return True
else:
return False
def disconnectSocket(self):
self.sock.close()
def stop (self):
self.log("Releasing socket and stopping link thread")
self._quiting = True
self.disconnectSocket()
self._stop.set()
def stopped (self):
return self._stop.isSet()
def print_log(self, data):
print data
def print_log_dummy(self, data):
pass
def run(self):
while not self._quiting:
try:
data = self.sock.recv(8192)
except socket.timeout:
self._handle_timeouts()
continue
self.log("NetIO < %s" % repr(data))
try:
for i in range(2):
try:
tmp = data
resp = None
if tmp[:2] == '\x00\x00':
tmp = zlib.decompressobj().decompress(tmp[2:])
self.log("UnZip | %s" % repr(tmp))
resp = ResponseResolver(tmp)
except:
sys.excepthook(*sys.exc_info())
self.crypt = None
self.session = None
else:
break
if not resp:
raise AniDBPacketCorruptedError, "Either decrypting, decompressing or parsing the packet failed"
cmd = self._cmd_dequeue(resp)
resp = resp.resolve(cmd)
resp.parse()
if resp.rescode in ('200', '201'):
self.session = resp.attrs['sesskey']
if resp.rescode in ('209',):
print "sorry encryption is not supported"
raise
#self.crypt=aes(md5(resp.req.apipassword+resp.attrs['salt']).digest())
if resp.rescode in ('203', '403', '500', '501', '503', '506'):
self.session = None
self.crypt = None
if resp.rescode in ('504', '555'):
self.banned = True
print "AniDB API informs that user or client is banned:", resp.resstr
resp.handle()
if not cmd or not cmd.mode:
self._resp_queue(resp)
else:
self.tags.remove(resp.restag)
except:
sys.excepthook(*sys.exc_info())
print "Avoiding flood by paranoidly panicing: Aborting link thread, killing connection, releasing waiters and quiting"
self.sock.close()
try:cmd.waiter.release()
except:pass
for tag, cmd in self.cmd_queue.iteritems():
try:cmd.waiter.release()
except:pass
sys.exit()
def _handle_timeouts(self):
willpop = []
for tag, cmd in self.cmd_queue.iteritems():
if not tag:
continue
if time() - cmd.started > self.timeout:
self.tags.remove(cmd.tag)
willpop.append(cmd.tag)
cmd.waiter.release()
for tag in willpop:
self.cmd_queue.pop(tag)
def _resp_queue(self, response):
if response.restag:
self.resp_tagged_queue[response.restag] = response
else:
self.resp_untagged_queue.append(response)
def getresponse(self, command):
if command:
resp = self.resp_tagged_queue.pop(command.tag)
else:
resp = self.resp_untagged_queue.pop()
self.tags.remove(resp.restag)
return resp
def _cmd_queue(self, command):
self.cmd_queue[command.tag] = command
self.tags.append(command.tag)
def _cmd_dequeue(self, resp):
if not resp.restag:
return None
else:
return self.cmd_queue.pop(resp.restag)
def _delay(self):
return (self.delay < 2.1 and 2.1 or self.delay)
def _do_delay(self):
age = time() - self.lastpacket
delay = self._delay()
if age <= delay:
sleep(delay - age)
def _send(self, command):
if self.banned:
self.log("NetIO | BANNED")
raise AniDBBannedError, "Not sending, banned"
self._do_delay()
self.lastpacket = time()
command.started = time()
data = command.raw_data()
self.sock.sendto(data, self.target)
if command.command == 'AUTH' and self.logPrivate:
self.log("NetIO > sensitive data is not logged!")
else:
self.log("NetIO > %s" % repr(data))
def new_tag(self):
if not len(self.tags):
maxtag = "T000"
else:
maxtag = max(self.tags)
newtag = "T%03d" % (int(maxtag[1:]) + 1)
return newtag
def request(self, command):
if not (self.session and command.session) and command.command not in ('AUTH', 'PING', 'ENCRYPT'):
raise AniDBMustAuthError, "You must be authed to execute commands besides AUTH and PING"
command.started = time()
self._cmd_queue(command)
self._send(command)
| gpl-3.0 |
dustymabe/atomic | Atomic/satellite.py | 3 | 13673 | import sys
import requests
import json
import os
import base64
from Atomic import util
try:
import ConfigParser as configparser
except ImportError: # py3 compat
import configparser
# On latest Fedora, this is a symlink
if hasattr(requests, 'packages'):
requests.packages.urllib3.disable_warnings()
else:
# But with python-requests-2.4.3-1.el7.noarch, we need
# to talk to urllib3 directly
have_urllib3 = False
try:
import urllib3
have_urllib3 = True
except ImportError as e:
pass
if have_urllib3:
# Except only call disable-warnings if it exists
if hasattr(urllib3, 'disable_warnings'):
urllib3.disable_warnings()
def push_image_to_satellite(image, server_url, username, password,
verify_ssl, docker_client, activation_key,
repo_id, debug=False):
if not image:
raise ValueError("Image required")
parts = image.split("/")
    if len(parts) > 1:
if parts[0].find(".") != -1:
server_url = parts[0]
image = ("/").join(parts[1:])
if not server_url:
raise ValueError("Satellite server url required")
if not server_url.startswith("http"):
server_url = "https://" + server_url
try:
sat = SatelliteServer(server_url=server_url, username=username,
password=password, verify_ssl=verify_ssl,
docker_client=docker_client, debug=debug)
except Exception as e:
raise IOError('Failed to initialize Satellite: {0}'.format(e))
if not sat.is_repo(repo_id):
raise IOError("""Invalid Repository ID: {0}. Please create that repository
and try again, or input a different ID.""".format(repo_id).replace('\n', ' '))
keyData = sat.get_data(repo_id, activation_key)
content_view_id = keyData.get("content_view_id")
org_id = keyData.get("org_id")
product_id = keyData.get("product_id")
try:
util.writeOut('Uploading image "{0}" to server "{1}"'.format(
image, server_url))
sat.upload_docker_image(image, repo_id)
util.writeOut("")
except Exception as e:
raise IOError('Failed to upload image: {0}'.format(e))
sat.publish_view(content_view_id, repo_id)
print("Push Complete")
class SatelliteServer(object):
"""Interact with Satellite API"""
def __init__(self, server_url, username, password, verify_ssl,
docker_client, debug=False):
self._server_url = server_url
self._username = username
self._password = password
self._verify_ssl = verify_ssl
self._docker_client = docker_client
self._web_distributor = "docker_web_distributor_name_cli"
self._export_distributor = "docker_export_distributor_name_cli"
self._importer = "docker_importer"
self._export_dir = "/var/www/pub/docker/web/"
self._unit_type_id = "docker_image"
self._chunk_size = 1048576 # 1 MB per upload call
self._debug = debug
def _call_satellite(self, url, req_type='get', payload=None,
filePayload=None):
"""This function handles requests to the Satellite Server"""
if req_type == 'get':
if (self._debug):
print('Calling Satellite URL "{0}"'.format(url))
r = requests.get(url, auth=(self._username, self._password),
verify=self._verify_ssl)
elif req_type == 'post':
if (self._debug):
print('Posting to Satellite URL "{0}"'.format(url))
if payload:
print('Satellite HTTP payload:\n{0}'.format(
json.dumps(payload, indent=2)))
r = requests.post(url, auth=(self._username, self._password),
data=json.dumps(payload),
verify=self._verify_ssl)
elif req_type == 'post-nodata':
if (self._debug):
print('Posting to Satellite URL "{0}". No data sent.'.format(
url))
header = {'Content-Type': 'application/json'}
r = requests.post(url, auth=(self._username, self._password),
headers=header, data=json.dumps(payload),
verify=self._verify_ssl)
elif req_type == 'put':
if self._debug:
print('Putting to Satellite URL "{0}"'.format(url))
r = requests.put(url, auth=(self._username, self._password),
data=payload, verify=self._verify_ssl)
elif req_type == 'put-jsonHead':
if self._debug:
print('Putting with json header to Satellite URL "{0}"'
.format(url))
header = {'Content-Type': 'application/json'}
r = requests.put(url, auth=(self._username, self._password),
headers=header, data=json.dumps(payload),
verify=self._verify_ssl)
elif req_type == 'put-multi-part':
if self._debug:
print('Multi-Part Putting to Satellite URL "{0}"'.format(url))
header = {
'multipart': True,
'accept': 'application/json;version=2',
'content-type': 'multipart/form-data'
}
r = requests.put(url, auth=(self._username, self._password),
headers=header, data=payload,
verify=self._verify_ssl)
elif req_type == 'delete':
if self._debug:
print('Delete call to Satellite URL "{0}"'.format(url))
header = {'Content-Type': 'application/json'}
r = requests.delete(url, auth=(self._username, self._password),
headers=header, verify=self._verify_ssl)
else:
raise ValueError('Invalid value of "req_type" parameter: {0}'
.format(req_type))
if self._debug:
print(r)
try:
r_json = r.json()
except Exception as e:
# some requests don't return a json object
return None
if ('errors' in r_json):
sys.stderr.write('Error message from Satellite response:\n{0}\n'
.format(r_json['errors']))
if 'spawned_tasks' in r_json:
for task in r_json['spawned_tasks']:
if self._debug:
print('Checking status of spawned task {0}'.format(
task['task_id']))
self._call_satellite('{0}/{1}'.format(self._server_url,
task['_href']))
return r_json
# It looks like, while we can't actually get the activation key
# from the text on the satellite page, it is in the URL of the
# page. Same for repo id with repo page. As in,
# https://sat6-atomic.refarch.bos.redhat.com/activation_keys/
# {The activation key number is here}/info
def get_data(self, repo_id, activation_key):
url = '{0}/katello/api/repositories/{1}'.format(
self._server_url, repo_id)
r_json = self._call_satellite(url)
keyData = {}
keyData['org_id'] = r_json.get("organization").get("id")
keyData['product_id'] = r_json.get("product").get("id")
url = '{0}/katello/api/activation_keys/{1}'.format(
self._server_url, activation_key)
r2_json = self._call_satellite(url)
keyData['content_view_id'] = r2_json.get("content_view_id")
if self._debug:
print("key data is {0}".format(keyData))
return keyData
@property
def status(self):
"""Return Satellite server status"""
if self._debug:
print('Verifying Satellite server status')
return self._call_satellite('{0}/api/v2/status/'.format(
self._server_url))
def is_repo(self, repo_id):
"""Return true if repo exists"""
url = '{0}/katello/api/repositories/{1}'.format(
self._server_url, repo_id)
if self._debug:
print('Verifying satellite repository "{0}"'.format(repo_id))
r_json = self._call_satellite(url)
if int(repo_id) == r_json.get('id'):
if self._debug:
print("Yes it was a repo")
return True
else:
return False
def _upload_id(self, repo_id):
"""Get a satellite upload ID"""
url = '{0}/katello/api/repositories/{1}/content_uploads'.format(
self._server_url, repo_id)
r_json = self._call_satellite(url, "post-nodata")
if 'error' in r_json:
raise Exception('Unable to get a satellite upload ID')
return r_json.get('upload_id')
def upload_docker_image(self, image, repo_id):
"""Upload image to repository"""
if self._debug:
print("Getting an upload id")
upload_id = self._upload_id(repo_id)
if self._debug:
print('Uploading image using ID "{0}"'.format(upload_id))
print('\nUploading image "{0}"'.format(image))
self._upload_docker_image(image, repo_id, upload_id)
self._import_upload(upload_id, repo_id)
self._delete_upload_id(upload_id, repo_id)
def _upload_docker_image(self, image, repo_id, upload_id):
if self._debug:
print("Beginning to upload the image")
offset = 0
image_stream = self._docker_client.get_image(image)
while True:
content = image_stream.read(self._chunk_size)
if not content:
break
url = "{0}/katello/api/repositories/{1}/content_uploads/{2}".format(self._server_url, repo_id, upload_id)
sys.stdout.flush()
sys.stdout.write(".")
payload = {
'offset': offset,
'content': content
}
r_json = self._call_satellite(url, "put-multi-part", payload)
if (r_json is not None):
if ('errors' in r_json):
raise Exception("Unable to upload image. Error:{0}"
.format(r_json.get("errors")))
offset += self._chunk_size
image_stream.close()
if self._debug:
print("Finished uploading the image data")
def _delete_upload_id(self, upload_id, repo_id):
"""Delete upload request ID"""
delete_url = "{0}/katello/api/repositories/{1}/content_uploads/{2}".format(self._server_url, repo_id, upload_id)
self._call_satellite(delete_url, "delete")
if self._debug:
print("Successful Deletion")
def _import_upload(self, upload_id, repo_id):
"""Import uploaded content"""
url = '{0}/katello/api/repositories/{1}/import_uploads'.format(
self._server_url, repo_id)
if self._debug:
print('Importing satellite upload {0} into {1}'.format(
upload_id, repo_id))
payload = {
"upload_ids": [upload_id]
# may need to make the id into a string. Unclear.
}
r_json = self._call_satellite(url, "put-jsonHead", payload)
if (r_json is not None):
if ('errors' in r_json):
raise Exception('Unable to import satellite content into {0}'
.format(repo_id))
def publish_view(self, content_id, repo_id):
"""Publish satellite repository to satellite web server"""
url = '{0}/katello/api/content_views/{1}/publish/'.format(
self._server_url, content_id)
r_json = self._call_satellite(url, "post-nodata")
if (r_json is not None):
if ('errors' in r_json):
raise Exception('Unable to publish satellite repo "{0}"'
.format(repo_id))
class SatelliteConfig(object):
def __init__(self):
self.c = configparser.ConfigParser()
self.config_file = os.path.expanduser("~/.satellite/admin.conf")
self.c.read(self.config_file)
self.url = self._get("server", "url")
self.username = self._get("auth", "username")
self.password = self._get("auth", "password")
self.verify_ssl = self._getboolean("server", "verify_ssl")
"""
Satellite configuration file [optional]:
1. look in (or create) ~/.satellite/admin.conf
configuration contents:
[server]
host = <satellite-server-hostname.example.com>
verify_ssl = false
# optional auth section
[auth]
username: <user>
password: <pass>
"""
def _get(self, section, val):
try:
return self.c.get(section, val)
except (configparser.NoSectionError, configparser.NoOptionError):
return None
except ValueError as e:
raise ValueError("Satellite Bad Value for {0} in {1}. {2}".format(
val, self.config_file, e))
def _getboolean(self, section, val):
try:
return self.c.getboolean(section, val)
except (configparser.NoSectionError, configparser.NoOptionError):
return True
except ValueError as e:
raise ValueError("Satellite Bad Value for {0} in {1}. {2}".format(
val, self.config_file, e))
def config(self):
return {"url": self.url, "verify_ssl": self.verify_ssl, "username": self.username, "password": self.password}
| lgpl-2.1 |
hirunatan/anillo | tests/middlewares/test_sessions.py | 3 | 1121 | # -*- coding: utf-8 -*-
"""
tests.middlewares.test_sessions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests session middleware
:copyright: (c) 2015 by Jesús Espino.
:license: BSD, see LICENSE for more details.
"""
from anillo.middlewares.cookies import wrap_cookies
from anillo.middlewares.session import wrap_session
from anillo.http.request import Request
from anillo.http.responses import Response
@wrap_cookies
@wrap_session
def session_app(request):
request.session["test"] = request.session.get("test", 0) + 1
response = Response(request.session["test"])
return response
def test_session_without_cookie():
request = Request()
response = session_app(request)
assert response.body == 1
response = session_app(request)
assert response.body == 1
def test_session_with_cookie():
request = Request()
response = session_app(request)
assert response.body == 1
session_id = response.cookies['session-id']['value']
request = Request(headers={"Cookie": "session-id={}".format(session_id)})
response = session_app(request)
assert response.body == 2
| bsd-2-clause |
grantvk/aima-python | submissions/Hess/vacuum2.py | 18 | 1142 | import agents as ag
def HW2Agent() -> object:
def program(percept):
bump, status = percept
if status == 'Dirty':
action = 'Suck'
else:
lastBump, lastStatus = program.oldPercepts[-1]
lastAction = program.oldActions[-1]
if lastAction == 'Suck' :
action = program.oldActions[-2]
elif (lastAction == 'Right' and bump == 'None'):
action = 'Right'
elif (lastAction == 'Right' and bump == 'Bump'):
action = 'Left'
elif (lastAction == 'Left' and bump == 'None') :
action ='Left'
elif (lastAction == 'Left' and bump == 'Bump') :
action = 'Right'
else:
action = 'Left'
program.oldPercepts.append(percept)
program.oldActions.append(action)
return action
# assign static variables here
program.oldPercepts = [('None', 'Clean')]
program.oldActions = ['Left']
agt = ag.Agent(program)
# assign class attributes here:
# agt.direction = ag.Direction('left')
return agt | mit |
ueg1990/flask-restful | tests/test_cors.py | 19 | 2247 | import unittest
from flask import Flask
import flask_restful
from flask_restful.utils import cors
from nose.tools import assert_equals, assert_true
class CORSTestCase(unittest.TestCase):
def test_crossdomain(self):
class Foo(flask_restful.Resource):
@cors.crossdomain(origin='*')
def get(self):
return "data"
app = Flask(__name__)
api = flask_restful.Api(app)
api.add_resource(Foo, '/')
with app.test_client() as client:
res = client.get('/')
assert_equals(res.status_code, 200)
assert_equals(res.headers['Access-Control-Allow-Origin'], '*')
assert_equals(res.headers['Access-Control-Max-Age'], '21600')
assert_true('HEAD' in res.headers['Access-Control-Allow-Methods'])
assert_true('OPTIONS' in res.headers['Access-Control-Allow-Methods'])
assert_true('GET' in res.headers['Access-Control-Allow-Methods'])
def test_access_control_expose_headers(self):
class Foo(flask_restful.Resource):
@cors.crossdomain(origin='*',
expose_headers=['X-My-Header', 'X-Another-Header'])
def get(self):
return "data"
app = Flask(__name__)
api = flask_restful.Api(app)
api.add_resource(Foo, '/')
with app.test_client() as client:
res = client.get('/')
assert_equals(res.status_code, 200)
assert_true('X-MY-HEADER' in res.headers['Access-Control-Expose-Headers'])
assert_true('X-ANOTHER-HEADER' in res.headers['Access-Control-Expose-Headers'])
def test_no_crossdomain(self):
class Foo(flask_restful.Resource):
def get(self):
return "data"
app = Flask(__name__)
api = flask_restful.Api(app)
api.add_resource(Foo, '/')
with app.test_client() as client:
res = client.get('/')
assert_equals(res.status_code, 200)
assert_true('Access-Control-Allow-Origin' not in res.headers)
assert_true('Access-Control-Allow-Methods' not in res.headers)
assert_true('Access-Control-Max-Age' not in res.headers)
| bsd-3-clause |
mibanescu/pulp | server/pulp/server/async/app.py | 3 | 2642 | """
This module is the Pulp Celery App. It is passed to the workers from the command line, and they
will see the "celery" module attribute and use it. This module also initializes the Pulp app after
Celery setup finishes.
"""
from celery.signals import celeryd_after_setup
# This import will load our configs
from pulp.server import config # noqa
from pulp.server import initialization
# We need this import so that the Celery setup_logging signal gets registered
from pulp.server import logs # noqa
from pulp.server.async import tasks
# This import is here so that Celery will find our application instance
from pulp.server.async.celery_instance import celery # noqa
from pulp.server.managers.repo import _common as common_utils
@celeryd_after_setup.connect
def initialize_worker(sender, instance, **kwargs):
"""
This function performs all the necessary initialization of the Celery worker.
It starts by cleaning up old state if this worker was previously running, but died unexpectedly.
In such cases, any Pulp tasks that were running or waiting on this worker will show incorrect
state. Any reserved_resource reservations associated with the previous worker will also be
removed along with the worker entry in the database itself. The working directory specified in
/etc/pulp/server.conf (/var/cache/pulp/<worker_name>) by default is removed and recreated. This
is called early in the worker start process, and later when it's fully online, pulp_celerybeat
will discover the worker as usual to allow new work to arrive at this worker. If there is no
previous work to cleanup, this method still runs, but has no effect on the database.
After cleaning up old state, it ensures the existence of the worker's working directory.
Lastly, this function makes the call to Pulp's initialization code.
It uses the celeryd_after_setup signal[0] so that it gets called by Celery after logging is
initialized, but before Celery starts to run tasks.
[0] http://celery.readthedocs.org/en/latest/userguide/signals.html#celeryd-after-setup
:param sender: The hostname of the worker
:type sender: basestring
:param instance: The Worker instance to be initialized (unused)
:type instance: celery.apps.worker.Worker
:param kwargs: Other params (unused)
:type kwargs: dict
"""
initialization.initialize()
tasks._delete_worker(sender, normal_shutdown=True)
# Create a new working directory for worker that is starting now
common_utils.delete_worker_working_directory(sender)
common_utils.create_worker_working_directory(sender)
| gpl-2.0 |
saydulk/django | django/contrib/sessions/backends/base.py | 298 | 12046 | from __future__ import unicode_literals
import base64
import logging
import string
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.sessions.exceptions import SuspiciousSession
from django.core.exceptions import SuspiciousOperation
from django.utils import timezone
from django.utils.crypto import (
constant_time_compare, get_random_string, salted_hmac,
)
from django.utils.encoding import force_bytes, force_text
from django.utils.module_loading import import_string
# session_key should not be case sensitive because some backends can store it
# on case insensitive file systems.
VALID_KEY_CHARS = string.ascii_lowercase + string.digits
class CreateError(Exception):
"""
Used internally as a consistent exception type to catch from save (see the
docstring for SessionBase.save() for details).
"""
pass
class SessionBase(object):
"""
Base class for all Session classes.
"""
TEST_COOKIE_NAME = 'testcookie'
TEST_COOKIE_VALUE = 'worked'
def __init__(self, session_key=None):
self._session_key = session_key
self.accessed = False
self.modified = False
self.serializer = import_string(settings.SESSION_SERIALIZER)
def __contains__(self, key):
return key in self._session
def __getitem__(self, key):
return self._session[key]
def __setitem__(self, key, value):
self._session[key] = value
self.modified = True
def __delitem__(self, key):
del self._session[key]
self.modified = True
def get(self, key, default=None):
return self._session.get(key, default)
def pop(self, key, default=None):
self.modified = self.modified or key in self._session
return self._session.pop(key, default)
def setdefault(self, key, value):
if key in self._session:
return self._session[key]
else:
self.modified = True
self._session[key] = value
return value
def set_test_cookie(self):
self[self.TEST_COOKIE_NAME] = self.TEST_COOKIE_VALUE
def test_cookie_worked(self):
return self.get(self.TEST_COOKIE_NAME) == self.TEST_COOKIE_VALUE
def delete_test_cookie(self):
del self[self.TEST_COOKIE_NAME]
def _hash(self, value):
key_salt = "django.contrib.sessions" + self.__class__.__name__
return salted_hmac(key_salt, value).hexdigest()
def encode(self, session_dict):
"Returns the given session dictionary serialized and encoded as a string."
serialized = self.serializer().dumps(session_dict)
hash = self._hash(serialized)
return base64.b64encode(hash.encode() + b":" + serialized).decode('ascii')
def decode(self, session_data):
encoded_data = base64.b64decode(force_bytes(session_data))
try:
# could produce ValueError if there is no ':'
hash, serialized = encoded_data.split(b':', 1)
expected_hash = self._hash(serialized)
if not constant_time_compare(hash.decode(), expected_hash):
raise SuspiciousSession("Session data corrupted")
else:
return self.serializer().loads(serialized)
except Exception as e:
# ValueError, SuspiciousOperation, unpickling exceptions. If any of
# these happen, just return an empty dictionary (an empty session).
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' %
e.__class__.__name__)
logger.warning(force_text(e))
return {}
def update(self, dict_):
self._session.update(dict_)
self.modified = True
def has_key(self, key):
return key in self._session
def keys(self):
return self._session.keys()
def values(self):
return self._session.values()
def items(self):
return self._session.items()
def iterkeys(self):
return self._session.iterkeys()
def itervalues(self):
return self._session.itervalues()
def iteritems(self):
return self._session.iteritems()
def clear(self):
# To avoid unnecessary persistent storage accesses, we set up the
# internals directly (loading data wastes time, since we are going to
# set it to an empty dict anyway).
self._session_cache = {}
self.accessed = True
self.modified = True
def is_empty(self):
"Returns True when there is no session_key and the session is empty"
try:
return not bool(self._session_key) and not self._session_cache
except AttributeError:
return True
def _get_new_session_key(self):
"Returns session key that isn't being used."
while True:
session_key = get_random_string(32, VALID_KEY_CHARS)
if not self.exists(session_key):
break
return session_key
def _get_or_create_session_key(self):
if self._session_key is None:
self._session_key = self._get_new_session_key()
return self._session_key
def _validate_session_key(self, key):
"""
Key must be truthy and at least 8 characters long. 8 characters is an
arbitrary lower bound for some minimal key security.
"""
return key and len(key) >= 8
def _get_session_key(self):
return self.__session_key
def _set_session_key(self, value):
"""
Validate session key on assignment. Invalid values will set to None.
"""
if self._validate_session_key(value):
self.__session_key = value
else:
self.__session_key = None
session_key = property(_get_session_key)
_session_key = property(_get_session_key, _set_session_key)
def _get_session(self, no_load=False):
"""
Lazily loads session from storage (unless "no_load" is True, when only
an empty dict is stored) and stores it in the current instance.
"""
self.accessed = True
try:
return self._session_cache
except AttributeError:
if self.session_key is None or no_load:
self._session_cache = {}
else:
self._session_cache = self.load()
return self._session_cache
_session = property(_get_session)
def get_expiry_age(self, **kwargs):
"""Get the number of seconds until the session expires.
Optionally, this function accepts `modification` and `expiry` keyword
arguments specifying the modification and expiry of the session.
"""
try:
modification = kwargs['modification']
except KeyError:
modification = timezone.now()
# Make the difference between "expiry=None passed in kwargs" and
# "expiry not passed in kwargs", in order to guarantee not to trigger
# self.load() when expiry is provided.
try:
expiry = kwargs['expiry']
except KeyError:
expiry = self.get('_session_expiry')
if not expiry: # Checks both None and 0 cases
return settings.SESSION_COOKIE_AGE
if not isinstance(expiry, datetime):
return expiry
delta = expiry - modification
return delta.days * 86400 + delta.seconds
def get_expiry_date(self, **kwargs):
"""Get session the expiry date (as a datetime object).
Optionally, this function accepts `modification` and `expiry` keyword
arguments specifying the modification and expiry of the session.
"""
try:
modification = kwargs['modification']
except KeyError:
modification = timezone.now()
# Same comment as in get_expiry_age
try:
expiry = kwargs['expiry']
except KeyError:
expiry = self.get('_session_expiry')
if isinstance(expiry, datetime):
return expiry
if not expiry: # Checks both None and 0 cases
expiry = settings.SESSION_COOKIE_AGE
return modification + timedelta(seconds=expiry)
def set_expiry(self, value):
"""
Sets a custom expiration for the session. ``value`` can be an integer,
a Python ``datetime`` or ``timedelta`` object or ``None``.
If ``value`` is an integer, the session will expire after that many
seconds of inactivity. If set to ``0`` then the session will expire on
browser close.
If ``value`` is a ``datetime`` or ``timedelta`` object, the session
will expire at that specific future time.
If ``value`` is ``None``, the session uses the global session expiry
policy.
"""
if value is None:
# Remove any custom expiration for this session.
try:
del self['_session_expiry']
except KeyError:
pass
return
if isinstance(value, timedelta):
value = timezone.now() + value
self['_session_expiry'] = value
def get_expire_at_browser_close(self):
"""
Returns ``True`` if the session is set to expire when the browser
closes, and ``False`` if there's an expiry date. Use
``get_expiry_date()`` or ``get_expiry_age()`` to find the actual expiry
date/age, if there is one.
"""
if self.get('_session_expiry') is None:
return settings.SESSION_EXPIRE_AT_BROWSER_CLOSE
return self.get('_session_expiry') == 0
def flush(self):
"""
Removes the current session data from the database and regenerates the
key.
"""
self.clear()
self.delete()
self._session_key = None
def cycle_key(self):
"""
Creates a new session key, whilst retaining the current session data.
"""
data = self._session_cache
key = self.session_key
self.create()
self._session_cache = data
self.delete(key)
# Methods that child classes must implement.
def exists(self, session_key):
"""
Returns True if the given session_key already exists.
"""
raise NotImplementedError('subclasses of SessionBase must provide an exists() method')
def create(self):
"""
Creates a new session instance. Guaranteed to create a new object with
a unique key and will have saved the result once (with empty data)
before the method returns.
"""
raise NotImplementedError('subclasses of SessionBase must provide a create() method')
def save(self, must_create=False):
"""
Saves the session data. If 'must_create' is True, a new session object
is created (otherwise a CreateError exception is raised). Otherwise,
save() can update an existing object with the same key.
"""
raise NotImplementedError('subclasses of SessionBase must provide a save() method')
def delete(self, session_key=None):
"""
Deletes the session data under this key. If the key is None, the
current session key value is used.
"""
raise NotImplementedError('subclasses of SessionBase must provide a delete() method')
def load(self):
"""
Loads the session data and returns a dictionary.
"""
raise NotImplementedError('subclasses of SessionBase must provide a load() method')
@classmethod
def clear_expired(cls):
"""
Remove expired sessions from the session store.
If this operation isn't possible on a given backend, it should raise
NotImplementedError. If it isn't necessary, because the backend has
a built-in expiration mechanism, it should be a no-op.
"""
raise NotImplementedError('This backend does not support clear_expired().')
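# Illustrative sketch (not part of the original module): a minimal dict-backed
# store showing one way a backend could implement the abstract methods above.
# The class name and the module-level _SESSIONS dict are hypothetical; it
# assumes the SessionBase helpers _get_new_session_key(),
# _get_or_create_session_key() and _get_session(), plus the CreateError
# exception defined alongside SessionBase, are available in this module.
_SESSIONS = {}
class DictSessionStore(SessionBase):
    def exists(self, session_key):
        return session_key in _SESSIONS
    def create(self):
        # Generate an unused key, then persist an empty session for it.
        self._session_key = self._get_new_session_key()
        self.save(must_create=True)
        self.modified = True
    def save(self, must_create=False):
        key = self._get_or_create_session_key()
        if must_create and key in _SESSIONS:
            raise CreateError
        _SESSIONS[key] = self._get_session(no_load=must_create)
    def delete(self, session_key=None):
        key = self.session_key if session_key is None else session_key
        _SESSIONS.pop(key, None)
    def load(self):
        try:
            return dict(_SESSIONS[self.session_key])
        except KeyError:
            self._session_key = None
            return {}
    @classmethod
    def clear_expired(cls):
        # No-op: this toy store keeps data only for the process lifetime.
        pass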
| bsd-3-clause |
redhat-openstack/cinder | cinder/tests/test_vmware_vmdk.py | 2 | 126862 | # Copyright (c) 2013 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suite for VMware VMDK driver.
"""
from distutils.version import LooseVersion
import os
import mock
import mox
from cinder import exception
from cinder.image import glance
from cinder.openstack.common import units
from cinder import test
from cinder.volume import configuration
from cinder.volume.drivers.vmware import api
from cinder.volume.drivers.vmware import datastore as hub
from cinder.volume.drivers.vmware import error_util
from cinder.volume.drivers.vmware import vim
from cinder.volume.drivers.vmware import vim_util
from cinder.volume.drivers.vmware import vmdk
from cinder.volume.drivers.vmware import vmware_images
from cinder.volume.drivers.vmware import volumeops
class FakeVim(object):
@property
def service_content(self):
return mox.MockAnything()
@property
def client(self):
return mox.MockAnything()
def Login(self, session_manager, userName, password):
return mox.MockAnything()
def Logout(self, session_manager):
pass
def TerminateSession(self, session_manager, sessionId):
pass
def SessionIsActive(self, session_manager, sessionID, userName):
pass
class FakeTaskInfo(object):
def __init__(self, state, result=None):
self.state = state
self.result = result
class FakeError(object):
def __init__(self):
self.localizedMessage = None
self.error = FakeError()
class FakeMor(object):
def __init__(self, type, val):
self._type = type
self.value = val
class FakeObject(object):
def __init__(self):
self._fields = {}
def __setitem__(self, key, value):
self._fields[key] = value
def __getitem__(self, item):
return self._fields[item]
class FakeManagedObjectReference(object):
def __init__(self, lis=None):
self.ManagedObjectReference = lis or []
class FakeDatastoreSummary(object):
def __init__(self, freeSpace, capacity, datastore=None, name=None):
self.freeSpace = freeSpace
self.capacity = capacity
self.datastore = datastore
self.name = name
class FakeSnapshotTree(object):
def __init__(self, tree=None, name=None,
snapshot=None, childSnapshotList=None):
self.rootSnapshotList = tree
self.name = name
self.snapshot = snapshot
self.childSnapshotList = childSnapshotList
class FakeElem(object):
def __init__(self, prop_set=None):
self.propSet = prop_set
class FakeProp(object):
def __init__(self, name=None, val=None):
self.name = name
self.val = val
class FakeRetrieveResult(object):
def __init__(self, objects, token):
self.objects = objects
self.token = token
class FakeObj(object):
def __init__(self, obj=None):
self.obj = obj
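# The Fake* classes above are lightweight stand-ins for vSphere managed object
# references, property-collector results and datastore summaries; they let the
# mox/mock based tests below exercise the driver without a real vCenter or ESX
# session.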
class VMwareEsxVmdkDriverTestCase(test.TestCase):
"""Test class for VMwareEsxVmdkDriver."""
IP = 'localhost'
USERNAME = 'username'
PASSWORD = 'password'
VOLUME_FOLDER = 'cinder-volumes'
API_RETRY_COUNT = 3
TASK_POLL_INTERVAL = 5.0
IMG_TX_TIMEOUT = 10
MAX_OBJECTS = 100
TMP_DIR = "/vmware-tmp"
VMDK_DRIVER = vmdk.VMwareEsxVmdkDriver
def setUp(self):
super(VMwareEsxVmdkDriverTestCase, self).setUp()
self._config = mox.MockObject(configuration.Configuration)
self._config.append_config_values(mox.IgnoreArg())
self._config.vmware_host_ip = self.IP
self._config.vmware_host_username = self.USERNAME
self._config.vmware_host_password = self.PASSWORD
self._config.vmware_wsdl_location = None
self._config.vmware_volume_folder = self.VOLUME_FOLDER
self._config.vmware_api_retry_count = self.API_RETRY_COUNT
self._config.vmware_task_poll_interval = self.TASK_POLL_INTERVAL
self._config.vmware_image_transfer_timeout_secs = self.IMG_TX_TIMEOUT
self._config.vmware_max_objects_retrieval = self.MAX_OBJECTS
self._config.vmware_tmp_dir = self.TMP_DIR
self._db = mock.Mock()
self._driver = vmdk.VMwareEsxVmdkDriver(configuration=self._config,
db=self._db)
        api_retry_count = self._config.vmware_api_retry_count
        task_poll_interval = self._config.vmware_task_poll_interval
self._session = api.VMwareAPISession(self.IP, self.USERNAME,
self.PASSWORD, api_retry_count,
task_poll_interval,
create_session=False)
self._volumeops = volumeops.VMwareVolumeOps(self._session,
self.MAX_OBJECTS)
self._vim = FakeVim()
def test_retry(self):
"""Test Retry."""
class TestClass(object):
def __init__(self):
self.counter1 = 0
self.counter2 = 0
@api.Retry(max_retry_count=2, inc_sleep_time=0.001,
exceptions=(Exception))
def fail(self):
self.counter1 += 1
raise exception.CinderException('Fail')
@api.Retry(max_retry_count=2)
def success(self):
self.counter2 += 1
return self.counter2
test_obj = TestClass()
self.assertRaises(exception.CinderException, test_obj.fail)
self.assertEqual(test_obj.counter1, 3)
ret = test_obj.success()
self.assertEqual(1, ret)
def test_create_session(self):
"""Test create_session."""
m = self.mox
m.StubOutWithMock(api.VMwareAPISession, 'vim')
self._session.vim = self._vim
m.ReplayAll()
self._session.create_session()
m.UnsetStubs()
m.VerifyAll()
def test_do_setup(self):
"""Test do_setup."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'session')
self._driver.session = self._session
m.ReplayAll()
self._driver.do_setup(mox.IgnoreArg())
m.UnsetStubs()
m.VerifyAll()
def test_check_for_setup_error(self):
"""Test check_for_setup_error."""
self._driver.check_for_setup_error()
def test_get_volume_stats(self):
"""Test get_volume_stats."""
stats = self._driver.get_volume_stats()
self.assertEqual(stats['vendor_name'], 'VMware')
self.assertEqual(stats['driver_version'], self._driver.VERSION)
self.assertEqual(stats['storage_protocol'], 'LSI Logic SCSI')
self.assertEqual(stats['reserved_percentage'], 0)
self.assertEqual(stats['total_capacity_gb'], 'unknown')
self.assertEqual(stats['free_capacity_gb'], 'unknown')
def test_create_volume(self):
"""Test create_volume."""
driver = self._driver
host = mock.sentinel.host
rp = mock.sentinel.resource_pool
folder = mock.sentinel.folder
summary = mock.sentinel.summary
driver._select_ds_for_volume = mock.MagicMock()
driver._select_ds_for_volume.return_value = (host, rp, folder,
summary)
# invoke the create_volume call
volume = {'name': 'fake_volume'}
driver.create_volume(volume)
# verify calls made
driver._select_ds_for_volume.assert_called_once_with(volume)
# test create_volume call when _select_ds_for_volume fails
driver._select_ds_for_volume.side_effect = error_util.VimException('')
        self.assertRaises(error_util.VimException, driver.create_volume,
volume)
# Clear side effects.
driver._select_ds_for_volume.side_effect = None
def test_success_wait_for_task(self):
"""Test successful wait_for_task."""
m = self.mox
m.StubOutWithMock(api.VMwareAPISession, 'vim')
self._session.vim = self._vim
result = FakeMor('VirtualMachine', 'my_vm')
success_task_info = FakeTaskInfo('success', result=result)
m.StubOutWithMock(vim_util, 'get_object_property')
vim_util.get_object_property(self._session.vim,
mox.IgnoreArg(),
'info').AndReturn(success_task_info)
m.ReplayAll()
ret = self._session.wait_for_task(mox.IgnoreArg())
self.assertEqual(ret.result, result)
m.UnsetStubs()
m.VerifyAll()
def test_failed_wait_for_task(self):
"""Test failed wait_for_task."""
m = self.mox
m.StubOutWithMock(api.VMwareAPISession, 'vim')
self._session.vim = self._vim
failed_task_info = FakeTaskInfo('failed')
m.StubOutWithMock(vim_util, 'get_object_property')
vim_util.get_object_property(self._session.vim,
mox.IgnoreArg(),
'info').AndReturn(failed_task_info)
m.ReplayAll()
self.assertRaises(error_util.VimFaultException,
self._session.wait_for_task,
mox.IgnoreArg())
m.UnsetStubs()
m.VerifyAll()
def test_delete_volume_without_backing(self):
"""Test delete_volume without backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
self._volumeops.get_backing('hello_world').AndReturn(None)
m.ReplayAll()
volume = FakeObject()
volume['name'] = 'hello_world'
self._driver.delete_volume(volume)
m.UnsetStubs()
m.VerifyAll()
def test_delete_volume_with_backing(self):
"""Test delete_volume with backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
backing = FakeMor('VirtualMachine', 'my_vm')
FakeMor('Task', 'my_task')
m.StubOutWithMock(self._volumeops, 'get_backing')
m.StubOutWithMock(self._volumeops, 'delete_backing')
self._volumeops.get_backing('hello_world').AndReturn(backing)
self._volumeops.delete_backing(backing)
m.ReplayAll()
volume = FakeObject()
volume['name'] = 'hello_world'
self._driver.delete_volume(volume)
m.UnsetStubs()
m.VerifyAll()
def test_create_export(self):
"""Test create_export."""
self._driver.create_export(mox.IgnoreArg(), mox.IgnoreArg())
def test_ensure_export(self):
"""Test ensure_export."""
self._driver.ensure_export(mox.IgnoreArg(), mox.IgnoreArg())
def test_remove_export(self):
"""Test remove_export."""
self._driver.remove_export(mox.IgnoreArg(), mox.IgnoreArg())
def test_terminate_connection(self):
"""Test terminate_connection."""
self._driver.terminate_connection(mox.IgnoreArg(), mox.IgnoreArg(),
force=mox.IgnoreArg())
def test_create_backing_in_inventory_multi_hosts(self):
"""Test _create_backing_in_inventory scanning multiple hosts."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
host1 = FakeObj(obj=FakeMor('HostSystem', 'my_host1'))
host2 = FakeObj(obj=FakeMor('HostSystem', 'my_host2'))
retrieve_result = FakeRetrieveResult([host1, host2], None)
m.StubOutWithMock(self._volumeops, 'get_hosts')
self._volumeops.get_hosts().AndReturn(retrieve_result)
m.StubOutWithMock(self._driver, '_create_backing')
volume = FakeObject()
volume['name'] = 'vol_name'
backing = FakeMor('VirtualMachine', 'my_back')
mux = self._driver._create_backing(volume, host1.obj, {})
mux.AndRaise(error_util.VimException('Maintenance mode'))
mux = self._driver._create_backing(volume, host2.obj, {})
mux.AndReturn(backing)
m.StubOutWithMock(self._volumeops, 'cancel_retrieval')
self._volumeops.cancel_retrieval(retrieve_result)
m.StubOutWithMock(self._volumeops, 'continue_retrieval')
m.ReplayAll()
result = self._driver._create_backing_in_inventory(volume)
self.assertEqual(result, backing)
m.UnsetStubs()
m.VerifyAll()
def test_init_conn_with_instance_and_backing(self):
"""Test initialize_connection with instance and backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
volume = FakeObject()
volume['name'] = 'volume_name'
volume['id'] = 'volume_id'
volume['size'] = 1
connector = {'instance': 'my_instance'}
backing = FakeMor('VirtualMachine', 'my_back')
self._volumeops.get_backing(volume['name']).AndReturn(backing)
m.StubOutWithMock(self._volumeops, 'get_host')
host = FakeMor('HostSystem', 'my_host')
self._volumeops.get_host(mox.IgnoreArg()).AndReturn(host)
m.ReplayAll()
conn_info = self._driver.initialize_connection(volume, connector)
self.assertEqual(conn_info['driver_volume_type'], 'vmdk')
self.assertEqual(conn_info['data']['volume'], 'my_back')
self.assertEqual(conn_info['data']['volume_id'], 'volume_id')
m.UnsetStubs()
m.VerifyAll()
def test_get_volume_group_folder(self):
"""Test _get_volume_group_folder."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
datacenter = FakeMor('Datacenter', 'my_dc')
m.StubOutWithMock(self._volumeops, 'get_vmfolder')
self._volumeops.get_vmfolder(datacenter)
m.ReplayAll()
self._driver._get_volume_group_folder(datacenter)
m.UnsetStubs()
m.VerifyAll()
def test_select_datastore_summary(self):
"""Test _select_datastore_summary."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
datastore1 = FakeMor('Datastore', 'my_ds_1')
datastore2 = FakeMor('Datastore', 'my_ds_2')
datastore3 = FakeMor('Datastore', 'my_ds_3')
datastore4 = FakeMor('Datastore', 'my_ds_4')
datastores = [datastore1, datastore2, datastore3, datastore4]
m.StubOutWithMock(self._volumeops, 'get_summary')
summary1 = FakeDatastoreSummary(5, 100)
summary2 = FakeDatastoreSummary(25, 100)
summary3 = FakeDatastoreSummary(50, 100)
summary4 = FakeDatastoreSummary(75, 100)
self._volumeops.get_summary(
datastore1).MultipleTimes().AndReturn(summary1)
self._volumeops.get_summary(
datastore2).MultipleTimes().AndReturn(summary2)
self._volumeops.get_summary(
datastore3).MultipleTimes().AndReturn(summary3)
self._volumeops.get_summary(
datastore4).MultipleTimes().AndReturn(summary4)
m.StubOutWithMock(self._volumeops, 'get_connected_hosts')
host1 = FakeMor('HostSystem', 'my_host_1')
host2 = FakeMor('HostSystem', 'my_host_2')
host3 = FakeMor('HostSystem', 'my_host_3')
host4 = FakeMor('HostSystem', 'my_host_4')
self._volumeops.get_connected_hosts(
datastore1).MultipleTimes().AndReturn([host1, host2, host3, host4])
self._volumeops.get_connected_hosts(
datastore2).MultipleTimes().AndReturn([host1, host2, host3])
self._volumeops.get_connected_hosts(
datastore3).MultipleTimes().AndReturn([host1, host2])
self._volumeops.get_connected_hosts(
datastore4).MultipleTimes().AndReturn([host1, host2])
m.ReplayAll()
summary = self._driver._select_datastore_summary(1, datastores)
self.assertEqual(summary, summary1)
summary = self._driver._select_datastore_summary(10, datastores)
self.assertEqual(summary, summary2)
summary = self._driver._select_datastore_summary(40, datastores)
self.assertEqual(summary, summary4)
self.assertRaises(error_util.VimException,
self._driver._select_datastore_summary,
100, datastores)
m.UnsetStubs()
m.VerifyAll()
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
'session', new_callable=mock.PropertyMock)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_get_folder_ds_summary(self, volumeops, session):
"""Test _get_folder_ds_summary."""
volumeops = volumeops.return_value
driver = self._driver
volume = {'size': 10, 'volume_type_id': 'fake_type'}
rp = mock.sentinel.resource_pool
dss = mock.sentinel.datastores
# patch method calls from _get_folder_ds_summary
volumeops.get_dc.return_value = mock.sentinel.dc
volumeops.get_vmfolder.return_value = mock.sentinel.folder
driver._get_storage_profile = mock.MagicMock()
driver._select_datastore_summary = mock.MagicMock()
driver._select_datastore_summary.return_value = mock.sentinel.summary
# call _get_folder_ds_summary
(folder, datastore_summary) = driver._get_folder_ds_summary(volume,
rp, dss)
# verify returned values and calls made
self.assertEqual(mock.sentinel.folder, folder,
"Folder returned is wrong.")
self.assertEqual(mock.sentinel.summary, datastore_summary,
"Datastore summary returned is wrong.")
volumeops.get_dc.assert_called_once_with(rp)
volumeops.get_vmfolder.assert_called_once_with(mock.sentinel.dc)
driver._get_storage_profile.assert_called_once_with(volume)
size = volume['size'] * units.Gi
driver._select_datastore_summary.assert_called_once_with(size, dss)
@mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs')
def test_get_disk_type(self, get_volume_type_extra_specs):
"""Test _get_disk_type."""
# Test with no volume type.
volume = {'volume_type_id': None}
self.assertEqual(vmdk.THIN_VMDK_TYPE,
vmdk.VMwareEsxVmdkDriver._get_disk_type(volume))
# Test with valid vmdk_type.
volume_type_id = mock.sentinel.volume_type_id
volume = {'volume_type_id': volume_type_id}
get_volume_type_extra_specs.return_value = vmdk.THICK_VMDK_TYPE
self.assertEqual(vmdk.THICK_VMDK_TYPE,
vmdk.VMwareEsxVmdkDriver._get_disk_type(volume))
get_volume_type_extra_specs.assert_called_once_with(volume_type_id,
'vmware:vmdk_type')
# Test with invalid vmdk_type.
get_volume_type_extra_specs.return_value = 'sparse'
self.assertRaises(error_util.InvalidDiskTypeException,
vmdk.VMwareEsxVmdkDriver._get_disk_type,
volume)
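    # For reference (illustrative): the vmware:vmdk_type extra spec checked
    # above is what an operator sets on a volume type, e.g.
    #   cinder type-key <volume-type> set vmware:vmdk_type=thick
    # with supported values thin, thick and eagerZeroedThick.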
def test_init_conn_with_instance_no_backing(self):
"""Test initialize_connection with instance and without backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
volume = FakeObject()
volume['name'] = 'volume_name'
volume['id'] = 'volume_id'
volume['size'] = 1
volume['volume_type_id'] = None
connector = {'instance': 'my_instance'}
self._volumeops.get_backing(volume['name'])
m.StubOutWithMock(self._volumeops, 'get_host')
host = FakeMor('HostSystem', 'my_host')
self._volumeops.get_host(mox.IgnoreArg()).AndReturn(host)
m.StubOutWithMock(self._volumeops, 'get_dss_rp')
resource_pool = FakeMor('ResourcePool', 'my_rp')
datastores = [FakeMor('Datastore', 'my_ds')]
self._volumeops.get_dss_rp(host).AndReturn((datastores, resource_pool))
m.StubOutWithMock(self._driver, '_get_folder_ds_summary')
folder = FakeMor('Folder', 'my_fol')
summary = FakeDatastoreSummary(1, 1)
self._driver._get_folder_ds_summary(volume, resource_pool,
datastores).AndReturn((folder,
summary))
backing = FakeMor('VirtualMachine', 'my_back')
m.StubOutWithMock(self._volumeops, 'create_backing')
self._volumeops.create_backing(volume['name'],
volume['size'] * units.Mi,
mox.IgnoreArg(), folder,
resource_pool, host,
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(backing)
m.ReplayAll()
conn_info = self._driver.initialize_connection(volume, connector)
self.assertEqual(conn_info['driver_volume_type'], 'vmdk')
self.assertEqual(conn_info['data']['volume'], 'my_back')
self.assertEqual(conn_info['data']['volume_id'], 'volume_id')
m.UnsetStubs()
m.VerifyAll()
def test_init_conn_without_instance(self):
"""Test initialize_connection without instance and a backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
backing = FakeMor('VirtualMachine', 'my_back')
volume = FakeObject()
volume['name'] = 'volume_name'
volume['id'] = 'volume_id'
connector = {}
self._volumeops.get_backing(volume['name']).AndReturn(backing)
m.ReplayAll()
conn_info = self._driver.initialize_connection(volume, connector)
self.assertEqual(conn_info['driver_volume_type'], 'vmdk')
self.assertEqual(conn_info['data']['volume'], 'my_back')
self.assertEqual(conn_info['data']['volume_id'], 'volume_id')
m.UnsetStubs()
m.VerifyAll()
def test_create_snapshot_without_backing(self):
"""Test vmdk.create_snapshot without backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
snapshot = FakeObject()
snapshot['volume_name'] = 'volume_name'
snapshot['name'] = 'snap_name'
snapshot['volume'] = FakeObject()
snapshot['volume']['status'] = 'available'
self._volumeops.get_backing(snapshot['volume_name'])
m.ReplayAll()
self._driver.create_snapshot(snapshot)
m.UnsetStubs()
m.VerifyAll()
def test_create_snapshot_with_backing(self):
"""Test vmdk.create_snapshot with backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
snapshot = FakeObject()
snapshot['volume_name'] = 'volume_name'
snapshot['name'] = 'snapshot_name'
snapshot['display_description'] = 'snapshot_desc'
snapshot['volume'] = FakeObject()
snapshot['volume']['status'] = 'available'
backing = FakeMor('VirtualMachine', 'my_back')
self._volumeops.get_backing(snapshot['volume_name']).AndReturn(backing)
m.StubOutWithMock(self._volumeops, 'create_snapshot')
self._volumeops.create_snapshot(backing, snapshot['name'],
snapshot['display_description'])
m.ReplayAll()
self._driver.create_snapshot(snapshot)
m.UnsetStubs()
m.VerifyAll()
def test_create_snapshot_when_attached(self):
"""Test vmdk.create_snapshot when volume is attached."""
snapshot = FakeObject()
snapshot['volume'] = FakeObject()
snapshot['volume']['status'] = 'in-use'
self.assertRaises(exception.InvalidVolume,
self._driver.create_snapshot, snapshot)
def test_delete_snapshot_without_backing(self):
"""Test delete_snapshot without backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
snapshot = FakeObject()
snapshot['volume_name'] = 'volume_name'
snapshot['name'] = 'snap_name'
snapshot['volume'] = FakeObject()
snapshot['volume']['status'] = 'available'
self._volumeops.get_backing(snapshot['volume_name'])
m.ReplayAll()
self._driver.delete_snapshot(snapshot)
m.UnsetStubs()
m.VerifyAll()
def test_delete_snapshot_with_backing(self):
"""Test delete_snapshot with backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
snapshot = FakeObject()
        snapshot['volume_name'] = 'volume_name'
        snapshot['name'] = 'snap_name'
snapshot['volume'] = FakeObject()
snapshot['volume']['status'] = 'available'
backing = FakeMor('VirtualMachine', 'my_back')
self._volumeops.get_backing(snapshot['volume_name']).AndReturn(backing)
m.StubOutWithMock(self._volumeops, 'delete_snapshot')
self._volumeops.delete_snapshot(backing,
snapshot['name'])
m.ReplayAll()
self._driver.delete_snapshot(snapshot)
m.UnsetStubs()
m.VerifyAll()
def test_delete_snapshot_when_attached(self):
"""Test delete_snapshot when volume is attached."""
snapshot = FakeObject()
snapshot['volume'] = FakeObject()
snapshot['volume']['status'] = 'in-use'
self.assertRaises(exception.InvalidVolume,
self._driver.delete_snapshot, snapshot)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_cloned_volume_without_backing(self, mock_vops):
"""Test create_cloned_volume without a backing."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'name': 'mock_vol'}
src_vref = {'name': 'src_snapshot_name'}
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = None
# invoke the create_volume_from_snapshot api
driver.create_cloned_volume(volume, src_vref)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('src_snapshot_name')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_cloned_volume_with_backing(self, mock_vops):
"""Test create_cloned_volume with a backing."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = mock.sentinel.volume
fake_size = 1
src_vref = {'name': 'src_snapshot_name', 'size': fake_size}
backing = mock.sentinel.backing
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = backing
src_vmdk = "[datastore] src_vm/src_vm.vmdk"
mock_vops.get_vmdk_path.return_value = src_vmdk
driver._create_backing_by_copying = mock.MagicMock()
# invoke the create_volume_from_snapshot api
driver.create_cloned_volume(volume, src_vref)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('src_snapshot_name')
mock_vops.get_vmdk_path.assert_called_once_with(backing)
driver._create_backing_by_copying.assert_called_once_with(volume,
src_vmdk,
fake_size)
@mock.patch.object(VMDK_DRIVER, '_extend_volumeops_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_create_backing_in_inventory')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_create_backing_by_copying(self, volumeops, create_backing,
_extend_virtual_disk):
self._test_create_backing_by_copying(volumeops, create_backing,
_extend_virtual_disk)
def _test_create_backing_by_copying(self, volumeops, create_backing,
_extend_virtual_disk):
"""Test _create_backing_by_copying."""
fake_volume = {'size': 2, 'name': 'fake_volume-0000000000001'}
fake_size = 1
fake_src_vmdk_path = "[datastore] src_vm/src_vm.vmdk"
fake_backing = mock.sentinel.backing
fake_vmdk_path = mock.sentinel.path
#"[datastore] dest_vm/dest_vm.vmdk"
fake_dc = mock.sentinel.datacenter
create_backing.return_value = fake_backing
volumeops.get_vmdk_path.return_value = fake_vmdk_path
volumeops.get_dc.return_value = fake_dc
# Test with fake_volume['size'] greater than fake_size
self._driver._create_backing_by_copying(fake_volume,
fake_src_vmdk_path,
fake_size)
create_backing.assert_called_once_with(fake_volume)
volumeops.get_vmdk_path.assert_called_once_with(fake_backing)
volumeops.get_dc.assert_called_once_with(fake_backing)
volumeops.delete_vmdk_file.assert_called_once_with(fake_vmdk_path,
fake_dc)
volumeops.copy_vmdk_file.assert_called_once_with(fake_dc,
fake_src_vmdk_path,
fake_vmdk_path)
_extend_virtual_disk.assert_called_once_with(fake_volume['size'],
fake_vmdk_path,
fake_dc)
# Reset all the mocks and test with fake_volume['size']
# not greater than fake_size
_extend_virtual_disk.reset_mock()
fake_size = 2
self._driver._create_backing_by_copying(fake_volume,
fake_src_vmdk_path,
fake_size)
self.assertFalse(_extend_virtual_disk.called)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_volume_from_snapshot_without_backing(self, mock_vops):
"""Test create_volume_from_snapshot without a backing."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'name': 'mock_vol'}
snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap'}
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = None
# invoke the create_volume_from_snapshot api
driver.create_volume_from_snapshot(volume, snapshot)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('mock_vol')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_volume_from_snap_without_backing_snap(self, mock_vops):
"""Test create_volume_from_snapshot without a backing snapshot."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'volume_type_id': None, 'name': 'mock_vol'}
snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap'}
backing = mock.sentinel.backing
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = backing
mock_vops.get_snapshot.return_value = None
# invoke the create_volume_from_snapshot api
driver.create_volume_from_snapshot(volume, snapshot)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('mock_vol')
mock_vops.get_snapshot.assert_called_once_with(backing,
'mock_snap')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_volume_from_snapshot(self, mock_vops):
"""Test create_volume_from_snapshot."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'volume_type_id': None, 'name': 'mock_vol'}
snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap',
'volume_size': 1}
fake_size = snapshot['volume_size']
backing = mock.sentinel.backing
snap_moref = mock.sentinel.snap_moref
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = backing
mock_vops.get_snapshot.return_value = snap_moref
src_vmdk = "[datastore] src_vm/src_vm-001.vmdk"
mock_vops.get_vmdk_path.return_value = src_vmdk
driver._create_backing_by_copying = mock.MagicMock()
# invoke the create_volume_from_snapshot api
driver.create_volume_from_snapshot(volume, snapshot)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('mock_vol')
mock_vops.get_snapshot.assert_called_once_with(backing,
'mock_snap')
mock_vops.get_vmdk_path.assert_called_once_with(snap_moref)
driver._create_backing_by_copying.assert_called_once_with(volume,
src_vmdk,
fake_size)
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_extend_volume(self, volume_ops, _extend_virtual_disk,
_select_ds_for_volume):
"""Test extend_volume."""
self._test_extend_volume(volume_ops, _extend_virtual_disk,
_select_ds_for_volume)
def _test_extend_volume(self, volume_ops, _extend_virtual_disk,
_select_ds_for_volume):
fake_name = u'volume-00000001'
new_size = '21'
fake_size = '20'
fake_vol = {'project_id': 'testprjid', 'name': fake_name,
'size': fake_size,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
fake_host = mock.sentinel.host
fake_rp = mock.sentinel.rp
fake_folder = mock.sentinel.folder
fake_summary = mock.Mock(spec=object)
fake_summary.datastore = mock.sentinel.datastore
fake_summary.name = 'fake_name'
fake_backing = mock.sentinel.backing
volume_ops.get_backing.return_value = fake_backing
# If there is enough space in the datastore, where the volume is
# located, then the rest of this method will not be called.
self._driver.extend_volume(fake_vol, new_size)
_extend_virtual_disk.assert_called_with(fake_name, new_size)
self.assertFalse(_select_ds_for_volume.called)
self.assertFalse(volume_ops.get_backing.called)
self.assertFalse(volume_ops.relocate_backing.called)
self.assertFalse(volume_ops.move_backing_to_folder.called)
# If there is not enough space in the datastore, where the volume is
# located, then the rest of this method will be called. The first time
# _extend_virtual_disk is called, VimFaultException is raised. The
# second time it is called, there is no exception.
_extend_virtual_disk.reset_mock()
_extend_virtual_disk.side_effect = [error_util.
VimFaultException(mock.Mock(),
'Error'), None]
# When _select_ds_for_volume raises no exception.
_select_ds_for_volume.return_value = (fake_host, fake_rp,
fake_folder, fake_summary)
self._driver.extend_volume(fake_vol, new_size)
_select_ds_for_volume.assert_called_with(new_size)
volume_ops.get_backing.assert_called_with(fake_name)
volume_ops.relocate_backing.assert_called_with(fake_backing,
fake_summary.datastore,
fake_rp,
fake_host)
_extend_virtual_disk.assert_called_with(fake_name, new_size)
volume_ops.move_backing_to_folder.assert_called_with(fake_backing,
fake_folder)
        # If get_backing raises error_util.VimException during the relocation
        # fallback, extend_volume propagates it.
_extend_virtual_disk.reset_mock()
_extend_virtual_disk.side_effect = [error_util.
VimFaultException(mock.Mock(),
'Error'), None]
volume_ops.get_backing.side_effect = error_util.VimException('Error')
self.assertRaises(error_util.VimException, self._driver.extend_volume,
fake_vol, new_size)
        # If _select_ds_for_volume raises an exception, the rest of the code
        # will not be called.
_extend_virtual_disk.reset_mock()
volume_ops.get_backing.reset_mock()
volume_ops.relocate_backing.reset_mock()
volume_ops.move_backing_to_folder.reset_mock()
_extend_virtual_disk.side_effect = [error_util.
VimFaultException(mock.Mock(),
'Error'), None]
_select_ds_for_volume.side_effect = error_util.VimException('Error')
self.assertRaises(error_util.VimException, self._driver.extend_volume,
fake_vol, new_size)
_extend_virtual_disk.assert_called_once_with(fake_name, new_size)
self.assertFalse(volume_ops.get_backing.called)
self.assertFalse(volume_ops.relocate_backing.called)
self.assertFalse(volume_ops.move_backing_to_folder.called)
def test_copy_image_to_volume_non_vmdk(self):
"""Test copy_image_to_volume for a non-vmdk disk format."""
fake_context = mock.sentinel.context
fake_image_id = 'image-123456789'
fake_image_meta = {'disk_format': 'novmdk'}
image_service = mock.Mock()
image_service.show.return_value = fake_image_meta
fake_volume = {'name': 'fake_name', 'size': 1}
self.assertRaises(exception.ImageUnacceptable,
self._driver.copy_image_to_volume,
fake_context, fake_volume,
image_service, fake_image_id)
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch('cinder.openstack.common.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER,
'_create_virtual_disk_from_preallocated_image')
@mock.patch.object(VMDK_DRIVER, '_create_virtual_disk_from_sparse_image')
@mock.patch(
'cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver._get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_get_ds_name_folder_path')
@mock.patch.object(VMDK_DRIVER, '_create_backing_in_inventory')
def test_copy_image_to_volume_non_stream_optimized(
self, create_backing, get_ds_name_folder_path, get_disk_type,
create_disk_from_sparse_image, create_disk_from_preallocated_image,
vops, select_ds_for_volume, generate_uuid, extend_disk):
self._test_copy_image_to_volume_non_stream_optimized(
create_backing,
get_ds_name_folder_path,
get_disk_type,
create_disk_from_sparse_image,
create_disk_from_preallocated_image,
vops,
select_ds_for_volume,
generate_uuid,
extend_disk)
def _test_copy_image_to_volume_non_stream_optimized(
self, create_backing, get_ds_name_folder_path, get_disk_type,
create_disk_from_sparse_image, create_disk_from_preallocated_image,
vops, select_ds_for_volume, generate_uuid, extend_disk):
image_size_in_bytes = 2 * units.Gi
adapter_type = 'lsiLogic'
image_meta = {'disk_format': 'vmdk',
'size': image_size_in_bytes,
'properties': {'vmware_disktype': 'sparse',
                                     'vmware_adaptertype': adapter_type}}
image_service = mock.Mock(glance.GlanceImageService)
image_service.show.return_value = image_meta
backing = mock.Mock()
def create_backing_mock(volume, create_params):
self.assertTrue(create_params[vmdk.CREATE_PARAM_DISK_LESS])
return backing
create_backing.side_effect = create_backing_mock
ds_name = mock.Mock()
folder_path = mock.Mock()
get_ds_name_folder_path.return_value = (ds_name, folder_path)
summary = mock.Mock()
select_ds_for_volume.return_value = (mock.sentinel.host,
mock.sentinel.rp,
mock.sentinel.folder,
summary)
uuid = "6b77b25a-9136-470e-899e-3c930e570d8e"
generate_uuid.return_value = uuid
host = mock.Mock()
dc_ref = mock.Mock()
vops.get_host.return_value = host
vops.get_dc.return_value = dc_ref
disk_type = vmdk.EAGER_ZEROED_THICK_VMDK_TYPE
get_disk_type.return_value = disk_type
path = mock.Mock()
create_disk_from_sparse_image.return_value = path
create_disk_from_preallocated_image.return_value = path
volume_size = 2
vops.get_disk_size.return_value = volume_size * units.Gi
context = mock.Mock()
volume = {'name': 'volume_name',
'id': 'volume_id',
'size': volume_size}
image_id = mock.Mock()
self._driver.copy_image_to_volume(
context, volume, image_service, image_id)
create_params = {vmdk.CREATE_PARAM_DISK_LESS: True,
vmdk.CREATE_PARAM_BACKING_NAME: uuid}
create_backing.assert_called_once_with(volume, create_params)
create_disk_from_sparse_image.assert_called_once_with(
context, image_service, image_id, image_size_in_bytes,
dc_ref, ds_name, folder_path, uuid)
vops.attach_disk_to_backing.assert_called_once_with(
backing, image_size_in_bytes / units.Ki, disk_type,
adapter_type, path.get_descriptor_ds_file_path())
select_ds_for_volume.assert_called_once_with(volume)
vops.clone_backing.assert_called_once_with(
volume['name'], backing, None, volumeops.FULL_CLONE_TYPE,
summary.datastore, disk_type, mock.sentinel.host)
vops.delete_backing.assert_called_once_with(backing)
self.assertFalse(extend_disk.called)
vops.get_disk_size.return_value = 1 * units.Gi
create_backing.reset_mock()
vops.attach_disk_to_backing.reset_mock()
vops.delete_backing.reset_mock()
image_meta['properties']['vmware_disktype'] = 'preallocated'
self._driver.copy_image_to_volume(
context, volume, image_service, image_id)
del create_params[vmdk.CREATE_PARAM_BACKING_NAME]
create_backing.assert_called_once_with(volume, create_params)
create_disk_from_preallocated_image.assert_called_once_with(
context, image_service, image_id, image_size_in_bytes,
dc_ref, ds_name, folder_path, volume['name'], adapter_type)
vops.attach_disk_to_backing.assert_called_once_with(
backing, image_size_in_bytes / units.Ki, disk_type,
adapter_type, path.get_descriptor_ds_file_path())
extend_disk.assert_called_once_with(volume['name'], volume['size'])
extend_disk.reset_mock()
create_disk_from_preallocated_image.side_effect = (
error_util.VimException("Error"))
self.assertRaises(error_util.VimException,
self._driver.copy_image_to_volume,
context, volume, image_service, image_id)
vops.delete_backing.assert_called_once_with(backing)
self.assertFalse(extend_disk.called)
@mock.patch(
'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
@mock.patch.object(VMDK_DRIVER, '_copy_image')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_create_virtual_disk_from_preallocated_image(
self, vops, copy_image, flat_extent_path):
self._test_create_virtual_disk_from_preallocated_image(
vops, copy_image, flat_extent_path)
def _test_create_virtual_disk_from_preallocated_image(
self, vops, copy_image, flat_extent_path):
context = mock.Mock()
image_service = mock.Mock()
image_id = mock.Mock()
image_size_in_bytes = 2 * units.Gi
dc_ref = mock.Mock()
ds_name = "nfs"
folder_path = "A/B/"
disk_name = "disk-1"
adapter_type = "ide"
src_path = mock.Mock()
flat_extent_path.return_value = src_path
ret = self._driver._create_virtual_disk_from_preallocated_image(
context, image_service, image_id, image_size_in_bytes, dc_ref,
ds_name, folder_path, disk_name, adapter_type)
create_descriptor = vops.create_flat_extent_virtual_disk_descriptor
create_descriptor.assert_called_once_with(
dc_ref, src_path, image_size_in_bytes / units.Ki, adapter_type,
vmdk.EAGER_ZEROED_THICK_VMDK_TYPE)
copy_image.assert_called_once_with(
context, dc_ref, image_service, image_id, image_size_in_bytes,
ds_name, src_path.get_flat_extent_file_path())
self.assertEqual(src_path, ret)
create_descriptor.reset_mock()
copy_image.reset_mock()
copy_image.side_effect = error_util.VimException("error")
self.assertRaises(
error_util.VimException,
self._driver._create_virtual_disk_from_preallocated_image,
context, image_service, image_id, image_size_in_bytes, dc_ref,
ds_name, folder_path, disk_name, adapter_type)
vops.delete_file.assert_called_once_with(
src_path.get_descriptor_ds_file_path(), dc_ref)
@mock.patch(
'cinder.volume.drivers.vmware.volumeops.'
'MonolithicSparseVirtualDiskPath')
@mock.patch(
'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
@mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_copy_image')
def test_create_virtual_disk_from_sparse_image(
self, copy_image, copy_temp_virtual_disk, flat_extent_path,
sparse_path):
self._test_create_virtual_disk_from_sparse_image(
copy_image, copy_temp_virtual_disk, flat_extent_path, sparse_path)
def _test_create_virtual_disk_from_sparse_image(
self, copy_image, copy_temp_virtual_disk, flat_extent_path,
sparse_path):
context = mock.Mock()
image_service = mock.Mock()
image_id = mock.Mock()
image_size_in_bytes = 2 * units.Gi
dc_ref = mock.Mock()
ds_name = "nfs"
folder_path = "A/B/"
disk_name = "disk-1"
src_path = mock.Mock()
sparse_path.return_value = src_path
dest_path = mock.Mock()
flat_extent_path.return_value = dest_path
ret = self._driver._create_virtual_disk_from_sparse_image(
context, image_service, image_id, image_size_in_bytes, dc_ref,
ds_name, folder_path, disk_name)
copy_image.assert_called_once_with(
context, dc_ref, image_service, image_id, image_size_in_bytes,
ds_name, src_path.get_descriptor_file_path())
copy_temp_virtual_disk.assert_called_once_with(
dc_ref, src_path, dest_path)
self.assertEqual(dest_path, ret)
@mock.patch.object(vmware_images, 'fetch_stream_optimized_image')
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
@mock.patch.object(VMDK_DRIVER, 'session')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_copy_image_to_volume_stream_optimized(self,
volumeops,
session,
get_profile_id,
_select_ds_for_volume,
_extend_virtual_disk,
fetch_optimized_image):
"""Test copy_image_to_volume.
Test with an acceptable vmdk disk format and streamOptimized disk type.
"""
self._test_copy_image_to_volume_stream_optimized(volumeops,
session,
get_profile_id,
_select_ds_for_volume,
_extend_virtual_disk,
fetch_optimized_image)
def _test_copy_image_to_volume_stream_optimized(self, volumeops,
session,
get_profile_id,
_select_ds_for_volume,
_extend_virtual_disk,
fetch_optimized_image):
fake_context = mock.Mock()
fake_backing = mock.sentinel.backing
fake_image_id = 'image-id'
size = 5 * units.Gi
size_gb = float(size) / units.Gi
fake_volume_size = 1 + size_gb
adapter_type = 'ide'
fake_image_meta = {'disk_format': 'vmdk', 'size': size,
'properties': {'vmware_disktype': 'streamOptimized',
'vmware_adaptertype': adapter_type}}
image_service = mock.Mock(glance.GlanceImageService)
fake_host = mock.sentinel.host
fake_rp = mock.sentinel.rp
fake_folder = mock.sentinel.folder
fake_summary = mock.sentinel.summary
fake_summary.name = "datastore-1"
fake_vm_create_spec = mock.sentinel.spec
fake_disk_type = 'thin'
vol_name = 'fake_volume name'
vol_id = '12345'
fake_volume = {'name': vol_name,
'id': vol_id,
'size': fake_volume_size,
'volume_type_id': None}
cf = session.vim.client.factory
vm_import_spec = cf.create('ns0:VirtualMachineImportSpec')
vm_import_spec.configSpec = fake_vm_create_spec
timeout = self._config.vmware_image_transfer_timeout_secs
image_service.show.return_value = fake_image_meta
volumeops.get_create_spec.return_value = fake_vm_create_spec
volumeops.get_backing.return_value = fake_backing
# If _select_ds_for_volume raises an exception, get_create_spec
# will not be called.
_select_ds_for_volume.side_effect = error_util.VimException('Error')
self.assertRaises(exception.VolumeBackendAPIException,
self._driver.copy_image_to_volume,
fake_context, fake_volume,
image_service, fake_image_id)
self.assertFalse(volumeops.get_create_spec.called)
        # If the volume size is greater than the backing's disk size,
# _extend_vmdk_virtual_disk will be called.
_select_ds_for_volume.side_effect = None
_select_ds_for_volume.return_value = (fake_host, fake_rp,
fake_folder, fake_summary)
profile_id = 'profile-1'
get_profile_id.return_value = profile_id
volumeops.get_disk_size.return_value = size
self._driver.copy_image_to_volume(fake_context, fake_volume,
image_service, fake_image_id)
image_service.show.assert_called_with(fake_context, fake_image_id)
_select_ds_for_volume.assert_called_with(fake_volume)
get_profile_id.assert_called_once_with(fake_volume)
volumeops.get_create_spec.assert_called_with(fake_volume['name'],
0,
fake_disk_type,
fake_summary.name,
profile_id,
adapter_type)
self.assertTrue(fetch_optimized_image.called)
fetch_optimized_image.assert_called_with(fake_context, timeout,
image_service,
fake_image_id,
session=session,
host=self.IP,
resource_pool=fake_rp,
vm_folder=fake_folder,
vm_create_spec=
vm_import_spec,
image_size=size)
_extend_virtual_disk.assert_called_once_with(fake_volume['name'],
fake_volume_size)
        # If the volume size is not greater than the backing's disk size,
# _extend_vmdk_virtual_disk will not be called.
volumeops.get_disk_size.return_value = fake_volume_size * units.Gi
_extend_virtual_disk.reset_mock()
self._driver.copy_image_to_volume(fake_context, fake_volume,
image_service, fake_image_id)
self.assertFalse(_extend_virtual_disk.called)
# If fetch_stream_optimized_image raises an exception,
# get_backing and delete_backing will be called.
fetch_optimized_image.side_effect = exception.CinderException
self.assertRaises(exception.CinderException,
self._driver.copy_image_to_volume,
fake_context, fake_volume,
image_service, fake_image_id)
volumeops.get_backing.assert_called_with(fake_volume['name'])
volumeops.delete_backing.assert_called_with(fake_backing)
self.assertFalse(_extend_virtual_disk.called)
def test_copy_volume_to_image_non_vmdk(self):
"""Test copy_volume_to_image for a non-vmdk disk format."""
m = self.mox
image_meta = FakeObject()
image_meta['disk_format'] = 'novmdk'
volume = FakeObject()
volume['name'] = 'vol-name'
volume['instance_uuid'] = None
volume['attached_host'] = None
m.ReplayAll()
self.assertRaises(exception.ImageUnacceptable,
self._driver.copy_volume_to_image,
mox.IgnoreArg(), volume,
mox.IgnoreArg(), image_meta)
m.UnsetStubs()
m.VerifyAll()
def test_copy_volume_to_image_when_attached(self):
"""Test copy_volume_to_image when volume is attached."""
m = self.mox
volume = FakeObject()
volume['instance_uuid'] = 'my_uuid'
m.ReplayAll()
self.assertRaises(exception.InvalidVolume,
self._driver.copy_volume_to_image,
mox.IgnoreArg(), volume,
mox.IgnoreArg(), mox.IgnoreArg())
m.UnsetStubs()
m.VerifyAll()
def test_copy_volume_to_image_vmdk(self):
"""Test copy_volume_to_image for a valid vmdk disk format."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'session')
self._driver.session = self._session
m.StubOutWithMock(api.VMwareAPISession, 'vim')
self._session.vim = self._vim
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
image_id = 'image-id-1'
image_meta = FakeObject()
image_meta['disk_format'] = 'vmdk'
image_meta['id'] = image_id
image_meta['name'] = image_id
image_service = FakeObject()
vol_name = 'volume-123456789'
project_id = 'project-owner-id-123'
volume = FakeObject()
volume['name'] = vol_name
size_gb = 5
size = size_gb * units.Gi
volume['size'] = size_gb
volume['project_id'] = project_id
volume['instance_uuid'] = None
volume['attached_host'] = None
# volumeops.get_backing
backing = FakeMor("VirtualMachine", "my_vm")
m.StubOutWithMock(self._volumeops, 'get_backing')
self._volumeops.get_backing(vol_name).AndReturn(backing)
# volumeops.get_vmdk_path
datastore_name = 'datastore1'
file_path = 'my_folder/my_nested_folder/my_vm.vmdk'
vmdk_file_path = '[%s] %s' % (datastore_name, file_path)
m.StubOutWithMock(self._volumeops, 'get_vmdk_path')
self._volumeops.get_vmdk_path(backing).AndReturn(vmdk_file_path)
# vmware_images.upload_image
timeout = self._config.vmware_image_transfer_timeout_secs
host_ip = self.IP
m.StubOutWithMock(vmware_images, 'upload_image')
vmware_images.upload_image(mox.IgnoreArg(), timeout, image_service,
image_id, project_id, session=self._session,
host=host_ip, vm=backing,
vmdk_file_path=vmdk_file_path,
vmdk_size=size,
image_name=image_id,
image_version=1)
m.ReplayAll()
self._driver.copy_volume_to_image(mox.IgnoreArg(), volume,
image_service, image_meta)
m.UnsetStubs()
m.VerifyAll()
def test_retrieve_properties_ex_fault_checker(self):
"""Test retrieve_properties_ex_fault_checker is called."""
m = self.mox
class FakeVim(vim.Vim):
def __init__(self):
pass
@property
def client(self):
class FakeRetrv(object):
def RetrievePropertiesEx(self, collector):
pass
def __getattr__(self, name):
if name == 'service':
return FakeRetrv()
return FakeRetrv()
def RetrieveServiceContent(self, type='ServiceInstance'):
return mox.MockAnything()
_vim = FakeVim()
m.ReplayAll()
# retrieve_properties_ex_fault_checker throws authentication error
self.assertRaises(error_util.VimFaultException,
_vim.RetrievePropertiesEx, mox.IgnoreArg())
m.UnsetStubs()
m.VerifyAll()
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
@mock.patch('cinder.openstack.common.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
@mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
def test_retype(self, ds_sel, vops, get_volume_type_extra_specs,
get_volume_group_folder, generate_uuid,
delete_temp_backing):
self._test_retype(ds_sel, vops, get_volume_type_extra_specs,
get_volume_group_folder, generate_uuid,
delete_temp_backing)
def _test_retype(self, ds_sel, vops, get_volume_type_extra_specs,
                     get_volume_group_folder, generate_uuid,
delete_temp_backing):
self._driver._storage_policy_enabled = True
context = mock.sentinel.context
diff = mock.sentinel.diff
host = mock.sentinel.host
new_type = {'id': 'abc'}
# Test with in-use volume.
vol = {'size': 1, 'status': 'retyping', 'name': 'vol-1',
'volume_type_id': 'def', 'instance_uuid': '583a8dbb'}
self.assertFalse(self._driver.retype(context, vol, new_type, diff,
host))
# Test with no backing.
vops.get_backing.return_value = None
vol['instance_uuid'] = None
self.assertTrue(self._driver.retype(context, vol, new_type, diff,
host))
# Test with no disk type conversion, no profile change and
# compliant datastore.
ds_value = mock.sentinel.datastore_value
datastore = mock.Mock(value=ds_value)
vops.get_datastore.return_value = datastore
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
get_volume_type_extra_specs.side_effect = [vmdk.THIN_VMDK_TYPE,
vmdk.THIN_VMDK_TYPE,
None,
None]
ds_sel.is_datastore_compliant.return_value = True
self.assertTrue(self._driver.retype(context, vol, new_type, diff,
host))
# Test with no disk type conversion, profile change and
# compliant datastore.
new_profile = mock.sentinel.new_profile
get_volume_type_extra_specs.side_effect = [vmdk.THIN_VMDK_TYPE,
vmdk.THIN_VMDK_TYPE,
'gold-1',
new_profile]
ds_sel.is_datastore_compliant.return_value = True
profile_id = mock.sentinel.profile_id
ds_sel.get_profile_id.return_value = profile_id
self.assertTrue(self._driver.retype(context, vol, new_type, diff,
host))
vops.change_backing_profile.assert_called_once_with(backing,
profile_id)
# Test with disk type conversion, profile change and a backing with
# snapshots. Also test the no candidate datastore case.
get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
vmdk.THIN_VMDK_TYPE,
'gold-1',
new_profile]
vops.snapshot_exists.return_value = True
ds_sel.select_datastore.return_value = ()
self.assertFalse(self._driver.retype(context, vol, new_type, diff,
host))
exp_req = {hub.DatastoreSelector.HARD_ANTI_AFFINITY_DS: [ds_value],
hub.DatastoreSelector.PROFILE_NAME: new_profile,
hub.DatastoreSelector.SIZE_BYTES: units.Gi}
ds_sel.select_datastore.assert_called_once_with(exp_req)
# Modify the previous case with a candidate datastore which is
# different than the backing's current datastore.
get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
vmdk.THIN_VMDK_TYPE,
'gold-1',
new_profile]
vops.snapshot_exists.return_value = True
host = mock.sentinel.host
rp = mock.sentinel.rp
candidate_ds = mock.Mock(value=mock.sentinel.candidate_ds_value)
summary = mock.Mock(datastore=candidate_ds)
ds_sel.select_datastore.return_value = (host, rp, summary)
folder = mock.sentinel.folder
get_volume_group_folder.return_value = folder
vops.change_backing_profile.reset_mock()
self.assertTrue(self._driver.retype(context, vol, new_type, diff,
host))
vops.relocate_backing.assert_called_once_with(
backing, candidate_ds, rp, host, vmdk.THIN_VMDK_TYPE)
vops.move_backing_to_folder.assert_called_once_with(backing, folder)
vops.change_backing_profile.assert_called_once_with(backing,
profile_id)
# Test with disk type conversion, profile change, backing with
# no snapshots and candidate datastore which is same as the backing
# datastore.
get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
vmdk.THIN_VMDK_TYPE,
'gold-1',
new_profile]
vops.snapshot_exists.return_value = False
summary.datastore = datastore
uuid = '025b654b-d4ed-47f9-8014-b71a7744eafc'
        generate_uuid.return_value = uuid
clone = mock.sentinel.clone
vops.clone_backing.return_value = clone
vops.change_backing_profile.reset_mock()
self.assertTrue(self._driver.retype(context, vol, new_type, diff,
host))
vops.rename_backing.assert_called_once_with(backing, uuid)
vops.clone_backing.assert_called_once_with(
vol['name'], backing, None, volumeops.FULL_CLONE_TYPE,
datastore, vmdk.THIN_VMDK_TYPE, host)
delete_temp_backing.assert_called_once_with(backing)
vops.change_backing_profile.assert_called_once_with(clone,
profile_id)
# Modify the previous case with exception during clone.
get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
vmdk.THIN_VMDK_TYPE,
'gold-1',
new_profile]
vops.clone_backing.side_effect = error_util.VimException('error')
vops.rename_backing.reset_mock()
vops.change_backing_profile.reset_mock()
self.assertRaises(
error_util.VimException, self._driver.retype, context, vol,
new_type, diff, host)
exp_rename_calls = [mock.call(backing, uuid),
mock.call(backing, vol['name'])]
self.assertEqual(exp_rename_calls, vops.rename_backing.call_args_list)
self.assertFalse(vops.change_backing_profile.called)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_extend_vmdk_virtual_disk(self, volume_ops):
"""Test vmdk._extend_vmdk_virtual_disk."""
self._test_extend_vmdk_virtual_disk(volume_ops)
def _test_extend_vmdk_virtual_disk(self, volume_ops):
fake_backing = mock.sentinel.backing
fake_vmdk_path = "[datastore] dest_vm/dest_vm.vmdk"
fake_dc = mock.sentinel.datacenter
fake_name = 'fake_name'
fake_size = 7
# If the backing is None, get_vmdk_path and get_dc
# will not be called
volume_ops.get_backing.return_value = None
volume_ops.get_vmdk_path.return_value = fake_vmdk_path
volume_ops.get_dc.return_value = fake_dc
self._driver._extend_vmdk_virtual_disk(fake_name, fake_size)
volume_ops.get_backing.assert_called_once_with(fake_name)
self.assertFalse(volume_ops.get_vmdk_path.called)
self.assertFalse(volume_ops.get_dc.called)
self.assertFalse(volume_ops.extend_virtual_disk.called)
# Reset the mock and set the backing with a fake,
# all the mocks should be called.
volume_ops.get_backing.reset_mock()
volume_ops.get_backing.return_value = fake_backing
self._driver._extend_vmdk_virtual_disk(fake_name, fake_size)
volume_ops.get_vmdk_path.assert_called_once_with(fake_backing)
volume_ops.get_dc.assert_called_once_with(fake_backing)
volume_ops.extend_virtual_disk.assert_called_once_with(fake_size,
fake_vmdk_path,
fake_dc)
# Test the exceptional case for extend_virtual_disk
volume_ops.extend_virtual_disk.side_effect = error_util.VimException(
'VimException raised.')
self.assertRaises(error_util.VimException,
self._driver._extend_vmdk_virtual_disk,
fake_name, fake_size)
@mock.patch.object(vmware_images, 'download_stream_optimized_disk')
@mock.patch('cinder.openstack.common.fileutils.file_open')
@mock.patch.object(VMDK_DRIVER, '_temporary_file')
@mock.patch('cinder.openstack.common.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, '_create_backing_in_inventory')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, 'session')
def test_backup_volume(self, session, vops, create_backing, generate_uuid,
temporary_file, file_open, download_disk):
self._test_backup_volume(session, vops, create_backing, generate_uuid,
temporary_file, file_open, download_disk)
def _test_backup_volume(self, session, vops, create_backing, generate_uuid,
temporary_file, file_open, download_disk):
volume = {'name': 'vol-1', 'id': 1, 'size': 1}
self._db.volume_get.return_value = volume
vops.get_backing.return_value = None
backing = mock.sentinel.backing
create_backing.return_value = backing
uuid = "c1037b23-c5e9-4446-815f-3e097cbf5bb0"
generate_uuid.return_value = uuid
tmp_file_path = mock.sentinel.tmp_file_path
temporary_file_ret = mock.Mock()
temporary_file.return_value = temporary_file_ret
temporary_file_ret.__enter__ = mock.Mock(return_value=tmp_file_path)
temporary_file_ret.__exit__ = mock.Mock(return_value=None)
vmdk_path = mock.sentinel.vmdk_path
vops.get_vmdk_path.return_value = vmdk_path
tmp_file = mock.sentinel.tmp_file
file_open_ret = mock.Mock()
file_open.return_value = file_open_ret
file_open_ret.__enter__ = mock.Mock(return_value=tmp_file)
file_open_ret.__exit__ = mock.Mock(return_value=None)
context = mock.sentinel.context
backup = {'id': 2, 'volume_id': 1}
backup_service = mock.Mock()
self._driver.backup_volume(context, backup, backup_service)
create_backing.assert_called_once_with(volume)
temporary_file.assert_called_once_with(suffix=".vmdk", prefix=uuid)
self.assertEqual(mock.call(tmp_file_path, "wb"),
file_open.call_args_list[0])
download_disk.assert_called_once_with(
context, self.IMG_TX_TIMEOUT, tmp_file, session=session,
host=self.IP, vm=backing, vmdk_file_path=vmdk_path,
vmdk_size=volume['size'] * units.Gi)
self.assertEqual(mock.call(tmp_file_path, "rb"),
file_open.call_args_list[1])
backup_service.backup.assert_called_once_with(backup, tmp_file)
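    # The assertions above trace the backup path exercised here: ensure a
    # backing exists, download its disk to a temporary stream-optimized vmdk
    # file, then hand that file to the configured backup service.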
@mock.patch.object(VMDK_DRIVER, 'extend_volume')
@mock.patch.object(VMDK_DRIVER, '_restore_backing')
@mock.patch('cinder.openstack.common.fileutils.file_open')
@mock.patch.object(VMDK_DRIVER, '_temporary_file')
@mock.patch('cinder.openstack.common.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_restore_backup(self, vops, generate_uuid, temporary_file,
file_open, restore_backing, extend_volume):
self._test_restore_backup(vops, generate_uuid, temporary_file,
file_open, restore_backing, extend_volume)
def _test_restore_backup(
self, vops, generate_uuid, temporary_file, file_open,
restore_backing, extend_volume):
volume = {'name': 'vol-1', 'id': 1, 'size': 1}
backup = {'id': 2, 'size': 1}
context = mock.sentinel.context
backup_service = mock.Mock()
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
vops.snapshot_exists.return_value = True
self.assertRaises(
exception.InvalidVolume, self._driver.restore_backup, context,
backup, volume, backup_service)
uuid = "c1037b23-c5e9-4446-815f-3e097cbf5bb0"
generate_uuid.return_value = uuid
tmp_file_path = mock.sentinel.tmp_file_path
temporary_file_ret = mock.Mock()
temporary_file.return_value = temporary_file_ret
temporary_file_ret.__enter__ = mock.Mock(return_value=tmp_file_path)
temporary_file_ret.__exit__ = mock.Mock(return_value=None)
tmp_file = mock.sentinel.tmp_file
file_open_ret = mock.Mock()
file_open.return_value = file_open_ret
file_open_ret.__enter__ = mock.Mock(return_value=tmp_file)
file_open_ret.__exit__ = mock.Mock(return_value=None)
vops.snapshot_exists.return_value = False
self._driver.restore_backup(context, backup, volume, backup_service)
temporary_file.assert_called_once_with(suffix=".vmdk", prefix=uuid)
file_open.assert_called_once_with(tmp_file_path, "wb")
backup_service.restore.assert_called_once_with(
backup, volume['id'], tmp_file)
restore_backing.assert_called_once_with(
context, volume, backing, tmp_file_path, backup['size'] * units.Gi)
self.assertFalse(extend_volume.called)
temporary_file.reset_mock()
file_open.reset_mock()
backup_service.reset_mock()
restore_backing.reset_mock()
volume = {'name': 'vol-1', 'id': 1, 'size': 2}
self._driver.restore_backup(context, backup, volume, backup_service)
temporary_file.assert_called_once_with(suffix=".vmdk", prefix=uuid)
file_open.assert_called_once_with(tmp_file_path, "wb")
backup_service.restore.assert_called_once_with(
backup, volume['id'], tmp_file)
restore_backing.assert_called_once_with(
context, volume, backing, tmp_file_path, backup['size'] * units.Gi)
extend_volume.assert_called_once_with(volume, volume['size'])
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch(
'cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver._get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER,
'_create_backing_from_stream_optimized_file')
@mock.patch('cinder.openstack.common.uuidutils.generate_uuid')
def test_restore_backing(
self, generate_uuid, create_backing, select_ds, get_disk_type,
vops, delete_temp_backing):
self._test_restore_backing(
generate_uuid, create_backing, select_ds, get_disk_type, vops,
delete_temp_backing)
def _test_restore_backing(
self, generate_uuid, create_backing, select_ds, get_disk_type,
vops, delete_temp_backing):
src_uuid = "c1037b23-c5e9-4446-815f-3e097cbf5bb0"
generate_uuid.return_value = src_uuid
src = mock.sentinel.src
create_backing.return_value = src
summary = mock.Mock()
summary.datastore = mock.sentinel.datastore
select_ds.return_value = (mock.sentinel.host, mock.ANY, mock.ANY,
summary)
disk_type = vmdk.THIN_VMDK_TYPE
get_disk_type.return_value = disk_type
context = mock.sentinel.context
volume = {'name': 'vol-1', 'id': 1, 'size': 1}
backing = None
tmp_file_path = mock.sentinel.tmp_file_path
backup_size = units.Gi
self._driver._restore_backing(
context, volume, backing, tmp_file_path, backup_size)
create_backing.assert_called_once_with(
context, src_uuid, volume, tmp_file_path, backup_size)
vops.clone_backing.assert_called_once_with(
volume['name'], src, None, volumeops.FULL_CLONE_TYPE,
summary.datastore, disk_type, mock.sentinel.host)
delete_temp_backing.assert_called_once_with(src)
create_backing.reset_mock()
vops.clone_backing.reset_mock()
delete_temp_backing.reset_mock()
dest_uuid = "de4b0708-f947-4abe-98f8-75e52ce03b7b"
tmp_uuid = "82c2a4f0-9064-4d95-bd88-6567a36018fa"
generate_uuid.side_effect = [src_uuid, dest_uuid, tmp_uuid]
dest = mock.sentinel.dest
vops.clone_backing.return_value = dest
backing = mock.sentinel.backing
self._driver._restore_backing(
context, volume, backing, tmp_file_path, backup_size)
create_backing.assert_called_once_with(
context, src_uuid, volume, tmp_file_path, backup_size)
vops.clone_backing.assert_called_once_with(
dest_uuid, src, None, volumeops.FULL_CLONE_TYPE,
summary.datastore, disk_type, mock.sentinel.host)
exp_rename_calls = [mock.call(backing, tmp_uuid),
mock.call(dest, volume['name'])]
self.assertEqual(exp_rename_calls, vops.rename_backing.call_args_list)
exp_delete_temp_backing_calls = [mock.call(backing), mock.call(src)]
self.assertEqual(exp_delete_temp_backing_calls,
delete_temp_backing.call_args_list)
delete_temp_backing.reset_mock()
vops.rename_backing.reset_mock()
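        # Simulate a failure when renaming the clone to the volume name so that
        # the driver's rollback path (rename the original backing back and
        # delete the temporary backings) is exercised below.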
def vops_rename(backing, new_name):
if backing == dest and new_name == volume['name']:
raise error_util.VimException("error")
vops.rename_backing.side_effect = vops_rename
generate_uuid.side_effect = [src_uuid, dest_uuid, tmp_uuid]
self.assertRaises(
error_util.VimException, self._driver._restore_backing, context,
volume, backing, tmp_file_path, backup_size)
exp_rename_calls = [mock.call(backing, tmp_uuid),
mock.call(dest, volume['name']),
mock.call(backing, volume['name'])]
self.assertEqual(exp_rename_calls, vops.rename_backing.call_args_list)
exp_delete_temp_backing_calls = [mock.call(dest), mock.call(src)]
self.assertEqual(exp_delete_temp_backing_calls,
delete_temp_backing.call_args_list)
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
@mock.patch.object(vmware_images, 'upload_stream_optimized_disk')
@mock.patch('cinder.openstack.common.fileutils.file_open')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch(
'cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver._get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
@mock.patch.object(VMDK_DRIVER, 'session')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
def test_create_backing_from_stream_optimized_file(
self, select_ds, session, get_storage_profile_id, get_disk_type,
vops, file_open, upload_disk, delete_temp_backing):
self._test_create_backing_from_stream_optimized_file(
select_ds, session, get_storage_profile_id, get_disk_type, vops,
file_open, upload_disk, delete_temp_backing)
def _test_create_backing_from_stream_optimized_file(
self, select_ds, session, get_storage_profile_id, get_disk_type,
vops, file_open, upload_disk, delete_temp_backing):
rp = mock.sentinel.rp
folder = mock.sentinel.folder
summary = mock.Mock()
summary.name = mock.sentinel.name
select_ds.return_value = (mock.ANY, rp, folder, summary)
import_spec = mock.Mock()
session.vim.client.factory.create.return_value = import_spec
profile_id = 'profile-1'
get_storage_profile_id.return_value = profile_id
disk_type = vmdk.THIN_VMDK_TYPE
get_disk_type.return_value = disk_type
create_spec = mock.Mock()
vops.get_create_spec.return_value = create_spec
tmp_file = mock.sentinel.tmp_file
file_open_ret = mock.Mock()
file_open.return_value = file_open_ret
file_open_ret.__enter__ = mock.Mock(return_value=tmp_file)
file_open_ret.__exit__ = mock.Mock(return_value=None)
vm_ref = mock.sentinel.vm_ref
upload_disk.return_value = vm_ref
context = mock.sentinel.context
name = 'vm-1'
volume = {'name': 'vol-1', 'id': 1, 'size': 1}
tmp_file_path = mock.sentinel.tmp_file_path
file_size_bytes = units.Gi
ret = self._driver._create_backing_from_stream_optimized_file(
context, name, volume, tmp_file_path, file_size_bytes)
self.assertEqual(vm_ref, ret)
vops.get_create_spec.assert_called_once_with(
name, 0, disk_type, summary.name, profile_id)
file_open.assert_called_once_with(tmp_file_path, "rb")
upload_disk.assert_called_once_with(
context, self.IMG_TX_TIMEOUT, tmp_file, session=session,
host=self.IP, resource_pool=rp, vm_folder=folder,
vm_create_spec=import_spec, vmdk_size=file_size_bytes)
upload_disk.side_effect = error_util.VimException("error")
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
self.assertRaises(
error_util.VimException,
self._driver._create_backing_from_stream_optimized_file,
context, name, volume, tmp_file_path, file_size_bytes)
delete_temp_backing.assert_called_once_with(backing)
class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
"""Test class for VMwareVcVmdkDriver."""
VMDK_DRIVER = vmdk.VMwareVcVmdkDriver
DEFAULT_VC_VERSION = '5.5'
def setUp(self):
super(VMwareVcVmdkDriverTestCase, self).setUp()
self._config.vmware_host_version = self.DEFAULT_VC_VERSION
self._driver = vmdk.VMwareVcVmdkDriver(configuration=self._config,
db=self._db)
def test_get_pbm_wsdl_location(self):
# no version returns None
wsdl = self._driver._get_pbm_wsdl_location(None)
self.assertIsNone(wsdl)
def expected_wsdl(version):
driver_dir = os.path.join(os.path.dirname(__file__), '..',
'volume', 'drivers', 'vmware')
driver_abs_dir = os.path.abspath(driver_dir)
return 'file://' + os.path.join(driver_abs_dir, 'wsdl', version,
'pbmService.wsdl')
# verify wsdl path for different version strings
with mock.patch('os.path.exists') as path_exists:
path_exists.return_value = True
wsdl = self._driver._get_pbm_wsdl_location(LooseVersion('5'))
self.assertEqual(expected_wsdl('5'), wsdl)
wsdl = self._driver._get_pbm_wsdl_location(LooseVersion('5.5'))
self.assertEqual(expected_wsdl('5.5'), wsdl)
wsdl = self._driver._get_pbm_wsdl_location(LooseVersion('5.5.1'))
self.assertEqual(expected_wsdl('5.5'), wsdl)
# if wsdl path does not exist, then it returns None
path_exists.return_value = False
wsdl = self._driver._get_pbm_wsdl_location(LooseVersion('5.5'))
self.assertIsNone(wsdl)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'session', new_callable=mock.PropertyMock)
def test_get_vc_version(self, session):
# test config overrides fetching from VC server
version = self._driver._get_vc_version()
self.assertEqual(self.DEFAULT_VC_VERSION, version)
# explicitly remove config entry
self._driver.configuration.vmware_host_version = None
session.return_value.vim.service_content.about.version = '6.0.1'
version = self._driver._get_vc_version()
self.assertEqual(LooseVersion('6.0.1'), version)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_vc_version')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'session', new_callable=mock.PropertyMock)
def test_do_setup_with_pbm_disabled(self, session, get_vc_version):
session_obj = mock.Mock(name='session')
session.return_value = session_obj
get_vc_version.return_value = LooseVersion('5.0')
self._driver.do_setup(mock.ANY)
self.assertFalse(self._driver._storage_policy_enabled)
get_vc_version.assert_called_once_with()
self.assertEqual(session_obj, self._driver.volumeops._session)
self.assertEqual(session_obj, self._driver.ds_sel._session)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_pbm_wsdl_location')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_vc_version')
def test_do_setup_with_invalid_pbm_wsdl(self, get_vc_version,
get_pbm_wsdl_location):
vc_version = LooseVersion('5.5')
get_vc_version.return_value = vc_version
get_pbm_wsdl_location.return_value = None
self.assertRaises(error_util.VMwareDriverException,
self._driver.do_setup,
mock.ANY)
self.assertFalse(self._driver._storage_policy_enabled)
get_vc_version.assert_called_once_with()
get_pbm_wsdl_location.assert_called_once_with(vc_version)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_pbm_wsdl_location')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_vc_version')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'session', new_callable=mock.PropertyMock)
def test_do_setup(self, session, get_vc_version, get_pbm_wsdl_location):
session_obj = mock.Mock(name='session')
session.return_value = session_obj
vc_version = LooseVersion('5.5')
get_vc_version.return_value = vc_version
get_pbm_wsdl_location.return_value = 'file:///pbm.wsdl'
self._driver.do_setup(mock.ANY)
self.assertTrue(self._driver._storage_policy_enabled)
get_vc_version.assert_called_once_with()
get_pbm_wsdl_location.assert_called_once_with(vc_version)
self.assertEqual(session_obj, self._driver.volumeops._session)
self.assertEqual(session_obj, self._driver.ds_sel._session)
@mock.patch.object(VMDK_DRIVER, '_extend_volumeops_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_create_backing_in_inventory')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_create_backing_by_copying(self, volumeops, create_backing,
extend_virtual_disk):
self._test_create_backing_by_copying(volumeops, create_backing,
extend_virtual_disk)
def test_init_conn_with_instance_and_backing(self):
"""Test initialize_connection with instance and backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
volume = FakeObject()
volume['name'] = 'volume_name'
volume['id'] = 'volume_id'
volume['size'] = 1
connector = {'instance': 'my_instance'}
backing = FakeMor('VirtualMachine', 'my_back')
self._volumeops.get_backing(volume['name']).AndReturn(backing)
m.StubOutWithMock(self._volumeops, 'get_host')
host = FakeMor('HostSystem', 'my_host')
self._volumeops.get_host(mox.IgnoreArg()).AndReturn(host)
datastore = FakeMor('Datastore', 'my_ds')
resource_pool = FakeMor('ResourcePool', 'my_rp')
m.StubOutWithMock(self._volumeops, 'get_dss_rp')
self._volumeops.get_dss_rp(host).AndReturn(([datastore],
resource_pool))
m.StubOutWithMock(self._volumeops, 'get_datastore')
self._volumeops.get_datastore(backing).AndReturn(datastore)
m.ReplayAll()
conn_info = self._driver.initialize_connection(volume, connector)
self.assertEqual(conn_info['driver_volume_type'], 'vmdk')
self.assertEqual(conn_info['data']['volume'], 'my_back')
self.assertEqual(conn_info['data']['volume_id'], 'volume_id')
m.UnsetStubs()
m.VerifyAll()
def test_get_volume_group_folder(self):
"""Test _get_volume_group_folder."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
datacenter = FakeMor('Datacenter', 'my_dc')
m.StubOutWithMock(self._volumeops, 'get_vmfolder')
self._volumeops.get_vmfolder(datacenter)
m.StubOutWithMock(self._volumeops, 'create_folder')
self._volumeops.create_folder(mox.IgnoreArg(),
self._config.vmware_volume_folder)
m.ReplayAll()
self._driver._get_volume_group_folder(datacenter)
m.UnsetStubs()
m.VerifyAll()
def test_init_conn_with_instance_and_backing_and_relocation(self):
"""Test initialize_connection with backing being relocated."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
volume = FakeObject()
volume['name'] = 'volume_name'
volume['id'] = 'volume_id'
volume['size'] = 1
connector = {'instance': 'my_instance'}
backing = FakeMor('VirtualMachine', 'my_back')
self._volumeops.get_backing(volume['name']).AndReturn(backing)
m.StubOutWithMock(self._volumeops, 'get_host')
host = FakeMor('HostSystem', 'my_host')
self._volumeops.get_host(mox.IgnoreArg()).AndReturn(host)
datastore1 = FakeMor('Datastore', 'my_ds_1')
datastore2 = FakeMor('Datastore', 'my_ds_2')
resource_pool = FakeMor('ResourcePool', 'my_rp')
m.StubOutWithMock(self._volumeops, 'get_dss_rp')
self._volumeops.get_dss_rp(host).AndReturn(([datastore1],
resource_pool))
m.StubOutWithMock(self._volumeops, 'get_datastore')
self._volumeops.get_datastore(backing).AndReturn(datastore2)
m.StubOutWithMock(self._driver, '_get_folder_ds_summary')
folder = FakeMor('Folder', 'my_fol')
summary = FakeDatastoreSummary(1, 1, datastore1)
self._driver._get_folder_ds_summary(volume, resource_pool,
[datastore1]).AndReturn((folder,
summary))
m.StubOutWithMock(self._volumeops, 'relocate_backing')
self._volumeops.relocate_backing(backing, datastore1,
resource_pool, host)
m.StubOutWithMock(self._volumeops, 'move_backing_to_folder')
self._volumeops.move_backing_to_folder(backing, folder)
m.ReplayAll()
conn_info = self._driver.initialize_connection(volume, connector)
self.assertEqual(conn_info['driver_volume_type'], 'vmdk')
self.assertEqual(conn_info['data']['volume'], 'my_back')
self.assertEqual(conn_info['data']['volume_id'], 'volume_id')
m.UnsetStubs()
m.VerifyAll()
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_clone_backing_linked(self, volume_ops, _extend_vmdk_virtual_disk):
"""Test _clone_backing with clone type - linked."""
fake_size = 3
fake_volume = {'volume_type_id': None, 'name': 'fake_name',
'size': fake_size}
fake_snapshot = {'volume_name': 'volume_name',
'name': 'snapshot_name',
'volume_size': 2}
fake_type = volumeops.LINKED_CLONE_TYPE
fake_backing = mock.sentinel.backing
self._driver._clone_backing(fake_volume, fake_backing, fake_snapshot,
volumeops.LINKED_CLONE_TYPE,
fake_snapshot['volume_size'])
volume_ops.clone_backing.assert_called_with(fake_volume['name'],
fake_backing,
fake_snapshot,
fake_type,
None,
host=None)
# If the volume size is greater than the original snapshot size,
# _extend_vmdk_virtual_disk will be called.
_extend_vmdk_virtual_disk.assert_called_with(fake_volume['name'],
fake_volume['size'])
# If the volume size is not greater than the original snapshot size,
# _extend_vmdk_virtual_disk will not be called.
fake_size = 2
fake_volume['size'] = fake_size
_extend_vmdk_virtual_disk.reset_mock()
self._driver._clone_backing(fake_volume, fake_backing, fake_snapshot,
volumeops.LINKED_CLONE_TYPE,
fake_snapshot['volume_size'])
self.assertFalse(_extend_vmdk_virtual_disk.called)
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_clone_backing_full(self, volume_ops, _select_ds_for_volume,
_extend_vmdk_virtual_disk):
"""Test _clone_backing with clone type - full."""
fake_host = mock.sentinel.host
fake_backing = mock.sentinel.backing
fake_folder = mock.sentinel.folder
fake_datastore = mock.sentinel.datastore
fake_resource_pool = mock.sentinel.resourcePool
fake_summary = mock.Mock(spec=object)
fake_summary.datastore = fake_datastore
fake_size = 3
fake_volume = {'volume_type_id': None, 'name': 'fake_name',
'size': fake_size}
fake_snapshot = {'volume_name': 'volume_name', 'name': 'snapshot_name',
'volume_size': 2}
_select_ds_for_volume.return_value = (fake_host,
fake_resource_pool,
fake_folder, fake_summary)
self._driver._clone_backing(fake_volume, fake_backing, fake_snapshot,
volumeops.FULL_CLONE_TYPE,
fake_snapshot['volume_size'])
_select_ds_for_volume.assert_called_with(fake_volume)
volume_ops.clone_backing.assert_called_with(fake_volume['name'],
fake_backing,
fake_snapshot,
volumeops.FULL_CLONE_TYPE,
fake_datastore,
host=fake_host)
# If the volume size is greater than the original snapshot size,
# _extend_vmdk_virtual_disk will be called.
_extend_vmdk_virtual_disk.assert_called_with(fake_volume['name'],
fake_volume['size'])
# If the volume size is not greater than the original snapshot size,
# _extend_vmdk_virtual_disk will not be called.
fake_size = 2
fake_volume['size'] = fake_size
_extend_vmdk_virtual_disk.reset_mock()
self._driver._clone_backing(fake_volume, fake_backing, fake_snapshot,
volumeops.FULL_CLONE_TYPE,
fake_snapshot['volume_size'])
self.assertFalse(_extend_vmdk_virtual_disk.called)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_volume_from_snapshot_without_backing(self, mock_vops):
"""Test create_volume_from_snapshot without a backing."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'name': 'mock_vol'}
snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap'}
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = None
# invoke the create_volume_from_snapshot api
driver.create_volume_from_snapshot(volume, snapshot)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('mock_vol')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_volume_from_snap_without_backing_snap(self, mock_vops):
"""Test create_volume_from_snapshot without a backing snapshot."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'volume_type_id': None, 'name': 'mock_vol'}
snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap'}
backing = mock.sentinel.backing
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = backing
mock_vops.get_snapshot.return_value = None
# invoke the create_volume_from_snapshot api
driver.create_volume_from_snapshot(volume, snapshot)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('mock_vol')
mock_vops.get_snapshot.assert_called_once_with(backing,
'mock_snap')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_volume_from_snapshot(self, mock_vops):
"""Test create_volume_from_snapshot."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'volume_type_id': None, 'name': 'mock_vol'}
snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap',
'volume_size': 2}
backing = mock.sentinel.backing
snap_moref = mock.sentinel.snap_moref
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = backing
mock_vops.get_snapshot.return_value = snap_moref
driver._clone_backing = mock.MagicMock()
# invoke the create_volume_from_snapshot api
driver.create_volume_from_snapshot(volume, snapshot)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('mock_vol')
mock_vops.get_snapshot.assert_called_once_with(backing,
'mock_snap')
default_clone_type = volumeops.FULL_CLONE_TYPE
driver._clone_backing.assert_called_once_with(volume,
backing,
snap_moref,
default_clone_type,
snapshot['volume_size'])
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_cloned_volume_without_backing(self, mock_vops):
"""Test create_cloned_volume without a backing."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'name': 'mock_vol'}
src_vref = {'name': 'src_snapshot_name'}
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = None
        # invoke the create_cloned_volume api
driver.create_cloned_volume(volume, src_vref)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_cloned_volume_with_backing(self, mock_vops):
"""Test create_cloned_volume with clone type - full."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'volume_type_id': None, 'name': 'mock_vol'}
src_vref = {'name': 'src_snapshot_name', 'size': 1}
backing = mock.sentinel.backing
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = backing
default_clone_type = volumeops.FULL_CLONE_TYPE
driver._clone_backing = mock.MagicMock()
        # invoke the create_cloned_volume api
driver.create_cloned_volume(volume, src_vref)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('src_snapshot_name')
driver._clone_backing.assert_called_once_with(volume,
backing,
None,
default_clone_type,
src_vref['size'])
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_clone_type')
def test_create_linked_cloned_volume_with_backing(self, get_clone_type,
mock_vops):
"""Test create_cloned_volume with clone type - linked."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'volume_type_id': None, 'name': 'mock_vol', 'id': 'mock_id'}
src_vref = {'name': 'src_snapshot_name', 'status': 'available',
'size': 1}
backing = mock.sentinel.backing
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = backing
linked_clone = volumeops.LINKED_CLONE_TYPE
get_clone_type.return_value = linked_clone
driver._clone_backing = mock.MagicMock()
mock_vops.create_snapshot = mock.MagicMock()
mock_vops.create_snapshot.return_value = mock.sentinel.snapshot
        # invoke the create_cloned_volume api
driver.create_cloned_volume(volume, src_vref)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('src_snapshot_name')
get_clone_type.assert_called_once_with(volume)
name = 'snapshot-%s' % volume['id']
mock_vops.create_snapshot.assert_called_once_with(backing, name, None)
driver._clone_backing.assert_called_once_with(volume,
backing,
mock.sentinel.snapshot,
linked_clone,
src_vref['size'])
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_clone_type')
def test_create_linked_cloned_volume_when_attached(self, get_clone_type,
mock_vops):
"""Test create_cloned_volume linked clone when volume is attached."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'volume_type_id': None, 'name': 'mock_vol', 'id': 'mock_id'}
src_vref = {'name': 'src_snapshot_name', 'status': 'in-use'}
backing = mock.sentinel.backing
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = backing
linked_clone = volumeops.LINKED_CLONE_TYPE
get_clone_type.return_value = linked_clone
        # invoke the create_cloned_volume api
self.assertRaises(exception.InvalidVolume,
driver.create_cloned_volume,
volume,
src_vref)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('src_snapshot_name')
get_clone_type.assert_called_once_with(volume)
@mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs')
def test_get_storage_profile(self, get_volume_type_extra_specs):
"""Test vmdk _get_storage_profile."""
# volume with no type id returns None
volume = FakeObject()
volume['volume_type_id'] = None
sp = self._driver._get_storage_profile(volume)
        self.assertIsNone(sp, "Without a volume_type_id no storage "
                              "profile should be returned.")
# profile associated with the volume type should be returned
fake_id = 'fake_volume_id'
volume['volume_type_id'] = fake_id
get_volume_type_extra_specs.return_value = 'fake_profile'
profile = self._driver._get_storage_profile(volume)
self.assertEqual('fake_profile', profile)
spec_key = 'vmware:storage_profile'
get_volume_type_extra_specs.assert_called_once_with(fake_id, spec_key)
# None should be returned when no storage profile is
# associated with the volume type
get_volume_type_extra_specs.return_value = False
profile = self._driver._get_storage_profile(volume)
self.assertIsNone(profile)
@mock.patch('cinder.volume.drivers.vmware.vim_util.'
'convert_datastores_to_hubs')
@mock.patch('cinder.volume.drivers.vmware.vim_util.'
'convert_hubs_to_datastores')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'session', new_callable=mock.PropertyMock)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_filter_ds_by_profile(self, volumeops, session, hubs_to_ds,
ds_to_hubs):
"""Test vmdk _filter_ds_by_profile() method."""
volumeops = volumeops.return_value
session = session.return_value
# Test with no profile id
datastores = [mock.sentinel.ds1, mock.sentinel.ds2]
profile = 'fake_profile'
volumeops.retrieve_profile_id.return_value = None
self.assertRaises(error_util.VimException,
self._driver._filter_ds_by_profile,
datastores, profile)
volumeops.retrieve_profile_id.assert_called_once_with(profile)
# Test with a fake profile id
profileId = 'fake_profile_id'
filtered_dss = [mock.sentinel.ds1]
# patch method calls from _filter_ds_by_profile
volumeops.retrieve_profile_id.return_value = profileId
pbm_cf = mock.sentinel.pbm_cf
session.pbm.client.factory = pbm_cf
hubs = [mock.sentinel.hub1, mock.sentinel.hub2]
ds_to_hubs.return_value = hubs
volumeops.filter_matching_hubs.return_value = mock.sentinel.hubs
hubs_to_ds.return_value = filtered_dss
# call _filter_ds_by_profile with a fake profile
actual_dss = self._driver._filter_ds_by_profile(datastores, profile)
# verify return value and called methods
self.assertEqual(filtered_dss, actual_dss,
"Wrong filtered datastores returned.")
ds_to_hubs.assert_called_once_with(pbm_cf, datastores)
volumeops.filter_matching_hubs.assert_called_once_with(hubs,
profileId)
hubs_to_ds.assert_called_once_with(mock.sentinel.hubs, datastores)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'session', new_callable=mock.PropertyMock)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_get_folder_ds_summary(self, volumeops, session):
"""Test _get_folder_ds_summary."""
volumeops = volumeops.return_value
driver = self._driver
driver._storage_policy_enabled = True
volume = {'size': 10, 'volume_type_id': 'fake_type'}
rp = mock.sentinel.resource_pool
dss = [mock.sentinel.datastore1, mock.sentinel.datastore2]
filtered_dss = [mock.sentinel.datastore1]
profile = mock.sentinel.profile
def filter_ds(datastores, storage_profile):
return filtered_dss
# patch method calls from _get_folder_ds_summary
volumeops.get_dc.return_value = mock.sentinel.dc
volumeops.get_vmfolder.return_value = mock.sentinel.vmfolder
volumeops.create_folder.return_value = mock.sentinel.folder
driver._get_storage_profile = mock.MagicMock()
driver._get_storage_profile.return_value = profile
driver._filter_ds_by_profile = mock.MagicMock(side_effect=filter_ds)
driver._select_datastore_summary = mock.MagicMock()
driver._select_datastore_summary.return_value = mock.sentinel.summary
# call _get_folder_ds_summary
(folder, datastore_summary) = driver._get_folder_ds_summary(volume,
rp, dss)
# verify returned values and calls made
self.assertEqual(mock.sentinel.folder, folder,
"Folder returned is wrong.")
self.assertEqual(mock.sentinel.summary, datastore_summary,
"Datastore summary returned is wrong.")
volumeops.get_dc.assert_called_once_with(rp)
volumeops.get_vmfolder.assert_called_once_with(mock.sentinel.dc)
volumeops.create_folder.assert_called_once_with(mock.sentinel.vmfolder,
self.VOLUME_FOLDER)
driver._get_storage_profile.assert_called_once_with(volume)
driver._filter_ds_by_profile.assert_called_once_with(dss, profile)
size = volume['size'] * units.Gi
driver._select_datastore_summary.assert_called_once_with(size,
filtered_dss)
# Clear side effects.
driver._filter_ds_by_profile.side_effect = None
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_extend_vmdk_virtual_disk(self, volume_ops):
"""Test vmdk._extend_vmdk_virtual_disk."""
self._test_extend_vmdk_virtual_disk(volume_ops)
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch('cinder.openstack.common.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER,
'_create_virtual_disk_from_preallocated_image')
@mock.patch.object(VMDK_DRIVER, '_create_virtual_disk_from_sparse_image')
@mock.patch(
'cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver._get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_get_ds_name_folder_path')
@mock.patch.object(VMDK_DRIVER, '_create_backing_in_inventory')
def test_copy_image_to_volume_non_stream_optimized(
self, create_backing, get_ds_name_folder_path, get_disk_type,
create_disk_from_sparse_image, create_disk_from_preallocated_image,
vops, select_ds_for_volume, generate_uuid, extend_disk):
self._test_copy_image_to_volume_non_stream_optimized(
create_backing,
get_ds_name_folder_path,
get_disk_type,
create_disk_from_sparse_image,
create_disk_from_preallocated_image,
vops,
select_ds_for_volume,
generate_uuid,
extend_disk)
@mock.patch(
'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
@mock.patch.object(VMDK_DRIVER, '_copy_image')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_create_virtual_disk_from_preallocated_image(
self, vops, copy_image, flat_extent_path):
self._test_create_virtual_disk_from_preallocated_image(
vops, copy_image, flat_extent_path)
@mock.patch(
'cinder.volume.drivers.vmware.volumeops.'
'MonolithicSparseVirtualDiskPath')
@mock.patch(
'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
@mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_copy_image')
def test_create_virtual_disk_from_sparse_image(
self, copy_image, copy_temp_virtual_disk, flat_extent_path,
sparse_path):
self._test_create_virtual_disk_from_sparse_image(
copy_image, copy_temp_virtual_disk, flat_extent_path, sparse_path)
@mock.patch.object(vmware_images, 'fetch_stream_optimized_image')
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
@mock.patch.object(VMDK_DRIVER, 'session')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_copy_image_to_volume_stream_optimized(self, volumeops,
session,
get_profile_id,
_select_ds_for_volume,
_extend_virtual_disk,
fetch_optimized_image):
"""Test copy_image_to_volume.
Test with an acceptable vmdk disk format and streamOptimized disk type.
"""
self._test_copy_image_to_volume_stream_optimized(volumeops,
session,
get_profile_id,
_select_ds_for_volume,
_extend_virtual_disk,
fetch_optimized_image)
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
@mock.patch('cinder.openstack.common.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
@mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
def test_retype(self, ds_sel, vops, get_volume_type_extra_specs,
get_volume_group_folder, generate_uuid,
delete_temp_backing):
self._test_retype(ds_sel, vops, get_volume_type_extra_specs,
get_volume_group_folder, generate_uuid,
delete_temp_backing)
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_extend_volume(self, volume_ops, _extend_virtual_disk,
_select_ds_for_volume):
"""Test extend_volume."""
self._test_extend_volume(volume_ops, _extend_virtual_disk,
_select_ds_for_volume)
@mock.patch.object(vmware_images, 'download_stream_optimized_disk')
@mock.patch('cinder.openstack.common.fileutils.file_open')
@mock.patch.object(VMDK_DRIVER, '_temporary_file')
@mock.patch('cinder.openstack.common.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, '_create_backing_in_inventory')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, 'session')
def test_backup_volume(self, session, vops, create_backing, generate_uuid,
temporary_file, file_open, download_disk):
self._test_backup_volume(session, vops, create_backing, generate_uuid,
temporary_file, file_open, download_disk)
@mock.patch.object(VMDK_DRIVER, 'extend_volume')
@mock.patch.object(VMDK_DRIVER, '_restore_backing')
@mock.patch('cinder.openstack.common.fileutils.file_open')
@mock.patch.object(VMDK_DRIVER, '_temporary_file')
@mock.patch('cinder.openstack.common.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_restore_backup(self, vops, generate_uuid, temporary_file,
file_open, restore_backing, extend_volume):
self._test_restore_backup(vops, generate_uuid, temporary_file,
file_open, restore_backing, extend_volume)
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch(
'cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver._get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER,
'_create_backing_from_stream_optimized_file')
@mock.patch('cinder.openstack.common.uuidutils.generate_uuid')
def test_restore_backing(
self, generate_uuid, create_backing, select_ds, get_disk_type,
vops, delete_temp_backing):
self._test_restore_backing(
generate_uuid, create_backing, select_ds, get_disk_type, vops,
delete_temp_backing)
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
@mock.patch.object(vmware_images, 'upload_stream_optimized_disk')
@mock.patch('cinder.openstack.common.fileutils.file_open')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch(
'cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver._get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
@mock.patch.object(VMDK_DRIVER, 'session')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
def test_create_backing_from_stream_optimized_file(
self, select_ds, session, get_storage_profile_id, get_disk_type,
vops, file_open, upload_disk, delete_temp_backing):
self._test_create_backing_from_stream_optimized_file(
select_ds, session, get_storage_profile_id, get_disk_type, vops,
file_open, upload_disk, delete_temp_backing)
@mock.patch.object(VMDK_DRIVER, '_get_folder_ds_summary')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_create_backing_with_params(self, vops, get_folder_ds_summary):
resource_pool = mock.sentinel.resource_pool
vops.get_dss_rp.return_value = (mock.Mock(), resource_pool)
folder = mock.sentinel.folder
summary = mock.sentinel.summary
get_folder_ds_summary.return_value = (folder, summary)
volume = {'name': 'vol-1', 'volume_type_id': None, 'size': 1}
host = mock.Mock()
create_params = {vmdk.CREATE_PARAM_DISK_LESS: True}
self._driver._create_backing(volume, host, create_params)
vops.create_backing_disk_less.assert_called_once_with('vol-1',
folder,
resource_pool,
host,
summary.name,
None)
create_params = {vmdk.CREATE_PARAM_ADAPTER_TYPE: 'ide'}
self._driver._create_backing(volume, host, create_params)
vops.create_backing.assert_called_once_with('vol-1',
units.Mi,
vmdk.THIN_VMDK_TYPE,
folder,
resource_pool,
host,
summary.name,
None,
'ide')
vops.create_backing.reset_mock()
backing_name = "temp-vol"
create_params = {vmdk.CREATE_PARAM_BACKING_NAME: backing_name}
self._driver._create_backing(volume, host, create_params)
vops.create_backing.assert_called_once_with(backing_name,
units.Mi,
vmdk.THIN_VMDK_TYPE,
folder,
resource_pool,
host,
summary.name,
None,
'lsiLogic')
@mock.patch('cinder.openstack.common.fileutils.ensure_tree')
@mock.patch('cinder.openstack.common.fileutils.delete_if_exists')
@mock.patch('tempfile.mkstemp')
@mock.patch('os.close')
def test_temporary_file(
self, close, mkstemp, delete_if_exists, ensure_tree):
fd = mock.sentinel.fd
tmp = mock.sentinel.tmp
mkstemp.return_value = (fd, tmp)
prefix = ".vmdk"
suffix = "test"
with self._driver._temporary_file(prefix=prefix,
suffix=suffix) as tmp_file:
self.assertEqual(tmp, tmp_file)
ensure_tree.assert_called_once_with(self.TMP_DIR)
mkstemp.assert_called_once_with(dir=self.TMP_DIR,
prefix=prefix,
suffix=suffix)
close.assert_called_once_with(fd)
delete_if_exists.assert_called_once_with(tmp)
class ImageDiskTypeTest(test.TestCase):
"""Unit tests for ImageDiskType."""
def test_is_valid(self):
self.assertTrue(vmdk.ImageDiskType.is_valid("thin"))
self.assertTrue(vmdk.ImageDiskType.is_valid("preallocated"))
self.assertTrue(vmdk.ImageDiskType.is_valid("streamOptimized"))
self.assertTrue(vmdk.ImageDiskType.is_valid("sparse"))
self.assertFalse(vmdk.ImageDiskType.is_valid("thick"))
def test_validate(self):
vmdk.ImageDiskType.validate("thin")
vmdk.ImageDiskType.validate("preallocated")
vmdk.ImageDiskType.validate("streamOptimized")
vmdk.ImageDiskType.validate("sparse")
self.assertRaises(exception.ImageUnacceptable,
vmdk.ImageDiskType.validate,
"thick")
| apache-2.0 |
edmstudio/ansible | lib/ansible/utils/module_docs_fragments/backup.py | 427 | 1071 | # Copyright (c) 2015 Ansible, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard documentation fragment
DOCUMENTATION = '''
options:
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
required: false
choices: [ "yes", "no" ]
default: "no"
'''
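# A minimal, hypothetical illustration of a playbook task using a module that
# includes this fragment (the module name `my_config` is an assumption, not a
# real module):
#
#   - name: Push configuration changes, keeping a timestamped backup
#     my_config:
#       lines:
#         - hostname core-router
#       backup: yes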
| gpl-3.0 |
ivanhorvath/openshift-tools | openshift/installer/vendored/openshift-ansible-3.7.42-1/roles/lib_openshift/src/test/unit/test_oc_clusterrole.py | 64 | 3914 | '''
Unit tests for oc clusterrole
'''
import copy
import os
import sys
import unittest
import mock
# Removing invalid variable names for tests so that I can
# keep them brief
# pylint: disable=invalid-name,no-name-in-module
# Disable import-error b/c our libraries aren't loaded in jenkins
# pylint: disable=import-error,wrong-import-position
# place class in our python path
module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, module_path)
from oc_clusterrole import OCClusterRole # noqa: E402
class OCClusterRoleTest(unittest.TestCase):
'''
Test class for OCClusterRole
'''
# run_ansible input parameters
params = {
'state': 'present',
'name': 'operations',
'rules': [
{'apiGroups': [''],
'attributeRestrictions': None,
'verbs': ['create', 'delete', 'deletecollection',
'get', 'list', 'patch', 'update', 'watch'],
'resources': ['persistentvolumes']}
],
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'debug': False,
}
@mock.patch('oc_clusterrole.locate_oc_binary')
@mock.patch('oc_clusterrole.Utils.create_tmpfile_copy')
@mock.patch('oc_clusterrole.Utils._write')
@mock.patch('oc_clusterrole.OCClusterRole._run')
def test_adding_a_clusterrole(self, mock_cmd, mock_write, mock_tmpfile_copy, mock_loc_binary):
''' Testing adding a project '''
params = copy.deepcopy(OCClusterRoleTest.params)
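        # Canned `oc get clusterrole operations -o json` output that the mocked
        # command runner returns once the create call has succeeded.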
clusterrole = '''{
"apiVersion": "v1",
"kind": "ClusterRole",
"metadata": {
"creationTimestamp": "2017-03-27T14:19:09Z",
"name": "operations",
"resourceVersion": "23",
"selfLink": "/oapi/v1/clusterrolesoperations",
"uid": "57d358fe-12f8-11e7-874a-0ec502977670"
},
"rules": [
{
"apiGroups": [
""
],
"attributeRestrictions": null,
"resources": [
"persistentvolumes"
],
"verbs": [
"create",
"delete",
"deletecollection",
"get",
"list",
"patch",
"update",
"watch"
]
}
]
}'''
# Return values of our mocked function call. These get returned once per call.
mock_cmd.side_effect = [
(1, '', 'Error from server: clusterrole "operations" not found'),
(1, '', 'Error from server: namespaces "operations" not found'),
(0, '', ''), # created
(0, clusterrole, ''), # fetch it
]
mock_tmpfile_copy.side_effect = [
'/tmp/mocked_kubeconfig',
]
mock_loc_binary.side_effect = [
'oc',
]
# Act
results = OCClusterRole.run_ansible(params, False)
# Assert
self.assertTrue(results['changed'])
self.assertEqual(results['results']['returncode'], 0)
self.assertEqual(results['results']['results']['metadata']['name'], 'operations')
self.assertEqual(results['state'], 'present')
# Making sure our mock was called as we expected
mock_cmd.assert_has_calls([
mock.call(['oc', 'get', 'clusterrole', 'operations', '-o', 'json'], None),
mock.call(['oc', 'get', 'clusterrole', 'operations', '-o', 'json'], None),
mock.call(['oc', 'create', '-f', mock.ANY], None),
mock.call(['oc', 'get', 'clusterrole', 'operations', '-o', 'json'], None),
])
| apache-2.0 |
kashif/scikit-learn | benchmarks/bench_glmnet.py | 297 | 3848 | """
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
Does two benchmarks
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
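# Sanity check: rmse(np.zeros(3), np.ones(3)) == 1.0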
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
n_informative = n_features / 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(0, n * step, step)
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of samples to classify')
pl.ylabel('Time (s)')
pl.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
n_informative = n_features / 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure('scikit-learn vs. glmnet benchmark results')
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
HiSPARC/station-software | user/python/Tools/Scripts/reindent.py | 2 | 11418 | #! /usr/bin/env python
# Released to the public domain, by Tim Peters, 03 October 2000.
"""reindent [-d][-r][-v] [ path ... ]
-d (--dryrun) Dry run. Analyze, but don't make any changes to, files.
-r (--recurse) Recurse. Search for all .py files in subdirectories too.
-n (--nobackup) No backup. Does not make a ".bak" file before reindenting.
-v (--verbose) Verbose. Print informative msgs; else no output.
-h (--help) Help. Print this usage information and exit.
Change Python (.py) files to use 4-space indents and no hard tab characters.
Also trim excess spaces and tabs from ends of lines, and remove empty lines
at the end of files. Also ensure the last line ends with a newline.
If no paths are given on the command line, reindent operates as a filter,
reading a single source file from standard input and writing the transformed
source to standard output. In this case, the -d, -r and -v flags are
ignored.
You can pass one or more file and/or directory paths. When a directory path
is given, all .py files within the directory will be examined, and, if the -r
option is given, likewise recursively for subdirectories.
If output is not to standard output, reindent overwrites files in place,
renaming the originals with a .bak extension. If it finds nothing to
change, the file is left alone. If reindent does change a file, the changed
file is a fixed-point for future runs (i.e., running reindent on the
resulting .py file won't change it again).
The hard part of reindenting is figuring out what to do with comment
lines. So long as the input files get a clean bill of health from
tabnanny.py, reindent should do a good job.
The backup file is a copy of the one that is being reindented. The ".bak"
file is generated with shutil.copy(), but some corner cases regarding
user/group and permissions could leave the backup file more readable than
you'd prefer. You can always use the --nobackup option to prevent this.
"""
__version__ = "1"
import tokenize
import os, shutil
import sys
import io
verbose = 0
recurse = 0
dryrun = 0
makebackup = True
def usage(msg=None):
if msg is not None:
print >> sys.stderr, msg
print >> sys.stderr, __doc__
def errprint(*args):
sep = ""
for arg in args:
sys.stderr.write(sep + str(arg))
sep = " "
sys.stderr.write("\n")
def main():
import getopt
global verbose, recurse, dryrun, makebackup
try:
opts, args = getopt.getopt(sys.argv[1:], "drnvh",
["dryrun", "recurse", "nobackup", "verbose", "help"])
except getopt.error, msg:
usage(msg)
return
for o, a in opts:
if o in ('-d', '--dryrun'):
dryrun += 1
elif o in ('-r', '--recurse'):
recurse += 1
elif o in ('-n', '--nobackup'):
makebackup = False
elif o in ('-v', '--verbose'):
verbose += 1
elif o in ('-h', '--help'):
usage()
return
if not args:
r = Reindenter(sys.stdin)
r.run()
r.write(sys.stdout)
return
for arg in args:
check(arg)
def check(file):
if os.path.isdir(file) and not os.path.islink(file):
if verbose:
print "listing directory", file
names = os.listdir(file)
for name in names:
fullname = os.path.join(file, name)
if ((recurse and os.path.isdir(fullname) and
not os.path.islink(fullname) and
not os.path.split(fullname)[1].startswith("."))
or name.lower().endswith(".py")):
check(fullname)
return
if verbose:
print "checking", file, "...",
try:
f = open(file, "rb")
except IOError, msg:
errprint("%s: I/O Error: %s" % (file, str(msg)))
return
r = Reindenter(f)
f.close()
newline = r.newlines
if isinstance(newline, tuple):
errprint("%s: mixed newlines detected; cannot process file" % file)
return
if r.run():
if verbose:
print "changed."
if dryrun:
print "But this is a dry run, so leaving it alone."
if not dryrun:
bak = file + ".bak"
if makebackup:
shutil.copyfile(file, bak)
if verbose:
print "backed up", file, "to", bak
f = open(file, "wb")
r.write(f)
f.close()
if verbose:
print "wrote new", file
return True
else:
if verbose:
print "unchanged."
return False
def _detect_newlines(lines):
newlines = {'\r\n' if line[-2:] == '\r\n' else
'\n' if line[-1:] == '\n' else
'\r' if line[-1:] == '\r' else
''
for line in lines}
newlines.discard('')
newlines = tuple(sorted(newlines))
if not newlines:
return '\n'
if len(newlines) == 1:
return newlines[0]
return newlines
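# For example, _detect_newlines(["a\n", "b\n"]) returns '\n', while mixed input
# such as ["a\n", "b\r\n"] returns the tuple ('\n', '\r\n').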
def _rstrip(line, JUNK='\r\n \t'):
"""Return line stripped of trailing spaces, tabs, newlines.
Note that line.rstrip() instead also strips sundry control characters,
but at least one known Emacs user expects to keep junk like that, not
mentioning Barry by name or anything <wink>.
"""
i = len(line)
while i > 0 and line[i-1] in JUNK:
i -= 1
return line[:i]
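# For example, _rstrip("x = 1 \t\r\n") returns "x = 1"; control characters
# other than space, tab, CR and LF are deliberately kept.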
class Reindenter:
def __init__(self, f):
self.find_stmt = 1 # next token begins a fresh stmt?
self.level = 0 # current indent level
# Raw file lines.
self.raw = f.readlines()
# Save the newlines found in the file so they can be used to
# create output without mutating the newlines.
self.newlines = _detect_newlines(self.raw)
if isinstance(self.newlines, tuple):
self.newline = self.newlines[0]
else:
self.newline = self.newlines
# File lines, rstripped & tab-expanded. Dummy at start is so
# that we can use tokenize's 1-based line numbering easily.
# Note that a line is all-blank iff it's newline.
self.lines = [_rstrip(line).expandtabs() + self.newline
for line in self.raw]
self.lines.insert(0, None)
self.index = 1 # index into self.lines of next line
# List of (lineno, indentlevel) pairs, one for each stmt and
# comment line. indentlevel is -1 for comment lines, as a
# signal that tokenize doesn't know what to do about them;
# indeed, they're our headache!
self.stats = []
def run(self):
tokenize.tokenize(self.getline, self.tokeneater)
# Remove trailing empty lines.
lines = self.lines
while lines and lines[-1] == self.newline:
lines.pop()
# Sentinel.
stats = self.stats
stats.append((len(lines), 0))
# Map count of leading spaces to # we want.
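        # (E.g. if a comment indented 3 spaces last appeared under code that
        # was reindented to 4 spaces, have2want[3] == 4 and later 3-space
        # comments are shifted the same way.)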
have2want = {}
# Program after transformation.
after = self.after = []
# Copy over initial empty lines -- there's nothing to do until
# we see a line with *something* on it.
i = stats[0][0]
after.extend(lines[1:i])
for i in range(len(stats)-1):
thisstmt, thislevel = stats[i]
nextstmt = stats[i+1][0]
have = getlspace(lines[thisstmt])
want = thislevel * 4
if want < 0:
# A comment line.
if have:
# An indented comment line. If we saw the same
# indentation before, reuse what it most recently
# mapped to.
want = have2want.get(have, -1)
if want < 0:
# Then it probably belongs to the next real stmt.
for j in xrange(i+1, len(stats)-1):
jline, jlevel = stats[j]
if jlevel >= 0:
if have == getlspace(lines[jline]):
want = jlevel * 4
break
if want < 0: # Maybe it's a hanging
# comment like this one,
# in which case we should shift it like its base
# line got shifted.
for j in xrange(i-1, -1, -1):
jline, jlevel = stats[j]
if jlevel >= 0:
want = have + getlspace(after[jline-1]) - \
getlspace(lines[jline])
break
if want < 0:
# Still no luck -- leave it alone.
want = have
else:
want = 0
assert want >= 0
have2want[have] = want
diff = want - have
if diff == 0 or have == 0:
after.extend(lines[thisstmt:nextstmt])
else:
for line in lines[thisstmt:nextstmt]:
if diff > 0:
if line == self.newline:
after.append(line)
else:
after.append(" " * diff + line)
else:
remove = min(getlspace(line), -diff)
after.append(line[remove:])
return self.raw != self.after
def write(self, f):
f.writelines(self.after)
# Line-getter for tokenize.
def getline(self):
if self.index >= len(self.lines):
line = ""
else:
line = self.lines[self.index]
self.index += 1
return line
# Line-eater for tokenize.
def tokeneater(self, type, token, (sline, scol), end, line,
INDENT=tokenize.INDENT,
DEDENT=tokenize.DEDENT,
NEWLINE=tokenize.NEWLINE,
COMMENT=tokenize.COMMENT,
NL=tokenize.NL):
if type == NEWLINE:
# A program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
self.find_stmt = 1
elif type == INDENT:
self.find_stmt = 1
self.level += 1
elif type == DEDENT:
self.find_stmt = 1
self.level -= 1
elif type == COMMENT:
if self.find_stmt:
self.stats.append((sline, -1))
# but we're still looking for a new stmt, so leave
# find_stmt alone
elif type == NL:
pass
elif self.find_stmt:
# This is the first "real token" following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER.
self.find_stmt = 0
if line: # not endmarker
self.stats.append((sline, self.level))
# Count number of leading blanks.
def getlspace(line):
i, n = 0, len(line)
while i < n and line[i] == " ":
i += 1
return i
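# For example, getlspace("    pass") == 4 and getlspace("pass") == 0.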
if __name__ == '__main__':
main()
| gpl-3.0 |
daizhengy/RDS | trove/tests/api/instances_delete.py | 4 | 6330 | # Copyright 2013 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import time
from proboscis import after_class
from proboscis import before_class
from proboscis import test
from proboscis import asserts
from proboscis.decorators import time_out
from trove.common import cfg
from troveclient.compat import exceptions
from trove.tests.util import create_dbaas_client
from trove.common.utils import poll_until
from trove.tests.util import test_config
from trove.tests.util.users import Requirements
from trove.tests.api.instances import instance_info
from trove.tests.api.instances import VOLUME_SUPPORT
CONF = cfg.CONF
class TestBase(object):
def set_up(self):
reqs = Requirements(is_admin=True)
self.user = test_config.users.find_user(reqs)
self.dbaas = create_dbaas_client(self.user)
def create_instance(self, name, size=1):
volume = None
if VOLUME_SUPPORT:
volume = {'size': size}
result = self.dbaas.instances.create(name,
instance_info.dbaas_flavor_href,
volume, [], [])
return result.id
def wait_for_instance_status(self, instance_id, status="ACTIVE",
acceptable_states=None):
if acceptable_states:
acceptable_states.append(status)
def assert_state(instance):
if acceptable_states:
                asserts.assert_true(instance.status in acceptable_states,
                                    "Invalid status: %s" % instance.status)
return instance
poll_until(lambda: self.dbaas.instances.get(instance_id),
lambda instance: assert_state(instance).status == status,
time_out=30, sleep_time=1)
def wait_for_instance_task_status(self, instance_id, description):
poll_until(lambda: self.dbaas.management.show(instance_id),
lambda instance: instance.task_description == description,
time_out=30, sleep_time=1)
def is_instance_deleted(self, instance_id):
while True:
try:
self.dbaas.instances.get(instance_id)
except exceptions.NotFound:
return True
time.sleep(.5)
def get_task_info(self, instance_id):
instance = self.dbaas.management.show(instance_id)
return instance.status, instance.task_description
def delete_instance(self, instance_id, assert_deleted=True):
instance = self.dbaas.instances.get(instance_id)
instance.delete()
if assert_deleted:
asserts.assert_true(self.is_instance_deleted(instance_id))
def delete_errored_instance(self, instance_id):
self.wait_for_instance_status(instance_id, 'ERROR')
status, desc = self.get_task_info(instance_id)
asserts.assert_equal(status, "ERROR")
self.delete_instance(instance_id)
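# --- Illustrative sketch (not part of the original test module) -----------
# The helpers above rely on trove.common.utils.poll_until(retriever,
# condition, time_out, sleep_time). A simplified stand-in with the same
# shape, shown only to make the polling pattern explicit; this is assumed
# behaviour for illustration, not Trove's actual implementation.
def _example_poll_until(retriever, condition, time_out=30, sleep_time=1):
    deadline = time.time() + time_out
    while True:
        result = retriever()
        if condition(result):
            return result
        if time.time() > deadline:
            raise Exception("Timed out waiting for condition.")
        time.sleep(sleep_time)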
@test(runs_after_groups=["services.initialize", "dbaas.guest.shutdown"],
groups=['dbaas.api.instances.delete'])
class ErroredInstanceDelete(TestBase):
"""
Test that an instance in an ERROR state is actually deleted when delete
is called.
"""
@before_class
def set_up_err(self):
"""Create some flawed instances."""
from trove.taskmanager.models import CONF
self.old_dns_support = CONF.trove_dns_support
CONF.trove_dns_support = False
super(ErroredInstanceDelete, self).set_up()
# Create an instance that fails during server prov.
self.server_error = self.create_instance('test_SERVER_ERROR')
if VOLUME_SUPPORT:
# Create an instance that fails during volume prov.
self.volume_error = self.create_instance('test_VOLUME_ERROR',
size=9)
else:
self.volume_error = None
# Create an instance that fails during DNS prov.
#self.dns_error = self.create_instance('test_DNS_ERROR')
        # Create an instance that fails while it is being deleted the first time.
self.delete_error = self.create_instance('test_ERROR_ON_DELETE')
@after_class(always_run=True)
def clean_up(self):
from trove.taskmanager.models import CONF
CONF.trove_dns_support = self.old_dns_support
@test
@time_out(30)
def delete_server_error(self):
self.delete_errored_instance(self.server_error)
@test(enabled=VOLUME_SUPPORT)
@time_out(30)
def delete_volume_error(self):
self.delete_errored_instance(self.volume_error)
@test(enabled=False)
@time_out(30)
def delete_dns_error(self):
self.delete_errored_instance(self.dns_error)
@test
@time_out(30)
def delete_error_on_delete_instance(self):
id = self.delete_error
self.wait_for_instance_status(id, 'ACTIVE')
self.wait_for_instance_task_status(id, 'No tasks for the instance.')
instance = self.dbaas.management.show(id)
asserts.assert_equal(instance.status, "ACTIVE")
asserts.assert_equal(instance.task_description,
'No tasks for the instance.')
# Try to delete the instance. This fails the first time due to how
        # the test fake is set up.
self.delete_instance(id, assert_deleted=False)
instance = self.dbaas.management.show(id)
asserts.assert_equal(instance.status, "SHUTDOWN")
asserts.assert_equal(instance.task_description,
"Deleting the instance.")
# Try a second time. This will succeed.
self.delete_instance(id)
| apache-2.0 |
RobertoMalatesta/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/options.py | 131 | 6106 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from optparse import make_option
class Options(object):
blocks = make_option("--blocks", action="store", type="string", dest="blocks", default=None, help="Bug number which the created bug blocks.")
build = make_option("--build", action="store_true", dest="build", default=False, help="Build and run run-webkit-tests before committing.")
build_style = make_option("--build-style", action="store", dest="build_style", default=None, help="Whether to build debug, release, or both.")
cc = make_option("--cc", action="store", type="string", dest="cc", help="Comma-separated list of email addresses to carbon-copy.")
check_style = make_option("--ignore-style", action="store_false", dest="check_style", default=True, help="Don't check to see if the patch has proper style before uploading.")
check_style_filter = make_option("--check-style-filter", action="store", type="string", dest="check_style_filter", default=None, help="Filter style-checker rules (see check-webkit-style --help).")
clean = make_option("--no-clean", action="store_false", dest="clean", default=True, help="Don't check if the working directory is clean before applying patches")
close_bug = make_option("--no-close", action="store_false", dest="close_bug", default=True, help="Leave bug open after landing.")
comment = make_option("--comment", action="store", type="string", dest="comment", help="Comment to post to bug.")
component = make_option("--component", action="store", type="string", dest="component", help="Component for the new bug.")
confirm = make_option("--no-confirm", action="store_false", dest="confirm", default=True, help="Skip confirmation steps.")
description = make_option("-m", "--description", action="store", type="string", dest="description", help="Description string for the attachment")
email = make_option("--email", action="store", type="string", dest="email", help="Email address to use in ChangeLogs.")
force_clean = make_option("--force-clean", action="store_true", dest="force_clean", default=False, help="Clean working directory before applying patches (removes local changes and commits)")
git_commit = make_option("-g", "--git-commit", action="store", dest="git_commit", help="Operate on a local commit. If a range, the commits are squashed into one. <ref>.... includes the working copy changes. UPSTREAM can be used for the upstream/tracking branch.")
local_commit = make_option("--local-commit", action="store_true", dest="local_commit", default=False, help="Make a local commit for each applied patch")
non_interactive = make_option("--non-interactive", action="store_true", dest="non_interactive", default=False, help="Never prompt the user, fail as fast as possible.")
obsolete_patches = make_option("--no-obsolete", action="store_false", dest="obsolete_patches", default=True, help="Do not obsolete old patches before posting this one.")
open_bug = make_option("--open-bug", action="store_true", dest="open_bug", default=False, help="Opens the associated bug in a browser.")
parent_command = make_option("--parent-command", action="store", dest="parent_command", default=None, help="(Internal) The command that spawned this instance.")
quiet = make_option("--quiet", action="store_true", dest="quiet", default=False, help="Produce less console output.")
request_commit = make_option("--request-commit", action="store_true", dest="request_commit", default=False, help="Mark the patch as needing auto-commit after review.")
review = make_option("--no-review", action="store_false", dest="review", default=True, help="Do not mark the patch for review.")
reviewer = make_option("-r", "--reviewer", action="store", type="string", dest="reviewer", help="Update ChangeLogs to say Reviewed by REVIEWER.")
suggest_reviewers = make_option("--suggest-reviewers", action="store_true", default=False, help="Offer to CC appropriate reviewers.")
test = make_option("--test", action="store_true", dest="test", default=False, help="Run run-webkit-tests before committing.")
update = make_option("--no-update", action="store_false", dest="update", default=True, help="Don't update the working directory.")
update_changelogs = make_option("--update-changelogs", action="store_true", dest="update_changelogs", default=False, help="Update existing ChangeLog entries with new date, bug description, and touched files/functions.")
changelog_count = make_option("--changelog-count", action="store", type="int", dest="changelog_count", help="Number of changelogs to parse.")
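# --- Illustrative sketch (not part of the original module) ----------------
# Each attribute above is a plain optparse Option built with make_option, so
# a command can hand any subset straight to an OptionParser. Hypothetical
# usage, not webkitpy's actual command plumbing:
def _example_parser():
    from optparse import OptionParser
    return OptionParser(option_list=[Options.build, Options.quiet, Options.reviewer])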
| bsd-3-clause |
jacobsenanaizabel/shoop | shoop_tests/admin/test_order_module.py | 6 | 1652 | # -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from shoop.admin.modules.orders.dashboard import OrderValueChartDashboardBlock
from shoop.admin.modules.orders.views.detail import OrderSetStatusView
from shoop.core.models.orders import OrderStatusRole, OrderStatus, Order
from shoop.testing.factories import create_random_order, get_default_product, create_random_person
from shoop_tests.utils import apply_request_middleware
@pytest.mark.django_db
def test_order_set_status_works(admin_user, rf):
order = create_random_order(customer=create_random_person(), products=(get_default_product(),))
    order.create_shipment_of_all_products()  # Needs to be shipped before the status can be set to complete
assert order.status.role == OrderStatusRole.INITIAL
complete_status = OrderStatus.objects.get_default_complete()
view = OrderSetStatusView.as_view()
request = apply_request_middleware(rf.post("/", {"status": complete_status.pk}), user=admin_user)
response = view(request, pk=order.pk)
assert response.status_code < 400
order = Order.objects.get(pk=order.pk)
assert order.status_id == complete_status.id
assert order.log_entries.filter(identifier="status_change").exists()
@pytest.mark.django_db
def test_order_chart_works():
order = create_random_order(customer=create_random_person(), products=(get_default_product(),))
chart = OrderValueChartDashboardBlock("test").get_chart()
assert len(chart.series[0]) > 0
| agpl-3.0 |
melbit-kevinwessel/ansible-modules-extras | network/dnsmadeeasy.py | 77 | 13305 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: dnsmadeeasy
version_added: "1.3"
short_description: Interface with dnsmadeeasy.com (a DNS hosting service).
description:
- "Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or monitor/account support yet. See: U(http://www.dnsmadeeasy.com/services/rest-api/)"
options:
account_key:
description:
      - Account API Key.
required: true
default: null
account_secret:
description:
      - Account Secret Key.
required: true
default: null
domain:
description:
- Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster resolution.
required: true
default: null
record_name:
description:
- Record name to get/create/delete/update. If record_name is not specified; all records for the domain will be returned in "result" regardless of the state argument.
required: false
default: null
record_type:
description:
- Record type.
required: false
choices: [ 'A', 'AAAA', 'CNAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT' ]
default: null
record_value:
description:
- "Record value. HTTPRED: <redirection URL>, MX: <priority> <target name>, NS: <name server>, PTR: <target name>, SRV: <priority> <weight> <port> <target name>, TXT: <text value>"
- "If record_value is not specified; no changes will be made and the record will be returned in 'result' (in other words, this module can be used to fetch a record's current id, type, and ttl)"
required: false
default: null
record_ttl:
description:
- record's "Time to live". Number of seconds the record remains cached in DNS servers.
required: false
default: 1800
state:
description:
- whether the record should exist or not
required: true
choices: [ 'present', 'absent' ]
default: null
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
notes:
- The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few seconds of actual time by using NTP.
  - This module returns record(s) in the "result" element when 'state' is set to 'present'. This value can be registered and used in your playbooks.
requirements: [ hashlib, hmac ]
author: "Brice Burgess (@briceburg)"
'''
EXAMPLES = '''
# fetch my.com domain records
- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present
register: response
# create / ensure the presence of a record
- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present record_name="test" record_type="A" record_value="127.0.0.1"
# update the previously created record
- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present record_name="test" record_value="192.168.0.1"
# fetch a specific record
- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present record_name="test"
register: response
# delete a record / ensure it is absent
- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=absent record_name="test"
'''
# ============================================
# DNSMadeEasy module specific support methods.
#
import urllib
IMPORT_ERROR = None
try:
import json
from time import strftime, gmtime
import hashlib
import hmac
except ImportError, e:
IMPORT_ERROR = str(e)
class DME2:
def __init__(self, apikey, secret, domain, module):
self.module = module
self.api = apikey
self.secret = secret
self.baseurl = 'https://api.dnsmadeeasy.com/V2.0/'
self.domain = str(domain)
self.domain_map = None # ["domain_name"] => ID
self.record_map = None # ["record_name"] => ID
self.records = None # ["record_ID"] => <record>
self.all_records = None
# Lookup the domain ID if passed as a domain name vs. ID
if not self.domain.isdigit():
self.domain = self.getDomainByName(self.domain)['id']
self.record_url = 'dns/managed/' + str(self.domain) + '/records'
def _headers(self):
currTime = self._get_date()
hashstring = self._create_hash(currTime)
headers = {'x-dnsme-apiKey': self.api,
'x-dnsme-hmac': hashstring,
'x-dnsme-requestDate': currTime,
'content-type': 'application/json'}
return headers
def _get_date(self):
return strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime())
def _create_hash(self, rightnow):
return hmac.new(self.secret.encode(), rightnow.encode(), hashlib.sha1).hexdigest()
def query(self, resource, method, data=None):
url = self.baseurl + resource
if data and not isinstance(data, basestring):
data = urllib.urlencode(data)
response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers())
if info['status'] not in (200, 201, 204):
self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg']))
try:
return json.load(response)
except Exception, e:
return {}
def getDomain(self, domain_id):
if not self.domain_map:
self._instMap('domain')
return self.domains.get(domain_id, False)
def getDomainByName(self, domain_name):
if not self.domain_map:
self._instMap('domain')
return self.getDomain(self.domain_map.get(domain_name, 0))
def getDomains(self):
return self.query('dns/managed', 'GET')['data']
def getRecord(self, record_id):
if not self.record_map:
self._instMap('record')
return self.records.get(record_id, False)
# Try to find a single record matching this one.
# How we do this depends on the type of record. For instance, there
# can be several MX records for a single record_name while there can
# only be a single CNAME for a particular record_name. Note also that
# there can be several records with different types for a single name.
def getMatchingRecord(self, record_name, record_type, record_value):
# Get all the records if not already cached
if not self.all_records:
self.all_records = self.getRecords()
# TODO SRV type not yet implemented
if record_type in ["A", "AAAA", "CNAME", "HTTPRED", "PTR"]:
for result in self.all_records:
if result['name'] == record_name and result['type'] == record_type:
return result
return False
elif record_type in ["MX", "NS", "TXT"]:
for result in self.all_records:
if record_type == "MX":
value = record_value.split(" ")[1]
else:
value = record_value
if result['name'] == record_name and result['type'] == record_type and result['value'] == value:
return result
return False
else:
raise Exception('record_type not yet supported')
def getRecords(self):
return self.query(self.record_url, 'GET')['data']
def _instMap(self, type):
#@TODO cache this call so it's executed only once per ansible execution
map = {}
results = {}
# iterate over e.g. self.getDomains() || self.getRecords()
for result in getattr(self, 'get' + type.title() + 's')():
map[result['name']] = result['id']
results[result['id']] = result
# e.g. self.domain_map || self.record_map
setattr(self, type + '_map', map)
setattr(self, type + 's', results) # e.g. self.domains || self.records
def prepareRecord(self, data):
return json.dumps(data, separators=(',', ':'))
def createRecord(self, data):
        #@TODO update the cache w/ resultant record + id when implemented
return self.query(self.record_url, 'POST', data)
def updateRecord(self, record_id, data):
        #@TODO update the cache w/ resultant record + id when implemented
return self.query(self.record_url + '/' + str(record_id), 'PUT', data)
def deleteRecord(self, record_id):
        #@TODO remove record from the cache when implemented
return self.query(self.record_url + '/' + str(record_id), 'DELETE')
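# --- Illustrative sketch (not part of the original module) ----------------
# Every request is signed with an HMAC-SHA1 of the request date using the
# account secret (see _get_date/_create_hash above). The same computation,
# standalone, with made-up credentials:
def _example_dnsme_signature(secret='example-secret',
                             request_date='Sat, 01 Jan 2000 00:00:00 GMT'):
    return hmac.new(secret.encode(), request_date.encode(),
                    hashlib.sha1).hexdigest()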
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
account_key=dict(required=True),
account_secret=dict(required=True, no_log=True),
domain=dict(required=True),
state=dict(required=True, choices=['present', 'absent']),
record_name=dict(required=False),
record_type=dict(required=False, choices=[
'A', 'AAAA', 'CNAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']),
record_value=dict(required=False),
record_ttl=dict(required=False, default=1800, type='int'),
validate_certs = dict(default='yes', type='bool'),
),
required_together=(
['record_value', 'record_ttl', 'record_type']
)
)
if IMPORT_ERROR:
module.fail_json(msg="Import Error: " + IMPORT_ERROR)
DME = DME2(module.params["account_key"], module.params[
"account_secret"], module.params["domain"], module)
state = module.params["state"]
record_name = module.params["record_name"]
record_type = module.params["record_type"]
record_value = module.params["record_value"]
# Follow Keyword Controlled Behavior
if record_name is None:
domain_records = DME.getRecords()
if not domain_records:
module.fail_json(
msg="The requested domain name is not accessible with this api_key; try using its ID if known.")
module.exit_json(changed=False, result=domain_records)
# Fetch existing record + Build new one
current_record = DME.getMatchingRecord(record_name, record_type, record_value)
new_record = {'name': record_name}
for i in ["record_value", "record_type", "record_ttl"]:
if not module.params[i] is None:
new_record[i[len("record_"):]] = module.params[i]
# Special handling for mx record
if new_record["type"] == "MX":
new_record["mxLevel"] = new_record["value"].split(" ")[0]
new_record["value"] = new_record["value"].split(" ")[1]
# Compare new record against existing one
changed = False
if current_record:
for i in new_record:
if str(current_record[i]) != str(new_record[i]):
changed = True
new_record['id'] = str(current_record['id'])
# Follow Keyword Controlled Behavior
if state == 'present':
# return the record if no value is specified
        if "value" not in new_record:
if not current_record:
module.fail_json(
msg="A record with name '%s' does not exist for domain '%s.'" % (record_name, module.params['domain']))
module.exit_json(changed=False, result=current_record)
# create record as it does not exist
if not current_record:
record = DME.createRecord(DME.prepareRecord(new_record))
module.exit_json(changed=True, result=record)
# update the record
if changed:
DME.updateRecord(
current_record['id'], DME.prepareRecord(new_record))
module.exit_json(changed=True, result=new_record)
# return the record (no changes)
module.exit_json(changed=False, result=current_record)
elif state == 'absent':
# delete the record if it exists
if current_record:
DME.deleteRecord(current_record['id'])
module.exit_json(changed=True)
# record does not exist, return w/o change.
module.exit_json(changed=False)
else:
module.fail_json(
msg="'%s' is an unknown value for the state argument" % state)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
| gpl-3.0 |
wbap/Hackathon2015 | Hiroshiba/NeuralNetwork/test_check.py | 2 | 7036 |
# coding: utf-8
# In[ ]:
import os
import sys
import re # for regex
import math
import json
import pickle
from PIL import Image
import numpy as np
from sklearn.datasets import fetch_mldata
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
# import pycuda.autoinit
from chainer import cuda, Function, FunctionSet, gradient_check, Variable, optimizers
import chainer.functions as F
from dA import DenoisingAutoencoder
from SdA import StackedDenoisingAutoencoder
from CdA import ConvolutionalDenoisingAutoencoder
# In[ ]:
## load images
path_imagedir = '/Users/Hiho/Downloads/mit_body_v2'
# count up
num_images = 0
for name in os.listdir(path_imagedir):
if re.match( '.*png$', name ):
num_images = num_images+1
# get image size
for name in os.listdir(path_imagedir):
if re.match( '.*png$', name ):
img = Image.open( os.path.join(path_imagedir, name) )
size_image = img.size
break
num_pximage = size_image[0]*size_image[1]
# laod images
imgs = np.zeros((num_images, num_pximage), dtype=np.float32)
i=0
for name in os.listdir(path_imagedir):
if re.match( '.*png$', name ):
img = Image.open( os.path.join(path_imagedir, name) )
img = np.asarray(img, dtype=np.uint8).T
imgs[i,:] = np.reshape( np.mean(img, axis=0), (1, -1) ).astype(np.float32) / 255
i=i+1
# In[ ]:
## make movie
num_frame = 5
num_movies = num_images - num_frame + 1
num_pxmovie = num_pximage*num_frame
movies = np.zeros((num_movies, num_pxmovie), dtype=np.float32)
for i in range(num_movies):
    movies[i,:] = np.reshape( imgs[i:i+num_frame,:], (1, -1) )
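# Illustrative note (added): each row of `movies` is num_frame consecutive
# grayscale frames flattened into a single vector, so movies.shape is
# (num_movies, num_pximage * num_frame).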
# In[ ]:
## load json files
i=0
true_poses = [{}] * num_images
joint_angles = [{}] * num_images
for name in os.listdir(path_imagedir):
if re.match( '.*json$', name ):
j = json.load( open(os.path.join(path_imagedir, name)) )
true_poses[i] = j['true_position']
joint_angles[i] = j['joint_angle']
i = i+1
# In[ ]:
## setup ML values
num_test = num_movies // 40
num_train = num_movies - num_test
v_all = movies.copy()
num_node_tp = 9
tp_all = np.zeros((num_movies, num_node_tp), dtype=np.float32)
for i in range(num_movies):
tp = true_poses[i+num_frame-1]
tp_all[i][0:3] = [tp['right_elbow']['x'], tp['right_elbow']['y'], tp['right_elbow']['z']]
tp_all[i][3:6] = [tp['right_shoulder']['x'], tp['right_shoulder']['y'], tp['right_shoulder']['z']]
tp_all[i][6:9] = [tp['right_hand']['x'], tp['right_hand']['y'], tp['right_hand']['z']]
num_node_xA = 4
xA_all = np.zeros((num_movies, num_node_xA), dtype=np.float32)
for i in range(num_movies):
xA = joint_angles[i+num_frame-1]
xA_all[i][0:3] = [xA['right_shoulder']['y'], xA['right_shoulder']['p'], xA['right_shoulder']['r']]
xA_all[i][3] = xA['right_elbow']['p']
xA_all = xA_all/360
# shuffle all data
rng = np.random.RandomState(1234)
indices = np.arange(num_movies)
rng.shuffle(indices)
v_all = v_all[indices]
tp_all = tp_all[indices]
# split test and train data
v_train, v_test = np.split(v_all, [num_train])
tp_train, tp_test = np.split(tp_all, [num_train])
xA_train, xA_test = np.split(xA_all, [num_train])
batchsize = 100
n_epoch = 300
# In[ ]:
# create SdA
n_hiddens = (12**2*num_frame, 6**2*num_frame)
sda = StackedDenoisingAutoencoder(num_pxmovie, n_hiddens)
sda.train(v_all, n_epoch=n_epoch)
sda.save('history', n_hiddens, n_epoch, batchsize)
# sda.load('history/SdA_layer(576, 64)_epoch300.pkl')
# split test and train data
yA_each = sda.predict(v_all, bAllLayer=True)
yA_all = yA_each[-1]
# yA_hidden1_all = yA_each[0]
yA_train, yA_test = np.split(yA_all, [num_train])
# check output histgram
dummy = plt.hist(np.reshape(yA_all, (-1, 1)), 50)
# In[ ]:
## draw weight
def draw_weight(data, size):
Z = data.reshape(size).T
plt.imshow(Z, interpolation='none')
plt.xlim(0,size[0])
plt.ylim(0,size[1])
plt.gray()
plt.tick_params(labelbottom="off")
plt.tick_params(labelleft="off")
num_show = 4
for i_layer in range(len(n_hiddens)):
for i in range(num_show):
for i_frame in range(num_frame):
plt.subplot(len(n_hiddens)*num_frame, num_show, num_show*(num_frame*i_layer+i_frame)+i+1)
iw_s = num_pximage*i_frame
iw_e = num_pximage*(i_frame+1)
draw_weight( sda.SdA[i_layer].model.encode.W[i][iw_s:iw_e], (math.sqrt(sda.n_nodes[i_layer]/num_frame), math.sqrt(sda.n_nodes[i_layer]/num_frame)) )
# In[ ]:
# check true position
model = FunctionSet(
# l1 = F.Linear(n_hiddens[-1], 50),
# l2 = F.Linear(50, num_node_tp),
l = F.Linear(n_hiddens[-1], num_node_tp),
)
optimizer = optimizers.SGD()
optimizer.setup(model.collect_parameters())
def forward(x_data, y_data):
x = Variable(x_data); t = Variable(y_data)
# h = F.relu(model.l1(x))
y = model.l(x)
return F.mean_squared_error(y, t), y
for epoch in range(n_epoch):
indexes = np.random.permutation(num_train)
sum_loss = 0
for i in range(0, num_train, batchsize):
x_batch = yA_train[indexes[i : i + batchsize]]
y_batch = tp_train[indexes[i : i + batchsize]]
optimizer.zero_grads()
loss, output = forward(x_batch, y_batch)
loss.backward()
optimizer.update()
sum_loss = sum_loss+loss.data*batchsize
print('epoch:'+str(epoch)+' loss:' + str(sum_loss/num_train))
# test
loss, output = forward(yA_test, tp_test)
print('test loss:' + str(loss.data))
for i_check in range(0, num_test, math.floor(num_test/8)):
print(i_check)
print( "true : %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f" % (tp_test[i_check][0], tp_test[i_check][1], tp_test[i_check][2], tp_test[i_check][3], tp_test[i_check][4], tp_test[i_check][5], tp_test[i_check][6], tp_test[i_check][7], tp_test[i_check][8] ))
print( "predicted : %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f" % (output.data[i_check][0], output.data[i_check][1], output.data[i_check][2], output.data[i_check][3], output.data[i_check][4], output.data[i_check][5], output.data[i_check][6], output.data[i_check][7], output.data[i_check][8] ))
# In[ ]:
# fA(xA->yA)
model = FunctionSet(
l1 = F.Linear(num_node_xA, 50),
l2 = F.Linear(50, n_hiddens[-1]),
)
optimizer = optimizers.SGD()
optimizer.setup(model.collect_parameters())
def forward(x_data, y_data):
x = Variable(x_data); t = Variable(y_data)
h = F.sigmoid(model.l1(x))
y = model.l2(h)
return F.mean_squared_error(y, t), y
for epoch in range(n_epoch):
    indexes = np.random.permutation(num_movies)  # xA_all/yA_all have num_movies rows
sum_loss = 0
for i in range(0, num_train, batchsize):
x_batch = xA_all[indexes[i : i + batchsize]]
y_batch = yA_all[indexes[i : i + batchsize]]
optimizer.zero_grads()
loss, output = forward(x_batch, y_batch)
loss.backward()
optimizer.update()
sum_loss = sum_loss+loss.data*batchsize
print('epoch:'+str(epoch)+' loss:' + str(sum_loss/num_train))
# test
loss, output = forward(xA_test, yA_test)
print('test loss:' + str(loss.data))
| apache-2.0 |
kazemakase/scikit-learn | examples/applications/plot_out_of_core_classification.py | 255 | 13919 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <[email protected]>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
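# --- Illustrative sketch (not part of the original example) ---------------
# The essence of the out-of-core approach used below: hash every text batch
# into the same fixed-size feature space and update the model with
# partial_fit. Names here are illustrative only.
def _example_out_of_core_fit(text_batches, label_batches):
    vec = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
                            non_negative=True)
    clf = SGDClassifier()
    for texts, labels in zip(text_batches, label_batches):
        clf.partial_fit(vec.transform(texts), labels,
                        classes=np.array([0, 1]))
    return clf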
###############################################################################
# Main
###############################################################################
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop : iterate on mini-batchs of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
#fig = plt.gcf()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
dyyi/moneybook | venv/Lib/site-packages/django/conf/locale/lt/formats.py | 504 | 1830 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'Y \m. E j \d.'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = r'Y \m. E j \d., H:i'
YEAR_MONTH_FORMAT = r'Y \m. F'
MONTH_DAY_FORMAT = r'E j \d.'
SHORT_DATE_FORMAT = 'Y-m-d'
SHORT_DATETIME_FORMAT = 'Y-m-d H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
]
TIME_INPUT_FORMATS = [
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
'%H.%M.%S', # '14.30.59'
'%H.%M.%S.%f', # '14.30.59.000200'
'%H.%M', # '14.30'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y %H.%M.%S', # '25.10.06 14.30.59'
'%d.%m.%y %H.%M.%S.%f', # '25.10.06 14.30.59.000200'
'%d.%m.%y %H.%M', # '25.10.06 14.30'
'%d.%m.%y', # '25.10.06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
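# --- Illustrative helper (not part of the original settings module) -------
# Each *_INPUT_FORMATS entry is a strptime pattern; for example the first
# DATE_INPUT_FORMATS value parses an ISO-style date:
def _example_parse_date(value='2006-10-25'):
    from datetime import datetime
    return datetime.strptime(value, DATE_INPUT_FORMATS[0])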
| apache-2.0 |
pra85/calibre | src/calibre/ebooks/oeb/transforms/guide.py | 4 | 2067 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
class Clean(object):
'''Clean up guide, leaving only known values '''
def __call__(self, oeb, opts):
self.oeb, self.log, self.opts = oeb, oeb.log, opts
if 'cover' not in self.oeb.guide:
covers = []
for x in ('other.ms-coverimage-standard', 'coverimagestandard',
'other.ms-titleimage-standard', 'other.ms-titleimage',
'other.ms-coverimage', 'other.ms-thumbimage-standard',
'other.ms-thumbimage', 'thumbimagestandard'):
if x in self.oeb.guide:
href = self.oeb.guide[x].href
try:
item = self.oeb.manifest.hrefs[href]
except KeyError:
continue
else:
covers.append([self.oeb.guide[x], len(item.data)])
covers.sort(cmp=lambda x,y:cmp(x[1], y[1]), reverse=True)
if covers:
ref = covers[0][0]
if len(covers) > 1:
self.log('Choosing %s:%s as the cover'%(ref.type, ref.href))
ref.type = 'cover'
self.oeb.guide.refs['cover'] = ref
if ('start' in self.oeb.guide and 'text' not in self.oeb.guide):
# Prefer text to start as per the OPF 2.0 spec
x = self.oeb.guide['start']
self.oeb.guide.add('text', x.title, x.href)
self.oeb.guide.remove('start')
for x in list(self.oeb.guide):
if x.lower() not in {'cover', 'titlepage', 'masthead', 'toc',
'title-page', 'copyright-page', 'text'}:
item = self.oeb.guide[x]
if item.title and item.title.lower() == 'start':
continue
self.oeb.guide.remove(x)
| gpl-3.0 |
binarytemple/ansible | lib/ansible/module_utils/splitter.py | 2 | 6643 | # (c) 2014 James Cammarata, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
def _get_quote_state(token, quote_char):
'''
the goal of this block is to determine if the quoted string
is unterminated in which case it needs to be put back together
'''
# the char before the current one, used to see if
# the current character is escaped
prev_char = None
for idx, cur_char in enumerate(token):
if idx > 0:
prev_char = token[idx-1]
if cur_char in '"\'':
if quote_char:
if cur_char == quote_char and prev_char != '\\':
quote_char = None
else:
quote_char = cur_char
return quote_char
def _count_jinja2_blocks(token, cur_depth, open_token, close_token):
'''
this function counts the number of opening/closing blocks for a
given opening/closing type and adjusts the current depth for that
block based on the difference
'''
num_open = token.count(open_token)
num_close = token.count(close_token)
if num_open != num_close:
cur_depth += (num_open - num_close)
if cur_depth < 0:
cur_depth = 0
return cur_depth
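# --- Illustrative examples (not part of the original module) --------------
# The two helpers above drive the reassembly in split_args(); a few sample
# evaluations, wrapped in a helper so nothing runs on import:
def _example_state_tracking():
    assert _get_quote_state('c="foo', None) == '"'            # quote left open
    assert _get_quote_state('bar"', '"') is None              # quote closed
    assert _count_jinja2_blocks('x={{', 0, "{{", "}}") == 1   # block opened
    assert _count_jinja2_blocks('}}', 1, "{{", "}}") == 0     # block closed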
def split_args(args):
'''
Splits args on whitespace, but intelligently reassembles
those that may have been split over a jinja2 block or quotes.
When used in a remote module, we won't ever have to be concerned about
jinja2 blocks, however this function is/will be used in the
core portions as well before the args are templated.
example input: a=b c="foo bar"
example output: ['a=b', 'c="foo bar"']
Basically this is a variation shlex that has some more intelligence for
how Ansible needs to use it.
'''
# the list of params parsed out of the arg string
    # this is going to be the result value when we are done
params = []
# here we encode the args, so we have a uniform charset to
# work with, and split on white space
args = args.strip()
try:
args = args.encode('utf-8')
do_decode = True
except UnicodeDecodeError:
do_decode = False
tokens = args.split(' ')
# iterate over the tokens, and reassemble any that may have been
# split on a space inside a jinja2 block.
# ex if tokens are "{{", "foo", "}}" these go together
# These variables are used
# to keep track of the state of the parsing, since blocks and quotes
# may be nested within each other.
quote_char = None
inside_quotes = False
print_depth = 0 # used to count nested jinja2 {{ }} blocks
block_depth = 0 # used to count nested jinja2 {% %} blocks
comment_depth = 0 # used to count nested jinja2 {# #} blocks
# now we loop over each split token, coalescing tokens if the white space
# split occurred within quotes or a jinja2 block of some kind
for token in tokens:
# store the previous quoting state for checking later
was_inside_quotes = inside_quotes
quote_char = _get_quote_state(token, quote_char)
inside_quotes = quote_char is not None
# multiple conditions may append a token to the list of params,
# so we keep track with this flag to make sure it only happens once
# append means add to the end of the list, don't append means concatenate
# it to the end of the last token
appended = False
# if we're inside quotes now, but weren't before, append the token
# to the end of the list, since we'll tack on more to it later
# otherwise, if we're inside any jinja2 block, inside quotes, or we were
# inside quotes (but aren't now) concat this token to the last param
if inside_quotes and not was_inside_quotes:
params.append(token)
appended = True
elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes:
params[-1] = "%s %s" % (params[-1], token)
appended = True
# if the number of paired block tags is not the same, the depth has changed, so we calculate that here
# and may append the current token to the params (if we haven't previously done so)
prev_print_depth = print_depth
print_depth = _count_jinja2_blocks(token, print_depth, "{{", "}}")
if print_depth != prev_print_depth and not appended:
params.append(token)
appended = True
prev_block_depth = block_depth
block_depth = _count_jinja2_blocks(token, block_depth, "{%", "%}")
if block_depth != prev_block_depth and not appended:
params.append(token)
appended = True
prev_comment_depth = comment_depth
comment_depth = _count_jinja2_blocks(token, comment_depth, "{#", "#}")
if comment_depth != prev_comment_depth and not appended:
params.append(token)
appended = True
# finally, if we're at zero depth for all blocks and not inside quotes, and have not
# yet appended anything to the list of params, we do so now
if not (print_depth or block_depth or comment_depth) and not inside_quotes and not appended and token != '':
params.append(token)
# If we're done and things are not at zero depth or we're still inside quotes,
# raise an error to indicate that the args were unbalanced
if print_depth or block_depth or comment_depth or inside_quotes:
raise Exception("error while splitting arguments, either an unbalanced jinja2 block or quotes")
# finally, we decode each param back to the unicode it was in the arg string
if do_decode:
params = [x.decode('utf-8') for x in params]
return params
def unquote(data):
''' removes first and last quotes from a string, if the string starts and ends with the same quotes '''
if len(data) > 0 and (data[0] == '"' and data[-1] == '"' or data[0] == "'" and data[-1] == "'"):
return data[1:-1]
return data
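# --- Illustrative examples (not part of the original module) --------------
# Expected behaviour of split_args() and unquote() on small inputs, assuming
# the same Python 2 interpreter the module itself targets:
def _example_split_and_unquote():
    assert split_args('a=b c="foo bar"') == ['a=b', 'c="foo bar"']
    assert split_args('msg={{ greeting }} dest=local') == \
        ['msg={{ greeting }}', 'dest=local']
    assert unquote('"foo bar"') == 'foo bar'
    assert unquote('foo') == 'foo'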
| gpl-3.0 |
be-cloud-be/horizon-addons | server/addons/mail/wizard/invite.py | 16 | 3680 | # -*- coding: utf-8 -*-
from openerp import _, api, fields, models
class Invite(models.TransientModel):
""" Wizard to invite partners (or channels) and make them followers. """
_name = 'mail.wizard.invite'
_description = 'Invite wizard'
@api.model
def default_get(self, fields):
result = super(Invite, self).default_get(fields)
user_name = self.env.user.name_get()[0][1]
model = result.get('res_model')
res_id = result.get('res_id')
if self._context.get('mail_invite_follower_channel_only'):
result['send_mail'] = False
if 'message' in fields and model and res_id:
model_name = self.env['ir.model'].search([('model', '=', self.pool[model]._name)]).name_get()[0][1]
document_name = self.env[model].browse(res_id).name_get()[0][1]
message = _('<div><p>Hello,</p><p>%s invited you to follow %s document: %s.</p></div>') % (user_name, model_name, document_name)
result['message'] = message
elif 'message' in fields:
result['message'] = _('<div><p>Hello,</p><p>%s invited you to follow a new document.</p></div>') % user_name
return result
res_model = fields.Char('Related Document Model', required=True, index=True, help='Model of the followed resource')
res_id = fields.Integer('Related Document ID', index=True, help='Id of the followed resource')
partner_ids = fields.Many2many('res.partner', string='Recipients', help="List of partners that will be added as follower of the current document.")
channel_ids = fields.Many2many('mail.channel', string='Channels', help='List of channels that will be added as listeners of the current document.',
domain=[('channel_type', '=', 'channel')])
message = fields.Html('Message')
send_mail = fields.Boolean('Send Email', default=True, help="If checked, the partners will receive an email warning they have been added in the document's followers.")
@api.multi
def add_followers(self):
email_from = self.env['mail.message']._get_default_from()
for wizard in self:
Model = self.env[wizard.res_model]
document = Model.browse(wizard.res_id)
# filter partner_ids to get the new followers, to avoid sending email to already following partners
new_partners = wizard.partner_ids - document.message_partner_ids
new_channels = wizard.channel_ids - document.message_channel_ids
document.message_subscribe(new_partners.ids, new_channels.ids)
model_ids = self.env['ir.model'].search([('model', '=', wizard.res_model)])
model_name = model_ids.name_get()[0][1]
# send an email if option checked and if a message exists (do not send void emails)
if wizard.send_mail and wizard.message and not wizard.message == '<br>': # when deleting the message, cleditor keeps a <br>
message = self.env['mail.message'].create({
'subject': _('Invitation to follow %s: %s') % (model_name, document.name_get()[0][1]),
'body': wizard.message,
'record_name': document.name_get()[0][1],
'email_from': email_from,
'reply_to': email_from,
'model': wizard.res_model,
'res_id': wizard.res_id,
'no_auto_thread': True,
})
new_partners.with_context(auto_delete=True)._notify(message, force_send=True, user_signature=True)
message.unlink()
return {'type': 'ir.actions.act_window_close'}
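# --- Illustrative usage (not part of the original module) -----------------
# Hypothetical server-side call: create the wizard for a record via context
# defaults, then trigger add_followers(); the record and partner values
# below are made up.
#
#     wizard = env['mail.wizard.invite'].with_context(
#         default_res_model='res.partner',
#         default_res_id=partner.id,
#     ).create({'partner_ids': [(6, 0, partners.ids)]})
#     wizard.add_followers()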
| agpl-3.0 |
depet/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 44 | 7031 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
"""Test partial dependence for classifier """
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
"""Test partial dependence for multi-class classifier """
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
"""Test partial dependence for regressor """
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
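# A usage sketch (not one of the tests above, and never called by them): the same
# partial_dependence API the tests exercise, written out as a plain helper.
def _example_partial_dependence_usage():
    clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
    clf.fit(boston.data, boston.target)
    # pdp has one row per target, axes holds one value grid per requested feature
    pdp, axes = partial_dependence(clf, [0], X=boston.data, grid_resolution=10)
    for value, avg_prediction in zip(axes[0], pdp[0]):
        print("CRIM=%.3f -> averaged prediction %.2f" % (value, avg_prediction))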
def test_partial_dependecy_input():
"""Test input validation of partial dependence. """
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
"""Test partial dependence plot function. """
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
"""Test partial dependence plot function input checks. """
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
"""Test partial dependence plot function on multi-class input. """
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
nukru/projectQ | app/auth/views.py | 1 | 4303 | from app import app, db, lm
from flask import Blueprint, request, url_for, flash, redirect, abort, session, g
from flask import render_template
from flask.ext.login import login_user, logout_user, current_user, login_required
from forms import LoginFormOpenID, RegistrationForm, LoginFormEmail, RegistrationForm2
from . import blueprint
from ..models import User, ROLE_USER
from flask.ext.babel import gettext
# @blueprint.route('/login', methods=['GET', 'POST'])
# @blueprint.route('/', methods=['GET', 'POST'])
# @oid.loginhandler
# def login():
# """
# login method for users.
# Returns a Jinja2 template with the result of signing process.
# """
# if g.user is not None and g.user.is_authenticated():
# return redirect(url_for('main.index'))
# form = LoginFormOpenID()
# if form.validate_on_submit():
# session['remember_me'] = form.remember_me.data
# return oid.try_login(form.openid.data, ask_for = ['nickname', 'email'])
# return render_template('/auth/login.html',
# title = 'Sign In',
# form = form,
# providers = app.config['OPENID_PROVIDERS'])
@blueprint.route('/login', methods=['GET', 'POST'])
@blueprint.route('/', methods=['GET', 'POST'])
@blueprint.route('/login-email', methods=['GET', 'POST'])
def login_email():
form = LoginFormEmail()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash(gettext('Invalid email or password.'))
return render_template('auth/loginEmail.html', form=form)
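# Sketch of exercising the view above with Flask's test client (assumptions: the
# blueprint is registered on `app` and CSRF protection is disabled in the test
# config; the credentials are hypothetical):
#     with app.test_client() as client:
#         response = client.post('/login-email', data={
#             'email': 'user@example.com',
#             'password': 'secret',
#             'remember_me': 'y',
#         }, follow_redirects=True)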
# @blueprint.route('/register2', methods=['GET', 'POST'])
# def register2():
# form = RegistrationForm()
# if form.validate_on_submit():
# user = User(email=form.email.data,
# password=form.email.data)
# db.session.add(user)
# db.session.commit()
# # token = user.generate_confirmation_token()
# # send_email(user.email, 'Confirm Your Account',
# # 'auth/email/confirm', user=user, token=token)
# flash(gettext('A password has been sent to you by email.'))
# return redirect(url_for('auth.login'))
# return render_template('auth/register.html', form=form)
@blueprint.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm2()
if form.validate_on_submit():
user = User(email=form.email.data,
password=form.password.data)
db.session.add(user)
db.session.commit()
# token = user.generate_confirmation_token()
# send_email(user.email, 'Confirm Your Account',
# 'auth/email/confirm', user=user, token=token)
# flash(gettext('A password has been sent to you by email.'))
login_user(user, True)
return redirect(url_for('main.index'))
return render_template('auth/register.html', form=form)
@blueprint.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('main.index'))
@blueprint.before_app_request
def before_request():
# g.user = current_user
if current_user.is_authenticated():
g.user = current_user # return username in get_id()
else:
g.user = None # or 'some fake value', whatever
# @oid.after_login
# def after_login(resp):
# if resp.email is None or resp.email == "":
# flash(gettext('Invalid login. Please try again.'))
# return redirect(url_for('login'))
# user = User.query.filter_by(email = resp.email).first()
# if user is None:
# nickname = resp.nickname
# if nickname is None or nickname == "":
# nickname = resp.email.split('@')[0]
# user = User(nickname = nickname, email = resp.email, role = ROLE_USER)
# db.session.add(user)
# db.session.commit()
# remember_me = False
# if 'remember_me' in session:
# remember_me = session['remember_me']
# session.pop('remember_me', None)
# login_user(user, remember = remember_me)
# return redirect(request.args.get('next') or url_for('main.index'))
| apache-2.0 |
kaniblu/wiki-mysql | filters.py | 1 | 1818 | # encoding: utf-8
from __future__ import unicode_literals
import re
import string
import six
import gensim
class WikiBodyFilter(object):
"""Generic wikipedia article filter
Strips off illegal characters and markups. Borrows some basic logic from
gensim utils.
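    Example (a sketch; the exact output depends on the installed gensim version):
        >>> flt = WikiBodyFilter()
        >>> flt("'''Bold''' text &amp; more")  # doctest: +SKIP
        'Bold text & more'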
"""
def __init__(self, remove_html=True, valid_unicodes=(), invalid_unicodes=()):
self.valid_unicodes = valid_unicodes
self.invalid_unicodes = invalid_unicodes
self.remove_html = remove_html
self.uni_patterns = []
if valid_unicodes:
valids = []
for s, e in valid_unicodes:
s_str = six.unichr(s)
e_str = six.unichr(e)
valids.append("{}-{}".format(s_str, e_str))
valid_pat = re.compile(r"[^{}]".format("".join(valids)),
re.UNICODE)
self.uni_patterns.append(valid_pat)
if invalid_unicodes:
invalids = []
for s, e in invalid_unicodes:
s_str = six.unichr(s)
e_str = six.unichr(e)
invalids.append("{}-{}".format(s_str, e_str))
invalid_pat = re.compile(r"[{}]".format("".join(invalids)),
re.UNICODE)
self.uni_patterns.append(invalid_pat)
dbws_pat = re.compile(r"(\s)\s*")
self.dbws_pattern = dbws_pat
def __call__(self, text):
text = gensim.utils.to_unicode(text, "utf8", errors="ignore")
if self.remove_html:
text = gensim.utils.decode_htmlentities(text)
text = gensim.corpora.wikicorpus.remove_markup(text)
for pat in self.uni_patterns:
text = pat.sub("", text)
text = self.dbws_pattern.sub(r"\g<1>", text)
return text | mit |
openhatch/oh-mainline | vendor/packages/django-http-proxy/versioneer.py | 7 | 38235 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
Versioneer 2
============
* Like a rocketeer, but for versions!
* Based on https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, and pypy
* Edited by Ryan Dwyer
This is a tool for managing a recorded version number in python projects.
The goal is to remove the tedious and error-prone "update the embedded version
string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control system,
and maybe making new tarballs.
## Cookiecutter
* If you got this file using cookiecutter, the manual steps listed below should
all be done.
* Run `git tag 1.0` (for example), and register and upload to PyPI as usual.
## Manual Install
* Copy this file to beside your setup.py
* Add the following to your setup.py:
import imp
fp, pathname, description = imp.find_module('versioneer')
try:
versioneer = imp.load_module('versioneer', fp, pathname, description)
finally:
if fp: fp.close()
versioneer.VCS = 'git'
versioneer.versionfile_source = 'src/myproject/_version.py'
versioneer.versionfile_build = 'myproject/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
* Add the following arguments to the setup() call in your setup.py:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
* Now run `python setup.py versioneer`, which will create `_version.py`, and will
modify your `__init__.py` (if one exists next to `_version.py`) to define
`__version__` (by calling a function from `_version.py`). It will also
modify your `MANIFEST.in` to include both `versioneer.py` and the generated
`_version.py` in sdist tarballs.
* Commit these changes to your VCS. To make sure you won't forget,
`setup.py versioneer` will mark everything it touched for addition.
If you distribute your project through PyPI, then the release process should
boil down to two steps:
* 1: git tag 1.0
* 2: python setup.py register sdist upload
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example 'git describe --tags --dirty --always' reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time. However,
when you use "setup.py build" or "setup.py sdist", `_version.py` in the new
copy is replaced by a small static file that contains just the generated
version data.
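A generated static `_version.py` ends up looking roughly like this (a sketch; the
version and revision values are whatever was computed at build/sdist time):
    version_version = '1.2'
    version_full = '4f5a6b71234567890abcdef1234567890abcdef'
    def get_versions(default={}, verbose=False):
        return {'version': version_version, 'full': version_full}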
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the "git archive" command. As a result, generated tarballs will
contain enough information to get the proper version.
## Detailed Installation Instructions
First, decide on values for the following configuration variables:
* `VCS`: the version control system you use. Currently accepts "git".
* `versionfile_source`:
A project-relative pathname into which the generated version strings should
be written. This is usually a `_version.py` next to your project's main
`__init__.py` file, so it can be imported at runtime. If your project uses
`src/myproject/__init__.py`, this should be `src/myproject/_version.py`.
This file should be checked in to your VCS as usual: the copy created below
by `setup.py versioneer` will include code that parses expanded VCS
keywords in generated tarballs. The 'build' and 'sdist' commands will
replace it with a copy that has just the calculated version string.
This must be set even if your project does not have any modules (and will
therefore never import `_version.py`), since "setup.py sdist" -based trees
still need somewhere to record the pre-calculated version strings. Anywhere
in the source tree should do. If there is a `__init__.py` next to your
`_version.py`, the `setup.py versioneer` command (described below) will
append some `__version__`-setting assignments, if they aren't already
present.
* `versionfile_build`:
Like `versionfile_source`, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`,
then you will probably have `versionfile_build='myproject/_version.py'` and
`versionfile_source='src/myproject/_version.py'`.
If this is set to None, then `setup.py build` will not attempt to rewrite
any `_version.py` in the built tree. If your project does not have any
libraries (e.g. if it only builds a script), then you should use
`versionfile_build = None` and override `distutils.command.build_scripts`
to explicitly insert a copy of `versioneer.get_version()` into your
generated script.
* `tag_prefix`:
a string, like 'PROJECTNAME-', which appears at the start of all VCS tags.
If your tags look like 'myproject-1.2.0', then you should use
tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this
should be an empty string.
* `parentdir_prefix`:
a string, frequently the same as tag_prefix, which appears at the start of
all unpacked tarball filenames. If your tarball unpacks into
'myproject-1.2.0', this should be 'myproject-'.
This tool provides one script, named `versioneer-installer`. That script does
one thing: write a copy of `versioneer.py` into the current directory.
To versioneer-enable your project:
* 1: Run `versioneer-installer` to copy `versioneer.py` into the top of your
source tree.
* 2: add the following lines to the top of your `setup.py`, with the
configuration values you decided earlier:
import imp
fp, pathname, description = imp.find_module('versioneer')
try:
versioneer = imp.load_module('versioneer', fp, pathname, description)
finally:
if fp: fp.close()
versioneer.VCS = 'git'
versioneer.versionfile_source = 'src/myproject/_version.py'
versioneer.versionfile_build = 'myproject/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
* 3: add the following arguments to the setup() call in your setup.py:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
* 4: now run `setup.py versioneer`, which will create `_version.py`, and will
modify your `__init__.py` (if one exists next to `_version.py`) to define
`__version__` (by calling a function from `_version.py`). It will also
modify your `MANIFEST.in` to include both `versioneer.py` and the generated
`_version.py` in sdist tarballs.
* 5: commit these changes to your VCS. To make sure you won't forget,
`setup.py versioneer` will mark everything it touched for addition.
## Post-Installation Usage
Once established, all uses of your tree from a VCS checkout should get the
current version string. All generated tarballs should include an embedded
version string (so users who unpack them will not need a VCS tool installed).
If you distribute your project through PyPI, then the release process should
boil down to two steps:
* 1: git tag 1.0
* 2: python setup.py register sdist upload
If you distribute it through github (i.e. users use github to generate
tarballs with `git archive`), the process is:
* 1: git tag 1.0
* 2: git push; git push --tags
Currently, all version strings must be based upon a tag. Versioneer will
report "unknown" until your tree has at least one tag in its history. This
restriction will be fixed eventually (see issue #12).
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different keys for different flavors
of the version string:
* `['version']`: condensed tag+distance+shortid+dirty identifier.
This is based on the output of `git describe --tags --dirty --always` but
strips the tag_prefix. For example "0.11.post0.dev2+g1076c97-dirty" indicates
that the tree is like the "1076c97" commit but has uncommitted changes
("-dirty"), and that this commit is two revisions (".dev2") beyond the "0.11"
tag. For released software (exactly equal to a known tag),
the identifier will only contain the stripped tag, e.g. "0.11".
This version string is always fully PEP440 compliant.
* `['full']`: detailed revision identifier. For Git, this is the full SHA1
commit id, followed by "-dirty" if the tree contains uncommitted changes,
e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac-dirty".
Some variants are more useful than others. Including `full` in a bug report
should allow developers to reconstruct the exact code being tested (or
indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
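For example, a call from `setup.py` might return something like (a sketch; the
exact values depend on your tree):
    >>> versioneer.get_versions()   # doctest: +SKIP
    {'version': '0.11.post0.dev2+g1076c97', 'full': '1076c978a8d3cfc70f408fe5974aa6c092c949ac'}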
The `setup.py versioneer` command adds the following text to your
`__init__.py` to place a basic version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Updating Versioneer2
To upgrade your project to a new release of Versioneer, do the following:
* install the new versioneer2 (`pip install -U versioneer2` or equivalent)
* re-run `versioneer-installer` in your source tree to replace your copy of
`versioneer.py`
* edit `setup.py`, if necessary, to include any new configuration settings
indicated by the release notes
* re-run `setup.py versioneer` to replace `SRC/_version.py`
* commit any changed files
"""
import os
import sys
import re
import subprocess
import errno
import string
import io
from distutils.core import Command
from distutils.command.sdist import sdist as _sdist
from distutils.command.build import build as _build
__version__ = '0.1.10'
# these configuration settings will be overridden by setup.py after it
# imports us
versionfile_source = None
versionfile_build = None
tag_prefix = None
parentdir_prefix = None
VCS = None
# This is hard-coded for now.
release_type_string = "post0.dev"
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
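# For example (sketch), the VCS helpers below invoke it as
#     run_command(["git"], ["describe", "--tags", "--dirty", "--always"], cwd=root)
# and get back the command's stdout as a string, or None if the command could not
# be run or exited non-zero.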
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer2-(%(__version__)s) (https://github.com/ryanpdwyer/versioneer2)
# these strings will be replaced by git during git-archive
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
# these strings are filled in when 'setup.py versioneer' creates _version.py
tag_prefix = "%(TAG_PREFIX)s"
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
versionfile_source = "%(VERSIONFILE_SOURCE)s"
import os
import sys
import re
import subprocess
import errno
import io
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% args[0])
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):].replace("_", "+").strip(".egg"), "full": ""}
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = io.open(versionfile_abs, "r", encoding='utf-8')
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
if not keywords:
return {} # keyword-finding function failed to find keywords
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs-tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return { "version": r,
"full": keywords["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": keywords["full"].strip(),
"full": keywords["full"].strip() }
def git_versions_from_vcs(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
return {}
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
keywords = { "refnames": git_refnames, "full": git_full }
ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
if ver:
return ver
try:
root = os.path.abspath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in range(len(versionfile_source.split('/'))):
root = os.path.dirname(root)
except NameError:
return default
return (git_versions_from_vcs(tag_prefix, root, verbose)
or versions_from_parentdir(parentdir_prefix, root, verbose)
or default)
'''
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = io.open(versionfile_abs, "r", encoding='utf-8')
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
if not keywords:
return {} # keyword-finding function failed to find keywords
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return { "version": r,
"full": keywords["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": keywords["full"].strip(),
"full": keywords["full"].strip() }
def git_versions_from_vcs(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full, '__version__': __version__}
def do_vcs_install(manifest_in, versionfile_source, ipy):
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = io.open(".gitattributes", "r", encoding='utf-8')
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = io.open(".gitattributes", "a+", encoding='utf-8')
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):].replace("_", "+").strip(".egg"), "full": ""}
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (%(__version__)s) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
version_version = '%(version)s'
version_full = '%(full)s'
def get_versions(default={}, verbose=False):
return {'version': version_version, 'full': version_full}
"""
DEFAULT = {"version": "unknown", "full": "unknown"}
def versions_from_file(filename):
versions = {}
try:
with io.open(filename, encoding='utf-8') as f:
for line in f.readlines():
mo = re.match("version_version = '([^']+)'", line)
if mo:
versions["version"] = mo.group(1)
mo = re.match("version_full = '([^']+)'", line)
if mo:
versions["full"] = mo.group(1)
except EnvironmentError:
return {}
return versions
def write_to_version_file(filename, versions):
with io.open(filename, "w", encoding='utf-8') as f:
f.write(SHORT_VERSION_PY % versions)
print("set %s to '%s'" % (filename, versions["version"]))
def get_root():
try:
return os.path.dirname(os.path.abspath(__file__))
except NameError:
return os.path.dirname(os.path.abspath(sys.argv[0]))
def vcs_function(vcs, suffix):
return getattr(sys.modules[__name__], '%s_%s' % (vcs, suffix), None)
def get_versions(default=DEFAULT, verbose=False):
# returns dict with two keys: 'version' and 'full'
assert versionfile_source is not None, "please set versioneer.versionfile_source"
assert tag_prefix is not None, "please set versioneer.tag_prefix"
assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
assert VCS is not None, "please set versioneer.VCS"
# I am in versioneer.py, which must live at the top of the source tree,
# which we use to compute the root directory. py2exe/bbfreeze/non-CPython
# don't have __file__, in which case we fall back to sys.argv[0] (which
# ought to be the setup.py script). We prefer __file__ since that's more
# robust in cases where setup.py was invoked in some weird way (e.g. pip)
root = get_root()
versionfile_abs = os.path.join(root, versionfile_source)
# extract version first from _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = vcs_function(VCS, "get_keywords")
versions_from_keywords_f = vcs_function(VCS, "versions_from_keywords")
if get_keywords_f and versions_from_keywords_f:
vcs_keywords = get_keywords_f(versionfile_abs)
ver = versions_from_keywords_f(vcs_keywords, tag_prefix)
if ver:
if verbose: print("got version from expanded keyword %s" % ver)
return rep_by_pep440(ver)
ver = versions_from_file(versionfile_abs)
if ver:
if verbose: print("got version from file %s %s" % (versionfile_abs,ver))
return rep_by_pep440(ver)
versions_from_vcs_f = vcs_function(VCS, "versions_from_vcs")
if versions_from_vcs_f:
ver = versions_from_vcs_f(tag_prefix, root, verbose)
if ver:
if verbose: print("got version from VCS %s" % ver)
return rep_by_pep440(ver)
ver = versions_from_parentdir(parentdir_prefix, root, verbose)
if ver:
if verbose: print("got version from parentdir %s" % ver)
return rep_by_pep440(ver)
if verbose: print("got version from default %s" % default)
return default
def get_version(verbose=False):
return get_versions(verbose=verbose)["version"]
def git2pep440(ver_str):
ver_parts = ver_str.split('-')
tag = ver_parts[0]
if len(ver_parts) == 1:
return tag
elif len(ver_parts) == 2:
commits = 0
git_hash = ''
dirty = 'dirty'
elif len(ver_parts) == 3:
commits = ver_parts[1]
git_hash = ver_parts[2]
dirty=''
elif len(ver_parts) == 4:
commits = ver_parts[1]
git_hash = ver_parts[2]
dirty = '.dirty'
else:
raise Warning("git version string could not be parsed.")
return "{tag}.{release_type_string}{commits}+{git_hash}{dirty}".format(
tag=tag,
release_type_string=release_type_string,
commits=commits,
git_hash=git_hash,
dirty=dirty)
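# Sketch of the intended mapping (assuming release_type_string == "post0.dev" and
# hypothetical input strings):
#     git2pep440("1.2")                  -> "1.2"
#     git2pep440("1.2-3-g4f5a6b7")       -> "1.2.post0.dev3+g4f5a6b7"
#     git2pep440("1.2-3-g4f5a6b7-dirty") -> "1.2.post0.dev3+g4f5a6b7.dirty"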
def rep_by_pep440(ver):
ver["version"] = git2pep440(ver["version"])
return ver
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
ver = get_version(verbose=True)
print("Version is currently: %s" % ver)
class cmd_build(_build):
def run(self):
versions = get_versions(verbose=True)
_build.run(self)
# now locate _version.py in the new build/ directory and replace it
# with an updated value
if versionfile_build:
target_versionfile = os.path.join(self.build_lib, versionfile_build)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
with io.open(target_versionfile, "w", encoding='utf-8') as f:
f.write(SHORT_VERSION_PY % versions)
if 'cx_Freeze' in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
class cmd_build_exe(_build_exe):
def run(self):
versions = get_versions(verbose=True)
target_versionfile = versionfile_source
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
with io.open(target_versionfile, "w", encoding='utf-8') as f:
                f.write(SHORT_VERSION_PY % versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with io.open(versionfile_source, "w", encoding='utf-8') as f:
assert VCS is not None, "please set versioneer.VCS"
LONG = LONG_VERSION_PY[VCS]
f.write(LONG % {"DOLLAR": "$",
"TAG_PREFIX": tag_prefix,
"PARENTDIR_PREFIX": parentdir_prefix,
"VERSIONFILE_SOURCE": versionfile_source,
"__version__": __version__
})
class cmd_sdist(_sdist):
def run(self):
versions = get_versions(verbose=True)
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory (remembering
# that it may be a hardlink) and replace it with an updated value
target_versionfile = os.path.join(base_dir, versionfile_source)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
with io.open(target_versionfile, "w", encoding='utf-8') as f:
f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
class cmd_update_files(Command):
description = "install/upgrade Versioneer files: __init__.py SRC/_version.py"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
print(" creating %s" % versionfile_source)
with io.open(versionfile_source, "w", encoding='utf-8') as f:
assert VCS is not None, "please set versioneer.VCS"
LONG = LONG_VERSION_PY[VCS]
f.write(LONG % {"DOLLAR": "$",
"TAG_PREFIX": tag_prefix,
"PARENTDIR_PREFIX": parentdir_prefix,
"VERSIONFILE_SOURCE": versionfile_source,
"__version__": __version__
})
ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
if os.path.exists(ipy):
try:
with io.open(ipy, "r", encoding='utf-8') as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with io.open(ipy, "a", encoding='utf-8') as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(get_root(), "MANIFEST.in")
simple_includes = set()
try:
with io.open(manifest_in, "r", encoding='utf-8') as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with io.open(manifest_in, "a", encoding='utf-8') as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
versionfile_source)
with io.open(manifest_in, "a", encoding='utf-8') as f:
f.write("include %s\n" % versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-time keyword
# substitution.
do_vcs_install(manifest_in, versionfile_source, ipy)
def get_cmdclass():
cmds = {'version': cmd_version,
'versioneer': cmd_update_files,
'build': cmd_build,
'sdist': cmd_sdist,
}
if 'cx_Freeze' in sys.modules: # cx_freeze enabled?
cmds['build_exe'] = cmd_build_exe
del cmds['build']
return cmds
| agpl-3.0 |
tlksio/tlksio | env/lib/python3.4/site-packages/pytz/__init__.py | 70 | 34435 | '''
datetime.tzinfo timezone definitions generated from the
Olson timezone database:
ftp://elsie.nci.nih.gov/pub/tz*.tar.gz
See the datetime section of the Python Library Reference for information
on how to use these modules.
'''
# The IANA (nee Olson) database is updated several times a year.
OLSON_VERSION = '2017b'
VERSION = '2017.2' # Switching to pip compatible version numbering.
__version__ = VERSION
OLSEN_VERSION = OLSON_VERSION # Old releases had this misspelling
__all__ = [
'timezone', 'utc', 'country_timezones', 'country_names',
'AmbiguousTimeError', 'InvalidTimeError',
'NonExistentTimeError', 'UnknownTimeZoneError',
'all_timezones', 'all_timezones_set',
'common_timezones', 'common_timezones_set',
]
import sys, datetime, os.path, gettext
from pytz.exceptions import AmbiguousTimeError
from pytz.exceptions import InvalidTimeError
from pytz.exceptions import NonExistentTimeError
from pytz.exceptions import UnknownTimeZoneError
from pytz.lazy import LazyDict, LazyList, LazySet
from pytz.tzinfo import unpickler
from pytz.tzfile import build_tzinfo, _byte_string
try:
unicode
except NameError: # Python 3.x
# Python 3.x doesn't have unicode(), making writing code
# for Python 2.3 and Python 3.x a pain.
unicode = str
def ascii(s):
r"""
>>> ascii('Hello')
'Hello'
>>> ascii('\N{TRADE MARK SIGN}') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnicodeEncodeError: ...
"""
s.encode('ASCII') # Raise an exception if not ASCII
return s # But return the original string - not a byte string.
else: # Python 2.x
def ascii(s):
r"""
>>> ascii('Hello')
'Hello'
>>> ascii(u'Hello')
'Hello'
>>> ascii(u'\N{TRADE MARK SIGN}') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnicodeEncodeError: ...
"""
return s.encode('ASCII')
def open_resource(name):
"""Open a resource from the zoneinfo subdir for reading.
Uses the pkg_resources module if available and no standard file
found at the calculated location.
"""
name_parts = name.lstrip('/').split('/')
for part in name_parts:
if part == os.path.pardir or os.path.sep in part:
raise ValueError('Bad path segment: %r' % part)
filename = os.path.join(os.path.dirname(__file__),
'zoneinfo', *name_parts)
if not os.path.exists(filename):
# http://bugs.launchpad.net/bugs/383171 - we avoid using this
# unless absolutely necessary to help when a broken version of
# pkg_resources is installed.
try:
from pkg_resources import resource_stream
except ImportError:
resource_stream = None
if resource_stream is not None:
return resource_stream(__name__, 'zoneinfo/' + name)
return open(filename, 'rb')
def resource_exists(name):
"""Return true if the given resource exists"""
try:
open_resource(name).close()
return True
except IOError:
return False
# Enable this when we get some translations?
# We want an i18n API that is useful to programs using Python's gettext
# module, as well as the Zope3 i18n package. Perhaps we should just provide
# the POT file and translations, and leave it up to callers to make use
# of them.
#
# t = gettext.translation(
# 'pytz', os.path.join(os.path.dirname(__file__), 'locales'),
# fallback=True
# )
# def _(timezone_name):
# """Translate a timezone name using the current locale, returning Unicode"""
# return t.ugettext(timezone_name)
_tzinfo_cache = {}
def timezone(zone):
r''' Return a datetime.tzinfo implementation for the given timezone
>>> from datetime import datetime, timedelta
>>> utc = timezone('UTC')
>>> eastern = timezone('US/Eastern')
>>> eastern.zone
'US/Eastern'
>>> timezone(unicode('US/Eastern')) is eastern
True
>>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
>>> loc_dt = utc_dt.astimezone(eastern)
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> loc_dt.strftime(fmt)
'2002-10-27 01:00:00 EST (-0500)'
>>> (loc_dt - timedelta(minutes=10)).strftime(fmt)
'2002-10-27 00:50:00 EST (-0500)'
>>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt)
'2002-10-27 01:50:00 EDT (-0400)'
>>> (loc_dt + timedelta(minutes=10)).strftime(fmt)
'2002-10-27 01:10:00 EST (-0500)'
Raises UnknownTimeZoneError if passed an unknown zone.
>>> try:
... timezone('Asia/Shangri-La')
... except UnknownTimeZoneError:
... print('Unknown')
Unknown
>>> try:
... timezone(unicode('\N{TRADE MARK SIGN}'))
... except UnknownTimeZoneError:
... print('Unknown')
Unknown
'''
if zone.upper() == 'UTC':
return utc
try:
zone = ascii(zone)
except UnicodeEncodeError:
# All valid timezones are ASCII
raise UnknownTimeZoneError(zone)
zone = _unmunge_zone(zone)
if zone not in _tzinfo_cache:
if zone in all_timezones_set:
fp = open_resource(zone)
try:
_tzinfo_cache[zone] = build_tzinfo(zone, fp)
finally:
fp.close()
else:
raise UnknownTimeZoneError(zone)
return _tzinfo_cache[zone]
def _unmunge_zone(zone):
"""Undo the time zone name munging done by older versions of pytz."""
return zone.replace('_plus_', '+').replace('_minus_', '-')
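# For example, a zone name munged by an old pytz release round-trips back:
#     _unmunge_zone('Etc/GMT_plus_8') -> 'Etc/GMT+8'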
ZERO = datetime.timedelta(0)
HOUR = datetime.timedelta(hours=1)
class UTC(datetime.tzinfo):
"""UTC
Optimized UTC implementation. It unpickles using the single module global
instance defined beneath this class declaration.
"""
zone = "UTC"
_utcoffset = ZERO
_dst = ZERO
_tzname = zone
def fromutc(self, dt):
if dt.tzinfo is None:
return self.localize(dt)
return super(utc.__class__, self).fromutc(dt)
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
def __reduce__(self):
return _UTC, ()
def localize(self, dt, is_dst=False):
'''Convert naive time to local time'''
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
return dt.replace(tzinfo=self)
def normalize(self, dt, is_dst=False):
'''Correct the timezone information on the given datetime'''
if dt.tzinfo is self:
return dt
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
return dt.astimezone(self)
def __repr__(self):
return "<UTC>"
def __str__(self):
return "UTC"
UTC = utc = UTC() # UTC is a singleton
def _UTC():
"""Factory function for utc unpickling.
Makes sure that unpickling a utc instance always returns the same
module global.
These examples belong in the UTC class above, but it is obscured; or in
the README.txt, but we are not depending on Python 2.4 so integrating
the README.txt examples with the unit tests is not trivial.
>>> import datetime, pickle
>>> dt = datetime.datetime(2005, 3, 1, 14, 13, 21, tzinfo=utc)
>>> naive = dt.replace(tzinfo=None)
>>> p = pickle.dumps(dt, 1)
>>> naive_p = pickle.dumps(naive, 1)
>>> len(p) - len(naive_p)
17
>>> new = pickle.loads(p)
>>> new == dt
True
>>> new is dt
False
>>> new.tzinfo is dt.tzinfo
True
>>> utc is UTC is timezone('UTC')
True
>>> utc is timezone('GMT')
False
"""
return utc
_UTC.__safe_for_unpickling__ = True
def _p(*args):
"""Factory function for unpickling pytz tzinfo instances.
Just a wrapper around tzinfo.unpickler to save a few bytes in each pickle
by shortening the path.
"""
return unpickler(*args)
_p.__safe_for_unpickling__ = True
class _CountryTimezoneDict(LazyDict):
"""Map ISO 3166 country code to a list of timezone names commonly used
in that country.
iso3166_code is the two letter code used to identify the country.
>>> def print_list(list_of_strings):
... 'We use a helper so doctests work under Python 2.3 -> 3.x'
... for s in list_of_strings:
... print(s)
>>> print_list(country_timezones['nz'])
Pacific/Auckland
Pacific/Chatham
>>> print_list(country_timezones['ch'])
Europe/Zurich
>>> print_list(country_timezones['CH'])
Europe/Zurich
>>> print_list(country_timezones[unicode('ch')])
Europe/Zurich
>>> print_list(country_timezones['XXX'])
Traceback (most recent call last):
...
KeyError: 'XXX'
Previously, this information was exposed as a function rather than a
dictionary. This is still supported::
>>> print_list(country_timezones('nz'))
Pacific/Auckland
Pacific/Chatham
"""
def __call__(self, iso3166_code):
"""Backwards compatibility."""
return self[iso3166_code]
def _fill(self):
data = {}
zone_tab = open_resource('zone.tab')
try:
for line in zone_tab:
line = line.decode('UTF-8')
if line.startswith('#'):
continue
code, coordinates, zone = line.split(None, 4)[:3]
if zone not in all_timezones_set:
continue
try:
data[code].append(zone)
except KeyError:
data[code] = [zone]
self.data = data
finally:
zone_tab.close()
country_timezones = _CountryTimezoneDict()
class _CountryNameDict(LazyDict):
    '''Dictionary providing ISO3166 code -> English name.
>>> print(country_names['au'])
Australia
'''
def _fill(self):
data = {}
zone_tab = open_resource('iso3166.tab')
try:
for line in zone_tab.readlines():
line = line.decode('UTF-8')
if line.startswith('#'):
continue
code, name = line.split(None, 1)
data[code] = name.strip()
self.data = data
finally:
zone_tab.close()
country_names = _CountryNameDict()
# Time-zone info based solely on fixed offsets
class _FixedOffset(datetime.tzinfo):
zone = None # to match the standard pytz API
def __init__(self, minutes):
if abs(minutes) >= 1440:
raise ValueError("absolute offset is too large", minutes)
self._minutes = minutes
self._offset = datetime.timedelta(minutes=minutes)
def utcoffset(self, dt):
return self._offset
def __reduce__(self):
return FixedOffset, (self._minutes, )
def dst(self, dt):
return ZERO
def tzname(self, dt):
return None
def __repr__(self):
return 'pytz.FixedOffset(%d)' % self._minutes
def localize(self, dt, is_dst=False):
'''Convert naive time to local time'''
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
return dt.replace(tzinfo=self)
def normalize(self, dt, is_dst=False):
'''Correct the timezone information on the given datetime'''
if dt.tzinfo is self:
return dt
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
return dt.astimezone(self)
def FixedOffset(offset, _tzinfos = {}):
"""return a fixed-offset timezone based off a number of minutes.
>>> one = FixedOffset(-330)
>>> one
pytz.FixedOffset(-330)
>>> one.utcoffset(datetime.datetime.now())
datetime.timedelta(-1, 66600)
>>> one.dst(datetime.datetime.now())
datetime.timedelta(0)
>>> two = FixedOffset(1380)
>>> two
pytz.FixedOffset(1380)
>>> two.utcoffset(datetime.datetime.now())
datetime.timedelta(0, 82800)
>>> two.dst(datetime.datetime.now())
datetime.timedelta(0)
The datetime.timedelta must be between the range of -1 and 1 day,
non-inclusive.
>>> FixedOffset(1440)
Traceback (most recent call last):
...
ValueError: ('absolute offset is too large', 1440)
>>> FixedOffset(-1440)
Traceback (most recent call last):
...
ValueError: ('absolute offset is too large', -1440)
An offset of 0 is special-cased to return UTC.
>>> FixedOffset(0) is UTC
True
There should always be only one instance of a FixedOffset per timedelta.
This should be true for multiple creation calls.
>>> FixedOffset(-330) is one
True
>>> FixedOffset(1380) is two
True
It should also be true for pickling.
>>> import pickle
>>> pickle.loads(pickle.dumps(one)) is one
True
>>> pickle.loads(pickle.dumps(two)) is two
True
"""
if offset == 0:
return UTC
info = _tzinfos.get(offset)
if info is None:
# We haven't seen this one before. we need to save it.
# Use setdefault to avoid a race condition and make sure we have
# only one
info = _tzinfos.setdefault(offset, _FixedOffset(offset))
return info
FixedOffset.__safe_for_unpickling__ = True
def _test():
import doctest, os, sys
sys.path.insert(0, os.pardir)
import pytz
return doctest.testmod(pytz)
if __name__ == '__main__':
_test()
all_timezones = \
['Africa/Abidjan',
'Africa/Accra',
'Africa/Addis_Ababa',
'Africa/Algiers',
'Africa/Asmara',
'Africa/Asmera',
'Africa/Bamako',
'Africa/Bangui',
'Africa/Banjul',
'Africa/Bissau',
'Africa/Blantyre',
'Africa/Brazzaville',
'Africa/Bujumbura',
'Africa/Cairo',
'Africa/Casablanca',
'Africa/Ceuta',
'Africa/Conakry',
'Africa/Dakar',
'Africa/Dar_es_Salaam',
'Africa/Djibouti',
'Africa/Douala',
'Africa/El_Aaiun',
'Africa/Freetown',
'Africa/Gaborone',
'Africa/Harare',
'Africa/Johannesburg',
'Africa/Juba',
'Africa/Kampala',
'Africa/Khartoum',
'Africa/Kigali',
'Africa/Kinshasa',
'Africa/Lagos',
'Africa/Libreville',
'Africa/Lome',
'Africa/Luanda',
'Africa/Lubumbashi',
'Africa/Lusaka',
'Africa/Malabo',
'Africa/Maputo',
'Africa/Maseru',
'Africa/Mbabane',
'Africa/Mogadishu',
'Africa/Monrovia',
'Africa/Nairobi',
'Africa/Ndjamena',
'Africa/Niamey',
'Africa/Nouakchott',
'Africa/Ouagadougou',
'Africa/Porto-Novo',
'Africa/Sao_Tome',
'Africa/Timbuktu',
'Africa/Tripoli',
'Africa/Tunis',
'Africa/Windhoek',
'America/Adak',
'America/Anchorage',
'America/Anguilla',
'America/Antigua',
'America/Araguaina',
'America/Argentina/Buenos_Aires',
'America/Argentina/Catamarca',
'America/Argentina/ComodRivadavia',
'America/Argentina/Cordoba',
'America/Argentina/Jujuy',
'America/Argentina/La_Rioja',
'America/Argentina/Mendoza',
'America/Argentina/Rio_Gallegos',
'America/Argentina/Salta',
'America/Argentina/San_Juan',
'America/Argentina/San_Luis',
'America/Argentina/Tucuman',
'America/Argentina/Ushuaia',
'America/Aruba',
'America/Asuncion',
'America/Atikokan',
'America/Atka',
'America/Bahia',
'America/Bahia_Banderas',
'America/Barbados',
'America/Belem',
'America/Belize',
'America/Blanc-Sablon',
'America/Boa_Vista',
'America/Bogota',
'America/Boise',
'America/Buenos_Aires',
'America/Cambridge_Bay',
'America/Campo_Grande',
'America/Cancun',
'America/Caracas',
'America/Catamarca',
'America/Cayenne',
'America/Cayman',
'America/Chicago',
'America/Chihuahua',
'America/Coral_Harbour',
'America/Cordoba',
'America/Costa_Rica',
'America/Creston',
'America/Cuiaba',
'America/Curacao',
'America/Danmarkshavn',
'America/Dawson',
'America/Dawson_Creek',
'America/Denver',
'America/Detroit',
'America/Dominica',
'America/Edmonton',
'America/Eirunepe',
'America/El_Salvador',
'America/Ensenada',
'America/Fort_Nelson',
'America/Fort_Wayne',
'America/Fortaleza',
'America/Glace_Bay',
'America/Godthab',
'America/Goose_Bay',
'America/Grand_Turk',
'America/Grenada',
'America/Guadeloupe',
'America/Guatemala',
'America/Guayaquil',
'America/Guyana',
'America/Halifax',
'America/Havana',
'America/Hermosillo',
'America/Indiana/Indianapolis',
'America/Indiana/Knox',
'America/Indiana/Marengo',
'America/Indiana/Petersburg',
'America/Indiana/Tell_City',
'America/Indiana/Vevay',
'America/Indiana/Vincennes',
'America/Indiana/Winamac',
'America/Indianapolis',
'America/Inuvik',
'America/Iqaluit',
'America/Jamaica',
'America/Jujuy',
'America/Juneau',
'America/Kentucky/Louisville',
'America/Kentucky/Monticello',
'America/Knox_IN',
'America/Kralendijk',
'America/La_Paz',
'America/Lima',
'America/Los_Angeles',
'America/Louisville',
'America/Lower_Princes',
'America/Maceio',
'America/Managua',
'America/Manaus',
'America/Marigot',
'America/Martinique',
'America/Matamoros',
'America/Mazatlan',
'America/Mendoza',
'America/Menominee',
'America/Merida',
'America/Metlakatla',
'America/Mexico_City',
'America/Miquelon',
'America/Moncton',
'America/Monterrey',
'America/Montevideo',
'America/Montreal',
'America/Montserrat',
'America/Nassau',
'America/New_York',
'America/Nipigon',
'America/Nome',
'America/Noronha',
'America/North_Dakota/Beulah',
'America/North_Dakota/Center',
'America/North_Dakota/New_Salem',
'America/Ojinaga',
'America/Panama',
'America/Pangnirtung',
'America/Paramaribo',
'America/Phoenix',
'America/Port-au-Prince',
'America/Port_of_Spain',
'America/Porto_Acre',
'America/Porto_Velho',
'America/Puerto_Rico',
'America/Punta_Arenas',
'America/Rainy_River',
'America/Rankin_Inlet',
'America/Recife',
'America/Regina',
'America/Resolute',
'America/Rio_Branco',
'America/Rosario',
'America/Santa_Isabel',
'America/Santarem',
'America/Santiago',
'America/Santo_Domingo',
'America/Sao_Paulo',
'America/Scoresbysund',
'America/Shiprock',
'America/Sitka',
'America/St_Barthelemy',
'America/St_Johns',
'America/St_Kitts',
'America/St_Lucia',
'America/St_Thomas',
'America/St_Vincent',
'America/Swift_Current',
'America/Tegucigalpa',
'America/Thule',
'America/Thunder_Bay',
'America/Tijuana',
'America/Toronto',
'America/Tortola',
'America/Vancouver',
'America/Virgin',
'America/Whitehorse',
'America/Winnipeg',
'America/Yakutat',
'America/Yellowknife',
'Antarctica/Casey',
'Antarctica/Davis',
'Antarctica/DumontDUrville',
'Antarctica/Macquarie',
'Antarctica/Mawson',
'Antarctica/McMurdo',
'Antarctica/Palmer',
'Antarctica/Rothera',
'Antarctica/South_Pole',
'Antarctica/Syowa',
'Antarctica/Troll',
'Antarctica/Vostok',
'Arctic/Longyearbyen',
'Asia/Aden',
'Asia/Almaty',
'Asia/Amman',
'Asia/Anadyr',
'Asia/Aqtau',
'Asia/Aqtobe',
'Asia/Ashgabat',
'Asia/Ashkhabad',
'Asia/Atyrau',
'Asia/Baghdad',
'Asia/Bahrain',
'Asia/Baku',
'Asia/Bangkok',
'Asia/Barnaul',
'Asia/Beirut',
'Asia/Bishkek',
'Asia/Brunei',
'Asia/Calcutta',
'Asia/Chita',
'Asia/Choibalsan',
'Asia/Chongqing',
'Asia/Chungking',
'Asia/Colombo',
'Asia/Dacca',
'Asia/Damascus',
'Asia/Dhaka',
'Asia/Dili',
'Asia/Dubai',
'Asia/Dushanbe',
'Asia/Famagusta',
'Asia/Gaza',
'Asia/Harbin',
'Asia/Hebron',
'Asia/Ho_Chi_Minh',
'Asia/Hong_Kong',
'Asia/Hovd',
'Asia/Irkutsk',
'Asia/Istanbul',
'Asia/Jakarta',
'Asia/Jayapura',
'Asia/Jerusalem',
'Asia/Kabul',
'Asia/Kamchatka',
'Asia/Karachi',
'Asia/Kashgar',
'Asia/Kathmandu',
'Asia/Katmandu',
'Asia/Khandyga',
'Asia/Kolkata',
'Asia/Krasnoyarsk',
'Asia/Kuala_Lumpur',
'Asia/Kuching',
'Asia/Kuwait',
'Asia/Macao',
'Asia/Macau',
'Asia/Magadan',
'Asia/Makassar',
'Asia/Manila',
'Asia/Muscat',
'Asia/Nicosia',
'Asia/Novokuznetsk',
'Asia/Novosibirsk',
'Asia/Omsk',
'Asia/Oral',
'Asia/Phnom_Penh',
'Asia/Pontianak',
'Asia/Pyongyang',
'Asia/Qatar',
'Asia/Qyzylorda',
'Asia/Rangoon',
'Asia/Riyadh',
'Asia/Saigon',
'Asia/Sakhalin',
'Asia/Samarkand',
'Asia/Seoul',
'Asia/Shanghai',
'Asia/Singapore',
'Asia/Srednekolymsk',
'Asia/Taipei',
'Asia/Tashkent',
'Asia/Tbilisi',
'Asia/Tehran',
'Asia/Tel_Aviv',
'Asia/Thimbu',
'Asia/Thimphu',
'Asia/Tokyo',
'Asia/Tomsk',
'Asia/Ujung_Pandang',
'Asia/Ulaanbaatar',
'Asia/Ulan_Bator',
'Asia/Urumqi',
'Asia/Ust-Nera',
'Asia/Vientiane',
'Asia/Vladivostok',
'Asia/Yakutsk',
'Asia/Yangon',
'Asia/Yekaterinburg',
'Asia/Yerevan',
'Atlantic/Azores',
'Atlantic/Bermuda',
'Atlantic/Canary',
'Atlantic/Cape_Verde',
'Atlantic/Faeroe',
'Atlantic/Faroe',
'Atlantic/Jan_Mayen',
'Atlantic/Madeira',
'Atlantic/Reykjavik',
'Atlantic/South_Georgia',
'Atlantic/St_Helena',
'Atlantic/Stanley',
'Australia/ACT',
'Australia/Adelaide',
'Australia/Brisbane',
'Australia/Broken_Hill',
'Australia/Canberra',
'Australia/Currie',
'Australia/Darwin',
'Australia/Eucla',
'Australia/Hobart',
'Australia/LHI',
'Australia/Lindeman',
'Australia/Lord_Howe',
'Australia/Melbourne',
'Australia/NSW',
'Australia/North',
'Australia/Perth',
'Australia/Queensland',
'Australia/South',
'Australia/Sydney',
'Australia/Tasmania',
'Australia/Victoria',
'Australia/West',
'Australia/Yancowinna',
'Brazil/Acre',
'Brazil/DeNoronha',
'Brazil/East',
'Brazil/West',
'CET',
'CST6CDT',
'Canada/Atlantic',
'Canada/Central',
'Canada/East-Saskatchewan',
'Canada/Eastern',
'Canada/Mountain',
'Canada/Newfoundland',
'Canada/Pacific',
'Canada/Saskatchewan',
'Canada/Yukon',
'Chile/Continental',
'Chile/EasterIsland',
'Cuba',
'EET',
'EST',
'EST5EDT',
'Egypt',
'Eire',
'Etc/GMT',
'Etc/GMT+0',
'Etc/GMT+1',
'Etc/GMT+10',
'Etc/GMT+11',
'Etc/GMT+12',
'Etc/GMT+2',
'Etc/GMT+3',
'Etc/GMT+4',
'Etc/GMT+5',
'Etc/GMT+6',
'Etc/GMT+7',
'Etc/GMT+8',
'Etc/GMT+9',
'Etc/GMT-0',
'Etc/GMT-1',
'Etc/GMT-10',
'Etc/GMT-11',
'Etc/GMT-12',
'Etc/GMT-13',
'Etc/GMT-14',
'Etc/GMT-2',
'Etc/GMT-3',
'Etc/GMT-4',
'Etc/GMT-5',
'Etc/GMT-6',
'Etc/GMT-7',
'Etc/GMT-8',
'Etc/GMT-9',
'Etc/GMT0',
'Etc/Greenwich',
'Etc/UCT',
'Etc/UTC',
'Etc/Universal',
'Etc/Zulu',
'Europe/Amsterdam',
'Europe/Andorra',
'Europe/Astrakhan',
'Europe/Athens',
'Europe/Belfast',
'Europe/Belgrade',
'Europe/Berlin',
'Europe/Bratislava',
'Europe/Brussels',
'Europe/Bucharest',
'Europe/Budapest',
'Europe/Busingen',
'Europe/Chisinau',
'Europe/Copenhagen',
'Europe/Dublin',
'Europe/Gibraltar',
'Europe/Guernsey',
'Europe/Helsinki',
'Europe/Isle_of_Man',
'Europe/Istanbul',
'Europe/Jersey',
'Europe/Kaliningrad',
'Europe/Kiev',
'Europe/Kirov',
'Europe/Lisbon',
'Europe/Ljubljana',
'Europe/London',
'Europe/Luxembourg',
'Europe/Madrid',
'Europe/Malta',
'Europe/Mariehamn',
'Europe/Minsk',
'Europe/Monaco',
'Europe/Moscow',
'Europe/Nicosia',
'Europe/Oslo',
'Europe/Paris',
'Europe/Podgorica',
'Europe/Prague',
'Europe/Riga',
'Europe/Rome',
'Europe/Samara',
'Europe/San_Marino',
'Europe/Sarajevo',
'Europe/Saratov',
'Europe/Simferopol',
'Europe/Skopje',
'Europe/Sofia',
'Europe/Stockholm',
'Europe/Tallinn',
'Europe/Tirane',
'Europe/Tiraspol',
'Europe/Ulyanovsk',
'Europe/Uzhgorod',
'Europe/Vaduz',
'Europe/Vatican',
'Europe/Vienna',
'Europe/Vilnius',
'Europe/Volgograd',
'Europe/Warsaw',
'Europe/Zagreb',
'Europe/Zaporozhye',
'Europe/Zurich',
'GB',
'GB-Eire',
'GMT',
'GMT+0',
'GMT-0',
'GMT0',
'Greenwich',
'HST',
'Hongkong',
'Iceland',
'Indian/Antananarivo',
'Indian/Chagos',
'Indian/Christmas',
'Indian/Cocos',
'Indian/Comoro',
'Indian/Kerguelen',
'Indian/Mahe',
'Indian/Maldives',
'Indian/Mauritius',
'Indian/Mayotte',
'Indian/Reunion',
'Iran',
'Israel',
'Jamaica',
'Japan',
'Kwajalein',
'Libya',
'MET',
'MST',
'MST7MDT',
'Mexico/BajaNorte',
'Mexico/BajaSur',
'Mexico/General',
'NZ',
'NZ-CHAT',
'Navajo',
'PRC',
'PST8PDT',
'Pacific/Apia',
'Pacific/Auckland',
'Pacific/Bougainville',
'Pacific/Chatham',
'Pacific/Chuuk',
'Pacific/Easter',
'Pacific/Efate',
'Pacific/Enderbury',
'Pacific/Fakaofo',
'Pacific/Fiji',
'Pacific/Funafuti',
'Pacific/Galapagos',
'Pacific/Gambier',
'Pacific/Guadalcanal',
'Pacific/Guam',
'Pacific/Honolulu',
'Pacific/Johnston',
'Pacific/Kiritimati',
'Pacific/Kosrae',
'Pacific/Kwajalein',
'Pacific/Majuro',
'Pacific/Marquesas',
'Pacific/Midway',
'Pacific/Nauru',
'Pacific/Niue',
'Pacific/Norfolk',
'Pacific/Noumea',
'Pacific/Pago_Pago',
'Pacific/Palau',
'Pacific/Pitcairn',
'Pacific/Pohnpei',
'Pacific/Ponape',
'Pacific/Port_Moresby',
'Pacific/Rarotonga',
'Pacific/Saipan',
'Pacific/Samoa',
'Pacific/Tahiti',
'Pacific/Tarawa',
'Pacific/Tongatapu',
'Pacific/Truk',
'Pacific/Wake',
'Pacific/Wallis',
'Pacific/Yap',
'Poland',
'Portugal',
'ROC',
'ROK',
'Singapore',
'Turkey',
'UCT',
'US/Alaska',
'US/Aleutian',
'US/Arizona',
'US/Central',
'US/East-Indiana',
'US/Eastern',
'US/Hawaii',
'US/Indiana-Starke',
'US/Michigan',
'US/Mountain',
'US/Pacific',
'US/Pacific-New',
'US/Samoa',
'UTC',
'Universal',
'W-SU',
'WET',
'Zulu']
all_timezones = LazyList(
tz for tz in all_timezones if resource_exists(tz))
all_timezones_set = LazySet(all_timezones)
common_timezones = \
['Africa/Abidjan',
'Africa/Accra',
'Africa/Addis_Ababa',
'Africa/Algiers',
'Africa/Asmara',
'Africa/Bamako',
'Africa/Bangui',
'Africa/Banjul',
'Africa/Bissau',
'Africa/Blantyre',
'Africa/Brazzaville',
'Africa/Bujumbura',
'Africa/Cairo',
'Africa/Casablanca',
'Africa/Ceuta',
'Africa/Conakry',
'Africa/Dakar',
'Africa/Dar_es_Salaam',
'Africa/Djibouti',
'Africa/Douala',
'Africa/El_Aaiun',
'Africa/Freetown',
'Africa/Gaborone',
'Africa/Harare',
'Africa/Johannesburg',
'Africa/Juba',
'Africa/Kampala',
'Africa/Khartoum',
'Africa/Kigali',
'Africa/Kinshasa',
'Africa/Lagos',
'Africa/Libreville',
'Africa/Lome',
'Africa/Luanda',
'Africa/Lubumbashi',
'Africa/Lusaka',
'Africa/Malabo',
'Africa/Maputo',
'Africa/Maseru',
'Africa/Mbabane',
'Africa/Mogadishu',
'Africa/Monrovia',
'Africa/Nairobi',
'Africa/Ndjamena',
'Africa/Niamey',
'Africa/Nouakchott',
'Africa/Ouagadougou',
'Africa/Porto-Novo',
'Africa/Sao_Tome',
'Africa/Tripoli',
'Africa/Tunis',
'Africa/Windhoek',
'America/Adak',
'America/Anchorage',
'America/Anguilla',
'America/Antigua',
'America/Araguaina',
'America/Argentina/Buenos_Aires',
'America/Argentina/Catamarca',
'America/Argentina/Cordoba',
'America/Argentina/Jujuy',
'America/Argentina/La_Rioja',
'America/Argentina/Mendoza',
'America/Argentina/Rio_Gallegos',
'America/Argentina/Salta',
'America/Argentina/San_Juan',
'America/Argentina/San_Luis',
'America/Argentina/Tucuman',
'America/Argentina/Ushuaia',
'America/Aruba',
'America/Asuncion',
'America/Atikokan',
'America/Bahia',
'America/Bahia_Banderas',
'America/Barbados',
'America/Belem',
'America/Belize',
'America/Blanc-Sablon',
'America/Boa_Vista',
'America/Bogota',
'America/Boise',
'America/Cambridge_Bay',
'America/Campo_Grande',
'America/Cancun',
'America/Caracas',
'America/Cayenne',
'America/Cayman',
'America/Chicago',
'America/Chihuahua',
'America/Costa_Rica',
'America/Creston',
'America/Cuiaba',
'America/Curacao',
'America/Danmarkshavn',
'America/Dawson',
'America/Dawson_Creek',
'America/Denver',
'America/Detroit',
'America/Dominica',
'America/Edmonton',
'America/Eirunepe',
'America/El_Salvador',
'America/Fort_Nelson',
'America/Fortaleza',
'America/Glace_Bay',
'America/Godthab',
'America/Goose_Bay',
'America/Grand_Turk',
'America/Grenada',
'America/Guadeloupe',
'America/Guatemala',
'America/Guayaquil',
'America/Guyana',
'America/Halifax',
'America/Havana',
'America/Hermosillo',
'America/Indiana/Indianapolis',
'America/Indiana/Knox',
'America/Indiana/Marengo',
'America/Indiana/Petersburg',
'America/Indiana/Tell_City',
'America/Indiana/Vevay',
'America/Indiana/Vincennes',
'America/Indiana/Winamac',
'America/Inuvik',
'America/Iqaluit',
'America/Jamaica',
'America/Juneau',
'America/Kentucky/Louisville',
'America/Kentucky/Monticello',
'America/Kralendijk',
'America/La_Paz',
'America/Lima',
'America/Los_Angeles',
'America/Lower_Princes',
'America/Maceio',
'America/Managua',
'America/Manaus',
'America/Marigot',
'America/Martinique',
'America/Matamoros',
'America/Mazatlan',
'America/Menominee',
'America/Merida',
'America/Metlakatla',
'America/Mexico_City',
'America/Miquelon',
'America/Moncton',
'America/Monterrey',
'America/Montevideo',
'America/Montserrat',
'America/Nassau',
'America/New_York',
'America/Nipigon',
'America/Nome',
'America/Noronha',
'America/North_Dakota/Beulah',
'America/North_Dakota/Center',
'America/North_Dakota/New_Salem',
'America/Ojinaga',
'America/Panama',
'America/Pangnirtung',
'America/Paramaribo',
'America/Phoenix',
'America/Port-au-Prince',
'America/Port_of_Spain',
'America/Porto_Velho',
'America/Puerto_Rico',
'America/Punta_Arenas',
'America/Rainy_River',
'America/Rankin_Inlet',
'America/Recife',
'America/Regina',
'America/Resolute',
'America/Rio_Branco',
'America/Santarem',
'America/Santiago',
'America/Santo_Domingo',
'America/Sao_Paulo',
'America/Scoresbysund',
'America/Sitka',
'America/St_Barthelemy',
'America/St_Johns',
'America/St_Kitts',
'America/St_Lucia',
'America/St_Thomas',
'America/St_Vincent',
'America/Swift_Current',
'America/Tegucigalpa',
'America/Thule',
'America/Thunder_Bay',
'America/Tijuana',
'America/Toronto',
'America/Tortola',
'America/Vancouver',
'America/Whitehorse',
'America/Winnipeg',
'America/Yakutat',
'America/Yellowknife',
'Antarctica/Casey',
'Antarctica/Davis',
'Antarctica/DumontDUrville',
'Antarctica/Macquarie',
'Antarctica/Mawson',
'Antarctica/McMurdo',
'Antarctica/Palmer',
'Antarctica/Rothera',
'Antarctica/Syowa',
'Antarctica/Troll',
'Antarctica/Vostok',
'Arctic/Longyearbyen',
'Asia/Aden',
'Asia/Almaty',
'Asia/Amman',
'Asia/Anadyr',
'Asia/Aqtau',
'Asia/Aqtobe',
'Asia/Ashgabat',
'Asia/Atyrau',
'Asia/Baghdad',
'Asia/Bahrain',
'Asia/Baku',
'Asia/Bangkok',
'Asia/Barnaul',
'Asia/Beirut',
'Asia/Bishkek',
'Asia/Brunei',
'Asia/Chita',
'Asia/Choibalsan',
'Asia/Colombo',
'Asia/Damascus',
'Asia/Dhaka',
'Asia/Dili',
'Asia/Dubai',
'Asia/Dushanbe',
'Asia/Famagusta',
'Asia/Gaza',
'Asia/Hebron',
'Asia/Ho_Chi_Minh',
'Asia/Hong_Kong',
'Asia/Hovd',
'Asia/Irkutsk',
'Asia/Jakarta',
'Asia/Jayapura',
'Asia/Jerusalem',
'Asia/Kabul',
'Asia/Kamchatka',
'Asia/Karachi',
'Asia/Kathmandu',
'Asia/Khandyga',
'Asia/Kolkata',
'Asia/Krasnoyarsk',
'Asia/Kuala_Lumpur',
'Asia/Kuching',
'Asia/Kuwait',
'Asia/Macau',
'Asia/Magadan',
'Asia/Makassar',
'Asia/Manila',
'Asia/Muscat',
'Asia/Nicosia',
'Asia/Novokuznetsk',
'Asia/Novosibirsk',
'Asia/Omsk',
'Asia/Oral',
'Asia/Phnom_Penh',
'Asia/Pontianak',
'Asia/Pyongyang',
'Asia/Qatar',
'Asia/Qyzylorda',
'Asia/Riyadh',
'Asia/Sakhalin',
'Asia/Samarkand',
'Asia/Seoul',
'Asia/Shanghai',
'Asia/Singapore',
'Asia/Srednekolymsk',
'Asia/Taipei',
'Asia/Tashkent',
'Asia/Tbilisi',
'Asia/Tehran',
'Asia/Thimphu',
'Asia/Tokyo',
'Asia/Tomsk',
'Asia/Ulaanbaatar',
'Asia/Urumqi',
'Asia/Ust-Nera',
'Asia/Vientiane',
'Asia/Vladivostok',
'Asia/Yakutsk',
'Asia/Yangon',
'Asia/Yekaterinburg',
'Asia/Yerevan',
'Atlantic/Azores',
'Atlantic/Bermuda',
'Atlantic/Canary',
'Atlantic/Cape_Verde',
'Atlantic/Faroe',
'Atlantic/Madeira',
'Atlantic/Reykjavik',
'Atlantic/South_Georgia',
'Atlantic/St_Helena',
'Atlantic/Stanley',
'Australia/Adelaide',
'Australia/Brisbane',
'Australia/Broken_Hill',
'Australia/Currie',
'Australia/Darwin',
'Australia/Eucla',
'Australia/Hobart',
'Australia/Lindeman',
'Australia/Lord_Howe',
'Australia/Melbourne',
'Australia/Perth',
'Australia/Sydney',
'Canada/Atlantic',
'Canada/Central',
'Canada/Eastern',
'Canada/Mountain',
'Canada/Newfoundland',
'Canada/Pacific',
'Europe/Amsterdam',
'Europe/Andorra',
'Europe/Astrakhan',
'Europe/Athens',
'Europe/Belgrade',
'Europe/Berlin',
'Europe/Bratislava',
'Europe/Brussels',
'Europe/Bucharest',
'Europe/Budapest',
'Europe/Busingen',
'Europe/Chisinau',
'Europe/Copenhagen',
'Europe/Dublin',
'Europe/Gibraltar',
'Europe/Guernsey',
'Europe/Helsinki',
'Europe/Isle_of_Man',
'Europe/Istanbul',
'Europe/Jersey',
'Europe/Kaliningrad',
'Europe/Kiev',
'Europe/Kirov',
'Europe/Lisbon',
'Europe/Ljubljana',
'Europe/London',
'Europe/Luxembourg',
'Europe/Madrid',
'Europe/Malta',
'Europe/Mariehamn',
'Europe/Minsk',
'Europe/Monaco',
'Europe/Moscow',
'Europe/Oslo',
'Europe/Paris',
'Europe/Podgorica',
'Europe/Prague',
'Europe/Riga',
'Europe/Rome',
'Europe/Samara',
'Europe/San_Marino',
'Europe/Sarajevo',
'Europe/Saratov',
'Europe/Simferopol',
'Europe/Skopje',
'Europe/Sofia',
'Europe/Stockholm',
'Europe/Tallinn',
'Europe/Tirane',
'Europe/Ulyanovsk',
'Europe/Uzhgorod',
'Europe/Vaduz',
'Europe/Vatican',
'Europe/Vienna',
'Europe/Vilnius',
'Europe/Volgograd',
'Europe/Warsaw',
'Europe/Zagreb',
'Europe/Zaporozhye',
'Europe/Zurich',
'GMT',
'Indian/Antananarivo',
'Indian/Chagos',
'Indian/Christmas',
'Indian/Cocos',
'Indian/Comoro',
'Indian/Kerguelen',
'Indian/Mahe',
'Indian/Maldives',
'Indian/Mauritius',
'Indian/Mayotte',
'Indian/Reunion',
'Pacific/Apia',
'Pacific/Auckland',
'Pacific/Bougainville',
'Pacific/Chatham',
'Pacific/Chuuk',
'Pacific/Easter',
'Pacific/Efate',
'Pacific/Enderbury',
'Pacific/Fakaofo',
'Pacific/Fiji',
'Pacific/Funafuti',
'Pacific/Galapagos',
'Pacific/Gambier',
'Pacific/Guadalcanal',
'Pacific/Guam',
'Pacific/Honolulu',
'Pacific/Kiritimati',
'Pacific/Kosrae',
'Pacific/Kwajalein',
'Pacific/Majuro',
'Pacific/Marquesas',
'Pacific/Midway',
'Pacific/Nauru',
'Pacific/Niue',
'Pacific/Norfolk',
'Pacific/Noumea',
'Pacific/Pago_Pago',
'Pacific/Palau',
'Pacific/Pitcairn',
'Pacific/Pohnpei',
'Pacific/Port_Moresby',
'Pacific/Rarotonga',
'Pacific/Saipan',
'Pacific/Tahiti',
'Pacific/Tarawa',
'Pacific/Tongatapu',
'Pacific/Wake',
'Pacific/Wallis',
'US/Alaska',
'US/Arizona',
'US/Central',
'US/Eastern',
'US/Hawaii',
'US/Mountain',
'US/Pacific',
'UTC']
common_timezones = LazyList(
tz for tz in common_timezones if tz in all_timezones)
common_timezones_set = LazySet(common_timezones)
| mit |
dahlstrom-g/intellij-community | python/helpers/pycharm/_jb_pytest_runner.py | 6 | 1693 | # coding=utf-8
import pytest
from distutils import version
import sys
from _pytest.config import get_plugin_manager
from pkg_resources import iter_entry_points
from _jb_runner_tools import jb_patch_separator, jb_doc_args, JB_DISABLE_BUFFERING, start_protocol, parse_arguments, \
set_parallel_mode
from teamcity import pytest_plugin
if __name__ == '__main__':
path, targets, additional_args = parse_arguments()
sys.argv += additional_args
joined_targets = jb_patch_separator(targets, fs_glue="/", python_glue="::", fs_to_python_glue=".py::")
    # When a file is launched with pytest it must be given as file.py: it cannot be passed as a bare module name
joined_targets = [t + ".py" if ":" not in t else t for t in joined_targets]
sys.argv += [path] if path else joined_targets
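    # Illustrative sketch (assumed behaviour, not taken from _jb_runner_tools):
    # jb_patch_separator() is expected to glue the filesystem part of each IDE
    # target with "/" and the Python part with "::", so a target naming the
    # hypothetical tests/test_math module and TestAdd.test_zero should end up as
    #
    #     "tests/test_math.py::TestAdd::test_zero"
    #
    # while a plain module target becomes "tests/test_math.py" via the patch above.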
    # The plugin is discovered automatically under Python 3, but not under Python 2.
    # To prevent a "plugin already registered" error we check for it first.
plugins_to_load = []
if not get_plugin_manager().hasplugin("pytest-teamcity"):
if "pytest-teamcity" not in map(lambda e: e.name, iter_entry_points(group='pytest11', name=None)):
plugins_to_load.append(pytest_plugin)
args = sys.argv[1:]
if version.LooseVersion(pytest.__version__) >= version.LooseVersion("6.0"):
args += ["--no-header", "--no-summary", "-q"]
if JB_DISABLE_BUFFERING and "-s" not in args:
args += ["-s"]
jb_doc_args("pytest", args)
class Plugin:
@staticmethod
def pytest_configure(config):
if getattr(config.option, "numprocesses", None):
set_parallel_mode()
start_protocol()
sys.exit(pytest.main(args, plugins_to_load + [Plugin]))
| apache-2.0 |
jima80525/pyres | pavement.py | 1 | 3641 | # -*- coding: utf-8 -*-
""" File to drive paver scripts """
from __future__ import print_function
import sys
import subprocess
# Import parameters from the setup file.
sys.path.append('.')
from setup import (
print_success_message, print_failure_message,
setup_dict, _lint, _test, _test_all, _test_file)
from paver.easy import options, task, consume_args
from paver.setuputils import install_distutils_tasks
options(setup=setup_dict)
install_distutils_tasks()
## Miscellaneous helper functions
def print_passed():
"""
generated on http://patorjk.com/software/taag/#p=display&f=Small&t=PASSED
"""
print_success_message(r''' ___ _ ___ ___ ___ ___
| _ \/_\ / __/ __| __| \
| _/ _ \\__ \__ \ _|| |) |
|_|/_/ \_\___/___/___|___/
''')
def print_failed():
"""
# generated on http://patorjk.com/software/taag/#p=display&f=Small&t=FAILED
"""
print_failure_message(r''' ___ _ ___ _ ___ ___
| __/_\ |_ _| | | __| \
| _/ _ \ | || |__| _|| |) |
|_/_/ \_\___|____|___|___/
''')
@task
def lint():
# This refuses to format properly when running `paver help' unless
# this ugliness is used.
('Perform PEP8 style check, run PyFlakes, and run McCabe complexity '
'metrics on the code.')
raise SystemExit(_lint())
@task
@consume_args
def run(args):
"""Run the package's main script. All arguments are passed to it."""
# The main script expects to get the called executable's name as
# argv[0]. However, paver doesn't provide that in args. Even if it did (or
# we dove into sys.argv), it wouldn't be useful because it would be paver's
# executable. So we just pass the package name in as the executable name,
# since it's close enough. This should never be seen by an end user
# installing through Setuptools anyway.
# This is stuffed into sys.argv as setuptools calls entry points without
# args.
from pyres.main import main
sys.argv = ['pyres']
sys.argv.extend(args)
raise SystemExit(main())
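# Illustrative sketch (hypothetical invocation): because run() prepends the
# package name as argv[0], a call such as
#
#     paver run add http://example.com/feed.rss --start-date 01/01/15
#
# reaches pyres.main.main() with
#
#     sys.argv == ['pyres', 'add', 'http://example.com/feed.rss',
#                  '--start-date', '01/01/15']
#
# which is what the parser would see if the installed `pyres` script were run.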
@task
def add_sciam():
""" Test adding a scientific american podcast """
# see notes in run task above
from pyres.main import main
arg = ['add', 'http://rss.sciam.com/sciam/60secsciencepodcast',
'--start-date', '05/01/15', '--max-update', '3']
#'--start-date', '10/25/14', '--max-update', '3']
#arg = ['add', 'http://rss.sciam.com/sciam/60secsciencepodcast',
#'--start-date', '10/25/14']
sys.argv = ['pyres']
sys.argv.extend(arg)
main()
arg = ['add', 'http://rss.sciam.com/sciam/60-second-psych',
'--start-date', '09/20/14']
sys.argv = ['pyres']
sys.argv.extend(arg)
raise SystemExit()
@task
def add_serial():
""" Test adding the serial podcast """
# see notes in run task above
from pyres.main import main
arg = ['add', 'http://feeds.serialpodcast.org/serialpodcast',
'--start-date', '01/01/14']
sys.argv = ['pyres']
sys.argv.extend(arg)
raise SystemExit(main())
@task
@consume_args
def test(args):
"""Run the unit tests."""
if len(args) == 0:
raise SystemExit(_test())
else:
raise SystemExit(_test_file(args[0]))
@task
def test_all():
"""Perform a style check and run all unit tests."""
retcode = _test_all()
if retcode == 0:
print_passed()
else:
print_failed()
raise SystemExit(retcode)
@task
def cov():
""" Get test coverage """
retcode = subprocess.call(
'py.test --cov-report term-missing --cov pyres', shell=True)
if retcode != 0:
print_failure_message('Failed running pytest')
| mit |
mdeejay/android_kernel_dlxu | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s failed-syscalls-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
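		# syscalls is an autodict (an auto-vivifying nested dict helper), so the
		# first increment for a brand-new key raises TypeError because the leaf
		# is still an empty dict; in that case we initialise the counter to 1.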
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
moreati/django | django/db/backends/postgresql/base.py | 143 | 10451 | """
PostgreSQL database backend for Django.
Requires psycopg 2: http://initd.org/projects/psycopg2
"""
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.base.validation import BaseDatabaseValidation
from django.db.utils import DatabaseError as WrappedDatabaseError
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes, SafeText
try:
import psycopg2 as Database
import psycopg2.extensions
import psycopg2.extras
except ImportError as e:
raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)
def psycopg2_version():
version = psycopg2.__version__.split(' ', 1)[0]
return tuple(int(v) for v in version.split('.') if v.isdigit())
PSYCOPG2_VERSION = psycopg2_version()
if PSYCOPG2_VERSION < (2, 4, 5):
    raise ImproperlyConfigured("psycopg2 2.4.5 or newer is required; you have %s" % psycopg2.__version__)
# Some of these import psycopg2, so import them after checking if it's installed.
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
from .utils import utc_tzinfo_factory # isort:skip
from .version import get_version # isort:skip
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
psycopg2.extensions.register_adapter(SafeBytes, psycopg2.extensions.QuotedString)
psycopg2.extensions.register_adapter(SafeText, psycopg2.extensions.QuotedString)
psycopg2.extras.register_uuid()
# Register support for inet[] manually so we don't have to handle the Inet()
# object on load all the time.
INETARRAY_OID = 1041
INETARRAY = psycopg2.extensions.new_array_type(
(INETARRAY_OID,),
'INETARRAY',
psycopg2.extensions.UNICODE,
)
psycopg2.extensions.register_type(INETARRAY)
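# Illustrative sketch (hypothetical query): with INETARRAY registered, an
# inet[] value comes back as a plain list of strings rather than a list of
# psycopg2 Inet objects, e.g.
#
#     cursor.execute("SELECT ARRAY['10.0.0.1', '::1']::inet[]")
#     cursor.fetchone()[0]   # -> ['10.0.0.1', '::1']
#
# which is what the comment above means by not handling Inet() on load.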
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'postgresql'
# This dictionary maps Field objects to their associated PostgreSQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
data_types = {
'AutoField': 'serial',
'BinaryField': 'bytea',
'BooleanField': 'boolean',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'timestamp with time zone',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'interval',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'inet',
'GenericIPAddressField': 'inet',
'NullBooleanField': 'boolean',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer',
'PositiveSmallIntegerField': 'smallint',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
'UUIDField': 'uuid',
}
data_type_check_constraints = {
'PositiveIntegerField': '"%(column)s" >= 0',
'PositiveSmallIntegerField': '"%(column)s" >= 0',
}
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': 'LIKE %s',
'icontains': 'LIKE UPPER(%s)',
'regex': '~ %s',
'iregex': '~* %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE UPPER(%s)',
'iendswith': 'LIKE UPPER(%s)',
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
pattern_ops = {
'contains': "LIKE '%%' || {} || '%%'",
'icontains': "LIKE '%%' || UPPER({}) || '%%'",
'startswith': "LIKE {} || '%%'",
'istartswith': "LIKE UPPER({}) || '%%'",
'endswith': "LIKE '%%' || {}",
'iendswith': "LIKE '%%' || UPPER({})",
}
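    # Illustrative sketch (column names are hypothetical): for an icontains
    # lookup whose right-hand side is an expression, pattern_esc and
    # pattern_ops combine into SQL roughly like
    #
    #     UPPER("author"."name") LIKE '%' || UPPER(
    #         REPLACE(REPLACE(REPLACE("author"."alias", '\', '\\'),
    #                         '%', '\%'), '_', '\_')
    #     ) || '%'
    #
    # so wildcard characters coming from the expression match literally instead
    # of acting as LIKE metacharacters.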
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def get_connection_params(self):
settings_dict = self.settings_dict
# None may be used to connect to the default 'postgres' db
if settings_dict['NAME'] == '':
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
conn_params = {
'database': settings_dict['NAME'] or 'postgres',
}
conn_params.update(settings_dict['OPTIONS'])
conn_params.pop('isolation_level', None)
if settings_dict['USER']:
conn_params['user'] = settings_dict['USER']
if settings_dict['PASSWORD']:
conn_params['password'] = force_str(settings_dict['PASSWORD'])
if settings_dict['HOST']:
conn_params['host'] = settings_dict['HOST']
if settings_dict['PORT']:
conn_params['port'] = settings_dict['PORT']
return conn_params
def get_new_connection(self, conn_params):
connection = Database.connect(**conn_params)
# self.isolation_level must be set:
# - after connecting to the database in order to obtain the database's
# default when no value is explicitly specified in options.
# - before calling _set_autocommit() because if autocommit is on, that
# will set connection.isolation_level to ISOLATION_LEVEL_AUTOCOMMIT.
options = self.settings_dict['OPTIONS']
try:
self.isolation_level = options['isolation_level']
except KeyError:
self.isolation_level = connection.isolation_level
else:
# Set the isolation level to the value from OPTIONS.
if self.isolation_level != connection.isolation_level:
connection.set_session(isolation_level=self.isolation_level)
return connection
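    # Illustrative sketch (settings fragment is hypothetical): the isolation
    # level consumed in get_new_connection() is normally supplied through the
    # OPTIONS dict, e.g.
    #
    #     import psycopg2.extensions
    #
    #     DATABASES = {
    #         'default': {
    #             'ENGINE': 'django.db.backends.postgresql',
    #             'NAME': 'mydb',
    #             'OPTIONS': {
    #                 'isolation_level':
    #                     psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE,
    #             },
    #         },
    #     }
    #
    # When the key is absent, the database's default isolation level is kept.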
def init_connection_state(self):
self.connection.set_client_encoding('UTF8')
conn_timezone_name = self.connection.get_parameter_status('TimeZone')
if conn_timezone_name != self.timezone_name:
cursor = self.connection.cursor()
try:
cursor.execute(self.ops.set_time_zone_sql(), [self.timezone_name])
finally:
cursor.close()
# Commit after setting the time zone (see #17062)
if not self.get_autocommit():
self.connection.commit()
def create_cursor(self):
cursor = self.connection.cursor()
cursor.tzinfo_factory = utc_tzinfo_factory if settings.USE_TZ else None
return cursor
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit = autocommit
def check_constraints(self, table_names=None):
"""
        To check constraints, we set constraints to immediate. Then, when we're done, we must ensure they
        are returned to deferred.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
def is_usable(self):
try:
# Use a psycopg cursor directly, bypassing Django's utilities.
self.connection.cursor().execute("SELECT 1")
except Database.Error:
return False
else:
return True
@property
def _nodb_connection(self):
nodb_connection = super(DatabaseWrapper, self)._nodb_connection
try:
nodb_connection.ensure_connection()
except (DatabaseError, WrappedDatabaseError):
warnings.warn(
"Normally Django will use a connection to the 'postgres' database "
"to avoid running initialization queries against the production "
"database when it's not needed (for example, when running tests). "
"Django was unable to create a connection to the 'postgres' database "
"and will use the default database instead.",
RuntimeWarning
)
settings_dict = self.settings_dict.copy()
settings_dict['NAME'] = settings.DATABASES[DEFAULT_DB_ALIAS]['NAME']
nodb_connection = self.__class__(
self.settings_dict.copy(),
alias=self.alias,
allow_thread_sharing=False)
return nodb_connection
@cached_property
def psycopg2_version(self):
return PSYCOPG2_VERSION
@cached_property
def pg_version(self):
with self.temporary_connection():
return get_version(self.connection)
| bsd-3-clause |
tiagofrepereira2012/tensorflow | tensorflow/contrib/rnn/python/kernel_tests/rnn_cell_test.py | 20 | 61091 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RNN cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.contrib.rnn.python.ops import rnn_cell as contrib_rnn_cell
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import nest
class RNNCellTest(test.TestCase):
def testCoupledInputForgetGateLSTMCell(self):
with self.test_session() as sess:
num_units = 2
state_size = num_units * 2
batch_size = 3
input_size = 4
expected_output = np.array(
[[0.121753, 0.121753],
[0.103349, 0.103349],
[0.100178, 0.100178]],
dtype=np.float32)
expected_state = np.array(
[[0.137523, 0.137523, 0.121753, 0.121753],
[0.105450, 0.105450, 0.103349, 0.103349],
[0.100742, 0.100742, 0.100178, 0.100178]],
dtype=np.float32)
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size])
output, state = contrib_rnn_cell.CoupledInputForgetGateLSTMCell(
num_units=num_units, forget_bias=1.0, state_is_tuple=False)(x, m)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state], {
x.name:
np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]]),
m.name:
0.1 * np.ones((batch_size, state_size))
})
# This is a smoke test: Only making sure expected values didn't change.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_output)
self.assertAllClose(res[1], expected_state)
def testTimeFreqLSTMCell(self):
with self.test_session() as sess:
num_units = 8
state_size = num_units * 2
batch_size = 3
input_size = 4
feature_size = 2
frequency_skip = 1
num_shifts = (input_size - feature_size) // frequency_skip + 1
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([batch_size, input_size])
m = array_ops.zeros([batch_size, state_size * num_shifts])
output, state = contrib_rnn_cell.TimeFreqLSTMCell(
num_units=num_units,
feature_size=feature_size,
frequency_skip=frequency_skip,
forget_bias=1.0)(x, m)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state], {
x.name:
np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]]),
m.name:
0.1 * np.ones((batch_size, int(state_size * (num_shifts))))
})
self.assertEqual(len(res), 2)
      # The numbers in results were not calculated; this is mostly just a
      # smoke test.
self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts))
self.assertEqual(res[1].shape, (batch_size, state_size * num_shifts))
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
self.assertTrue(
float(np.linalg.norm((res[1][0, :] - res[1][i, :]))) > 1e-6)
def testGridLSTMCell(self):
with self.test_session() as sess:
num_units = 8
batch_size = 3
input_size = 4
feature_size = 2
frequency_skip = 1
num_shifts = int((input_size - feature_size) / frequency_skip + 1)
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.GridLSTMCell(
num_units=num_units,
feature_size=feature_size,
frequency_skip=frequency_skip,
forget_bias=1.0,
num_frequency_blocks=[num_shifts],
couple_input_forget_gates=True,
state_is_tuple=True)
inputs = constant_op.constant(
np.array(
[[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
state_value = constant_op.constant(
0.1 * np.ones(
(batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
init_state = cell.state_tuple_type(
*([state_value, state_value] * num_shifts))
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
self.assertEqual(len(res), 2)
      # The numbers in results were not calculated; this is mostly just a
      # smoke test.
self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts * 2))
for ss in res[1]:
self.assertEqual(ss.shape, (batch_size, num_units))
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
self.assertTrue(
float(
np.linalg.norm((res[1].state_f00_b00_c[0, :] - res[1]
.state_f00_b00_c[i, :]))) > 1e-6)
def testGridLSTMCellWithFrequencyBlocks(self):
with self.test_session() as sess:
num_units = 8
batch_size = 3
feature_size = 2
frequency_skip = 1
num_frequency_blocks = [1, 1]
total_blocks = num_frequency_blocks[0] + num_frequency_blocks[1]
start_freqindex_list = [0, 2]
end_freqindex_list = [2, 4]
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.GridLSTMCell(
num_units=num_units,
feature_size=feature_size,
frequency_skip=frequency_skip,
forget_bias=1.0,
num_frequency_blocks=num_frequency_blocks,
start_freqindex_list=start_freqindex_list,
end_freqindex_list=end_freqindex_list,
couple_input_forget_gates=True,
state_is_tuple=True)
inputs = constant_op.constant(
np.array(
[[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
state_value = constant_op.constant(
0.1 * np.ones(
(batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
init_state = cell.state_tuple_type(
*([state_value, state_value] * total_blocks))
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
self.assertEqual(len(res), 2)
      # The numbers in results were not calculated; this is mostly just a
      # smoke test.
self.assertEqual(res[0].shape,
(batch_size, num_units * total_blocks * 2))
for ss in res[1]:
self.assertEqual(ss.shape, (batch_size, num_units))
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
self.assertTrue(
float(
np.linalg.norm((res[1].state_f00_b00_c[0, :] - res[1]
.state_f00_b00_c[i, :]))) > 1e-6)
def testGridLstmCellWithCoupledInputForgetGates(self):
num_units = 2
batch_size = 3
input_size = 4
feature_size = 2
frequency_skip = 1
num_shifts = int((input_size - feature_size) / frequency_skip + 1)
expected_output = np.array(
[[0.416383, 0.416383, 0.403238, 0.403238, 0.524020, 0.524020,
0.565425, 0.565425, 0.557865, 0.557865, 0.609699, 0.609699],
[0.627331, 0.627331, 0.622393, 0.622393, 0.688342, 0.688342,
0.708078, 0.708078, 0.694245, 0.694245, 0.715171, 0.715171],
[0.711050, 0.711050, 0.709197, 0.709197, 0.736533, 0.736533,
0.744264, 0.744264, 0.737390, 0.737390, 0.745250, 0.745250]],
dtype=np.float32)
expected_state = np.array(
[[0.625556, 0.625556, 0.416383, 0.416383, 0.759134, 0.759134,
0.524020, 0.524020, 0.798795, 0.798795, 0.557865, 0.557865],
[0.875488, 0.875488, 0.627331, 0.627331, 0.936432, 0.936432,
0.688342, 0.688342, 0.941961, 0.941961, 0.694245, 0.694245],
[0.957327, 0.957327, 0.711050, 0.711050, 0.979522, 0.979522,
0.736533, 0.736533, 0.980245, 0.980245, 0.737390, 0.737390]],
dtype=np.float32)
for state_is_tuple in [False, True]:
with self.test_session() as sess:
with variable_scope.variable_scope(
"state_is_tuple" + str(state_is_tuple),
initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.GridLSTMCell(
num_units=num_units,
feature_size=feature_size,
frequency_skip=frequency_skip,
forget_bias=1.0,
num_frequency_blocks=[num_shifts],
couple_input_forget_gates=True,
state_is_tuple=state_is_tuple)
inputs = constant_op.constant(
np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
if state_is_tuple:
state_value = constant_op.constant(
0.1 * np.ones(
(batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
init_state = cell.state_tuple_type(
*([state_value, state_value] * num_shifts))
else:
init_state = constant_op.constant(
0.1 * np.ones(
(batch_size, num_units * num_shifts * 2), dtype=np.float32),
dtype=dtypes.float32)
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
        # This is a smoke test: Only making sure expected values didn't change.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_output)
if not state_is_tuple:
self.assertAllClose(res[1], expected_state)
else:
# There should be num_shifts * 2 states in the tuple.
self.assertEqual(len(res[1]), num_shifts * 2)
# Checking the shape of each state to be batch_size * num_units
for ss in res[1]:
self.assertEqual(ss.shape[0], batch_size)
self.assertEqual(ss.shape[1], num_units)
self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
def testBidirectionGridLSTMCell(self):
with self.test_session() as sess:
num_units = 2
batch_size = 3
input_size = 4
feature_size = 2
frequency_skip = 1
num_shifts = int((input_size - feature_size) / frequency_skip + 1)
expected_output = np.array(
[[0.464130, 0.464130, 0.419165, 0.419165, 0.593283, 0.593283,
0.738350, 0.738350, 0.661638, 0.661638, 0.866774, 0.866774,
0.520789, 0.520789, 0.476968, 0.476968, 0.604341, 0.604341,
0.760207, 0.760207, 0.635773, 0.635773, 0.850218, 0.850218],
[0.669636, 0.669636, 0.628966, 0.628966, 0.736057, 0.736057,
0.895927, 0.895927, 0.755559, 0.755559, 0.954359, 0.954359,
0.692621, 0.692621, 0.652363, 0.652363, 0.737517, 0.737517,
0.899558, 0.899558, 0.745984, 0.745984, 0.946840, 0.946840],
[0.751109, 0.751109, 0.711716, 0.711716, 0.778357, 0.778357,
0.940779, 0.940779, 0.784530, 0.784530, 0.980604, 0.980604,
0.759940, 0.759940, 0.720652, 0.720652, 0.778552, 0.778552,
0.941606, 0.941606, 0.781035, 0.781035, 0.977731, 0.977731]],
dtype=np.float32)
expected_state = np.array(
[[0.710660, 0.710660, 0.464130, 0.464130, 0.877293, 0.877293,
0.593283, 0.593283, 0.958505, 0.958505, 0.661638, 0.661638,
0.785405, 0.785405, 0.520789, 0.520789, 0.890836, 0.890836,
0.604341, 0.604341, 0.928512, 0.928512, 0.635773, 0.635773],
[0.967579, 0.967579, 0.669636, 0.669636, 1.038811, 1.038811,
0.736057, 0.736057, 1.058201, 1.058201, 0.755559, 0.755559,
0.993088, 0.993088, 0.692621, 0.692621, 1.040288, 1.040288,
0.737517, 0.737517, 1.048773, 1.048773, 0.745984, 0.745984],
[1.053842, 1.053842, 0.751109, 0.751109, 1.079919, 1.079919,
0.778357, 0.778357, 1.085620, 1.085620, 0.784530, 0.784530,
1.062455, 1.062455, 0.759940, 0.759940, 1.080101, 1.080101,
0.778552, 0.778552, 1.082402, 1.082402, 0.781035, 0.781035]],
dtype=np.float32)
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.BidirectionalGridLSTMCell(
num_units=num_units,
feature_size=feature_size,
share_time_frequency_weights=True,
frequency_skip=frequency_skip,
forget_bias=1.0,
num_frequency_blocks=[num_shifts])
inputs = constant_op.constant(
np.array([[1.0, 1.1, 1.2, 1.3],
[2.0, 2.1, 2.2, 2.3],
[3.0, 3.1, 3.2, 3.3]],
dtype=np.float32),
dtype=dtypes.float32)
state_value = constant_op.constant(
0.1 * np.ones(
(batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
init_state = cell.state_tuple_type(
*([state_value, state_value] * num_shifts * 2))
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
self.assertEqual(len(res), 2)
      # The numbers in results were not calculated; this is mostly just a
      # smoke test.
self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts * 4))
self.assertAllClose(res[0], expected_output)
# There should be num_shifts * 4 states in the tuple.
self.assertEqual(len(res[1]), num_shifts * 4)
# Checking the shape of each state to be batch_size * num_units
for ss in res[1]:
self.assertEqual(ss.shape[0], batch_size)
self.assertEqual(ss.shape[1], num_units)
self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
def testBidirectionGridLSTMCellWithSliceOffset(self):
with self.test_session() as sess:
num_units = 2
batch_size = 3
input_size = 4
feature_size = 2
frequency_skip = 1
num_shifts = int((input_size - feature_size) / frequency_skip + 1)
expected_output = np.array(
[[0.464130, 0.464130, 0.419165, 0.419165, 0.593283, 0.593283,
0.738350, 0.738350, 0.661638, 0.661638, 0.866774, 0.866774,
0.322645, 0.322645, 0.276068, 0.276068, 0.584654, 0.584654,
0.690292, 0.690292, 0.640446, 0.640446, 0.840071, 0.840071],
[0.669636, 0.669636, 0.628966, 0.628966, 0.736057, 0.736057,
0.895927, 0.895927, 0.755559, 0.755559, 0.954359, 0.954359,
0.493625, 0.493625, 0.449236, 0.449236, 0.730828, 0.730828,
0.865996, 0.865996, 0.749429, 0.749429, 0.944958, 0.944958],
[0.751109, 0.751109, 0.711716, 0.711716, 0.778357, 0.778357,
0.940779, 0.940779, 0.784530, 0.784530, 0.980604, 0.980604,
0.608587, 0.608587, 0.566683, 0.566683, 0.777345, 0.777345,
0.925820, 0.925820, 0.782597, 0.782597, 0.976858, 0.976858]],
dtype=np.float32)
expected_state = np.array(
[[0.710660, 0.710660, 0.464130, 0.464130, 0.877293, 0.877293,
0.593283, 0.593283, 0.958505, 0.958505, 0.661638, 0.661638,
0.516575, 0.516575, 0.322645, 0.322645, 0.866628, 0.866628,
0.584654, 0.584654, 0.934002, 0.934002, 0.640446, 0.640446],
[0.967579, 0.967579, 0.669636, 0.669636, 1.038811, 1.038811,
0.736057, 0.736057, 1.058201, 1.058201, 0.755559, 0.755559,
0.749836, 0.749836, 0.493625, 0.493625, 1.033488, 1.033488,
0.730828, 0.730828, 1.052186, 1.052186, 0.749429, 0.749429],
[1.053842, 1.053842, 0.751109, 0.751109, 1.079919, 1.079919,
0.778357, 0.778357, 1.085620, 1.085620, 0.784530, 0.784530,
0.895999, 0.895999, 0.608587, 0.608587, 1.078978, 1.078978,
0.777345, 0.777345, 1.083843, 1.083843, 0.782597, 0.782597]],
dtype=np.float32)
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.BidirectionalGridLSTMCell(
num_units=num_units,
feature_size=feature_size,
share_time_frequency_weights=True,
frequency_skip=frequency_skip,
forget_bias=1.0,
num_frequency_blocks=[num_shifts],
backward_slice_offset=1)
inputs = constant_op.constant(
np.array([[1.0, 1.1, 1.2, 1.3],
[2.0, 2.1, 2.2, 2.3],
[3.0, 3.1, 3.2, 3.3]],
dtype=np.float32),
dtype=dtypes.float32)
state_value = constant_op.constant(
0.1 * np.ones(
(batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
init_state = cell.state_tuple_type(
*([state_value, state_value] * num_shifts * 2))
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
self.assertEqual(len(res), 2)
      # The numbers in results were not calculated; this is mostly just a
      # smoke test.
self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts * 4))
self.assertAllClose(res[0], expected_output)
# There should be num_shifts * 4 states in the tuple.
self.assertEqual(len(res[1]), num_shifts * 4)
# Checking the shape of each state to be batch_size * num_units
for ss in res[1]:
self.assertEqual(ss.shape[0], batch_size)
self.assertEqual(ss.shape[1], num_units)
self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
def testAttentionCellWrapperFailures(self):
with self.assertRaisesRegexp(TypeError,
"The parameter cell is not RNNCell."):
contrib_rnn_cell.AttentionCellWrapper(None, 0)
num_units = 8
for state_is_tuple in [False, True]:
with ops.Graph().as_default():
lstm_cell = rnn_cell.BasicLSTMCell(
num_units, state_is_tuple=state_is_tuple)
with self.assertRaisesRegexp(
ValueError, "attn_length should be greater than zero, got 0"):
contrib_rnn_cell.AttentionCellWrapper(
lstm_cell, 0, state_is_tuple=state_is_tuple)
with self.assertRaisesRegexp(
ValueError, "attn_length should be greater than zero, got -1"):
contrib_rnn_cell.AttentionCellWrapper(
lstm_cell, -1, state_is_tuple=state_is_tuple)
with ops.Graph().as_default():
lstm_cell = rnn_cell.BasicLSTMCell(num_units, state_is_tuple=True)
with self.assertRaisesRegexp(
ValueError, "Cell returns tuple of states, but the flag "
"state_is_tuple is not set. State size is: *"):
contrib_rnn_cell.AttentionCellWrapper(
lstm_cell, 4, state_is_tuple=False)
def testAttentionCellWrapperZeros(self):
num_units = 8
attn_length = 16
batch_size = 3
input_size = 4
for state_is_tuple in [False, True]:
with ops.Graph().as_default():
with self.test_session() as sess:
with variable_scope.variable_scope("state_is_tuple_" + str(
state_is_tuple)):
lstm_cell = rnn_cell.BasicLSTMCell(
num_units, state_is_tuple=state_is_tuple)
cell = contrib_rnn_cell.AttentionCellWrapper(
lstm_cell, attn_length, state_is_tuple=state_is_tuple)
if state_is_tuple:
zeros = array_ops.zeros([batch_size, num_units], dtype=np.float32)
attn_state_zeros = array_ops.zeros(
[batch_size, attn_length * num_units], dtype=np.float32)
zero_state = ((zeros, zeros), zeros, attn_state_zeros)
else:
zero_state = array_ops.zeros(
[
batch_size,
num_units * 2 + attn_length * num_units + num_units
],
dtype=np.float32)
inputs = array_ops.zeros(
[batch_size, input_size], dtype=dtypes.float32)
output, state = cell(inputs, zero_state)
self.assertEquals(output.get_shape(), [batch_size, num_units])
if state_is_tuple:
self.assertEquals(len(state), 3)
self.assertEquals(len(state[0]), 2)
self.assertEquals(state[0][0].get_shape(),
[batch_size, num_units])
self.assertEquals(state[0][1].get_shape(),
[batch_size, num_units])
self.assertEquals(state[1].get_shape(), [batch_size, num_units])
self.assertEquals(state[2].get_shape(),
[batch_size, attn_length * num_units])
tensors = [output] + list(state)
else:
self.assertEquals(state.get_shape(), [
batch_size,
num_units * 2 + num_units + attn_length * num_units
])
tensors = [output, state]
zero_result = sum(
[math_ops.reduce_sum(math_ops.abs(x)) for x in tensors])
sess.run(variables.global_variables_initializer())
self.assertTrue(sess.run(zero_result) < 1e-6)
def testAttentionCellWrapperValues(self):
num_units = 8
attn_length = 16
batch_size = 3
for state_is_tuple in [False, True]:
with ops.Graph().as_default():
with self.test_session() as sess:
with variable_scope.variable_scope("state_is_tuple_" + str(
state_is_tuple)):
lstm_cell = rnn_cell.BasicLSTMCell(
num_units, state_is_tuple=state_is_tuple)
cell = contrib_rnn_cell.AttentionCellWrapper(
lstm_cell, attn_length, state_is_tuple=state_is_tuple)
if state_is_tuple:
zeros = constant_op.constant(
0.1 * np.ones(
[batch_size, num_units], dtype=np.float32),
dtype=dtypes.float32)
attn_state_zeros = constant_op.constant(
0.1 * np.ones(
[batch_size, attn_length * num_units], dtype=np.float32),
dtype=dtypes.float32)
zero_state = ((zeros, zeros), zeros, attn_state_zeros)
else:
zero_state = constant_op.constant(
0.1 * np.ones(
[
batch_size,
num_units * 2 + num_units + attn_length * num_units
],
dtype=np.float32),
dtype=dtypes.float32)
inputs = constant_op.constant(
np.array(
[[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
output, state = cell(inputs, zero_state)
if state_is_tuple:
concat_state = array_ops.concat(
[state[0][0], state[0][1], state[1], state[2]], 1)
else:
concat_state = state
sess.run(variables.global_variables_initializer())
output, state = sess.run([output, concat_state])
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((output[0, :] - output[i, :]))) > 1e-6)
self.assertTrue(
float(np.linalg.norm((state[0, :] - state[i, :]))) > 1e-6)
def _testAttentionCellWrapperCorrectResult(self):
num_units = 4
attn_length = 6
batch_size = 2
expected_output = np.array(
[[1.068372, 0.45496, -0.678277, 0.340538],
[1.018088, 0.378983, -0.572179, 0.268591]],
dtype=np.float32)
expected_state = np.array(
[[0.74946702, 0.34681597, 0.26474735, 1.06485605, 0.38465962,
0.11420801, 0.10272158, 0.30925757, 0.63899988, 0.7181077,
0.47534478, 0.33715725, 0.58086717, 0.49446869, 0.7641536,
0.12814975, 0.92231739, 0.89857256, 0.21889746, 0.38442063,
0.53481543, 0.8876909, 0.45823169, 0.5905602, 0.78038228,
0.56501579, 0.03971386, 0.09870267, 0.8074435, 0.66821432,
0.99211812, 0.12295902, 1.14606023, 0.34370938, -0.79251152,
0.51843399],
[0.5179342, 0.48682183, -0.25426468, 0.96810579, 0.28809637,
0.13607743, -0.11446252, 0.26792109, 0.78047138, 0.63460857,
0.49122369, 0.52007174, 0.73000264, 0.66986895, 0.73576689,
0.86301267, 0.87887371, 0.35185754, 0.93417215, 0.64732957,
0.63173044, 0.66627824, 0.53644657, 0.20477486, 0.98458421,
0.38277245, 0.03746676, 0.92510188, 0.57714164, 0.84932971,
0.36127412, 0.12125921, 1.1362772, 0.34361625, -0.78150457,
0.70582712]],
dtype=np.float32)
seed = 12345
random_seed.set_random_seed(seed)
rnn_scope = None
for state_is_tuple in [False, True]:
with session.Session() as sess:
with variable_scope.variable_scope(
"state_is_tuple", reuse=state_is_tuple,
initializer=init_ops.glorot_uniform_initializer()):
lstm_cell = rnn_cell.BasicLSTMCell(
num_units, state_is_tuple=state_is_tuple)
cell = contrib_rnn_cell.AttentionCellWrapper(
lstm_cell, attn_length, state_is_tuple=state_is_tuple)
# This is legacy behavior to preserve the test. Weight
# sharing no longer works by creating a new RNNCell in the
# same variable scope; so here we restore the scope of the
# RNNCells after the first use below.
if rnn_scope is not None:
(cell._scope, lstm_cell._scope) = rnn_scope # pylint: disable=protected-access,unpacking-non-sequence
zeros1 = random_ops.random_uniform(
(batch_size, num_units), 0.0, 1.0, seed=seed + 1)
zeros2 = random_ops.random_uniform(
(batch_size, num_units), 0.0, 1.0, seed=seed + 2)
zeros3 = random_ops.random_uniform(
(batch_size, num_units), 0.0, 1.0, seed=seed + 3)
attn_state_zeros = random_ops.random_uniform(
(batch_size, attn_length * num_units), 0.0, 1.0, seed=seed + 4)
zero_state = ((zeros1, zeros2), zeros3, attn_state_zeros)
if not state_is_tuple:
zero_state = array_ops.concat([
zero_state[0][0], zero_state[0][1], zero_state[1], zero_state[2]
], 1)
inputs = random_ops.random_uniform(
(batch_size, num_units), 0.0, 1.0, seed=seed + 5)
output, state = cell(inputs, zero_state)
# This is legacy behavior to preserve the test. Weight
# sharing no longer works by creating a new RNNCell in the
# same variable scope; so here we store the scope of the
# first RNNCell for reuse above.
if rnn_scope is None:
rnn_scope = (cell._scope, lstm_cell._scope) # pylint: disable=protected-access
if state_is_tuple:
state = array_ops.concat(
[state[0][0], state[0][1], state[1], state[2]], 1)
sess.run(variables.global_variables_initializer())
self.assertAllClose(sess.run(output), expected_output)
self.assertAllClose(sess.run(state), expected_state)
def testNASCell(self):
num_units = 6
batch_size = 3
expected_output = np.array([[0.576751, 0.576751, 0.576751, 0.576751,
0.576751, 0.576751],
[0.618936, 0.618936, 0.618936, 0.618936,
0.618936, 0.618936],
[0.627393, 0.627393, 0.627393, 0.627393,
0.627393, 0.627393]])
expected_state = np.array([[0.71579772, 0.71579772, 0.71579772, 0.71579772,
0.71579772, 0.71579772, 0.57675087, 0.57675087,
0.57675087, 0.57675087, 0.57675087, 0.57675087],
[0.78041625, 0.78041625, 0.78041625, 0.78041625,
0.78041625, 0.78041625, 0.6189357, 0.6189357,
0.61893570, 0.6189357, 0.6189357, 0.6189357],
[0.79457647, 0.79457647, 0.79457647, 0.79457647,
0.79457653, 0.79457653, 0.62739348, 0.62739348,
0.62739348, 0.62739348, 0.62739348, 0.62739348]
])
with self.test_session() as sess:
with variable_scope.variable_scope(
"nas_test",
initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.NASCell(num_units=num_units)
inputs = constant_op.constant(
np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
state_value = constant_op.constant(
0.1 * np.ones(
(batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
init_state = rnn_cell.LSTMStateTuple(state_value, state_value)
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
      # This is a smoke test: Only making sure expected values didn't change.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_output)
# There should be 2 states in the tuple.
self.assertEqual(len(res[1]), 2)
# Checking the shape of each state to be batch_size * num_units
new_c, new_h = res[1]
self.assertEqual(new_c.shape[0], batch_size)
self.assertEqual(new_c.shape[1], num_units)
self.assertEqual(new_h.shape[0], batch_size)
self.assertEqual(new_h.shape[1], num_units)
self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
def testNASCellProj(self):
num_units = 6
batch_size = 3
num_proj = 5
expected_output = np.array([[1.697418, 1.697418, 1.697418, 1.697418,
1.697418],
[1.840037, 1.840037, 1.840037, 1.840037,
1.840037],
[1.873985, 1.873985, 1.873985, 1.873985,
1.873985]])
expected_state = np.array([[0.69855207, 0.69855207, 0.69855207, 0.69855207,
0.69855207, 0.69855207, 1.69741797, 1.69741797,
1.69741797, 1.69741797, 1.69741797],
[0.77073824, 0.77073824, 0.77073824, 0.77073824,
0.77073824, 0.77073824, 1.84003687, 1.84003687,
1.84003687, 1.84003687, 1.84003687],
[0.78973997, 0.78973997, 0.78973997, 0.78973997,
0.78973997, 0.78973997, 1.87398517, 1.87398517,
1.87398517, 1.87398517, 1.87398517]])
with self.test_session() as sess:
with variable_scope.variable_scope(
"nas_proj_test",
initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.NASCell(num_units=num_units, num_proj=num_proj)
inputs = constant_op.constant(
np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
state_value_c = constant_op.constant(
0.1 * np.ones(
(batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
state_value_h = constant_op.constant(
0.1 * np.ones(
(batch_size, num_proj), dtype=np.float32),
dtype=dtypes.float32)
init_state = rnn_cell.LSTMStateTuple(state_value_c, state_value_h)
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
      # This is a smoke test: Only making sure expected values didn't change.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_output)
# There should be 2 states in the tuple.
self.assertEqual(len(res[1]), 2)
      # Check the shapes: new_c should be batch_size x num_units and
      # new_h should be batch_size x num_proj
new_c, new_h = res[1]
self.assertEqual(new_c.shape[0], batch_size)
self.assertEqual(new_c.shape[1], num_units)
self.assertEqual(new_h.shape[0], batch_size)
self.assertEqual(new_h.shape[1], num_proj)
self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
def testUGRNNCell(self):
num_units = 2
batch_size = 3
expected_state_and_output = np.array(
[[0.13752282, 0.13752282],
[0.10545051, 0.10545051],
[0.10074195, 0.10074195]],
dtype=np.float32)
with self.test_session() as sess:
with variable_scope.variable_scope(
"ugrnn_cell_test",
initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.UGRNNCell(num_units=num_units)
inputs = constant_op.constant(
np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
init_state = constant_op.constant(
0.1 * np.ones(
(batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
# This is a smoke test: Only making sure expected values didn't change.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_state_and_output)
self.assertAllClose(res[1], expected_state_and_output)
def testIntersectionRNNCell(self):
num_units = 2
batch_size = 3
expected_state = np.array(
[[0.13752282, 0.13752282],
[0.10545051, 0.10545051],
[0.10074195, 0.10074195]],
dtype=np.float32)
expected_output = np.array(
[[2.00431061, 2.00431061],
[4.00060606, 4.00060606],
[6.00008249, 6.00008249]],
dtype=np.float32)
with self.test_session() as sess:
with variable_scope.variable_scope(
"intersection_rnn_cell_test",
initializer=init_ops.constant_initializer(0.5)):
cell = contrib_rnn_cell.IntersectionRNNCell(
num_units=num_units, num_in_proj=num_units)
inputs = constant_op.constant(
np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
init_state = constant_op.constant(
0.1 * np.ones(
(batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
output, state = cell(inputs, init_state)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state])
# This is a smoke test: Only making sure expected values didn't change.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_output)
self.assertAllClose(res[1], expected_state)
def testIntersectionRNNCellFailure(self):
num_units = 2
batch_size = 3
cell = contrib_rnn_cell.IntersectionRNNCell(num_units=num_units)
inputs = constant_op.constant(
np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]],
dtype=np.float32),
dtype=dtypes.float32)
init_state = constant_op.constant(
0.1 * np.ones(
(batch_size, num_units), dtype=np.float32),
dtype=dtypes.float32)
with self.assertRaisesRegexp(
ValueError, "Must have input size == output size for "
"Intersection RNN. To fix, num_in_proj should "
"be set to num_units at cell init."):
cell(inputs, init_state)
def testPhasedLSTMCell(self):
with self.test_session() as sess:
num_units = 2
batch_size = 3
input_size = 4
expected_state_c = np.array(
[[0.00072015, 0.00036633], [0.00083481, 0.00047266],
[0.00085111, 0.00053054]],
dtype=np.float32)
expected_state_h = np.array(
[[0.0005159, 0.00026243], [0.00062958, 0.00035646],
[0.00064732, 0.00040351]],
dtype=np.float32)
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
t = array_ops.zeros([batch_size, 1], dtype=dtypes.float64)
x = array_ops.zeros([batch_size, input_size])
c0 = array_ops.zeros([batch_size, 2])
h0 = array_ops.zeros([batch_size, 2])
state0 = rnn_cell.LSTMStateTuple(c0, h0)
output, state = contrib_rnn_cell.PhasedLSTMCell(num_units=num_units)(
(t, x), state0)
sess.run([variables.global_variables_initializer()])
res = sess.run([output, state], {
t.name:
np.array([[1.], [2.], [3.]]),
x.name:
np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]]),
})
# This is a smoke test, making sure expected values are unchanged.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], res[1].h)
self.assertAllClose(res[1].c, expected_state_c)
self.assertAllClose(res[1].h, expected_state_h)
def testHighwayWrapper(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"base_cell", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 3])
m = array_ops.zeros([1, 3])
base_cell = rnn_cell.GRUCell(3)
g, m_new = base_cell(x, m)
with variable_scope.variable_scope(
"hw_cell", initializer=init_ops.constant_initializer(0.5)):
hw_cell = contrib_rnn_cell.HighwayWrapper(
rnn_cell.GRUCell(3), carry_bias_init=-100.0)
g_res, m_new_res = hw_cell(x, m)
sess.run([variables.global_variables_initializer()])
res = sess.run([g, g_res, m_new, m_new_res], {
x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.1, 0.1]])
})
# As carry_bias_init is very negative, the carry gate is 'open' and the
# transform gate is 'closed'. This means the output equals the input.
self.assertAllClose(res[1], res[0])
# States are left untouched
self.assertAllClose(res[2], res[3])
def testGLSTMCell(self):
# Ensure that G-LSTM matches LSTM when number_of_groups = 1
batch_size = 2
num_units = 4
number_of_groups = 1
with self.test_session() as sess:
with variable_scope.variable_scope(
"root1", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.ones([batch_size, num_units])
# When number_of_groups = 1, G-LSTM is equivalent to regular LSTM
gcell = contrib_rnn_cell.GLSTMCell(
num_units=num_units, number_of_groups=number_of_groups)
cell = rnn_cell.LSTMCell(num_units=num_units)
self.assertTrue(isinstance(gcell.state_size, tuple))
zero_state = gcell.zero_state(batch_size=batch_size,
dtype=dtypes.float32)
gh, gs = gcell(x, zero_state)
h, g = cell(x, zero_state)
sess.run([variables.global_variables_initializer()])
glstm_result = sess.run([gh, gs])
lstm_result = sess.run([h, g])
self.assertAllClose(glstm_result[0], lstm_result[0], 1e-5)
self.assertAllClose(glstm_result[1], lstm_result[1], 1e-5)
    # Test that G-LSTM subgroups act like the corresponding sub-LSTMs
batch_size = 2
num_units = 4
number_of_groups = 2
with self.test_session() as sess:
with variable_scope.variable_scope(
"root2", initializer=init_ops.constant_initializer(0.5)):
# input for G-LSTM with 2 groups
glstm_input = array_ops.ones([batch_size, num_units])
gcell = contrib_rnn_cell.GLSTMCell(
num_units=num_units, number_of_groups=number_of_groups)
gcell_zero_state = gcell.zero_state(batch_size=batch_size,
dtype=dtypes.float32)
gh, gs = gcell(glstm_input, gcell_zero_state)
# input for LSTM cell simulating single G-LSTM group
lstm_input = array_ops.ones([batch_size, num_units / number_of_groups])
        # Note the division by number_of_groups: this cell simulates a single G-LSTM group
cell = rnn_cell.LSTMCell(num_units=int(num_units / number_of_groups))
cell_zero_state = cell.zero_state(batch_size=batch_size,
dtype=dtypes.float32)
h, g = cell(lstm_input, cell_zero_state)
sess.run([variables.global_variables_initializer()])
[gh_res, h_res] = sess.run([gh, h])
self.assertAllClose(gh_res[:, 0:int(num_units / number_of_groups)],
h_res, 1e-5)
self.assertAllClose(gh_res[:, int(num_units / number_of_groups):],
h_res, 1e-5)
class LayerNormBasicLSTMCellTest(test.TestCase):
# NOTE: all the values in the current test case have been calculated.
def testBasicLSTMCell(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
c0 = array_ops.zeros([1, 2])
h0 = array_ops.zeros([1, 2])
state0 = rnn_cell.LSTMStateTuple(c0, h0)
c1 = array_ops.zeros([1, 2])
h1 = array_ops.zeros([1, 2])
state1 = rnn_cell.LSTMStateTuple(c1, h1)
state = (state0, state1)
single_cell = lambda: contrib_rnn_cell.LayerNormBasicLSTMCell(2)
cell = rnn_cell.MultiRNNCell([single_cell() for _ in range(2)])
g, out_m = cell(x, state)
sess.run([variables.global_variables_initializer()])
res = sess.run([g, out_m], {
x.name: np.array([[1., 1.]]),
c0.name: 0.1 * np.asarray([[0, 1]]),
h0.name: 0.1 * np.asarray([[2, 3]]),
c1.name: 0.1 * np.asarray([[4, 5]]),
h1.name: 0.1 * np.asarray([[6, 7]]),
})
expected_h = np.array([[-0.38079708, 0.38079708]])
expected_state0_c = np.array([[-1.0, 1.0]])
expected_state0_h = np.array([[-0.38079708, 0.38079708]])
expected_state1_c = np.array([[-1.0, 1.0]])
expected_state1_h = np.array([[-0.38079708, 0.38079708]])
actual_h = res[0]
actual_state0_c = res[1][0].c
actual_state0_h = res[1][0].h
actual_state1_c = res[1][1].c
actual_state1_h = res[1][1].h
self.assertAllClose(actual_h, expected_h, 1e-5)
self.assertAllClose(expected_state0_c, actual_state0_c, 1e-5)
self.assertAllClose(expected_state0_h, actual_state0_h, 1e-5)
self.assertAllClose(expected_state1_c, actual_state1_c, 1e-5)
self.assertAllClose(expected_state1_h, actual_state1_h, 1e-5)
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros(
[1, 3]) # Test BasicLSTMCell with input_size != num_units.
c = array_ops.zeros([1, 2])
h = array_ops.zeros([1, 2])
state = rnn_cell.LSTMStateTuple(c, h)
cell = contrib_rnn_cell.LayerNormBasicLSTMCell(2)
g, out_m = cell(x, state)
sess.run([variables.global_variables_initializer()])
res = sess.run([g, out_m], {
x.name: np.array([[1., 1., 1.]]),
c.name: 0.1 * np.asarray([[0, 1]]),
h.name: 0.1 * np.asarray([[2, 3]]),
})
expected_h = np.array([[-0.38079708, 0.38079708]])
expected_c = np.array([[-1.0, 1.0]])
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_h, 1e-5)
self.assertAllClose(res[1].c, expected_c, 1e-5)
self.assertAllClose(res[1].h, expected_h, 1e-5)
def testBasicLSTMCellWithoutNorm(self):
"""Tests that BasicLSTMCell with layer_norm=False."""
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
c0 = array_ops.zeros([1, 2])
h0 = array_ops.zeros([1, 2])
state0 = rnn_cell.LSTMStateTuple(c0, h0)
c1 = array_ops.zeros([1, 2])
h1 = array_ops.zeros([1, 2])
state1 = rnn_cell.LSTMStateTuple(c1, h1)
state = (state0, state1)
single_cell = lambda: contrib_rnn_cell.LayerNormBasicLSTMCell(2, layer_norm=False)
cell = rnn_cell.MultiRNNCell([single_cell() for _ in range(2)])
g, out_m = cell(x, state)
sess.run([variables.global_variables_initializer()])
res = sess.run([g, out_m], {
x.name: np.array([[1., 1.]]),
c0.name: 0.1 * np.asarray([[0, 1]]),
h0.name: 0.1 * np.asarray([[2, 3]]),
c1.name: 0.1 * np.asarray([[4, 5]]),
h1.name: 0.1 * np.asarray([[6, 7]]),
})
expected_h = np.array([[ 0.70230919, 0.72581059]])
expected_state0_c = np.array([[ 0.8020075, 0.89599884]])
expected_state0_h = np.array([[ 0.56668288, 0.60858738]])
expected_state1_c = np.array([[ 1.17500675, 1.26892781]])
expected_state1_h = np.array([[ 0.70230919, 0.72581059]])
actual_h = res[0]
actual_state0_c = res[1][0].c
actual_state0_h = res[1][0].h
actual_state1_c = res[1][1].c
actual_state1_h = res[1][1].h
self.assertAllClose(actual_h, expected_h, 1e-5)
self.assertAllClose(expected_state0_c, actual_state0_c, 1e-5)
self.assertAllClose(expected_state0_h, actual_state0_h, 1e-5)
self.assertAllClose(expected_state1_c, actual_state1_c, 1e-5)
self.assertAllClose(expected_state1_h, actual_state1_h, 1e-5)
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(0.5)) as vs:
x = array_ops.zeros(
[1, 3]) # Test BasicLSTMCell with input_size != num_units.
c = array_ops.zeros([1, 2])
h = array_ops.zeros([1, 2])
state = rnn_cell.LSTMStateTuple(c, h)
cell = contrib_rnn_cell.LayerNormBasicLSTMCell(2, layer_norm=False)
g, out_m = cell(x, state)
sess.run([variables.global_variables_initializer()])
res = sess.run([g, out_m], {
x.name: np.array([[1., 1., 1.]]),
c.name: 0.1 * np.asarray([[0, 1]]),
h.name: 0.1 * np.asarray([[2, 3]]),
})
expected_h = np.array([[ 0.64121795, 0.68166804]])
expected_c = np.array([[ 0.88477188, 0.98103917]])
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_h, 1e-5)
self.assertAllClose(res[1].c, expected_c, 1e-5)
self.assertAllClose(res[1].h, expected_h, 1e-5)
def testBasicLSTMCellWithStateTuple(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
c0 = array_ops.zeros([1, 2])
h0 = array_ops.zeros([1, 2])
state0 = rnn_cell.LSTMStateTuple(c0, h0)
c1 = array_ops.zeros([1, 2])
h1 = array_ops.zeros([1, 2])
state1 = rnn_cell.LSTMStateTuple(c1, h1)
cell = rnn_cell.MultiRNNCell(
[contrib_rnn_cell.LayerNormBasicLSTMCell(2) for _ in range(2)])
h, (s0, s1) = cell(x, (state0, state1))
sess.run([variables.global_variables_initializer()])
res = sess.run([h, s0, s1], {
x.name: np.array([[1., 1.]]),
c0.name: 0.1 * np.asarray([[0, 1]]),
h0.name: 0.1 * np.asarray([[2, 3]]),
c1.name: 0.1 * np.asarray([[4, 5]]),
h1.name: 0.1 * np.asarray([[6, 7]]),
})
expected_h = np.array([[-0.38079708, 0.38079708]])
expected_h0 = np.array([[-0.38079708, 0.38079708]])
expected_c0 = np.array([[-1.0, 1.0]])
expected_h1 = np.array([[-0.38079708, 0.38079708]])
expected_c1 = np.array([[-1.0, 1.0]])
self.assertEqual(len(res), 3)
self.assertAllClose(res[0], expected_h, 1e-5)
self.assertAllClose(res[1].c, expected_c0, 1e-5)
self.assertAllClose(res[1].h, expected_h0, 1e-5)
self.assertAllClose(res[2].c, expected_c1, 1e-5)
self.assertAllClose(res[2].h, expected_h1, 1e-5)
def testBasicLSTMCellWithDropout(self):
def _is_close(x, y, digits=4):
delta = x - y
return delta < 10**(-digits)
def _is_close_in(x, items, digits=4):
for i in items:
if _is_close(x, i, digits):
return True
return False
keep_prob = 0.5
c_high = 2.9998924946
c_low = 0.999983298578
h_low = 0.761552567265
h_high = 0.995008519604
num_units = 5
allowed_low = [2, 3]
with self.test_session() as sess:
with variable_scope.variable_scope(
"other", initializer=init_ops.constant_initializer(1)):
x = array_ops.zeros([1, 5])
c = array_ops.zeros([1, 5])
h = array_ops.zeros([1, 5])
state = rnn_cell.LSTMStateTuple(c, h)
cell = contrib_rnn_cell.LayerNormBasicLSTMCell(
num_units, layer_norm=False, dropout_keep_prob=keep_prob)
g, s = cell(x, state)
sess.run([variables.global_variables_initializer()])
res = sess.run([g, s], {
x.name: np.ones([1, 5]),
c.name: np.ones([1, 5]),
h.name: np.ones([1, 5]),
})
      # The returned tensors have shape [1, n], so take the first
      # (and only) row of each before comparing element-wise.
actual_h = res[0][0]
actual_state_c = res[1].c[0]
actual_state_h = res[1].h[0]
# For each item in `c` (the cell inner state) check that
# it is equal to one of the allowed values `c_high` (not
# dropped out) or `c_low` (dropped out) and verify that the
# corresponding item in `h` (the cell activation) is coherent.
# Count the dropped activations and check that their number is
# coherent with the dropout probability.
dropped_count = 0
self.assertTrue((actual_h == actual_state_h).all())
for citem, hitem in zip(actual_state_c, actual_state_h):
self.assertTrue(_is_close_in(citem, [c_low, c_high]))
if _is_close(citem, c_low):
self.assertTrue(_is_close(hitem, h_low))
dropped_count += 1
elif _is_close(citem, c_high):
self.assertTrue(_is_close(hitem, h_high))
self.assertIn(dropped_count, allowed_low)
def _create_multi_lstm_cell_ops(batch_size, num_units, input_depth,
num_layers, max_time, compiled):
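  """Builds a `num_layers`-deep MultiRNNCell of LSTMCells (each optionally
  wrapped in CompiledWrapper for XLA when `compiled` is True), runs it through
  a time-major dynamic_rnn on random inputs, and returns the outputs, the
  flattened final state, and their gradients w.r.t. the trainable variables,
  the inputs and the initial state.
  """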
with variable_scope.variable_scope(
"root",
initializer=init_ops.random_uniform_initializer(-0.1, 0.1, seed=2)):
inputs = variable_scope.get_variable(
"inputs", initializer=random_ops.random_uniform(
(max_time, batch_size, input_depth), seed=1))
maybe_xla = lambda c: contrib_rnn_cell.CompiledWrapper(c) if compiled else c
cell = rnn_cell.MultiRNNCell(
[maybe_xla(rnn_cell.LSTMCell(num_units)) for _ in range(num_layers)])
initial_state = cell.zero_state(
batch_size=batch_size, dtype=dtypes.float32)
outputs, final_state = rnn.dynamic_rnn(
cell=cell, inputs=inputs, initial_state=initial_state,
time_major=True)
flat_final_state = nest.flatten(final_state)
trainable_variables = variables.trainable_variables()
outputs_grad = gradients_impl.gradients(
[outputs],
trainable_variables + [inputs] + nest.flatten(initial_state))
final_state_grad = gradients_impl.gradients(
flat_final_state,
trainable_variables + [inputs] + nest.flatten(initial_state))
return {"outputs": outputs,
"final_state": flat_final_state,
"outputs_grad": outputs_grad,
"final_state_grad": final_state_grad}
class CompiledWrapperTest(test.TestCase):
def testMultiRNNCellWithLSTMCellAndXLA(self):
# TODO(b/34735319): Don't run this test if XLA is not available.
batch_size = 16
num_units = 32
input_depth = 12
num_layers = 2
max_time = 20
atol = 1e-5
random_seed.set_random_seed(1234)
with self.test_session(graph=ops.Graph()) as sess:
xla_ops = _create_multi_lstm_cell_ops(
batch_size=batch_size, num_units=num_units,
input_depth=input_depth, num_layers=num_layers,
max_time=max_time,
compiled=True)
sess.run([variables.global_variables_initializer()])
xla_results = sess.run(xla_ops)
random_seed.set_random_seed(1234)
with self.test_session(graph=ops.Graph()) as sess:
non_xla_ops = _create_multi_lstm_cell_ops(
batch_size=batch_size, num_units=num_units,
input_depth=input_depth, num_layers=num_layers,
max_time=max_time,
compiled=False)
sess.run([variables.global_variables_initializer()])
non_xla_results = sess.run(non_xla_ops)
self.assertAllClose(
non_xla_results["outputs"], xla_results["outputs"], atol=atol)
for xla_value, non_xla_value in zip(
xla_results["final_state"], non_xla_results["final_state"]):
self.assertAllClose(xla_value, non_xla_value, atol=atol)
for xla_g, non_xla_g in zip(
xla_results["outputs_grad"], non_xla_results["outputs_grad"]):
self.assertAllClose(xla_g, non_xla_g, atol=atol)
for xla_g, non_xla_g in zip(
xla_results["final_state_grad"], non_xla_results["final_state_grad"]):
self.assertAllClose(xla_g, non_xla_g, atol=atol)
def testMultiRNNCellWithStateTuple(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
"root", initializer=init_ops.constant_initializer(0.5)):
x = array_ops.zeros([1, 2])
m_bad = array_ops.zeros([1, 4])
m_good = (array_ops.zeros([1, 2]), array_ops.zeros([1, 2]))
        # Test that a state which is not a tuple is rejected
with self.assertRaisesRegexp(ValueError, "Expected state .* a tuple"):
rnn_cell.MultiRNNCell(
[rnn_cell.GRUCell(2)
for _ in range(2)], state_is_tuple=True)(x, m_bad)
_, ml = rnn_cell.MultiRNNCell(
[rnn_cell.GRUCell(2)
for _ in range(2)], state_is_tuple=True)(x, m_good)
sess.run([variables.global_variables_initializer()])
res = sess.run(ml, {
x.name: np.array([[1., 1.]]),
m_good[0].name: np.array([[0.1, 0.1]]),
m_good[1].name: np.array([[0.1, 0.1]])
})
      # The numbers in the results were not hand-calculated; this is just a
      # smoke test. However, these numbers should match those of
      # the test testMultiRNNCell.
self.assertAllClose(res[0], [[0.175991, 0.175991]])
self.assertAllClose(res[1], [[0.13248, 0.13248]])
class BenchmarkLSTMCellXLA(test.Benchmark):
def benchmarkDynamicRNNWithMultiLSTMCell(self):
num_layers = 3
max_time = 50
print("benchmarkDynamicRNNWithMultiLSTMCell")
print("\t" +
"\t".join(["inter_th", "intra_th",
"batch_size", "num_units", "input_depth", "device",
"compiled", "wall_time"]))
warmup_run = True
for (threads,
device,
num_units,
batch_size,
input_depth,
compiled) in itertools.product(
[{"inter": 0, "intra": 0}, {"inter": 1, "intra": 4}],
["cpu", "gpu"],
[32, 512],
[1, 32, 256],
[32, 512],
[False, True]):
if threads["inter"] != 0:
# We only care about testing inter/intra op limitations on
# CPU with small batch size, to mimic embedded devices.
if device != "cpu" or batch_size != 1:
continue
if device == "cpu" and batch_size > 32:
continue
random_seed.set_random_seed(1234)
config = config_pb2.ConfigProto(
inter_op_parallelism_threads=threads["inter"],
intra_op_parallelism_threads=threads["intra"],
allow_soft_placement=False)
with session.Session(config=config, graph=ops.Graph()) as sess:
with ops.device("/%s:0" % device):
ops_dict = _create_multi_lstm_cell_ops(
batch_size=batch_size, num_units=num_units,
input_depth=input_depth, num_layers=num_layers,
max_time=max_time,
compiled=compiled)
sess.run([variables.global_variables_initializer()])
all_ops = nest.flatten(ops_dict.values())
all_ops_group = control_flow_ops.group(*all_ops)
name_suffix = (
"inter_th_%d_intra_th_%d_bs_%d_units_%d_inputdepth_%d"
"_device_%s_xla_%s" % (
threads["inter"], threads["intra"],
batch_size, num_units, input_depth, device, compiled))
if warmup_run:
self.run_op_benchmark(
sess, all_ops_group, min_iters=30, name="ignore_warmup")
warmup_run = False
benchmark_results = self.run_op_benchmark(
sess, all_ops_group, min_iters=50,
name="benchmarkDynamicRNNWithMultiLSTMCell_%s" % name_suffix)
print("\t" +
"\t".join(["%s" % x for x in [
threads["inter"], threads["intra"],
batch_size, num_units, input_depth, device, compiled,
benchmark_results["wall_time"]]]))
if __name__ == "__main__":
test.main()
| apache-2.0 |
mjtamlyn/django | django/utils/encoding.py | 38 | 8986 | import codecs
import datetime
import locale
from decimal import Decimal
from urllib.parse import quote
from django.utils import six
from django.utils.functional import Promise
class DjangoUnicodeDecodeError(UnicodeDecodeError):
def __init__(self, obj, *args):
self.obj = obj
super().__init__(*args)
def __str__(self):
return '%s. You passed in %r (%s)' % (super().__str__(), self.obj, type(self.obj))
# For backwards compatibility. (originally in Django, then added to six 1.9)
python_2_unicode_compatible = six.python_2_unicode_compatible
def smart_text(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Return a string representing 's'. Treat bytestrings using the 'encoding'
codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_text(s, encoding, strings_only, errors)
_PROTECTED_TYPES = (
type(None), int, float, Decimal, datetime.datetime, datetime.date, datetime.time,
)
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_text(strings_only=True).
"""
return isinstance(obj, _PROTECTED_TYPES)
def force_text(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_text, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first for performance reasons.
if issubclass(type(s), str):
return s
if strings_only and is_protected_type(s):
return s
try:
if isinstance(s, bytes):
s = str(s, encoding, errors)
else:
s = str(s)
except UnicodeDecodeError as e:
raise DjangoUnicodeDecodeError(s, *e.args)
return s
def smart_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Return a bytestring version of 's', encoded as specified in 'encoding'.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_bytes(s, encoding, strings_only, errors)
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_bytes, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first for performance reasons.
if isinstance(s, bytes):
if encoding == 'utf-8':
return s
else:
return s.decode('utf-8', errors).encode(encoding, errors)
if strings_only and is_protected_type(s):
return s
if isinstance(s, memoryview):
return bytes(s)
if isinstance(s, Promise) or not isinstance(s, str):
return str(s).encode(encoding, errors)
else:
return s.encode(encoding, errors)
smart_str = smart_text
force_str = force_text
smart_str.__doc__ = """
Apply smart_text in Python 3 and smart_bytes in Python 2.
This is suitable for writing to sys.stdout (for instance).
"""
force_str.__doc__ = """
Apply force_text in Python 3 and force_bytes in Python 2.
"""
def iri_to_uri(iri):
"""
Convert an Internationalized Resource Identifier (IRI) portion to a URI
portion that is suitable for inclusion in a URL.
This is the algorithm from section 3.1 of RFC 3987, slightly simplified
since the input is assumed to be a string rather than an arbitrary byte
stream.
Take an IRI (string or UTF-8 bytes, e.g. '/I ♥ Django/' or
b'/I \xe2\x99\xa5 Django/') and return a string containing the encoded
result with ASCII chars only (e.g. '/I%20%E2%99%A5%20Django/').
"""
# The list of safe characters here is constructed from the "reserved" and
# "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986:
# reserved = gen-delims / sub-delims
# gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
# Of the unreserved characters, urllib.quote already considers all but
# the ~ safe.
# The % character is also added to the list of safe characters here, as the
# end of section 3.1 of RFC 3987 specifically mentions that % must not be
# converted.
if iri is None:
return iri
elif isinstance(iri, Promise):
iri = str(iri)
return quote(iri, safe="/#%[]=:;$&()+,!?*@'~")
# List of byte values that uri_to_iri() decodes from percent encoding.
# First, the unreserved characters from RFC 3986:
_ascii_ranges = [[45, 46, 95, 126], range(65, 91), range(97, 123)]
_hextobyte = {
(fmt % char).encode(): bytes((char,))
for ascii_range in _ascii_ranges
for char in ascii_range
for fmt in ['%02x', '%02X']
}
# And then everything above 128, because bytes ≥ 128 are part of multibyte
# unicode characters.
_hexdig = '0123456789ABCDEFabcdef'
_hextobyte.update({
(a + b).encode(): bytes.fromhex(a + b)
for a in _hexdig[8:] for b in _hexdig
})
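# For illustration: _hextobyte[b'7e'] == b'~' (an unreserved character) and
# _hextobyte[b'c3'] == b'\xc3' (a lead byte of a multibyte UTF-8 sequence),
# while percent-encodings of reserved characters such as %2F are not in the
# table, so uri_to_iri() leaves them encoded.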
def uri_to_iri(uri):
"""
    Convert a Uniform Resource Identifier (URI) into an Internationalized
    Resource Identifier (IRI).
    This is the algorithm from section 3.2 of RFC 3987, excluding step 4.
    Take a URI in ASCII bytes (e.g. '/I%20%E2%99%A5%20Django/') and return
a string containing the encoded result (e.g. '/I%20♥%20Django/').
"""
if uri is None:
return uri
uri = force_bytes(uri)
    # Fast selective unquote: First, split on '%' and then starting with the
# second block, decode the first 2 bytes if they represent a hex code to
# decode. The rest of the block is the part after '%AB', not containing
# any '%'. Add that to the output without further processing.
bits = uri.split(b'%')
if len(bits) == 1:
iri = uri
else:
parts = [bits[0]]
append = parts.append
hextobyte = _hextobyte
for item in bits[1:]:
hex = item[:2]
if hex in hextobyte:
append(hextobyte[item[:2]])
append(item[2:])
else:
append(b'%')
append(item)
iri = b''.join(parts)
return repercent_broken_unicode(iri).decode()
def escape_uri_path(path):
"""
Escape the unsafe characters from the path portion of a Uniform Resource
Identifier (URI).
"""
# These are the "reserved" and "unreserved" characters specified in
# sections 2.2 and 2.3 of RFC 2396:
# reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" | "$" | ","
# unreserved = alphanum | mark
# mark = "-" | "_" | "." | "!" | "~" | "*" | "'" | "(" | ")"
# The list of safe characters here is constructed subtracting ";", "=",
# and "?" according to section 3.3 of RFC 2396.
# The reason for not subtracting and escaping "/" is that we are escaping
# the entire path, not a path segment.
return quote(path, safe="/:@&+$,-_.!~*'()")
def repercent_broken_unicode(path):
"""
As per section 3.2 of RFC 3987, step three of converting a URI into an IRI,
repercent-encode any octet produced that is not part of a strictly legal
UTF-8 octet sequence.
"""
try:
path.decode()
except UnicodeDecodeError as e:
repercent = quote(path[e.start:e.end], safe=b"/#%[]=:;$&()+,!?*@'~")
path = repercent_broken_unicode(
path[:e.start] + force_bytes(repercent) + path[e.end:])
return path
def filepath_to_uri(path):
"""Convert a file system path to a URI portion that is suitable for
inclusion in a URL.
Encode certain chars that would normally be recognized as special chars
for URIs. Do not encode the ' character, as it is a valid character
within URIs. See the encodeURIComponent() JavaScript function for details.
"""
if path is None:
return path
# I know about `os.sep` and `os.altsep` but I want to leave
# some flexibility for hardcoding separators.
return quote(path.replace("\\", "/"), safe="/~!*()'")
def get_system_encoding():
"""
    The encoding of the default system locale. Fallback to 'ascii' if the
    encoding is unsupported by Python or could not be determined. See tickets
    #10335 and #5846.
"""
try:
encoding = locale.getdefaultlocale()[1] or 'ascii'
codecs.lookup(encoding)
except Exception:
encoding = 'ascii'
return encoding
DEFAULT_LOCALE_ENCODING = get_system_encoding()
| bsd-3-clause |
Laurawly/tvm-1 | tests/python/contrib/test_thrust.py | 1 | 5987 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
from tvm.topi.cuda import stable_sort_by_key_thrust
from tvm.topi.cuda.scan import exclusive_scan, scan_thrust, schedule_scan
from tvm.contrib.thrust import can_use_thrust, can_use_rocthrust
import numpy as np
thrust_check_func = {"cuda": can_use_thrust, "rocm": can_use_rocthrust}
def test_stable_sort_by_key():
size = 6
keys = te.placeholder((size,), name="keys", dtype="int32")
values = te.placeholder((size,), name="values", dtype="int32")
keys_out, values_out = stable_sort_by_key_thrust(keys, values)
for target in ["cuda", "rocm"]:
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
continue
with tvm.target.Target(target + " -libs=thrust") as tgt:
if not thrust_check_func[target](tgt, "tvm.contrib.thrust.stable_sort_by_key"):
print("skip because thrust is not enabled...")
return
ctx = tvm.context(target, 0)
s = te.create_schedule([keys_out.op, values_out.op])
f = tvm.build(s, [keys, values, keys_out, values_out], target)
keys_np = np.array([1, 4, 2, 8, 2, 7], np.int32)
values_np = np.random.randint(0, 10, size=(size,)).astype(np.int32)
keys_np_out = np.zeros(keys_np.shape, np.int32)
values_np_out = np.zeros(values_np.shape, np.int32)
keys_in = tvm.nd.array(keys_np, ctx)
values_in = tvm.nd.array(values_np, ctx)
keys_out = tvm.nd.array(keys_np_out, ctx)
values_out = tvm.nd.array(values_np_out, ctx)
f(keys_in, values_in, keys_out, values_out)
ref_keys_out = np.sort(keys_np)
ref_values_out = np.array([values_np[i] for i in np.argsort(keys_np)])
tvm.testing.assert_allclose(keys_out.asnumpy(), ref_keys_out, rtol=1e-5)
tvm.testing.assert_allclose(values_out.asnumpy(), ref_values_out, rtol=1e-5)
def test_exclusive_scan():
for target in ["cuda", "rocm"]:
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
continue
with tvm.target.Target(target + " -libs=thrust") as tgt:
if not thrust_check_func[target](tgt, "tvm.contrib.thrust.sum_scan"):
print("skip because thrust is not enabled...")
return
for ishape in [(10,), (10, 10), (10, 10, 10)]:
values = te.placeholder(ishape, name="values", dtype="int32")
scan, reduction = exclusive_scan(values, return_reduction=True)
s = schedule_scan([scan, reduction])
ctx = tvm.context(target, 0)
f = tvm.build(s, [values, scan, reduction], target)
values_np = np.random.randint(0, 10, size=ishape).astype(np.int32)
values_np_out = np.zeros(values_np.shape, np.int32)
if len(ishape) == 1:
reduction_shape = ()
else:
reduction_shape = ishape[:-1]
reduction_np_out = np.zeros(reduction_shape, np.int32)
values_in = tvm.nd.array(values_np, ctx)
values_out = tvm.nd.array(values_np_out, ctx)
reduction_out = tvm.nd.array(reduction_np_out, ctx)
f(values_in, values_out, reduction_out)
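                # Reference result: an exclusive scan is the inclusive
                # cumulative sum shifted by one element, i.e. cumsum(x) - x.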
ref_values_out = np.cumsum(values_np, axis=-1, dtype="int32") - values_np
tvm.testing.assert_allclose(values_out.asnumpy(), ref_values_out, rtol=1e-5)
ref_reduction_out = np.sum(values_np, axis=-1)
tvm.testing.assert_allclose(reduction_out.asnumpy(), ref_reduction_out, rtol=1e-5)
def test_inclusive_scan():
out_dtype = "int64"
for target in ["cuda", "rocm"]:
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
continue
with tvm.target.Target(target + " -libs=thrust") as tgt:
if not thrust_check_func[target](tgt, "tvm.contrib.thrust.sum_scan"):
print("skip because thrust is not enabled...")
return
for ishape in [(10,), (10, 10)]:
values = te.placeholder(ishape, name="values", dtype="int32")
scan = scan_thrust(values, out_dtype, exclusive=False)
s = tvm.te.create_schedule([scan.op])
ctx = tvm.context(target, 0)
f = tvm.build(s, [values, scan], target)
values_np = np.random.randint(0, 10, size=ishape).astype(np.int32)
values_np_out = np.zeros(values_np.shape, out_dtype)
values_in = tvm.nd.array(values_np, ctx)
values_out = tvm.nd.array(values_np_out, ctx)
f(values_in, values_out)
ref_values_out = np.cumsum(values_np, axis=-1, dtype=out_dtype)
tvm.testing.assert_allclose(values_out.asnumpy(), ref_values_out, rtol=1e-5)
if __name__ == "__main__":
test_stable_sort_by_key()
test_exclusive_scan()
test_inclusive_scan()
| apache-2.0 |
ynikulin/tapp | tapp/core/data_flow/mq/rabbitmq.py | 1 | 2524 | from pika.adapters.tornado_connection import TornadoConnection
from pika import ConnectionParameters
from pika import PlainCredentials
from pika import BasicProperties
import tornado.ioloop
import tornado.web
class TornadoQueueConnection(object):
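    """Asynchronous RabbitMQ publisher built on pika's TornadoConnection.
    The connection and channel are (re)established on the Tornado IOLoop, and
    publisher confirms are enabled so that each publish() callback is invoked
    with True or False once the broker acks or nacks the delivery.
    """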
def __init__(self, host, port, virtual_host, user, password):
self._parameters = ConnectionParameters(
host=host,
port=port,
virtual_host=virtual_host,
credentials=PlainCredentials(user, password)
)
self._connection = None
self._channel = None
self.ioloop = tornado.ioloop.IOLoop.instance()
self.ioloop.add_timeout(0, self._connect)
self._delivery_tag = 0
self._confirmation_callbacks = {}
def publish(self, routing_key, body, callback=lambda x: x, exchange='', headers=''):
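        """Publish `body` to `exchange` with `routing_key`.
        If the connection or channel is not ready, trigger a (re)connect and
        report failure via `callback(False)`; otherwise the message is sent
        and `callback` later receives the broker's confirmation result.
        """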
properties = BasicProperties(content_type='text/plain')
        if self._connection is None or self._connection.is_closed:
            # The connection is not ready: start (re)connecting and report failure.
            self._connect()
            callback(False)
            return
        if self._channel is None or self._channel.is_closed:
            # The channel is not ready: reopen it and report failure.
            self._open_channel()
            callback(False)
            return
self._channel.basic_publish(exchange, routing_key, body, properties)
self._delivery_tag += 1
self._confirmation_callbacks[self._delivery_tag] = callback
def _on_delivery_confirmation(self, method_frame):
confirmation_type = method_frame.method.NAME.split('.')[1].lower()
tag = method_frame.method.delivery_tag
if confirmation_type == 'ack':
success = True
else:
success = False
callback = self._confirmation_callbacks[tag]
del self._confirmation_callbacks[tag]
callback(success)
def close(self):
self._connection.close()
def _connect(self):
self.connection = TornadoConnection(
self._parameters,
on_open_callback=self._on_connected,
stop_ioloop_on_close=False,
)
def _on_connected(self, connection):
self._connection = connection
self._connection.add_on_close_callback(self._on_connection_closed)
self._open_channel()
def _on_connection_closed(self, method_frame):
self._connection = None
self._connect()
def _open_channel(self):
self.connection.channel(self._on_channel_open)
def _on_channel_open(self, channel):
self._channel = channel
self._channel.confirm_delivery(self._on_delivery_confirmation)
| apache-2.0 |
BV-DR/foamBazar | pythonScripts/foamStarForces.py | 1 | 1326 | #!/usr/bin/env python
"""
Plot force output for foamStar post-processing output files
"""
import pandas as pd
import matplotlib.pyplot as plt
try :
from droppy.Reader import dfRead
except ImportError as e:
print("Can not load TimeSignals, please add 'droppy' (from repo https://github.com/BV-DR/droppy) to your PYTHON_PATH")
raise Exception(e)
if __name__ == "__main__" :
"""
Example of use :
FoamStarForces forces.dat #->plot all the forces components
FoamStarForces forces.dat -indexName Fx Fy #->plot the selected components, based on labels
FoamStarForces forces.dat -index 0 1 #->plot the selected components, based on indexes
"""
import argparse
parser = argparse.ArgumentParser(description='foamStar forces plot')
parser.add_argument( "forceFile" )
parser.add_argument('-indexName', nargs='+', type = str , help='Index to plot' )
parser.add_argument('-index', nargs='+', type = int , help='Index to plot' )
args = parser.parse_args()
a = dfRead( args.forceFile , reader = "openFoamReader" , field = "total")
a.to_csv(args.forceFile.split('.')[0]+'.csv',sep=';')
#if args.indexName :a[args.indexName].plot()
#elif args.index : a.iloc[:,args.index].plot()
#else: a.plot()
#plt.show()
| gpl-3.0 |
palica/efikamx-smartbook | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
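        # Note: this keeps a running approximation of the mean rather than an
        # exact arithmetic average over all samples.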
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
| gpl-2.0 |
marcore/edx-platform | common/djangoapps/student/tests/test_parental_controls.py | 155 | 3850 | """Unit tests for parental controls."""
import datetime
from django.test import TestCase
from django.test.utils import override_settings
from student.models import UserProfile
from student.tests.factories import UserFactory
class ProfileParentalControlsTest(TestCase):
"""Unit tests for requires_parental_consent."""
password = "test"
def setUp(self):
super(ProfileParentalControlsTest, self).setUp()
self.user = UserFactory.create(password=self.password)
self.profile = UserProfile.objects.get(id=self.user.id)
def set_year_of_birth(self, year_of_birth):
"""
Helper method that creates a mock profile for the specified user.
"""
self.profile.year_of_birth = year_of_birth
self.profile.save()
def test_no_year_of_birth(self):
"""Verify the behavior for users with no specified year of birth."""
self.assertTrue(self.profile.requires_parental_consent())
self.assertTrue(self.profile.requires_parental_consent(default_requires_consent=True))
self.assertFalse(self.profile.requires_parental_consent(default_requires_consent=False))
@override_settings(PARENTAL_CONSENT_AGE_LIMIT=None)
def test_no_parental_controls(self):
"""Verify the behavior for all users when parental controls are not enabled."""
self.assertFalse(self.profile.requires_parental_consent())
self.assertFalse(self.profile.requires_parental_consent(default_requires_consent=True))
self.assertFalse(self.profile.requires_parental_consent(default_requires_consent=False))
# Verify that even a child does not require parental consent
current_year = datetime.datetime.now().year
self.set_year_of_birth(current_year - 10)
self.assertFalse(self.profile.requires_parental_consent())
def test_adult_user(self):
"""Verify the behavior for an adult."""
current_year = datetime.datetime.now().year
self.set_year_of_birth(current_year - 20)
self.assertFalse(self.profile.requires_parental_consent())
self.assertTrue(self.profile.requires_parental_consent(age_limit=21))
def test_child_user(self):
"""Verify the behavior for a child."""
current_year = datetime.datetime.now().year
        # Verify for a child born 13 years ago
self.set_year_of_birth(current_year - 13)
self.assertTrue(self.profile.requires_parental_consent())
self.assertTrue(self.profile.requires_parental_consent(date=datetime.date(current_year, 12, 31)))
self.assertFalse(self.profile.requires_parental_consent(date=datetime.date(current_year + 1, 1, 1)))
# Verify for a child born 14 years ago
self.set_year_of_birth(current_year - 14)
self.assertFalse(self.profile.requires_parental_consent())
self.assertFalse(self.profile.requires_parental_consent(date=datetime.date(current_year, 1, 1)))
def test_profile_image(self):
"""Verify that a profile's image obeys parental controls."""
# Verify that an image cannot be set for a user with no year of birth set
self.profile.profile_image_uploaded_at = datetime.datetime.now()
self.profile.save()
self.assertFalse(self.profile.has_profile_image)
# Verify that an image can be set for an adult user
current_year = datetime.datetime.now().year
self.set_year_of_birth(current_year - 20)
self.profile.profile_image_uploaded_at = datetime.datetime.now()
self.profile.save()
self.assertTrue(self.profile.has_profile_image)
# verify that a user's profile image is removed when they switch to requiring parental controls
self.set_year_of_birth(current_year - 10)
self.profile.save()
self.assertFalse(self.profile.has_profile_image)
| agpl-3.0 |
canaltinova/servo | tests/wpt/web-platform-tests/tools/pywebsocket/test/test_stream_hixie75.py | 496 | 2285 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for stream module."""
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket.stream import StreamHixie75
from test.test_msgutil import _create_request_hixie75
class StreamHixie75Test(unittest.TestCase):
"""A unittest for StreamHixie75 class."""
def test_payload_length(self):
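        # Hixie-75 payload lengths are encoded base-128 (7 bits per byte),
        # most significant group first, with the high bit set on every byte
        # except the last one.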
for length, bytes in ((0, '\x00'), (0x7f, '\x7f'), (0x80, '\x81\x00'),
(0x1234, '\x80\xa4\x34')):
test_stream = StreamHixie75(_create_request_hixie75(bytes))
self.assertEqual(
length, test_stream._read_payload_length_hixie75())
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
| mpl-2.0 |
TathagataChakraborti/resource-conflicts | PLANROB-2015/seq-sat-lama/Python-2.5.2/Tools/versioncheck/pyversioncheck.py | 98 | 4051 | """pyversioncheck - Module to help with checking versions"""
import types
import rfc822
import urllib
import sys
# Verbose options
VERBOSE_SILENT=0 # Single-line reports per package
VERBOSE_NORMAL=1 # Single-line reports per package, more info if outdated
VERBOSE_EACHFILE=2 # Report on each URL checked
VERBOSE_CHECKALL=3 # Check each URL for each package
# Test directory
## urllib bug: _TESTDIR="ftp://ftp.cwi.nl/pub/jack/python/versiontestdir/"
_TESTDIR="http://www.cwi.nl/~jack/versiontestdir/"
def versioncheck(package, url, version, verbose=0):
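    """Check the installed `version` of `package` against the Current-Version
    header served at `url` (a single URL or a list of URLs) and print a report.
    Returns 1 if up to date, 0 if a newer version was found, and -1 if no
    correctly formatted version file could be retrieved.
    """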
ok, newversion, fp = checkonly(package, url, version, verbose)
if verbose > VERBOSE_NORMAL:
return ok
if ok < 0:
print '%s: No correctly formatted current version file found'%(package)
elif ok == 1:
print '%s: up-to-date (version %s)'%(package, version)
else:
print '%s: version %s installed, version %s found:' % \
(package, version, newversion)
if verbose > VERBOSE_SILENT:
while 1:
line = fp.readline()
if not line: break
sys.stdout.write('\t'+line)
return ok
def checkonly(package, url, version, verbose=0):
if verbose >= VERBOSE_EACHFILE:
print '%s:'%package
if type(url) == types.StringType:
ok, newversion, fp = _check1version(package, url, version, verbose)
else:
for u in url:
ok, newversion, fp = _check1version(package, u, version, verbose)
if ok >= 0 and verbose < VERBOSE_CHECKALL:
break
return ok, newversion, fp
def _check1version(package, url, version, verbose=0):
if verbose >= VERBOSE_EACHFILE:
print ' Checking %s'%url
try:
fp = urllib.urlopen(url)
except IOError, arg:
if verbose >= VERBOSE_EACHFILE:
print ' Cannot open:', arg
return -1, None, None
msg = rfc822.Message(fp, seekable=0)
newversion = msg.getheader('current-version')
if not newversion:
if verbose >= VERBOSE_EACHFILE:
print ' No "Current-Version:" header in URL or URL not found'
return -1, None, None
version = version.lower().strip()
newversion = newversion.lower().strip()
if version == newversion:
if verbose >= VERBOSE_EACHFILE:
print ' Version identical (%s)'%newversion
return 1, version, fp
else:
if verbose >= VERBOSE_EACHFILE:
print ' Versions different (installed: %s, new: %s)'% \
(version, newversion)
return 0, newversion, fp
def _test():
print '--- TEST VERBOSE=1'
print '--- Testing existing and identical version file'
versioncheck('VersionTestPackage', _TESTDIR+'Version10.txt', '1.0', verbose=1)
print '--- Testing existing package with new version'
versioncheck('VersionTestPackage', _TESTDIR+'Version11.txt', '1.0', verbose=1)
print '--- Testing package with non-existing version file'
versioncheck('VersionTestPackage', _TESTDIR+'nonexistent.txt', '1.0', verbose=1)
print '--- Test package with 2 locations, first non-existing second ok'
versfiles = [_TESTDIR+'nonexistent.txt', _TESTDIR+'Version10.txt']
versioncheck('VersionTestPackage', versfiles, '1.0', verbose=1)
print '--- TEST VERBOSE=2'
print '--- Testing existing and identical version file'
versioncheck('VersionTestPackage', _TESTDIR+'Version10.txt', '1.0', verbose=2)
print '--- Testing existing package with new version'
versioncheck('VersionTestPackage', _TESTDIR+'Version11.txt', '1.0', verbose=2)
print '--- Testing package with non-existing version file'
versioncheck('VersionTestPackage', _TESTDIR+'nonexistent.txt', '1.0', verbose=2)
print '--- Test package with 2 locations, first non-existing second ok'
versfiles = [_TESTDIR+'nonexistent.txt', _TESTDIR+'Version10.txt']
versioncheck('VersionTestPackage', versfiles, '1.0', verbose=2)
if __name__ == '__main__':
_test()
| mit |
dataversioncontrol/dvc | dvc/command/destroy.py | 1 | 1496 | from __future__ import unicode_literals
import argparse
import dvc.prompt as prompt
import dvc.logger as logger
from dvc.command.base import CmdBase, append_doc_link
from dvc.exceptions import DvcException
class CmdDestroy(CmdBase):
def run_cmd(self):
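        """Confirm with the user (unless --force was given), then destroy the
        repo's DVC files and cache; returns 0 on success, 1 on failure.
        """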
try:
statement = (
"This will destroy all information about your pipelines,"
" all data files, as well as cache in .dvc/cache."
"\n"
"Are you sure you want to continue?"
)
if not self.args.force and not prompt.confirm(statement):
raise DvcException(
"cannot destroy without a confirmation from the user."
" Use '-f' to force."
)
self.repo.destroy()
except Exception:
logger.error("failed to destroy DVC")
return 1
return 0
def add_parser(subparsers, parent_parser):
DESTROY_HELP = "Remove DVC files, local DVC config and data cache."
destroy_parser = subparsers.add_parser(
"destroy",
parents=[parent_parser],
description=append_doc_link(DESTROY_HELP, "destroy"),
help=DESTROY_HELP,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
destroy_parser.add_argument(
"-f",
"--force",
action="store_true",
default=False,
help="Force destruction.",
)
destroy_parser.set_defaults(func=CmdDestroy)
| apache-2.0 |
neloe/EasyZMQ-Cpp | tests/gtest-1.7.0/test/gtest_catch_exceptions_test.py | 2139 | 9901 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's exception catching behavior.
This script invokes gtest_catch_exceptions_test_ and
gtest_catch_exceptions_ex_test_ (programs written with
Google Test) and verifies their output.
"""
__author__ = '[email protected] (Vlad Losev)'
import os
import gtest_test_utils
# Constants.
FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'
# Path to the gtest_catch_exceptions_ex_test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_ex_test_')
# Path to the gtest_catch_exceptions_test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_no_ex_test_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
TEST_LIST = gtest_test_utils.Subprocess(
[EXE_PATH, LIST_TESTS_FLAG], env=environ).output
SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST
if SUPPORTS_SEH_EXCEPTIONS:
BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH], env=environ).output
EX_BINARY_OUTPUT = gtest_test_utils.Subprocess(
[EX_EXE_PATH], env=environ).output
# The tests.
if SUPPORTS_SEH_EXCEPTIONS:
# pylint:disable-msg=C6302
class CatchSehExceptionsTest(gtest_test_utils.TestCase):
"""Tests exception-catching behavior."""
def TestSehExceptions(self, test_output):
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s constructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s destructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUp()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDown()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in the test body'
in test_output)
def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
self.TestSehExceptions(EX_BINARY_OUTPUT)
def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
self.TestSehExceptions(BINARY_OUTPUT)
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
"""Tests C++ exception-catching behavior.
Tests in this test case verify that:
* C++ exceptions are caught and logged as C++ (not SEH) exceptions
    * Exceptions thrown affect the remainder of the test work flow in the
expected manner.
"""
def testCatchesCxxExceptionsInFixtureConstructor(self):
self.assert_('C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s constructor'
in EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInConstructorTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
if ('CxxExceptionInDestructorTest.ThrowsExceptionInDestructor' in
EX_BINARY_OUTPUT):
def testCatchesCxxExceptionsInFixtureDestructor(self):
self.assert_('C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s destructor'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUpTestCase(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in SetUpTestCase()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest constructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest test body '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTearDownTestCase(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in TearDownTestCase()'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUp(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in SetUp()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInSetUpTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
def testCatchesCxxExceptionsInTearDown(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in TearDown()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTearDownTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTestBody(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in the test body'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesNonStdCxxExceptions(self):
self.assert_('Unknown C++ exception thrown in the test body'
in EX_BINARY_OUTPUT)
def testUnhandledCxxExceptionsAbortTheProgram(self):
# Filters out SEH exception tests on Windows. Unhandled SEH exceptions
# cause tests to show pop-up windows there.
    FILTER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
# By default, Google Test doesn't catch the exceptions.
uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
[EX_EXE_PATH,
NO_CATCH_EXCEPTIONS_FLAG,
         FILTER_OUT_SEH_TESTS_FLAG],
env=environ).output
self.assert_('Unhandled C++ exception terminating the program'
in uncaught_exceptions_ex_binary_output)
self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)
if __name__ == '__main__':
gtest_test_utils.Main()
| lgpl-2.1 |
WholeGrainGoats/servo | components/script/dom/bindings/codegen/parser/tests/test_duplicate_qualifiers.py | 241 | 1893 | def WebIDLTest(parser, harness):
threw = False
try:
parser.parse("""
interface DuplicateQualifiers1 {
getter getter byte foo(unsigned long index);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface DuplicateQualifiers2 {
setter setter byte foo(unsigned long index, byte value);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface DuplicateQualifiers3 {
creator creator byte foo(unsigned long index, byte value);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface DuplicateQualifiers4 {
deleter deleter byte foo(unsigned long index);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface DuplicateQualifiers5 {
getter deleter getter byte foo(unsigned long index);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
results = parser.parse("""
interface DuplicateQualifiers6 {
creator setter creator byte foo(unsigned long index, byte value);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
| mpl-2.0 |
CivilHub/CivilHub | articles/migrations/0004_auto_20150319_1520.py | 3 | 2272 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('articles', '0003_article_subtitle'),
]
operations = [
migrations.AddField(
model_name='article',
name='subtitle_cz',
field=models.CharField(default='', max_length=200, null=True, verbose_name='subtitle', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='article',
name='subtitle_de',
field=models.CharField(default='', max_length=200, null=True, verbose_name='subtitle', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='article',
name='subtitle_en',
field=models.CharField(default='', max_length=200, null=True, verbose_name='subtitle', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='article',
name='subtitle_es',
field=models.CharField(default='', max_length=200, null=True, verbose_name='subtitle', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='article',
name='subtitle_fr',
field=models.CharField(default='', max_length=200, null=True, verbose_name='subtitle', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='article',
name='subtitle_it',
field=models.CharField(default='', max_length=200, null=True, verbose_name='subtitle', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='article',
name='subtitle_pl',
field=models.CharField(default='', max_length=200, null=True, verbose_name='subtitle', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='article',
name='subtitle_pt',
field=models.CharField(default='', max_length=200, null=True, verbose_name='subtitle', blank=True),
preserve_default=True,
),
]
| gpl-3.0 |
eezee-it/addons-yelizariev | project_kanban_customer/models.py | 16 | 1384 | from openerp import api, models, fields, SUPERUSER_ID
class project_project(models.Model):
_inherit = 'project.project'
partner_country_image = fields.Binary('Partner\'s country flag', related='partner_id.country_id.image')
partner_country_name = fields.Char('Partner\'s country name', related='partner_id.country_id.name')
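    # Descriptive note (added for clarity): the two overrides below change how
    # records are displayed and searched -- name_get() appends the partner
    # (customer) name to the project name, and name_search() matches the search
    # term against either the partner name or the project name.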
def name_get(self, cr, uid, ids, context=None):
if isinstance(ids, (list, tuple)) and not len(ids):
return []
if isinstance(ids, (long, int)):
ids = [ids]
reads = self.read(cr, uid, ids, ['partner_id', 'name'], context=context)
res = []
for record in reads:
name = record['name'] or ''
partner = record['partner_id'] or ''
if partner:
name = '%s (%s)' % (name, partner[1])
res.append((record['id'], name))
return res
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if not context:
context = {}
if name:
ids = self.search(cr, uid, ['|', ('partner_id', operator, name), ('name', operator, name)] + args, limit=limit, context=context)
else:
ids = self.search(cr, uid, args, limit=limit, context=context)
return self.name_get(cr, uid, ids, context)
| lgpl-3.0 |
valkyriesavage/gasustainability | django/template/loader.py | 149 | 8326 | # Wrapper for loading templates from storage of some sort (e.g. filesystem, database).
#
# This uses the TEMPLATE_LOADERS setting, which is a list of loaders to use.
# Each loader is expected to have this interface:
#
# callable(name, dirs=[])
#
# name is the template name.
# dirs is an optional list of directories to search instead of TEMPLATE_DIRS.
#
# The loader should return a tuple of (template_source, path). The path returned
# might be shown to the user for debugging purposes, so it should identify where
# the template was loaded from.
#
# A loader may return an already-compiled template instead of the actual
# template source. In that case the path returned should be None, since the
# path information is associated with the template during the compilation,
# which has already been done.
#
# Each loader should have an "is_usable" attribute set. This is a boolean that
# specifies whether the loader can be used in this Python installation. Each
# loader is responsible for setting this when it's initialized.
#
# For example, the eggs loader (which is capable of loading templates from
# Python eggs) sets is_usable to False if the "pkg_resources" module isn't
# installed, because pkg_resources is necessary to read eggs.
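#
# For illustration only (this loader is not part of Django): a minimal
# function-based loader satisfying the contract described above could look
# roughly like this, with made-up template data:
#
#     def dict_loader(template_name, template_dirs=None):
#         templates = {'index.html': '<h1>{{ title }}</h1>'}
#         try:
#             return templates[template_name], 'dict:%s' % template_name
#         except KeyError:
#             raise TemplateDoesNotExist(template_name)
#     dict_loader.is_usable = True
#
# TemplateDoesNotExist is imported below; raising it lets find_template() fall
# through to the next configured loader.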
from django.core.exceptions import ImproperlyConfigured
from django.template.base import Origin, Template, Context, TemplateDoesNotExist, add_to_builtins
from django.utils.importlib import import_module
from django.conf import settings
template_source_loaders = None
class BaseLoader(object):
is_usable = False
def __init__(self, *args, **kwargs):
pass
def __call__(self, template_name, template_dirs=None):
return self.load_template(template_name, template_dirs)
def load_template(self, template_name, template_dirs=None):
source, display_name = self.load_template_source(template_name, template_dirs)
origin = make_origin(display_name, self.load_template_source, template_name, template_dirs)
try:
template = get_template_from_string(source, origin, template_name)
return template, None
except TemplateDoesNotExist:
# If compiling the template we found raises TemplateDoesNotExist, back off to
# returning the source and display name for the template we were asked to load.
# This allows for correct identification (later) of the actual template that does
# not exist.
return source, display_name
def load_template_source(self, template_name, template_dirs=None):
"""
Returns a tuple containing the source and origin for the given template
name.
"""
raise NotImplementedError
def reset(self):
"""
Resets any state maintained by the loader instance (e.g., cached
templates or cached loader modules).
"""
pass
class LoaderOrigin(Origin):
def __init__(self, display_name, loader, name, dirs):
super(LoaderOrigin, self).__init__(display_name)
self.loader, self.loadname, self.dirs = loader, name, dirs
def reload(self):
return self.loader(self.loadname, self.dirs)[0]
def make_origin(display_name, loader, name, dirs):
if settings.TEMPLATE_DEBUG and display_name:
return LoaderOrigin(display_name, loader, name, dirs)
else:
return None
def find_template_loader(loader):
if isinstance(loader, (tuple, list)):
loader, args = loader[0], loader[1:]
else:
args = []
if isinstance(loader, basestring):
module, attr = loader.rsplit('.', 1)
try:
mod = import_module(module)
except ImportError, e:
raise ImproperlyConfigured('Error importing template source loader %s: "%s"' % (loader, e))
try:
TemplateLoader = getattr(mod, attr)
except AttributeError, e:
raise ImproperlyConfigured('Error importing template source loader %s: "%s"' % (loader, e))
if hasattr(TemplateLoader, 'load_template_source'):
func = TemplateLoader(*args)
else:
# Try loading module the old way - string is full path to callable
if args:
raise ImproperlyConfigured("Error importing template source loader %s - can't pass arguments to function-based loader." % loader)
func = TemplateLoader
if not func.is_usable:
import warnings
warnings.warn("Your TEMPLATE_LOADERS setting includes %r, but your Python installation doesn't support that type of template loading. Consider removing that line from TEMPLATE_LOADERS." % loader)
return None
else:
return func
else:
raise ImproperlyConfigured('Loader does not define a "load_template" callable template source loader')
def find_template(name, dirs=None):
# Calculate template_source_loaders the first time the function is executed
# because putting this logic in the module-level namespace may cause
# circular import errors. See Django ticket #1292.
global template_source_loaders
if template_source_loaders is None:
loaders = []
for loader_name in settings.TEMPLATE_LOADERS:
loader = find_template_loader(loader_name)
if loader is not None:
loaders.append(loader)
template_source_loaders = tuple(loaders)
for loader in template_source_loaders:
try:
source, display_name = loader(name, dirs)
return (source, make_origin(display_name, loader, name, dirs))
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist(name)
def find_template_source(name, dirs=None):
# For backward compatibility
import warnings
warnings.warn(
"`django.template.loader.find_template_source` is deprecated; use `django.template.loader.find_template` instead.",
DeprecationWarning
)
template, origin = find_template(name, dirs)
if hasattr(template, 'render'):
raise Exception("Found a compiled template that is incompatible with the deprecated `django.template.loader.find_template_source` function.")
return template, origin
def get_template(template_name):
"""
Returns a compiled Template object for the given template name,
handling template inheritance recursively.
"""
template, origin = find_template(template_name)
if not hasattr(template, 'render'):
# template needs to be compiled
template = get_template_from_string(template, origin, template_name)
return template
def get_template_from_string(source, origin=None, name=None):
"""
Returns a compiled Template object for the given template code,
handling template inheritance recursively.
"""
return Template(source, origin, name)
def render_to_string(template_name, dictionary=None, context_instance=None):
"""
Loads the given template_name and renders it with the given dictionary as
context. The template_name may be a string to load a single template using
get_template, or it may be a tuple to use select_template to find one of
the templates in the list. Returns a string.
"""
dictionary = dictionary or {}
if isinstance(template_name, (list, tuple)):
t = select_template(template_name)
else:
t = get_template(template_name)
if not context_instance:
return t.render(Context(dictionary))
# Add the dictionary to the context stack, ensuring it gets removed again
# to keep the context_instance in the same state it started in.
context_instance.update(dictionary)
try:
return t.render(context_instance)
finally:
context_instance.pop()
def select_template(template_name_list):
"Given a list of template names, returns the first that can be loaded."
not_found = []
for template_name in template_name_list:
try:
return get_template(template_name)
except TemplateDoesNotExist, e:
if e.args[0] not in not_found:
not_found.append(e.args[0])
continue
# If we get here, none of the templates could be loaded
raise TemplateDoesNotExist(', '.join(not_found))
add_to_builtins('django.template.loader_tags')
| bsd-3-clause |
proxysh/Safejumper-for-Desktop | buildmac/Resources/env/lib/python2.7/site-packages/twisted/conch/avatar.py | 12 | 1438 | # -*- test-case-name: twisted.conch.test.test_conch -*-
from __future__ import absolute_import, division
from zope.interface import implementer
from twisted.conch.error import ConchError
from twisted.conch.interfaces import IConchUser
from twisted.conch.ssh.connection import OPEN_UNKNOWN_CHANNEL_TYPE
from twisted.python import log
from twisted.python.compat import nativeString
@implementer(IConchUser)
class ConchUser:
def __init__(self):
self.channelLookup = {}
self.subsystemLookup = {}
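        # channelLookup maps channel-type byte strings (e.g. b'session') to
        # SSHChannel classes; subsystemLookup maps subsystem names to factories.
        # Subclasses populate these so the lookup* methods below can dispatch.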
def lookupChannel(self, channelType, windowSize, maxPacket, data):
klass = self.channelLookup.get(channelType, None)
if not klass:
raise ConchError(OPEN_UNKNOWN_CHANNEL_TYPE, "unknown channel")
else:
return klass(remoteWindow=windowSize,
remoteMaxPacket=maxPacket,
data=data, avatar=self)
def lookupSubsystem(self, subsystem, data):
log.msg(repr(self.subsystemLookup))
klass = self.subsystemLookup.get(subsystem, None)
if not klass:
return False
return klass(data, avatar=self)
def gotGlobalRequest(self, requestType, data):
# XXX should this use method dispatch?
requestType = nativeString(requestType.replace(b'-', b'_'))
f = getattr(self, "global_%s" % requestType, None)
if not f:
return 0
return f(data)
| gpl-2.0 |
simalytics/askbot-devel | askbot/migrations/0112_add_model_ReplyAddress.py | 17 | 24822 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ReplyAddress'
db.create_table('askbot_replyaddress', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('address', self.gf('django.db.models.fields.CharField')(unique=True, max_length=25)),
('post', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['askbot.Post'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('allowed_from_email', self.gf('django.db.models.fields.EmailField')(max_length=150)),
('used_at', self.gf('django.db.models.fields.DateTimeField')(default=None, null=True)),
))
db.send_create_signal('askbot', ['ReplyAddress'])
def backwards(self, orm):
# Deleting model 'ReplyAddress'
db.delete_table('askbot_replyaddress')
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'symmetrical': 'False', 'through': "orm['askbot.ActivityAuditStatus']", 'to': "orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousanswer': {
'Meta': {'object_name': 'AnonymousAnswer'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_answers'", 'to': "orm['askbot.Post']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.anonymousquestion': {
'Meta': {'object_name': 'AnonymousQuestion'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badgedata': {
'Meta': {'ordering': "('slug',)", 'object_name': 'BadgeData'},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'symmetrical': 'False', 'through': "orm['askbot.Award']", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'askbot.emailfeedsetting': {
'Meta': {'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
},
'askbot.favoritequestion': {
'Meta': {'object_name': 'FavoriteQuestion', 'db_table': "u'favorite_question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Thread']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_questions'", 'to': "orm['auth.User']"})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.post': {
'Meta': {'object_name': 'Post'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'old_answer_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'old_comment_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'old_question_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'post_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['askbot.Thread']"}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.postrevision': {
'Meta': {'ordering': "('-revision',)", 'unique_together': "(('post', 'revision'),)", 'object_name': 'PostRevision'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'postrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'revision_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '125', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'blank': 'True'})
},
'askbot.questionview': {
'Meta': {'object_name': 'QuestionView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Post']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_views'", 'to': "orm['auth.User']"})
},
'askbot.replyaddress': {
'Meta': {'object_name': 'ReplyAddress'},
'address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25'}),
'allowed_from_email': ('django.db.models.fields.EmailField', [], {'max_length': '150'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']"}),
'used_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']", 'null': 'True', 'blank': 'True'}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.tag': {
'Meta': {'ordering': "('-used_count', 'name')", 'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.thread': {
'Meta': {'object_name': 'Thread'},
'accepted_answer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'answer_accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'unused_favorite_threads'", 'symmetrical': 'False', 'through': "orm['askbot.FavoriteQuestion']", 'to': "orm['auth.User']"}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_threads'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'unused_last_active_in_threads'", 'to': "orm['auth.User']"}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'threads'", 'symmetrical': 'False', 'to': "orm['askbot.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.vote': {
'Meta': {'unique_together': "(('user', 'voted_post'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'voted_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['askbot.Post']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askbot'] | gpl-3.0 |
andersinno/suds-jurko | tests/test_document_store.py | 7 | 5052 | # -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify it under
# the terms of the (LGPL) GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Library Lesser General Public License
# for more details at ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jurko Gospodnetić ( [email protected] )
"""
Suds Python library DocumentStore unit tests.
Implemented using the 'pytest' testing framework.
"""
if __name__ == "__main__":
import testutils
testutils.run_using_pytest(globals())
import suds
import suds.store
import pytest
def test_accessing_DocumentStore_content():
content1 = suds.byte_str("one")
content2 = suds.byte_str("two")
content1_1 = suds.byte_str("one one")
store = suds.store.DocumentStore({"1": content1})
assert len(store) == 2
_test_default_DocumentStore_content(store)
_test_open(store, "1", content1)
store = suds.store.DocumentStore({"1": content1, "2": content2})
assert len(store) == 3
_test_default_DocumentStore_content(store)
_test_open(store, "1", content1)
_test_open(store, "2", content2)
store = suds.store.DocumentStore(uno=content1, due=content2)
assert len(store) == 3
_test_default_DocumentStore_content(store)
_test_open(store, "uno", content1)
_test_open(store, "due", content2)
store = suds.store.DocumentStore({"1 1": content1_1})
assert len(store) == 2
_test_default_DocumentStore_content(store)
_test_open(store, "1 1", content1_1)
store = suds.store.DocumentStore({"1": content1, "1 1": content1_1})
assert len(store) == 3
_test_default_DocumentStore_content(store)
_test_open(store, "1", content1)
_test_open(store, "1 1", content1_1)
def test_accessing_missing_DocumentStore_content():
store = suds.store.DocumentStore()
assert store.open("missing-content") is None
assert store.open("buga-wuga://missing-content") is None
assert store.open("ftp://missing-content") is None
assert store.open("http://missing-content") is None
assert store.open("https://missing-content") is None
pytest.raises(Exception, store.open, "suds://missing-content")
def test_default_DocumentStore_instance():
assert len(suds.store.defaultDocumentStore) == 1
_test_default_DocumentStore_content(suds.store.defaultDocumentStore)
def test_empty_DocumentStore_instance_is_not_shared():
assert suds.store.DocumentStore() is not suds.store.defaultDocumentStore
assert suds.store.DocumentStore() is not suds.store.DocumentStore()
def test_updating_DocumentStore_content():
content1 = suds.byte_str("one")
content2 = suds.byte_str("two")
content1_1 = suds.byte_str("one one")
store = suds.store.DocumentStore()
assert len(store) == 1
_test_default_DocumentStore_content(store)
store.update({"1": content1})
assert len(store) == 2
_test_default_DocumentStore_content(store)
_test_open(store, "1", content1)
store.update({"1": content1, "2": content2, "1 1": content1_1})
assert len(store) == 4
_test_default_DocumentStore_content(store)
_test_open(store, "1", content1)
_test_open(store, "2", content2)
_test_open(store, "1 1", content1_1)
store.update({"2": content2, "1 1": content1_1})
assert len(store) == 4
_test_default_DocumentStore_content(store)
_test_open(store, "1", content1)
_test_open(store, "2", content2)
_test_open(store, "1 1", content1_1)
store.update(uno=content1, due=content2)
assert len(store) == 6
_test_default_DocumentStore_content(store)
_test_open(store, "1", content1)
_test_open(store, "2", content2)
_test_open(store, "1 1", content1_1)
_test_open(store, "uno", content1)
_test_open(store, "due", content2)
def _test_default_DocumentStore_content(store):
_test_open(store, "schemas.xmlsoap.org/soap/encoding/",
suds.store.soap5_encoding_schema)
def _test_open(store, location, expected_content):
assert store.open(location) is expected_content
assert store.open("buga-wuga://%s" % location) is expected_content
assert store.open("ftp://%s" % location) is expected_content
assert store.open("http://%s" % location) is expected_content
assert store.open("https://%s" % location) is expected_content
assert store.open("suds://%s" % location) is expected_content
| lgpl-3.0 |
ahmadiga/min_edx | lms/djangoapps/django_comment_client/management/commands/assign_role.py | 251 | 1144 | from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django_comment_common.models import Role
from django.contrib.auth.models import User
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--remove',
action='store_true',
dest='remove',
default=False,
help='Remove the role instead of adding it'),
)
args = '<user|email> <role> <course_id>'
help = 'Assign a discussion forum role to a user '
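    # Example invocation (the user, role and course id below are illustrative):
    #   manage.py assign_role [email protected] Moderator MITx/6.002x/2012_Fall
    #   manage.py assign_role --remove moderator_user Moderator MITx/6.002x/2012_Fall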
def handle(self, *args, **options):
if len(args) != 3:
raise CommandError('Usage is assign_role {0}'.format(self.args))
name_or_email, role, course_id = args
role = Role.objects.get(name=role, course_id=course_id)
if '@' in name_or_email:
user = User.objects.get(email=name_or_email)
else:
user = User.objects.get(username=name_or_email)
if options['remove']:
user.roles.remove(role)
else:
user.roles.add(role)
print 'Success!'
| agpl-3.0 |
JGarcia-Panach/odoo | openerp/addons/base/__openerp__.py | 336 | 3703 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Base',
'version': '1.3',
'category': 'Hidden',
'description': """
The kernel of OpenERP, needed for all installations.
====================================================
""",
'author': 'OpenERP SA',
'maintainer': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': [],
'data': [
'base_data.xml',
'res/res_currency_data.xml',
'res/res_country_data.xml',
'security/base_security.xml',
'base_menu.xml',
'res/res_config.xml',
'res/res.country.state.csv',
'ir/ir_actions.xml',
'ir/ir_config_parameter_view.xml',
'ir/ir_cron_view.xml',
'ir/ir_filters.xml',
'ir/ir_mail_server_view.xml',
'ir/ir_model_view.xml',
'ir/ir_attachment_view.xml',
'ir/ir_rule_view.xml',
'ir/ir_sequence_view.xml',
'ir/ir_translation_view.xml',
'ir/ir_ui_menu_view.xml',
'ir/ir_ui_view_view.xml',
'ir/ir_values_view.xml',
'ir/osv_memory_autovacuum.xml',
'ir/ir_model_report.xml',
'ir/ir_logging_view.xml',
'ir/ir_qweb.xml',
'workflow/workflow_view.xml',
'module/module_view.xml',
'module/module_data.xml',
'module/module_report.xml',
'module/wizard/base_module_update_view.xml',
'module/wizard/base_language_install_view.xml',
'module/wizard/base_import_language_view.xml',
'module/wizard/base_module_upgrade_view.xml',
'module/wizard/base_module_configuration_view.xml',
'module/wizard/base_export_language_view.xml',
'module/wizard/base_update_translations_view.xml',
'module/wizard/base_module_immediate_install.xml',
'res/res_company_view.xml',
'res/res_request_view.xml',
'res/res_lang_view.xml',
'res/res_partner_report.xml',
'res/res_partner_view.xml',
'res/res_bank_view.xml',
'res/res_country_view.xml',
'res/res_currency_view.xml',
'res/res_users_view.xml',
'res/res_partner_data.xml',
'res/ir_property_view.xml',
'res/res_security.xml',
'security/ir.model.access.csv',
],
'demo': [
'base_demo.xml',
'res/res_partner_demo.xml',
'res/res_partner_demo.yml',
'res/res_partner_image_demo.xml',
],
'test': [
'tests/base_test.yml',
'tests/test_osv_expression.yml',
'tests/test_ir_rule.yml', # <-- These tests modify/add/delete ir_rules.
],
'installable': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yongshengwang/hue | build/env/lib/python2.7/site-packages/setuptools/extension.py | 192 | 1821 | import sys
import re
import functools
import distutils.core
import distutils.errors
import distutils.extension
from .dist import _get_unpatched
from . import msvc9_support
_Extension = _get_unpatched(distutils.core.Extension)
msvc9_support.patch_for_specialized_compiler()
def have_pyrex():
"""
Return True if Cython or Pyrex can be imported.
"""
pyrex_impls = 'Cython.Distutils.build_ext', 'Pyrex.Distutils.build_ext'
for pyrex_impl in pyrex_impls:
try:
# from (pyrex_impl) import build_ext
__import__(pyrex_impl, fromlist=['build_ext']).build_ext
return True
except Exception:
pass
return False
class Extension(_Extension):
"""Extension that uses '.c' files in place of '.pyx' files"""
def __init__(self, *args, **kw):
_Extension.__init__(self, *args, **kw)
self._convert_pyx_sources_to_lang()
def _convert_pyx_sources_to_lang(self):
"""
Replace sources with .pyx extensions to sources with the target
language extension. This mechanism allows language authors to supply
pre-converted sources but to prefer the .pyx sources.
"""
if have_pyrex():
# the build has Cython, so allow it to compile the .pyx files
return
lang = self.language or ''
target_ext = '.cpp' if lang.lower() == 'c++' else '.c'
sub = functools.partial(re.sub, '.pyx$', target_ext)
self.sources = list(map(sub, self.sources))
class Library(Extension):
"""Just like a regular Extension, but built as a library instead"""
distutils.core.Extension = Extension
distutils.extension.Extension = Extension
if 'distutils.command.build_ext' in sys.modules:
sys.modules['distutils.command.build_ext'].Extension = Extension
| apache-2.0 |
flwh/KK_mt6589_iq451 | prebuilts/python/darwin-x86/2.7.5/lib/python2.7/test/test_structmembers.py | 88 | 3438 | from _testcapi import _test_structmembersType, \
CHAR_MAX, CHAR_MIN, UCHAR_MAX, \
SHRT_MAX, SHRT_MIN, USHRT_MAX, \
INT_MAX, INT_MIN, UINT_MAX, \
LONG_MAX, LONG_MIN, ULONG_MAX, \
LLONG_MAX, LLONG_MIN, ULLONG_MAX
import unittest
from test import test_support
ts=_test_structmembersType(False, 1, 2, 3, 4, 5, 6, 7, 8,
9.99999, 10.1010101010, "hi")
class ReadWriteTests(unittest.TestCase):
def test_bool(self):
ts.T_BOOL = True
self.assertEqual(ts.T_BOOL, True)
ts.T_BOOL = False
self.assertEqual(ts.T_BOOL, False)
self.assertRaises(TypeError, setattr, ts, 'T_BOOL', 1)
def test_byte(self):
ts.T_BYTE = CHAR_MAX
self.assertEqual(ts.T_BYTE, CHAR_MAX)
ts.T_BYTE = CHAR_MIN
self.assertEqual(ts.T_BYTE, CHAR_MIN)
ts.T_UBYTE = UCHAR_MAX
self.assertEqual(ts.T_UBYTE, UCHAR_MAX)
def test_short(self):
ts.T_SHORT = SHRT_MAX
self.assertEqual(ts.T_SHORT, SHRT_MAX)
ts.T_SHORT = SHRT_MIN
self.assertEqual(ts.T_SHORT, SHRT_MIN)
ts.T_USHORT = USHRT_MAX
self.assertEqual(ts.T_USHORT, USHRT_MAX)
def test_int(self):
ts.T_INT = INT_MAX
self.assertEqual(ts.T_INT, INT_MAX)
ts.T_INT = INT_MIN
self.assertEqual(ts.T_INT, INT_MIN)
ts.T_UINT = UINT_MAX
self.assertEqual(ts.T_UINT, UINT_MAX)
def test_long(self):
ts.T_LONG = LONG_MAX
self.assertEqual(ts.T_LONG, LONG_MAX)
ts.T_LONG = LONG_MIN
self.assertEqual(ts.T_LONG, LONG_MIN)
ts.T_ULONG = ULONG_MAX
self.assertEqual(ts.T_ULONG, ULONG_MAX)
@unittest.skipUnless(hasattr(ts, "T_LONGLONG"), "long long not present")
def test_longlong(self):
ts.T_LONGLONG = LLONG_MAX
self.assertEqual(ts.T_LONGLONG, LLONG_MAX)
ts.T_LONGLONG = LLONG_MIN
self.assertEqual(ts.T_LONGLONG, LLONG_MIN)
ts.T_ULONGLONG = ULLONG_MAX
self.assertEqual(ts.T_ULONGLONG, ULLONG_MAX)
## make sure these will accept a plain int as well as a long
ts.T_LONGLONG = 3
self.assertEqual(ts.T_LONGLONG, 3)
ts.T_ULONGLONG = 4
self.assertEqual(ts.T_ULONGLONG, 4)
def test_inplace_string(self):
self.assertEqual(ts.T_STRING_INPLACE, "hi")
self.assertRaises(TypeError, setattr, ts, "T_STRING_INPLACE", "s")
self.assertRaises(TypeError, delattr, ts, "T_STRING_INPLACE")
class TestWarnings(unittest.TestCase):
def test_byte_max(self):
with test_support.check_warnings(('', RuntimeWarning)):
ts.T_BYTE = CHAR_MAX+1
def test_byte_min(self):
with test_support.check_warnings(('', RuntimeWarning)):
ts.T_BYTE = CHAR_MIN-1
def test_ubyte_max(self):
with test_support.check_warnings(('', RuntimeWarning)):
ts.T_UBYTE = UCHAR_MAX+1
def test_short_max(self):
with test_support.check_warnings(('', RuntimeWarning)):
ts.T_SHORT = SHRT_MAX+1
def test_short_min(self):
with test_support.check_warnings(('', RuntimeWarning)):
ts.T_SHORT = SHRT_MIN-1
def test_ushort_max(self):
with test_support.check_warnings(('', RuntimeWarning)):
ts.T_USHORT = USHRT_MAX+1
def test_main(verbose=None):
test_support.run_unittest(__name__)
if __name__ == "__main__":
test_main(verbose=True)
| gpl-2.0 |
tensorflow/io | tensorflow_io/python/experimental/parse_avro_ops.py | 1 | 23257 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""parse_avro_ops"""
import collections
import re
import tensorflow as tf
import tensorflow_io
from tensorflow_io.python.ops import core_ops
# Adjusted from
# https://github.com/tensorflow/tensorflow/blob/v2.0.0/tensorflow/python/ops/parsing_ops.py
# Note, there are several changes to 2.1.0
# Only copied parts from `parse_example_v2` and `_parse_example_raw`
def parse_avro(serialized, reader_schema, features, avro_names=None, name=None):
"""
Parses `avro` records into a `dict` of tensors.
This op parses serialized avro records into a dictionary mapping keys to
`Tensor`, and `SparseTensor` objects. `features` is a dict from keys to
`VarLenFeature`, `SparseFeature`, `RaggedFeature`, and `FixedLenFeature`
objects. Each `VarLenFeature` and `SparseFeature` is mapped to a
`SparseTensor`; each `FixedLenFeature` is mapped to a `Tensor`.
Each `VarLenFeature` maps to a `SparseTensor` of the specified type
representing a ragged matrix. Its indices are `[batch, index]` where `batch`
identifies the example in `serialized`, and `index` is the value's index in
the list of values associated with that feature and example.
Each `SparseFeature` maps to a `SparseTensor` of the specified type
representing a Tensor of `dense_shape` `[batch_size] + SparseFeature.size`.
Its `values` come from the feature in the examples with key `value_key`.
A `values[i]` comes from a position `k` in the feature of an example at batch
entry `batch`. This positional information is recorded in `indices[i]` as
`[batch, index_0, index_1, ...]` where `index_j` is the `k-th` value of
the feature in the example at with key `SparseFeature.index_key[j]`.
In other words, we split the indices (except the first index indicating the
batch entry) of a `SparseTensor` by dimension into different features of the
avro record. Due to its complexity a `VarLenFeature` should be preferred
over a `SparseFeature` whenever possible.
Each `FixedLenFeature` `df` maps to a `Tensor` of the specified type (or
`tf.float32` if not specified) and shape `(serialized.size(),) + df.shape`.
`FixedLenFeature` entries with a `default_value` are optional. With no default
value, we will fail if that `Feature` is missing from any example in
`serialized`.
Use this within the dataset.map(parser_fn=parse_avro).
Only works for batched serialized input!
Args:
serialized: The batched, serialized string tensors.
reader_schema: The reader schema. Note, this MUST match the reader schema
from the avro_record_dataset. Otherwise, this op will segfault!
features: A map of feature names mapped to feature information.
avro_names: (Optional.) may contain descriptive names for the
corresponding serialized avro parts. These may be useful for debugging
purposes, but they have no effect on the output. If not `None`,
`avro_names` must be the same length as `serialized`.
name: The name of the op.
Returns:
A map of feature names to tensors.
"""
if not features:
raise ValueError("Missing: features was %s." % features)
features = _build_keys_for_sparse_features(features)
(
sparse_keys,
sparse_types,
sparse_ranks,
dense_keys,
dense_types,
dense_defaults,
dense_shapes,
) = _features_to_raw_params(
features,
[
tensorflow_io.experimental.columnar.VarLenFeatureWithRank,
tf.io.SparseFeature,
tf.io.FixedLenFeature,
],
)
outputs = _parse_avro(
serialized,
reader_schema,
avro_names,
sparse_keys,
sparse_types,
sparse_ranks,
dense_keys,
dense_types,
dense_defaults,
dense_shapes,
name,
)
return construct_tensors_for_composite_features(features, outputs)
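# Illustrative usage sketch (added for clarity; not part of the original
# module). The dataset, schema string, and feature names below are
# placeholders; the only assumptions are a *batched* dataset of serialized
# Avro records and a `reader_schema` matching the one used to read them, as
# required by the docstring above.
#
# def _parse_fn(serialized):
#     features = {
#         "amount": tf.io.FixedLenFeature([], tf.float32, default_value=0.0),
#         "label": tf.io.FixedLenFeature([], tf.int64, default_value=0),
#     }
#     return parse_avro(serialized, reader_schema, features)
#
# dataset = avro_dataset.batch(32).map(_parse_fn)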
def _parse_avro(
serialized,
reader_schema,
names=None,
sparse_keys=None,
sparse_types=None,
sparse_ranks=None,
dense_keys=None,
dense_types=None,
dense_defaults=None,
dense_shapes=None,
name=None,
avro_num_minibatches=0,
):
"""Parses Avro records.
Args:
serialized: A vector (1-D Tensor) of strings, a batch of binary
serialized `Example` protos.
reader_schema: A scalar string representing the reader schema.
names: A vector (1-D Tensor) of strings (optional), the names of
the serialized protos.
sparse_keys: A list of string keys in the examples' features.
The results for these keys will be returned as `SparseTensor` objects.
sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
      sparse_ranks: A list of int ranks with the same length as `sparse_keys`,
        one per sparse feature.
dense_keys: A list of string keys in the examples' features.
The results for these keys will be returned as `Tensor`s
dense_types: A list of DTypes of the same length as `dense_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
dense_defaults: A dict mapping string keys to `Tensor`s.
The keys of the dict must match the dense_keys of the feature.
dense_shapes: A list of tuples with the same length as `dense_keys`.
The shape of the data for each dense feature referenced by `dense_keys`.
Required for any input tensors identified by `dense_keys`. Must be
either fully defined, or may contain an unknown first dimension.
An unknown first dimension means the feature is treated as having
a variable number of blocks, and the output shape along this dimension
is considered unknown at graph build time. Padding is applied for
minibatch elements smaller than the maximum number of blocks for the
given feature along this dimension.
name: A name for this operation (optional).
Returns:
A `dict` mapping keys to `Tensor`s and `SparseTensor`s.
"""
with tf.name_scope(name or "ParseAvro"):
(
names,
dense_defaults_vec,
sparse_keys,
sparse_types,
dense_keys,
dense_shapes,
_,
) = _process_raw_parameters(
names,
dense_defaults,
sparse_keys,
sparse_types,
dense_keys,
dense_types,
dense_shapes,
)
outputs = core_ops.io_parse_avro(
serialized=serialized,
reader_schema=reader_schema,
names=names,
dense_defaults=dense_defaults_vec,
sparse_keys=sparse_keys,
sparse_types=sparse_types,
num_sparse=len(sparse_keys),
sparse_ranks=sparse_ranks,
dense_keys=dense_keys,
dense_shapes=dense_shapes,
name=name,
avro_num_minibatches=avro_num_minibatches,
)
(sparse_indices, sparse_values, sparse_shapes, dense_values) = outputs
sparse_tensors = [
tf.sparse.SparseTensor(ix, val, shape)
for (ix, val, shape) in zip(sparse_indices, sparse_values, sparse_shapes)
]
return dict(zip(sparse_keys + dense_keys, sparse_tensors + dense_values))
# Adjusted from
# https://github.com/tensorflow/tensorflow/blob/v2.0.0/tensorflow/python/ops/parsing_ops.py
# _prepend_none_dimension with the following changes
# - Removed the warning
# - Switched this to FixedLenFeature -- instead of FixedLenSequenceFeature
def _prepend_none_dimension(features):
"""prepend_none_dimension"""
if features:
modified_features = dict(features) # Create a copy to modify
for key, feature in features.items():
if isinstance(feature, tf.io.FixedLenFeature):
modified_features[key] = tf.io.FixedLenFeature(
[None] + list(feature.shape), feature.dtype, feature.default_value
)
return modified_features
return features
def _build_keys_for_sparse_features(features):
"""
Builds the fully qualified names for keys of sparse features.
Args:
features: A map of features with keys to TensorFlow features.
Returns:
A map of features where for the sparse feature
the 'index_key' and the 'value_key' have been expanded
properly for the parser in the native code.
"""
def resolve_key(parser_key, index_or_value_key):
if not index_or_value_key.startswith("@"):
return parser_key + "[*]." + index_or_value_key
return index_or_value_key[1:]
def resolve_index_key(key_, index_key):
if isinstance(index_key, list):
return [resolve_key(key_, index_key_) for index_key_ in index_key]
return resolve_key(key_, index_key)
if features:
modified_features = dict(features) # Create a copy to modify
        # NOTE: Only SparseFeature entries are rewritten, into a copy, so the
        # iteration order does not matter here.
for key, feature in features.items():
if isinstance(feature, tf.io.SparseFeature):
modified_features[key] = tf.io.SparseFeature(
index_key=resolve_index_key(key, feature.index_key),
value_key=resolve_key(key, feature.value_key),
dtype=feature.dtype,
size=feature.size,
already_sorted=feature.already_sorted,
)
return modified_features
return features
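# Example of the expansion performed above (the feature and field names are
# hypothetical): a SparseFeature registered under the key "person" with
# index_key="age" and value_key="@name" is rewritten so that its index_key
# becomes "person[*].age" (resolved relative to the parser key), while its
# value_key becomes "name" (a leading "@" marks an already fully qualified
# key and is simply stripped).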
# adapted from https://github.com/tensorflow/tensorflow/blob/6d0f422525d8c1dd3184d39494abacd32b52a840/tensorflow/python/ops/parsing_config.py#L661 and skipped RaggedFeature part
def construct_tensors_for_composite_features(features, tensor_dict):
"""construct_tensors_for_composite_features"""
tensor_dict = dict(tensor_dict) # Do not modify argument passed in.
updates = {}
for key in sorted(features.keys()):
feature = features[key]
if isinstance(feature, tf.io.SparseFeature):
# Construct SparseTensors for SparseFeatures
if isinstance(feature.index_key, str):
sp_ids = tensor_dict[feature.index_key]
else:
sp_ids = [tensor_dict[index_key] for index_key in feature.index_key]
sp_values = tensor_dict[feature.value_key]
updates[key] = tf.compat.v1.sparse_merge(
sp_ids,
sp_values,
vocab_size=feature.size,
already_sorted=feature.already_sorted,
)
# Process updates after all composite tensors have been constructed (in case
# multiple features use the same value_key, and one uses that key as its
# feature key).
tensor_dict.update(updates)
# Remove tensors from dictionary that were only used to construct
# tensors for SparseFeature or RaggedTensor.
for key in set(tensor_dict) - set(features):
del tensor_dict[key]
return tensor_dict
# Pulled this method from tensorflow/python/ops/parsing_ops.py
# changed the following
# - removed FixedLenSequenceFeature
# - removed checks about None dimension in FixedLenFeature
# -- since it acts as FixedLenSequenceFeature, there is no need for both concepts
def _features_to_raw_params(features, types):
"""Split feature tuples into raw params used by `gen_parsing_ops`.
Args:
features: A `dict` mapping feature keys to objects of a type in `types`.
      types: Type of features to allow, among `FixedLenFeature`,
        `VarLenFeatureWithRank`, and `SparseFeature`.
Returns:
      Tuple of `sparse_keys`, `sparse_types`, `sparse_ranks`, `dense_keys`,
      `dense_types`, `dense_defaults`, `dense_shapes`.
Raises:
ValueError: if `features` contains an item not in `types`, or an invalid
feature.
"""
sparse_keys = []
sparse_types = []
sparse_ranks = []
dense_keys = []
dense_types = []
# When the graph is built twice, multiple dense_defaults in a normal dict
# could come out in different orders. This will fail the _e2e_test which
# expects exactly the same graph.
# OrderedDict which preserves the order can solve the problem.
dense_defaults = collections.OrderedDict()
dense_shapes = []
if features:
# NOTE: We iterate over sorted keys to keep things deterministic.
for key in sorted(features.keys()):
feature = features[key]
if isinstance(
feature, tensorflow_io.experimental.columnar.VarLenFeatureWithRank
):
_handle_varlen_feature(
feature, key, sparse_keys, sparse_types, sparse_ranks, types
)
elif isinstance(feature, tf.io.SparseFeature):
_handle_sparse_feature(
feature, key, sparse_keys, sparse_types, sparse_ranks, types
)
elif isinstance(feature, tf.io.FixedLenFeature):
_handle_fixedlen_feature(
dense_defaults,
dense_keys,
dense_shapes,
dense_types,
feature,
key,
types,
)
else:
raise ValueError("Invalid feature {}:{}.".format(key, feature))
return (
sparse_keys,
sparse_types,
sparse_ranks,
dense_keys,
dense_types,
dense_defaults,
dense_shapes,
)
def _handle_fixedlen_feature(
dense_defaults, dense_keys, dense_shapes, dense_types, feature, key, types
):
"""handle_fixedlen_feature"""
if tf.io.FixedLenFeature not in types:
raise ValueError("Unsupported FixedLenFeature {}.".format(feature))
if not feature.dtype:
raise ValueError("Missing type for feature %s." % key)
if feature.shape is None:
raise ValueError("Missing shape for feature %s." % key)
dense_keys.append(key)
dense_shapes.append(feature.shape)
dense_types.append(feature.dtype)
if feature.default_value is not None:
dense_defaults[key] = feature.default_value
def _handle_sparse_feature(
feature, key, sparse_keys, sparse_types, sparse_ranks, types
):
"""handle_sparse_feature"""
if tf.io.SparseFeature not in types:
raise ValueError("Unsupported SparseFeature {}.".format(feature))
if not feature.index_key:
raise ValueError("Missing index_key for SparseFeature {}.".format(feature))
if not feature.value_key:
raise ValueError("Missing value_key for SparseFeature {}.".format(feature))
if not feature.dtype:
raise ValueError("Missing type for feature %s." % key)
index_keys = feature.index_key
if isinstance(index_keys, str):
index_keys = [index_keys]
elif len(index_keys) > 1:
tf.get_logger().warning(
"SparseFeature is a complicated feature config "
"and should only be used after careful "
"consideration of VarLenFeature."
)
for index_key in sorted(index_keys):
if index_key in sparse_keys:
dtype = sparse_types[sparse_keys.index(index_key)]
if dtype != tf.int64:
raise ValueError(
"Conflicting type {} vs int64 for feature {}.".format(
dtype, index_key
)
)
else:
sparse_keys.append(index_key)
sparse_types.append(tf.int64)
            # sparse features always have rank 1 because they encode the indices separately (one for each component) and then merge these before the user gets them.
# setting 1 here is merely achieving the same behavior as before.
sparse_ranks.append(1)
if feature.value_key in sparse_keys:
dtype = sparse_types[sparse_keys.index(feature.value_key)]
if dtype != feature.dtype:
raise ValueError(
"Conflicting type %s vs %s for feature %s."
% (dtype, feature.dtype, feature.value_key)
)
else:
sparse_keys.append(feature.value_key)
sparse_types.append(feature.dtype)
        # sparse features always have rank 1 because they encode the indices separately (one for each component) and then merge these before the user gets them.
# setting 1 here is merely achieving the same behavior as before.
sparse_ranks.append(1)
def _handle_varlen_feature(
feature, key, sparse_keys, sparse_types, sparse_ranks, types
):
"""handle_varlen_feature"""
if tensorflow_io.experimental.columnar.VarLenFeatureWithRank not in types:
raise ValueError("Unsupported VarLenFeatureWithRank {}.".format(feature))
if not feature.dtype:
raise ValueError("Missing type for VarLenFeatureWithRank %s." % key)
if not feature.rank:
raise ValueError("Missing rank for VarLenFeatureWithRank %s." % key)
sparse_keys.append(key)
sparse_types.append(feature.dtype)
sparse_ranks.append(feature.rank)
# Pulled this method from tensorflow/python/ops/parsing_ops.py
# here to customize the handling of default values because
# we have
# - more types
# - handling had to change because we don't have a batch dimension when
# calling this method
def _process_raw_parameters(
names,
dense_defaults,
sparse_keys,
sparse_types,
dense_keys,
dense_types,
dense_shapes,
):
"""Process raw parameters to params used by `gen_parsing_ops`.
Args:
names: A vector (1-D Tensor) of strings (optional), the names of
the serialized protos.
dense_defaults: A dict mapping string keys to `Tensor`s.
The keys of the dict must match the dense_keys of the feature.
sparse_keys: A list of string keys in the examples' features.
The results for these keys will be returned as `SparseTensor` objects.
sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
dense_keys: A list of string keys in the examples' features.
The results for these keys will be returned as `Tensor`s
dense_types: A list of DTypes of the same length as `dense_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
dense_shapes: A list of tuples with the same length as `dense_keys`.
The shape of the data for each dense feature referenced by `dense_keys`.
Required for any input tensors identified by `dense_keys`. Must be
either fully defined, or may contain an unknown first dimension.
An unknown first dimension means the feature is treated as having
a variable number of blocks, and the output shape along this dimension
is considered unknown at graph build time. Padding is applied for
minibatch elements smaller than the maximum number of blocks for the
given feature along this dimension.
Returns:
      Tuple of `names`, `dense_defaults_vec`, `sparse_keys`, `sparse_types`,
      `dense_keys`, `dense_shapes_as_proto`, `dense_shapes`.
Raises:
ValueError: If sparse and dense key sets intersect, or input lengths do not
match up.
"""
names = [] if names is None else names
dense_defaults = (
collections.OrderedDict() if dense_defaults is None else dense_defaults
)
sparse_keys = [] if sparse_keys is None else sparse_keys
sparse_types = [] if sparse_types is None else sparse_types
dense_keys = [] if dense_keys is None else dense_keys
dense_types = [] if dense_types is None else dense_types
dense_shapes = [[]] * len(dense_keys) if dense_shapes is None else dense_shapes
num_dense = len(dense_keys)
num_sparse = len(sparse_keys)
if len(dense_shapes) != num_dense:
raise ValueError(
"len(dense_shapes) != len(dense_keys): %d vs. %d"
% (len(dense_shapes), num_dense)
)
if len(dense_types) != num_dense:
raise ValueError(
"len(dense_types) != len(num_dense): %d vs. %d"
% (len(dense_types), num_dense)
)
if len(sparse_types) != num_sparse:
raise ValueError(
"len(sparse_types) != len(sparse_keys): %d vs. %d"
% (len(sparse_types), num_sparse)
)
if num_dense + num_sparse == 0:
raise ValueError("Must provide at least one sparse key or dense key")
if not set(dense_keys).isdisjoint(set(sparse_keys)):
raise ValueError(
"Dense and sparse keys must not intersect; intersection: %s"
% set(dense_keys).intersection(set(sparse_keys))
)
# Convert dense_shapes to TensorShape object.
dense_shapes = [tf.TensorShape(shape) for shape in dense_shapes]
dense_defaults_vec = []
for i, key in enumerate(dense_keys):
default_value = dense_defaults.get(key)
dense_shape = dense_shapes[i]
# Whenever the user did not provide a default, set it
# ************* START difference: This part is different from the originally copied code ***************
if default_value is None:
default_value = tf.constant([], dtype=dense_types[i])
elif not isinstance(default_value, tf.Tensor):
key_name = "key_" + re.sub("[^A-Za-z0-9_.\\-/]", "_", key)
default_value = tf.convert_to_tensor(
default_value, dtype=dense_types[i], name=key_name
)
# If we have a shape and the first dimension is not None
if dense_shape.rank and dense_shape.dims[0].value:
default_value = tf.reshape(default_value, dense_shape)
# ************* END difference: This part is different from the originally copied code *****************
dense_defaults_vec.append(default_value)
# Finally, convert dense_shapes to TensorShapeProto
dense_shapes_as_proto = [shape.as_proto() for shape in dense_shapes]
return (
names,
dense_defaults_vec,
sparse_keys,
sparse_types,
dense_keys,
dense_shapes_as_proto,
dense_shapes,
)
| apache-2.0 |
MikeMarcin/UIforETW | LabScripts/ETWPMCDemo/etwpmc_parser.py | 2 | 8792 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The data created by recording CPU performance counters (Pmc) for each context switch looks something like this:
CSwitch, TimeStamp, New Process Name ( PID), New TID, NPri, NQnt, TmSinceLast, WaitTime, Old Process Name ( PID), Old TID, OPri, OQnt, OldState, Wait Reason, Swapable, InSwitchTime, CPU, IdealProc, OldRemQnt, NewPriDecr, PrevCState
Pmc, TimeStamp, ThreadID, BranchInstructions, BranchMispredictions
...
CSwitch, 64630, Idle ( 0), 0, 0, -1, 0, 0, tracelog.exe (5200), 5912, 9, -1, Standby, WrPreempted, NonSwap, 6, 6, 7, 14768128, 0, 0
CSwitch, 64631, tracelog.exe (5200), 5912, 9, -1, 1, 1, RuntimeBroker.exe (3896), 7720, 8, -1, Ready, WrDispatchInt, NonSwap, 6, 7, 7, 32640000, 0, 0
CSwitch, 64648, MsMpEng.exe (3016), 13212, 8, -1, 19604, 2, Idle ( 0), 0, 0, -1, Running, Executive, NonSwap, 1465, 0, 0, 0, 0, 1
Pmc, 64662, 7720, 41066, 6977
CSwitch, 64662, RuntimeBroker.exe (3896), 7720, 8, -1, 31, 0, MsMpEng.exe (3016), 13212, 8, -1, Waiting, WrQueue, Swapable, 14, 0, 4, 68564992, 0, 0
Pmc, 64690, 0, 6723, 1485
CSwitch, 64690, Idle ( 0), 0, 0, -1, 0, 0, tracelog.exe (5200), 5912, 9, -1, Waiting, Executive, NonSwap, 59, 7, 2, 14640128, 0, 0
Pmc, 64693, 7904, 34481, 3028
CSwitch, 64693, conhost.exe (8148), 7904, 11, -1, 4243, 1, Idle ( 0), 0, 0, -1, Running, Executive, NonSwap, 1407, 2, 2, 0, 2, 1
Pmc, 64704, 0, 36020, 3267
CSwitch, 64704, Idle ( 0), 0, 0, -1, 0, 0, conhost.exe (8148), 7904, 11, -1, Waiting, UserRequest, Swapable, 12, 2, 6, 202464256, 0, 0
Pmc, 64710, 5912, 7077, 1518
CSwitch, 64710, tracelog.exe (5200), 5912, 9, -1, 19, 0, Idle ( 0), 0, 0, -1, Running, Executive, NonSwap, 19, 7, 7, 0, 0, 1
A few things can be observed about the data.
The Pmc data takes a while to get going - there can be thousands of CSwitch events
before the first Pmc event. Awesome.
The Pmc events are cumulative and per-processor. This explains why they increase over the
duration of the trace, but not monotonically. They only increase monotonically if you look
at them on a particular CPU.
The Pmc events are associated with the following event. This can be seen in the CSwitch at TimeStamp
64704. This CSwitch is on CPU 2 and the following Pmc has a BranchInstructions count of 7077, which
is incompatible with the previous CSwitch which is also on CPU 2.
The CSwitch events are when a thread *starts* executing. So, you don't know what counts to associate
with a timeslice until the *next* context switch on that CPU. So...
When a Pmc event is seen, look for a CSwitch event on the next line. If this is not the first Pmc/CSwitch
pair for this CPU (see column 16) then calculate the deltas for all of the Pmc counters and add those
deltas to the process listed in the Old Process Name ( PID) column (column 8).
Sometimes there will be an Error: message in between the Pmc and CSwitch lines. Ignore those, but don't
be too forgiving about what you parse or else you may end up calculating garbage results.
Example:
Pmc, 2428274, 84, 45813769, 2146039
Error: Description for thread state (9) could not be found. Thread state array out of date!!
CSwitch, 2428274, System ( 4), 84, 23, -1, 220, 0, csrss.exe ( 628), 732, 14, -1, <out of range>, WrProcessInSwap, Swapable, 19, 2, 4, 68552704, 0, 0
"""
import sys
if len(sys.argv) <= 1:
print 'Usage: %s xperfoutput [processname]' % sys.argv[0]
print 'The first parameter is the name of a file containing the results'
print 'of "xperf -i trace.etl". The second (optional) parameter is a'
print 'process name substring filter used to restrict which results are'
print 'shown - only processes that match are displayed.'
sys.exit(0)
xperfoutputfilename = sys.argv[1]
l = open(xperfoutputfilename).readlines()
# Scan through the counter data looking for Pmc and CSwitch records.
# If adjacent records are found that contain Pmc and CSwitch data then
# combine the data. This gives us some counters that we can assign to
# a particular CPU. If we have already seen counters for that CPU then
# we can subtract the previous counters to get a delta.
# That delta can then be applied to the process that was *previously*
# assigned to that CPU.
lastLineByCPU = {}
countersByCPU = {}
lastCSwitchTimeByCPU = {}
countersByProcess = {}
contextSwitchesByProcess = {}
cpuTimeByProcess = {}
processByCPU = {} # Which process has been switched in to a particular CPU
description = None
for x in range(len(l) - 1):
if l[x].startswith(" Pmc,"):
pmc_parts = l[x].split(",")
if not description:
# Grab the description of the Pmc counter records, see how many counters
# there are, and print the description.
num_counters = len(pmc_parts) - 3
description = l[x].strip()
print description
continue
counters = map(int, pmc_parts[3:])
# Look for a CSwitch line. Ideally it will be next, but sometimes an Error: line
# might be in-between.
cswitch_line = ""
if l[x+1].startswith(" CSwitch,"):
cswitch_line = l[x+1]
elif l[x+1].startswith("Error: ") and l[x+2].startswith(" CSwitch,"):
cswitch_line = l[x+2]
if cswitch_line:
cswitch_parts = cswitch_line.split(",")
CPU = int(cswitch_parts[16].strip())
process = cswitch_parts[2].strip()
timeStamp = int(cswitch_parts[1])
# See if we've got previous Pmc records for this CPU:
if countersByCPU.has_key(CPU):
diffs = map(lambda a,b : a - b, counters, countersByCPU[CPU])
old_process = cswitch_parts[8].strip()
# Sanity checking...
if old_process != processByCPU[CPU]:
print "Old process mismatch at line %d, %s versus %s" % (x, old_process, processByCPU[CPU])
sys.exit(0)
if old_process != "Idle ( 0)":
countersByProcess[old_process] = map(lambda x, y: x + y, countersByProcess.get(old_process, num_counters * [0]), diffs)
contextSwitchesByProcess[old_process] = contextSwitchesByProcess.get(old_process, 0) + 1
cpuTimeByProcess[old_process] = cpuTimeByProcess.get(old_process, 0) + (timeStamp - lastCSwitchTimeByCPU[CPU])
lastCSwitchTimeByCPU[CPU] = timeStamp
processByCPU[CPU] = process
countersByCPU[CPU] = counters
lastLineByCPU[CPU] = x
else:
print "Missing cswitch line at line %d" % x
sys.exit(0)
print "%43s: counter1/counter2, counters" % "Process name"
for process in countersByProcess.keys():
totals = countersByProcess[process]
if totals[0] > 100000: # Arbitrary filtering
# Filter to the specific process substring if requested.
if len(sys.argv) == 2 or process.lower().count(sys.argv[2].lower()) > 0:
print "%43s: %5.2f%%, %s, %d context switches, time: %d" % (process, totals[0] * 100.0 / totals[1], totals, contextSwitchesByProcess[process], cpuTimeByProcess[process])
| apache-2.0 |
100health/RedoxBlog | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/lexers/shell.py | 287 | 15340 | # -*- coding: utf-8 -*-
"""
pygments.lexers.shell
~~~~~~~~~~~~~~~~~~~~~
Lexers for various shells.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups, include
from pygments.token import Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Generic
from pygments.util import shebang_matches
__all__ = ['BashLexer', 'BashSessionLexer', 'TcshLexer', 'BatchLexer',
'PowerShellLexer', 'ShellSessionLexer']
line_re = re.compile('.*?\n')
class BashLexer(RegexLexer):
"""
Lexer for (ba|k|)sh shell scripts.
*New in Pygments 0.6.*
"""
name = 'Bash'
aliases = ['bash', 'sh', 'ksh']
filenames = ['*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass',
'.bashrc', 'bashrc', '.bash_*', 'bash_*']
mimetypes = ['application/x-sh', 'application/x-shellscript']
tokens = {
'root': [
include('basic'),
(r'\$\(\(', Keyword, 'math'),
(r'\$\(', Keyword, 'paren'),
(r'\${#?', Keyword, 'curly'),
(r'`', String.Backtick, 'backticks'),
include('data'),
],
'basic': [
(r'\b(if|fi|else|while|do|done|for|then|return|function|case|'
r'select|continue|until|esac|elif)\s*\b',
Keyword),
(r'\b(alias|bg|bind|break|builtin|caller|cd|command|compgen|'
r'complete|declare|dirs|disown|echo|enable|eval|exec|exit|'
r'export|false|fc|fg|getopts|hash|help|history|jobs|kill|let|'
r'local|logout|popd|printf|pushd|pwd|read|readonly|set|shift|'
r'shopt|source|suspend|test|time|times|trap|true|type|typeset|'
r'ulimit|umask|unalias|unset|wait)\s*\b(?!\.)',
Name.Builtin),
(r'#.*\n', Comment),
(r'\\[\w\W]', String.Escape),
(r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
(r'[\[\]{}()=]', Operator),
(r'<<<', Operator), # here-string
(r'<<-?\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
(r'&&|\|\|', Operator),
],
'data': [
(r'(?s)\$?"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)\$?'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
(r';', Punctuation),
(r'&', Punctuation),
(r'\|', Punctuation),
(r'\s+', Text),
(r'[^=\s\[\]{}()$"\'`\\<&|;]+', Text),
(r'\d+(?= |\Z)', Number),
(r'\$#?(\w+|.)', Name.Variable),
(r'<', Text),
],
'curly': [
(r'}', Keyword, '#pop'),
(r':-', Keyword),
(r'[a-zA-Z0-9_]+', Name.Variable),
(r'[^}:"\'`$]+', Punctuation),
(r':', Punctuation),
include('root'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('root'),
],
'math': [
(r'\)\)', Keyword, '#pop'),
(r'[-+*/%^|&]|\*\*|\|\|', Operator),
(r'\d+', Number),
include('root'),
],
'backticks': [
(r'`', String.Backtick, '#pop'),
include('root'),
],
}
def analyse_text(text):
if shebang_matches(text, r'(ba|z|)sh'):
return 1
if text.startswith('$ '):
return 0.2
class BashSessionLexer(Lexer):
"""
Lexer for simplistic shell sessions.
*New in Pygments 1.1.*
"""
name = 'Bash Session'
aliases = ['console']
filenames = ['*.sh-session']
mimetypes = ['application/x-shell-session']
def get_tokens_unprocessed(self, text):
bashlexer = BashLexer(**self.options)
pos = 0
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
m = re.match(r'^((?:\(\S+\))?(?:|sh\S*?|\w+\S+[@:]\S+(?:\s+\S+)'
r'?|\[\S+[@:][^\n]+\].+)[$#%])(.*\n?)' , line)
if m:
# To support output lexers (say diff output), the output
# needs to be broken by prompts whenever the output lexer
# changes.
if not insertions:
pos = match.start()
insertions.append((len(curcode),
[(0, Generic.Prompt, m.group(1))]))
curcode += m.group(2)
elif line.startswith('>'):
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:1])]))
curcode += line[1:]
else:
if insertions:
toks = bashlexer.get_tokens_unprocessed(curcode)
for i, t, v in do_insertions(insertions, toks):
yield pos+i, t, v
yield match.start(), Generic.Output, line
insertions = []
curcode = ''
if insertions:
for i, t, v in do_insertions(insertions,
bashlexer.get_tokens_unprocessed(curcode)):
yield pos+i, t, v
class ShellSessionLexer(Lexer):
"""
Lexer for shell sessions that works with different command prompts
*New in Pygments 1.6.*
"""
name = 'Shell Session'
aliases = ['shell-session']
filenames = ['*.shell-session']
mimetypes = ['application/x-sh-session']
def get_tokens_unprocessed(self, text):
bashlexer = BashLexer(**self.options)
pos = 0
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
m = re.match(r'^((?:\[?\S+@[^$#%]+)[$#%])(.*\n?)', line)
if m:
# To support output lexers (say diff output), the output
# needs to be broken by prompts whenever the output lexer
# changes.
if not insertions:
pos = match.start()
insertions.append((len(curcode),
[(0, Generic.Prompt, m.group(1))]))
curcode += m.group(2)
else:
if insertions:
toks = bashlexer.get_tokens_unprocessed(curcode)
for i, t, v in do_insertions(insertions, toks):
yield pos+i, t, v
yield match.start(), Generic.Output, line
insertions = []
curcode = ''
if insertions:
for i, t, v in do_insertions(insertions,
bashlexer.get_tokens_unprocessed(curcode)):
yield pos+i, t, v
class BatchLexer(RegexLexer):
"""
Lexer for the DOS/Windows Batch file format.
*New in Pygments 0.7.*
"""
name = 'Batchfile'
aliases = ['bat', 'dosbatch', 'winbatch']
filenames = ['*.bat', '*.cmd']
mimetypes = ['application/x-dos-batch']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
# Lines can start with @ to prevent echo
(r'^\s*@', Punctuation),
(r'^(\s*)(rem\s.*)$', bygroups(Text, Comment)),
(r'".*?"', String.Double),
(r"'.*?'", String.Single),
# If made more specific, make sure you still allow expansions
# like %~$VAR:zlt
(r'%%?[~$:\w]+%?', Name.Variable),
(r'::.*', Comment), # Technically :: only works at BOL
(r'(set)(\s+)(\w+)', bygroups(Keyword, Text, Name.Variable)),
(r'(call)(\s+)(:\w+)', bygroups(Keyword, Text, Name.Label)),
(r'(goto)(\s+)(\w+)', bygroups(Keyword, Text, Name.Label)),
(r'\b(set|call|echo|on|off|endlocal|for|do|goto|if|pause|'
r'setlocal|shift|errorlevel|exist|defined|cmdextversion|'
r'errorlevel|else|cd|md|del|deltree|cls|choice)\b', Keyword),
(r'\b(equ|neq|lss|leq|gtr|geq)\b', Operator),
include('basic'),
(r'.', Text),
],
'echo': [
# Escapes only valid within echo args?
(r'\^\^|\^<|\^>|\^\|', String.Escape),
(r'\n', Text, '#pop'),
include('basic'),
(r'[^\'"^]+', Text),
],
'basic': [
(r'".*?"', String.Double),
(r"'.*?'", String.Single),
(r'`.*?`', String.Backtick),
(r'-?\d+', Number),
(r',', Punctuation),
(r'=', Operator),
(r'/\S+', Name),
(r':\w+', Name.Label),
(r'\w:\w+', Text),
(r'([<>|])(\s*)(\w+)', bygroups(Punctuation, Text, Name)),
],
}
class TcshLexer(RegexLexer):
"""
Lexer for tcsh scripts.
*New in Pygments 0.10.*
"""
name = 'Tcsh'
aliases = ['tcsh', 'csh']
filenames = ['*.tcsh', '*.csh']
mimetypes = ['application/x-csh']
tokens = {
'root': [
include('basic'),
(r'\$\(', Keyword, 'paren'),
(r'\${#?', Keyword, 'curly'),
(r'`', String.Backtick, 'backticks'),
include('data'),
],
'basic': [
(r'\b(if|endif|else|while|then|foreach|case|default|'
r'continue|goto|breaksw|end|switch|endsw)\s*\b',
Keyword),
(r'\b(alias|alloc|bg|bindkey|break|builtins|bye|caller|cd|chdir|'
r'complete|dirs|echo|echotc|eval|exec|exit|fg|filetest|getxvers|'
r'glob|getspath|hashstat|history|hup|inlib|jobs|kill|'
r'limit|log|login|logout|ls-F|migrate|newgrp|nice|nohup|notify|'
r'onintr|popd|printenv|pushd|rehash|repeat|rootnode|popd|pushd|'
r'set|shift|sched|setenv|setpath|settc|setty|setxvers|shift|'
r'source|stop|suspend|source|suspend|telltc|time|'
r'umask|unalias|uncomplete|unhash|universe|unlimit|unset|unsetenv|'
r'ver|wait|warp|watchlog|where|which)\s*\b',
Name.Builtin),
(r'#.*\n', Comment),
(r'\\[\w\W]', String.Escape),
(r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
(r'[\[\]{}()=]+', Operator),
(r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
],
'data': [
(r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
(r'\s+', Text),
(r'[^=\s\[\]{}()$"\'`\\]+', Text),
(r'\d+(?= |\Z)', Number),
(r'\$#?(\w+|.)', Name.Variable),
],
'curly': [
(r'}', Keyword, '#pop'),
(r':-', Keyword),
(r'[a-zA-Z0-9_]+', Name.Variable),
(r'[^}:"\'`$]+', Punctuation),
(r':', Punctuation),
include('root'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('root'),
],
'backticks': [
(r'`', String.Backtick, '#pop'),
include('root'),
],
}
class PowerShellLexer(RegexLexer):
"""
For Windows PowerShell code.
*New in Pygments 1.5.*
"""
name = 'PowerShell'
aliases = ['powershell', 'posh', 'ps1', 'psm1']
filenames = ['*.ps1','*.psm1']
mimetypes = ['text/x-powershell']
flags = re.DOTALL | re.IGNORECASE | re.MULTILINE
keywords = (
'while validateset validaterange validatepattern validatelength '
'validatecount until trap switch return ref process param parameter in '
'if global: function foreach for finally filter end elseif else '
'dynamicparam do default continue cmdletbinding break begin alias \\? '
'% #script #private #local #global mandatory parametersetname position '
'valuefrompipeline valuefrompipelinebypropertyname '
'valuefromremainingarguments helpmessage try catch throw').split()
operators = (
'and as band bnot bor bxor casesensitive ccontains ceq cge cgt cle '
'clike clt cmatch cne cnotcontains cnotlike cnotmatch contains '
'creplace eq exact f file ge gt icontains ieq ige igt ile ilike ilt '
'imatch ine inotcontains inotlike inotmatch ireplace is isnot le like '
'lt match ne not notcontains notlike notmatch or regex replace '
'wildcard').split()
verbs = (
'write where wait use update unregister undo trace test tee take '
'suspend stop start split sort skip show set send select scroll resume '
'restore restart resolve resize reset rename remove register receive '
'read push pop ping out new move measure limit join invoke import '
'group get format foreach export expand exit enter enable disconnect '
'disable debug cxnew copy convertto convertfrom convert connect '
'complete compare clear checkpoint aggregate add').split()
commenthelp = (
'component description example externalhelp forwardhelpcategory '
'forwardhelptargetname functionality inputs link '
'notes outputs parameter remotehelprunspace role synopsis').split()
tokens = {
'root': [
# we need to count pairs of parentheses for correct highlight
# of '$(...)' blocks in strings
(r'\(', Punctuation, 'child'),
(r'\s+', Text),
(r'^(\s*#[#\s]*)(\.(?:%s))([^\n]*$)' % '|'.join(commenthelp),
bygroups(Comment, String.Doc, Comment)),
(r'#[^\n]*?$', Comment),
(r'(<|<)#', Comment.Multiline, 'multline'),
(r'@"\n', String.Heredoc, 'heredoc-double'),
(r"@'\n.*?\n'@", String.Heredoc),
# escaped syntax
(r'`[\'"$@-]', Punctuation),
(r'"', String.Double, 'string'),
(r"'([^']|'')*'", String.Single),
(r'(\$|@@|@)((global|script|private|env):)?[a-z0-9_]+',
Name.Variable),
(r'(%s)\b' % '|'.join(keywords), Keyword),
(r'-(%s)\b' % '|'.join(operators), Operator),
(r'(%s)-[a-z_][a-z0-9_]*\b' % '|'.join(verbs), Name.Builtin),
(r'\[[a-z_\[][a-z0-9_. `,\[\]]*\]', Name.Constant), # .net [type]s
(r'-[a-z_][a-z0-9_]*', Name),
(r'\w+', Name),
(r'[.,;@{}\[\]$()=+*/\\&%!~?^`|<>-]|::', Punctuation),
],
'child': [
(r'\)', Punctuation, '#pop'),
include('root'),
],
'multline': [
(r'[^#&.]+', Comment.Multiline),
(r'#(>|>)', Comment.Multiline, '#pop'),
(r'\.(%s)' % '|'.join(commenthelp), String.Doc),
(r'[#&.]', Comment.Multiline),
],
'string': [
(r"`[0abfnrtv'\"\$]", String.Escape),
(r'[^$`"]+', String.Double),
(r'\$\(', Punctuation, 'child'),
(r'""', String.Double),
(r'[`$]', String.Double),
(r'"', String.Double, '#pop'),
],
'heredoc-double': [
(r'\n"@', String.Heredoc, '#pop'),
(r'\$\(', Punctuation, 'child'),
(r'[^@\n]+"]', String.Heredoc),
(r".", String.Heredoc),
]
}
| mit |
marclaporte/fail2ban | fail2ban/tests/clientreadertestcase.py | 2 | 21517 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
__author__ = "Cyril Jaquier, Yaroslav Halchenko"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2011-2013 Yaroslav Halchenko"
__license__ = "GPL"
import os, glob, shutil, tempfile, unittest
from ..client.configreader import ConfigReader
from ..client.jailreader import JailReader
from ..client.filterreader import FilterReader
from ..client.jailsreader import JailsReader
from ..client.actionreader import ActionReader
from ..client.configurator import Configurator
from .utils import LogCaptureTestCase
TEST_FILES_DIR = os.path.join(os.path.dirname(__file__), "files")
STOCK = os.path.exists(os.path.join('config','fail2ban.conf'))
CONFIG_DIR='config' if STOCK else '/etc/fail2ban'
IMPERFECT_CONFIG = os.path.join(os.path.dirname(__file__), 'config')
class ConfigReaderTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
self.d = tempfile.mkdtemp(prefix="f2b-temp")
self.c = ConfigReader(basedir=self.d)
def tearDown(self):
"""Call after every test case."""
shutil.rmtree(self.d)
def _write(self, fname, value=None, content=None):
        # create the enclosing directory first if needed (e.g. for .d/ overrides)
if os.path.sep in fname:
d = os.path.dirname(fname)
d_ = os.path.join(self.d, d)
if not os.path.exists(d_):
os.makedirs(d_)
f = open("%s/%s" % (self.d, fname), "w")
if value is not None:
f.write("""
[section]
option = %s
""" % value)
if content is not None:
f.write(content)
f.close()
def _remove(self, fname):
os.unlink("%s/%s" % (self.d, fname))
self.assertTrue(self.c.read('c')) # we still should have some
def _getoption(self, f='c'):
self.assertTrue(self.c.read(f)) # we got some now
return self.c.getOptions('section', [("int", 'option')])['option']
def testInaccessibleFile(self):
f = os.path.join(self.d, "d.conf") # inaccessible file
self._write('d.conf', 0)
self.assertEqual(self._getoption('d'), 0)
os.chmod(f, 0)
        # Fragile test, known to fail e.g. under Cygwin where permissions
        # do not seem to be enforced, hence the condition.
if not os.access(f, os.R_OK):
self.assertFalse(self.c.read('d')) # should not be readable BUT present
else:
# SkipTest introduced only in 2.7 thus can't yet use generally
# raise unittest.SkipTest("Skipping on %s -- access rights are not enforced" % platform)
pass
def testOptionalDotDDir(self):
self.assertFalse(self.c.read('c')) # nothing is there yet
self._write("c.conf", "1")
self.assertEqual(self._getoption(), 1)
self._write("c.conf", "2") # overwrite
self.assertEqual(self._getoption(), 2)
self._write("c.d/98.conf", "998") # add 1st override in .d/
self.assertEqual(self._getoption(), 998)
self._write("c.d/90.conf", "990") # add previously sorted override in .d/
self.assertEqual(self._getoption(), 998) # should stay the same
self._write("c.d/99.conf", "999") # now override in a way without sorting we possibly get a failure
self.assertEqual(self._getoption(), 999)
self._write("c.local", "3") # add override in .local
self.assertEqual(self._getoption(), 3)
self._write("c.d/1.local", "4") # add override in .local
self.assertEqual(self._getoption(), 4)
self._remove("c.d/1.local")
self._remove("c.local")
self.assertEqual(self._getoption(), 999)
self._remove("c.d/99.conf")
self.assertEqual(self._getoption(), 998)
self._remove("c.d/98.conf")
self.assertEqual(self._getoption(), 990)
self._remove("c.d/90.conf")
self.assertEqual(self._getoption(), 2)
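    # Net effect exercised above (read order; later sources win):
    #   c.conf  <  c.d/*.conf (lexicographically sorted)  <  c.local  <  c.d/*.local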
def testInterpolations(self):
self.assertFalse(self.c.read('i')) # nothing is there yet
self._write("i.conf", value=None, content="""
[DEFAULT]
b = a
zz = the%(__name__)s
[section]
y = 4%(b)s
e = 5${b}
z = %(__name__)s
[section2]
z = 3%(__name__)s
""")
self.assertTrue(self.c.read('i'))
self.assertEqual(self.c.sections(), ['section', 'section2'])
self.assertEqual(self.c.get('section', 'y'), '4a') # basic interpolation works
self.assertEqual(self.c.get('section', 'e'), '5${b}') # no extended interpolation
self.assertEqual(self.c.get('section', 'z'), 'section') # __name__ works
self.assertEqual(self.c.get('section', 'zz'), 'thesection') # __name__ works even 'delayed'
self.assertEqual(self.c.get('section2', 'z'), '3section2') # and differs per section ;)
def testComments(self):
self.assertFalse(self.c.read('g')) # nothing is there yet
self._write("g.conf", value=None, content="""
[DEFAULT]
# A comment
b = a
c = d ;in line comment
""")
self.assertTrue(self.c.read('g'))
self.assertEqual(self.c.get('DEFAULT', 'b'), 'a')
self.assertEqual(self.c.get('DEFAULT', 'c'), 'd')
class JailReaderTest(LogCaptureTestCase):
def testIncorrectJail(self):
jail = JailReader('XXXABSENTXXX', basedir=CONFIG_DIR)
self.assertRaises(ValueError, jail.read)
def testJailActionEmpty(self):
jail = JailReader('emptyaction', basedir=IMPERFECT_CONFIG)
self.assertTrue(jail.read())
self.assertTrue(jail.getOptions())
self.assertTrue(jail.isEnabled())
self.assertTrue(self._is_logged('No filter set for jail emptyaction'))
self.assertTrue(self._is_logged('No actions were defined for emptyaction'))
def testJailActionFilterMissing(self):
jail = JailReader('missingbitsjail', basedir=IMPERFECT_CONFIG)
self.assertTrue(jail.read())
self.assertFalse(jail.getOptions())
self.assertTrue(jail.isEnabled())
self.assertTrue(self._is_logged("Found no accessible config files for 'filter.d/catchallthebadies' under %s" % IMPERFECT_CONFIG))
self.assertTrue(self._is_logged('Unable to read the filter'))
def TODOtestJailActionBrokenDef(self):
jail = JailReader('brokenactiondef', basedir=IMPERFECT_CONFIG)
self.assertTrue(jail.read())
self.assertFalse(jail.getOptions())
self.assertTrue(jail.isEnabled())
self.printLog()
self.assertTrue(self._is_logged('Error in action definition joho[foo'))
self.assertTrue(self._is_logged('Caught exception: While reading action joho[foo we should have got 1 or 2 groups. Got: 0'))
if STOCK:
def testStockSSHJail(self):
jail = JailReader('sshd', basedir=CONFIG_DIR) # we are running tests from root project dir atm
self.assertTrue(jail.read())
self.assertTrue(jail.getOptions())
self.assertFalse(jail.isEnabled())
self.assertEqual(jail.getName(), 'sshd')
jail.setName('ssh-funky-blocker')
self.assertEqual(jail.getName(), 'ssh-funky-blocker')
def testSplitOption(self):
# Simple example
option = "mail-whois[name=SSH]"
expected = ('mail-whois', {'name': 'SSH'})
result = JailReader.extractOptions(option)
self.assertEqual(expected, result)
self.assertEqual(('mail.who_is', {}), JailReader.extractOptions("mail.who_is"))
self.assertEqual(('mail.who_is', {'a':'cat', 'b':'dog'}), JailReader.extractOptions("mail.who_is[a=cat,b=dog]"))
self.assertEqual(('mail--ho_is', {}), JailReader.extractOptions("mail--ho_is"))
self.assertEqual(('mail--ho_is', {}), JailReader.extractOptions("mail--ho_is['s']"))
#self.printLog()
#self.assertTrue(self._is_logged("Invalid argument ['s'] in ''s''"))
self.assertEqual(('mail', {'a': ','}), JailReader.extractOptions("mail[a=',']"))
#self.assertRaises(ValueError, JailReader.extractOptions ,'mail-how[')
# Empty option
option = "abc[]"
expected = ('abc', {})
result = JailReader.extractOptions(option)
self.assertEqual(expected, result)
# More complex examples
option = 'option[opt01=abc,opt02="123",opt03="with=okay?",opt04="andwith,okay...",opt05="how about spaces",opt06="single\'in\'double",opt07=\'double"in"single\', opt08= leave some space, opt09=one for luck, opt10=, opt11=]'
expected = ('option', {
'opt01': "abc",
'opt02': "123",
'opt03': "with=okay?",
'opt04': "andwith,okay...",
'opt05': "how about spaces",
'opt06': "single'in'double",
'opt07': "double\"in\"single",
'opt08': "leave some space",
'opt09': "one for luck",
'opt10': "",
'opt11': "",
})
result = JailReader.extractOptions(option)
self.assertEqual(expected, result)
def testGlob(self):
d = tempfile.mkdtemp(prefix="f2b-temp")
# Generate few files
# regular file
f1 = os.path.join(d, 'f1')
open(f1, 'w').close()
# dangling link
f2 = os.path.join(d, 'f2')
os.symlink('nonexisting',f2)
# must be only f1
self.assertEqual(JailReader._glob(os.path.join(d, '*')), [f1])
# since f2 is dangling -- empty list
self.assertEqual(JailReader._glob(f2), [])
self.assertTrue(self._is_logged('File %s is a dangling link, thus cannot be monitored' % f2))
self.assertEqual(JailReader._glob(os.path.join(d, 'nonexisting')), [])
os.remove(f1)
os.remove(f2)
os.rmdir(d)
class FilterReaderTest(unittest.TestCase):
def testConvert(self):
output = [['set', 'testcase01', 'addfailregex',
"^\\s*(?:\\S+ )?(?:kernel: \\[\\d+\\.\\d+\\] )?(?:@vserver_\\S+ )"
"?(?:(?:\\[\\d+\\])?:\\s+[\\[\\(]?sshd(?:\\(\\S+\\))?[\\]\\)]?:?|"
"[\\[\\(]?sshd(?:\\(\\S+\\))?[\\]\\)]?:?(?:\\[\\d+\\])?:)?\\s*(?:"
"error: PAM: )?Authentication failure for .* from <HOST>\\s*$"],
['set', 'testcase01', 'addfailregex',
"^\\s*(?:\\S+ )?(?:kernel: \\[\\d+\\.\\d+\\] )?(?:@vserver_\\S+ )"
"?(?:(?:\\[\\d+\\])?:\\s+[\\[\\(]?sshd(?:\\(\\S+\\))?[\\]\\)]?:?|"
"[\\[\\(]?sshd(?:\\(\\S+\\))?[\\]\\)]?:?(?:\\[\\d+\\])?:)?\\s*(?:"
"error: PAM: )?User not known to the underlying authentication mo"
"dule for .* from <HOST>\\s*$"],
['set', 'testcase01', 'addfailregex',
"^\\s*(?:\\S+ )?(?:kernel: \\[\\d+\\.\\d+\\] )?(?:@vserver_\\S+ )"
"?(?:(?:\\[\\d+\\])?:\\s+[\\[\\(]?sshd(?:\\(\\S+\\))?[\\]\\)]?:?|"
"[\\[\\(]?sshd(?:\\(\\S+\\))?[\\]\\)]?:?(?:\\[\\d+\\])?:)?\\s*(?:"
"error: PAM: )?User not known to the\\nunderlying authentication."
"+$<SKIPLINES>^.+ module for .* from <HOST>\\s*$"],
['set', 'testcase01', 'addignoreregex',
"^.+ john from host 192.168.1.1\\s*$"],
['set', 'testcase01', 'addjournalmatch',
"_COMM=sshd", "+", "_SYSTEMD_UNIT=sshd.service", "_UID=0"],
['set', 'testcase01', 'addjournalmatch',
"FIELD= with spaces ", "+", "AFIELD= with + char and spaces"],
['set', 'testcase01', 'datepattern', "%Y %m %d %H:%M:%S"],
            ['set', 'testcase01', 'maxlines', "1"], # Last for override test
]
filterReader = FilterReader("testcase01", "testcase01", {})
filterReader.setBaseDir(TEST_FILES_DIR)
filterReader.read()
#filterReader.getOptions(["failregex", "ignoreregex"])
filterReader.getOptions(None)
# Add sort as configreader uses dictionary and therefore order
# is unreliable
self.assertEqual(sorted(filterReader.convert()), sorted(output))
filterReader = FilterReader(
"testcase01", "testcase01", {'maxlines': "5"})
filterReader.setBaseDir(TEST_FILES_DIR)
filterReader.read()
#filterReader.getOptions(["failregex", "ignoreregex"])
filterReader.getOptions(None)
output[-1][-1] = "5"
self.assertEqual(sorted(filterReader.convert()), sorted(output))
def testFilterReaderSubstitionDefault(self):
output = [['set', 'jailname', 'addfailregex', '[email protected] fromip=<IP>']]
filterReader = FilterReader('substition', "jailname", {})
filterReader.setBaseDir(TEST_FILES_DIR)
filterReader.read()
filterReader.getOptions(None)
c = filterReader.convert()
self.assertEqual(sorted(c), sorted(output))
def testFilterReaderSubstitionSet(self):
output = [['set', 'jailname', 'addfailregex', '[email protected] fromip=<IP>']]
filterReader = FilterReader('substition', "jailname", {'honeypot': '[email protected]'})
filterReader.setBaseDir(TEST_FILES_DIR)
filterReader.read()
filterReader.getOptions(None)
c = filterReader.convert()
self.assertEqual(sorted(c), sorted(output))
def testFilterReaderSubstitionFail(self):
filterReader = FilterReader('substition', "jailname", {'honeypot': '<sweet>', 'sweet': '<honeypot>'})
filterReader.setBaseDir(TEST_FILES_DIR)
filterReader.read()
filterReader.getOptions(None)
self.assertRaises(ValueError, FilterReader.convert, filterReader)
class JailsReaderTest(LogCaptureTestCase):
def testProvidingBadBasedir(self):
if not os.path.exists('/XXX'):
reader = JailsReader(basedir='/XXX')
self.assertRaises(ValueError, reader.read)
def testReadTestJailConf(self):
jails = JailsReader(basedir=IMPERFECT_CONFIG)
self.assertTrue(jails.read())
self.assertFalse(jails.getOptions())
self.assertRaises(ValueError, jails.convert)
comm_commands = jails.convert(allow_no_files=True)
self.maxDiff = None
self.assertEqual(sorted(comm_commands),
sorted([['add', 'emptyaction', 'auto'],
['add', 'missinglogfiles', 'auto'],
['set', 'missinglogfiles', 'addfailregex', '<IP>'],
['add', 'brokenaction', 'auto'],
['set', 'brokenaction', 'addfailregex', '<IP>'],
['set', 'brokenaction', 'addaction', 'brokenaction'],
['set',
'brokenaction',
'action',
'brokenaction',
'actionban',
'hit with big stick <ip>'],
['add', 'parse_to_end_of_jail.conf', 'auto'],
['set', 'parse_to_end_of_jail.conf', 'addfailregex', '<IP>'],
['start', 'emptyaction'],
['start', 'missinglogfiles'],
['start', 'brokenaction'],
['start', 'parse_to_end_of_jail.conf'],]))
self.assertTrue(self._is_logged("Errors in jail 'missingbitsjail'. Skipping..."))
self.assertTrue(self._is_logged("No file(s) found for glob /weapons/of/mass/destruction"))
if STOCK:
def testReadStockJailConf(self):
jails = JailsReader(basedir=CONFIG_DIR) # we are running tests from root project dir atm
self.assertTrue(jails.read()) # opens fine
self.assertTrue(jails.getOptions()) # reads fine
comm_commands = jails.convert()
# by default None of the jails is enabled and we get no
# commands to communicate to the server
self.assertEqual(comm_commands, [])
# TODO: make sure this is handled well
## We should not "read" some bogus jail
#old_comm_commands = comm_commands[:] # make a copy
#self.assertRaises(ValueError, jails.getOptions, "BOGUS")
#self.printLog()
#self.assertTrue(self._is_logged("No section: 'BOGUS'"))
## and there should be no side-effects
#self.assertEqual(jails.convert(), old_comm_commands)
allFilters = set()
# All jails must have filter and action set
# TODO: evolve into a parametric test
for jail in jails.sections():
if jail == 'INCLUDES':
continue
filterName = jails.get(jail, 'filter')
allFilters.add(filterName)
self.assertTrue(len(filterName))
# moreover we must have a file for it
# and it must be readable as a Filter
filterReader = FilterReader(filterName, jail, {})
filterReader.setBaseDir(CONFIG_DIR)
self.assertTrue(filterReader.read(),"Failed to read filter:" + filterName) # opens fine
filterReader.getOptions({}) # reads fine
# test if filter has failregex set
self.assertTrue(filterReader._opts.get('failregex', '').strip())
actions = jails.get(jail, 'action')
self.assertTrue(len(actions.strip()))
# somewhat duplicating here what is done in JailsReader if
# the jail is enabled
for act in actions.split('\n'):
actName, actOpt = JailReader.extractOptions(act)
self.assertTrue(len(actName))
self.assertTrue(isinstance(actOpt, dict))
if actName == 'iptables-multiport':
self.assertTrue('port' in actOpt)
actionReader = ActionReader(
actName, jail, {}, basedir=CONFIG_DIR)
self.assertTrue(actionReader.read())
actionReader.getOptions({}) # populate _opts
cmds = actionReader.convert()
self.assertTrue(len(cmds))
# all must have some actionban
self.assertTrue(actionReader._opts.get('actionban', '').strip())
# Verify that all filters found under config/ have a jail
def testReadStockJailFilterComplete(self):
jails = JailsReader(basedir=CONFIG_DIR, force_enable=True)
self.assertTrue(jails.read()) # opens fine
self.assertTrue(jails.getOptions()) # reads fine
# grab all filter names
filters = set(os.path.splitext(os.path.split(a)[1])[0]
for a in glob.glob(os.path.join('config', 'filter.d', '*.conf'))
if not a.endswith('common.conf'))
filters_jail = set(jail.options['filter'] for jail in jails.jails)
self.maxDiff = None
self.assertTrue(filters.issubset(filters_jail),
"More filters exists than are referenced in stock jail.conf %r" % filters.difference(filters_jail))
self.assertTrue(filters_jail.issubset(filters),
"Stock jail.conf references non-existent filters %r" % filters_jail.difference(filters))
def testReadStockJailConfForceEnabled(self):
# more of a smoke test to make sure that no obvious surprises
# on users' systems when enabling shipped jails
jails = JailsReader(basedir=CONFIG_DIR, force_enable=True) # we are running tests from root project dir atm
self.assertTrue(jails.read()) # opens fine
self.assertTrue(jails.getOptions()) # reads fine
comm_commands = jails.convert(allow_no_files=True)
# by default we have lots of jails ;)
self.assertTrue(len(comm_commands))
# and we know even some of them by heart
for j in ['sshd', 'recidive']:
# by default we have 'auto' backend ATM
self.assertTrue(['add', j, 'auto'] in comm_commands)
# and warn on useDNS
self.assertTrue(['set', j, 'usedns', 'warn'] in comm_commands)
self.assertTrue(['start', j] in comm_commands)
# last commands should be the 'start' commands
self.assertEqual(comm_commands[-1][0], 'start')
for j in jails._JailsReader__jails:
actions = j._JailReader__actions
jail_name = j.getName()
# make sure that all of the jails have actions assigned,
# otherwise it makes little to no sense
self.assertTrue(len(actions),
msg="No actions found for jail %s" % jail_name)
# Test for presence of blocktype (in relation to gh-232)
for action in actions:
commands = action.convert()
action_name = action.getName()
if '<blocktype>' in str(commands):
# Verify that it is among cInfo
self.assertTrue('blocktype' in action._initOpts)
# Verify that we have a call to set it up
blocktype_present = False
target_command = ['set', jail_name, 'action', action_name, 'blocktype']
for command in commands:
if (len(command) > 5 and
command[:5] == target_command):
blocktype_present = True
continue
self.assertTrue(
blocktype_present,
msg="Found no %s command among %s"
% (target_command, str(commands)) )
def testStockConfigurator(self):
configurator = Configurator()
configurator.setBaseDir(CONFIG_DIR)
self.assertEqual(configurator.getBaseDir(), CONFIG_DIR)
configurator.readEarly()
opts = configurator.getEarlyOptions()
# our current default settings
self.assertEqual(opts['socket'], '/var/run/fail2ban/fail2ban.sock')
self.assertEqual(opts['pidfile'], '/var/run/fail2ban/fail2ban.pid')
configurator.getOptions()
configurator.convertToProtocol()
commands = configurator.getConfigStream()
# and there is logging information left to be passed into the
# server
self.assertEqual(sorted(commands),
[['set', 'dbfile',
'/var/lib/fail2ban/fail2ban.sqlite3'],
['set', 'dbpurgeage', 86400],
['set', 'loglevel', "INFO"],
['set', 'logtarget', '/var/log/fail2ban.log']])
# and if we force change configurator's fail2ban's baseDir
# there should be an error message (test visually ;) --
# otherwise just a code smoke test)
configurator._Configurator__jails.setBaseDir('/tmp')
self.assertEqual(configurator._Configurator__jails.getBaseDir(), '/tmp')
self.assertEqual(configurator.getBaseDir(), CONFIG_DIR)
def testMultipleSameAction(self):
basedir = tempfile.mkdtemp("fail2ban_conf")
os.mkdir(os.path.join(basedir, "filter.d"))
os.mkdir(os.path.join(basedir, "action.d"))
open(os.path.join(basedir, "action.d", "testaction1.conf"), 'w').close()
open(os.path.join(basedir, "filter.d", "testfilter1.conf"), 'w').close()
jailfd = open(os.path.join(basedir, "jail.conf"), 'w')
jailfd.write("""
[testjail1]
enabled = true
action = testaction1[actname=test1]
testaction1[actname=test2]
testaction.py
testaction.py[actname=test3]
filter = testfilter1
""")
jailfd.close()
jails = JailsReader(basedir=basedir)
self.assertTrue(jails.read())
self.assertTrue(jails.getOptions())
comm_commands = jails.convert(allow_no_files=True)
add_actions = [comm[3:] for comm in comm_commands
if comm[:3] == ['set', 'testjail1', 'addaction']]
self.assertEqual(len(set(action[0] for action in add_actions)), 4)
# Python actions should not be passed `actname`
self.assertEqual(add_actions[-1][-1], "{}")
shutil.rmtree(basedir)
| gpl-2.0 |
mlperf/training_results_v0.6 | Google/benchmarks/ssd/implementations/tpu-v3-1024-ssd/ssd/topk_mask.py | 6 | 6592 | # Copyright 2018 Google. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Efficient implementation of topk_mask for TPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def topk_mask(score, k):
"""Efficient implementation of topk_mask for TPUs.
This is a more efficient implementation of the following snippet with support
for higher rank tensors. It has the limitation that it only supports float32
as element type. The mask only contains k elements even if other elements
have the same value as the kth largest.
def topk_mask(score, k):
_, indices = tf.nn.top_k(score, k=k)
return tf.scatter_nd(tf.expand_dims(indices, -1), tf.ones(k),
tf.squeeze(score).shape.as_list())
The implementation binary searches for the kth value along each row of the
input and once the kth value is found it creates the mask via a single select
instruction. This approach is more than 100x faster on TPUs for large inputs
compared with the above snippet.
Args:
score: 1-D or higher Tensor with last dimension at least k.
k: Number of top elements to look for along the last dimension (along each
row for matrices).
"""
last_dim_size = score.get_shape().as_list()[-1]
  # Choose top k+epsilon where epsilon is the number of times the k'th largest
# element is present in the input.
topk_mask_with_duplicate = topk_mask_internal(score, k)
  # Calculate the number of redundant duplicate values to discard.
select_num = tf.cast(
tf.reduce_sum(topk_mask_with_duplicate, axis=-1, keepdims=True), tf.int32)
  redundant_num = select_num - k
# softmax cross entropy value range [0, 1].
# k's largest value is the smallest value being selected.
k_th_value = tf.reduce_min(
tf.where(
tf.cast(topk_mask_with_duplicate, tf.bool), score,
tf.ones_like(score) * 2.0),
axis=-1,
keepdims=True)
  # Mask to indicate whether a score equals the k-th largest value.
equal_k_th_value = tf.equal(score, k_th_value)
  # Create a tensor where the value is 1 if the score equals the k-th largest
  # value and 0 otherwise.
k_th_value = tf.where(equal_k_th_value, tf.ones_like(score, dtype=tf.int32),
tf.zeros_like(score, dtype=tf.int32))
index = tf.range(last_dim_size)
k_th_value_index = tf.multiply(k_th_value, index)
duplicate_mask = topk_mask_internal(
      tf.cast(k_th_value_index, tf.float32), redundant_num)
return tf.where(
tf.cast(duplicate_mask, tf.bool), tf.zeros_like(topk_mask_with_duplicate),
topk_mask_with_duplicate)
def topk_mask_internal(score, k):
"""Efficient implementation of topk_mask for TPUs.
This is a more efficient implementation of the following snippet with support
for higher rank tensors. It has the limitation that it only supports float32
as element type. The mask may contain more than k elements if other elements
have the same value as the kth largest.
The implementation binary searches for the kth value along each row of the
input and once the kth value is found it creates the mask via a single select
instruction. This approach is more than 100x faster on TPUs for large inputs
compared with the above snippet.
Args:
score: 1-D or higher Tensor with last dimension at least k.
k: Number of top elements to look for along the last dimension (along each
row for matrices).
"""
def larger_count(data, limit):
"""Number of elements larger than limit along the most minor dimension.
Args:
data: Rn tensor with the data to compare.
limit: Rn tensor with last dimension being 1 and rest of the dimensions
being same as for data.
Returns:
Rn tensor with same shape as limit and int32 as element type containing
      the number of elements larger than limit inside data.
"""
return tf.reduce_sum(
tf.cast(data > tf.broadcast_to(limit, data.shape), tf.int32),
axis=-1, keepdims=True)
# Predicate specifying if the kth value is negative or positive.
kth_negative = (larger_count(score, 0.0) < k)
# Value of the sign bit for each row.
limit_sign = tf.where(kth_negative,
tf.broadcast_to(1, kth_negative.shape),
tf.broadcast_to(0, kth_negative.shape))
# Initial value for the binary search with the sign bit set.
next_value = tf.bitwise.left_shift(limit_sign, 31)
def cond(bit_index, _):
return bit_index >= 0
def body(bit_index, value):
"""Body for the while loop executing the binary search.
Args:
bit_index: Index of the bit to be updated next.
value: Current value of the binary search separator. Stored as an int32
but bitcasted to a float32 for comparison.
Returns:
The updated value of bit_index and value
"""
# Calculate new value via `new_value = value | (1 << bit_index)`
new_value = tf.bitwise.bitwise_or(
value, tf.bitwise.left_shift(1, bit_index))
# Calculate number of values larger than new_value
larger = larger_count(score, tf.bitcast(new_value, tf.float32))
# Update next_value based on new_value. For positive numbers new_value is
# larger than value while for negative numbers it is the other way around.
next_value = tf.where(tf.logical_xor(larger >= k, kth_negative),
new_value, value)
return bit_index - 1, next_value
# Executes a binary search for the value of the limits. We run the loop 31
# times to calculate the 31 bits of the float32 value (the sign is calculated
# separately).
_, limit = tf.while_loop(cond, body, (30, next_value))
# Create a mask by comparing the individual values to the kth value and then
# selecting zero or one accordingly.
return tf.where(
score >= tf.broadcast_to(tf.bitcast(limit, tf.float32), score.shape),
tf.ones(score.shape), tf.zeros(score.shape))
| apache-2.0 |
RuudBurger/CouchPotatoServer | libs/guessit/transfo/guess_language.py | 94 | 1946 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit import Guess
from guessit.transfo import SingleNodeGuesser
from guessit.language import search_language
import logging
log = logging.getLogger(__name__)
def guess_language(string, node, skip=None):
if skip:
relative_skip = []
for entry in skip:
node_idx = entry['node_idx']
span = entry['span']
if node_idx == node.node_idx[:len(node_idx)]:
relative_span = (span[0] - node.offset + 1, span[1] - node.offset + 1)
relative_skip.append(relative_span)
skip = relative_skip
language, span, confidence = search_language(string, skip=skip)
if language:
return (Guess({'language': language},
confidence=confidence,
raw= string[span[0]:span[1]]),
span)
return None, None
guess_language.use_node = True
def process(mtree, *args, **kwargs):
SingleNodeGuesser(guess_language, None, log, *args, **kwargs).process(mtree)
# Note: 'language' is promoted to 'subtitleLanguage' in the post_process transfo
| gpl-3.0 |
WhySoGeeky/DroidPot | venv/lib/python2.7/site-packages/sphinx/util/osutil.py | 4 | 5151 | # -*- coding: utf-8 -*-
"""
sphinx.util.osutil
~~~~~~~~~~~~~~~~~~
Operating system-related utility functions for Sphinx.
:copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import os
import re
import sys
import time
import errno
import locale
import shutil
from os import path
import contextlib
from six import PY2, text_type
# Errnos that we need.
EEXIST = getattr(errno, 'EEXIST', 0)
ENOENT = getattr(errno, 'ENOENT', 0)
EPIPE = getattr(errno, 'EPIPE', 0)
EINVAL = getattr(errno, 'EINVAL', 0)
# SEP separates path elements in the canonical file names
#
# Define SEP as a manifest constant, not so much because we expect it to change
# in the future as to avoid the suspicion that a stray "/" in the code is a
# hangover from more *nix-oriented origins.
SEP = "/"
def os_path(canonicalpath):
return canonicalpath.replace(SEP, path.sep)
def relative_uri(base, to):
"""Return a relative URL from ``base`` to ``to``."""
if to.startswith(SEP):
return to
b2 = base.split(SEP)
t2 = to.split(SEP)
# remove common segments (except the last segment)
for x, y in zip(b2[:-1], t2[:-1]):
if x != y:
break
b2.pop(0)
t2.pop(0)
if b2 == t2:
# Special case: relative_uri('f/index.html','f/index.html')
# returns '', not 'index.html'
return ''
if len(b2) == 1 and t2 == ['']:
# Special case: relative_uri('f/index.html','f/') should
# return './', not ''
return '.' + SEP
return ('..' + SEP) * (len(b2)-1) + SEP.join(t2)
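# Worked examples (illustrative, derived from the logic above):
#   relative_uri('a/b/index.html', 'a/c/x.html')  -> '../c/x.html'
#   relative_uri('f/index.html', 'f/index.html')  -> ''
#   relative_uri('f/index.html', 'f/')            -> './'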
def ensuredir(path):
"""Ensure that a path exists."""
try:
os.makedirs(path)
except OSError as err:
# 0 for Jython/Win32
if err.errno not in [0, EEXIST]:
raise
# This function is the same as os.walk of Python 2.6, 2.7, 3.2 and 3.3, except
# for a customization that checks for UnicodeError.
# That customization is the obstacle to replacing this function with os.walk.
def walk(top, topdown=True, followlinks=False):
"""Backport of os.walk from 2.6, where the *followlinks* argument was
added.
"""
names = os.listdir(top)
dirs, nondirs = [], []
for name in names:
try:
fullpath = path.join(top, name)
except UnicodeError:
print('%s:: ERROR: non-ASCII filename not supported on this '
'filesystem encoding %r, skipped.' % (name, fs_encoding),
file=sys.stderr)
continue
if path.isdir(fullpath):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
fullpath = path.join(top, name)
if followlinks or not path.islink(fullpath):
for x in walk(fullpath, topdown, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
def mtimes_of_files(dirnames, suffix):
for dirname in dirnames:
for root, dirs, files in os.walk(dirname):
for sfile in files:
if sfile.endswith(suffix):
try:
yield path.getmtime(path.join(root, sfile))
except EnvironmentError:
pass
def movefile(source, dest):
"""Move a file, removing the destination if it exists."""
if os.path.exists(dest):
try:
os.unlink(dest)
except OSError:
pass
os.rename(source, dest)
def copytimes(source, dest):
"""Copy a file's modification times."""
st = os.stat(source)
if hasattr(os, 'utime'):
os.utime(dest, (st.st_atime, st.st_mtime))
def copyfile(source, dest):
"""Copy a file and its modification times, if possible."""
shutil.copyfile(source, dest)
try:
# don't do full copystat because the source may be read-only
copytimes(source, dest)
except OSError:
pass
no_fn_re = re.compile(r'[^a-zA-Z0-9_-]')
def make_filename(string):
return no_fn_re.sub('', string) or 'sphinx'
if PY2:
# strftime for unicode strings
def ustrftime(format, *args):
# if a locale is set, the time strings are encoded in the encoding
# given by LC_TIME; if that is available, use it
enc = locale.getlocale(locale.LC_TIME)[1] or 'utf-8'
return time.strftime(text_type(format).encode(enc), *args).decode(enc)
else:
ustrftime = time.strftime
def safe_relpath(path, start=None):
try:
return os.path.relpath(path, start)
except ValueError:
return path
fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
def abspath(pathdir):
pathdir = path.abspath(pathdir)
if isinstance(pathdir, bytes):
pathdir = pathdir.decode(fs_encoding)
return pathdir
def getcwd():
if hasattr(os, 'getcwdu'):
return os.getcwdu()
return os.getcwd()
@contextlib.contextmanager
def cd(target_dir):
cwd = getcwd()
try:
os.chdir(target_dir)
yield
finally:
os.chdir(cwd)
| mit |
NationalSecurityAgency/ghidra | Ghidra/Extensions/SleighDevTools/pcodetest/pcode_defs.py | 1 | 18035 | ## ###
# IP: GHIDRA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# The available pcode tests are recorded here as instances of the PCodeTest
# class; the 'name' entry identifies each test configuration.
PCodeTest({
'name': 'ARM',
'build_all': 1,
'build_exe': 1,
'qemu_command': 'qemu-arm',
'toolchain': 'ARM/arm-eabi',
'language_id': 'ARM:LE:32:v7',
'ccflags': '-L %(toolchain_dir)s/lib/gcc/arm-eabi/%(gcc_version)s -lgcc',
})
PCodeTest({
'name': 'ARM_BE',
'build_all': 1,
'build_exe': 1,
'qemu_command': 'qemu-armbe',
'toolchain': 'ARM/armbe-eabi',
'language_id': 'ARM:BE:32:v7',
'ccflags': '-mbig-endian -L %(toolchain_dir)s/lib/gcc/arm-eabi/%(gcc_version)s -lgcc',
'has_float': 0,
'has_double': 0,
'has_longlong': 0,
})
PCodeTest({
'name': 'ARM2',
'toolchain': 'ARM/arm-eabi',
'ccflags': '-mcpu=arm2 -L %(toolchain_dir)s/lib/gcc/arm-eabi/%(gcc_version)s -lgcc',
'language_id': 'ARM:LE:32:v7',
})
PCodeTest({
'name': 'ARMv5',
'build_all': 1,
'build_exe': 1,
'qemu_command': 'qemu-arm',
'toolchain': 'ARM/arm-eabi',
'language_id': 'ARM:LE:32:v5',
'ccflags': '-march=armv5 -L %(toolchain_dir)s/lib/gcc/arm-eabi/%(gcc_version)s -lgcc',
})
PCodeTest({
'name': 'ARM7',
'toolchain': 'ARM/arm-eabi',
'ccflags': '-mcpu=arm7 -L %(toolchain_dir)s/lib/gcc/arm-eabi/%(gcc_version)s -lgcc',
'language_id': 'ARM:LE:32:v7',
})
PCodeTest({
'name': 'ARM8',
'toolchain': 'ARM/arm-eabi',
'ccflags': '-mcpu=arm8 -L %(toolchain_dir)s/lib/gcc/arm-eabi/%(gcc_version)s -lgcc',
'language_id': 'ARM:LE:32:v7',
})
PCodeTest({
'name': 'ARM9',
'toolchain': 'ARM/arm-eabi',
'ccflags': '-mcpu=arm9 -L %(toolchain_dir)s/lib/gcc/arm-eabi/%(gcc_version)s -lgcc',
'language_id': 'ARM:LE:32:v7',
})
PCodeTest({
'name': 'ARM10e',
'build_all': 1,
'build_exe': 1,
'qemu_command': 'qemu-arm',
'toolchain': 'ARM/arm-eabi',
'ccflags': '-mcpu=arm10e -L %(toolchain_dir)s/lib/gcc/arm-eabi/%(gcc_version)s -lgcc',
'language_id': 'ARM:LE:32:v7',
})
PCodeTest({
'name': 'ARM_thumb',
'build_all': 1,
'build_exe': 1,
'qemu_command': 'qemu-arm -cpu cortex-a8',
'toolchain': 'ARM/arm-eabi',
'ccflags': '-mthumb -L %(toolchain_dir)s/lib/gcc/arm-eabi/%(gcc_version)s/thumb -lgcc',
'language_id': 'ARM:LE:32:v7',
})
PCodeTest({
'name': 'ARM_BE_thumb',
'build_all': 1,
'toolchain': 'ARM/armbe-eabi',
'ccflags': '-mthumb -mbig-endian -L %(toolchain_dir)s/lib/gcc/armbe-eabi/%(gcc_version)s/thumb -lgcc',
'language_id': 'ARM:BE:32:v7',
'has_float': 0,
'has_double': 0,
'has_longlong': 0,
})
PCodeTest({
'name': 'ARM_cortex',
'build_all': 1,
'build_exe': 1,
'qemu_command': 'qemu-arm -cpu cortex-a8',
'toolchain': 'ARM/arm-eabi',
'ccflags': '-mthumb -mcpu=cortex-a8 -mfloat-abi=softfp -L %(toolchain_dir)s/lib/gcc/arm-eabi/%(gcc_version)s/thumb -lgcc',
'language_id': 'ARM:LE:32:v7',
})
PCodeTest({
'name': 'AARCH64',
'build_all': 1,
'build_exe': 1,
'qemu_command': 'qemu-aarch64',
'toolchain': 'ARM/aarch64-elf',
'language_id': 'AARCH64:LE:64:v8A',
})
PCodeTest({
'name': 'AARCH64_ILP32',
'toolchain': 'ARM/aarch64-elf',
'ccflags': '-mabi=ilp32',
'language_id': 'AARCH64:LE:64:v8A',
})
PCodeTest({
'name': 'AARCH64_BE',
'build_all': 1,
'toolchain': 'ARM/aarch64_be-elf',
'language_id': 'AARCH64:BE:64:v8A',
})
PCodeTest({
'name': 'AARCH64_BE_ILP32',
'toolchain': 'ARM/aarch64_be-elf',
'ccflags': '-mabi=ilp32',
'language_id': 'AARCH64:BE:64:v8A',
})
PCodeTest({
'name': 'AVR',
'build_all': 1,
'toolchain': 'AVR/avr-elf',
'ccflags': '-mmcu=avr6 -lgcc',
'language_id': 'avr32:BE:32:default',
'processor': 'Atmel',
'has_float': 0,
'has_double': 0,
})
PCodeTest({
'name': 'AVR8_31',
'toolchain': 'AVR/avr-elf',
'ccflags': '-mmcu=avr31 -lgcc',
'language_id': 'avr8:LE:16:default',
'has_float': 0,
'has_double': 0,
'has_longlong': 0,
'small_build': 1,
})
PCodeTest({
'name': 'AVR8_51',
'toolchain': 'AVR/avr-elf',
'ccflags': '-mmcu=avr51 -lgcc',
'language_id': 'avr8:LE:16:extended',
'has_float': 0,
'has_double': 0,
'has_longlong': 0,
'small_build': 1,
})
PCodeTest({
'name': 'AVR8_6',
'toolchain': 'AVR/avr-elf',
'ccflags': '-mmcu=avr6 -lgcc',
'language_id': 'avr8:LE:16:atmega256',
'has_float': 0,
'has_double': 0,
'has_longlong': 0,
'small_build': 1,
})
PCodeTest({
'name': 'HCS12',
'toolchain': 'HCS12/m6812',
'language_id': 'HCS12:BE:16:default',
})
PCodeTest({
'name': 'HPPA1.1',
'build_all': 1,
'toolchain': 'HPPA/hppa-linux',
'ccflags': '-march=1.1 -static -mlong-calls -L %(toolchain_dir)s/lib/gcc/hppa-linux/%(gcc_version)s -lgcc',
'language_id': 'pa-risc:BE:32:default',
'processor': 'PA-RISC',
'architecture_test': 'PARISC',
})
# Note that libgcc.a was built for m68020 which has a different function calling convention from pre-68020
PCodeTest({
'name': 'm68000',
'build_all': 1,
'build_exe': 0,
'qemu_command': 'qemu-m68k', # qemu: fatal: Illegal instruction
'toolchain': 'm68k/m68k-elf',
'ccflags': '-mcpu=68020 -m68020 -L %(toolchain_dir)s/lib/gcc/m68k-elf/%(gcc_version)s -lgcc',
'language_id': '68000:BE:32:default',
})
PCodeTest({
'name': 'MIPS',
'build_all': 1,
'build_exe': 1,
'qemu_command': 'qemu-mips',
'toolchain': 'MIPS/mips-elf',
'ccflags': '-L %(toolchain_dir)s/lib/gcc/mips-mti-elf/%(gcc_version)s -lgcc -mno-gpopt',
'language_id': 'MIPS:BE:32:default',
})
PCodeTest({
'name': 'MIPSEL',
'build_all': 1,
'build_exe': 1,
'toolchain': 'MIPS/mips-elf',
'ccflags': '-L %(toolchain_dir)s/lib/gcc/mips-mti-elf/%(gcc_version)s/el -lgcc -mno-gpopt -mel',
'language_id': 'MIPS:LE:32:default',
})
PCodeTest({
'name': 'MIPS16',
'build_all': 1,
'build_exe': 1,
'qemu_command': 'qemu-mips',
'toolchain': 'MIPS/mips-elf',
'ccflags': '-mno-gpopt',
'language_id': 'MIPS:BE:32:default',
'has_float': 0,
'has_double': 0,
'has_longlong': 0,
})
PCodeTest({
'name': 'MIPS16MIX',
'build_all': 1,
'build_exe': 1,
'qemu_command': 'qemu-mips',
'toolchain': 'MIPS/mips-elf',
'ccflags': '-mno-gpopt',
'language_id': 'MIPS:BE:32:default',
'has_float': 0,
'has_double': 0,
'has_longlong': 0,
})
PCodeTest({
'name': 'MIPSMIC',
'build_all': 1,
'toolchain': 'MIPS/mips-elf',
'ccflags': '-mmicromips -L %(toolchain_dir)s/lib/gcc/mips-mti-elf/%(gcc_version)s/micromips -lgcc',
'language_id': 'MIPS:BE:32:micro',
'architecture_test': 'MIPSMICRO',
})
PCodeTest({
'name': 'MIPSMICMIX',
'build_all': 1,
'toolchain': 'MIPS/mips-elf',
'ccflags': '-minterlink-compressed -D BODYNEW=micromips -L %(toolchain_dir)s/lib/gcc/mips-mti-elf/%(gcc_version)s/micromips -lgcc',
'language_id': 'MIPS:BE:32:micro',
'architecture_test': 'MIPSMICROMIX',
})
PCodeTest({
'name': 'MIPSMIC64',
'build_all': 1,
'toolchain': 'MIPS/mipsr6-elf',
'ccflags': '-mips64r5 -mmicromips -minterlink-compressed',
'language_id': 'MIPS:BE:64:micro',
})
PCodeTest({
'name': 'MIPS64_32addr',
'build_all': 1,
'toolchain': 'MIPS/mipsr6-elf',
'ccflags': '-mips64r2',
'language_id': 'MIPS:BE:64:64-32addr',
})
PCodeTest({
'name': 'MIPS64_64addr',
'build_all': 1,
'toolchain': 'MIPS/mipsr6-elf',
'ccflags': '-mips64r2 -mabi=64',
'language_id': 'MIPS:BE:64:64-64addr',
})
PCodeTest({
'name': 'MIPS64_64addrLE',
'build_all': 1,
'toolchain': 'MIPS/mipsr6-elf',
'ccflags': '-mips64r2 -mabi=64 -EL',
'language_id': 'MIPS:LE:64:64-64addr',
})
PCodeTest({
'name': 'MIPSR6',
'build_all': 1,
'toolchain': 'MIPS/mipsr6-elf',
'ccflags': '-mips32r6 -L %(toolchain_dir)s/lib/gcc/mips-mti-elf/%(gcc_version)s -lgcc',
'language_id': 'MIPS:BE:32:R6',
})
PCodeTest({
'name': 'MIPS64R6',
'build_all': 1,
'toolchain': 'MIPS/mipsr6-elf',
'ccflags': '-mips64r6 -mabi=64',
'language_id': 'MIPS:BE:64:R6',
})
PCodeTest({
'name': 'NDS32BE',
'build_all': 1,
'toolchain': 'NDS32/nds32be-elf',
'ccflags': '-L %(toolchain_dir)s/lib/gcc/nds32be-linux-elf/%(gcc_version)s -lgcc',
'language_id': 'NDS32:BE:32:default',
})
PCodeTest({
'name': 'NDS32LE',
'build_all': 1,
'toolchain': 'NDS32/nds32le-elf',
'ccflags': '-L %(toolchain_dir)s/lib/gcc/nds32le-linux-elf/%(gcc_version)s -lgcc',
'language_id': 'NDS32:LE:32:default',
})
PCodeTest({
'name': 'power6',
'toolchain': 'PPC/powerpc-elf',
'ccflags': '-mcpu=G5 -m32 -mno-relocatable -L %(toolchain_dir)s/lib/gcc/powerpc-elf/%(gcc_version)s -lgcc',
'language_id': 'PowerPC:BE:32:default',
})
PCodeTest({
'name': 'powerpc32',
'build_all': 1,
'build_exe': 1,
'qemu_command': 'qemu-ppc64abi32',
'toolchain': 'PPC/powerpc-elf',
'ccflags': '-mcpu=powerpc -m32 -maltivec -mno-relocatable -L %(toolchain_dir)s/lib/gcc/powerpc-elf/%(gcc_version)s -lgcc',
'language_id': 'PowerPC:BE:32:default',
'architecture_test': 'PPC',
})
PCodeTest({
'name': 'powerpc64',
'build_all': 1,
'build_exe': 1,
'qemu_command': 'qemu-ppc64',
'toolchain': 'PPC/powerpc64-linux',
'ccflags': '-mabi=elfv1 -maltivec -mno-relocatable -L %(toolchain_dir)s/lib/gcc/powerpc-elf/%(gcc_version)s -lgcc',
'language_id': 'PowerPC:BE:64:default',
'architecture_test': 'PPC64',
})
PCodeTest({
'name': 'powerpc64v2',
'toolchain': 'PPC/powerpc64-linux',
'ccflags': '-mabi=elfv2 -maltivec -mno-relocatable -L %(toolchain_dir)s/lib/gcc/powerpc-elf/%(gcc_version)s -lgcc',
'language_id': 'PowerPC:BE:64:default',
})
PCodeTest({
'name': 'ppcA2',
'build_all': 1,
'toolchain': 'PPC/powerpc-elf',
'ccflags': '-mcpu=a2 -L %(toolchain_dir)s/lib/gcc/powerpc-elf/%(gcc_version)s -lgcc',
'language_id': 'PowerPC:BE:32:A2',
'architecture_test': 'PPCA2',
})
PCodeTest({
'name': 'ppcA2Alt',
'build_all': 1,
'toolchain': 'PPC/powerpc-elf',
'ccflags': '-mcpu=a2 -maltivec -L %(toolchain_dir)s/lib/gcc/powerpc-elf/%(gcc_version)s -lgcc',
'language_id': 'PowerPC:BE:32:A2ALT',
'architecture_test': 'PPCA2Alt',
})
PCodeTest({
'name': 'ppcP8Alt',
'build_all': 1,
'toolchain': 'PPC/powerpc-elf',
'ccflags': '-mcpu=power8 -mvsx -maltivec -L %(toolchain_dir)s/lib/gcc/powerpc-elf/%(gcc_version)s -lgcc',
'language_id': 'PowerPC:BE:32:A2ALT',
'architecture_test': 'PPCP8Alt',
})
PCodeTest({
'name': 'ppcP9Alt',
'build_all': 1,
'toolchain': 'PPC/powerpc-elf',
'ccflags': '-mcpu=power9 -mvsx -maltivec -L %(toolchain_dir)s/lib/gcc/powerpc-elf/%(gcc_version)s -lgcc',
'language_id': 'PowerPC:BE:32:A2ALT',
'architecture_test': 'PPCP9Alt',
})
PCodeTest({
'name': 'msp430x',
'build_all': 1,
'toolchain': 'TI/msp430-elf',
'ccflags': '-g -mmcu=msp430x -mlarge -mhwmult=none -fno-builtin -Wl,-T,msp430x.ld -L %(toolchain_dir)s/lib/gcc/msp430-elf/%(gcc_version)s/large/ -lgcc -lmul_none',
'language_id': 'TI_MSP430X:LE:32:default',
'processor': 'TI',
'architecture_test': 'MSP430X',
'has_float': 0,
'has_double': 0,
'has_longlong': 0,
'small_build': 1,
'skip_files': ['PointerManipulation.test', 'misc.test'],
})
PCodeTest({
'name': 'SH4',
'build_all': 1,
'build_exe': 0,
'qemu_command': 'qemu-sh4eb', # qemu gets "Invalid argument" error
'toolchain': 'SuperH4/sh4-elf',
'ccflags': '-mb -mrenesas -m4 -L %(toolchain_dir)s/lib/gcc/sh4-elf/%(gcc_version)s -lgcc',
'language_id': 'SuperH4:BE:32:default',
'architecture_test': 'SuperH4_BE',
})
PCodeTest({
'name': 'SH4_LE',
'build_all': 1,
'toolchain': 'SuperH4/sh4le-elf',
'ccflags': '-ml -mrenesas -m4 -L %(toolchain_dir)s/lib/gcc/sh4le-elf/%(gcc_version)s -lgcc',
'language_id': 'SuperH4:LE:32:default',
'architecture_test': 'SuperH4',
})
PCodeTest({
'name': 'sparcV9_32',
'build_all': 1,
'build_exe': 1,
'can_run': 0, # instruction error causes infinite loop
'qemu_command': 'qemu-sparc32plus',
'toolchain': 'SparcV9/sparc-elf',
'ccflags': '-mcpu=v9 -m32',
'language_id': 'sparc:BE:32:default',
'processor': 'Sparc',
'architecture_test': 'SparcV9_m32',
'has_float': 0,
'has_double': 0,
'has_longlong': 0,
})
# to suppress usage of application registers g2 and g3, add -mno-app-regs here
PCodeTest({
'name': 'sparcV9_64',
'build_all': 1,
'toolchain': 'SparcV9/sparc64-elf',
'ccflags': '-mcpu=v9 -m64',
'language_id': 'sparc:BE:64:default',
})
PCodeTest({
'name': 'pentium',
'build_all': 1,
'build_exe': 1,
'qemu_command': 'qemu-i386',
'toolchain': 'x86/i386-elf-linux',
'ccflags': '-march=pentium -m32 -L %(toolchain_dir)s/lib/gcc/i386-elf-linux/%(gcc_version)s -lgcc',
'objdump_option': '-M intel',
'language_id': 'x86:LE:32:default',
'architecture_test': 'X86m32',
'has_vector': 1,
})
PCodeTest({
'name': 'i386_CLANG',
'toolchain': 'LLVM/llvm',
'toolchain_type': 'llvm',
'ccflags': '--target=i386',
'objdump_option': '-M intel',
'language_id': 'x86:LE:32:default',
})
PCodeTest({
'name': 'i686_CLANG',
'toolchain': 'LLVM/llvm',
'toolchain_type': 'llvm',
'ccflags': '--target=i686',
'objdump_option': '-M intel',
'language_id': 'x86:LE:32:default',
})
PCodeTest({
'name': 'AVX2',
'build_all': 1,
'toolchain': 'x86/x86_64-elf',
'ccflags': '-march=core-avx2',
'objdump_option': '-M intel',
'language_id': 'x86:LE:64:default',
'has_vector': 1,
})
PCodeTest({
'name': 'AVXi',
'toolchain': 'x86/x86_64-elf',
'ccflags': '-march=core-avx-i',
'objdump_option': '-M intel',
'language_id': 'x86:LE:64:default',
})
PCodeTest({
'name': 'bdver2',
'toolchain': 'x86/x86_64-elf',
'ccflags': '-march=bdver2',
'objdump_option': '-M intel',
'language_id': 'x86:LE:64:default',
})
PCodeTest({
'name': 'core2',
'toolchain': 'x86/x86_64-elf',
'ccflags': '-march=bdver2',
'objdump_option': '-M intel',
'language_id': 'x86:LE:64:default',
})
PCodeTest({
'name': 'x86_m64',
'build_all': 1,
'build_exe': 1,
'qemu_command': 'qemu-x86_64',
'toolchain': 'x86/x86_64-elf',
'ccflags': '-static -m64',
'objdump_option': '-M intel',
'language_id': 'x86:LE:64:default',
'architecture_test': 'X86m64',
})
PCodeTest({
'name': 'x86_fma4',
'toolchain': 'x86/x86_64-elf',
'ccflags': '-mfma',
'objdump_option': '-M intel',
'language_id': 'x86:LE:64:default',
})
# the PIC30 toolchain is distributed by mchp. So when making the
# toolchain, specify toolchain_type to be mchp. But it is based on
# gcc, and after it's installed, it behaves exactly like gcc. So, when
# making a pcode test, specify toolchain_type to be gcc.
PCodeTest({
'name': 'PIC30',
'build_all': 1,
'toolchain': 'PIC/xc16',
'compile_exe': 'bin/xc16-gcc',
'objdump_exe': 'bin/xc16-objdump',
'readelf_exe': 'bin/xc16-readelf',
'nm_exe': 'bin/xc16-nm',
'ccflags': '-mcpu=30F2011 -DINT4_IS_LONG -Xlinker --defsym -Xlinker _main=0x0 -L %(toolchain_dir)s/lib -lpic30 -lc -lm',
'language_id': 'dsPIC30F:LE:24:default',
'skip_files': ['misc.test'],
'variants': {'O0': '-O0'},
'small_build': 1,
})
PCodeTest({
'name': 'PIC16',
'toolchain': 'PIC/xc8',
'compile_exe': 'bin/xc8',
'objdump_exe': 'bin/dump',
'ccflags': '-chip=16C57 -DINT4_IS_LONG -DSTATIC_MAIN -L %(toolchain_dir)s/lib -lpic30 -lc -lm',
'language_id': 'dsPIC16F:LE:24:default',
'small_build': 1,
})
PCodeTest({
'name': 'HCS08',
'toolchain': 'SDCC/s08',
'toolchain_type': 'sdcc',
'compile_exe': 'bin/sdcc',
'ccflags': '--out-fmt-elf --std-sdcc11',
'language_id': 'HCS08:BE:16:MC9S08GB60',
'variants': {'OX': ''},
'has_double': 0,
'has_longlong': 0,
})
PCodeTest({
'name': 'Z80',
'toolchain': 'SDCC/z80',
'toolchain_type': 'sdcc',
'compile_exe': 'bin/sdcc',
'ccflags': '-mz80 -V --verbose --std-sdcc11 -DINT4_IS_LONG',
'language_id': 'z80:LE:16:default',
'variants': {'OX':''},
'has_float': 0,
'has_double': 0,
'has_longlong': 0,
'small_build': 1,
# Currently the 'omitted' option is only supported by the SDCC toolchain!
# Causes a bit of funk with tpp.py still including references to these
# tests in cunit_main.c but the compiler accepts it with a warning.
'skip_files': ['PointerManipulation.test', 'StructUnionManipulation.test'],
# These tests are omitted because the SDCC compiler doesn't properly handle
# structs in functions and requires a more strict format than ANSI C requires.
})
PCodeTest({
'name': 'CR16C',
'build_all': 1,
'toolchain': 'NS/cr16-elf',
'language_id': 'CR16C:LE:16:default',
'processor': 'CR16',
'architecture_test': 'CRC16C',
'ccflags': '-lgcc',
'has_float': 0,
'has_double': 0,
'has_longlong': 0,
})
PCodeTest({
'name': 'RISCV',
'build_all': 1,
'toolchain': 'RISCV/riscv32-elf',
'language_id': 'RISCV:BE:32:default',
'architecture_test': 'RISCV',
'ccflags': '-lgcc',
'has_float': 0,
'has_double': 0,
'has_longlong': 0,
})
| apache-2.0 |
jbuchbinder/youtube-dl | youtube_dl/extractor/kickstarter.py | 71 | 2708 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import smuggle_url
class KickStarterIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?kickstarter\.com/projects/(?P<id>[^/]*)/.*'
_TESTS = [{
'url': 'https://www.kickstarter.com/projects/1404461844/intersection-the-story-of-josh-grant/description',
'md5': 'c81addca81327ffa66c642b5d8b08cab',
'info_dict': {
'id': '1404461844',
'ext': 'mp4',
'title': 'Intersection: The Story of Josh Grant by Kyle Cowling',
'description': (
'A unique motocross documentary that examines the '
'life and mind of one of sports most elite athletes: Josh Grant.'
),
},
}, {
'note': 'Embedded video (not using the native kickstarter video service)',
'url': 'https://www.kickstarter.com/projects/597507018/pebble-e-paper-watch-for-iphone-and-android/posts/659178',
'info_dict': {
'id': '78704821',
'ext': 'mp4',
'uploader_id': 'pebble',
'uploader': 'Pebble Technology',
'title': 'Pebble iOS Notifications',
},
'add_ie': ['Vimeo'],
}, {
'url': 'https://www.kickstarter.com/projects/1420158244/power-drive-2000/widget/video.html',
'info_dict': {
'id': '1420158244',
'ext': 'mp4',
'title': 'Power Drive 2000',
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
r'<title>\s*(.*?)(?:\s*—\s*Kickstarter)?\s*</title>',
webpage, 'title')
video_url = self._search_regex(
r'data-video-url="(.*?)"',
webpage, 'video URL', default=None)
if video_url is None: # No native kickstarter, look for embedded videos
return {
'_type': 'url_transparent',
'ie_key': 'Generic',
'url': smuggle_url(url, {'to_generic': True}),
'title': title,
}
thumbnail = self._og_search_thumbnail(webpage, default=None)
if thumbnail is None:
thumbnail = self._html_search_regex(
r'<img[^>]+class="[^"]+\s*poster\s*[^"]+"[^>]+src="([^"]+)"',
webpage, 'thumbnail image', fatal=False)
return {
'id': video_id,
'url': video_url,
'title': title,
'description': self._og_search_description(webpage, default=None),
'thumbnail': thumbnail,
}
| unlicense |
Venturi/cms | env/lib/python2.7/site-packages/phonenumbers/shortdata/region_SE.py | 11 | 1356 | """Auto-generated file, do not edit by hand. SE metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_SE = PhoneMetadata(id='SE', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[1-37-9]\\d{2,5}', possible_number_pattern='\\d{3,6}'),
toll_free=PhoneNumberDesc(national_number_pattern='116\\d{3}', possible_number_pattern='\\d{6}', example_number='116000'),
premium_rate=PhoneNumberDesc(national_number_pattern='11811[89]|72\\d{3}', possible_number_pattern='\\d{5,6}', example_number='118118'),
emergency=PhoneNumberDesc(national_number_pattern='112|90000', possible_number_pattern='\\d{3,5}', example_number='112'),
short_code=PhoneNumberDesc(national_number_pattern='11(?:[25]|313|4\\d{2}|6(?:00[06]|11[17]|123)|7[0-8]|8(?:1(?:[02-9]\\d|1[013-9])|[02-46-9]\\d{2}))|2(?:2[02358]|33|4[01]|50|6[1-4])|32[13]|7\\d{4}|8(?:22|88)|9(?:0(?:000|1(?:[02-9]\\d|1[013-9])|[2-4]\\d{2}|510)|12)', possible_number_pattern='\\d{3,6}', example_number='11313'),
standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
carrier_specific=PhoneNumberDesc(national_number_pattern='2(?:2[02358]|33|4[01]|50|6[1-4])|32[13]|8(?:22|88)|912', possible_number_pattern='\\d{3}', example_number='222'),
short_data=True)
| gpl-2.0 |
gjtorikian/readthedocs.org | readthedocs/rtd_tests/tests/test_middleware.py | 3 | 2874 | from django.http import Http404
from django.core.cache import cache
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from readthedocs.core.middleware import SubdomainMiddleware
class MiddlewareTests(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.middleware = SubdomainMiddleware()
self.url = '/'
def test_failey_cname(self):
request = self.factory.get(self.url, HTTP_HOST='my.host.com')
with self.assertRaises(Http404):
self.middleware.process_request(request)
self.assertEqual(request.cname, True)
@override_settings(PRODUCTION_DOMAIN='readthedocs.org')
def test_proper_subdomain(self):
request = self.factory.get(self.url, HTTP_HOST='pip.readthedocs.org')
self.middleware.process_request(request)
self.assertEqual(request.urlconf, 'core.subdomain_urls')
self.assertEqual(request.subdomain, True)
self.assertEqual(request.slug, 'pip')
def test_proper_cname(self):
cache.get = lambda x: 'my_slug'
request = self.factory.get(self.url, HTTP_HOST='my.valid.homename')
self.middleware.process_request(request)
self.assertEqual(request.urlconf, 'core.subdomain_urls')
self.assertEqual(request.cname, True)
self.assertEqual(request.slug, 'my_slug')
def test_request_header(self):
request = self.factory.get(self.url, HTTP_HOST='some.random.com', HTTP_X_RTD_SLUG='pip')
self.middleware.process_request(request)
self.assertEqual(request.urlconf, 'core.subdomain_urls')
self.assertEqual(request.cname, True)
self.assertEqual(request.rtdheader, True)
self.assertEqual(request.slug, 'pip')
@override_settings(PRODUCTION_DOMAIN='readthedocs.org')
def test_proper_cname_uppercase(self):
cache.get = lambda x: x.split('.')[0]
request = self.factory.get(self.url, HTTP_HOST='PIP.RANDOM.COM')
self.middleware.process_request(request)
self.assertEqual(request.urlconf, 'core.subdomain_urls')
self.assertEqual(request.cname, True)
self.assertEqual(request.slug, 'pip')
def test_request_header_uppercase(self):
request = self.factory.get(self.url, HTTP_HOST='some.random.com', HTTP_X_RTD_SLUG='PIP')
self.middleware.process_request(request)
self.assertEqual(request.urlconf, 'core.subdomain_urls')
self.assertEqual(request.cname, True)
self.assertEqual(request.rtdheader, True)
self.assertEqual(request.slug, 'pip')
@override_settings(DEBUG=True)
def test_debug_on(self):
request = self.factory.get(self.url, HTTP_HOST='doesnt.really.matter')
ret_val = self.middleware.process_request(request)
self.assertEqual(ret_val, None)
| mit |
Alp-Phone/django-mobile-app-distribution | django_mobile_app_distribution/views.py | 2 | 4690 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import os.path
from itertools import chain
from operator import attrgetter
from os.path import basename
from wsgiref.util import FileWrapper
from future.builtins import (int, map, open)
from django.contrib.auth.decorators import login_required
try:
from django.contrib.sites.models import get_current_site
except ImportError:
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import MultipleObjectsReturned
from django.http import HttpResponse, HttpResponseForbidden, Http404
from django.shortcuts import render
from django.utils import translation
import django_mobile_app_distribution.settings as app_dist_settings
from django_mobile_app_distribution.models import IosApp, AndroidApp, UserInfo
log = logging.getLogger(__name__)
@login_required
def index(request):
try:
# Activate client's language preference
lang = request.user.userinfo.language
translation.activate(lang)
except UserInfo.DoesNotExist:
pass
ios_user_apps = IosApp.objects.filter(user_id__exact=request.user.id)
android_user_apps = AndroidApp.objects.filter(user_id__exact=request.user.id)
apps = list(chain(ios_user_apps, android_user_apps))
ios_group_apps = IosApp.objects.filter(groups__in=request.user.groups.all())
android_group_apps = AndroidApp.objects.filter(groups__in=request.user.groups.all())
group_apps = list(chain(ios_group_apps, android_group_apps))
for group_app in group_apps:
if group_app not in apps:
apps.append(group_app)
apps.sort(key=attrgetter('updatedAt'), reverse=True)
apps.sort(key=attrgetter('version'), reverse=True)
apps.sort(key=attrgetter('operating_system'), reverse=True) # let iOS come first
apps.sort(key=attrgetter('name'))
return render(request, 'django_mobile_app_distribution/app_list.html', {
'apps': apps,
'ios_identifier': app_dist_settings.IOS,
'site_url': get_current_site(request).domain
})
@login_required
def send_apk(request, app_id):
"""
Send a file through Django without loading the whole file into
memory at once. The FileWrapper will turn the file object into an
iterator for chunks of 8KB.
"""
android_app = None
try:
android_app = AndroidApp.objects.get(pk=app_id)
except (AndroidApp.DoesNotExist, MultipleObjectsReturned):
return HttpResponse('App does not exist', status=404)
authenticated = False
if android_app.user:
if android_app.user.id == request.user.id:
authenticated = True
if not authenticated:
app_group_ids = android_app.groups.all().values_list('pk', flat=True)
app_group_ids = list(map(int, app_group_ids))
for user_group in request.user.groups.all():
user_group_id = int(user_group.id)
if user_group_id in app_group_ids:
authenticated = True
break
if not authenticated:
return HttpResponseForbidden('This is not your app')
filename = os.path.join(
app_dist_settings.MOBILE_APP_DISTRIBUTION_ANDROID_FILE_STORAGE_PATH,
android_app.app_binary.name
)
response = HttpResponse(FileWrapper(open(filename, 'rb')))
response['Content-Length'] = os.path.getsize(filename)
response['Content-Type'] = app_dist_settings.MOBILE_APP_DISTRIBUTION_CONTENT_TYPES[android_app.operating_system]
response['Content-Disposition'] = 'inline; filename=%s' % basename(filename)
return response
def ios_app_plist(request, app_id):
ios_app = None
try:
ios_app = IosApp.objects.get(pk=app_id)
except (IosApp.DoesNotExist, MultipleObjectsReturned):
raise Http404
from . import settings as mad_settings
plist = ''
if ios_app.display_image and ios_app.full_size_image:
plist = mad_settings.IOS_PLIST_BLUEPRINT_IOS9
plist = plist.replace(mad_settings.PLIST_DISPLAY_IMAGE, ios_app.get_display_image_url())
plist = plist.replace(mad_settings.PLIST_FULL_SIZE_IMAGE, ios_app.get_full_size_image_url())
else:
plist = mad_settings.IOS_PLIST_BLUEPRINT
plist = plist.replace(mad_settings.PLIST_APP_URL, ios_app.get_binary_url())
plist = plist.replace(mad_settings.PLIST_BUNDLE_IDENTIFIER, ios_app.bundle_identifier)
plist = plist.replace(mad_settings.PLIST_BUNDLE_VERSION, ios_app.version)
plist = plist.replace(mad_settings.PLIST_APP_TITLE, ios_app.name)
return HttpResponse(
plist,
content_type=mad_settings.MOBILE_APP_DISTRIBUTION_CONTENT_TYPES[mad_settings.IOS_PLIST]
)
| mit |
bigoldboy/repository.bigoldboy | plugin.video.VADER/unidecode/util.py | 51 | 1790 | # vim:ts=4 sw=4 expandtab softtabstop=4
from __future__ import print_function
import optparse
import locale
import os
import sys
import warnings
from unidecode import unidecode
PY3 = sys.version_info[0] >= 3
def fatal(msg):
sys.stderr.write(msg + "\n")
sys.exit(1)
def main():
default_encoding = locale.getpreferredencoding()
parser = optparse.OptionParser('%prog [options] [FILE]',
description="Transliterate Unicode text into ASCII. FILE is path to file to transliterate. "
"Standard input is used if FILE is omitted and -c is not specified.")
parser.add_option('-e', '--encoding', metavar='ENCODING', default=default_encoding,
help='Specify an encoding (default is %s)' % (default_encoding,))
parser.add_option('-c', metavar='TEXT', dest='text',
help='Transliterate TEXT instead of FILE')
options, args = parser.parse_args()
encoding = options.encoding
if args:
if options.text:
fatal("Can't use both FILE and -c option")
else:
with open(args[0], 'rb') as f:
stream = f.read()
elif options.text:
if PY3:
stream = os.fsencode(options.text)
else:
stream = options.text
# add a newline to the string if it comes from the
# command line so that the result is printed nicely
# on the console.
stream += '\n'.encode('ascii')
else:
if PY3:
stream = sys.stdin.buffer.read()
else:
stream = sys.stdin.read()
try:
stream = stream.decode(encoding)
except UnicodeDecodeError as e:
fatal('Unable to decode input: %s, start: %d, end: %d' % (e.reason, e.start, e.end))
sys.stdout.write(unidecode(stream))
| gpl-3.0 |
boundarydevices/android_external_chromium_org | chrome/tools/build/win/resedit.py | 152 | 11259 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A utility script that can extract and edit resources in a Windows binary.
For detailed help, see the script's usage by invoking it with --help."""
import ctypes
import ctypes.wintypes
import logging
import optparse
import os
import shutil
import sys
import tempfile
import win32api
import win32con
_LOGGER = logging.getLogger(__name__)
# The win32api-supplied UpdateResource wrapper unfortunately does not allow
# one to remove resources due to overzealous parameter verification.
# For that case we're forced to go straight to the native API implementation.
UpdateResource = ctypes.windll.kernel32.UpdateResourceW
UpdateResource.argtypes = [
ctypes.wintypes.HANDLE, # HANDLE hUpdate
ctypes.c_wchar_p, # LPCTSTR lpType
ctypes.c_wchar_p, # LPCTSTR lpName
ctypes.c_short, # WORD wLanguage
ctypes.c_void_p, # LPVOID lpData
ctypes.c_ulong, # DWORD cbData
]
UpdateResource.restype = ctypes.c_short
def _ResIdToString(res_id):
# Convert integral res types/ids to a string.
if isinstance(res_id, int):
return "#%d" % res_id
return res_id
class _ResourceEditor(object):
"""A utility class to make it easy to extract and manipulate resources in a
Windows binary."""
def __init__(self, input_file, output_file):
"""Create a new editor.
Args:
input_file: path to the input file.
output_file: (optional) path to the output file.
"""
self._input_file = input_file
self._output_file = output_file
self._modified = False
self._module = None
self._temp_dir = None
self._temp_file = None
self._update_handle = None
def __del__(self):
if self._module:
win32api.FreeLibrary(self._module)
self._module = None
if self._update_handle:
_LOGGER.info('Canceling edits to "%s".', self.input_file)
win32api.EndUpdateResource(self._update_handle, False)
self._update_handle = None
if self._temp_dir:
_LOGGER.info('Removing temporary directory "%s".', self._temp_dir)
shutil.rmtree(self._temp_dir)
self._temp_dir = None
def _GetModule(self):
if not self._module:
# Specify a full path to LoadLibraryEx to prevent
# it from searching the path.
input_file = os.path.abspath(self.input_file)
_LOGGER.info('Loading input_file from "%s"', input_file)
self._module = win32api.LoadLibraryEx(
input_file, None, win32con.LOAD_LIBRARY_AS_DATAFILE)
return self._module
def _GetTempDir(self):
if not self._temp_dir:
self._temp_dir = tempfile.mkdtemp()
_LOGGER.info('Created temporary directory "%s".', self._temp_dir)
return self._temp_dir
def _GetUpdateHandle(self):
if not self._update_handle:
# Make a copy of the input file in the temp dir.
self._temp_file = os.path.join(self.temp_dir,
os.path.basename(self._input_file))
shutil.copyfile(self._input_file, self._temp_file)
# Open a resource update handle on the copy.
_LOGGER.info('Opening temp file "%s".', self._temp_file)
self._update_handle = win32api.BeginUpdateResource(self._temp_file, False)
return self._update_handle
modified = property(lambda self: self._modified)
input_file = property(lambda self: self._input_file)
module = property(_GetModule)
temp_dir = property(_GetTempDir)
update_handle = property(_GetUpdateHandle)
def ExtractAllToDir(self, extract_to):
"""Extracts all resources from our input file to a directory hierarchy
in the directory named extract_to.
The generated directory hierarchy is three-level, and looks like:
resource-type/
resource-name/
lang-id.
Args:
extract_to: path to the folder to output to. This folder will be erased
and recreated if it already exists.
"""
_LOGGER.info('Extracting all resources from "%s" to directory "%s".',
self.input_file, extract_to)
if os.path.exists(extract_to):
_LOGGER.info('Destination directory "%s" exists, deleting', extract_to)
shutil.rmtree(extract_to)
# Make sure the destination dir exists.
os.makedirs(extract_to)
# Now enumerate the resource types.
for res_type in win32api.EnumResourceTypes(self.module):
res_type_str = _ResIdToString(res_type)
# And the resource names.
for res_name in win32api.EnumResourceNames(self.module, res_type):
res_name_str = _ResIdToString(res_name)
# Then the languages.
for res_lang in win32api.EnumResourceLanguages(self.module,
res_type, res_name):
res_lang_str = _ResIdToString(res_lang)
dest_dir = os.path.join(extract_to, res_type_str, res_lang_str)
dest_file = os.path.join(dest_dir, res_name_str)
_LOGGER.info('Extracting resource "%s", lang "%d" name "%s" '
'to file "%s".',
res_type_str, res_lang, res_name_str, dest_file)
# Extract each resource to a file in the output dir.
os.makedirs(dest_dir)
self.ExtractResource(res_type, res_lang, res_name, dest_file)
def ExtractResource(self, res_type, res_lang, res_name, dest_file):
"""Extracts a given resource, specified by type, language id and name,
to a given file.
Args:
res_type: the type of the resource, e.g. "B7".
res_lang: the language id of the resource e.g. 1033.
res_name: the name of the resource, e.g. "SETUP.EXE".
dest_file: path to the file where the resource data will be written.
"""
_LOGGER.info('Extracting resource "%s", lang "%d" name "%s" '
'to file "%s".', res_type, res_lang, res_name, dest_file)
data = win32api.LoadResource(self.module, res_type, res_name, res_lang)
with open(dest_file, 'wb') as f:
f.write(data)
def RemoveResource(self, res_type, res_lang, res_name):
"""Removes a given resource, specified by type, language id and name.
Args:
res_type: the type of the resource, e.g. "B7".
res_lang: the language id of the resource, e.g. 1033.
res_name: the name of the resource, e.g. "SETUP.EXE".
"""
_LOGGER.info('Removing resource "%s:%s".', res_type, res_name)
# We have to go native to perform a removal.
ret = UpdateResource(self.update_handle,
res_type,
res_name,
res_lang,
None,
0)
# Raise an error on failure.
if ret == 0:
error = win32api.GetLastError()
print "error", error
raise RuntimeError(error)
self._modified = True
def UpdateResource(self, res_type, res_lang, res_name, file_path):
"""Inserts or updates a given resource with the contents of a file.
Args:
res_type: the type of the resource, e.g. "B7".
res_lang: the language id of the resource, e.g. 1033.
res_name: the name of the resource, e.g. "SETUP.EXE".
file_path: path to the file containing the new resource data.
"""
_LOGGER.info('Writing resource "%s:%s" from file.',
res_type, res_name, file_path)
with open(file_path, 'rb') as f:
win32api.UpdateResource(self.update_handle,
res_type,
res_name,
f.read(),
res_lang);
self._modified = True
def Commit(self):
"""Commit any successful resource edits this editor has performed.
This has the effect of writing the output file.
"""
if self._update_handle:
update_handle = self._update_handle
self._update_handle = None
win32api.EndUpdateResource(update_handle, False)
_LOGGER.info('Writing edited file to "%s".', self._output_file)
shutil.copyfile(self._temp_file, self._output_file)
_USAGE = """\
usage: %prog [options] input_file
A utility script to extract and edit the resources in a Windows executable.
EXAMPLE USAGE:
# Extract from mini_installer.exe, the resource type "B7", langid 1033 and
# name "CHROME.PACKED.7Z" to a file named chrome.7z.
# Note that 1033 corresponds to English (United States).
%prog mini_installer.exe --extract B7 1033 CHROME.PACKED.7Z chrome.7z
# Update mini_installer.exe by removing the resource type "BL", langid 1033 and
# name "SETUP.EXE". Add the resource type "B7", langid 1033 and name
# "SETUP.EXE.packed.7z" from the file setup.packed.7z.
# Write the edited file to mini_installer_packed.exe.
%prog mini_installer.exe \\
--remove BL 1033 SETUP.EXE \\
--update B7 1033 SETUP.EXE.packed.7z setup.packed.7z \\
--output-file mini_installer_packed.exe
"""
def _ParseArgs():
parser = optparse.OptionParser(_USAGE)
parser.add_option('', '--verbose', action='store_true',
help='Enable verbose logging.')
parser.add_option('', '--extract_all',
help='Path to a folder which will be created, in which all resources '
'from the input_file will be stored, each in a file named '
'"res_type/lang_id/res_name".')
parser.add_option('', '--extract', action='append', default=[], nargs=4,
help='Extract the resource with the given type, language id and name '
'to the given file.',
metavar='type langid name file_path')
parser.add_option('', '--remove', action='append', default=[], nargs=3,
help='Remove the resource with the given type, langid and name.',
metavar='type langid name')
parser.add_option('', '--update', action='append', default=[], nargs=4,
help='Insert or update the resource with the given type, langid and '
'name with the contents of the file given.',
metavar='type langid name file_path')
parser.add_option('', '--output_file',
help='On success, OUTPUT_FILE will be written with a copy of the '
'input file with the edits specified by any remove or update '
'options.')
options, args = parser.parse_args()
if len(args) != 1:
parser.error('You have to specify an input file to work on.')
modify = options.remove or options.update
if modify and not options.output_file:
parser.error('You have to specify an output file with edit options.')
return options, args
def main(options, args):
"""Main program for the script."""
if options.verbose:
logging.basicConfig(level=logging.INFO)
# Create the editor for our input file.
editor = _ResourceEditor(args[0], options.output_file)
if options.extract_all:
editor.ExtractAllToDir(options.extract_all)
for res_type, res_lang, res_name, dest_file in options.extract:
editor.ExtractResource(res_type, int(res_lang), res_name, dest_file)
for res_type, res_lang, res_name in options.remove:
editor.RemoveResource(res_type, int(res_lang), res_name)
for res_type, res_lang, res_name, src_file in options.update:
editor.UpdateResource(res_type, int(res_lang), res_name, src_file)
if editor.modified:
editor.Commit()
if __name__ == '__main__':
sys.exit(main(*_ParseArgs()))
| bsd-3-clause |
ryansb/mdtocs | mdtocs/__init__.py | 1 | 7184 | #!/bin/env python2
# mdtocs/__init__.py
#
# Copyright (c) 2013, Ryan S. Brown <[email protected]>
#
# All rights reserved.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License version 3.0 as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA
import os
import re
import md5
from argparse import ArgumentParser
def strip_existing_toc(lines):
skip = 0
if '#Table of Contents' in lines[0]:
skip += 1
for l in lines[1:]:
if l.startswith('#'):
break
skip += 1
return lines[skip:]
def get_headers(lines):
# Header forms
# h1 h2 #h1 ##h2 ###h3 ...
# == --
underlined_h1 = re.compile(r'^=+$')
underlined_h2 = re.compile(r'^-+$')
hashed_re = re.compile(r'^#+')
for idx in range(len(lines)):
if hashed_re.match(lines[idx]):
h = re.sub(r'[ #]+$', '', lines[idx]) # remove trailing #'s
yield ( # H<N> where N=number of #'s
len(re.search('^(#+)', h).group()) - 1,
hashed_re.sub('', h).strip()
)
elif underlined_h1.match(lines[idx]):
yield (0, lines[idx - 1].strip())
elif underlined_h2.match(lines[idx]):
yield (1, lines[idx - 1].strip())
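# Illustrative example (assumed behaviour, consistent with the tests below):
#   list(get_headers(['#Top', 'Sub', '---']))  ->  [(0, 'Top'), (1, 'Sub')]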
def tocify(lines):
"""
Takes a list of lines in a markdown file, returns string with TOC
generated/updated
"""
toc, body = tocify_split(lines)
return toc + '\n\n' + body
def tocify_string(body):
return tocify([l + '\n' for l in body.split('\n')])
def tocify_split(lines):
"""
Same as tocify(), but returns tuple of strings (toc, body)
"""
slugify_re = re.compile(r'[^a-z0-9-]')
TOC = [
'#Table of Contents\n',
        'generated by [mdtocs](http://ryansb.github.io/mdtocs/)\n',
]
TOC_line = "{indent}- [{header}](#{slug})"
for level, header in get_headers(strip_existing_toc(lines)):
TOC.append(TOC_line.format(
indent=('\t' * level),
header=header,
slug=slugify_re.sub('', header.lower().replace(' ', '-'))
))
return '\n'.join(TOC), ''.join(strip_existing_toc(lines))
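# For instance (assumed, mirroring the unit tests further down), the TOC built
# for a document starting with '#My Cool Document' contains the entry
#   - [My Cool Document](#my-cool-document)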
def tocify_file_list(fnames):
for fname in fnames:
with open(fname, 'r+w') as f:
lines = f.readlines()
orig_hash = md5.new('\n'.join(lines)).hexdigest()
out = tocify(lines)
if not md5.new(out).hexdigest() == orig_hash:
print 'Updated TOC in', fname
f.seek(0)
f.write(out)
def find_files(names, recurse):
    if not len(names):
        if not recurse:
            names = [f for f in os.listdir(os.getcwd()) if os.path.isfile(f)]
        else:
            names = os.listdir(os.getcwd())
for name in [n for n in names if os.path.exists(n)]:
if os.path.isfile(name) and (name.endswith('.md')
or name.endswith('.markdown')):
yield name
continue
if os.path.isdir(name) and recurse:
for path, _, files in os.walk(name):
for f in files:
if f.endswith('.md') or f.endswith('.markdown'):
yield os.path.join(path, f)
        if os.path.isdir(name) and not recurse:
            for f in os.listdir(name):
                full_path = os.path.join(name, f)
                if os.path.isfile(full_path) and (f.endswith('.md')
                                                  or f.endswith('.markdown')):
                    yield full_path
def run_tests():
import unittest
class TestMDTOCS(unittest.TestCase):
def setUp(self):
self.corpus = """#My Cool Document
Is a document
##Subheading
Subheadings are key to nirvana
With Underlined subheader
-------------------------
And information
###And Subsubheadings
The end, dude.
"""
self.expected = """#Table of Contents
generated by [mdtocs](http://ryansb.github.io/mdtocs/)
- [My Cool Document](#my-cool-document)
\t- [Subheading](#subheading)
\t- [With Underlined subheader](#with-underlined-subheader)
\t\t- [And Subsubheadings](#and-subsubheadings)
""" + self.corpus
self.modified_toc = self.expected.replace(
'- [My Cool Document](#my-cool-document)', '')
def test_underline(self):
h = get_headers([
'Hello H1',
'========',
'Hello H2',
'--------'
])
self.assertListEqual(list(h), [(0, 'Hello H1'), (1, 'Hello H2')])
def test_hash(self):
h = get_headers([
'#Hello H1',
'##Hello H2',
'###Hello H3',
'####Hello H4',
'#####Hello H5',
])
self.assertListEqual(list(h), [
(0, 'Hello H1'),
(1, 'Hello H2'),
(2, 'Hello H3'),
(3, 'Hello H4'),
(4, 'Hello H5'),
])
def test_toc_generation(self):
from StringIO import StringIO
self.assertEqual(tocify(StringIO(self.corpus).readlines()),
self.expected)
def test_toc_detection(self):
from StringIO import StringIO
self.assertEqual(
tocify(
strip_existing_toc(StringIO(self.expected).readlines())
),
self.expected)
def test_toc_update(self):
from StringIO import StringIO
self.assertEqual(
tocify(
strip_existing_toc(StringIO(self.modified_toc).readlines())
),
self.expected)
suite = unittest.TestLoader().loadTestsFromTestCase(TestMDTOCS)
unittest.TextTestRunner(verbosity=2).run(suite)
def main():
parser = ArgumentParser(
description='MDTOCS: MarkDown Table Of Contents System'
)
parser.add_argument('locations', type=str, nargs='*',
help='Markdown files or directories containing '
'markdown files.')
parser.add_argument('--recurse', '-r', dest='recurse', default=False,
action='store_true',
help='Recurse into subdirectories looking for '
'markdown files.')
parser.add_argument('--test', action='store_true', dest='test',
default=False, help='Run unittest tests')
args = parser.parse_args()
if args.test:
run_tests()
return
else:
tocify_file_list(find_files(args.locations, recurse=args.recurse))
if __name__ == '__main__':
main()
| lgpl-3.0 |
rickyzhang82/bbb-linux | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
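# Example (illustrative): after add_stats(d, 'k', 10) and add_stats(d, 'k', 20),
# d['k'] == (10, 20, 15, 2), i.e. (min, max, running avg, count).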
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
| gpl-2.0 |
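A brief usage sketch for the helpers above. perf report scripts normally append .../Perf-Trace-Util/lib/Perf/Trace to sys.path before importing from Util, and the module is Python 2 (it uses has_key and the print statement), so the sketch assumes that environment.

# Hedged sketch (Python 2): aggregate per-key latencies with add_stats(),
# which keeps a (min, max, running-average, count) tuple per key.
from Util import add_stats, nsecs, nsecs_str

lat = {}
for secs, ns in ((0, 1500), (0, 900), (1, 250)):
    add_stats(lat, 'sys_read', nsecs(secs, ns))

mn, mx, avg, count = lat['sys_read']
print("sys_read: count=%d min=%s max=%s" % (count, nsecs_str(mn), nsecs_str(mx)))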
goodliang/bootstrap | node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/ordered_dict.py | 2354 | 10366 | # Unmodified from http://code.activestate.com/recipes/576693/
# other than to add MIT license header (as specified on page, but not in code).
# Linked from Python documentation here:
# http://docs.python.org/2/library/collections.html#collections.OrderedDict
#
# This should be deleted once Py2.7 is available on all bots, see
# http://crbug.com/241769.
#
# Copyright (c) 2009 Raymond Hettinger.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
        'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
        'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
# Suppress 'OrderedDict.update: Method has no argument':
# pylint: disable=E0211
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
| mit |
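The backport tracks the Python 2.7 stdlib class, so a short sketch of the behaviours the comments describe (insertion order, FIFO/LIFO popitem, order-sensitive equality) runs against either implementation.

# Usage sketch; falls back to the backport if the stdlib class is unavailable.
try:
    from collections import OrderedDict
except ImportError:
    from ordered_dict import OrderedDict  # the module above

od = OrderedDict()
od['b'] = 2
od['a'] = 1
od['c'] = 3
print(list(od))                # ['b', 'a', 'c'] -- insertion order is preserved
print(od.popitem(last=False))  # ('b', 2)        -- FIFO pop from the front
print(OrderedDict([('a', 1), ('b', 2)]) == OrderedDict([('b', 2), ('a', 1)]))  # False: order-sensitive
print(OrderedDict([('a', 1), ('b', 2)]) == {'b': 2, 'a': 1})                   # True: plain dicts ignore order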
MjAbuz/fabric | fabric/job_queue.py | 23 | 7673 | """
Sliding-window-based job/task queue class (& example of use.)
May use ``multiprocessing.Process`` or ``threading.Thread`` objects as queue
items, though within Fabric itself only ``Process`` objects are used/supported.
"""
from __future__ import with_statement
import time
import Queue
from fabric.state import env
from fabric.network import ssh
from fabric.context_managers import settings
class JobQueue(object):
"""
The goal of this class is to make a queue of processes to run, and go
through them running X number at any given time.
    So if the bubble is 5, start with 5 running and move the bubble of running
    procs along the queue, looking something like this:
Start
...........................
[~~~~~]....................
___[~~~~~].................
_________[~~~~~]...........
__________________[~~~~~]..
____________________[~~~~~]
___________________________
End
"""
def __init__(self, max_running, comms_queue):
"""
        Set up the class to reasonable defaults.
"""
self._queued = []
self._running = []
self._completed = []
self._num_of_jobs = 0
self._max = max_running
self._comms_queue = comms_queue
self._finished = False
self._closed = False
self._debug = False
def _all_alive(self):
"""
Simply states if all procs are alive or not. Needed to determine when
to stop looping, and pop dead procs off and add live ones.
"""
if self._running:
return all([x.is_alive() for x in self._running])
else:
return False
def __len__(self):
"""
Just going to use number of jobs as the JobQueue length.
"""
return self._num_of_jobs
def close(self):
"""
        A sanity check, so that the need to care about new jobs being added in
        the last throes of the job_queue's run is negated.
"""
if self._debug:
print("job queue closed.")
self._closed = True
def append(self, process):
"""
Add the Process() to the queue, so that later it can be checked up on.
That is if the JobQueue is still open.
If the queue is closed, this will just silently do nothing.
        To get data back out of this process, give ``process`` access to the
        ``multiprocessing.Queue`` instance handed to ``JobQueue`` as
        ``comms_queue``. ``JobQueue.run`` will then include the queue's
        contents in its return value.
"""
if not self._closed:
self._queued.append(process)
self._num_of_jobs += 1
if self._debug:
print("job queue appended %s." % process.name)
def run(self):
"""
        This is the workhorse. It will take the initial jobs from the _queue,
start them, add them to _running, and then go into the main running
loop.
        This loop will check for done procs and, if found, move them out of
        _running into _completed. It also checks _running for open slots,
        which it then fills from the queue as they appear.
To end the loop, there have to be no running procs, and no more procs
to be run in the queue.
This function returns an iterable of all its children's exit codes.
"""
def _advance_the_queue():
"""
            Helper function to do the job of popping a new proc off the queue,
            starting it, then adding it to the running queue. This will eventually
            deplete the _queue, which is a condition of stopping the running
while loop.
It also sets the env.host_string from the job.name, so that fabric
knows that this is the host to be making connections on.
"""
job = self._queued.pop()
if self._debug:
print("Popping '%s' off the queue and starting it" % job.name)
with settings(clean_revert=True, host_string=job.name, host=job.name):
job.start()
self._running.append(job)
# Prep return value so we can start filling it during main loop
results = {}
for job in self._queued:
results[job.name] = dict.fromkeys(('exit_code', 'results'))
if not self._closed:
raise Exception("Need to close() before starting.")
if self._debug:
print("Job queue starting.")
while len(self._running) < self._max:
_advance_the_queue()
# Main loop!
while not self._finished:
while len(self._running) < self._max and self._queued:
_advance_the_queue()
if not self._all_alive():
for id, job in enumerate(self._running):
if not job.is_alive():
if self._debug:
print("Job queue found finished proc: %s." %
job.name)
done = self._running.pop(id)
self._completed.append(done)
if self._debug:
print("Job queue has %d running." % len(self._running))
if not (self._queued or self._running):
if self._debug:
print("Job queue finished.")
for job in self._completed:
job.join()
self._finished = True
# Each loop pass, try pulling results off the queue to keep its
# size down. At this point, we don't actually care if any results
# have arrived yet; they will be picked up after the main loop.
self._fill_results(results)
time.sleep(ssh.io_sleep)
# Consume anything left in the results queue. Note that there is no
# need to block here, as the main loop ensures that all workers will
# already have finished.
self._fill_results(results)
# Attach exit codes now that we're all done & have joined all jobs
for job in self._completed:
results[job.name]['exit_code'] = job.exitcode
return results
def _fill_results(self, results):
"""
Attempt to pull data off self._comms_queue and add to 'results' dict.
If no data is available (i.e. the queue is empty), bail immediately.
"""
while True:
try:
datum = self._comms_queue.get_nowait()
results[datum['name']]['results'] = datum['result']
except Queue.Empty:
break
#### Sample
def try_using(parallel_type):
"""
    This will run the queue through its paces, and show a simple way of using
the job queue.
"""
def print_number(number):
"""
Simple function to give a simple task to execute.
"""
print(number)
if parallel_type == "multiprocessing":
from multiprocessing import Process as Bucket
elif parallel_type == "threading":
from threading import Thread as Bucket
# Make a job_queue with a bubble of len 5, and have it print verbosely
    # JobQueue's constructor also needs a communications queue for results.
    jobs = JobQueue(5, Queue.Queue())
jobs._debug = True
# Add 20 procs onto the stack
for x in range(20):
jobs.append(Bucket(
target=print_number,
args=[x],
kwargs={},
))
    # Close up the queue and then start its execution
jobs.close()
jobs.run()
if __name__ == '__main__':
try_using("multiprocessing")
try_using("threading")
| bsd-2-clause |
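The append() docstring describes handing workers a multiprocessing.Queue so run() can collect their output, but the try_using() sample never exercises it. A hedged sketch of that result contract, assuming Fabric 1.x (Python 2) is installed so the class imports as fabric.job_queue.JobQueue: each worker puts {'name': <job name>, 'result': <payload>} on the comms queue, which _fill_results() folds into run()'s return value.

# Hedged sketch of the comms-queue result contract (Python 2, Fabric 1.x).
import multiprocessing
from fabric.job_queue import JobQueue

def square(n, comms):
    # Report back under the process name, which run() uses as the result key.
    comms.put({'name': multiprocessing.current_process().name, 'result': n * n})

if __name__ == '__main__':
    comms = multiprocessing.Queue()
    jobs = JobQueue(2, comms)   # sliding window of at most 2 concurrent procs
    for n in range(4):
        jobs.append(multiprocessing.Process(target=square, args=(n, comms)))
    jobs.close()                # run() raises unless the queue is closed first
    print(jobs.run())           # e.g. {'Process-1': {'exit_code': 0, 'results': 0}, ...}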
Brett55/moto | moto/ec2/responses/__init__.py | 9 | 2225 | from __future__ import unicode_literals
from .account_attributes import AccountAttributes
from .amazon_dev_pay import AmazonDevPay
from .amis import AmisResponse
from .availability_zones_and_regions import AvailabilityZonesAndRegions
from .customer_gateways import CustomerGateways
from .dhcp_options import DHCPOptions
from .elastic_block_store import ElasticBlockStore
from .elastic_ip_addresses import ElasticIPAddresses
from .elastic_network_interfaces import ElasticNetworkInterfaces
from .general import General
from .instances import InstanceResponse
from .internet_gateways import InternetGateways
from .ip_addresses import IPAddresses
from .key_pairs import KeyPairs
from .monitoring import Monitoring
from .network_acls import NetworkACLs
from .placement_groups import PlacementGroups
from .reserved_instances import ReservedInstances
from .route_tables import RouteTables
from .security_groups import SecurityGroups
from .spot_fleets import SpotFleets
from .spot_instances import SpotInstances
from .subnets import Subnets
from .tags import TagResponse
from .virtual_private_gateways import VirtualPrivateGateways
from .vm_export import VMExport
from .vm_import import VMImport
from .vpcs import VPCs
from .vpc_peering_connections import VPCPeeringConnections
from .vpn_connections import VPNConnections
from .windows import Windows
from .nat_gateways import NatGateways
class EC2Response(
AccountAttributes,
AmazonDevPay,
AmisResponse,
AvailabilityZonesAndRegions,
CustomerGateways,
DHCPOptions,
ElasticBlockStore,
ElasticIPAddresses,
ElasticNetworkInterfaces,
General,
InstanceResponse,
InternetGateways,
IPAddresses,
KeyPairs,
Monitoring,
NetworkACLs,
PlacementGroups,
ReservedInstances,
RouteTables,
SecurityGroups,
SpotFleets,
SpotInstances,
Subnets,
TagResponse,
VirtualPrivateGateways,
VMExport,
VMImport,
VPCs,
VPCPeeringConnections,
VPNConnections,
Windows,
NatGateways,
):
@property
def ec2_backend(self):
from moto.ec2.models import ec2_backends
return ec2_backends[self.region]
@property
def should_autoescape(self):
return True
| apache-2.0 |
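EC2Response is only a mixin aggregator: each imported class implements the handlers for one slice of the EC2 API, and the ec2_backend property resolves the per-region in-memory backend. These handlers are normally reached through moto's public mock rather than instantiated directly; a hedged sketch, assuming this vintage of moto plus boto3 are installed and using a placeholder AMI id:

# Hedged sketch: drive the EC2 response handlers through moto's mock decorator.
import boto3
from moto import mock_ec2

@mock_ec2
def launch_and_list():
    ec2 = boto3.client('ec2', region_name='us-east-1')
    ec2.run_instances(ImageId='ami-12345678', MinCount=1, MaxCount=1)  # placeholder AMI
    reservations = ec2.describe_instances()['Reservations']
    return [i['InstanceId'] for r in reservations for i in r['Instances']]

print(launch_and_list())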